aboutsummaryrefslogtreecommitdiff
path: root/sys
diff options
context:
space:
mode:
Diffstat (limited to 'sys')
-rw-r--r--sys/amd64/amd64/machdep.c5
-rw-r--r--sys/amd64/amd64/vm_machdep.c3
-rw-r--r--sys/amd64/conf/GENERIC5
-rw-r--r--sys/amd64/conf/MINIMAL4
-rw-r--r--sys/amd64/conf/NOTES3
-rw-r--r--sys/amd64/include/ifunc.h5
-rw-r--r--sys/amd64/include/md_var.h1
-rw-r--r--sys/amd64/include/vmm.h133
-rw-r--r--sys/amd64/vmm/amd/svm.c2
-rw-r--r--sys/amd64/vmm/intel/vmx.c2
-rw-r--r--sys/amd64/vmm/io/vhpet.c4
-rw-r--r--sys/amd64/vmm/io/vioapic.c1
-rw-r--r--sys/amd64/vmm/io/vlapic.c1
-rw-r--r--sys/amd64/vmm/vmm.c623
-rw-r--r--sys/amd64/vmm/vmm_host.h2
-rw-r--r--sys/amd64/vmm/vmm_ioport.c1
-rw-r--r--sys/amd64/vmm/vmm_lapic.c1
-rw-r--r--sys/amd64/vmm/x86.c1
-rw-r--r--sys/arm/arm/gic.c15
-rw-r--r--sys/arm/arm/machdep.c2
-rw-r--r--sys/arm/broadcom/bcm2835/files.bcm283x2
-rw-r--r--sys/arm/include/machdep.h1
-rw-r--r--sys/arm64/arm64/identcpu.c8
-rw-r--r--sys/arm64/arm64/machdep.c35
-rw-r--r--sys/arm64/arm64/trap.c64
-rw-r--r--sys/arm64/conf/NOTES1
-rw-r--r--sys/arm64/include/armreg.h28
-rw-r--r--sys/arm64/include/ifunc.h28
-rw-r--r--sys/arm64/include/vmm.h116
-rw-r--r--sys/arm64/include/vmm_instruction_emul.h8
-rw-r--r--sys/arm64/iommu/smmu.c5
-rw-r--r--sys/arm64/vmm/io/vgic_v3.c6
-rw-r--r--sys/arm64/vmm/io/vtimer.c2
-rw-r--r--sys/arm64/vmm/vmm.c421
-rw-r--r--sys/arm64/vmm/vmm_arm64.c2
-rw-r--r--sys/arm64/vmm/vmm_mmu.c27
-rw-r--r--sys/arm64/vmm/vmm_reset.c2
-rw-r--r--sys/bsm/audit_kevents.h1
-rw-r--r--sys/cam/cam_periph.c1
-rw-r--r--sys/cam/scsi/scsi_all.c34
-rw-r--r--sys/cam/scsi/scsi_all.h4
-rw-r--r--sys/cam/scsi/scsi_cd.c2
-rw-r--r--sys/cddl/compat/opensolaris/sys/cpuvar_defs.h3
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/dtrace/fasttrap.c4
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/sys/dtrace.h5
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/sys/dtrace_impl.h7
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/sys/isa_defs.h206
-rw-r--r--sys/cddl/dev/sdt/sdt.c3
-rw-r--r--sys/compat/freebsd32/freebsd32.h40
-rw-r--r--sys/compat/freebsd32/freebsd32_misc.c32
-rw-r--r--sys/compat/freebsd32/freebsd32_proto.h9
-rw-r--r--sys/compat/freebsd32/freebsd32_syscall.h4
-rw-r--r--sys/compat/freebsd32/freebsd32_syscalls.c2
-rw-r--r--sys/compat/freebsd32/freebsd32_sysent.c2
-rw-r--r--sys/compat/freebsd32/freebsd32_systrace_args.c68
-rw-r--r--sys/compat/linux/linux_if.c11
-rw-r--r--sys/compat/linux/linux_ioctl.c120
-rw-r--r--sys/compat/linux/linux_ioctl.h29
-rw-r--r--sys/compat/linuxkpi/common/include/linux/acpi.h2
-rw-r--r--sys/compat/linuxkpi/common/include/linux/bitops.h12
-rw-r--r--sys/compat/linuxkpi/common/include/linux/dma-mapping.h9
-rw-r--r--sys/compat/linuxkpi/common/include/linux/eventfd.h54
-rw-r--r--sys/compat/linuxkpi/common/include/linux/ieee80211.h40
-rw-r--r--sys/compat/linuxkpi/common/include/linux/minmax.h3
-rw-r--r--sys/compat/linuxkpi/common/include/linux/netdevice.h24
-rw-r--r--sys/compat/linuxkpi/common/include/linux/pci.h27
-rw-r--r--sys/compat/linuxkpi/common/include/linux/ptp_clock_kernel.h1
-rw-r--r--sys/compat/linuxkpi/common/include/linux/seq_buf.h73
-rw-r--r--sys/compat/linuxkpi/common/include/linux/skbuff.h9
-rw-r--r--sys/compat/linuxkpi/common/include/linux/soc/airoha/airoha_offload.h48
-rw-r--r--sys/compat/linuxkpi/common/include/linux/soc/mediatek/mtk_wed.h14
-rw-r--r--sys/compat/linuxkpi/common/include/net/cfg80211.h8
-rw-r--r--sys/compat/linuxkpi/common/include/net/mac80211.h17
-rw-r--r--sys/compat/linuxkpi/common/src/linux_80211.c112
-rw-r--r--sys/compat/linuxkpi/common/src/linux_current.c10
-rw-r--r--sys/compat/linuxkpi/common/src/linux_eventfd.c63
-rw-r--r--sys/compat/linuxkpi/common/src/linux_firmware.c3
-rw-r--r--sys/compat/linuxkpi/common/src/linux_pci.c36
-rw-r--r--sys/compat/linuxkpi/common/src/linux_seq_buf.c64
-rw-r--r--sys/compat/linuxkpi/common/src/linux_shmemfs.c7
-rw-r--r--sys/compat/linuxkpi/dummy/include/linux/eventfd.h0
-rw-r--r--sys/conf/Makefile.powerpc5
-rw-r--r--sys/conf/NOTES3
-rw-r--r--sys/conf/files6
-rw-r--r--sys/conf/files.amd641
-rw-r--r--sys/conf/files.arm6431
-rw-r--r--sys/conf/files.powerpc51
-rw-r--r--sys/conf/files.x861
-rw-r--r--sys/conf/kern.mk8
-rw-r--r--sys/conf/kern.pre.mk2
-rw-r--r--sys/conf/kmod.mk2
-rw-r--r--sys/conf/ldscript.powerpcspe143
-rw-r--r--sys/conf/options1
-rw-r--r--sys/conf/options.arm643
-rw-r--r--sys/conf/options.powerpc1
-rw-r--r--sys/conf/std.nodebug1
-rw-r--r--sys/contrib/dev/athk/ath10k/core.c28
-rw-r--r--sys/contrib/dev/athk/ath10k/core.h6
-rw-r--r--sys/contrib/dev/athk/ath10k/mac.c2
-rw-r--r--sys/contrib/dev/athk/ath10k/qmi.c2
-rw-r--r--sys/contrib/dev/athk/ath10k/testmode.c253
-rw-r--r--sys/contrib/dev/athk/ath10k/testmode_i.h15
-rw-r--r--sys/contrib/dev/athk/ath10k/wmi.h19
-rw-r--r--sys/contrib/dev/mediatek/mt76/Kconfig51
-rw-r--r--sys/contrib/dev/mediatek/mt76/Makefile48
-rw-r--r--sys/contrib/dev/mediatek/mt76/agg-rx.c4
-rw-r--r--sys/contrib/dev/mediatek/mt76/channel.c15
-rw-r--r--sys/contrib/dev/mediatek/mt76/debugfs.c6
-rw-r--r--sys/contrib/dev/mediatek/mt76/dma.c302
-rw-r--r--sys/contrib/dev/mediatek/mt76/dma.h98
-rw-r--r--sys/contrib/dev/mediatek/mt76/eeprom.c87
-rw-r--r--sys/contrib/dev/mediatek/mt76/mac80211.c65
-rw-r--r--sys/contrib/dev/mediatek/mt76/mcu.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mmio.c20
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76.h232
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7603/Kconfig12
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7603/Makefile7
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7603/beacon.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7603/core.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7603/debugfs.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7603/dma.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7603/eeprom.c5
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7603/eeprom.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7603/init.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7603/mac.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7603/mac.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7603/main.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7603/mcu.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7603/mcu.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7603/mt7603.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7603/pci.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7603/regs.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7603/soc.c4
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/Kconfig56
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/Makefile2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/debugfs.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/dma.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/eeprom.c6
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/eeprom.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/init.c7
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/mac.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/mac.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/main.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/mcu.c6
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/mcu.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/mmio.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/mt7615.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/mt7615_trace.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/pci.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/pci_init.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/pci_mac.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/regs.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/sdio.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/soc.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/testmode.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/trace.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/usb.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7615/usb_sdio.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76_connac.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76_connac2_mac.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76_connac3_mac.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76_connac3_mac.h9
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76_connac_mac.c21
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76_connac_mcu.c39
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76_connac_mcu.h4
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x0/pci.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x0/pci_mcu.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x0/usb_mcu.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02_beacon.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02_debugfs.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02_dfs.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02_dfs.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02_dma.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02_eeprom.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02_eeprom.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02_mac.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02_mac.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02_mcu.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02_mcu.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02_mmio.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02_phy.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02_phy.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02_regs.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02_trace.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02_trace.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02_txrx.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02_usb.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02_usb_core.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02_usb_mcu.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x02_util.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/Kconfig29
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/Makefile15
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/eeprom.c6
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/eeprom.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/init.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/mac.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/mac.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/mcu.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/mcu.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/mt76x2.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/mt76x2u.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/pci.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/pci_init.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/pci_main.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/pci_mcu.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/pci_phy.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/phy.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/usb.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/usb_init.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/usb_mac.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/usb_main.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/usb_mcu.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt76x2/usb_phy.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7915/Kconfig2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7915/Makefile2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7915/coredump.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7915/coredump.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7915/debugfs.c76
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7915/dma.c6
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7915/eeprom.c6
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7915/eeprom.h8
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7915/init.c13
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7915/mac.c4
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7915/mac.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7915/main.c4
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7915/mcu.c203
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7915/mcu.h8
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7915/mmio.c8
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7915/mt7915.h11
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7915/pci.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7915/regs.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7915/soc.c23
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7915/testmode.c4
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7915/testmode.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7921/Kconfig2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7921/Makefile2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7921/debugfs.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7921/init.c8
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7921/mac.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7921/main.c4
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7921/mcu.c4
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7921/mcu.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7921/mt7921.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7921/pci.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7921/pci_mac.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7921/pci_mcu.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7921/regs.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7921/sdio.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7921/sdio_mac.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7921/sdio_mcu.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7921/testmode.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7921/usb.c5
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7925/Kconfig2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7925/Makefile4
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7925/debugfs.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7925/init.c156
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7925/mac.c6
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7925/mac.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7925/main.c107
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7925/mcu.c127
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7925/mcu.h10
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7925/mt7925.h11
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7925/pci.c41
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7925/pci_mac.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7925/pci_mcu.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7925/regd.c265
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7925/regd.h19
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7925/regs.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7925/testmode.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7925/usb.c5
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt792x.h5
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt792x_acpi_sar.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt792x_acpi_sar.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt792x_core.c9
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt792x_debugfs.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt792x_dma.c8
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt792x_mac.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt792x_regs.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt792x_trace.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt792x_trace.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt792x_usb.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7996/Kconfig9
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7996/Makefile3
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7996/coredump.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7996/coredump.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7996/debugfs.c74
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7996/dma.c343
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7996/eeprom.c5
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7996/eeprom.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7996/init.c390
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7996/mac.c836
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7996/mac.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7996/main.c614
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7996/mcu.c378
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7996/mcu.h19
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7996/mmio.c113
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7996/mt7996.h144
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7996/pci.c10
-rw-r--r--sys/contrib/dev/mediatek/mt76/mt7996/regs.h34
-rw-r--r--sys/contrib/dev/mediatek/mt76/pci.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/scan.c15
-rw-r--r--sys/contrib/dev/mediatek/mt76/sdio.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/sdio.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/sdio_txrx.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/testmode.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/testmode.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/trace.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/trace.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/tx.c11
-rw-r--r--sys/contrib/dev/mediatek/mt76/usb.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/usb_trace.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/usb_trace.h2
-rw-r--r--sys/contrib/dev/mediatek/mt76/util.c2
-rw-r--r--sys/contrib/dev/mediatek/mt76/util.h132
-rw-r--r--sys/contrib/dev/mediatek/mt76/wed.c20
-rw-r--r--sys/contrib/dev/rtw88/bf.c8
-rw-r--r--sys/contrib/dev/rtw88/bf.h7
-rw-r--r--sys/contrib/dev/rtw88/rtw8822bu.c2
-rw-r--r--sys/contrib/dev/rtw88/rtw8822cu.c2
-rw-r--r--sys/contrib/dev/rtw88/sdio.c4
-rw-r--r--sys/contrib/openzfs/module/nvpair/nvpair.c3
-rw-r--r--sys/crypto/sha2/sha256c.c8
-rw-r--r--sys/crypto/sha2/sha512c.c9
-rw-r--r--sys/dev/acpi_support/acpi_ibm.c4
-rw-r--r--sys/dev/acpica/acpi.c293
-rw-r--r--sys/dev/acpica/acpi_battery.c8
-rw-r--r--sys/dev/acpica/acpi_spmc.c618
-rw-r--r--sys/dev/acpica/acpiio.h1
-rw-r--r--sys/dev/acpica/acpivar.h1
-rw-r--r--sys/dev/aq/aq_common.h3
-rw-r--r--sys/dev/aq/aq_dbg.c6
-rw-r--r--sys/dev/aq/aq_device.h2
-rw-r--r--sys/dev/aq/aq_fw.c16
-rw-r--r--sys/dev/aq/aq_fw1x.c5
-rw-r--r--sys/dev/aq/aq_fw2x.c5
-rw-r--r--sys/dev/aq/aq_hw.c15
-rw-r--r--sys/dev/aq/aq_hw.h3
-rw-r--r--sys/dev/aq/aq_hw_llh.c96
-rw-r--r--sys/dev/aq/aq_media.c4
-rw-r--r--sys/dev/aq/aq_ring.c18
-rw-r--r--sys/dev/asmc/asmc.c7
-rw-r--r--sys/dev/asmc/asmcvar.h35
-rw-r--r--sys/dev/ata/ata-pci.h4
-rw-r--r--sys/dev/bge/if_bge.c14
-rw-r--r--sys/dev/bnxt/bnxt_en/bnxt.h8
-rw-r--r--sys/dev/bnxt/bnxt_en/bnxt_hwrm.c13
-rw-r--r--sys/dev/bnxt/bnxt_en/bnxt_mgmt.c9
-rw-r--r--sys/dev/bnxt/bnxt_en/bnxt_mgmt.h11
-rw-r--r--sys/dev/bnxt/bnxt_en/bnxt_ulp.c6
-rw-r--r--sys/dev/bnxt/bnxt_en/bnxt_ulp.h8
-rw-r--r--sys/dev/bnxt/bnxt_en/if_bnxt.c29
-rw-r--r--sys/dev/bnxt/bnxt_re/bnxt_re-abi.h12
-rw-r--r--sys/dev/bnxt/bnxt_re/bnxt_re.h12
-rw-r--r--sys/dev/bnxt/bnxt_re/ib_verbs.c240
-rw-r--r--sys/dev/bnxt/bnxt_re/ib_verbs.h25
-rw-r--r--sys/dev/bnxt/bnxt_re/main.c79
-rw-r--r--sys/dev/bnxt/bnxt_re/qplib_fp.c115
-rw-r--r--sys/dev/bnxt/bnxt_re/qplib_fp.h13
-rw-r--r--sys/dev/bnxt/bnxt_re/qplib_res.c3
-rw-r--r--sys/dev/bnxt/bnxt_re/qplib_res.h19
-rw-r--r--sys/dev/bnxt/bnxt_re/qplib_sp.c48
-rw-r--r--sys/dev/bnxt/bnxt_re/qplib_sp.h9
-rw-r--r--sys/dev/clk/rockchip/rk_clk_fract.c2
-rw-r--r--sys/dev/cxgbe/common/t4_hw.c13
-rw-r--r--sys/dev/cxgbe/common/t4_regs.h1056
-rw-r--r--sys/dev/cxgbe/firmware/t7fw_cfg.txt34
-rw-r--r--sys/dev/cxgbe/firmware/t7fw_cfg_uwire.txt26
-rw-r--r--sys/dev/cxgbe/t4_netmap.c4
-rw-r--r--sys/dev/cxgbe/t4_sge.c131
-rw-r--r--sys/dev/dpaa/bman_fdt.c39
-rw-r--r--sys/dev/dpaa/qman_fdt.c40
-rw-r--r--sys/dev/dpaa2/dpaa2_ni.c28
-rw-r--r--sys/dev/dwc/dwc1000_core.c2
-rw-r--r--sys/dev/dwc/dwc1000_dma.c25
-rw-r--r--sys/dev/dwc/if_dwc.c20
-rw-r--r--sys/dev/hpt27xx/hpt27xx_osm_bsd.c10
-rw-r--r--sys/dev/hptmv/entry.c3
-rw-r--r--sys/dev/hptnr/hptnr_osm_bsd.c10
-rw-r--r--sys/dev/hptrr/hptrr_osm_bsd.c10
-rw-r--r--sys/dev/iicbus/adc/ads111x.c12
-rw-r--r--sys/dev/isl/isl.c10
-rw-r--r--sys/dev/jme/if_jme.c10
-rw-r--r--sys/dev/md/md.c2
-rw-r--r--sys/dev/mpr/mpr.c1
-rw-r--r--sys/dev/mpr/mpr_config.c1
-rw-r--r--sys/dev/mpr/mpr_mapping.c1
-rw-r--r--sys/dev/mpr/mpr_pci.c1
-rw-r--r--sys/dev/mpr/mpr_sas.c10
-rw-r--r--sys/dev/mpr/mpr_sas_lsi.c2
-rw-r--r--sys/dev/mpr/mpr_table.c1
-rw-r--r--sys/dev/mpr/mpr_user.c1
-rw-r--r--sys/dev/mps/mps.c1
-rw-r--r--sys/dev/mps/mps_config.c1
-rw-r--r--sys/dev/mps/mps_mapping.c1
-rw-r--r--sys/dev/mps/mps_pci.c1
-rw-r--r--sys/dev/mps/mps_sas.c10
-rw-r--r--sys/dev/mps/mps_sas_lsi.c2
-rw-r--r--sys/dev/mps/mps_table.c1
-rw-r--r--sys/dev/mps/mps_user.c1
-rw-r--r--sys/dev/mxge/if_mxge.c2
-rw-r--r--sys/dev/nvme/nvme_ctrlr.c46
-rw-r--r--sys/dev/nvme/nvme_ctrlr_cmd.c12
-rw-r--r--sys/dev/nvme/nvme_private.h21
-rw-r--r--sys/dev/sdio/sdio_subr.c30
-rw-r--r--sys/dev/sdio/sdio_subr.h2
-rw-r--r--sys/dev/smartpqi/smartpqi_cam.c53
-rw-r--r--sys/dev/smartpqi/smartpqi_cmd.c4
-rw-r--r--sys/dev/smartpqi/smartpqi_controllers.h371
-rw-r--r--sys/dev/smartpqi/smartpqi_defines.h43
-rw-r--r--sys/dev/smartpqi/smartpqi_discovery.c167
-rw-r--r--sys/dev/smartpqi/smartpqi_event.c10
-rw-r--r--sys/dev/smartpqi/smartpqi_helper.c32
-rw-r--r--sys/dev/smartpqi/smartpqi_init.c13
-rw-r--r--sys/dev/smartpqi/smartpqi_intr.c3
-rw-r--r--sys/dev/smartpqi/smartpqi_ioctl.c12
-rw-r--r--sys/dev/smartpqi/smartpqi_main.c320
-rw-r--r--sys/dev/smartpqi/smartpqi_misc.c28
-rw-r--r--sys/dev/smartpqi/smartpqi_prototypes.h48
-rw-r--r--sys/dev/smartpqi/smartpqi_queue.c25
-rw-r--r--sys/dev/smartpqi/smartpqi_request.c100
-rw-r--r--sys/dev/smartpqi/smartpqi_response.c14
-rw-r--r--sys/dev/smartpqi/smartpqi_sis.c6
-rw-r--r--sys/dev/smartpqi/smartpqi_structures.h5
-rw-r--r--sys/dev/sound/midi/midi.c18
-rw-r--r--sys/dev/sound/midi/midiq.h4
-rw-r--r--sys/dev/sound/pci/cs4281.c2
-rw-r--r--sys/dev/sound/pci/hdspe-pcm.c4
-rw-r--r--sys/dev/sound/usb/uaudio.c4
-rw-r--r--sys/dev/sym/sym_hipd.c308
-rw-r--r--sys/dev/ufshci/ufshci_sim.c2
-rw-r--r--sys/dev/usb/usb_device.c2
-rw-r--r--sys/dev/usb/wlan/if_mtw.c33
-rw-r--r--sys/dev/vmm/vmm_dev.c41
-rw-r--r--sys/dev/vmm/vmm_ktr.h4
-rw-r--r--sys/dev/vmm/vmm_mem.c1
-rw-r--r--sys/dev/vmm/vmm_mem.h3
-rw-r--r--sys/dev/vmm/vmm_stat.h2
-rw-r--r--sys/dev/vmm/vmm_vm.c476
-rw-r--r--sys/dev/vmm/vmm_vm.h233
-rw-r--r--sys/dev/vt/vt.h1
-rw-r--r--sys/dev/vt/vt_core.c6
-rw-r--r--sys/dev/wg/if_wg.c4
-rw-r--r--sys/fs/fuse/fuse_device.c6
-rw-r--r--sys/fs/fuse/fuse_internal.c3
-rw-r--r--sys/fs/fuse/fuse_ipc.c4
-rw-r--r--sys/fs/fuse/fuse_ipc.h5
-rw-r--r--sys/fs/fuse/fuse_vfsops.c1
-rw-r--r--sys/fs/fuse/fuse_vnops.c95
-rw-r--r--sys/fs/nfs/nfs.h5
-rw-r--r--sys/fs/nfs/nfs_var.h2
-rw-r--r--sys/fs/nfs/nfsdport.h2
-rw-r--r--sys/fs/nfs/nfsport.h2
-rw-r--r--sys/fs/nfsclient/nfs_clrpcops.c20
-rw-r--r--sys/fs/nfsclient/nfs_clvnops.c12
-rw-r--r--sys/fs/nfsserver/nfs_nfsdport.c57
-rw-r--r--sys/fs/nfsserver/nfs_nfsdserv.c13
-rw-r--r--sys/fs/nfsserver/nfs_nfsdsubs.c44
-rw-r--r--sys/fs/unionfs/union_subr.c2
-rw-r--r--sys/i386/conf/MINIMAL2
-rw-r--r--sys/i386/i386/vm_machdep.c3
-rw-r--r--sys/i386/include/ifunc.h5
-rw-r--r--sys/kern/init_main.c12
-rw-r--r--sys/kern/init_sysent.c2
-rw-r--r--sys/kern/kern_exit.c405
-rw-r--r--sys/kern/kern_fork.c95
-rw-r--r--sys/kern/kern_jail.c204
-rw-r--r--sys/kern/kern_jaildesc.c77
-rw-r--r--sys/kern/kern_ktrace.c10
-rw-r--r--sys/kern/kern_sendfile.c9
-rw-r--r--sys/kern/kern_time.c1
-rw-r--r--sys/kern/subr_bus.c2
-rw-r--r--sys/kern/subr_capability.c1
-rw-r--r--sys/kern/subr_devstat.c56
-rw-r--r--sys/kern/subr_smp.c13
-rw-r--r--sys/kern/subr_witness.c352
-rw-r--r--sys/kern/sys_eventfd.c55
-rw-r--r--sys/kern/sys_procdesc.c8
-rw-r--r--sys/kern/syscalls.c2
-rw-r--r--sys/kern/syscalls.master19
-rw-r--r--sys/kern/systrace_args.c68
-rw-r--r--sys/kern/uipc_domain.c16
-rw-r--r--sys/kern/uipc_shm.c2
-rw-r--r--sys/kern/vfs_cache.c77
-rw-r--r--sys/modules/Makefile30
-rw-r--r--sys/modules/ath10k/Makefile1
-rw-r--r--sys/modules/iwlwifi/Makefile9
-rw-r--r--sys/modules/linuxkpi/Makefile2
-rw-r--r--sys/modules/mt76/Makefile.inc3
-rw-r--r--sys/modules/mt76/mt7925/Makefile2
-rw-r--r--sys/modules/rtw88/Makefile2
-rw-r--r--sys/modules/rtw89/Makefile15
-rw-r--r--sys/modules/sctp/Makefile1
-rw-r--r--sys/modules/vmm/Makefile3
-rw-r--r--sys/modules/zfs/Makefile4
-rw-r--r--sys/net/if.c4
-rw-r--r--sys/net/if_enc.c2
-rw-r--r--sys/net/if_ethersubr.c4
-rw-r--r--sys/net/if_ovpn.c32
-rw-r--r--sys/net/iflib.c66
-rw-r--r--sys/net/pfvar.h399
-rw-r--r--sys/net80211/ieee80211_ht.c18
-rw-r--r--sys/net80211/ieee80211_proto.c4
-rw-r--r--sys/net80211/ieee80211_radiotap.c15
-rw-r--r--sys/net80211/ieee80211_radiotap.h21
-rw-r--r--sys/netgraph/bluetooth/include/ng_hci.h4
-rw-r--r--sys/netgraph/netflow/netflow_v9.c1
-rw-r--r--sys/netinet/icmp6.h3
-rw-r--r--sys/netinet/ip6.h12
-rw-r--r--sys/netinet/ip_fastfwd.c32
-rw-r--r--sys/netinet/ip_input.c6
-rw-r--r--sys/netinet/ip_mroute.c41
-rw-r--r--sys/netinet/ip_mroute.h10
-rw-r--r--sys/netinet/sctp_bsd_addr.c1
-rw-r--r--sys/netinet/sctp_output.c6
-rw-r--r--sys/netinet6/icmp6.c3
-rw-r--r--sys/netinet6/in6.c90
-rw-r--r--sys/netinet6/in6.h2
-rw-r--r--sys/netinet6/in6_ifattach.c40
-rw-r--r--sys/netinet6/in6_rmx.c6
-rw-r--r--sys/netinet6/in6_src.c6
-rw-r--r--sys/netinet6/in6_var.h74
-rw-r--r--sys/netinet6/ip6_forward.c4
-rw-r--r--sys/netinet6/ip6_input.c113
-rw-r--r--sys/netinet6/ip6_mroute.c2
-rw-r--r--sys/netinet6/ip6_output.c115
-rw-r--r--sys/netinet6/ip6_var.h3
-rw-r--r--sys/netinet6/mld6.c54
-rw-r--r--sys/netinet6/mld6_var.h22
-rw-r--r--sys/netinet6/nd6.c188
-rw-r--r--sys/netinet6/nd6.h70
-rw-r--r--sys/netinet6/nd6_nbr.c24
-rw-r--r--sys/netinet6/nd6_rtr.c94
-rw-r--r--sys/netinet6/scope6.c19
-rw-r--r--sys/netinet6/scope6_var.h13
-rw-r--r--sys/netinet6/udp6_usrreq.c22
-rw-r--r--sys/netlink/ktest_netlink_message_writer.c5
-rw-r--r--sys/netlink/ktest_netlink_message_writer.h2
-rw-r--r--sys/netlink/route/iface.c13
-rw-r--r--sys/netpfil/ipfw/ip_fw2.c14
-rw-r--r--sys/netpfil/ipfw/ip_fw_bpf.c62
-rw-r--r--sys/netpfil/ipfw/ip_fw_dynamic.c55
-rw-r--r--sys/netpfil/ipfw/ip_fw_iface.c29
-rw-r--r--sys/netpfil/ipfw/ip_fw_log.c4
-rw-r--r--sys/netpfil/ipfw/ip_fw_nat.c5
-rw-r--r--sys/netpfil/ipfw/ip_fw_private.h31
-rw-r--r--sys/netpfil/ipfw/ip_fw_sockopt.c88
-rw-r--r--sys/netpfil/ipfw/ip_fw_table.c299
-rw-r--r--sys/netpfil/ipfw/ip_fw_table.h76
-rw-r--r--sys/netpfil/ipfw/ip_fw_table_value.c125
-rw-r--r--sys/netpfil/pf/pf.c703
-rw-r--r--sys/netpfil/pf/pf.h7
-rw-r--r--sys/netpfil/pf/pf_ioctl.c792
-rw-r--r--sys/netpfil/pf/pf_nl.c460
-rw-r--r--sys/netpfil/pf/pf_nl.h83
-rw-r--r--sys/netpfil/pf/pf_table.c20
-rw-r--r--sys/powerpc/aim/moea64_native.c5
-rw-r--r--sys/powerpc/booke/booke_machdep.c8
-rw-r--r--sys/powerpc/booke/spe.c685
-rw-r--r--sys/powerpc/booke/trap_subr.S22
-rw-r--r--sys/powerpc/conf/MPC85XXSPE151
-rw-r--r--sys/powerpc/include/atomic.h82
-rw-r--r--sys/powerpc/include/ieeefp.h8
-rw-r--r--sys/powerpc/include/param.h4
-rw-r--r--sys/powerpc/mpc85xx/pci_mpc85xx.c19
-rw-r--r--sys/powerpc/mpc85xx/platform_mpc85xx.c16
-rw-r--r--sys/powerpc/powermac/platform_powermac.c99
-rw-r--r--sys/powerpc/powerpc/exec_machdep.c22
-rw-r--r--sys/powerpc/powerpc/fpu.c3
-rw-r--r--sys/powerpc/powerpc/machdep.c5
-rw-r--r--sys/powerpc/powerpc/ptrace_machdep.c4
-rw-r--r--sys/powerpc/powerpc/swtch32.S12
-rw-r--r--sys/powerpc/powerpc/trap.c36
-rw-r--r--sys/powerpc/powerpc/vm_machdep.c4
-rw-r--r--sys/riscv/include/acpica_machdep.h19
-rw-r--r--sys/riscv/include/cpufunc.h7
-rw-r--r--sys/riscv/include/vmm.h98
-rw-r--r--sys/riscv/include/vmm_instruction_emul.h6
-rw-r--r--sys/riscv/riscv/pmap.c98
-rw-r--r--sys/riscv/vmm/vmm.c413
-rw-r--r--sys/riscv/vmm/vmm_aplic.c4
-rw-r--r--sys/riscv/vmm/vmm_fence.c8
-rw-r--r--sys/riscv/vmm/vmm_riscv.c4
-rw-r--r--sys/riscv/vmm/vmm_sbi.c2
-rw-r--r--sys/riscv/vmm/vmm_vtimer.c2
-rw-r--r--sys/rpc/rpcsec_tls/rpctls_impl.c2
-rw-r--r--sys/rpc/xdr.h9
-rw-r--r--sys/security/audit/audit_bsm.c28
-rw-r--r--sys/security/mac/mac_framework.c1
-rw-r--r--sys/security/mac/mac_framework.h18
-rw-r--r--sys/security/mac/mac_internal.h32
-rw-r--r--sys/security/mac/mac_policy.h48
-rw-r--r--sys/security/mac/mac_prison.c255
-rw-r--r--sys/security/mac/mac_syscalls.c242
-rw-r--r--sys/security/mac/mac_syscalls.h10
-rw-r--r--sys/security/mac_stub/mac_stub.c84
-rw-r--r--sys/security/mac_test/mac_test.c172
-rw-r--r--sys/sys/_types.h5
-rw-r--r--sys/sys/abi_compat.h10
-rw-r--r--sys/sys/buf_ring.h12
-rw-r--r--sys/sys/caprights.h1
-rw-r--r--sys/sys/eventfd.h5
-rw-r--r--sys/sys/exterr_cat.h2
-rw-r--r--sys/sys/jail.h1
-rw-r--r--sys/sys/jaildesc.h1
-rw-r--r--sys/sys/module.h8
-rw-r--r--sys/sys/param.h2
-rw-r--r--sys/sys/procdesc.h6
-rw-r--r--sys/sys/sdt.h2
-rw-r--r--sys/sys/smp.h8
-rw-r--r--sys/sys/syscall.h4
-rw-r--r--sys/sys/syscall.mk4
-rw-r--r--sys/sys/syscallsubr.h2
-rw-r--r--sys/sys/sysproto.h16
-rw-r--r--sys/vm/swap_pager.c64
-rw-r--r--sys/vm/vm.h1
-rw-r--r--sys/vm/vm_fault.c5
-rw-r--r--sys/vm/vm_init.c8
-rw-r--r--sys/vm/vm_map.c52
-rw-r--r--sys/vm/vm_object.c46
-rw-r--r--sys/vm/vm_object.h4
-rw-r--r--sys/x86/cpufreq/hwpstate_amd.c154
-rw-r--r--sys/x86/include/apicreg.h7
-rw-r--r--sys/x86/include/intr_machdep.h1
-rw-r--r--sys/x86/x86/intr_machdep.c9
-rw-r--r--sys/x86/x86/local_apic.c208
-rw-r--r--sys/xdr/xdr.c7
627 files changed, 16217 insertions, 8567 deletions
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index 2fce1a7e64b6..cae58181000f 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -1518,13 +1518,11 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
/*
* We initialize the PCB pointer early so that exception
- * handlers will work. Also set up td_critnest to short-cut
- * the page fault handler.
+ * handlers will work.
*/
cpu_max_ext_state_size = sizeof(struct savefpu);
set_top_of_stack_td(&thread0);
thread0.td_pcb = get_pcb_td(&thread0);
- thread0.td_critnest = 1;
/*
* The console and kdb should be initialized even earlier than here,
@@ -1615,7 +1613,6 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
#ifdef FDT
x86_init_fdt();
#endif
- thread0.td_critnest = 0;
kasan_init();
kmsan_init();
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index c763ff58680e..2e180003e93d 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -93,8 +93,7 @@ set_top_of_stack_td(struct thread *td)
struct savefpu *
get_pcb_user_save_td(struct thread *td)
{
- KASSERT(((vm_offset_t)td->td_md.md_usr_fpu_save %
- XSAVE_AREA_ALIGN) == 0,
+ KASSERT(__is_aligned(td->td_md.md_usr_fpu_save, XSAVE_AREA_ALIGN),
("Unaligned pcb_user_save area ptr %p td %p",
td->td_md.md_usr_fpu_save, td));
return (td->td_md.md_usr_fpu_save);
diff --git a/sys/amd64/conf/GENERIC b/sys/amd64/conf/GENERIC
index ef7ce215474e..1003572805fb 100644
--- a/sys/amd64/conf/GENERIC
+++ b/sys/amd64/conf/GENERIC
@@ -240,8 +240,10 @@ device vmx # VMware VMXNET3 Ethernet
device axp # AMD EPYC integrated NIC (requires miibus)
# PCI Ethernet NICs.
+device aq # Aquantia / Marvell AQC1xx
device bxe # Broadcom NetXtreme II BCM5771X/BCM578XX 10GbE
device le # AMD Am7900 LANCE and Am79C9xx PCnet
+device rge # Realtek 8125/8126/8127
device ti # Alteon Networks Tigon I/II gigabit Ethernet
# Nvidia/Mellanox Connect-X 4 and later, Ethernet only
@@ -324,7 +326,8 @@ device bpf # Berkeley packet filter
# random(4)
device rdrand_rng # Intel Bull Mountain RNG
-device tpm # Trusted Platform Module
+# Disabled for now since tpm(4) breaks suspend/resume.
+#device tpm # Trusted Platform Module
options RANDOM_ENABLE_TPM # enable entropy from TPM 2.0
options RANDOM_ENABLE_KBD
options RANDOM_ENABLE_MOUSE
diff --git a/sys/amd64/conf/MINIMAL b/sys/amd64/conf/MINIMAL
index 8df3349b4c34..fec4be6da8f7 100644
--- a/sys/amd64/conf/MINIMAL
+++ b/sys/amd64/conf/MINIMAL
@@ -122,7 +122,8 @@ device bpf # Berkeley packet filter
# random(4)
device rdrand_rng # Intel Bull Mountain RNG
-device tpm # Trusted Platform Module
+# Disabled for now since tpm(4) breaks suspend/resume.
+#device tpm # Trusted Platform Module
options RANDOM_ENABLE_TPM # enable entropy from TPM 2.0
options RANDOM_ENABLE_KBD
options RANDOM_ENABLE_MOUSE
@@ -133,6 +134,7 @@ device virtio_pci # VirtIO PCI device
device vtnet # VirtIO Ethernet device
device virtio_blk # VirtIO Block device
device virtio_balloon # VirtIO Memory Balloon device
+device virtio_scsi # VirtIO SCSI device
# Linux KVM paravirtualization support
device kvm_clock # KVM paravirtual clock driver
diff --git a/sys/amd64/conf/NOTES b/sys/amd64/conf/NOTES
index efcc03623c05..b1a6995e90eb 100644
--- a/sys/amd64/conf/NOTES
+++ b/sys/amd64/conf/NOTES
@@ -43,6 +43,9 @@ cpu HAMMER # aka K8, aka Opteron & Athlon64
# Optional devices:
#
+# vt(4) drivers.
+device vt_efifb # EFI framebuffer
+
# 3Dfx Voodoo Graphics, Voodoo II /dev/3dfx CDEV support. This will create
# the /dev/3dfx0 device to work with glide implementations. This should get
# linked to /dev/3dfx and /dev/voodoo. Note that this is not the same as
diff --git a/sys/amd64/include/ifunc.h b/sys/amd64/include/ifunc.h
new file mode 100644
index 000000000000..1af46757b836
--- /dev/null
+++ b/sys/amd64/include/ifunc.h
@@ -0,0 +1,5 @@
+/*
+ * This file is in the public domain.
+ */
+
+#include <x86/ifunc.h>
diff --git a/sys/amd64/include/md_var.h b/sys/amd64/include/md_var.h
index b6d8c469cdf6..19eab48168f7 100644
--- a/sys/amd64/include/md_var.h
+++ b/sys/amd64/include/md_var.h
@@ -58,6 +58,7 @@ extern vm_paddr_t KERNend;
extern bool efi_boot;
struct __mcontext;
+struct pcpu;
struct savefpu;
struct sysentvec;
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
index 5cf1ae2d769c..eef8e6760fd6 100644
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -123,6 +123,33 @@ enum x2apic_state {
#define VM_INTINFO_SWINTR (4 << 8)
#ifdef _KERNEL
+#define VMM_VCPU_MD_FIELDS \
+ struct vlapic *vlapic; /* (i) APIC device model */ \
+ enum x2apic_state x2apic_state; /* (i) APIC mode */ \
+ uint64_t exitintinfo; /* (i) events pending at VM exit */ \
+ int nmi_pending; /* (i) NMI pending */ \
+ int extint_pending; /* (i) INTR pending */ \
+ int exception_pending; /* (i) exception pending */ \
+ int exc_vector; /* (x) exception collateral */ \
+ int exc_errcode_valid; \
+ uint32_t exc_errcode; \
+ struct savefpu *guestfpu; /* (a,i) guest fpu state */ \
+ uint64_t guest_xcr0; /* (i) guest %xcr0 register */ \
+ struct vm_exit exitinfo; /* (x) exit reason and collateral */ \
+ cpuset_t exitinfo_cpuset; /* (x) storage for vmexit handlers */ \
+ uint64_t nextrip; /* (x) next instruction to execute */ \
+ uint64_t tsc_offset /* (o) TSC offsetting */
+
+#define VMM_VM_MD_FIELDS \
+ cpuset_t startup_cpus; /* (i) [r] waiting for startup */ \
+ void *iommu; /* (x) iommu-specific data */ \
+ struct vioapic *vioapic; /* (i) virtual ioapic */ \
+ struct vatpic *vatpic; /* (i) virtual atpic */ \
+ struct vatpit *vatpit; /* (i) virtual atpit */ \
+ struct vpmtmr *vpmtmr; /* (i) virtual ACPI PM timer */ \
+ struct vrtc *vrtc; /* (o) virtual RTC */ \
+ struct vhpet *vhpet /* (i) virtual HPET */
+
struct vm;
struct vm_exception;
struct vm_mem;
@@ -133,17 +160,12 @@ struct vhpet;
struct vioapic;
struct vlapic;
struct vmspace;
+struct vm_eventinfo;
struct vm_object;
struct vm_guest_paging;
struct pmap;
enum snapshot_req;
-struct vm_eventinfo {
- cpuset_t *rptr; /* rendezvous cookie */
- int *sptr; /* suspend cookie */
- int *iptr; /* reqidle cookie */
-};
-
#define DECLARE_VMMOPS_FUNC(ret_type, opname, args) \
typedef ret_type (*vmmops_##opname##_t) args; \
ret_type vmmops_##opname args
@@ -206,20 +228,6 @@ struct vmm_ops {
extern const struct vmm_ops vmm_ops_intel;
extern const struct vmm_ops vmm_ops_amd;
-int vm_create(const char *name, struct vm **retvm);
-struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid);
-void vm_disable_vcpu_creation(struct vm *vm);
-void vm_lock_vcpus(struct vm *vm);
-void vm_unlock_vcpus(struct vm *vm);
-void vm_destroy(struct vm *vm);
-int vm_reinit(struct vm *vm);
-const char *vm_name(struct vm *vm);
-uint16_t vm_get_maxcpus(struct vm *vm);
-void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
- uint16_t *threads, uint16_t *maxcpus);
-int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
- uint16_t threads, uint16_t maxcpus);
-
int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_assign_pptdev(struct vm *vm, int bus, int slot, int func);
@@ -232,16 +240,12 @@ int vm_get_seg_desc(struct vcpu *vcpu, int reg,
int vm_set_seg_desc(struct vcpu *vcpu, int reg,
struct seg_desc *desc);
int vm_run(struct vcpu *vcpu);
-int vm_suspend(struct vm *vm, enum vm_suspend_how how);
int vm_inject_nmi(struct vcpu *vcpu);
int vm_nmi_pending(struct vcpu *vcpu);
void vm_nmi_clear(struct vcpu *vcpu);
int vm_inject_extint(struct vcpu *vcpu);
int vm_extint_pending(struct vcpu *vcpu);
void vm_extint_clear(struct vcpu *vcpu);
-int vcpu_vcpuid(struct vcpu *vcpu);
-struct vm *vcpu_vm(struct vcpu *vcpu);
-struct vcpu *vm_vcpu(struct vm *vm, int cpu);
struct vlapic *vm_lapic(struct vcpu *vcpu);
struct vioapic *vm_ioapic(struct vm *vm);
struct vhpet *vm_hpet(struct vm *vm);
@@ -250,9 +254,6 @@ int vm_set_capability(struct vcpu *vcpu, int type, int val);
int vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state);
int vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state);
int vm_apicid2vcpuid(struct vm *vm, int apicid);
-int vm_activate_cpu(struct vcpu *vcpu);
-int vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu);
-int vm_resume_cpu(struct vm *vm, struct vcpu *vcpu);
int vm_restart_instruction(struct vcpu *vcpu);
struct vm_exit *vm_exitinfo(struct vcpu *vcpu);
cpuset_t *vm_exitinfo_cpuset(struct vcpu *vcpu);
@@ -265,57 +266,10 @@ int vm_snapshot_req(struct vm *vm, struct vm_snapshot_meta *meta);
int vm_restore_time(struct vm *vm);
#ifdef _SYS__CPUSET_H_
-/*
- * Rendezvous all vcpus specified in 'dest' and execute 'func(arg)'.
- * The rendezvous 'func(arg)' is not allowed to do anything that will
- * cause the thread to be put to sleep.
- *
- * The caller cannot hold any locks when initiating the rendezvous.
- *
- * The implementation of this API may cause vcpus other than those specified
- * by 'dest' to be stalled. The caller should not rely on any vcpus making
- * forward progress when the rendezvous is in progress.
- */
-typedef void (*vm_rendezvous_func_t)(struct vcpu *vcpu, void *arg);
-int vm_smp_rendezvous(struct vcpu *vcpu, cpuset_t dest,
- vm_rendezvous_func_t func, void *arg);
-
-cpuset_t vm_active_cpus(struct vm *vm);
-cpuset_t vm_debug_cpus(struct vm *vm);
-cpuset_t vm_suspended_cpus(struct vm *vm);
cpuset_t vm_start_cpus(struct vm *vm, const cpuset_t *tostart);
void vm_await_start(struct vm *vm, const cpuset_t *waiting);
#endif /* _SYS__CPUSET_H_ */
-static __inline int
-vcpu_rendezvous_pending(struct vcpu *vcpu, struct vm_eventinfo *info)
-{
- /*
- * This check isn't done with atomic operations or under a lock because
- * there's no need to. If the vcpuid bit is set, the vcpu is part of a
- * rendezvous and the bit won't be cleared until the vcpu enters the
- * rendezvous. On rendezvous exit, the cpuset is cleared and the vcpu
- * will see an empty cpuset. So, the races are harmless.
- */
- return (CPU_ISSET(vcpu_vcpuid(vcpu), info->rptr));
-}
-
-static __inline int
-vcpu_suspended(struct vm_eventinfo *info)
-{
-
- return (*info->sptr);
-}
-
-static __inline int
-vcpu_reqidle(struct vm_eventinfo *info)
-{
-
- return (*info->iptr);
-}
-
-int vcpu_debugged(struct vcpu *vcpu);
-
/*
* Return true if device indicated by bus/slot/func is supposed to be a
* pci passthrough device.
@@ -326,38 +280,7 @@ bool vmm_is_pptdev(int bus, int slot, int func);
void *vm_iommu_domain(struct vm *vm);
-enum vcpu_state {
- VCPU_IDLE,
- VCPU_FROZEN,
- VCPU_RUNNING,
- VCPU_SLEEPING,
-};
-
-int vcpu_set_state(struct vcpu *vcpu, enum vcpu_state state, bool from_idle);
-int vcpu_set_state_all(struct vm *vm, enum vcpu_state state);
-enum vcpu_state vcpu_get_state(struct vcpu *vcpu, int *hostcpu);
-
-static int __inline
-vcpu_is_running(struct vcpu *vcpu, int *hostcpu)
-{
- return (vcpu_get_state(vcpu, hostcpu) == VCPU_RUNNING);
-}
-
-#ifdef _SYS_PROC_H_
-static int __inline
-vcpu_should_yield(struct vcpu *vcpu)
-{
- struct thread *td;
-
- td = curthread;
- return (td->td_ast != 0 || td->td_owepreempt != 0);
-}
-#endif
-
-void *vcpu_stats(struct vcpu *vcpu);
-void vcpu_notify_event(struct vcpu *vcpu);
void vcpu_notify_lapic(struct vcpu *vcpu);
-struct vm_mem *vm_mem(struct vm *vm);
struct vatpic *vm_atpic(struct vm *vm);
struct vatpit *vm_atpit(struct vm *vm);
struct vpmtmr *vm_pmtmr(struct vm *vm);
diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c
index 2fe6a5bc3584..37c950cfbc5f 100644
--- a/sys/amd64/vmm/amd/svm.c
+++ b/sys/amd64/vmm/amd/svm.c
@@ -50,12 +50,12 @@
#include <machine/specialreg.h>
#include <machine/smp.h>
#include <machine/vmm.h>
-#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>
#include <machine/vmm_snapshot.h>
#include <dev/vmm/vmm_ktr.h>
#include <dev/vmm/vmm_mem.h>
+#include <dev/vmm/vmm_vm.h>
#include "vmm_lapic.h"
#include "vmm_stat.h"
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index 4189c1214b40..7a2d0de6beff 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -53,13 +53,13 @@
#include <machine/vmparam.h>
#include <machine/vmm.h>
-#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>
#include <machine/vmm_snapshot.h>
#include <dev/vmm/vmm_dev.h>
#include <dev/vmm/vmm_ktr.h>
#include <dev/vmm/vmm_mem.h>
+#include <dev/vmm/vmm_vm.h>
#include "vmm_lapic.h"
#include "vmm_host.h"
diff --git a/sys/amd64/vmm/io/vhpet.c b/sys/amd64/vmm/io/vhpet.c
index 88063f2952e5..b01736a56c00 100644
--- a/sys/amd64/vmm/io/vhpet.c
+++ b/sys/amd64/vmm/io/vhpet.c
@@ -38,11 +38,13 @@
#include <sys/systm.h>
#include <machine/vmm.h>
-#include <machine/vmm_dev.h>
#include <machine/vmm_snapshot.h>
#include <dev/acpica/acpi_hpet.h>
+
+#include <dev/vmm/vmm_dev.h>
#include <dev/vmm/vmm_ktr.h>
+#include <dev/vmm/vmm_vm.h>
#include "vmm_lapic.h"
#include "vatpic.h"
diff --git a/sys/amd64/vmm/io/vioapic.c b/sys/amd64/vmm/io/vioapic.c
index 7df6193d6dc0..a3956785d049 100644
--- a/sys/amd64/vmm/io/vioapic.c
+++ b/sys/amd64/vmm/io/vioapic.c
@@ -43,6 +43,7 @@
#include <machine/vmm_snapshot.h>
#include <dev/vmm/vmm_ktr.h>
+#include <dev/vmm/vmm_vm.h>
#include "vmm_lapic.h"
#include "vlapic.h"
diff --git a/sys/amd64/vmm/io/vlapic.c b/sys/amd64/vmm/io/vlapic.c
index afd5045de574..6849ef7aa589 100644
--- a/sys/amd64/vmm/io/vlapic.c
+++ b/sys/amd64/vmm/io/vlapic.c
@@ -47,6 +47,7 @@
#include <machine/vmm_snapshot.h>
#include <dev/vmm/vmm_ktr.h>
+#include <dev/vmm/vmm_vm.h>
#include "vmm_lapic.h"
#include "vmm_stat.h"
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index f3f9717129c9..050cc93d2605 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -71,6 +71,7 @@
#include <dev/vmm/vmm_dev.h>
#include <dev/vmm/vmm_ktr.h>
#include <dev/vmm/vmm_mem.h>
+#include <dev/vmm/vmm_vm.h>
#include "vmm_ioport.h"
#include "vmm_host.h"
@@ -91,88 +92,6 @@
struct vlapic;
-/*
- * Initialization:
- * (a) allocated when vcpu is created
- * (i) initialized when vcpu is created and when it is reinitialized
- * (o) initialized the first time the vcpu is created
- * (x) initialized before use
- */
-struct vcpu {
- struct mtx mtx; /* (o) protects 'state' and 'hostcpu' */
- enum vcpu_state state; /* (o) vcpu state */
- int vcpuid; /* (o) */
- int hostcpu; /* (o) vcpu's host cpu */
- int reqidle; /* (i) request vcpu to idle */
- struct vm *vm; /* (o) */
- void *cookie; /* (i) cpu-specific data */
- struct vlapic *vlapic; /* (i) APIC device model */
- enum x2apic_state x2apic_state; /* (i) APIC mode */
- uint64_t exitintinfo; /* (i) events pending at VM exit */
- int nmi_pending; /* (i) NMI pending */
- int extint_pending; /* (i) INTR pending */
- int exception_pending; /* (i) exception pending */
- int exc_vector; /* (x) exception collateral */
- int exc_errcode_valid;
- uint32_t exc_errcode;
- struct savefpu *guestfpu; /* (a,i) guest fpu state */
- uint64_t guest_xcr0; /* (i) guest %xcr0 register */
- void *stats; /* (a,i) statistics */
- struct vm_exit exitinfo; /* (x) exit reason and collateral */
- cpuset_t exitinfo_cpuset; /* (x) storage for vmexit handlers */
- uint64_t nextrip; /* (x) next instruction to execute */
- uint64_t tsc_offset; /* (o) TSC offsetting */
-};
-
-#define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
-#define vcpu_lock_destroy(v) mtx_destroy(&((v)->mtx))
-#define vcpu_lock(v) mtx_lock_spin(&((v)->mtx))
-#define vcpu_unlock(v) mtx_unlock_spin(&((v)->mtx))
-#define vcpu_assert_locked(v) mtx_assert(&((v)->mtx), MA_OWNED)
-
-/*
- * Initialization:
- * (o) initialized the first time the VM is created
- * (i) initialized when VM is created and when it is reinitialized
- * (x) initialized before use
- *
- * Locking:
- * [m] mem_segs_lock
- * [r] rendezvous_mtx
- * [v] reads require one frozen vcpu, writes require freezing all vcpus
- */
-struct vm {
- void *cookie; /* (i) cpu-specific data */
- void *iommu; /* (x) iommu-specific data */
- struct vhpet *vhpet; /* (i) virtual HPET */
- struct vioapic *vioapic; /* (i) virtual ioapic */
- struct vatpic *vatpic; /* (i) virtual atpic */
- struct vatpit *vatpit; /* (i) virtual atpit */
- struct vpmtmr *vpmtmr; /* (i) virtual ACPI PM timer */
- struct vrtc *vrtc; /* (o) virtual RTC */
- volatile cpuset_t active_cpus; /* (i) active vcpus */
- volatile cpuset_t debug_cpus; /* (i) vcpus stopped for debug */
- cpuset_t startup_cpus; /* (i) [r] waiting for startup */
- int suspend; /* (i) stop VM execution */
- bool dying; /* (o) is dying */
- volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */
- volatile cpuset_t halted_cpus; /* (x) cpus in a hard halt */
- cpuset_t rendezvous_req_cpus; /* (x) [r] rendezvous requested */
- cpuset_t rendezvous_done_cpus; /* (x) [r] rendezvous finished */
- void *rendezvous_arg; /* (x) [r] rendezvous func/arg */
- vm_rendezvous_func_t rendezvous_func;
- struct mtx rendezvous_mtx; /* (o) rendezvous lock */
- struct vm_mem mem; /* (i) [m+v] guest memory */
- char name[VM_MAX_NAMELEN+1]; /* (o) virtual machine name */
- struct vcpu **vcpu; /* (o) guest vcpus */
- /* The following describe the vm cpu topology */
- uint16_t sockets; /* (o) num of sockets */
- uint16_t cores; /* (o) num of cores/socket */
- uint16_t threads; /* (o) num of threads/core */
- uint16_t maxcpus; /* (o) max pluggable cpus */
- struct sx vcpus_init_lock; /* (o) */
-};
-
#define VMM_CTR0(vcpu, format) \
VCPU_CTR0((vcpu)->vm, (vcpu)->vcpuid, format)
@@ -242,8 +161,7 @@ static MALLOC_DEFINE(M_VM, "vm", "vm");
/* statistics */
static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
-SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
- NULL);
+SYSCTL_DECL(_hw_vmm);
/*
* Halt the guest if all vcpus are executing a HLT instruction with
@@ -254,10 +172,6 @@ SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
&halt_detection_enabled, 0,
"Halt VM if all vcpus execute HLT with interrupts disabled");
-static int vmm_ipinum;
-SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
- "IPI vector used for vcpu notifications");
-
static int trace_guest_exceptions;
SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN,
&trace_guest_exceptions, 0,
@@ -267,8 +181,6 @@ static int trap_wbinvd;
SYSCTL_INT(_hw_vmm, OID_AUTO, trap_wbinvd, CTLFLAG_RDTUN, &trap_wbinvd, 0,
"WBINVD triggers a VM-exit");
-static void vcpu_notify_event_locked(struct vcpu *vcpu);
-
/* global statistics */
VMM_STAT(VCPU_MIGRATIONS, "vcpu migration across host cpus");
VMM_STAT(VMEXIT_COUNT, "total number of vm exits");
@@ -292,26 +204,6 @@ VMM_STAT(VMEXIT_USERSPACE, "number of vm exits handled in userspace");
VMM_STAT(VMEXIT_RENDEZVOUS, "number of times rendezvous pending at exit");
VMM_STAT(VMEXIT_EXCEPTION, "number of vm exits due to exceptions");
-#ifdef KTR
-static const char *
-vcpu_state2str(enum vcpu_state state)
-{
-
- switch (state) {
- case VCPU_IDLE:
- return ("idle");
- case VCPU_FROZEN:
- return ("frozen");
- case VCPU_RUNNING:
- return ("running");
- case VCPU_SLEEPING:
- return ("sleeping");
- default:
- return ("unknown");
- }
-}
-#endif
-
static void
vcpu_cleanup(struct vcpu *vcpu, bool destroy)
{
@@ -365,7 +257,6 @@ vcpu_init(struct vcpu *vcpu)
int
vcpu_trace_exceptions(struct vcpu *vcpu)
{
-
return (trace_guest_exceptions);
}
@@ -445,14 +336,6 @@ vm_init(struct vm *vm, bool create)
}
}
-void
-vm_disable_vcpu_creation(struct vm *vm)
-{
- sx_xlock(&vm->vcpus_init_lock);
- vm->dying = true;
- sx_xunlock(&vm->vcpus_init_lock);
-}
-
struct vcpu *
vm_alloc_vcpu(struct vm *vm, int vcpuid)
{
@@ -483,18 +366,6 @@ vm_alloc_vcpu(struct vm *vm, int vcpuid)
return (vcpu);
}
-void
-vm_lock_vcpus(struct vm *vm)
-{
- sx_xlock(&vm->vcpus_init_lock);
-}
-
-void
-vm_unlock_vcpus(struct vm *vm)
-{
- sx_unlock(&vm->vcpus_init_lock);
-}
-
int
vm_create(const char *name, struct vm **retvm)
{
@@ -524,35 +395,6 @@ vm_create(const char *name, struct vm **retvm)
return (0);
}
-void
-vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
- uint16_t *threads, uint16_t *maxcpus)
-{
- *sockets = vm->sockets;
- *cores = vm->cores;
- *threads = vm->threads;
- *maxcpus = vm->maxcpus;
-}
-
-uint16_t
-vm_get_maxcpus(struct vm *vm)
-{
- return (vm->maxcpus);
-}
-
-int
-vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
- uint16_t threads, uint16_t maxcpus __unused)
-{
- /* Ignore maxcpus. */
- if ((sockets * cores * threads) > vm->maxcpus)
- return (EINVAL);
- vm->sockets = sockets;
- vm->cores = cores;
- vm->threads = threads;
- return(0);
-}
-
static void
vm_cleanup(struct vm *vm, bool destroy)
{
@@ -601,29 +443,11 @@ vm_destroy(struct vm *vm)
free(vm, M_VM);
}
-int
-vm_reinit(struct vm *vm)
-{
- int error;
-
- /*
- * A virtual machine can be reset only if all vcpus are suspended.
- */
- if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
- vm_cleanup(vm, false);
- vm_init(vm, false);
- error = 0;
- } else {
- error = EBUSY;
- }
-
- return (error);
-}
-
-const char *
-vm_name(struct vm *vm)
+void
+vm_reset(struct vm *vm)
{
- return (vm->name);
+ vm_cleanup(vm, false);
+ vm_init(vm, false);
}
int
@@ -891,210 +715,6 @@ save_guest_fpustate(struct vcpu *vcpu)
static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
-/*
- * Invoke the rendezvous function on the specified vcpu if applicable. Return
- * true if the rendezvous is finished, false otherwise.
- */
-static bool
-vm_rendezvous(struct vcpu *vcpu)
-{
- struct vm *vm = vcpu->vm;
- int vcpuid;
-
- mtx_assert(&vcpu->vm->rendezvous_mtx, MA_OWNED);
- KASSERT(vcpu->vm->rendezvous_func != NULL,
- ("vm_rendezvous: no rendezvous pending"));
-
- /* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
- CPU_AND(&vm->rendezvous_req_cpus, &vm->rendezvous_req_cpus,
- &vm->active_cpus);
-
- vcpuid = vcpu->vcpuid;
- if (CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
- !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
- VMM_CTR0(vcpu, "Calling rendezvous func");
- (*vm->rendezvous_func)(vcpu, vm->rendezvous_arg);
- CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
- }
- if (CPU_CMP(&vm->rendezvous_req_cpus,
- &vm->rendezvous_done_cpus) == 0) {
- VMM_CTR0(vcpu, "Rendezvous completed");
- CPU_ZERO(&vm->rendezvous_req_cpus);
- vm->rendezvous_func = NULL;
- wakeup(&vm->rendezvous_func);
- return (true);
- }
- return (false);
-}
-
-static void
-vcpu_wait_idle(struct vcpu *vcpu)
-{
- KASSERT(vcpu->state != VCPU_IDLE, ("vcpu already idle"));
-
- vcpu->reqidle = 1;
- vcpu_notify_event_locked(vcpu);
- VMM_CTR1(vcpu, "vcpu state change from %s to "
- "idle requested", vcpu_state2str(vcpu->state));
- msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
-}
-
-static int
-vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
- bool from_idle)
-{
- int error;
-
- vcpu_assert_locked(vcpu);
-
- /*
- * State transitions from the vmmdev_ioctl() must always begin from
- * the VCPU_IDLE state. This guarantees that there is only a single
- * ioctl() operating on a vcpu at any point.
- */
- if (from_idle) {
- while (vcpu->state != VCPU_IDLE)
- vcpu_wait_idle(vcpu);
- } else {
- KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
- "vcpu idle state"));
- }
-
- if (vcpu->state == VCPU_RUNNING) {
- KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
- "mismatch for running vcpu", curcpu, vcpu->hostcpu));
- } else {
- KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
- "vcpu that is not running", vcpu->hostcpu));
- }
-
- /*
- * The following state transitions are allowed:
- * IDLE -> FROZEN -> IDLE
- * FROZEN -> RUNNING -> FROZEN
- * FROZEN -> SLEEPING -> FROZEN
- */
- switch (vcpu->state) {
- case VCPU_IDLE:
- case VCPU_RUNNING:
- case VCPU_SLEEPING:
- error = (newstate != VCPU_FROZEN);
- break;
- case VCPU_FROZEN:
- error = (newstate == VCPU_FROZEN);
- break;
- default:
- error = 1;
- break;
- }
-
- if (error)
- return (EBUSY);
-
- VMM_CTR2(vcpu, "vcpu state changed from %s to %s",
- vcpu_state2str(vcpu->state), vcpu_state2str(newstate));
-
- vcpu->state = newstate;
- if (newstate == VCPU_RUNNING)
- vcpu->hostcpu = curcpu;
- else
- vcpu->hostcpu = NOCPU;
-
- if (newstate == VCPU_IDLE)
- wakeup(&vcpu->state);
-
- return (0);
-}
-
-/*
- * Try to lock all of the vCPUs in the VM while taking care to avoid deadlocks
- * with vm_smp_rendezvous().
- *
- * The complexity here suggests that the rendezvous mechanism needs a rethink.
- */
-int
-vcpu_set_state_all(struct vm *vm, enum vcpu_state newstate)
-{
- cpuset_t locked;
- struct vcpu *vcpu;
- int error, i;
- uint16_t maxcpus;
-
- KASSERT(newstate != VCPU_IDLE,
- ("vcpu_set_state_all: invalid target state %d", newstate));
-
- error = 0;
- CPU_ZERO(&locked);
- maxcpus = vm->maxcpus;
-
- mtx_lock(&vm->rendezvous_mtx);
-restart:
- if (vm->rendezvous_func != NULL) {
- /*
- * If we have a pending rendezvous, then the initiator may be
- * blocked waiting for other vCPUs to execute the callback. The
- * current thread may be a vCPU thread so we must not block
- * waiting for the initiator, otherwise we get a deadlock.
- * Thus, execute the callback on behalf of any idle vCPUs.
- */
- for (i = 0; i < maxcpus; i++) {
- vcpu = vm_vcpu(vm, i);
- if (vcpu == NULL)
- continue;
- vcpu_lock(vcpu);
- if (vcpu->state == VCPU_IDLE) {
- (void)vcpu_set_state_locked(vcpu, VCPU_FROZEN,
- true);
- CPU_SET(i, &locked);
- }
- if (CPU_ISSET(i, &locked)) {
- /*
- * We can safely execute the callback on this
- * vCPU's behalf.
- */
- vcpu_unlock(vcpu);
- (void)vm_rendezvous(vcpu);
- vcpu_lock(vcpu);
- }
- vcpu_unlock(vcpu);
- }
- }
-
- /*
- * Now wait for remaining vCPUs to become idle. This may include the
- * initiator of a rendezvous that is currently blocked on the rendezvous
- * mutex.
- */
- CPU_FOREACH_ISCLR(i, &locked) {
- if (i >= maxcpus)
- break;
- vcpu = vm_vcpu(vm, i);
- if (vcpu == NULL)
- continue;
- vcpu_lock(vcpu);
- while (vcpu->state != VCPU_IDLE) {
- mtx_unlock(&vm->rendezvous_mtx);
- vcpu_wait_idle(vcpu);
- vcpu_unlock(vcpu);
- mtx_lock(&vm->rendezvous_mtx);
- if (vm->rendezvous_func != NULL)
- goto restart;
- vcpu_lock(vcpu);
- }
- error = vcpu_set_state_locked(vcpu, newstate, true);
- vcpu_unlock(vcpu);
- if (error != 0) {
- /* Roll back state changes. */
- CPU_FOREACH_ISSET(i, &locked)
- (void)vcpu_set_state(vcpu, VCPU_IDLE, false);
- break;
- }
- CPU_SET(i, &locked);
- }
- mtx_unlock(&vm->rendezvous_mtx);
- return (error);
-}
-
static void
vcpu_require_state(struct vcpu *vcpu, enum vcpu_state newstate)
{
@@ -1113,37 +733,6 @@ vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
panic("Error %d setting state to %d", error, newstate);
}
-static int
-vm_handle_rendezvous(struct vcpu *vcpu)
-{
- struct vm *vm;
- struct thread *td;
-
- td = curthread;
- vm = vcpu->vm;
-
- mtx_lock(&vm->rendezvous_mtx);
- while (vm->rendezvous_func != NULL) {
- if (vm_rendezvous(vcpu))
- break;
-
- VMM_CTR0(vcpu, "Wait for rendezvous completion");
- mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
- "vmrndv", hz);
- if (td_ast_pending(td, TDA_SUSPEND)) {
- int error;
-
- mtx_unlock(&vm->rendezvous_mtx);
- error = thread_check_susp(td, true);
- if (error != 0)
- return (error);
- mtx_lock(&vm->rendezvous_mtx);
- }
- }
- mtx_unlock(&vm->rendezvous_mtx);
- return (0);
-}
-
/*
* Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
*/
@@ -1467,33 +1056,6 @@ vm_handle_db(struct vcpu *vcpu, struct vm_exit *vme, bool *retu)
return (0);
}
-int
-vm_suspend(struct vm *vm, enum vm_suspend_how how)
-{
- int i;
-
- if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
- return (EINVAL);
-
- if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) {
- VM_CTR2(vm, "virtual machine already suspended %d/%d",
- vm->suspend, how);
- return (EALREADY);
- }
-
- VM_CTR1(vm, "virtual machine successfully suspended %d", how);
-
- /*
- * Notify all active vcpus that they are now suspended.
- */
- for (i = 0; i < vm->maxcpus; i++) {
- if (CPU_ISSET(i, &vm->active_cpus))
- vcpu_notify_event(vm_vcpu(vm, i));
- }
-
- return (0);
-}
-
void
vm_exit_suspended(struct vcpu *vcpu, uint64_t rip)
{
@@ -2033,24 +1595,6 @@ vm_set_capability(struct vcpu *vcpu, int type, int val)
return (vmmops_setcap(vcpu->cookie, type, val));
}
-struct vm *
-vcpu_vm(struct vcpu *vcpu)
-{
- return (vcpu->vm);
-}
-
-int
-vcpu_vcpuid(struct vcpu *vcpu)
-{
- return (vcpu->vcpuid);
-}
-
-struct vcpu *
-vm_vcpu(struct vm *vm, int vcpuid)
-{
- return (vm->vcpu[vcpuid]);
-}
-
struct vlapic *
vm_lapic(struct vcpu *vcpu)
{
@@ -2120,107 +1664,6 @@ vm_iommu_domain(struct vm *vm)
return (vm->iommu);
}
-int
-vcpu_set_state(struct vcpu *vcpu, enum vcpu_state newstate, bool from_idle)
-{
- int error;
-
- vcpu_lock(vcpu);
- error = vcpu_set_state_locked(vcpu, newstate, from_idle);
- vcpu_unlock(vcpu);
-
- return (error);
-}
-
-enum vcpu_state
-vcpu_get_state(struct vcpu *vcpu, int *hostcpu)
-{
- enum vcpu_state state;
-
- vcpu_lock(vcpu);
- state = vcpu->state;
- if (hostcpu != NULL)
- *hostcpu = vcpu->hostcpu;
- vcpu_unlock(vcpu);
-
- return (state);
-}
-
-int
-vm_activate_cpu(struct vcpu *vcpu)
-{
- struct vm *vm = vcpu->vm;
-
- if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
- return (EBUSY);
-
- VMM_CTR0(vcpu, "activated");
- CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus);
- return (0);
-}
-
-int
-vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu)
-{
- if (vcpu == NULL) {
- vm->debug_cpus = vm->active_cpus;
- for (int i = 0; i < vm->maxcpus; i++) {
- if (CPU_ISSET(i, &vm->active_cpus))
- vcpu_notify_event(vm_vcpu(vm, i));
- }
- } else {
- if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
- return (EINVAL);
-
- CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
- vcpu_notify_event(vcpu);
- }
- return (0);
-}
-
-int
-vm_resume_cpu(struct vm *vm, struct vcpu *vcpu)
-{
-
- if (vcpu == NULL) {
- CPU_ZERO(&vm->debug_cpus);
- } else {
- if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus))
- return (EINVAL);
-
- CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
- }
- return (0);
-}
-
-int
-vcpu_debugged(struct vcpu *vcpu)
-{
-
- return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus));
-}
-
-cpuset_t
-vm_active_cpus(struct vm *vm)
-{
-
- return (vm->active_cpus);
-}
-
-cpuset_t
-vm_debug_cpus(struct vm *vm)
-{
-
- return (vm->debug_cpus);
-}
-
-cpuset_t
-vm_suspended_cpus(struct vm *vm)
-{
-
- return (vm->suspended_cpus);
-}
-
/*
* Returns the subset of vCPUs in tostart that are awaiting startup.
* These vCPUs are also marked as no longer awaiting startup.
@@ -2245,13 +1688,6 @@ vm_await_start(struct vm *vm, const cpuset_t *waiting)
mtx_unlock(&vm->rendezvous_mtx);
}
-void *
-vcpu_stats(struct vcpu *vcpu)
-{
-
- return (vcpu->stats);
-}
-
int
vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state)
{
@@ -2273,47 +1709,6 @@ vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state)
return (0);
}
-/*
- * This function is called to ensure that a vcpu "sees" a pending event
- * as soon as possible:
- * - If the vcpu thread is sleeping then it is woken up.
- * - If the vcpu is running on a different host_cpu then an IPI will be directed
- * to the host_cpu to cause the vcpu to trap into the hypervisor.
- */
-static void
-vcpu_notify_event_locked(struct vcpu *vcpu)
-{
- int hostcpu;
-
- hostcpu = vcpu->hostcpu;
- if (vcpu->state == VCPU_RUNNING) {
- KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
- if (hostcpu != curcpu) {
- ipi_cpu(hostcpu, vmm_ipinum);
- } else {
- /*
- * If the 'vcpu' is running on 'curcpu' then it must
- * be sending a notification to itself (e.g. SELF_IPI).
- * The pending event will be picked up when the vcpu
- * transitions back to guest context.
- */
- }
- } else {
- KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
- "with hostcpu %d", vcpu->state, hostcpu));
- if (vcpu->state == VCPU_SLEEPING)
- wakeup_one(vcpu);
- }
-}
-
-void
-vcpu_notify_event(struct vcpu *vcpu)
-{
- vcpu_lock(vcpu);
- vcpu_notify_event_locked(vcpu);
- vcpu_unlock(vcpu);
-}
-
void
vcpu_notify_lapic(struct vcpu *vcpu)
{
@@ -2325,12 +1720,6 @@ vcpu_notify_lapic(struct vcpu *vcpu)
vcpu_unlock(vcpu);
}
-struct vm_mem *
-vm_mem(struct vm *vm)
-{
- return (&vm->mem);
-}
-
int
vm_apicid2vcpuid(struct vm *vm, int apicid)
{
diff --git a/sys/amd64/vmm/vmm_host.h b/sys/amd64/vmm/vmm_host.h
index eebb794843b6..adca53271448 100644
--- a/sys/amd64/vmm/vmm_host.h
+++ b/sys/amd64/vmm/vmm_host.h
@@ -29,6 +29,8 @@
#ifndef _VMM_HOST_H_
#define _VMM_HOST_H_
+#include <sys/pcpu.h>
+
#ifndef _KERNEL
#error "no user-serviceable parts inside"
#endif
diff --git a/sys/amd64/vmm/vmm_ioport.c b/sys/amd64/vmm/vmm_ioport.c
index 8aab28f5e68e..65710c8de717 100644
--- a/sys/amd64/vmm/vmm_ioport.c
+++ b/sys/amd64/vmm/vmm_ioport.c
@@ -33,6 +33,7 @@
#include <machine/vmm_instruction_emul.h>
#include <dev/vmm/vmm_ktr.h>
+#include <dev/vmm/vmm_vm.h>
#include "vatpic.h"
#include "vatpit.h"
diff --git a/sys/amd64/vmm/vmm_lapic.c b/sys/amd64/vmm/vmm_lapic.c
index 63bdee69bb59..44bae5da31e5 100644
--- a/sys/amd64/vmm/vmm_lapic.c
+++ b/sys/amd64/vmm/vmm_lapic.c
@@ -34,6 +34,7 @@
#include <x86/apicreg.h>
#include <dev/vmm/vmm_ktr.h>
+#include <dev/vmm/vmm_vm.h>
#include <machine/vmm.h>
#include "vmm_lapic.h"
diff --git a/sys/amd64/vmm/x86.c b/sys/amd64/vmm/x86.c
index 2e2224595ab4..f32107124eb8 100644
--- a/sys/amd64/vmm/x86.c
+++ b/sys/amd64/vmm/x86.c
@@ -39,6 +39,7 @@
#include <machine/vmm.h>
#include <dev/vmm/vmm_ktr.h>
+#include <dev/vmm/vmm_vm.h>
#include "vmm_host.h"
#include "vmm_util.h"
diff --git a/sys/arm/arm/gic.c b/sys/arm/arm/gic.c
index e33bda4886b9..64f19c848ea2 100644
--- a/sys/arm/arm/gic.c
+++ b/sys/arm/arm/gic.c
@@ -151,6 +151,8 @@ static struct arm_gic_softc *gic_sc = NULL;
/* CPU Interface */
#define gic_c_read_4(_sc, _reg) \
bus_read_4((_sc)->gic_res[GIC_RES_CPU], (_reg))
+#define gic_c_peek_4(_sc, _reg, _val) \
+ bus_peek_4((_sc)->gic_res[GIC_RES_CPU], (_reg), (_val))
#define gic_c_write_4(_sc, _reg, _val) \
bus_write_4((_sc)->gic_res[GIC_RES_CPU], (_reg), (_val))
/* Distributor Interface */
@@ -347,7 +349,18 @@ arm_gic_attach(device_t dev)
goto cleanup;
}
- icciidr = gic_c_read_4(sc, GICC_IIDR);
+ /*
+ * Try accessing a CPU interface register. On some broken
+ * virtualization environments this will raise an external
+ * data abort. When this happens we can detect it
+ * by peeking at the register & checking for the fault.
+ * As there is no way to continue with a normal boot we
+ * panic.
+ */
+ if (gic_c_peek_4(sc, GICC_IIDR, &icciidr) != 0)
+ panic("Unable to access %s CPU registers, "
+ "broken hardware or hypervisor configuration",
+ device_get_nameunit(dev));
device_printf(dev,
"pn 0x%x, arch 0x%x, rev 0x%x, implementer 0x%x irqs %u\n",
GICD_IIDR_PROD(icciidr), GICD_IIDR_VAR(icciidr),
diff --git a/sys/arm/arm/machdep.c b/sys/arm/arm/machdep.c
index cfc0b32f5102..9532e19a11b3 100644
--- a/sys/arm/arm/machdep.c
+++ b/sys/arm/arm/machdep.c
@@ -374,7 +374,7 @@ pcpu0_init(void)
/*
* Initialize proc0
*/
-void
+static void
init_proc0(vm_offset_t kstack)
{
proc_linkup0(&proc0, &thread0);
diff --git a/sys/arm/broadcom/bcm2835/files.bcm283x b/sys/arm/broadcom/bcm2835/files.bcm283x
index 44976f34d35a..cda5a28d3b76 100644
--- a/sys/arm/broadcom/bcm2835/files.bcm283x
+++ b/sys/arm/broadcom/bcm2835/files.bcm283x
@@ -32,7 +32,7 @@ contrib/vchiq/interface/compat/vchi_bsd.c optional vchiq \
contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c optional vchiq \
compile-with "${NORMAL_C} -Wno-unused -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
contrib/vchiq/interface/vchiq_arm/vchiq_arm.c optional vchiq \
- compile-with "${NORMAL_C} -Wno-unused -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
+ compile-with "${NORMAL_C} -Wno-unused ${NO_WDEFAULT_CONST_INIT_FIELD_UNSAFE} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
contrib/vchiq/interface/vchiq_arm/vchiq_connected.c optional vchiq \
compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
contrib/vchiq/interface/vchiq_arm/vchiq_core.c optional vchiq \
diff --git a/sys/arm/include/machdep.h b/sys/arm/include/machdep.h
index 45e44a65368b..ada2dfa502aa 100644
--- a/sys/arm/include/machdep.h
+++ b/sys/arm/include/machdep.h
@@ -15,7 +15,6 @@ extern vm_offset_t abtstack;
/* misc prototypes used by the many arm machdeps */
struct trapframe;
-void init_proc0(vm_offset_t kstack);
void halt(void);
void abort_handler(struct trapframe *, int );
void set_stackptrs(int cpu);
diff --git a/sys/arm64/arm64/identcpu.c b/sys/arm64/arm64/identcpu.c
index 2d07420bcdb0..e37c9813582b 100644
--- a/sys/arm64/arm64/identcpu.c
+++ b/sys/arm64/arm64/identcpu.c
@@ -1094,6 +1094,11 @@ static const struct mrs_field_value id_aa64isar2_mops[] = {
MRS_FIELD_VALUE_END,
};
+static const struct mrs_field_hwcap id_aa64isar2_mops_caps[] = {
+ MRS_HWCAP(2, HWCAP2_MOPS, ID_AA64ISAR2_MOPS_IMPL),
+ MRS_HWCAP_END
+};
+
static const struct mrs_field_value id_aa64isar2_apa3[] = {
MRS_FIELD_VALUE(ID_AA64ISAR2_APA3_NONE, ""),
MRS_FIELD_VALUE(ID_AA64ISAR2_APA3_PAC, "APA3 PAC"),
@@ -1149,7 +1154,8 @@ static const struct mrs_field id_aa64isar2_fields[] = {
MRS_FIELD(ID_AA64ISAR2, PAC_frac, false, MRS_LOWER, 0,
id_aa64isar2_pac_frac),
MRS_FIELD(ID_AA64ISAR2, BC, false, MRS_LOWER, 0, id_aa64isar2_bc),
- MRS_FIELD(ID_AA64ISAR2, MOPS, false, MRS_LOWER, 0, id_aa64isar2_mops),
+ MRS_FIELD_HWCAP(ID_AA64ISAR2, MOPS, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64isar2_mops, id_aa64isar2_mops_caps),
MRS_FIELD_HWCAP(ID_AA64ISAR2, APA3, false, MRS_LOWER, MRS_USERSPACE,
id_aa64isar2_apa3, id_aa64isar2_apa3_caps),
MRS_FIELD_HWCAP(ID_AA64ISAR2, GPA3, false, MRS_LOWER, MRS_USERSPACE,
diff --git a/sys/arm64/arm64/machdep.c b/sys/arm64/arm64/machdep.c
index 322bad273a08..6790f47a0f82 100644
--- a/sys/arm64/arm64/machdep.c
+++ b/sys/arm64/arm64/machdep.c
@@ -219,6 +219,41 @@ CPU_FEAT(feat_pan, "Privileged access never",
pan_check, NULL, pan_enable, pan_disabled,
CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);
+static cpu_feat_en
+mops_check(const struct cpu_feat *feat __unused, u_int midr __unused)
+{
+ uint64_t id_aa64isar2;
+
+ if (!get_kernel_reg(ID_AA64ISAR2_EL1, &id_aa64isar2))
+ return (FEAT_ALWAYS_DISABLE);
+ if (ID_AA64ISAR2_MOPS_VAL(id_aa64isar2) == ID_AA64ISAR2_MOPS_NONE)
+ return (FEAT_ALWAYS_DISABLE);
+
+ return (FEAT_DEFAULT_ENABLE);
+}
+
+static bool
+mops_enable(const struct cpu_feat *feat __unused,
+ cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
+ u_int errata_count __unused)
+{
+ WRITE_SPECIALREG(sctlr_el1, READ_SPECIALREG(sctlr_el1) | SCTLR_MSCEn);
+ isb();
+
+ return (true);
+}
+
+static void
+mops_disabled(const struct cpu_feat *feat __unused)
+{
+ WRITE_SPECIALREG(sctlr_el1, READ_SPECIALREG(sctlr_el1) & ~SCTLR_MSCEn);
+ isb();
+}
+
+CPU_FEAT(feat_mops, "MOPS",
+ mops_check, NULL, mops_enable, mops_disabled,
+ CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);
+
bool
has_hyp(void)
{
diff --git a/sys/arm64/arm64/trap.c b/sys/arm64/arm64/trap.c
index 75c9b5f87892..3de56187657c 100644
--- a/sys/arm64/arm64/trap.c
+++ b/sys/arm64/arm64/trap.c
@@ -597,6 +597,66 @@ do_el1h_sync(struct thread *td, struct trapframe *frame)
}
}
+static void
+handle_moe(struct thread *td, struct trapframe *frame, uint64_t esr)
+{
+ uint64_t src;
+ uint64_t dest;
+ uint64_t size;
+ int src_reg;
+ int dest_reg;
+ int size_reg;
+ int format_option;
+
+ format_option = esr & ISS_MOE_FORMAT_OPTION_MASK;
+ dest_reg = (esr & ISS_MOE_DESTREG_MASK) >> ISS_MOE_DESTREG_SHIFT;
+ size_reg = (esr & ISS_MOE_SIZEREG_MASK) >> ISS_MOE_SIZEREG_SHIFT;
+ dest = frame->tf_x[dest_reg];
+ size = frame->tf_x[size_reg];
+
+ /*
+ * Put the registers back in the original format suitable for a
+ * prologue instruction, using the generic return routine from the
+ * Arm ARM (DDI 0487I.a) rules CNTMJ and MWFQH.
+ */
+ if (esr & ISS_MOE_MEMINST) {
+ /* SET* instruction */
+ if (format_option == ISS_MOE_FORMAT_OPTION_A ||
+ format_option == ISS_MOE_FORMAT_OPTION_A2) {
+ /* Format is from Option A; forward set */
+ frame->tf_x[dest_reg] = dest + size;
+ frame->tf_x[size_reg] = -size;
+ }
+ } else {
+ /* CPY* instruction */
+ src_reg = (esr & ISS_MOE_SRCREG_MASK) >> ISS_MOE_SRCREG_SHIFT;
+ src = frame->tf_x[src_reg];
+
+ if (format_option == ISS_MOE_FORMAT_OPTION_B ||
+ format_option == ISS_MOE_FORMAT_OPTION_B2) {
+ /* Format is from Option B */
+ if (frame->tf_spsr & PSR_N) {
+ /* Backward copy */
+ frame->tf_x[dest_reg] = dest - size;
+ frame->tf_x[src_reg] = src + size;
+ }
+ } else {
+ /* Format is from Option A */
+ if (frame->tf_x[size_reg] & (1UL << 63)) {
+ /* Forward copy */
+ frame->tf_x[dest_reg] = dest + size;
+ frame->tf_x[src_reg] = src + size;
+ frame->tf_x[size_reg] = -size;
+ }
+ }
+ }
+
+ if (esr & ISS_MOE_FROM_EPILOGUE)
+ frame->tf_elr -= 8;
+ else
+ frame->tf_elr -= 4;
+}
+
void
do_el0_sync(struct thread *td, struct trapframe *frame)
{
@@ -738,6 +798,10 @@ do_el0_sync(struct thread *td, struct trapframe *frame)
exception);
userret(td, frame);
break;
+ case EXCP_MOE:
+ handle_moe(td, frame, esr);
+ userret(td, frame);
+ break;
default:
call_trapsignal(td, SIGBUS, BUS_OBJERR, (void *)frame->tf_elr,
exception);
diff --git a/sys/arm64/conf/NOTES b/sys/arm64/conf/NOTES
index e773253da6d7..102066025e8f 100644
--- a/sys/arm64/conf/NOTES
+++ b/sys/arm64/conf/NOTES
@@ -92,6 +92,7 @@ device al_eth # Annapurna Alpine Ethernet NIC
device dwc # Synopsys Designware GMAC Ethernet
device dwc_rk # Rockchip Designware
device dwc_socfpga # Altera SOCFPGA Ethernet MAC
+device ixl # Intel 700 Series Physical Function
device ice # Intel 800 Series Physical Function
device ice_ddp # Intel 800 Series DDP Package
diff --git a/sys/arm64/include/armreg.h b/sys/arm64/include/armreg.h
index 27b02c44cd76..138a1eff51ac 100644
--- a/sys/arm64/include/armreg.h
+++ b/sys/arm64/include/armreg.h
@@ -592,6 +592,27 @@
#define ISS_MSR_REG(reg) \
__ISS_MSR_REG(reg##_op0, reg##_op1, reg##_CRn, reg##_CRm, reg##_op2)
+#define ISS_MOE_MEMINST_SHIFT 24
+#define ISS_MOE_MEMINST (0x01 << ISS_MOE_MEMINST_SHIFT)
+#define ISS_MOE_isSETG_SHIFT 24
+#define ISS_MOE_isSETG (0x01 << ISS_MOE_isSETG_SHIFT)
+#define ISS_MOE_OPTIONS_SHIFT 19
+#define ISS_MOE_OPTIONS_MASK (0x0f << ISS_MOE_OPTIONS_SHIFT)
+#define ISS_MOE_FROM_EPILOGUE_SHIFT 18
+#define ISS_MOE_FROM_EPILOGUE (0x01 << ISS_MOE_FROM_EPILOGUE_SHIFT)
+#define ISS_MOE_FORMAT_OPTION_SHIFT 16
+#define ISS_MOE_FORMAT_OPTION_MASK (0x03 << ISS_MOE_FORMAT_OPTION_SHIFT)
+#define ISS_MOE_FORMAT_OPTION_B (0x00 << ISS_MOE_FORMAT_OPTION_SHIFT)
+#define ISS_MOE_FORMAT_OPTION_A (0x01 << ISS_MOE_FORMAT_OPTION_SHIFT)
+#define ISS_MOE_FORMAT_OPTION_A2 (0x02 << ISS_MOE_FORMAT_OPTION_SHIFT)
+#define ISS_MOE_FORMAT_OPTION_B2 (0x03 << ISS_MOE_FORMAT_OPTION_SHIFT)
+#define ISS_MOE_DESTREG_SHIFT 10
+#define ISS_MOE_DESTREG_MASK (0x1f << ISS_MOE_DESTREG_SHIFT)
+#define ISS_MOE_SRCREG_SHIFT 5
+#define ISS_MOE_SRCREG_MASK (0x1f << ISS_MOE_SRCREG_SHIFT)
+#define ISS_MOE_SIZEREG_SHIFT 0
+#define ISS_MOE_SIZEREG_MASK (0x1f << ISS_MOE_SIZEREG_SHIFT)
+
#define ISS_DATA_ISV_SHIFT 24
#define ISS_DATA_ISV (0x01 << ISS_DATA_ISV_SHIFT)
#define ISS_DATA_SAS_SHIFT 22
@@ -656,6 +677,7 @@
#define EXCP_DATA_ABORT_L 0x24 /* Data abort, from lower EL */
#define EXCP_DATA_ABORT 0x25 /* Data abort, from same EL */
#define EXCP_SP_ALIGN 0x26 /* SP slignment fault */
+#define EXCP_MOE 0x27 /* Memory Operation Exception */
#define EXCP_TRAP_FP 0x2c /* Trapped FP exception */
#define EXCP_SERROR 0x2f /* SError interrupt */
#define EXCP_BRKPT_EL0 0x30 /* Hardware breakpoint, from same EL */
@@ -1960,7 +1982,7 @@
#define MAIR_EL1_CRn 10
#define MAIR_EL1_CRm 2
#define MAIR_EL1_op2 0
-#define MAIR_ATTR_MASK(idx) (UL(0xff) << ((n)* 8))
+#define MAIR_ATTR_MASK(idx) (UL(0xff) << ((idx) * 8))
#define MAIR_ATTR(attr, idx) ((attr) << ((idx) * 8))
#define MAIR_DEVICE_nGnRnE UL(0x00)
#define MAIR_DEVICE_nGnRE UL(0x04)
@@ -2627,7 +2649,9 @@
#define SCTLR_LSMAOE (UL(0x1) << 29)
#define SCTLR_EnIB (UL(0x1) << 30)
#define SCTLR_EnIA (UL(0x1) << 31)
-/* Bits 34:32 are reserved */
+/* Bit 32 is reserved */
+#define SCTLR_MSCEn (UL(0x1) << 33)
+/* Bit 34 is reserved */
#define SCTLR_BT0 (UL(0x1) << 35)
#define SCTLR_BT1 (UL(0x1) << 36)
#define SCTLR_ITFSB (UL(0x1) << 37)
diff --git a/sys/arm64/include/ifunc.h b/sys/arm64/include/ifunc.h
index de452ad34c8f..34e783df8fe5 100644
--- a/sys/arm64/include/ifunc.h
+++ b/sys/arm64/include/ifunc.h
@@ -29,20 +29,38 @@
#ifndef __ARM64_IFUNC_H
#define __ARM64_IFUNC_H
+struct __ifunc_arg_t
+{
+ unsigned long _size; /* Size of the struct, so it can grow. */
+ unsigned long _hwcap;
+ unsigned long _hwcap2;
+ unsigned long _hwcap3;
+ unsigned long _hwcap4;
+};
+
+typedef struct __ifunc_arg_t __ifunc_arg_t;
+
+#define _IFUNC_ARG_HWCAP (1ULL << 62)
+
#define DEFINE_IFUNC(qual, ret_type, name, args) \
static ret_type (*name##_resolver(void))args __used; \
qual ret_type name args __attribute__((ifunc(#name "_resolver"))); \
static ret_type (*name##_resolver(void))args
#define DEFINE_UIFUNC(qual, ret_type, name, args) \
- static ret_type (*name##_resolver(uint64_t, uint64_t, \
+ static ret_type (*name##_resolver(uint64_t, \
+ const struct __ifunc_arg_t *ifunc_arg, \
uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, \
uint64_t))args __used; \
qual ret_type name args __attribute__((ifunc(#name "_resolver"))); \
- static ret_type (*name##_resolver(uint64_t _arg1 __unused, \
- uint64_t _arg2 __unused, uint64_t _arg3 __unused, \
- uint64_t _arg4 __unused, uint64_t _arg5 __unused, \
- uint64_t _arg6 __unused, uint64_t _arg7 __unused, \
+ static ret_type (*name##_resolver( \
+ uint64_t at_hwcap __unused, \
+ const struct __ifunc_arg_t *ifunc_arg __unused, \
+ uint64_t _arg3 __unused, \
+ uint64_t _arg4 __unused, \
+ uint64_t _arg5 __unused, \
+ uint64_t _arg6 __unused, \
+ uint64_t _arg7 __unused, \
uint64_t _arg8 __unused))args
#endif
diff --git a/sys/arm64/include/vmm.h b/sys/arm64/include/vmm.h
index e67540eac66d..f076bd07f323 100644
--- a/sys/arm64/include/vmm.h
+++ b/sys/arm64/include/vmm.h
@@ -107,7 +107,39 @@ enum vm_reg_name {
#define VM_GUEST_BASE_IPA 0x80000000UL /* Guest kernel start ipa */
#ifdef _KERNEL
+#include <machine/vmm_instruction_emul.h>
+
+#define VMM_VCPU_MD_FIELDS \
+ struct vm_exit exitinfo; \
+ uint64_t nextpc; /* (x) next instruction to execute */ \
+ struct vfpstate *guestfpu /* (a,i) guest fpu state */
+
+#define VMM_VM_MD_FIELDS \
+ struct vmm_mmio_region mmio_region[VM_MAX_MMIO_REGIONS]; \
+ struct vmm_special_reg special_reg[VM_MAX_SPECIAL_REGS]
+
+struct vmm_mmio_region {
+ uint64_t start;
+ uint64_t end;
+ mem_region_read_t read;
+ mem_region_write_t write;
+};
+#define VM_MAX_MMIO_REGIONS 4
+
+struct vmm_special_reg {
+ uint32_t esr_iss;
+ uint32_t esr_mask;
+ reg_read_t reg_read;
+ reg_write_t reg_write;
+ void *arg;
+};
+#define VM_MAX_SPECIAL_REGS 16
+
+#define DECLARE_VMMOPS_FUNC(ret_type, opname, args) \
+ ret_type vmmops_##opname args
+
struct vm;
+struct vm_eventinfo;
struct vm_exception;
struct vm_exit;
struct vm_run;
@@ -116,15 +148,6 @@ struct vm_guest_paging;
struct vm_vgic_descr;
struct pmap;
-struct vm_eventinfo {
- void *rptr; /* rendezvous cookie */
- int *sptr; /* suspend cookie */
- int *iptr; /* reqidle cookie */
-};
-
-#define DECLARE_VMMOPS_FUNC(ret_type, opname, args) \
- ret_type vmmops_##opname args
-
DECLARE_VMMOPS_FUNC(int, modinit, (int ipinum));
DECLARE_VMMOPS_FUNC(int, modcleanup, (void));
DECLARE_VMMOPS_FUNC(void *, init, (struct vm *vm, struct pmap *pmap));
@@ -153,34 +176,13 @@ DECLARE_VMMOPS_FUNC(int, restore_tsc, (void *vcpui, uint64_t now));
#endif
#endif
-int vm_create(const char *name, struct vm **retvm);
-struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid);
-void vm_disable_vcpu_creation(struct vm *vm);
-void vm_lock_vcpus(struct vm *vm);
-void vm_unlock_vcpus(struct vm *vm);
-void vm_destroy(struct vm *vm);
-int vm_reinit(struct vm *vm);
-const char *vm_name(struct vm *vm);
-
-uint16_t vm_get_maxcpus(struct vm *vm);
-void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
- uint16_t *threads, uint16_t *maxcpus);
-int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
- uint16_t threads, uint16_t maxcpus);
int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val);
int vm_run(struct vcpu *vcpu);
-int vm_suspend(struct vm *vm, enum vm_suspend_how how);
void* vm_get_cookie(struct vm *vm);
-int vcpu_vcpuid(struct vcpu *vcpu);
void *vcpu_get_cookie(struct vcpu *vcpu);
-struct vm *vcpu_vm(struct vcpu *vcpu);
-struct vcpu *vm_vcpu(struct vm *vm, int cpu);
int vm_get_capability(struct vcpu *vcpu, int type, int *val);
int vm_set_capability(struct vcpu *vcpu, int type, int val);
-int vm_activate_cpu(struct vcpu *vcpu);
-int vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu);
-int vm_resume_cpu(struct vm *vm, struct vcpu *vcpu);
int vm_inject_exception(struct vcpu *vcpu, uint64_t esr, uint64_t far);
int vm_attach_vgic(struct vm *vm, struct vm_vgic_descr *descr);
int vm_assert_irq(struct vm *vm, uint32_t irq);
@@ -190,62 +192,8 @@ int vm_raise_msi(struct vm *vm, uint64_t msg, uint64_t addr, int bus, int slot,
struct vm_exit *vm_exitinfo(struct vcpu *vcpu);
void vm_exit_suspended(struct vcpu *vcpu, uint64_t pc);
void vm_exit_debug(struct vcpu *vcpu, uint64_t pc);
-void vm_exit_rendezvous(struct vcpu *vcpu, uint64_t pc);
void vm_exit_astpending(struct vcpu *vcpu, uint64_t pc);
-cpuset_t vm_active_cpus(struct vm *vm);
-cpuset_t vm_debug_cpus(struct vm *vm);
-cpuset_t vm_suspended_cpus(struct vm *vm);
-
-static __inline int
-vcpu_rendezvous_pending(struct vm_eventinfo *info)
-{
-
- return (*((uintptr_t *)(info->rptr)) != 0);
-}
-
-static __inline int
-vcpu_suspended(struct vm_eventinfo *info)
-{
-
- return (*info->sptr);
-}
-
-int vcpu_debugged(struct vcpu *vcpu);
-
-enum vcpu_state {
- VCPU_IDLE,
- VCPU_FROZEN,
- VCPU_RUNNING,
- VCPU_SLEEPING,
-};
-
-int vcpu_set_state(struct vcpu *vcpu, enum vcpu_state state, bool from_idle);
-enum vcpu_state vcpu_get_state(struct vcpu *vcpu, int *hostcpu);
-
-static int __inline
-vcpu_is_running(struct vcpu *vcpu, int *hostcpu)
-{
- return (vcpu_get_state(vcpu, hostcpu) == VCPU_RUNNING);
-}
-
-#ifdef _SYS_PROC_H_
-static int __inline
-vcpu_should_yield(struct vcpu *vcpu)
-{
- struct thread *td;
-
- td = curthread;
- return (td->td_ast != 0 || td->td_owepreempt != 0);
-}
-#endif
-
-void *vcpu_stats(struct vcpu *vcpu);
-void vcpu_notify_event(struct vcpu *vcpu);
-struct vm_mem *vm_mem(struct vm *vm);
-
-enum vm_reg_name vm_segment_name(int seg_encoding);
-
struct vm_copyinfo {
uint64_t gpa;
size_t len;
diff --git a/sys/arm64/include/vmm_instruction_emul.h b/sys/arm64/include/vmm_instruction_emul.h
index a295f7cce127..dc281f442543 100644
--- a/sys/arm64/include/vmm_instruction_emul.h
+++ b/sys/arm64/include/vmm_instruction_emul.h
@@ -27,6 +27,14 @@
#ifndef _VMM_INSTRUCTION_EMUL_H_
#define _VMM_INSTRUCTION_EMUL_H_
+#include <sys/types.h>
+
+struct vcpu;
+struct vm;
+struct vie;
+struct vre;
+struct vm_guest_paging;
+
/*
* Callback functions to read and write memory regions.
*/
diff --git a/sys/arm64/iommu/smmu.c b/sys/arm64/iommu/smmu.c
index ec8e04ce117b..a84ef4ae347e 100644
--- a/sys/arm64/iommu/smmu.c
+++ b/sys/arm64/iommu/smmu.c
@@ -1698,22 +1698,21 @@ smmu_domain_alloc(device_t dev, struct iommu_unit *iommu)
unit = (struct smmu_unit *)iommu;
- domain = malloc(sizeof(*domain), M_SMMU, M_WAITOK | M_ZERO);
-
error = smmu_asid_alloc(sc, &new_asid);
if (error) {
- free(domain, M_SMMU);
device_printf(sc->dev,
"Could not allocate ASID for a new domain.\n");
return (NULL);
}
+ domain = malloc(sizeof(*domain), M_SMMU, M_WAITOK | M_ZERO);
domain->asid = (uint16_t)new_asid;
smmu_pmap_pinit(&domain->p);
error = smmu_init_cd(sc, domain);
if (error) {
+ smmu_asid_free(sc, domain->asid);
free(domain, M_SMMU);
device_printf(sc->dev, "Could not initialize CD\n");
return (NULL);
diff --git a/sys/arm64/vmm/io/vgic_v3.c b/sys/arm64/vmm/io/vgic_v3.c
index 023406c64182..199749c12b9f 100644
--- a/sys/arm64/vmm/io/vgic_v3.c
+++ b/sys/arm64/vmm/io/vgic_v3.c
@@ -57,7 +57,6 @@
#include <machine/vmparam.h>
#include <machine/intr.h>
#include <machine/vmm.h>
-#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>
#include <arm/arm/gic_common.h>
@@ -69,6 +68,9 @@
#include <arm64/vmm/arm64.h>
#include <arm64/vmm/vmm_handlers.h>
+#include <dev/vmm/vmm_dev.h>
+#include <dev/vmm/vmm_vm.h>
+
#include "vgic.h"
#include "vgic_v3.h"
#include "vgic_v3_reg.h"
@@ -670,7 +672,7 @@ read_enabler(struct hypctx *hypctx, int n)
if (irq == NULL)
continue;
- if (!irq->enabled)
+ if (irq->enabled)
ret |= 1u << i;
vgic_v3_release_irq(irq);
}
diff --git a/sys/arm64/vmm/io/vtimer.c b/sys/arm64/vmm/io/vtimer.c
index 7c7fbb49e691..d1c489463882 100644
--- a/sys/arm64/vmm/io/vtimer.c
+++ b/sys/arm64/vmm/io/vtimer.c
@@ -47,6 +47,8 @@
#include <arm64/vmm/arm64.h>
+#include <dev/vmm/vmm_vm.h>
+
#include "vgic.h"
#include "vtimer.h"
diff --git a/sys/arm64/vmm/vmm.c b/sys/arm64/vmm/vmm.c
index 31d2fb3f516b..031400f3f1d0 100644
--- a/sys/arm64/vmm/vmm.c
+++ b/sys/arm64/vmm/vmm.c
@@ -40,7 +40,6 @@
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
-#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
@@ -61,10 +60,12 @@
#include <machine/vmm_instruction_emul.h>
#include <dev/pci/pcireg.h>
+
#include <dev/vmm/vmm_dev.h>
#include <dev/vmm/vmm_ktr.h>
#include <dev/vmm/vmm_mem.h>
#include <dev/vmm/vmm_stat.h>
+#include <dev/vmm/vmm_vm.h>
#include "arm64.h"
#include "mmu.h"
@@ -72,85 +73,11 @@
#include "io/vgic.h"
#include "io/vtimer.h"
-struct vcpu {
- int flags;
- enum vcpu_state state;
- struct mtx mtx;
- int hostcpu; /* host cpuid this vcpu last ran on */
- int vcpuid;
- void *stats;
- struct vm_exit exitinfo;
- uint64_t nextpc; /* (x) next instruction to execute */
- struct vm *vm; /* (o) */
- void *cookie; /* (i) cpu-specific data */
- struct vfpstate *guestfpu; /* (a,i) guest fpu state */
-};
-
-#define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
-#define vcpu_lock_destroy(v) mtx_destroy(&((v)->mtx))
-#define vcpu_lock(v) mtx_lock_spin(&((v)->mtx))
-#define vcpu_unlock(v) mtx_unlock_spin(&((v)->mtx))
-#define vcpu_assert_locked(v) mtx_assert(&((v)->mtx), MA_OWNED)
-
-struct vmm_mmio_region {
- uint64_t start;
- uint64_t end;
- mem_region_read_t read;
- mem_region_write_t write;
-};
-#define VM_MAX_MMIO_REGIONS 4
-
-struct vmm_special_reg {
- uint32_t esr_iss;
- uint32_t esr_mask;
- reg_read_t reg_read;
- reg_write_t reg_write;
- void *arg;
-};
-#define VM_MAX_SPECIAL_REGS 16
-
-/*
- * Initialization:
- * (o) initialized the first time the VM is created
- * (i) initialized when VM is created and when it is reinitialized
- * (x) initialized before use
- */
-struct vm {
- void *cookie; /* (i) cpu-specific data */
- volatile cpuset_t active_cpus; /* (i) active vcpus */
- volatile cpuset_t debug_cpus; /* (i) vcpus stopped for debug */
- int suspend; /* (i) stop VM execution */
- bool dying; /* (o) is dying */
- volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */
- volatile cpuset_t halted_cpus; /* (x) cpus in a hard halt */
- struct vm_mem mem; /* (i) guest memory */
- char name[VM_MAX_NAMELEN + 1]; /* (o) virtual machine name */
- struct vcpu **vcpu; /* (i) guest vcpus */
- struct vmm_mmio_region mmio_region[VM_MAX_MMIO_REGIONS];
- /* (o) guest MMIO regions */
- struct vmm_special_reg special_reg[VM_MAX_SPECIAL_REGS];
- /* The following describe the vm cpu topology */
- uint16_t sockets; /* (o) num of sockets */
- uint16_t cores; /* (o) num of cores/socket */
- uint16_t threads; /* (o) num of threads/core */
- uint16_t maxcpus; /* (o) max pluggable cpus */
- struct sx vcpus_init_lock; /* (o) */
-};
-
-static int vm_handle_wfi(struct vcpu *vcpu,
- struct vm_exit *vme, bool *retu);
-
static MALLOC_DEFINE(M_VMM, "vmm", "vmm");
/* statistics */
static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
-SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);
-
-static int vmm_ipinum;
-SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
- "IPI vector used for vcpu notifications");
-
struct vmm_regs {
uint64_t id_aa64afr0;
uint64_t id_aa64afr1;
@@ -205,8 +132,6 @@ static const struct vmm_regs vmm_arch_regs_masks = {
/* Host registers masked by vmm_arch_regs_masks. */
static struct vmm_regs vmm_arch_regs;
-static void vcpu_notify_event_locked(struct vcpu *vcpu);
-
/* global statistics */
VMM_STAT(VMEXIT_COUNT, "total number of vm exits");
VMM_STAT(VMEXIT_UNKNOWN, "number of vmexits for the unknown exception");
@@ -357,14 +282,6 @@ vm_init(struct vm *vm, bool create)
}
}
-void
-vm_disable_vcpu_creation(struct vm *vm)
-{
- sx_xlock(&vm->vcpus_init_lock);
- vm->dying = true;
- sx_xunlock(&vm->vcpus_init_lock);
-}
-
struct vcpu *
vm_alloc_vcpu(struct vm *vm, int vcpuid)
{
@@ -401,18 +318,6 @@ vm_alloc_vcpu(struct vm *vm, int vcpuid)
return (vcpu);
}
-void
-vm_lock_vcpus(struct vm *vm)
-{
- sx_xlock(&vm->vcpus_init_lock);
-}
-
-void
-vm_unlock_vcpus(struct vm *vm)
-{
- sx_unlock(&vm->vcpus_init_lock);
-}
-
int
vm_create(const char *name, struct vm **retvm)
{
@@ -426,6 +331,7 @@ vm_create(const char *name, struct vm **retvm)
return (error);
}
strcpy(vm->name, name);
+ mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);
sx_init(&vm->vcpus_init_lock, "vm vcpus");
vm->sockets = 1;
@@ -442,35 +348,6 @@ vm_create(const char *name, struct vm **retvm)
return (0);
}
-void
-vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
- uint16_t *threads, uint16_t *maxcpus)
-{
- *sockets = vm->sockets;
- *cores = vm->cores;
- *threads = vm->threads;
- *maxcpus = vm->maxcpus;
-}
-
-uint16_t
-vm_get_maxcpus(struct vm *vm)
-{
- return (vm->maxcpus);
-}
-
-int
-vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
- uint16_t threads, uint16_t maxcpus)
-{
- /* Ignore maxcpus. */
- if ((sockets * cores * threads) > vm->maxcpus)
- return (EINVAL);
- vm->sockets = sockets;
- vm->cores = cores;
- vm->threads = threads;
- return(0);
-}
-
static void
vm_cleanup(struct vm *vm, bool destroy)
{
@@ -515,29 +392,11 @@ vm_destroy(struct vm *vm)
free(vm, M_VMM);
}
-int
-vm_reinit(struct vm *vm)
-{
- int error;
-
- /*
- * A virtual machine can be reset only if all vcpus are suspended.
- */
- if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
- vm_cleanup(vm, false);
- vm_init(vm, false);
- error = 0;
- } else {
- error = EBUSY;
- }
-
- return (error);
-}
-
-const char *
-vm_name(struct vm *vm)
+void
+vm_reset(struct vm *vm)
{
- return (vm->name);
+ vm_cleanup(vm, false);
+ vm_init(vm, false);
}
int
@@ -828,33 +687,6 @@ out_user:
return (0);
}
-int
-vm_suspend(struct vm *vm, enum vm_suspend_how how)
-{
- int i;
-
- if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
- return (EINVAL);
-
- if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) {
- VM_CTR2(vm, "virtual machine already suspended %d/%d",
- vm->suspend, how);
- return (EALREADY);
- }
-
- VM_CTR1(vm, "virtual machine successfully suspended %d", how);
-
- /*
- * Notify all active vcpus that they are now suspended.
- */
- for (i = 0; i < vm->maxcpus; i++) {
- if (CPU_ISSET(i, &vm->active_cpus))
- vcpu_notify_event(vm_vcpu(vm, i));
- }
-
- return (0);
-}
-
void
vm_exit_suspended(struct vcpu *vcpu, uint64_t pc)
{
@@ -882,136 +714,6 @@ vm_exit_debug(struct vcpu *vcpu, uint64_t pc)
vmexit->exitcode = VM_EXITCODE_DEBUG;
}
-int
-vm_activate_cpu(struct vcpu *vcpu)
-{
- struct vm *vm = vcpu->vm;
-
- if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
- return (EBUSY);
-
- CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus);
- return (0);
-
-}
-
-int
-vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu)
-{
- if (vcpu == NULL) {
- vm->debug_cpus = vm->active_cpus;
- for (int i = 0; i < vm->maxcpus; i++) {
- if (CPU_ISSET(i, &vm->active_cpus))
- vcpu_notify_event(vm_vcpu(vm, i));
- }
- } else {
- if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
- return (EINVAL);
-
- CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
- vcpu_notify_event(vcpu);
- }
- return (0);
-}
-
-int
-vm_resume_cpu(struct vm *vm, struct vcpu *vcpu)
-{
-
- if (vcpu == NULL) {
- CPU_ZERO(&vm->debug_cpus);
- } else {
- if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus))
- return (EINVAL);
-
- CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
- }
- return (0);
-}
-
-int
-vcpu_debugged(struct vcpu *vcpu)
-{
-
- return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus));
-}
-
-cpuset_t
-vm_active_cpus(struct vm *vm)
-{
-
- return (vm->active_cpus);
-}
-
-cpuset_t
-vm_debug_cpus(struct vm *vm)
-{
-
- return (vm->debug_cpus);
-}
-
-cpuset_t
-vm_suspended_cpus(struct vm *vm)
-{
-
- return (vm->suspended_cpus);
-}
-
-
-void *
-vcpu_stats(struct vcpu *vcpu)
-{
-
- return (vcpu->stats);
-}
-
-/*
- * This function is called to ensure that a vcpu "sees" a pending event
- * as soon as possible:
- * - If the vcpu thread is sleeping then it is woken up.
- * - If the vcpu is running on a different host_cpu then an IPI will be directed
- * to the host_cpu to cause the vcpu to trap into the hypervisor.
- */
-static void
-vcpu_notify_event_locked(struct vcpu *vcpu)
-{
- int hostcpu;
-
- hostcpu = vcpu->hostcpu;
- if (vcpu->state == VCPU_RUNNING) {
- KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
- if (hostcpu != curcpu) {
- ipi_cpu(hostcpu, vmm_ipinum);
- } else {
- /*
- * If the 'vcpu' is running on 'curcpu' then it must
- * be sending a notification to itself (e.g. SELF_IPI).
- * The pending event will be picked up when the vcpu
- * transitions back to guest context.
- */
- }
- } else {
- KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
- "with hostcpu %d", vcpu->state, hostcpu));
- if (vcpu->state == VCPU_SLEEPING)
- wakeup_one(vcpu);
- }
-}
-
-void
-vcpu_notify_event(struct vcpu *vcpu)
-{
- vcpu_lock(vcpu);
- vcpu_notify_event_locked(vcpu);
- vcpu_unlock(vcpu);
-}
-
-struct vm_mem *
-vm_mem(struct vm *vm)
-{
- return (&vm->mem);
-}
-
static void
restore_guest_fpustate(struct vcpu *vcpu)
{
@@ -1047,71 +749,6 @@ save_guest_fpustate(struct vcpu *vcpu)
KASSERT(PCPU_GET(fpcurthread) == NULL,
("%s: fpcurthread set with guest registers", __func__));
}
-static int
-vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
- bool from_idle)
-{
- int error;
-
- vcpu_assert_locked(vcpu);
-
- /*
- * State transitions from the vmmdev_ioctl() must always begin from
- * the VCPU_IDLE state. This guarantees that there is only a single
- * ioctl() operating on a vcpu at any point.
- */
- if (from_idle) {
- while (vcpu->state != VCPU_IDLE) {
- vcpu_notify_event_locked(vcpu);
- msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
- }
- } else {
- KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
- "vcpu idle state"));
- }
-
- if (vcpu->state == VCPU_RUNNING) {
- KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
- "mismatch for running vcpu", curcpu, vcpu->hostcpu));
- } else {
- KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
- "vcpu that is not running", vcpu->hostcpu));
- }
-
- /*
- * The following state transitions are allowed:
- * IDLE -> FROZEN -> IDLE
- * FROZEN -> RUNNING -> FROZEN
- * FROZEN -> SLEEPING -> FROZEN
- */
- switch (vcpu->state) {
- case VCPU_IDLE:
- case VCPU_RUNNING:
- case VCPU_SLEEPING:
- error = (newstate != VCPU_FROZEN);
- break;
- case VCPU_FROZEN:
- error = (newstate == VCPU_FROZEN);
- break;
- default:
- error = 1;
- break;
- }
-
- if (error)
- return (EBUSY);
-
- vcpu->state = newstate;
- if (newstate == VCPU_RUNNING)
- vcpu->hostcpu = curcpu;
- else
- vcpu->hostcpu = NOCPU;
-
- if (newstate == VCPU_IDLE)
- wakeup(&vcpu->state);
-
- return (0);
-}
static void
vcpu_require_state(struct vcpu *vcpu, enum vcpu_state newstate)
@@ -1149,56 +786,12 @@ vm_set_capability(struct vcpu *vcpu, int type, int val)
return (vmmops_setcap(vcpu->cookie, type, val));
}
-struct vm *
-vcpu_vm(struct vcpu *vcpu)
-{
- return (vcpu->vm);
-}
-
-int
-vcpu_vcpuid(struct vcpu *vcpu)
-{
- return (vcpu->vcpuid);
-}
-
void *
vcpu_get_cookie(struct vcpu *vcpu)
{
return (vcpu->cookie);
}
-struct vcpu *
-vm_vcpu(struct vm *vm, int vcpuid)
-{
- return (vm->vcpu[vcpuid]);
-}
-
-int
-vcpu_set_state(struct vcpu *vcpu, enum vcpu_state newstate, bool from_idle)
-{
- int error;
-
- vcpu_lock(vcpu);
- error = vcpu_set_state_locked(vcpu, newstate, from_idle);
- vcpu_unlock(vcpu);
-
- return (error);
-}
-
-enum vcpu_state
-vcpu_get_state(struct vcpu *vcpu, int *hostcpu)
-{
- enum vcpu_state state;
-
- vcpu_lock(vcpu);
- state = vcpu->state;
- if (hostcpu != NULL)
- *hostcpu = vcpu->hostcpu;
- vcpu_unlock(vcpu);
-
- return (state);
-}
-
int
vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval)
{
diff --git a/sys/arm64/vmm/vmm_arm64.c b/sys/arm64/vmm/vmm_arm64.c
index aa1361049f49..d529f000b828 100644
--- a/sys/arm64/vmm/vmm_arm64.c
+++ b/sys/arm64/vmm/vmm_arm64.c
@@ -52,12 +52,12 @@
#include <machine/cpu.h>
#include <machine/machdep.h>
#include <machine/vmm.h>
-#include <machine/vmm_dev.h>
#include <machine/atomic.h>
#include <machine/hypervisor.h>
#include <machine/pmap.h>
#include <dev/vmm/vmm_mem.h>
+#include <dev/vmm/vmm_vm.h>
#include "mmu.h"
#include "arm64.h"
diff --git a/sys/arm64/vmm/vmm_mmu.c b/sys/arm64/vmm/vmm_mmu.c
index 42537254e27b..ab876d4b47b6 100644
--- a/sys/arm64/vmm/vmm_mmu.c
+++ b/sys/arm64/vmm/vmm_mmu.c
@@ -350,8 +350,8 @@ vmmpmap_remove(vm_offset_t va, vm_size_t size, bool invalidate)
("%s: Mapping is not page-sized", __func__));
if (invalidate) {
- l3_list = malloc((size / PAGE_SIZE) * sizeof(l3_list[0]),
- M_TEMP, M_WAITOK | M_ZERO);
+ l3_list = malloc(atop(size) * sizeof(l3_list[0]), M_TEMP,
+ M_WAITOK | M_ZERO);
}
sva = va;
@@ -359,32 +359,17 @@ vmmpmap_remove(vm_offset_t va, vm_size_t size, bool invalidate)
mtx_lock(&vmmpmap_mtx);
for (i = 0; va < eva; va = va_next) {
l0e = atomic_load_64(&l0[pmap_l0_index(va)]);
- if (l0e == 0) {
- va_next = (va + L0_SIZE) & ~L0_OFFSET;
- if (va_next < va)
- va_next = eva;
- continue;
- }
+ MPASS(l0e != 0);
MPASS((l0e & ATTR_DESCR_MASK) == L0_TABLE);
l1 = (pd_entry_t *)PHYS_TO_DMAP(l0e & ~ATTR_MASK);
l1e = atomic_load_64(&l1[pmap_l1_index(va)]);
- if (l1e == 0) {
- va_next = (va + L1_SIZE) & ~L1_OFFSET;
- if (va_next < va)
- va_next = eva;
- continue;
- }
+ MPASS(l1e != 0);
MPASS((l1e & ATTR_DESCR_MASK) == L1_TABLE);
l2 = (pd_entry_t *)PHYS_TO_DMAP(l1e & ~ATTR_MASK);
l2e = atomic_load_64(&l2[pmap_l2_index(va)]);
- if (l2e == 0) {
- va_next = (va + L2_SIZE) & ~L2_OFFSET;
- if (va_next < va)
- va_next = eva;
- continue;
- }
+ MPASS(l2e != 0);
MPASS((l2e & ATTR_DESCR_MASK) == L2_TABLE);
l3 = (pd_entry_t *)PHYS_TO_DMAP(l2e & ~ATTR_MASK);
@@ -419,7 +404,7 @@ vmmpmap_remove(vm_offset_t va, vm_size_t size, bool invalidate)
/* Invalidate the memory from the D-cache */
vmm_call_hyp(HYP_DC_CIVAC, sva, size);
- for (i = 0; i < (size / PAGE_SIZE); i++) {
+ for (i = 0; i < atop(size); i++) {
atomic_store_64(l3_list[i], 0);
}
diff --git a/sys/arm64/vmm/vmm_reset.c b/sys/arm64/vmm/vmm_reset.c
index 0e4910ea87b4..06ac6dec5af8 100644
--- a/sys/arm64/vmm/vmm_reset.c
+++ b/sys/arm64/vmm/vmm_reset.c
@@ -34,6 +34,8 @@
#include <machine/cpu.h>
#include <machine/hypervisor.h>
+#include <dev/vmm/vmm_vm.h>
+
#include "arm64.h"
#include "reset.h"
diff --git a/sys/bsm/audit_kevents.h b/sys/bsm/audit_kevents.h
index 9381396f247c..ac6f5d69f07d 100644
--- a/sys/bsm/audit_kevents.h
+++ b/sys/bsm/audit_kevents.h
@@ -664,6 +664,7 @@
#define AUE_TIMERFD 43270 /* FreeBSD/Linux. */
#define AUE_SETCRED 43271 /* FreeBSD-specific. */
#define AUE_INOTIFY 43272 /* FreeBSD/Linux. */
+#define AUE_PDRFORK 43273 /* FreeBSD-specific. */
/*
* Darwin BSM uses a number of AUE_O_* definitions, which are aliased to the
diff --git a/sys/cam/cam_periph.c b/sys/cam/cam_periph.c
index 7f6855a5d51d..91ec7a71f612 100644
--- a/sys/cam/cam_periph.c
+++ b/sys/cam/cam_periph.c
@@ -1791,7 +1791,6 @@ camperiphscsisenseerror(union ccb *ccb, union ccb **orig,
/*start*/TRUE,
/*load/eject*/le,
/*immediate*/FALSE,
- /*power_condition*/SSS_PC_START_VALID,
SSD_FULL_SIZE,
/*timeout*/50000);
break;
diff --git a/sys/cam/scsi/scsi_all.c b/sys/cam/scsi/scsi_all.c
index d8bba97e79bc..76fcd3fee188 100644
--- a/sys/cam/scsi/scsi_all.c
+++ b/sys/cam/scsi/scsi_all.c
@@ -9009,6 +9009,40 @@ void
scsi_start_stop(struct ccb_scsiio *csio, uint32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb *),
uint8_t tag_action, int start, int load_eject,
+ int immediate, uint8_t sense_len, uint32_t timeout)
+{
+ struct scsi_start_stop_unit *scsi_cmd;
+ int extra_flags = 0;
+
+ scsi_cmd = (struct scsi_start_stop_unit *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = START_STOP_UNIT;
+ if (start != 0) {
+ scsi_cmd->how |= SSS_START;
+ /* it takes a lot of power to start a drive */
+ extra_flags |= CAM_HIGH_POWER;
+ }
+ if (load_eject != 0)
+ scsi_cmd->how |= SSS_LOEJ;
+ if (immediate != 0)
+ scsi_cmd->byte2 |= SSS_IMMED;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_NONE | extra_flags,
+ tag_action,
+ /*data_ptr*/NULL,
+ /*dxfer_len*/0,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+}
+
+void
+scsi_start_stop_pc(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, int start, int load_eject,
int immediate, uint8_t power_condition, uint8_t sense_len,
uint32_t timeout)
{
diff --git a/sys/cam/scsi/scsi_all.h b/sys/cam/scsi/scsi_all.h
index cadf2d1f2835..8aa87abce370 100644
--- a/sys/cam/scsi/scsi_all.h
+++ b/sys/cam/scsi/scsi_all.h
@@ -4350,6 +4350,10 @@ void scsi_unmap(struct ccb_scsiio *csio, uint32_t retries,
void scsi_start_stop(struct ccb_scsiio *csio, uint32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb *),
uint8_t tag_action, int start, int load_eject,
+ int immediate, uint8_t sense_len, uint32_t timeout);
+void scsi_start_stop_pc(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, int start, int load_eject,
int immediate, uint8_t power_condition, uint8_t sense_len,
uint32_t timeout);
void scsi_read_attribute(struct ccb_scsiio *csio, uint32_t retries,
diff --git a/sys/cam/scsi/scsi_cd.c b/sys/cam/scsi/scsi_cd.c
index bc4634562dc6..e622a96ec77e 100644
--- a/sys/cam/scsi/scsi_cd.c
+++ b/sys/cam/scsi/scsi_cd.c
@@ -3406,7 +3406,6 @@ cdstartunit(struct cam_periph *periph, int load)
/* start */ TRUE,
/* load_eject */ load,
/* immediate */ FALSE,
- /* power_condition */ SSS_PC_START_VALID,
/* sense_len */ SSD_FULL_SIZE,
/* timeout */ 50000);
@@ -3435,7 +3434,6 @@ cdstopunit(struct cam_periph *periph, uint32_t eject)
/* start */ FALSE,
/* load_eject */ eject,
/* immediate */ FALSE,
- /* power_condition */ SSS_PC_START_VALID,
/* sense_len */ SSD_FULL_SIZE,
/* timeout */ 50000);
diff --git a/sys/cddl/compat/opensolaris/sys/cpuvar_defs.h b/sys/cddl/compat/opensolaris/sys/cpuvar_defs.h
index d99eaea7947e..1bcc721b7c15 100644
--- a/sys/cddl/compat/opensolaris/sys/cpuvar_defs.h
+++ b/sys/cddl/compat/opensolaris/sys/cpuvar_defs.h
@@ -40,9 +40,6 @@
#define CPU_DTRACE_KPRIV 0x0080 /* DTrace fault: bad kernel access */
#define CPU_DTRACE_UPRIV 0x0100 /* DTrace fault: bad user access */
#define CPU_DTRACE_TUPOFLOW 0x0200 /* DTrace fault: tuple stack overflow */
-#if defined(__sparc)
-#define CPU_DTRACE_FAKERESTORE 0x0400 /* pid provider hint to getreg */
-#endif
#define CPU_DTRACE_ENTRY 0x0800 /* pid provider hint to ustack() */
#define CPU_DTRACE_BADSTACK 0x1000 /* DTrace fault: bad stack */
diff --git a/sys/cddl/contrib/opensolaris/uts/common/dtrace/fasttrap.c b/sys/cddl/contrib/opensolaris/uts/common/dtrace/fasttrap.c
index 28e2c26f5b1c..42c50ce07d03 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/dtrace/fasttrap.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/dtrace/fasttrap.c
@@ -2167,11 +2167,7 @@ fasttrap_meta_create_probe(void *arg, void *parg,
pp->ftp_tps[i].fit_tp = tp;
pp->ftp_tps[i].fit_id.fti_probe = pp;
-#ifdef __sparc
- pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_POST_OFFSETS;
-#else
pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
-#endif
}
/*
diff --git a/sys/cddl/contrib/opensolaris/uts/common/sys/dtrace.h b/sys/cddl/contrib/opensolaris/uts/common/sys/dtrace.h
index 242a32e140f5..c322071c6193 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/sys/dtrace.h
+++ b/sys/cddl/contrib/opensolaris/uts/common/sys/dtrace.h
@@ -2411,11 +2411,6 @@ extern void dtrace_invop_add(int (*)(uintptr_t, struct trapframe *, uintptr_t));
extern void dtrace_invop_remove(int (*)(uintptr_t, struct trapframe *,
uintptr_t));
-#ifdef __sparc
-extern int dtrace_blksuword32(uintptr_t, uint32_t *, int);
-extern void dtrace_getfsr(uint64_t *);
-#endif
-
#ifndef illumos
extern void dtrace_helpers_duplicate(proc_t *, proc_t *);
extern void dtrace_helpers_destroy(proc_t *);
diff --git a/sys/cddl/contrib/opensolaris/uts/common/sys/dtrace_impl.h b/sys/cddl/contrib/opensolaris/uts/common/sys/dtrace_impl.h
index 10ba0d37ac1b..c26956a646ff 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/sys/dtrace_impl.h
+++ b/sys/cddl/contrib/opensolaris/uts/common/sys/dtrace_impl.h
@@ -1302,15 +1302,8 @@ extern int dtrace_attached(void);
extern hrtime_t dtrace_gethrestime(void);
#endif
-#ifdef __sparc
-extern void dtrace_flush_windows(void);
-extern void dtrace_flush_user_windows(void);
-extern uint_t dtrace_getotherwin(void);
-extern uint_t dtrace_getfprs(void);
-#else
extern void dtrace_copy(uintptr_t, uintptr_t, size_t);
extern void dtrace_copystr(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
-#endif
/*
* DTrace Assertions
diff --git a/sys/cddl/contrib/opensolaris/uts/common/sys/isa_defs.h b/sys/cddl/contrib/opensolaris/uts/common/sys/isa_defs.h
index 93f1855b3908..2e4281d959a8 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/sys/isa_defs.h
+++ b/sys/cddl/contrib/opensolaris/uts/common/sys/isa_defs.h
@@ -109,7 +109,7 @@
*
* _LP64:
* Long/Pointer are 64 bits, Int is 32 bits. This is the chosen
- * implementation for 64-bit ABIs such as SPARC V9.
+ * implementation for 64-bit ABIs.
*
* _I32LPx:
* A compilation environment where 'int' is 32-bit, and
@@ -190,7 +190,7 @@
* __x86
* This is ONLY a synonym for defined(__i386) || defined(__amd64)
* which is useful only insofar as these two architectures share
- * common attributes. Analogous to __sparc.
+ * common attributes.
*
* _PSM_MODULES
* This indicates whether or not the implementation uses PSM
@@ -235,9 +235,6 @@ extern "C" {
/*
* Define the appropriate "processor characteristics"
*/
-#ifdef illumos
-#define _LITTLE_ENDIAN
-#endif
#define _STACK_GROWS_DOWNWARD
#define _LONG_LONG_LTOH
#define _BIT_FIELDS_LTOH
@@ -302,9 +299,6 @@ extern "C" {
/*
* Define the appropriate "processor characteristics"
*/
-#ifdef illumos
-#define _LITTLE_ENDIAN
-#endif
#define _STACK_GROWS_DOWNWARD
#define _LONG_LONG_LTOH
#define _BIT_FIELDS_LTOH
@@ -475,71 +469,6 @@ extern "C" {
#define _DONT_USE_1275_GENERIC_NAMES
#define _HAVE_CPUID_INSN
-#elif defined(__mips__)
-
-/*
- * Define the appropriate "processor characteristics"
- */
-#define _STACK_GROWS_DOWNWARD
-#define _LONG_LONG_LTOH
-#define _BIT_FIELDS_LTOH
-#define _IEEE_754
-#define _CHAR_IS_SIGNED
-#define _BOOL_ALIGNMENT 1
-#define _CHAR_ALIGNMENT 1
-#define _SHORT_ALIGNMENT 2
-#define _INT_ALIGNMENT 4
-#define _FLOAT_ALIGNMENT 4
-#define _FLOAT_COMPLEX_ALIGNMENT 4
-#if defined(__mips_n64)
-#define _LONG_ALIGNMENT 8
-#define _LONG_LONG_ALIGNMENT 8
-#define _DOUBLE_ALIGNMENT 8
-#define _DOUBLE_COMPLEX_ALIGNMENT 8
-#define _LONG_DOUBLE_ALIGNMENT 8
-#define _LONG_DOUBLE_COMPLEX_ALIGNMENT 8
-#define _POINTER_ALIGNMENT 8
-#define _MAX_ALIGNMENT 8
-#define _ALIGNMENT_REQUIRED 0
-
-#define _LONG_LONG_ALIGNMENT_32 _INT_ALIGNMENT
-/*
- * Define the appropriate "implementation choices".
- */
-#if !defined(_LP64)
-#define _LP64
-#endif
-#else
-#define _LONG_ALIGNMENT 4
-#define _LONG_LONG_ALIGNMENT 4
-#define _DOUBLE_ALIGNMENT 4
-#define _DOUBLE_COMPLEX_ALIGNMENT 4
-#define _LONG_DOUBLE_ALIGNMENT 4
-#define _LONG_DOUBLE_COMPLEX_ALIGNMENT 4
-#define _POINTER_ALIGNMENT 4
-#define _MAX_ALIGNMENT 4
-#define _ALIGNMENT_REQUIRED 0
-
-#define _LONG_LONG_ALIGNMENT_32 _LONG_LONG_ALIGNMENT
-
-/*
- * Define the appropriate "implementation choices".
- */
-#if !defined(_ILP32)
-#define _ILP32
-#endif
-#if !defined(_I32LPx) && defined(_KERNEL)
-#define _I32LPx
-#endif
-#endif
-#define _SUNOS_VTOC_16
-#define _DMA_USES_PHYSADDR
-#define _FIRMWARE_NEEDS_FDISK
-#define _PSM_MODULES
-#define _RTC_CONFIG
-#define _DONT_USE_1275_GENERIC_NAMES
-#define _HAVE_CPUID_INSN
-
#elif defined(__powerpc__)
#if defined(__BIG_ENDIAN__)
@@ -549,137 +478,6 @@ extern "C" {
#endif
/*
- * The following set of definitions characterize the Solaris on SPARC systems.
- *
- * The symbol __sparc indicates any of the SPARC family of processor
- * architectures. This includes SPARC V7, SPARC V8 and SPARC V9.
- *
- * The symbol __sparcv8 indicates the 32-bit SPARC V8 architecture as defined
- * by Version 8 of the SPARC Architecture Manual. (SPARC V7 is close enough
- * to SPARC V8 for the former to be subsumed into the latter definition.)
- *
- * The symbol __sparcv9 indicates the 64-bit SPARC V9 architecture as defined
- * by Version 9 of the SPARC Architecture Manual.
- *
- * The symbols __sparcv8 and __sparcv9 are mutually exclusive, and are only
- * relevant when the symbol __sparc is defined.
- */
-/*
- * XXX Due to the existence of 5110166, "defined(__sparcv9)" needs to be added
- * to support backwards builds. This workaround should be removed in s10_71.
- */
-#elif defined(__sparc) || defined(__sparcv9) || defined(__sparc__)
-#if !defined(__sparc)
-#define __sparc
-#endif
-
-/*
- * You can be 32-bit or 64-bit, but not both at the same time.
- */
-#if defined(__sparcv8) && defined(__sparcv9)
-#error "SPARC Versions 8 and 9 are mutually exclusive choices"
-#endif
-
-/*
- * Existing compilers do not set __sparcv8. Years will transpire before
- * the compilers can be depended on to set the feature test macro. In
- * the interim, we'll set it here on the basis of historical behaviour;
- * if you haven't asked for SPARC V9, then you must've meant SPARC V8.
- */
-#if !defined(__sparcv9) && !defined(__sparcv8)
-#define __sparcv8
-#endif
-
-/*
- * Define the appropriate "processor characteristics" shared between
- * all Solaris on SPARC systems.
- */
-#ifdef illumos
-#define _BIG_ENDIAN
-#endif
-#define _STACK_GROWS_DOWNWARD
-#define _LONG_LONG_HTOL
-#define _BIT_FIELDS_HTOL
-#define _IEEE_754
-#define _CHAR_IS_SIGNED
-#define _BOOL_ALIGNMENT 1
-#define _CHAR_ALIGNMENT 1
-#define _SHORT_ALIGNMENT 2
-#define _INT_ALIGNMENT 4
-#define _FLOAT_ALIGNMENT 4
-#define _FLOAT_COMPLEX_ALIGNMENT 4
-#define _LONG_LONG_ALIGNMENT 8
-#define _DOUBLE_ALIGNMENT 8
-#define _DOUBLE_COMPLEX_ALIGNMENT 8
-#define _ALIGNMENT_REQUIRED 1
-
-/*
- * Define the appropriate "implementation choices" shared between versions.
- */
-#define _SUNOS_VTOC_8
-#define _DMA_USES_VIRTADDR
-#define _NO_FDISK_PRESENT
-#define _HAVE_TEM_FIRMWARE
-#define _OBP
-
-/*
- * The following set of definitions characterize the implementation of
- * 32-bit Solaris on SPARC V8 systems.
- */
-#if defined(__sparcv8)
-
-/*
- * Define the appropriate "processor characteristics"
- */
-#define _LONG_ALIGNMENT 4
-#define _LONG_DOUBLE_ALIGNMENT 8
-#define _LONG_DOUBLE_COMPLEX_ALIGNMENT 8
-#define _POINTER_ALIGNMENT 4
-#define _MAX_ALIGNMENT 8
-
-#define _LONG_LONG_ALIGNMENT_32 _LONG_LONG_ALIGNMENT
-
-/*
- * Define the appropriate "implementation choices"
- */
-#define _ILP32
-#if !defined(_I32LPx) && defined(_KERNEL)
-#define _I32LPx
-#endif
-
-/*
- * The following set of definitions characterize the implementation of
- * 64-bit Solaris on SPARC V9 systems.
- */
-#elif defined(__sparcv9)
-
-/*
- * Define the appropriate "processor characteristics"
- */
-#define _LONG_ALIGNMENT 8
-#define _LONG_DOUBLE_ALIGNMENT 16
-#define _LONG_DOUBLE_COMPLEX_ALIGNMENT 16
-#define _POINTER_ALIGNMENT 8
-#define _MAX_ALIGNMENT 16
-
-#define _LONG_LONG_ALIGNMENT_32 _LONG_LONG_ALIGNMENT
-
-/*
- * Define the appropriate "implementation choices"
- */
-#if !defined(_LP64)
-#define _LP64
-#endif
-#if !defined(_I32LPx)
-#define _I32LPx
-#endif
-#define _MULTI_DATAMODEL
-
-#else
-#error "unknown SPARC version"
-#endif
-
-/*
* #error is strictly ansi-C, but works as well as anything for K&R systems.
*/
#else
diff --git a/sys/cddl/dev/sdt/sdt.c b/sys/cddl/dev/sdt/sdt.c
index 0a9059104671..97ef2de18525 100644
--- a/sys/cddl/dev/sdt/sdt.c
+++ b/sys/cddl/dev/sdt/sdt.c
@@ -60,6 +60,9 @@
#include <cddl/dev/dtrace/dtrace_cddl.h>
+_Static_assert(sizeof(((struct sdt_probe *)NULL)->id) == sizeof(dtrace_id_t),
+ "sdt_probe.id and dtrace_id_t size mismatch");
+
/* DTrace methods. */
static void sdt_getargdesc(void *, dtrace_id_t, void *, dtrace_argdesc_t *);
static uint64_t sdt_getargval(void *, dtrace_id_t, void *, int, int);
diff --git a/sys/compat/freebsd32/freebsd32.h b/sys/compat/freebsd32/freebsd32.h
index 9d724c93fee7..25703859a7bb 100644
--- a/sys/compat/freebsd32/freebsd32.h
+++ b/sys/compat/freebsd32/freebsd32.h
@@ -30,14 +30,26 @@
#define _COMPAT_FREEBSD32_FREEBSD32_H_
#include <sys/abi_compat.h>
+#include <sys/devicestat.h>
+#include <sys/event.h>
+#include <sys/mount.h>
#include <sys/procfs.h>
#include <sys/socket.h>
#include <sys/user.h>
#include <sys/_ffcounter.h>
/*
- * i386 is the only arch with a 32-bit time_t
+ * i386 is the only arch with a 32-bit time_t.
+ * Also it is the only arch with (u)int64_t having 4-byte alignment.
*/
+typedef struct {
+#ifdef __amd64__
+ uint32_t val[2];
+#else
+ uint64_t val;
+#endif
+} freebsd32_uint64_t;
+
#ifdef __amd64__
typedef int32_t time32_t;
#else
@@ -61,7 +73,7 @@ struct itimerspec32 {
struct bintime32 {
time32_t sec;
- uint32_t frac[2];
+ freebsd32_uint64_t frac;
};
struct ffclock_estimate32 {
@@ -531,4 +543,28 @@ struct ptrace_sc_remote32 {
uint32_t pscr_args;
};
+struct devstat32 {
+ u_int sequence0;
+ int allocated;
+ u_int start_count;
+ u_int end_count;
+ struct bintime32 busy_from;
+ struct { u_int32_t stqe_next; } dev_links;
+ u_int32_t device_number;
+ char device_name[DEVSTAT_NAME_LEN];
+ int unit_number;
+ freebsd32_uint64_t bytes[DEVSTAT_N_TRANS_FLAGS];
+ freebsd32_uint64_t operations[DEVSTAT_N_TRANS_FLAGS];
+ struct bintime32 duration[DEVSTAT_N_TRANS_FLAGS];
+ struct bintime32 busy_time;
+ struct bintime32 creation_time;
+ u_int32_t block_size;
+ freebsd32_uint64_t tag_types[3];
+ devstat_support_flags flags;
+ devstat_type_flags device_type;
+ devstat_priority priority;
+ u_int32_t id;
+ u_int sequence1;
+};
+
#endif /* !_COMPAT_FREEBSD32_FREEBSD32_H_ */
diff --git a/sys/compat/freebsd32/freebsd32_misc.c b/sys/compat/freebsd32/freebsd32_misc.c
index 7913940338c2..c76c9d5c1838 100644
--- a/sys/compat/freebsd32/freebsd32_misc.c
+++ b/sys/compat/freebsd32/freebsd32_misc.c
@@ -203,6 +203,7 @@ void
freebsd32_rusage_out(const struct rusage *s, struct rusage32 *s32)
{
+ bzero(s32, sizeof(*s32));
TV_CP(*s, *s32, ru_utime);
TV_CP(*s, *s32, ru_stime);
CP(*s, *s32, ru_maxrss);
@@ -280,6 +281,37 @@ freebsd32_wait6(struct thread *td, struct freebsd32_wait6_args *uap)
return (error);
}
+int
+freebsd32_pdwait(struct thread *td, struct freebsd32_pdwait_args *uap)
+{
+ struct __wrusage32 wru32;
+ struct __wrusage wru, *wrup;
+ struct __siginfo32 si32;
+ struct __siginfo si, *sip;
+ int error, status;
+
+ wrup = uap->wrusage != NULL ? &wru : NULL;
+ if (uap->info != NULL) {
+ sip = &si;
+ bzero(sip, sizeof(*sip));
+ } else {
+ sip = NULL;
+ }
+ error = kern_pdwait(td, uap->fd, &status, uap->options, wrup, sip);
+ if (uap->status != NULL && error == 0)
+ error = copyout(&status, uap->status, sizeof(status));
+ if (uap->wrusage != NULL && error == 0) {
+ freebsd32_rusage_out(&wru.wru_self, &wru32.wru_self);
+ freebsd32_rusage_out(&wru.wru_children, &wru32.wru_children);
+ error = copyout(&wru32, uap->wrusage, sizeof(wru32));
+ }
+ if (uap->info != NULL && error == 0) {
+ siginfo_to_siginfo32 (&si, &si32);
+ error = copyout(&si32, uap->info, sizeof(si32));
+ }
+ return (error);
+}
+
#ifdef COMPAT_FREEBSD4
static void
copy_statfs(struct statfs *in, struct ostatfs32 *out)
diff --git a/sys/compat/freebsd32/freebsd32_proto.h b/sys/compat/freebsd32/freebsd32_proto.h
index 5c0efc64f8a7..12458ed4cc4d 100644
--- a/sys/compat/freebsd32/freebsd32_proto.h
+++ b/sys/compat/freebsd32/freebsd32_proto.h
@@ -699,6 +699,13 @@ struct freebsd32_setcred_args {
char wcred_l_[PADL_(const struct setcred32 *)]; const struct setcred32 * wcred; char wcred_r_[PADR_(const struct setcred32 *)];
char size_l_[PADL_(size_t)]; size_t size; char size_r_[PADR_(size_t)];
};
+struct freebsd32_pdwait_args {
+ char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)];
+ char status_l_[PADL_(int *)]; int * status; char status_r_[PADR_(int *)];
+ char options_l_[PADL_(int)]; int options; char options_r_[PADR_(int)];
+ char wrusage_l_[PADL_(struct __wrusage32 *)]; struct __wrusage32 * wrusage; char wrusage_r_[PADR_(struct __wrusage32 *)];
+ char info_l_[PADL_(struct __siginfo32 *)]; struct __siginfo32 * info; char info_r_[PADR_(struct __siginfo32 *)];
+};
int freebsd32_wait4(struct thread *, struct freebsd32_wait4_args *);
int freebsd32_ptrace(struct thread *, struct freebsd32_ptrace_args *);
int freebsd32_recvmsg(struct thread *, struct freebsd32_recvmsg_args *);
@@ -817,6 +824,7 @@ int freebsd32_aio_readv(struct thread *, struct freebsd32_aio_readv_args *);
int freebsd32_timerfd_gettime(struct thread *, struct freebsd32_timerfd_gettime_args *);
int freebsd32_timerfd_settime(struct thread *, struct freebsd32_timerfd_settime_args *);
int freebsd32_setcred(struct thread *, struct freebsd32_setcred_args *);
+int freebsd32_pdwait(struct thread *, struct freebsd32_pdwait_args *);
#ifdef COMPAT_43
@@ -1319,6 +1327,7 @@ int freebsd11_freebsd32_fstatat(struct thread *, struct freebsd11_freebsd32_fsta
#define FREEBSD32_SYS_AUE_freebsd32_timerfd_gettime AUE_TIMERFD
#define FREEBSD32_SYS_AUE_freebsd32_timerfd_settime AUE_TIMERFD
#define FREEBSD32_SYS_AUE_freebsd32_setcred AUE_SETCRED
+#define FREEBSD32_SYS_AUE_freebsd32_pdwait AUE_PDWAIT
#undef PAD_
#undef PADL_
diff --git a/sys/compat/freebsd32/freebsd32_syscall.h b/sys/compat/freebsd32/freebsd32_syscall.h
index f8ef7e4a20d3..67ff022922a8 100644
--- a/sys/compat/freebsd32/freebsd32_syscall.h
+++ b/sys/compat/freebsd32/freebsd32_syscall.h
@@ -517,4 +517,6 @@
#define FREEBSD32_SYS_setgroups 596
#define FREEBSD32_SYS_jail_attach_jd 597
#define FREEBSD32_SYS_jail_remove_jd 598
-#define FREEBSD32_SYS_MAXSYSCALL 600
+#define FREEBSD32_SYS_pdrfork 600
+#define FREEBSD32_SYS_freebsd32_pdwait 601
+#define FREEBSD32_SYS_MAXSYSCALL 602
diff --git a/sys/compat/freebsd32/freebsd32_syscalls.c b/sys/compat/freebsd32/freebsd32_syscalls.c
index 645cdccbc02d..54b826098a9d 100644
--- a/sys/compat/freebsd32/freebsd32_syscalls.c
+++ b/sys/compat/freebsd32/freebsd32_syscalls.c
@@ -605,4 +605,6 @@ const char *freebsd32_syscallnames[] = {
"jail_attach_jd", /* 597 = jail_attach_jd */
"jail_remove_jd", /* 598 = jail_remove_jd */
"#599", /* 599 = kexec_load */
+ "pdrfork", /* 600 = pdrfork */
+ "freebsd32_pdwait", /* 601 = freebsd32_pdwait */
};
diff --git a/sys/compat/freebsd32/freebsd32_sysent.c b/sys/compat/freebsd32/freebsd32_sysent.c
index 240b54ae9011..2b5609e8a317 100644
--- a/sys/compat/freebsd32/freebsd32_sysent.c
+++ b/sys/compat/freebsd32/freebsd32_sysent.c
@@ -667,4 +667,6 @@ struct sysent freebsd32_sysent[] = {
{ .sy_narg = AS(jail_attach_jd_args), .sy_call = (sy_call_t *)sys_jail_attach_jd, .sy_auevent = AUE_JAIL_ATTACH, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 597 = jail_attach_jd */
{ .sy_narg = AS(jail_remove_jd_args), .sy_call = (sy_call_t *)sys_jail_remove_jd, .sy_auevent = AUE_JAIL_REMOVE, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 598 = jail_remove_jd */
{ .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 599 = freebsd32_kexec_load */
+ { .sy_narg = AS(pdrfork_args), .sy_call = (sy_call_t *)sys_pdrfork, .sy_auevent = AUE_PDRFORK, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC }, /* 600 = pdrfork */
+ { .sy_narg = AS(freebsd32_pdwait_args), .sy_call = (sy_call_t *)freebsd32_pdwait, .sy_auevent = AUE_PDWAIT, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC }, /* 601 = freebsd32_pdwait */
};
diff --git a/sys/compat/freebsd32/freebsd32_systrace_args.c b/sys/compat/freebsd32/freebsd32_systrace_args.c
index 29a5497e9efa..59a74d365e1c 100644
--- a/sys/compat/freebsd32/freebsd32_systrace_args.c
+++ b/sys/compat/freebsd32/freebsd32_systrace_args.c
@@ -3427,6 +3427,26 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
*n_args = 1;
break;
}
+ /* pdrfork */
+ case 600: {
+ struct pdrfork_args *p = params;
+ uarg[a++] = (intptr_t)p->fdp; /* int * */
+ iarg[a++] = p->pdflags; /* int */
+ iarg[a++] = p->rfflags; /* int */
+ *n_args = 3;
+ break;
+ }
+ /* freebsd32_pdwait */
+ case 601: {
+ struct freebsd32_pdwait_args *p = params;
+ iarg[a++] = p->fd; /* int */
+ uarg[a++] = (intptr_t)p->status; /* int * */
+ iarg[a++] = p->options; /* int */
+ uarg[a++] = (intptr_t)p->wrusage; /* struct __wrusage32 * */
+ uarg[a++] = (intptr_t)p->info; /* struct __siginfo32 * */
+ *n_args = 5;
+ break;
+ }
default:
*n_args = 0;
break;
@@ -9256,6 +9276,44 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
break;
};
break;
+ /* pdrfork */
+ case 600:
+ switch (ndx) {
+ case 0:
+ p = "userland int *";
+ break;
+ case 1:
+ p = "int";
+ break;
+ case 2:
+ p = "int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* freebsd32_pdwait */
+ case 601:
+ switch (ndx) {
+ case 0:
+ p = "int";
+ break;
+ case 1:
+ p = "userland int *";
+ break;
+ case 2:
+ p = "int";
+ break;
+ case 3:
+ p = "userland struct __wrusage32 *";
+ break;
+ case 4:
+ p = "userland struct __siginfo32 *";
+ break;
+ default:
+ break;
+ };
+ break;
default:
break;
};
@@ -11174,6 +11232,16 @@ systrace_return_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
if (ndx == 0 || ndx == 1)
p = "int";
break;
+ /* pdrfork */
+ case 600:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* freebsd32_pdwait */
+ case 601:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
default:
break;
};
diff --git a/sys/compat/linux/linux_if.c b/sys/compat/linux/linux_if.c
index f201f7c4b128..ca4e3a4079ed 100644
--- a/sys/compat/linux/linux_if.c
+++ b/sys/compat/linux/linux_if.c
@@ -105,12 +105,13 @@ static void
linux_ifnet_vnet_uninit(void *arg __unused)
{
/*
- * At a normal vnet shutdown all interfaces are gone at this point.
- * But when we kldunload linux.ko, the vnet_deregister_sysuninit()
- * would call this function for the default vnet.
+ * All cloned interfaces are already gone at this point, as well
+ * as interfaces that were if_vmove'd into this vnet. However,
+ * if a jail has created IFT_ETHER interfaces in self, or has had
+ * physical Ethernet drivers attached in self, then we may have
+ * allocated entries in the unr(9), so clear it to avoid KASSERT.
*/
- if (IS_DEFAULT_VNET(curvnet))
- clear_unrhdr(V_linux_eth_unr);
+ clear_unrhdr(V_linux_eth_unr);
delete_unrhdr(V_linux_eth_unr);
}
VNET_SYSUNINIT(linux_ifnet_vnet_uninit, SI_SUB_PROTO_IF, SI_ORDER_ANY,
diff --git a/sys/compat/linux/linux_ioctl.c b/sys/compat/linux/linux_ioctl.c
index ceb17bd040b5..d2fa0331026b 100644
--- a/sys/compat/linux/linux_ioctl.c
+++ b/sys/compat/linux/linux_ioctl.c
@@ -58,6 +58,7 @@
#include <net/if_types.h>
#include <dev/evdev/input.h>
+#include <dev/hid/hidraw.h>
#include <dev/usb/usb_ioctl.h>
#ifdef COMPAT_LINUX32
@@ -113,6 +114,7 @@ DEFINE_LINUX_IOCTL_SET(kcov, KCOV);
#ifndef COMPAT_LINUX32
DEFINE_LINUX_IOCTL_SET(nvme, NVME);
#endif
+DEFINE_LINUX_IOCTL_SET(hidraw, HIDRAW);
#undef DEFINE_LINUX_IOCTL_SET
@@ -331,6 +333,17 @@ struct linux_termios {
unsigned char c_cc[LINUX_NCCS];
};
+struct linux_termios2 {
+ unsigned int c_iflag;
+ unsigned int c_oflag;
+ unsigned int c_cflag;
+ unsigned int c_lflag;
+ unsigned char c_line;
+ unsigned char c_cc[LINUX_NCCS];
+ unsigned int c_ispeed;
+ unsigned int c_ospeed;
+};
+
struct linux_winsize {
unsigned short ws_row, ws_col;
unsigned short ws_xpixel, ws_ypixel;
@@ -386,7 +399,7 @@ bsd_to_linux_speed(int speed, struct speedtab *table)
for ( ; table->sp_speed != -1; table++)
if (table->sp_speed == speed)
return (table->sp_code);
- return (-1);
+ return (LINUX_BOTHER);
}
static void
@@ -509,6 +522,14 @@ bsd_to_linux_termios(struct termios *bios, struct linux_termios *lios)
}
static void
+bsd_to_linux_termios2(struct termios *bios, struct linux_termios2 *lios2)
+{
+ bsd_to_linux_termios(bios, (struct linux_termios *)lios2);
+ lios2->c_ospeed = bios->c_ospeed;
+ lios2->c_ispeed = bios->c_ispeed;
+}
+
+static void
linux_to_bsd_termios(struct linux_termios *lios, struct termios *bios)
{
int i;
@@ -629,6 +650,16 @@ linux_to_bsd_termios(struct linux_termios *lios, struct termios *bios)
}
static void
+linux_to_bsd_termios2(struct linux_termios2 *lios2, struct termios *bios)
+{
+ linux_to_bsd_termios((struct linux_termios *)lios2, bios);
+ if ((lios2->c_cflag & LINUX_CBAUD) == LINUX_BOTHER)
+ bios->c_ospeed = lios2->c_ospeed;
+ if ((lios2->c_cflag & LINUX_CIBAUD) == LINUX_BOTHER << LINUX_IBSHIFT)
+ bios->c_ispeed = lios2->c_ispeed;
+}
+
+static void
bsd_to_linux_termio(struct termios *bios, struct linux_termio *lio)
{
struct linux_termios lios;
@@ -664,6 +695,7 @@ linux_ioctl_termio(struct thread *td, struct linux_ioctl_args *args)
{
struct termios bios;
struct linux_termios lios;
+ struct linux_termios2 lios2;
struct linux_termio lio;
struct file *fp;
int error;
@@ -1001,6 +1033,43 @@ linux_ioctl_termio(struct thread *td, struct linux_ioctl_args *args)
args->cmd = TIOCCBRK;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
+
+ case LINUX_TCGETS2:
+ error = fo_ioctl(fp, TIOCGETA, (caddr_t)&bios, td->td_ucred,
+ td);
+ if (error)
+ break;
+ bsd_to_linux_termios2(&bios, &lios2);
+ error = copyout(&lios2, (void *)args->arg, sizeof(lios2));
+ break;
+
+ case LINUX_TCSETS2:
+ error = copyin((void *)args->arg, &lios2, sizeof(lios2));
+ if (error)
+ break;
+ linux_to_bsd_termios2(&lios2, &bios);
+ error = (fo_ioctl(fp, TIOCSETA, (caddr_t)&bios, td->td_ucred,
+ td));
+ break;
+
+ case LINUX_TCSETSW2:
+ error = copyin((void *)args->arg, &lios2, sizeof(lios2));
+ if (error)
+ break;
+ linux_to_bsd_termios2(&lios2, &bios);
+ error = (fo_ioctl(fp, TIOCSETAW, (caddr_t)&bios, td->td_ucred,
+ td));
+ break;
+
+ case LINUX_TCSETSF2:
+ error = copyin((void *)args->arg, &lios2, sizeof(lios2));
+ if (error)
+ break;
+ linux_to_bsd_termios2(&lios2, &bios);
+ error = (fo_ioctl(fp, TIOCSETAF, (caddr_t)&bios, td->td_ucred,
+ td));
+ break;
+
case LINUX_TIOCGPTN: {
int nb;
@@ -3570,6 +3639,55 @@ linux_ioctl_nvme(struct thread *td, struct linux_ioctl_args *args)
}
#endif
+static int
+linux_ioctl_hidraw(struct thread *td, struct linux_ioctl_args *args)
+{
+ int len = (args->cmd & 0x3fff0000) >> 16;
+ if (len > 8192)
+ return (EINVAL);
+
+ switch (args->cmd & 0xffff) {
+ case LINUX_HIDIOCGRDESCSIZE:
+ args->cmd = HIDIOCGRDESCSIZE;
+ break;
+ case LINUX_HIDIOCGRDESC:
+ args->cmd = HIDIOCGRDESC;
+ break;
+ case LINUX_HIDIOCGRAWINFO:
+ args->cmd = HIDIOCGRAWINFO;
+ break;
+ case LINUX_HIDIOCGRAWNAME:
+ args->cmd = HIDIOCGRAWNAME(len);
+ break;
+ case LINUX_HIDIOCGRAWPHYS:
+ args->cmd = HIDIOCGRAWPHYS(len);
+ break;
+ case LINUX_HIDIOCSFEATURE:
+ args->cmd = HIDIOCSFEATURE(len);
+ break;
+ case LINUX_HIDIOCGFEATURE:
+ args->cmd = HIDIOCGFEATURE(len);
+ break;
+ case LINUX_HIDIOCGRAWUNIQ:
+ args->cmd = HIDIOCGRAWUNIQ(len);
+ break;
+ case LINUX_HIDIOCSINPUT:
+ args->cmd = HIDIOCSINPUT(len);
+ break;
+ case LINUX_HIDIOCGINPUT:
+ args->cmd = HIDIOCGINPUT(len);
+ break;
+ case LINUX_HIDIOCSOUTPUT:
+ args->cmd = HIDIOCSOUTPUT(len);
+ break;
+ case LINUX_HIDIOCGOUTPUT:
+ args->cmd = HIDIOCGOUTPUT(len);
+ break;
+ }
+
+ return (sys_ioctl(td, (struct ioctl_args *)args));
+}
+
/*
* main ioctl syscall function
*/
diff --git a/sys/compat/linux/linux_ioctl.h b/sys/compat/linux/linux_ioctl.h
index 8345b7e4b719..116a4e676228 100644
--- a/sys/compat/linux/linux_ioctl.h
+++ b/sys/compat/linux/linux_ioctl.h
@@ -383,6 +383,11 @@
#define LINUX_TIOCSBRK 0x5427
#define LINUX_TIOCCBRK 0x5428
+#define LINUX_TCGETS2 0x542A
+#define LINUX_TCSETS2 0x542B
+#define LINUX_TCSETSW2 0x542C
+#define LINUX_TCSETSF2 0x542D
+
#define LINUX_TIOCGPTN 0x5430
#define LINUX_TIOCSPTLCK 0x5431
@@ -501,6 +506,7 @@
#define LINUX_FF1 0x0008000
#define LINUX_CBAUD 0x0000100f
+#define LINUX_CIBAUD (LINUX_CBAUD << LINUX_IBSHIFT)
#define LINUX_B0 0x00000000
#define LINUX_B50 0x00000001
@@ -537,8 +543,12 @@
#define LINUX_HUPCL 0x00000400
#define LINUX_CLOCAL 0x00000800
+#define LINUX_BOTHER 0x00001000
+
#define LINUX_CRTSCTS 0x80000000
+#define LINUX_IBSHIFT 16
+
/* Linux c_lflag masks */
#define LINUX_ISIG 0x00000001
#define LINUX_ICANON 0x00000002
@@ -797,6 +807,25 @@
#define LINUX_IOCTL_NVME_MAX LINUX_NVME_IOCTL_RESCAN
/*
+ * hidraw
+ */
+#define LINUX_HIDIOCGRDESCSIZE 0x4801
+#define LINUX_HIDIOCGRDESC 0x4802
+#define LINUX_HIDIOCGRAWINFO 0x4803
+#define LINUX_HIDIOCGRAWNAME 0x4804
+#define LINUX_HIDIOCGRAWPHYS 0x4805
+#define LINUX_HIDIOCSFEATURE 0x4806
+#define LINUX_HIDIOCGFEATURE 0x4807
+#define LINUX_HIDIOCGRAWUNIQ 0x4808
+#define LINUX_HIDIOCSINPUT 0x4809
+#define LINUX_HIDIOCGINPUT 0x480A
+#define LINUX_HIDIOCSOUTPUT 0x480B
+#define LINUX_HIDIOCGOUTPUT 0x480C
+
+#define LINUX_IOCTL_HIDRAW_MIN LINUX_HIDIOCGRDESCSIZE
+#define LINUX_IOCTL_HIDRAW_MAX LINUX_HIDIOCGOUTPUT
+
+/*
* Pluggable ioctl handlers
*/
struct linux_ioctl_args;
diff --git a/sys/compat/linuxkpi/common/include/linux/acpi.h b/sys/compat/linuxkpi/common/include/linux/acpi.h
index 3e1ec1b20626..a764a975c983 100644
--- a/sys/compat/linuxkpi/common/include/linux/acpi.h
+++ b/sys/compat/linuxkpi/common/include/linux/acpi.h
@@ -32,7 +32,7 @@
#include <linux/device.h>
#include <linux/uuid.h>
-#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
+#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__) || defined(__riscv)
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
diff --git a/sys/compat/linuxkpi/common/include/linux/bitops.h b/sys/compat/linuxkpi/common/include/linux/bitops.h
index ebe9aa120094..125081ab5b74 100644
--- a/sys/compat/linuxkpi/common/include/linux/bitops.h
+++ b/sys/compat/linuxkpi/common/include/linux/bitops.h
@@ -51,12 +51,6 @@
#define BITS_PER_TYPE(t) (sizeof(t) * BITS_PER_BYTE)
#define BITS_TO_BYTES(n) howmany((n), BITS_PER_BYTE)
-#define hweight8(x) bitcount((uint8_t)(x))
-#define hweight16(x) bitcount16(x)
-#define hweight32(x) bitcount32(x)
-#define hweight64(x) bitcount64(x)
-#define hweight_long(x) bitcountl(x)
-
#if __has_builtin(__builtin_popcountg)
#define HWEIGHT8(x) (__builtin_popcountg((uint8_t)(x)))
#define HWEIGHT16(x) (__builtin_popcountg((uint16_t)(x)))
@@ -70,6 +64,12 @@
#define HWEIGHT64(x) (__const_bitcount64((uint64_t)(x)))
#endif
+#define hweight8(x) (__builtin_constant_p(x) ? HWEIGHT8(x) : bitcount((uint8_t)(x)))
+#define hweight16(x) (__builtin_constant_p(x) ? HWEIGHT16(x) : bitcount16(x))
+#define hweight32(x) (__builtin_constant_p(x) ? HWEIGHT32(x) : bitcount32(x))
+#define hweight64(x) (__builtin_constant_p(x) ? HWEIGHT64(x) : bitcount64(x))
+#define hweight_long(x) bitcountl(x)
+
static inline int
__ffs(int mask)
{
diff --git a/sys/compat/linuxkpi/common/include/linux/dma-mapping.h b/sys/compat/linuxkpi/common/include/linux/dma-mapping.h
index 2d8e1196d3d3..76efbfd51074 100644
--- a/sys/compat/linuxkpi/common/include/linux/dma-mapping.h
+++ b/sys/compat/linuxkpi/common/include/linux/dma-mapping.h
@@ -96,6 +96,8 @@ void *linux_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag);
void *linuxkpi_dmam_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag);
+void linuxkpi_dmam_free_coherent(struct device *dev, size_t size,
+ void *addr, dma_addr_t dma_handle);
dma_addr_t linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len); /* backward compat */
dma_addr_t lkpi_dma_map_phys(struct device *, vm_paddr_t, size_t,
enum dma_data_direction, unsigned long);
@@ -181,6 +183,13 @@ dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
kmem_free(cpu_addr, size);
}
+static inline void
+dmam_free_coherent(struct device *dev, size_t size, void *addr,
+ dma_addr_t dma_handle)
+{
+ linuxkpi_dmam_free_coherent(dev, size, addr, dma_handle);
+}
+
static inline dma_addr_t
dma_map_page_attrs(struct device *dev, struct page *page, size_t offset,
size_t size, enum dma_data_direction direction, unsigned long attrs)
diff --git a/sys/compat/linuxkpi/common/include/linux/eventfd.h b/sys/compat/linuxkpi/common/include/linux/eventfd.h
new file mode 100644
index 000000000000..d167d4b7d189
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/eventfd.h
@@ -0,0 +1,54 @@
+/*-
+ * Copyright (c) 2025 The FreeBSD Foundation
+ * Copyright (c) 2025 Jean-Sébastien Pédron
+ *
+ * This software was developed by Jean-Sébastien Pédron under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_EVENTFD_H_
+#define _LINUXKPI_LINUX_EVENTFD_H_
+
+#include <sys/eventfd.h>
+
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/percpu-defs.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+
+/*
+ * Linux uses `struct eventfd_ctx`, but FreeBSD defines `struct eventfd`. Here,
+ * we define a synonym to the FreeBSD structure. This allows to keep Linux code
+ * unmodified.
+ */
+#define eventfd_ctx eventfd
+
+#define eventfd_ctx_fdget lkpi_eventfd_ctx_fdget
+struct eventfd_ctx *lkpi_eventfd_ctx_fdget(int fd);
+
+#define eventfd_ctx_put lkpi_eventfd_ctx_put
+void lkpi_eventfd_ctx_put(struct eventfd_ctx *ctx);
+
+#endif /* _LINUXKPI_LINUX_EVENTFD_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/ieee80211.h b/sys/compat/linuxkpi/common/include/linux/ieee80211.h
index 12160df43915..d1eba94a3ad8 100644
--- a/sys/compat/linuxkpi/common/include/linux/ieee80211.h
+++ b/sys/compat/linuxkpi/common/include/linux/ieee80211.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2020-2025 The FreeBSD Foundation
+ * Copyright (c) 2020-2026 The FreeBSD Foundation
*
* This software was developed by Björn Zeeb under sponsorship from
* the FreeBSD Foundation.
@@ -51,8 +51,16 @@ extern int linuxkpi_debug_80211;
#define IMPROVE(fmt, ...) if (linuxkpi_debug_80211 & D80211_IMPROVE) \
printf("%s:%d: XXX LKPI80211 IMPROVE " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
-
-/* 9.4.2.55 Management MIC element (CMAC-256, GMAC-128, and GMAC-256). */
+/* 802.11-2024, 9.4.2.53 MME. */
+/* BIP-CMAC-128 */
+struct ieee80211_mmie {
+ uint8_t element_id;
+ uint8_t length;
+ uint16_t key_id;
+ uint8_t ipn[6];
+ uint8_t mic[8];
+};
+/* BIP-CMAC-256, BIP-GMAC-128, BIP-GMAC-256 */
struct ieee80211_mmie_16 {
uint8_t element_id;
uint8_t length;
@@ -108,7 +116,18 @@ struct ieee80211_mmie_16 {
#define IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT 0x0100
enum ieee80211_rate_flags {
- IEEE80211_RATE_SHORT_PREAMBLE = BIT(0),
+ IEEE80211_RATE_SHORT_PREAMBLE = BIT(0), /* 2.4Ghz, CCK */
+ IEEE80211_RATE_SUPPORTS_5MHZ = BIT(1),
+ IEEE80211_RATE_SUPPORTS_10MHZ = BIT(2),
+ IEEE80211_RATE_ERP_G = BIT(3),
+
+ /*
+ * According to documentation these are flags initialized internally.
+ * See lkpi_wiphy_band_annotate().
+ */
+ IEEE80211_RATE_MANDATORY_A = BIT(4),
+ IEEE80211_RATE_MANDATORY_G = BIT(5),
+ IEEE80211_RATE_MANDATORY_B = BIT(6),
};
enum ieee80211_rate_control_changed_flags {
@@ -200,6 +219,7 @@ enum ieee80211_min_mpdu_start_spacing {
#define IEEE80211_FCTL_TODS (IEEE80211_FC1_DIR_TODS << 8)
#define IEEE80211_FCTL_MOREFRAGS (IEEE80211_FC1_MORE_FRAG << 8)
#define IEEE80211_FCTL_PM (IEEE80211_FC1_PWR_MGT << 8)
+#define IEEE80211_FCTL_MOREDATA (IEEE80211_FC1_MORE_DATA << 8)
#define IEEE80211_FTYPE_MGMT IEEE80211_FC0_TYPE_MGT
#define IEEE80211_FTYPE_CTL IEEE80211_FC0_TYPE_CTL
@@ -461,18 +481,6 @@ enum ieee80211_tx_control_flags {
IEEE80211_TX_CTRL_MLO_LINK = 0xF0000000, /* This is IEEE80211_LINK_UNSPECIFIED on the high bits. */
};
-enum ieee80211_tx_rate_flags {
- /* XXX TODO .. right shift numbers */
- IEEE80211_TX_RC_40_MHZ_WIDTH = BIT(0),
- IEEE80211_TX_RC_80_MHZ_WIDTH = BIT(1),
- IEEE80211_TX_RC_160_MHZ_WIDTH = BIT(2),
- IEEE80211_TX_RC_GREEN_FIELD = BIT(3),
- IEEE80211_TX_RC_MCS = BIT(4),
- IEEE80211_TX_RC_SHORT_GI = BIT(5),
- IEEE80211_TX_RC_VHT_MCS = BIT(6),
- IEEE80211_TX_RC_USE_SHORT_PREAMBLE = BIT(7),
-};
-
#define IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED -128
#define IEEE80211_HT_CTL_LEN 4
diff --git a/sys/compat/linuxkpi/common/include/linux/minmax.h b/sys/compat/linuxkpi/common/include/linux/minmax.h
index d48958f0899f..fb8eb6f704b4 100644
--- a/sys/compat/linuxkpi/common/include/linux/minmax.h
+++ b/sys/compat/linuxkpi/common/include/linux/minmax.h
@@ -71,4 +71,7 @@
b = _swap_tmp; \
} while (0)
+/* XXX would have to make sure both are unsigned. */
+#define umin(x, y) MIN(x, y)
+
#endif /* _LINUXKPI_LINUX_MINMAX_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/netdevice.h b/sys/compat/linuxkpi/common/include/linux/netdevice.h
index cf27753bcb80..dfed5fbd61b4 100644
--- a/sys/compat/linuxkpi/common/include/linux/netdevice.h
+++ b/sys/compat/linuxkpi/common/include/linux/netdevice.h
@@ -160,6 +160,30 @@ struct net_device {
#define SET_NETDEV_DEV(_ndev, _dev) (_ndev)->dev.parent = _dev;
+enum net_device_path_type {
+ DEV_PATH_MTK_WDMA,
+};
+
+struct net_device_path {
+ enum net_device_path_type type;
+ const struct net_device *dev;
+ /* We assume there's a struct per type. */
+ union {
+ struct {
+ uint16_t wcid;
+ uint8_t wdma_idx;
+ uint8_t queue;
+ uint8_t bss;
+ uint8_t amsdu;
+ } mtk_wdma;
+ };
+};
+
+struct net_device_path_ctx {
+ const struct net_device *dev;
+};
+
+
/* -------------------------------------------------------------------------- */
/* According to linux::ipoib_main.c. */
struct netdev_notifier_info {
diff --git a/sys/compat/linuxkpi/common/include/linux/pci.h b/sys/compat/linuxkpi/common/include/linux/pci.h
index 8fe09554aed2..c337be67f5a4 100644
--- a/sys/compat/linuxkpi/common/include/linux/pci.h
+++ b/sys/compat/linuxkpi/common/include/linux/pci.h
@@ -1136,19 +1136,28 @@ pci_num_vf(struct pci_dev *dev)
static inline enum pci_bus_speed
pcie_get_speed_cap(struct pci_dev *dev)
{
+ struct pci_dev *pbus;
device_t root;
uint32_t lnkcap, lnkcap2;
int error, pos;
- root = device_get_parent(dev->dev.bsddev);
- if (root == NULL)
- return (PCI_SPEED_UNKNOWN);
- root = device_get_parent(root);
- if (root == NULL)
- return (PCI_SPEED_UNKNOWN);
- root = device_get_parent(root);
- if (root == NULL)
- return (PCI_SPEED_UNKNOWN);
+ /*
+ * We should always be called on a PCI device.
+ * The only current consumer I could find was amdgpu which either
+ * calls us directly on a pdev(drmn?) or with the result of
+ * pci_upstream_bridge().
+ *
+ * Treat "drmn" as special again as it is not a PCI device.
+ */
+ if (dev->pdrv != NULL && dev->pdrv->isdrm) {
+ pbus = pci_upstream_bridge(dev);
+ if (pbus == NULL)
+ return (PCI_SPEED_UNKNOWN);
+ } else
+ pbus = dev;
+
+ /* "root" may be misleading as it may not be that. */
+ root = pbus->dev.bsddev;
if (pci_get_vendor(root) == PCI_VENDOR_ID_VIA ||
pci_get_vendor(root) == PCI_VENDOR_ID_SERVERWORKS)
diff --git a/sys/compat/linuxkpi/common/include/linux/ptp_clock_kernel.h b/sys/compat/linuxkpi/common/include/linux/ptp_clock_kernel.h
index aad46cc25b1b..6491cbeab7e2 100644
--- a/sys/compat/linuxkpi/common/include/linux/ptp_clock_kernel.h
+++ b/sys/compat/linuxkpi/common/include/linux/ptp_clock_kernel.h
@@ -49,6 +49,7 @@ struct ptp_clock_info {
int (*adjtime)(struct ptp_clock_info *, s64);
int (*getcrosststamp)(struct ptp_clock_info *, struct system_device_crosststamp *);
int (*gettime64)(struct ptp_clock_info *, struct timespec *);
+ int (*settime64)(struct ptp_clock_info *, const struct timespec *);
};
static inline struct ptp_clock *
diff --git a/sys/compat/linuxkpi/common/include/linux/seq_buf.h b/sys/compat/linuxkpi/common/include/linux/seq_buf.h
new file mode 100644
index 000000000000..d6246a40e6f7
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/seq_buf.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2025-2026 The FreeBSD Foundation
+ * Copyright (c) 2025-2026 Jean-Sébastien Pédron <dumbbell@FreeBSD.org>
+ *
+ * This software was developed by Jean-Sébastien Pédron under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#ifndef _LINUXKPI_LINUX_SEQ_BUF_H_
+#define _LINUXKPI_LINUX_SEQ_BUF_H_
+
+#include <linux/bug.h>
+#include <linux/minmax.h>
+#include <linux/seq_file.h>
+#include <linux/types.h>
+
+struct seq_buf {
+ char *buffer;
+ size_t size;
+ size_t len;
+};
+
+#define DECLARE_SEQ_BUF(NAME, SIZE) \
+ struct seq_buf NAME = { \
+ .buffer = (char[SIZE]) { 0 }, \
+ .size = SIZE, \
+ }
+
+static inline void
+seq_buf_clear(struct seq_buf *s)
+{
+ s->len = 0;
+ if (s->size > 0)
+ s->buffer[0] = '\0';
+}
+
+static inline void
+seq_buf_set_overflow(struct seq_buf *s)
+{
+ s->len = s->size + 1;
+}
+
+static inline bool
+seq_buf_has_overflowed(struct seq_buf *s)
+{
+ return (s->len > s->size);
+}
+
+static inline size_t
+seq_buf_buffer_left(struct seq_buf *s)
+{
+ if (seq_buf_has_overflowed(s))
+ return (0);
+
+ return (s->size - s->len);
+}
+
+#define seq_buf_init(s, buf, size) linuxkpi_seq_buf_init((s), (buf), (size))
+void linuxkpi_seq_buf_init(struct seq_buf *s, char *buf, unsigned int size);
+
+#define	seq_buf_printf(s, f, ...)	linuxkpi_seq_buf_printf((s), (f), ##__VA_ARGS__)
+int linuxkpi_seq_buf_printf(struct seq_buf *s, const char *fmt, ...) \
+ __printflike(2, 3);
+
+#define seq_buf_vprintf(s, f, a) linuxkpi_seq_buf_vprintf((s), (f), (a))
+int linuxkpi_seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args);
+
+#define seq_buf_str(s) linuxkpi_seq_buf_str((s))
+const char * linuxkpi_seq_buf_str(struct seq_buf *s);
+
+#endif
diff --git a/sys/compat/linuxkpi/common/include/linux/skbuff.h b/sys/compat/linuxkpi/common/include/linux/skbuff.h
index 2e560a120e41..c43d6daff5ee 100644
--- a/sys/compat/linuxkpi/common/include/linux/skbuff.h
+++ b/sys/compat/linuxkpi/common/include/linux/skbuff.h
@@ -770,7 +770,7 @@ ___skb_queue_splice(const struct sk_buff_head *from,
}
static inline void
-skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to)
+skb_queue_splice(const struct sk_buff_head *from, struct sk_buff_head *to)
{
SKB_TRACE2(from, to);
@@ -780,6 +780,13 @@ skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to)
___skb_queue_splice(from, (struct sk_buff *)to, to->next);
to->qlen += from->qlen;
+}
+
+static inline void
+skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to)
+{
+
+ skb_queue_splice(from, to);
__skb_queue_head_init(from);
}
diff --git a/sys/compat/linuxkpi/common/include/linux/soc/airoha/airoha_offload.h b/sys/compat/linuxkpi/common/include/linux/soc/airoha/airoha_offload.h
new file mode 100644
index 000000000000..ade0b06d839f
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/soc/airoha/airoha_offload.h
@@ -0,0 +1,48 @@
+/*-
+ * Copyright (c) 2026 Bjoern A. Zeeb
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#ifndef _LINUXKPI_LINUX_SOC_AIROHA_AIROHA_OFFLOAD_H
+#define _LINUXKPI_LINUX_SOC_AIROHA_AIROHA_OFFLOAD_H
+
+#include <linux/kernel.h> /* pr_debug */
+
+enum airoha_npu_wlan_get_cmd {
+ __dummy_airoha_npu_wlan_get_cmd,
+};
+enum airoha_npu_wlan_set_cmd {
+ __dummy_airoha_npu_wlan_set_cmd,
+};
+
+struct airoha_npu {
+};
+struct airoha_npu_rx_dma_desc {
+};
+struct airoha_npu_tx_dma_desc {
+};
+
+static __inline int
+airoha_npu_wlan_send_msg(void *npu, int ifindex,
+ enum airoha_npu_wlan_set_cmd cmd, void *val, size_t len, gfp_t gfp)
+{
+ pr_debug("%s: TODO\n", __func__);
+ return (-EOPNOTSUPP);
+}
+
+static __inline int
+airoha_npu_wlan_get_msg(void *npu, int ifindex,
+ enum airoha_npu_wlan_get_cmd cmd, void *val, size_t len, gfp_t gfp)
+{
+ pr_debug("%s: TODO\n", __func__);
+ return (-EOPNOTSUPP);
+}
+
+static __inline void
+airoha_npu_wlan_enable_irq(struct airoha_npu *npu, int q)
+{
+ pr_debug("%s: TODO\n", __func__);
+}
+
+#endif /* _LINUXKPI_LINUX_SOC_AIROHA_AIROHA_OFFLOAD_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/soc/mediatek/mtk_wed.h b/sys/compat/linuxkpi/common/include/linux/soc/mediatek/mtk_wed.h
index 2b9c6ae4911e..64daa8c78c9d 100644
--- a/sys/compat/linuxkpi/common/include/linux/soc/mediatek/mtk_wed.h
+++ b/sys/compat/linuxkpi/common/include/linux/soc/mediatek/mtk_wed.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2022-2025 Bjoern A. Zeeb
+ * Copyright (c) 2022-2026 Bjoern A. Zeeb
*
* SPDX-License-Identifier: BSD-2-Clause
*/
@@ -37,7 +37,13 @@ mtk_wed_device_active(struct mtk_wed_device *dev __unused)
static inline bool
mtk_wed_get_rx_capa(struct mtk_wed_device *dev __unused)
{
+ pr_debug("%s: TODO\n", __func__);
+ return (false);
+}
+static inline bool
+mtk_wed_is_amsdu_supported(struct mtk_wed_device *dev __unused)
+{
pr_debug("%s: TODO\n", __func__);
return (false);
}
@@ -66,6 +72,12 @@ mtk_wed_get_rx_capa(struct mtk_wed_device *dev __unused)
{
return (false);
}
+
+static inline bool
+mtk_wed_is_amsdu_supported(struct mtk_wed_device *dev __unused)
+{
+ return (false);
+}
#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
#endif /* _LINUXKPI_LINUX_SOC_MEDIATEK_MTK_WED_H */
diff --git a/sys/compat/linuxkpi/common/include/net/cfg80211.h b/sys/compat/linuxkpi/common/include/net/cfg80211.h
index d7ed2bc97c98..94d34fb9dc0c 100644
--- a/sys/compat/linuxkpi/common/include/net/cfg80211.h
+++ b/sys/compat/linuxkpi/common/include/net/cfg80211.h
@@ -124,6 +124,7 @@ enum ieee80211_channel_flags {
IEEE80211_CHAN_PSD = BIT(12),
IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP = BIT(13),
IEEE80211_CHAN_CAN_MONITOR = BIT(14),
+ IEEE80211_CHAN_NO_EHT = BIT(15),
};
#define IEEE80211_CHAN_NO_HT40 (IEEE80211_CHAN_NO_HT40MINUS|IEEE80211_CHAN_NO_HT40PLUS)
@@ -152,6 +153,8 @@ struct linuxkpi_ieee80211_channel {
int orig_mpwr;
};
+#define NL80211_EHT_NSS_MAX 16
+
struct cfg80211_bitrate_mask {
/* TODO FIXME */
struct {
@@ -159,6 +162,7 @@ struct cfg80211_bitrate_mask {
uint8_t ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
uint16_t vht_mcs[8];
uint16_t he_mcs[8];
+ uint16_t eht_mcs[NL80211_EHT_NSS_MAX];
enum nl80211_txrate_gi gi;
enum nl80211_he_gi he_gi;
uint8_t he_ltf; /* XXX enum? */
@@ -1230,6 +1234,7 @@ struct cfg80211_ops {
struct wiphy *linuxkpi_wiphy_new(const struct cfg80211_ops *, size_t);
void linuxkpi_wiphy_free(struct wiphy *wiphy);
+int linuxkpi_80211_wiphy_register(struct wiphy *);
void linuxkpi_wiphy_work_queue(struct wiphy *, struct wiphy_work *);
void linuxkpi_wiphy_work_cancel(struct wiphy *, struct wiphy_work *);
@@ -1749,8 +1754,7 @@ wiphy_net(struct wiphy *wiphy)
static __inline int
wiphy_register(struct wiphy *wiphy)
{
- TODO();
- return (0);
+ return (linuxkpi_80211_wiphy_register(wiphy));
}
static __inline void
diff --git a/sys/compat/linuxkpi/common/include/net/mac80211.h b/sys/compat/linuxkpi/common/include/net/mac80211.h
index 6e2f3f2d8781..18891d035094 100644
--- a/sys/compat/linuxkpi/common/include/net/mac80211.h
+++ b/sys/compat/linuxkpi/common/include/net/mac80211.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2020-2025 The FreeBSD Foundation
+ * Copyright (c) 2020-2026 The FreeBSD Foundation
* Copyright (c) 2020-2025 Bjoern A. Zeeb
*
* This software was developed by Björn Zeeb under sponsorship from
@@ -789,10 +789,21 @@ struct ieee80211_tx_queue_params {
struct ieee80211_he_mu_edca_param_ac_rec mu_edca_param_rec;
};
+enum mac80211_rate_control_flags {
+ IEEE80211_TX_RC_40_MHZ_WIDTH = BIT(0),
+ IEEE80211_TX_RC_80_MHZ_WIDTH = BIT(1),
+ IEEE80211_TX_RC_160_MHZ_WIDTH = BIT(2),
+ IEEE80211_TX_RC_GREEN_FIELD = BIT(3),
+ IEEE80211_TX_RC_MCS = BIT(4),
+ IEEE80211_TX_RC_SHORT_GI = BIT(5),
+ IEEE80211_TX_RC_VHT_MCS = BIT(6),
+ IEEE80211_TX_RC_USE_SHORT_PREAMBLE = BIT(7),
+};
+
struct ieee80211_tx_rate {
uint8_t idx;
uint16_t count:5,
- flags:11;
+ flags:11; /* enum mac80211_rate_control_flags */
};
enum ieee80211_vif_driver_flags {
@@ -1092,6 +1103,8 @@ struct ieee80211_ops {
void (*rfkill_poll)(struct ieee80211_hw *);
+ int (*net_fill_forward_path)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, struct net_device_path_ctx *, struct net_device_path *);
+
/* #ifdef CONFIG_MAC80211_DEBUGFS */ /* Do not change depending on compile-time option. */
void (*sta_add_debugfs)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, struct dentry *);
void (*vif_add_debugfs)(struct ieee80211_hw *, struct ieee80211_vif *);
diff --git a/sys/compat/linuxkpi/common/src/linux_80211.c b/sys/compat/linuxkpi/common/src/linux_80211.c
index 1ac28dfef448..d25b32f1dae8 100644
--- a/sys/compat/linuxkpi/common/src/linux_80211.c
+++ b/sys/compat/linuxkpi/common/src/linux_80211.c
@@ -896,41 +896,34 @@ lkpi_lsta_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN],
/* Deflink information. */
for (band = 0; band < NUM_NL80211_BANDS; band++) {
struct ieee80211_supported_band *supband;
+		uint32_t rate_mandatory;
supband = hw->wiphy->bands[band];
if (supband == NULL)
continue;
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ /* We have to assume 11g support here. */
+ rate_mandatory = IEEE80211_RATE_MANDATORY_G |
+ IEEE80211_RATE_MANDATORY_B;
+ break;
+ case NL80211_BAND_5GHZ:
+ rate_mandatory = IEEE80211_RATE_MANDATORY_A;
+ break;
+ default:
+ continue;
+ }
+
for (i = 0; i < supband->n_bitrates; i++) {
- switch (band) {
- case NL80211_BAND_2GHZ:
- switch (supband->bitrates[i].bitrate) {
- case 240: /* 11g only */
- case 120: /* 11g only */
- case 110:
- case 60: /* 11g only */
- case 55:
- case 20:
- case 10:
- sta->deflink.supp_rates[band] |= BIT(i);
- break;
- }
- break;
- case NL80211_BAND_5GHZ:
- switch (supband->bitrates[i].bitrate) {
- case 240:
- case 120:
- case 60:
- sta->deflink.supp_rates[band] |= BIT(i);
- break;
- }
- break;
- }
+ if ((supband->bitrates[i].flags & rate_mandatory) != 0)
+ sta->deflink.supp_rates[band] |= BIT(i);
}
}
sta->deflink.smps_mode = IEEE80211_SMPS_OFF;
sta->deflink.bandwidth = IEEE80211_STA_RX_BW_20;
+ sta->deflink.agg.max_rc_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA;
sta->deflink.rx_nss = 1;
sta->deflink.sta = sta;
@@ -8028,6 +8021,77 @@ linuxkpi_wiphy_free(struct wiphy *wiphy)
kfree(lwiphy);
}
+static void
+lkpi_wiphy_band_annotate(struct wiphy *wiphy)
+{
+ int band;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ struct ieee80211_supported_band *supband;
+ int i;
+
+ supband = wiphy->bands[band];
+ if (supband == NULL)
+ continue;
+
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ case NL80211_BAND_5GHZ:
+ break;
+ default:
+#ifdef LINUXKPI_DEBUG_80211
+ IMPROVE("band %d(%s) not yet supported",
+ band, lkpi_nl80211_band_name(band));
+ /* For bands added here, also check lkpi_lsta_alloc(). */
+#endif
+ continue;
+ }
+
+ for (i = 0; i < supband->n_bitrates; i++) {
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ switch (supband->bitrates[i].bitrate) {
+ case 110:
+ case 55:
+ case 20:
+ case 10:
+ supband->bitrates[i].flags |=
+ IEEE80211_RATE_MANDATORY_B;
+ /* FALLTHROUGH */
+ /* 11g only */
+ case 240:
+ case 120:
+ case 60:
+ supband->bitrates[i].flags |=
+ IEEE80211_RATE_MANDATORY_G;
+ break;
+ }
+ break;
+ case NL80211_BAND_5GHZ:
+ switch (supband->bitrates[i].bitrate) {
+ case 240:
+ case 120:
+ case 60:
+ supband->bitrates[i].flags |=
+ IEEE80211_RATE_MANDATORY_A;
+ break;
+ }
+ break;
+ }
+ }
+ }
+}
+
+int
+linuxkpi_80211_wiphy_register(struct wiphy *wiphy)
+{
+ TODO("Lots of checks and initialization");
+
+ lkpi_wiphy_band_annotate(wiphy);
+
+ return (0);
+}
+
static uint32_t
lkpi_cfg80211_calculate_bitrate_ht(struct rate_info *rate)
{
diff --git a/sys/compat/linuxkpi/common/src/linux_current.c b/sys/compat/linuxkpi/common/src/linux_current.c
index c342eb279caa..3bc5d31d211a 100644
--- a/sys/compat/linuxkpi/common/src/linux_current.c
+++ b/sys/compat/linuxkpi/common/src/linux_current.c
@@ -90,11 +90,8 @@ linux_alloc_current(struct thread *td, int flags)
}
ts = uma_zalloc(linux_current_zone, flags | M_ZERO);
- if (ts == NULL) {
- if ((flags & (M_WAITOK | M_NOWAIT)) == M_WAITOK)
- panic("linux_alloc_current: failed to allocate task");
+ if (ts == NULL)
return (ENOMEM);
- }
mm = NULL;
/* setup new task structure */
@@ -118,10 +115,7 @@ linux_alloc_current(struct thread *td, int flags)
PROC_UNLOCK(proc);
mm = uma_zalloc(linux_mm_zone, flags | M_ZERO);
if (mm == NULL) {
- if ((flags & (M_WAITOK | M_NOWAIT)) == M_WAITOK)
- panic(
- "linux_alloc_current: failed to allocate mm");
- uma_zfree(linux_current_zone, mm);
+ uma_zfree(linux_current_zone, ts);
return (ENOMEM);
}
diff --git a/sys/compat/linuxkpi/common/src/linux_eventfd.c b/sys/compat/linuxkpi/common/src/linux_eventfd.c
new file mode 100644
index 000000000000..126c6c54b9a5
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_eventfd.c
@@ -0,0 +1,63 @@
+/*-
+ * Copyright (c) 2025 The FreeBSD Foundation
+ * Copyright (c) 2025 Jean-Sébastien Pédron
+ *
+ * This software was developed by Jean-Sébastien Pédron under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/file.h>
+#include <sys/filedesc.h>
+
+#include <linux/eventfd.h>
+
+struct eventfd_ctx *
+lkpi_eventfd_ctx_fdget(int fd)
+{
+ struct file *fp;
+ struct eventfd_ctx *ctx;
+
+ /* Lookup file pointer by file descriptor index. */
+ if (fget_unlocked(curthread, fd, &cap_no_rights, &fp) != 0)
+ return (ERR_PTR(-EBADF));
+
+ /*
+ * eventfd_get() bumps the refcount, so we can safely release the
+ * reference on the file itself afterwards.
+ */
+ ctx = eventfd_get(fp);
+ fdrop(fp, curthread);
+
+ if (ctx == NULL)
+ return (ERR_PTR(-EBADF));
+
+ return (ctx);
+}
+
+void
+lkpi_eventfd_ctx_put(struct eventfd_ctx *ctx)
+{
+ eventfd_put(ctx);
+}
diff --git a/sys/compat/linuxkpi/common/src/linux_firmware.c b/sys/compat/linuxkpi/common/src/linux_firmware.c
index 12658df5ce83..0c6d855501ef 100644
--- a/sys/compat/linuxkpi/common/src/linux_firmware.c
+++ b/sys/compat/linuxkpi/common/src/linux_firmware.c
@@ -66,7 +66,8 @@ _linuxkpi_request_firmware(const char *fw_name, const struct linuxkpi_firmware *
uint32_t flags;
if (fw_name == NULL || fw == NULL || dev == NULL) {
- *fw = NULL;
+ if (fw != NULL)
+ *fw = NULL;
return (-EINVAL);
}
diff --git a/sys/compat/linuxkpi/common/src/linux_pci.c b/sys/compat/linuxkpi/common/src/linux_pci.c
index df5255d8a6ca..9e4fad45433a 100644
--- a/sys/compat/linuxkpi/common/src/linux_pci.c
+++ b/sys/compat/linuxkpi/common/src/linux_pci.c
@@ -1796,6 +1796,42 @@ lkpi_dmam_free_coherent(struct device *dev, void *p)
dma_free_coherent(dev, dr->size, dr->mem, *dr->handle);
}
+static int
+lkpi_dmam_coherent_match(struct device *dev, void *dr, void *mp)
+{
+ struct lkpi_devres_dmam_coherent *a, *b;
+
+ a = dr;
+ b = mp;
+
+ if (a->mem != b->mem)
+ return (0);
+ if (a->size != b->size || a->handle != b->handle)
+ dev_WARN(dev, "for mem %p: size %zu != %zu || handle %#jx != %#jx\n",
+ a->mem, a->size, b->size,
+ (uintmax_t)a->handle, (uintmax_t)b->handle);
+ return (1);
+}
+
+void
+linuxkpi_dmam_free_coherent(struct device *dev, size_t size,
+ void *addr, dma_addr_t dma_handle)
+{
+ struct lkpi_devres_dmam_coherent match = {
+ .size = size,
+ .handle = &dma_handle,
+ .mem = addr
+ };
+ int error;
+
+ error = devres_destroy(dev, lkpi_dmam_free_coherent,
+ lkpi_dmam_coherent_match, &match);
+ if (error != 0)
+ dev_WARN(dev, "devres_destroy returned %d, size %zu addr %p "
+ "dma_handle %#jx\n", error, size, addr, (uintmax_t)dma_handle);
+ dma_free_coherent(dev, size, addr, dma_handle);
+}
+
void *
linuxkpi_dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flag)
diff --git a/sys/compat/linuxkpi/common/src/linux_seq_buf.c b/sys/compat/linuxkpi/common/src/linux_seq_buf.c
new file mode 100644
index 000000000000..112c53044c22
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_seq_buf.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2025-2026 The FreeBSD Foundation
+ * Copyright (c) 2025-2026 Jean-Sébastien Pédron <dumbbell@FreeBSD.org>
+ *
+ * This software was developed by Jean-Sébastien Pédron under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <linux/seq_buf.h>
+
+void
+linuxkpi_seq_buf_init(struct seq_buf *s, char *buf, unsigned int size)
+{
+ s->buffer = buf;
+ s->size = size;
+
+ seq_buf_clear(s);
+}
+
+int
+linuxkpi_seq_buf_printf(struct seq_buf *s, const char *fmt, ...)
+{
+ int ret;
+ va_list args;
+
+ va_start(args, fmt);
+ ret = seq_buf_vprintf(s, fmt, args);
+ va_end(args);
+
+ return (ret);
+}
+
+int
+linuxkpi_seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args)
+{
+ int ret;
+
+ if (!seq_buf_has_overflowed(s)) {
+ ret = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args);
+ if (s->len + ret < s->size) {
+ s->len += ret;
+ return (0);
+ }
+ }
+
+ seq_buf_set_overflow(s);
+ return (-1);
+}
+
+const char *
+linuxkpi_seq_buf_str(struct seq_buf *s)
+{
+ if (s->size == 0)
+ return ("");
+
+ if (seq_buf_buffer_left(s))
+ s->buffer[s->len] = '\0';
+ else
+ s->buffer[s->size - 1] = '\0';
+
+ return (s->buffer);
+}
diff --git a/sys/compat/linuxkpi/common/src/linux_shmemfs.c b/sys/compat/linuxkpi/common/src/linux_shmemfs.c
index 1fb17bc5c0cb..d5c118ba7624 100644
--- a/sys/compat/linuxkpi/common/src/linux_shmemfs.c
+++ b/sys/compat/linuxkpi/common/src/linux_shmemfs.c
@@ -62,11 +62,10 @@ linux_shmem_read_mapping_page_gfp(vm_object_t obj, int pindex, gfp_t gfp)
struct linux_file *
linux_shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
- struct fileobj {
+ struct {
struct linux_file file __aligned(sizeof(void *));
struct vnode vnode __aligned(sizeof(void *));
- };
- struct fileobj *fileobj;
+ } *fileobj;
struct linux_file *filp;
struct vnode *vp;
int error;
@@ -89,7 +88,7 @@ linux_shmem_file_setup(const char *name, loff_t size, unsigned long flags)
}
return (filp);
err_1:
- kfree(filp);
+ kfree(fileobj);
err_0:
return (ERR_PTR(error));
}
diff --git a/sys/compat/linuxkpi/dummy/include/linux/eventfd.h b/sys/compat/linuxkpi/dummy/include/linux/eventfd.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/sys/compat/linuxkpi/dummy/include/linux/eventfd.h
+++ /dev/null
diff --git a/sys/conf/Makefile.powerpc b/sys/conf/Makefile.powerpc
index 643fd8677dda..68e198bbe85e 100644
--- a/sys/conf/Makefile.powerpc
+++ b/sys/conf/Makefile.powerpc
@@ -32,11 +32,6 @@ LDSCRIPT_NAME?= ldscript.${MACHINE_ARCH}
INCLUDES+= -I$S/contrib/libfdt
-.if "${MACHINE_ARCH}" == "powerpcspe"
-# Force __SPE__, since the builtin will be removed later with -mno-spe
-CFLAGS.gcc+= -mabi=spe -D__SPE__
-CFLAGS.clang+= -mspe -D__SPE__ -m32
-.endif
CFLAGS+= -msoft-float
CFLAGS.gcc+= -Wa,-many
diff --git a/sys/conf/NOTES b/sys/conf/NOTES
index 8d413fb4f583..6d3ea088b819 100644
--- a/sys/conf/NOTES
+++ b/sys/conf/NOTES
@@ -1922,6 +1922,7 @@ device xmphy # XaQti XMAC II
# oce: Emulex 10 Gbit adapters (OneConnect Ethernet)
# ral: Ralink Technology IEEE 802.11 wireless adapter
# re: Realtek 8139C+/8169/816xS/811xS/8101E PCI/PCIe Ethernet adapter
+# rge: Realtek 8125/8126/8127 PCIe Ethernet adapter
# rl: Support for PCI fast ethernet adapters based on the Realtek 8129/8139
# chipset. Note that the Realtek driver defaults to using programmed
# I/O to do register accesses because memory mapped mode seems to cause
@@ -2009,6 +2010,7 @@ device cxgbe # Chelsio T4-T6 1/10/25/40/100 Gigabit Ethernet
device cxgbev # Chelsio T4-T6 Virtual Functions
device le # AMD Am7900 LANCE and Am79C9xx PCnet
device mxge # Myricom Myri-10G 10GbE NIC
+device rge # Realtek 8125/8126/8127
device oce # Emulex 10GbE (OneConnect Ethernet)
device ti # Alteon Networks Tigon I/II gigabit Ethernet
@@ -2671,7 +2673,6 @@ options INIT_PATH=/sbin/init:/rescue/init
# Debug options
options BUS_DEBUG # enable newbus debugging
-options DEBUG_VFS_LOCKS # enable VFS lock debugging
options SOCKBUF_DEBUG # enable sockbuf last record/mb tail checking
options IFMEDIA_DEBUG # enable debugging in net/if_media.c
diff --git a/sys/conf/files b/sys/conf/files
index d0c4ea5f544d..97834f05431d 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -776,6 +776,7 @@ dev/acpica/acpi_thermal.c optional acpi
dev/acpica/acpi_throttle.c optional acpi
dev/acpica/acpi_video.c optional acpi_video acpi
dev/acpica/acpi_dock.c optional acpi_dock acpi
+dev/acpica/acpi_spmc.c optional acpi
dev/adlink/adlink.c optional adlink
dev/ae/if_ae.c optional ae pci
dev/age/if_age.c optional age pci
@@ -4662,6 +4663,8 @@ compat/linuxkpi/common/src/linux_dmi.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_domain.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
+compat/linuxkpi/common/src/linux_eventfd.c optional compat_linuxkpi \
+ compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_firmware.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_folio.c optional compat_linuxkpi \
@@ -4700,6 +4703,8 @@ compat/linuxkpi/common/src/linux_rcu.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C} -I$S/contrib/ck/include"
compat/linuxkpi/common/src/linux_schedule.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
+compat/linuxkpi/common/src/linux_seq_buf.c optional compat_linuxkpi \
+ compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_shmemfs.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_shrinker.c optional compat_linuxkpi \
@@ -5264,6 +5269,7 @@ security/mac/mac_net.c optional mac
security/mac/mac_pipe.c optional mac
security/mac/mac_posix_sem.c optional mac
security/mac/mac_posix_shm.c optional mac
+security/mac/mac_prison.c optional mac
security/mac/mac_priv.c optional mac
security/mac/mac_process.c optional mac
security/mac/mac_socket.c optional mac
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index 5de81ce58ef8..88f9b1d5f10f 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -112,6 +112,7 @@ crypto/openssl/amd64/sha512-x86_64.S optional ossl
crypto/openssl/amd64/ossl_aes_gcm_avx512.c optional ossl
crypto/openssl/ossl_aes_gcm.c optional ossl
dev/amdgpio/amdgpio.c optional amdgpio
+dev/asmc/asmc.c optional asmc isa
dev/axgbe/if_axgbe_pci.c optional axp
dev/axgbe/xgbe-desc.c optional axp
dev/axgbe/xgbe-dev.c optional axp
diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64
index 4bd03989afa0..2efdb21f66ad 100644
--- a/sys/conf/files.arm64
+++ b/sys/conf/files.arm64
@@ -413,6 +413,35 @@ dev/ipmi/ipmi_smbus.c optional ipmi smbus
dev/ipmi/ipmi_smic.c optional ipmi
dev/ipmi/ipmi_ssif.c optional ipmi smbus
+dev/ixl/if_ixl.c optional ixl pci \
+ compile-with "${NORMAL_C} -I$S/dev/ixl"
+dev/ixl/ixl_pf_main.c optional ixl pci \
+ compile-with "${NORMAL_C} -I$S/dev/ixl"
+dev/ixl/ixl_pf_iflib.c optional ixl pci \
+ compile-with "${NORMAL_C} -I$S/dev/ixl"
+dev/ixl/ixl_pf_qmgr.c optional ixl pci \
+ compile-with "${NORMAL_C} -I$S/dev/ixl"
+dev/ixl/ixl_pf_iov.c optional ixl pci pci_iov \
+ compile-with "${NORMAL_C} -I$S/dev/ixl"
+dev/ixl/ixl_pf_i2c.c optional ixl pci \
+ compile-with "${NORMAL_C} -I$S/dev/ixl"
+dev/ixl/ixl_txrx.c optional ixl pci \
+ compile-with "${NORMAL_C} -I$S/dev/ixl"
+dev/ixl/i40e_osdep.c optional ixl pci \
+ compile-with "${NORMAL_C} -I$S/dev/ixl"
+dev/ixl/i40e_lan_hmc.c optional ixl pci \
+ compile-with "${NORMAL_C} -I$S/dev/ixl"
+dev/ixl/i40e_hmc.c optional ixl pci \
+ compile-with "${NORMAL_C} -I$S/dev/ixl"
+dev/ixl/i40e_common.c optional ixl pci \
+ compile-with "${NORMAL_C} -I$S/dev/ixl"
+dev/ixl/i40e_nvm.c optional ixl pci \
+ compile-with "${NORMAL_C} -I$S/dev/ixl"
+dev/ixl/i40e_adminq.c optional ixl pci \
+ compile-with "${NORMAL_C} -I$S/dev/ixl"
+dev/ixl/i40e_dcb.c optional ixl pci \
+ compile-with "${NORMAL_C} -I$S/dev/ixl"
+
dev/mailbox/arm/arm_doorbell.c optional fdt arm_doorbell
dev/mbox/mbox_if.m optional soc_brcm_bcm2837
@@ -600,7 +629,7 @@ contrib/vchiq/interface/compat/vchi_bsd.c optional vchiq soc_brcm_bcm2837 fdt \
contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c optional vchiq soc_brcm_bcm2837 fdt \
compile-with "${NORMAL_C} -Wno-unused -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
contrib/vchiq/interface/vchiq_arm/vchiq_arm.c optional vchiq soc_brcm_bcm2837 fdt \
- compile-with "${NORMAL_C} -Wno-unused -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
+ compile-with "${NORMAL_C} -Wno-unused ${NO_WDEFAULT_CONST_INIT_FIELD_UNSAFE} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
contrib/vchiq/interface/vchiq_arm/vchiq_connected.c optional vchiq soc_brcm_bcm2837 fdt \
compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
contrib/vchiq/interface/vchiq_arm/vchiq_core.c optional vchiq soc_brcm_bcm2837 fdt \
diff --git a/sys/conf/files.powerpc b/sys/conf/files.powerpc
index 0deada385f31..7989f1c9bea4 100644
--- a/sys/conf/files.powerpc
+++ b/sys/conf/files.powerpc
@@ -75,7 +75,7 @@ crypto/openssl/powerpc64/sha512p8-ppc.S optional ossl powerpc64
crypto/openssl/powerpc64/vpaes-ppc.S optional ossl powerpc64
crypto/openssl/powerpc64/x25519-ppc64.S optional ossl powerpc64
-cddl/compat/opensolaris/kern/opensolaris_atomic.c optional zfs powerpc | dtrace powerpc | zfs powerpcspe | dtrace powerpcspe compile-with "${ZFS_C}"
+cddl/compat/opensolaris/kern/opensolaris_atomic.c optional zfs powerpc | dtrace powerpc compile-with "${ZFS_C}"
cddl/dev/dtrace/powerpc/dtrace_asm.S optional dtrace compile-with "${DTRACE_S}"
cddl/dev/dtrace/powerpc/dtrace_subr.c optional dtrace compile-with "${DTRACE_C}"
cddl/dev/fbt/powerpc/fbt_isa.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}"
@@ -235,25 +235,25 @@ dev/tsec/if_tsec_fdt.c optional tsec
dev/uart/uart_cpu_powerpc.c optional uart
dev/usb/controller/ehci_fsl.c optional ehci mpc85xx
dev/vt/hw/ofwfb/ofwfb.c optional vt aim
-kern/subr_atomic64.c optional powerpc | powerpcspe
+kern/subr_atomic64.c optional powerpc
kern/subr_dummy_vdso_tc.c standard
kern/subr_sfbuf.c standard
-libkern/ashldi3.c optional powerpc | powerpcspe
-libkern/ashrdi3.c optional powerpc | powerpcspe
+libkern/ashldi3.c optional powerpc
+libkern/ashrdi3.c optional powerpc
libkern/bcopy.c standard
-libkern/cmpdi2.c optional powerpc | powerpcspe
-libkern/divdi3.c optional powerpc | powerpcspe
-libkern/lshrdi3.c optional powerpc | powerpcspe
+libkern/cmpdi2.c optional powerpc
+libkern/divdi3.c optional powerpc
+libkern/lshrdi3.c optional powerpc
libkern/memcmp.c standard
libkern/memset.c standard
-libkern/moddi3.c optional powerpc | powerpcspe
-libkern/qdivrem.c optional powerpc | powerpcspe
+libkern/moddi3.c optional powerpc
+libkern/qdivrem.c optional powerpc
libkern/strcmp.c standard
libkern/strlen.c standard
libkern/strncmp.c standard
-libkern/ucmpdi2.c optional powerpc | powerpcspe
-libkern/udivdi3.c optional powerpc | powerpcspe
-libkern/umoddi3.c optional powerpc | powerpcspe
+libkern/ucmpdi2.c optional powerpc
+libkern/udivdi3.c optional powerpc
+libkern/umoddi3.c optional powerpc
powerpc/aim/locore.S optional aim no-obj
powerpc/aim/aim_machdep.c optional aim
powerpc/aim/mmu_oea.c optional aim powerpc
@@ -263,29 +263,28 @@ powerpc/aim/moea64_native.c optional aim
powerpc/aim/mp_cpudep.c optional aim
powerpc/aim/slb.c optional aim powerpc64 | aim powerpc64le
powerpc/amigaone/platform_amigaone.c optional amigaone
-powerpc/amigaone/cpld_a1222.c optional powerpc amigaone | powerpcspe amigaone
-powerpc/amigaone/cpld_x5000.c optional powerpc amigaone | powerpc64 amigaone
+powerpc/amigaone/cpld_a1222.c optional powerpc amigaone
+powerpc/amigaone/cpld_x5000.c optional powerpc amigaone
powerpc/booke/locore.S optional booke no-obj
powerpc/booke/booke_machdep.c optional booke
powerpc/booke/machdep_e500.c optional booke_e500
powerpc/booke/mp_cpudep.c optional booke smp
powerpc/booke/platform_bare.c optional booke
powerpc/booke/pmap.c optional booke
-powerpc/booke/spe.c optional powerpcspe
powerpc/cpufreq/dfs.c optional cpufreq
powerpc/cpufreq/mpc85xx_jog.c optional cpufreq mpc85xx
powerpc/cpufreq/pcr.c optional cpufreq aim
powerpc/cpufreq/pmcr.c optional cpufreq aim powerpc64 | cpufreq aim powerpc64le
powerpc/cpufreq/pmufreq.c optional cpufreq aim pmu
-powerpc/fpu/fpu_add.c optional fpu_emu | powerpcspe
-powerpc/fpu/fpu_compare.c optional fpu_emu | powerpcspe
-powerpc/fpu/fpu_div.c optional fpu_emu | powerpcspe
+powerpc/fpu/fpu_add.c optional fpu_emu
+powerpc/fpu/fpu_compare.c optional fpu_emu
+powerpc/fpu/fpu_div.c optional fpu_emu
powerpc/fpu/fpu_emu.c optional fpu_emu
-powerpc/fpu/fpu_explode.c optional fpu_emu | powerpcspe
-powerpc/fpu/fpu_implode.c optional fpu_emu | powerpcspe
-powerpc/fpu/fpu_mul.c optional fpu_emu | powerpcspe
+powerpc/fpu/fpu_explode.c optional fpu_emu
+powerpc/fpu/fpu_implode.c optional fpu_emu
+powerpc/fpu/fpu_mul.c optional fpu_emu
powerpc/fpu/fpu_sqrt.c optional fpu_emu
-powerpc/fpu/fpu_subr.c optional fpu_emu | powerpcspe
+powerpc/fpu/fpu_subr.c optional fpu_emu
powerpc/mambo/mambocall.S optional mambo
powerpc/mambo/mambo.c optional mambo
powerpc/mambo/mambo_console.c optional mambo
@@ -358,7 +357,7 @@ powerpc/powernv/platform_powernv.c optional powernv
powerpc/powernv/powernv_centaur.c optional powernv
powerpc/powernv/powernv_xscom.c optional powernv
powerpc/powernv/xive.c optional powernv
-powerpc/powerpc/altivec.c optional !powerpcspe
+powerpc/powerpc/altivec.c standard
powerpc/powerpc/autoconf.c standard
powerpc/powerpc/bus_machdep.c standard
powerpc/powerpc/busdma_machdep.c standard
@@ -371,7 +370,7 @@ powerpc/powerpc/db_hwwatch.c optional ddb
powerpc/powerpc/db_interface.c optional ddb
powerpc/powerpc/db_trace.c optional ddb
powerpc/powerpc/dump_machdep.c standard
-powerpc/powerpc/elf32_machdep.c optional powerpc | powerpcspe | compat_freebsd32
+powerpc/powerpc/elf32_machdep.c optional powerpc | compat_freebsd32
powerpc/powerpc/elf64_machdep.c optional powerpc64 | powerpc64le
powerpc/powerpc/exec_machdep.c standard
powerpc/powerpc/fpu.c standard
@@ -393,9 +392,9 @@ powerpc/powerpc/ptrace_machdep.c standard
powerpc/powerpc/sc_machdep.c optional sc
powerpc/powerpc/sdt_machdep.c optional powerpc64 kdtrace_hooks
powerpc/powerpc/setjmp.S standard
-powerpc/powerpc/sigcode32.S optional powerpc | powerpcspe | compat_freebsd32
+powerpc/powerpc/sigcode32.S optional powerpc | compat_freebsd32
powerpc/powerpc/sigcode64.S optional powerpc64 | powerpc64le
-powerpc/powerpc/swtch32.S optional powerpc | powerpcspe
+powerpc/powerpc/swtch32.S optional powerpc
powerpc/powerpc/swtch64.S optional powerpc64 | powerpc64le
powerpc/powerpc/stack_machdep.c optional ddb | stack
powerpc/powerpc/support.S optional powerpc64 | powerpc64le | booke
diff --git a/sys/conf/files.x86 b/sys/conf/files.x86
index 31b8e88a6951..b1bd6f7291ca 100644
--- a/sys/conf/files.x86
+++ b/sys/conf/files.x86
@@ -67,7 +67,6 @@ dev/amdsbwd/amdsbwd.c optional amdsbwd
dev/amdsmn/amdsmn.c optional amdsmn | amdtemp
dev/amdtemp/amdtemp.c optional amdtemp
dev/arcmsr/arcmsr.c optional arcmsr pci
-dev/asmc/asmc.c optional asmc isa
dev/atkbdc/atkbd.c optional atkbd atkbdc
dev/atkbdc/atkbd_atkbdc.c optional atkbd atkbdc
dev/atkbdc/atkbdc.c optional atkbdc
diff --git a/sys/conf/kern.mk b/sys/conf/kern.mk
index 653a798778c4..e86ffb684f9a 100644
--- a/sys/conf/kern.mk
+++ b/sys/conf/kern.mk
@@ -37,6 +37,9 @@ NO_WBITWISE_INSTEAD_OF_LOGICAL= -Wno-bitwise-instead-of-logical
NO_WSTRICT_PROTOTYPES= -Wno-strict-prototypes
NO_WDEPRECATED_NON_PROTOTYPE= -Wno-deprecated-non-prototype
.endif
+.if ${COMPILER_VERSION} >= 210000
+NO_WDEFAULT_CONST_INIT_FIELD_UNSAFE= -Wno-default-const-init-field-unsafe
+.endif
# Several other warnings which might be useful in some cases, but not severe
# enough to error out the whole kernel build. Display them anyway, so there is
# some incentive to fix them eventually.
@@ -205,10 +208,6 @@ CFLAGS+= -mno-altivec -msoft-float
INLINE_LIMIT?= 15000
.endif
-.if ${MACHINE_ARCH} == "powerpcspe"
-CFLAGS.gcc+= -mno-spe
-.endif
-
#
# Use dot symbols (or, better, the V2 ELF ABI) on powerpc64 to make
# DDB happy. ELFv2, if available, has some other efficiency benefits.
@@ -407,7 +406,6 @@ LD_EMULATION_arm=armelf_fbsd
LD_EMULATION_armv7=armelf_fbsd
LD_EMULATION_i386=elf_i386_fbsd
LD_EMULATION_powerpc= elf32ppc_fbsd
-LD_EMULATION_powerpcspe= elf32ppc_fbsd
LD_EMULATION_powerpc64= elf64ppc_fbsd
LD_EMULATION_powerpc64le= elf64lppc_fbsd
LD_EMULATION_riscv64= elf64lriscv
diff --git a/sys/conf/kern.pre.mk b/sys/conf/kern.pre.mk
index 0251486247da..440ed2df5644 100644
--- a/sys/conf/kern.pre.mk
+++ b/sys/conf/kern.pre.mk
@@ -223,7 +223,7 @@ ZFS_CFLAGS+= -D__x86_64 -DHAVE_SSE2 -DHAVE_SSSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 \
.endif
.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "powerpc" || \
- ${MACHINE_ARCH} == "powerpcspe" || ${MACHINE_ARCH} == "arm"
+ ${MACHINE_ARCH} == "arm"
ZFS_CFLAGS+= -DBITS_PER_LONG=32
.else
ZFS_CFLAGS+= -DBITS_PER_LONG=64
diff --git a/sys/conf/kmod.mk b/sys/conf/kmod.mk
index edc311348971..17358428a023 100644
--- a/sys/conf/kmod.mk
+++ b/sys/conf/kmod.mk
@@ -383,7 +383,7 @@ afterinstall: _kldxref
_kldxref: .PHONY
${KLDXREF_CMD} ${DESTDIR}${KMODDIR}
.if defined(NO_ROOT) && defined(METALOG)
- echo ".${DISTBASE}${KMODDIR}/linker.hints type=file mode=0644 uname=root gname=wheel" | \
+ echo ".${DISTBASE}${KMODDIR}/linker.hints type=file uname=root gname=wheel mode=0644" | \
cat -l >> ${METALOG}
.endif
.endif
diff --git a/sys/conf/ldscript.powerpcspe b/sys/conf/ldscript.powerpcspe
deleted file mode 100644
index fa82cbe8330f..000000000000
--- a/sys/conf/ldscript.powerpcspe
+++ /dev/null
@@ -1,143 +0,0 @@
-OUTPUT_FORMAT("elf32-powerpc-freebsd", "elf32-powerpc-freebsd",
- "elf32-powerpc-freebsd")
-OUTPUT_ARCH(powerpc)
-ENTRY(__start)
-SEARCH_DIR(/usr/lib);
-PROVIDE (__stack = 0);
-PHDRS
-{
- kernel PT_LOAD;
- dynamic PT_DYNAMIC;
-}
-SECTIONS
-{
- /* Read-only sections, merged into text segment: */
-
- . = kernbase + SIZEOF_HEADERS;
- PROVIDE (begin = . - SIZEOF_HEADERS);
-
- .text :
- {
- *(.glink)
- *(.text)
- *(.stub)
- /* .gnu.warning sections are handled specially by elf32.em. */
- *(.gnu.warning)
- *(.gnu.linkonce.t*)
- } :kernel =0
- _etext = .;
- PROVIDE (etext = .);
-
- .interp : { *(.interp) }
- .hash : { *(.hash) }
- .dynsym : { *(.dynsym) }
- .dynstr : { *(.dynstr) }
- .gnu.version : { *(.gnu.version) }
- .gnu.version_d : { *(.gnu.version_d) }
- .gnu.version_r : { *(.gnu.version_r) }
- .rela.text :
- { *(.rela.text) *(.rela.gnu.linkonce.t*) }
- .rela.data :
- { *(.rela.data) *(.rela.gnu.linkonce.d*) }
- .rela.rodata :
- { *(.rela.rodata) *(.rela.gnu.linkonce.r*) }
- .rela.got : { *(.rela.got) }
- .rela.got1 : { *(.rela.got1) }
- .rela.got2 : { *(.rela.got2) }
- .rela.ctors : { *(.rela.ctors) }
- .rela.dtors : { *(.rela.dtors) }
- .rela.init : { *(.rela.init) }
- .rela.fini : { *(.rela.fini) }
- .rela.bss : { *(.rela.bss) }
- .rela.plt : { *(.rela.plt) }
- .rela.sdata : { *(.rela.sdata) }
- .rela.sbss : { *(.rela.sbss) }
- .rela.sdata2 : { *(.rela.sdata2) }
- .rela.sbss2 : { *(.rela.sbss2) }
-
- .init : { *(.init) } =0
- .fini : { *(.fini) } =0
- .rodata : { *(.rodata) *(.gnu.linkonce.r*) }
- .rodata1 : { *(.rodata1) }
- .note.gnu.build-id : {
- PROVIDE (__build_id_start = .);
- *(.note.gnu.build-id)
- PROVIDE (__build_id_end = .);
- }
- .sdata2 : { *(.sdata2) }
- .sbss2 : { *(.sbss2) }
- /* Adjust the address for the data segment to the next page up. */
- . = ((. + 0x1000) & ~(0x1000 - 1));
- .data :
- {
- *(.data)
- *(.gnu.linkonce.d*)
- CONSTRUCTORS
- }
- .data1 : { *(.data1) }
- .got1 : { *(.got1) }
- . = ALIGN(4096);
- .got : { *(.got) }
- .got.plt : { *(.got.plt) }
- .init_array :
- {
- PROVIDE_HIDDEN (__init_array_start = .);
- KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*)))
- KEEP (*(.init_array))
- PROVIDE_HIDDEN (__init_array_end = .);
- }
- .fini_array :
- {
- PROVIDE_HIDDEN (__fini_array_start = .);
- KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*)))
- KEEP (*(.fini_array))
- PROVIDE_HIDDEN (__fini_array_end = .);
- }
- .dynamic : { *(.dynamic) } :kernel :dynamic
- /* Put .ctors and .dtors next to the .got2 section, so that the pointers
- get relocated with -mrelocatable. Also put in the .fixup pointers.
- The current compiler no longer needs this, but keep it around for 2.7.2 */
- PROVIDE (_GOT2_START_ = .);
- .got2 : { *(.got2) } :kernel
- PROVIDE (__CTOR_LIST__ = .);
- .ctors : { *(.ctors) }
- PROVIDE (__CTOR_END__ = .);
- PROVIDE (__DTOR_LIST__ = .);
- .dtors : { *(.dtors) }
- PROVIDE (__DTOR_END__ = .);
- PROVIDE (_FIXUP_START_ = .);
- .fixup : { *(.fixup) }
- PROVIDE (_FIXUP_END_ = .);
- PROVIDE (_GOT2_END_ = .);
- /* We want the small data sections together, so single-instruction offsets
- can access them all, and initialized data all before uninitialized, so
- we can shorten the on-disk segment size. */
- .sdata : { *(.sdata) }
- _edata = .;
- PROVIDE (edata = .);
- .sbss :
- {
- PROVIDE (__sbss_start = .);
- *(.sbss)
- *(.scommon)
- *(.dynsbss)
- PROVIDE (__sbss_end = .);
- }
- .plt : { *(.plt) }
- .bss :
- {
- PROVIDE (__bss_start = .);
- *(.dynbss)
- *(.bss)
- *(COMMON)
- }
- _end = . ;
- PROVIDE (end = .);
-
- /* Debug */
- INCLUDE debuginfo.ldscript
-
- .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
- /DISCARD/ : { *(.note.GNU-stack) }
-}
-
diff --git a/sys/conf/options b/sys/conf/options
index c86560491faf..c9b9307718b7 100644
--- a/sys/conf/options
+++ b/sys/conf/options
@@ -601,7 +601,6 @@ BLKDEV_IOSIZE opt_global.h
BURN_BRIDGES opt_global.h
DEBUG opt_global.h
DEBUG_LOCKS opt_global.h
-DEBUG_VFS_LOCKS opt_global.h
DFLTPHYS opt_global.h
DIAGNOSTIC opt_global.h
INVARIANT_SUPPORT opt_global.h
diff --git a/sys/conf/options.arm64 b/sys/conf/options.arm64
index 4bdd408f4651..5e6f25cd8db8 100644
--- a/sys/conf/options.arm64
+++ b/sys/conf/options.arm64
@@ -16,6 +16,9 @@ COMPAT_FREEBSD32 opt_global.h
# Emulate SWP/SWPB for COMPAT_FREEBSD32
EMUL_SWP opt_global.h
+# iWARP client interface support in ixl
+IXL_IW opt_ixl.h
+
# EFI Runtime services support
EFIRT opt_efirt.h
diff --git a/sys/conf/options.powerpc b/sys/conf/options.powerpc
index a6096d1b32ca..c8ab0e066f49 100644
--- a/sys/conf/options.powerpc
+++ b/sys/conf/options.powerpc
@@ -8,7 +8,6 @@ CELL
POWERPC
POWERPC64
POWERPC64LE
-POWERPCSPE
FPU_EMU
diff --git a/sys/conf/std.nodebug b/sys/conf/std.nodebug
index 79676a1d618f..7ce16e9b52aa 100644
--- a/sys/conf/std.nodebug
+++ b/sys/conf/std.nodebug
@@ -7,7 +7,6 @@ nooptions INVARIANT_SUPPORT
nooptions DIAGNOSTIC
nooptions WITNESS
nooptions WITNESS_SKIPSPIN
-nooptions DEBUG_VFS_LOCKS
nooptions BUF_TRACKING
nooptions FULL_BUF_TRACKING
nooptions DEADLKRES
diff --git a/sys/contrib/dev/athk/ath10k/core.c b/sys/contrib/dev/athk/ath10k/core.c
index a0407f693659..9ec08b402fd2 100644
--- a/sys/contrib/dev/athk/ath10k/core.c
+++ b/sys/contrib/dev/athk/ath10k/core.c
@@ -3,7 +3,6 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
@@ -1212,7 +1211,7 @@ static int ath10k_download_fw(struct ath10k *ar)
u32 address, data_len;
const void *data;
int ret;
- struct pm_qos_request latency_qos;
+ struct pm_qos_request latency_qos = {};
address = ar->hw_params.patch_load_addr;
@@ -1246,7 +1245,6 @@ static int ath10k_download_fw(struct ath10k *ar)
ret);
}
- memset(&latency_qos, 0, sizeof(latency_qos));
cpu_latency_qos_add_request(&latency_qos, 0);
ret = ath10k_bmi_fast_download(ar, address, data, data_len);
@@ -2570,8 +2568,9 @@ static int ath10k_init_hw_params(struct ath10k *ar)
return 0;
}
-static bool ath10k_core_needs_recovery(struct ath10k *ar)
+static void ath10k_core_recovery_check_work(struct work_struct *work)
{
+ struct ath10k *ar = container_of(work, struct ath10k, recovery_check_work);
long time_left;
/* Sometimes the recovery will fail and then the next all recovery fail,
@@ -2581,7 +2580,7 @@ static bool ath10k_core_needs_recovery(struct ath10k *ar)
ath10k_err(ar, "consecutive fail %d times, will shutdown driver!",
atomic_read(&ar->fail_cont_count));
ar->state = ATH10K_STATE_WEDGED;
- return false;
+ return;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT, "total recovery count: %d", ++ar->recovery_count);
@@ -2595,27 +2594,24 @@ static bool ath10k_core_needs_recovery(struct ath10k *ar)
ATH10K_RECOVERY_TIMEOUT_HZ);
if (time_left) {
ath10k_warn(ar, "previous recovery succeeded, skip this!\n");
- return false;
+ return;
}
/* Record the continuous recovery fail count when recovery failed. */
atomic_inc(&ar->fail_cont_count);
/* Avoid having multiple recoveries at the same time. */
- return false;
+ return;
}
atomic_inc(&ar->pending_recovery);
-
- return true;
+ queue_work(ar->workqueue, &ar->restart_work);
}
void ath10k_core_start_recovery(struct ath10k *ar)
{
- if (!ath10k_core_needs_recovery(ar))
- return;
-
- queue_work(ar->workqueue, &ar->restart_work);
+ /* Use workqueue_aux to avoid blocking recovery tracking */
+ queue_work(ar->workqueue_aux, &ar->recovery_check_work);
}
EXPORT_SYMBOL(ath10k_core_start_recovery);
@@ -3440,7 +3436,7 @@ EXPORT_SYMBOL(ath10k_core_stop);
*/
static int ath10k_core_probe_fw(struct ath10k *ar)
{
- struct bmi_target_info target_info;
+ struct bmi_target_info target_info = {};
int ret = 0;
ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL);
@@ -3451,7 +3447,6 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
switch (ar->hif.bus) {
case ATH10K_BUS_SDIO:
- memset(&target_info, 0, sizeof(target_info));
ret = ath10k_bmi_get_target_info_sdio(ar, &target_info);
if (ret) {
ath10k_err(ar, "could not get target info (%d)\n", ret);
@@ -3463,7 +3458,6 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
case ATH10K_BUS_PCI:
case ATH10K_BUS_AHB:
case ATH10K_BUS_USB:
- memset(&target_info, 0, sizeof(target_info));
ret = ath10k_bmi_get_target_info(ar, &target_info);
if (ret) {
ath10k_err(ar, "could not get target info (%d)\n", ret);
@@ -3473,7 +3467,6 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
ar->hw->wiphy->hw_version = target_info.version;
break;
case ATH10K_BUS_SNOC:
- memset(&target_info, 0, sizeof(target_info));
ret = ath10k_hif_get_target_info(ar, &target_info);
if (ret) {
ath10k_err(ar, "could not get target info (%d)\n", ret);
@@ -3824,6 +3817,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
INIT_WORK(&ar->register_work, ath10k_core_register_work);
INIT_WORK(&ar->restart_work, ath10k_core_restart);
+ INIT_WORK(&ar->recovery_check_work, ath10k_core_recovery_check_work);
INIT_WORK(&ar->set_coverage_class_work,
ath10k_core_set_coverage_class_work);
diff --git a/sys/contrib/dev/athk/ath10k/core.h b/sys/contrib/dev/athk/ath10k/core.h
index cb250ca6991d..eaf122d4b112 100644
--- a/sys/contrib/dev/athk/ath10k/core.h
+++ b/sys/contrib/dev/athk/ath10k/core.h
@@ -3,7 +3,6 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
@@ -1216,6 +1215,7 @@ struct ath10k {
struct work_struct register_work;
struct work_struct restart_work;
+ struct work_struct recovery_check_work;
struct work_struct bundle_tx_work;
struct work_struct tx_complete_work;
@@ -1267,9 +1267,13 @@ struct ath10k {
struct {
/* protected by conf_mutex */
struct ath10k_fw_components utf_mode_fw;
+ u8 ftm_msgref;
/* protected by data_lock */
bool utf_monitor;
+ u32 data_pos;
+ u32 expected_seq;
+ u8 *eventdata;
} testmode;
struct {
diff --git a/sys/contrib/dev/athk/ath10k/mac.c b/sys/contrib/dev/athk/ath10k/mac.c
index 6725c2c742bd..e2bda3c0d925 100644
--- a/sys/contrib/dev/athk/ath10k/mac.c
+++ b/sys/contrib/dev/athk/ath10k/mac.c
@@ -3,7 +3,6 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
@@ -5442,6 +5441,7 @@ static void ath10k_stop(struct ieee80211_hw *hw, bool suspend)
cancel_work_sync(&ar->set_coverage_class_work);
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->restart_work);
+ cancel_work_sync(&ar->recovery_check_work);
}
static int ath10k_config_ps(struct ath10k *ar)
diff --git a/sys/contrib/dev/athk/ath10k/qmi.c b/sys/contrib/dev/athk/ath10k/qmi.c
index f1f33af0170a..8275345631a0 100644
--- a/sys/contrib/dev/athk/ath10k/qmi.c
+++ b/sys/contrib/dev/athk/ath10k/qmi.c
@@ -986,7 +986,7 @@ static int ath10k_qmi_new_server(struct qmi_handle *qmi_hdl,
ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service found\n");
- ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)&qmi->sq,
+ ret = kernel_connect(qmi_hdl->sock, (struct sockaddr_unsized *)&qmi->sq,
sizeof(qmi->sq), 0);
if (ret) {
ath10k_err(ar, "failed to connect to a remote QMI service port\n");
diff --git a/sys/contrib/dev/athk/ath10k/testmode.c b/sys/contrib/dev/athk/ath10k/testmode.c
index 3fcefc55b74f..d3bd385694d6 100644
--- a/sys/contrib/dev/athk/ath10k/testmode.c
+++ b/sys/contrib/dev/athk/ath10k/testmode.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "testmode.h"
@@ -10,12 +11,17 @@
#include "debug.h"
#include "wmi.h"
+#include "wmi-tlv.h"
#include "hif.h"
#include "hw.h"
#include "core.h"
#include "testmode_i.h"
+#define ATH10K_FTM_SEG_NONE ((u32)-1)
+#define ATH10K_FTM_SEGHDR_CURRENT_SEQ GENMASK(3, 0)
+#define ATH10K_FTM_SEGHDR_TOTAL_SEGMENTS GENMASK(7, 4)
+
static const struct nla_policy ath10k_tm_policy[ATH10K_TM_ATTR_MAX + 1] = {
[ATH10K_TM_ATTR_CMD] = { .type = NLA_U32 },
[ATH10K_TM_ATTR_DATA] = { .type = NLA_BINARY,
@@ -25,41 +31,19 @@ static const struct nla_policy ath10k_tm_policy[ATH10K_TM_ATTR_MAX + 1] = {
[ATH10K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
};
-/* Returns true if callee consumes the skb and the skb should be discarded.
- * Returns false if skb is not used. Does not sleep.
- */
-bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
+static void ath10k_tm_event_unsegmented(struct ath10k *ar, u32 cmd_id,
+ struct sk_buff *skb)
{
struct sk_buff *nl_skb;
- bool consumed;
int ret;
- ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
- "testmode event wmi cmd_id %d skb %p skb->len %d\n",
- cmd_id, skb, skb->len);
-
- ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
-
- spin_lock_bh(&ar->data_lock);
-
- if (!ar->testmode.utf_monitor) {
- consumed = false;
- goto out;
- }
-
- /* Only testmode.c should be handling events from utf firmware,
- * otherwise all sort of problems will arise as mac80211 operations
- * are not initialised.
- */
- consumed = true;
-
nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
2 * sizeof(u32) + skb->len,
GFP_ATOMIC);
if (!nl_skb) {
ath10k_warn(ar,
"failed to allocate skb for testmode wmi event\n");
- goto out;
+ return;
}
ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_WMI);
@@ -68,7 +52,7 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
"failed to put testmode wmi event cmd attribute: %d\n",
ret);
kfree_skb(nl_skb);
- goto out;
+ return;
}
ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_WMI_CMDID, cmd_id);
@@ -77,7 +61,7 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
"failed to put testmode wmi event cmd_id: %d\n",
ret);
kfree_skb(nl_skb);
- goto out;
+ return;
}
ret = nla_put(nl_skb, ATH10K_TM_ATTR_DATA, skb->len, skb->data);
@@ -86,10 +70,122 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
"failed to copy skb to testmode wmi event: %d\n",
ret);
kfree_skb(nl_skb);
- goto out;
+ return;
+ }
+
+ cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+}
+
+static void ath10k_tm_event_segmented(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
+{
+ struct wmi_ftm_cmd *ftm = (struct wmi_ftm_cmd *)skb->data;
+ u8 total_segments, current_seq;
+ struct sk_buff *nl_skb;
+ u8 const *buf_pos;
+ u16 datalen;
+ u32 data_pos;
+ int ret;
+
+ if (skb->len < sizeof(*ftm)) {
+ ath10k_warn(ar, "Invalid ftm event length: %d\n", skb->len);
+ return;
+ }
+
+ current_seq = FIELD_GET(ATH10K_FTM_SEGHDR_CURRENT_SEQ,
+ __le32_to_cpu(ftm->seg_hdr.segmentinfo));
+ total_segments = FIELD_GET(ATH10K_FTM_SEGHDR_TOTAL_SEGMENTS,
+ __le32_to_cpu(ftm->seg_hdr.segmentinfo));
+ datalen = skb->len - sizeof(*ftm);
+ buf_pos = ftm->data;
+
+ if (current_seq == 0) {
+ ar->testmode.expected_seq = 0;
+ ar->testmode.data_pos = 0;
+ }
+
+ data_pos = ar->testmode.data_pos;
+
+ if ((data_pos + datalen) > ATH_FTM_EVENT_MAX_BUF_LENGTH) {
+ ath10k_warn(ar, "Invalid ftm event length at %u: %u\n",
+ data_pos, datalen);
+ ret = -EINVAL;
+ return;
+ }
+
+ memcpy(&ar->testmode.eventdata[data_pos], buf_pos, datalen);
+ data_pos += datalen;
+
+ if (++ar->testmode.expected_seq != total_segments) {
+ ar->testmode.data_pos = data_pos;
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "partial data received %u/%u\n",
+ current_seq + 1, total_segments);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "total data length %u\n", data_pos);
+
+ nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
+ 2 * sizeof(u32) + data_pos,
+ GFP_ATOMIC);
+ if (!nl_skb) {
+ ath10k_warn(ar, "failed to allocate skb for testmode wmi event\n");
+ return;
+ }
+
+ ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_TLV);
+ if (ret) {
+ ath10k_warn(ar, "failed to put testmode wmi event attribute: %d\n", ret);
+ kfree_skb(nl_skb);
+ return;
+ }
+
+ ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_WMI_CMDID, cmd_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to put testmode wmi event cmd_id: %d\n", ret);
+ kfree_skb(nl_skb);
+ return;
+ }
+
+ ret = nla_put(nl_skb, ATH10K_TM_ATTR_DATA, data_pos, &ar->testmode.eventdata[0]);
+ if (ret) {
+ ath10k_warn(ar, "failed to copy skb to testmode wmi event: %d\n", ret);
+ kfree_skb(nl_skb);
+ return;
}
cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+}
+
+/* Returns true if callee consumes the skb and the skb should be discarded.
+ * Returns false if skb is not used. Does not sleep.
+ */
+bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
+{
+ bool consumed;
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+ "testmode event wmi cmd_id %d skb %p skb->len %d\n",
+ cmd_id, skb, skb->len);
+
+ ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (!ar->testmode.utf_monitor) {
+ consumed = false;
+ goto out;
+ }
+
+ /* Only testmode.c should be handling events from utf firmware,
+ * otherwise all sort of problems will arise as mac80211 operations
+ * are not initialised.
+ */
+ consumed = true;
+
+ if (ar->testmode.expected_seq != ATH10K_FTM_SEG_NONE)
+ ath10k_tm_event_segmented(ar, cmd_id, skb);
+ else
+ ath10k_tm_event_unsegmented(ar, cmd_id, skb);
out:
spin_unlock_bh(&ar->data_lock);
@@ -281,12 +377,18 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
goto err_release_utf_mode_fw;
}
+ ar->testmode.eventdata = kzalloc(ATH_FTM_EVENT_MAX_BUF_LENGTH, GFP_KERNEL);
+ if (!ar->testmode.eventdata) {
+ ret = -ENOMEM;
+ goto err_power_down;
+ }
+
ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF,
&ar->testmode.utf_mode_fw);
if (ret) {
ath10k_err(ar, "failed to start core (testmode): %d\n", ret);
ar->state = ATH10K_STATE_OFF;
- goto err_power_down;
+ goto err_release_eventdata;
}
ar->state = ATH10K_STATE_UTF;
@@ -302,6 +404,10 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
return 0;
+err_release_eventdata:
+ kfree(ar->testmode.eventdata);
+ ar->testmode.eventdata = NULL;
+
err_power_down:
ath10k_hif_power_down(ar);
@@ -341,6 +447,9 @@ static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar)
release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
+ kfree(ar->testmode.eventdata);
+ ar->testmode.eventdata = NULL;
+
ar->state = ATH10K_STATE_OFF;
}
@@ -424,6 +533,85 @@ out:
return ret;
}
+static int ath10k_tm_cmd_tlv(struct ath10k *ar, struct nlattr *tb[])
+{
+ u16 total_bytes, num_segments;
+ u32 cmd_id, buf_len;
+ u8 segnumber = 0;
+ u8 *bufpos;
+ void *buf;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ buf = nla_data(tb[ATH10K_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH10K_TM_ATTR_DATA]);
+ cmd_id = WMI_PDEV_UTF_CMDID;
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+ "cmd wmi ftm cmd_id %d buffer length %d\n",
+ cmd_id, buf_len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len);
+
+ bufpos = buf;
+ total_bytes = buf_len;
+ num_segments = total_bytes / MAX_WMI_UTF_LEN;
+ ar->testmode.expected_seq = 0;
+
+ if (buf_len - (num_segments * MAX_WMI_UTF_LEN))
+ num_segments++;
+
+ while (buf_len) {
+ u16 chunk_len = min_t(u16, buf_len, MAX_WMI_UTF_LEN);
+ struct wmi_ftm_cmd *ftm_cmd;
+ struct sk_buff *skb;
+ u32 hdr_info;
+ u8 seginfo;
+
+ skb = ath10k_wmi_alloc_skb(ar, (chunk_len +
+ sizeof(struct wmi_ftm_cmd)));
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ftm_cmd = (struct wmi_ftm_cmd *)skb->data;
+ hdr_info = FIELD_PREP(WMI_TLV_TAG, WMI_TLV_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, (chunk_len +
+ sizeof(struct wmi_ftm_seg_hdr)));
+ ftm_cmd->tlv_header = __cpu_to_le32(hdr_info);
+ ftm_cmd->seg_hdr.len = __cpu_to_le32(total_bytes);
+ ftm_cmd->seg_hdr.msgref = __cpu_to_le32(ar->testmode.ftm_msgref);
+ seginfo = FIELD_PREP(ATH10K_FTM_SEGHDR_TOTAL_SEGMENTS, num_segments) |
+ FIELD_PREP(ATH10K_FTM_SEGHDR_CURRENT_SEQ, segnumber);
+ ftm_cmd->seg_hdr.segmentinfo = __cpu_to_le32(seginfo);
+ segnumber++;
+
+ memcpy(&ftm_cmd->data, bufpos, chunk_len);
+
+ ret = ath10k_wmi_cmd_send(ar, skb, cmd_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to send wmi ftm command: %d\n", ret);
+ goto out;
+ }
+
+ buf_len -= chunk_len;
+ bufpos += chunk_len;
+ }
+
+ ar->testmode.ftm_msgref++;
+ ret = 0;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len)
{
@@ -439,9 +627,14 @@ int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (!tb[ATH10K_TM_ATTR_CMD])
return -EINVAL;
+ ar->testmode.expected_seq = ATH10K_FTM_SEG_NONE;
+
switch (nla_get_u32(tb[ATH10K_TM_ATTR_CMD])) {
case ATH10K_TM_CMD_GET_VERSION:
- return ath10k_tm_cmd_get_version(ar, tb);
+ if (!tb[ATH10K_TM_ATTR_DATA])
+ return ath10k_tm_cmd_get_version(ar, tb);
+ else /* ATH10K_TM_CMD_TLV */
+ return ath10k_tm_cmd_tlv(ar, tb);
case ATH10K_TM_CMD_UTF_START:
return ath10k_tm_cmd_utf_start(ar, tb);
case ATH10K_TM_CMD_UTF_STOP:
diff --git a/sys/contrib/dev/athk/ath10k/testmode_i.h b/sys/contrib/dev/athk/ath10k/testmode_i.h
index ee1cb27c1d60..1603f5276682 100644
--- a/sys/contrib/dev/athk/ath10k/testmode_i.h
+++ b/sys/contrib/dev/athk/ath10k/testmode_i.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
/* "API" level of the ath10k testmode interface. Bump it after every
@@ -14,6 +15,7 @@
#define ATH10K_TESTMODE_VERSION_MINOR 0
#define ATH10K_TM_DATA_MAX_LEN 5000
+#define ATH_FTM_EVENT_MAX_BUF_LENGTH 2048
enum ath10k_tm_attr {
__ATH10K_TM_ATTR_INVALID = 0,
@@ -57,4 +59,17 @@ enum ath10k_tm_cmd {
* ATH10K_TM_ATTR_DATA.
*/
ATH10K_TM_CMD_WMI = 3,
+
+ /* The command used to transmit a test command to the firmware
+ * and the event to receive test events from the firmware. The data
+ * received only contains the TLV payload; the driver needs to add the
+ * TLV header and send the command to firmware with command id
+ * WMI_PDEV_UTF_CMDID. The data payload may be large, so the driver
+ * sends it to the firmware in segments.
+ *
+ * This legacy testmode command shares the same value as the get-version
+ * command. To distinguish between them, we check whether the data attribute
+ * is present.
+ */
+ ATH10K_TM_CMD_TLV = ATH10K_TM_CMD_GET_VERSION,
};
diff --git a/sys/contrib/dev/athk/ath10k/wmi.h b/sys/contrib/dev/athk/ath10k/wmi.h
index 0faefc0a9a40..7f50a1de6b97 100644
--- a/sys/contrib/dev/athk/ath10k/wmi.h
+++ b/sys/contrib/dev/athk/ath10k/wmi.h
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef _WMI_H_
@@ -7418,6 +7418,23 @@ struct wmi_pdev_bb_timing_cfg_cmd {
__le32 bb_xpa_timing;
} __packed;
+struct wmi_ftm_seg_hdr {
+ __le32 len;
+ __le32 msgref;
+ __le32 segmentinfo;
+ __le32 pdev_id;
+} __packed;
+
+struct wmi_ftm_cmd {
+ __le32 tlv_header;
+ struct wmi_ftm_seg_hdr seg_hdr;
+ u8 data[];
+} __packed;
+
+#define WMI_TLV_LEN GENMASK(15, 0)
+#define WMI_TLV_TAG GENMASK(31, 16)
+#define MAX_WMI_UTF_LEN 252
+
struct ath10k;
struct ath10k_vif;
struct ath10k_fw_stats_pdev;
diff --git a/sys/contrib/dev/mediatek/mt76/Kconfig b/sys/contrib/dev/mediatek/mt76/Kconfig
new file mode 100644
index 000000000000..502303622a53
--- /dev/null
+++ b/sys/contrib/dev/mediatek/mt76/Kconfig
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+config MT76_CORE
+ tristate
+ select PAGE_POOL
+
+config MT76_LEDS
+ bool
+ depends on MT76_CORE
+ depends on LEDS_CLASS=y || MT76_CORE=LEDS_CLASS
+ default y
+
+config MT76_USB
+ tristate
+ depends on MT76_CORE
+
+config MT76_SDIO
+ tristate
+ depends on MT76_CORE
+
+config MT76x02_LIB
+ tristate
+ select MT76_CORE
+
+config MT76x02_USB
+ tristate
+ select MT76_USB
+
+config MT76_CONNAC_LIB
+ tristate
+ select MT76_CORE
+
+config MT792x_LIB
+ tristate
+ select MT76_CONNAC_LIB
+
+config MT792x_USB
+ tristate
+ select MT76_USB
+
+config MT76_NPU
+ bool
+ depends on MT76_CORE
+
+source "drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt7603/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt7615/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt7915/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt7921/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt7996/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt7925/Kconfig"
diff --git a/sys/contrib/dev/mediatek/mt76/Makefile b/sys/contrib/dev/mediatek/mt76/Makefile
new file mode 100644
index 000000000000..1d42adfe8030
--- /dev/null
+++ b/sys/contrib/dev/mediatek/mt76/Makefile
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+obj-$(CONFIG_MT76_CORE) += mt76.o
+obj-$(CONFIG_MT76_USB) += mt76-usb.o
+obj-$(CONFIG_MT76_SDIO) += mt76-sdio.o
+obj-$(CONFIG_MT76x02_LIB) += mt76x02-lib.o
+obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o
+obj-$(CONFIG_MT76_CONNAC_LIB) += mt76-connac-lib.o
+obj-$(CONFIG_MT792x_LIB) += mt792x-lib.o
+obj-$(CONFIG_MT792x_USB) += mt792x-usb.o
+
+mt76-y := \
+ mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
+ tx.o agg-rx.o mcu.o wed.o scan.o channel.o
+
+mt76-$(CONFIG_MT76_NPU) += npu.o
+mt76-$(CONFIG_PCI) += pci.o
+mt76-$(CONFIG_NL80211_TESTMODE) += testmode.o
+
+mt76-usb-y := usb.o usb_trace.o
+mt76-sdio-y := sdio.o sdio_txrx.o
+
+CFLAGS_trace.o := -I$(src)
+CFLAGS_usb_trace.o := -I$(src)
+CFLAGS_mt76x02_trace.o := -I$(src)
+CFLAGS_mt792x_trace.o := -I$(src)
+
+mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o mt76x02_mcu.o \
+ mt76x02_eeprom.o mt76x02_phy.o mt76x02_mmio.o \
+ mt76x02_txrx.o mt76x02_trace.o mt76x02_debugfs.o \
+ mt76x02_dfs.o mt76x02_beacon.o
+
+mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o
+
+mt76-connac-lib-y := mt76_connac_mcu.o mt76_connac_mac.o mt76_connac3_mac.o
+
+mt792x-lib-y := mt792x_core.o mt792x_mac.o mt792x_trace.o \
+ mt792x_debugfs.o mt792x_dma.o
+mt792x-lib-$(CONFIG_ACPI) += mt792x_acpi_sar.o
+mt792x-usb-y := mt792x_usb.o
+
+obj-$(CONFIG_MT76x0_COMMON) += mt76x0/
+obj-$(CONFIG_MT76x2_COMMON) += mt76x2/
+obj-$(CONFIG_MT7603E) += mt7603/
+obj-$(CONFIG_MT7615_COMMON) += mt7615/
+obj-$(CONFIG_MT7915E) += mt7915/
+obj-$(CONFIG_MT7921_COMMON) += mt7921/
+obj-$(CONFIG_MT7996E) += mt7996/
+obj-$(CONFIG_MT7925_COMMON) += mt7925/
diff --git a/sys/contrib/dev/mediatek/mt76/agg-rx.c b/sys/contrib/dev/mediatek/mt76/agg-rx.c
index 07c386c7b4d0..3d34caf7e4f7 100644
--- a/sys/contrib/dev/mediatek/mt76/agg-rx.c
+++ b/sys/contrib/dev/mediatek/mt76/agg-rx.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
*/
@@ -173,6 +173,8 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
if (ackp == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
return;
+ if (wcid->def_wcid)
+ wcid = wcid->def_wcid;
tid = rcu_dereference(wcid->aggr[tidno]);
if (!tid)
return;
diff --git a/sys/contrib/dev/mediatek/mt76/channel.c b/sys/contrib/dev/mediatek/mt76/channel.c
index 77b75792eb48..2b705bdb7993 100644
--- a/sys/contrib/dev/mediatek/mt76/channel.c
+++ b/sys/contrib/dev/mediatek/mt76/channel.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2024 Felix Fietkau <nbd@nbd.name>
*/
@@ -314,21 +314,24 @@ void mt76_put_vif_phy_link(struct mt76_phy *phy, struct ieee80211_vif *vif,
kfree(mlink);
}
-static void mt76_roc_complete(struct mt76_phy *phy)
+void mt76_roc_complete(struct mt76_phy *phy)
{
struct mt76_vif_link *mlink = phy->roc_link;
+ struct mt76_dev *dev = phy->dev;
if (!phy->roc_vif)
return;
if (mlink)
mlink->mvif->roc_phy = NULL;
- if (phy->main_chandef.chan)
+ if (phy->main_chandef.chan &&
+ !test_bit(MT76_MCU_RESET, &dev->phy.state))
mt76_set_channel(phy, &phy->main_chandef, false);
mt76_put_vif_phy_link(phy, phy->roc_vif, phy->roc_link);
phy->roc_vif = NULL;
phy->roc_link = NULL;
- ieee80211_remain_on_channel_expired(phy->hw);
+ if (!test_bit(MT76_MCU_RESET, &dev->phy.state))
+ ieee80211_remain_on_channel_expired(phy->hw);
}
void mt76_roc_complete_work(struct work_struct *work)
@@ -351,6 +354,7 @@ void mt76_abort_roc(struct mt76_phy *phy)
mt76_roc_complete(phy);
mutex_unlock(&dev->mutex);
}
+EXPORT_SYMBOL_GPL(mt76_abort_roc);
int mt76_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_channel *chan, int duration,
@@ -368,7 +372,8 @@ int mt76_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&dev->mutex);
- if (phy->roc_vif || dev->scan.phy == phy) {
+ if (phy->roc_vif || dev->scan.phy == phy ||
+ test_bit(MT76_MCU_RESET, &dev->phy.state)) {
ret = -EBUSY;
goto out;
}
diff --git a/sys/contrib/dev/mediatek/mt76/debugfs.c b/sys/contrib/dev/mediatek/mt76/debugfs.c
index b6a2746c187d..a5ac6ca86735 100644
--- a/sys/contrib/dev/mediatek/mt76/debugfs.c
+++ b/sys/contrib/dev/mediatek/mt76/debugfs.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
@@ -93,9 +93,9 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str,
{
int i;
- seq_printf(file, "%10s:", str);
+ seq_printf(file, "%16s:", str);
for (i = 0; i < len; i++)
- seq_printf(file, " %2d", val[i]);
+ seq_printf(file, " %4d", val[i]);
seq_puts(file, "\n");
}
EXPORT_SYMBOL_GPL(mt76_seq_puts_array);
diff --git a/sys/contrib/dev/mediatek/mt76/dma.c b/sys/contrib/dev/mediatek/mt76/dma.c
index af902a761e42..5c04a0dce1fa 100644
--- a/sys/contrib/dev/mediatek/mt76/dma.c
+++ b/sys/contrib/dev/mediatek/mt76/dma.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
@@ -11,37 +11,6 @@
#include "mt76.h"
#include "dma.h"
-#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)
-
-#define Q_READ(_q, _field) ({ \
- u32 _offset = offsetof(struct mt76_queue_regs, _field); \
- u32 _val; \
- if ((_q)->flags & MT_QFLAG_WED) \
- _val = mtk_wed_device_reg_read((_q)->wed, \
- ((_q)->wed_regs + \
- _offset)); \
- else \
- _val = readl(&(_q)->regs->_field); \
- _val; \
-})
-
-#define Q_WRITE(_q, _field, _val) do { \
- u32 _offset = offsetof(struct mt76_queue_regs, _field); \
- if ((_q)->flags & MT_QFLAG_WED) \
- mtk_wed_device_reg_write((_q)->wed, \
- ((_q)->wed_regs + _offset), \
- _val); \
- else \
- writel(_val, &(_q)->regs->_field); \
-} while (0)
-
-#else
-
-#define Q_READ(_q, _field) readl(&(_q)->regs->_field)
-#define Q_WRITE(_q, _field, _val) writel(_val, &(_q)->regs->_field)
-
-#endif
-
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
@@ -190,24 +159,61 @@ mt76_free_pending_rxwi(struct mt76_dev *dev)
EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);
static void
+mt76_dma_queue_magic_cnt_init(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ if (!mt76_queue_is_wed_rro(q))
+ return;
+
+ q->magic_cnt = 0;
+ if (mt76_queue_is_wed_rro_ind(q)) {
+ struct mt76_wed_rro_desc *rro_desc;
+ u32 data1 = FIELD_PREP(RRO_IND_DATA1_MAGIC_CNT_MASK,
+ MT_DMA_WED_IND_CMD_CNT - 1);
+ int i;
+
+ rro_desc = (struct mt76_wed_rro_desc *)q->desc;
+ for (i = 0; i < q->ndesc; i++) {
+ struct mt76_wed_rro_ind *cmd;
+
+ cmd = (struct mt76_wed_rro_ind *)&rro_desc[i];
+ cmd->data1 = cpu_to_le32(data1);
+ }
+ } else if (mt76_queue_is_wed_rro_rxdmad_c(q)) {
+ struct mt76_rro_rxdmad_c *dmad = (void *)q->desc;
+ u32 data3 = FIELD_PREP(RRO_RXDMAD_DATA3_MAGIC_CNT_MASK,
+ MT_DMA_MAGIC_CNT - 1);
+ int i;
+
+ for (i = 0; i < q->ndesc; i++)
+ dmad[i].data3 = cpu_to_le32(data3);
+ }
+}
+
+static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
Q_WRITE(q, desc_base, q->desc_dma);
- if (q->flags & MT_QFLAG_WED_RRO_EN)
+ if ((q->flags & MT_QFLAG_WED_RRO_EN) && !mt76_npu_device_active(dev))
Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc);
else
Q_WRITE(q, ring_size, q->ndesc);
+
+ if (mt76_queue_is_npu_tx(q)) {
+ writel(q->desc_dma, &q->regs->desc_base);
+ writel(q->ndesc, &q->regs->ring_size);
+ }
q->head = Q_READ(q, dma_idx);
q->tail = q->head;
}
-void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
- bool reset_idx)
+void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
+ bool reset_idx)
{
if (!q || !q->ndesc)
return;
- if (!mt76_queue_is_wed_rro_ind(q)) {
+ if (!mt76_queue_is_wed_rro_ind(q) &&
+ !mt76_queue_is_wed_rro_rxdmad_c(q) && !mt76_queue_is_npu(q)) {
int i;
/* clear descriptors */
@@ -215,27 +221,26 @@ void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
}
+ mt76_dma_queue_magic_cnt_init(dev, q);
if (reset_idx) {
- Q_WRITE(q, cpu_idx, 0);
+ if (mt76_queue_is_emi(q))
+ *q->emi_cpu_idx = 0;
+ else
+ Q_WRITE(q, cpu_idx, 0);
Q_WRITE(q, dma_idx, 0);
}
mt76_dma_sync_idx(dev, q);
}
-void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
-{
- __mt76_dma_queue_reset(dev, q, true);
-}
-
static int
mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_buf *buf, void *data)
{
struct mt76_queue_entry *entry = &q->entry[q->head];
struct mt76_txwi_cache *txwi = NULL;
+ u32 buf1 = 0, ctrl, info = 0;
struct mt76_desc *desc;
int idx = q->head;
- u32 buf1 = 0, ctrl;
int rx_token;
if (mt76_queue_is_wed_rro_ind(q)) {
@@ -244,6 +249,9 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
rro_desc = (struct mt76_wed_rro_desc *)q->desc;
data = &rro_desc[q->head];
goto done;
+ } else if (mt76_queue_is_wed_rro_rxdmad_c(q)) {
+ data = &q->desc[q->head];
+ goto done;
}
desc = &q->desc[q->head];
@@ -252,7 +260,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
buf1 = FIELD_PREP(MT_DMA_CTL_SDP0_H, buf->addr >> 32);
#endif
- if (mt76_queue_is_wed_rx(q)) {
+ if (mt76_queue_is_wed_rx(q) || mt76_queue_is_wed_rro_data(q)) {
txwi = mt76_get_rxwi(dev);
if (!txwi)
return -ENOMEM;
@@ -265,12 +273,26 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
ctrl |= MT_DMA_CTL_TO_HOST;
+
+ txwi->qid = q - dev->q_rx;
+ }
+
+ if (mt76_queue_is_wed_rro_msdu_pg(q) &&
+ dev->drv->rx_rro_add_msdu_page) {
+ if (dev->drv->rx_rro_add_msdu_page(dev, q, buf->addr, data))
+ return -ENOMEM;
+ }
+
+ if (q->flags & MT_QFLAG_WED_RRO_EN) {
+ info |= FIELD_PREP(MT_DMA_MAGIC_MASK, q->magic_cnt);
+ if ((q->head + 1) == q->ndesc)
+ q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_MAGIC_CNT;
}
WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
- WRITE_ONCE(desc->info, 0);
+ WRITE_ONCE(desc->info, cpu_to_le32(info));
done:
entry->dma_addr[0] = buf->addr;
@@ -379,7 +401,10 @@ static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
wmb();
- Q_WRITE(q, cpu_idx, q->head);
+ if (mt76_queue_is_emi(q))
+ *q->emi_cpu_idx = cpu_to_le16(q->head);
+ else
+ Q_WRITE(q, cpu_idx, q->head);
}
static void
@@ -399,6 +424,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
while (q->queued > 0 && q->tail != last) {
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
+ mt76_npu_txdesc_cleanup(q, q->tail);
mt76_queue_tx_complete(dev, q, &entry);
if (entry.txwi) {
@@ -423,15 +449,61 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
}
static void *
+mt76_dma_get_rxdmad_c_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ int idx, int *len, bool *more)
+{
+ struct mt76_queue_entry *e = &q->entry[idx];
+ struct mt76_rro_rxdmad_c *dmad = e->buf;
+ u32 data1 = le32_to_cpu(dmad->data1);
+ u32 data2 = le32_to_cpu(dmad->data2);
+ struct mt76_txwi_cache *t;
+ u16 rx_token_id;
+ u8 ind_reason;
+ void *buf;
+
+ rx_token_id = FIELD_GET(RRO_RXDMAD_DATA2_RX_TOKEN_ID_MASK, data2);
+ t = mt76_rx_token_release(dev, rx_token_id);
+ if (!t)
+ return ERR_PTR(-EAGAIN);
+
+ q = &dev->q_rx[t->qid];
+ dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
+ SKB_WITH_OVERHEAD(q->buf_size),
+ page_pool_get_dma_dir(q->page_pool));
+
+ if (len)
+ *len = FIELD_GET(RRO_RXDMAD_DATA1_SDL0_MASK, data1);
+ if (more)
+ *more = !FIELD_GET(RRO_RXDMAD_DATA1_LS_MASK, data1);
+
+ buf = t->ptr;
+ ind_reason = FIELD_GET(RRO_RXDMAD_DATA2_IND_REASON_MASK, data2);
+ if (ind_reason == MT_DMA_WED_IND_REASON_REPEAT ||
+ ind_reason == MT_DMA_WED_IND_REASON_OLDPKT) {
+ mt76_put_page_pool_buf(buf, false);
+ buf = ERR_PTR(-EAGAIN);
+ }
+ t->ptr = NULL;
+ t->dma_addr = 0;
+
+ mt76_put_rxwi(dev, t);
+
+ return buf;
+}
+
+static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
- int *len, u32 *info, bool *more, bool *drop)
+ int *len, u32 *info, bool *more, bool *drop, bool flush)
{
struct mt76_queue_entry *e = &q->entry[idx];
struct mt76_desc *desc = &q->desc[idx];
u32 ctrl, desc_info, buf1;
void *buf = e->buf;
- if (mt76_queue_is_wed_rro_ind(q))
+ if (mt76_queue_is_wed_rro_rxdmad_c(q) && !flush)
+ buf = mt76_dma_get_rxdmad_c_buf(dev, q, idx, len, more);
+
+ if (mt76_queue_is_wed_rro(q))
goto done;
ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
@@ -486,20 +558,50 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
if (!q->queued)
return NULL;
- if (mt76_queue_is_wed_rro_data(q))
- return NULL;
+ if (mt76_queue_is_wed_rro_data(q) || mt76_queue_is_wed_rro_msdu_pg(q))
+ goto done;
+
+ if (mt76_queue_is_wed_rro_ind(q)) {
+ struct mt76_wed_rro_ind *cmd;
+ u8 magic_cnt;
- if (!mt76_queue_is_wed_rro_ind(q)) {
+ if (flush)
+ goto done;
+
+ cmd = q->entry[idx].buf;
+ magic_cnt = FIELD_GET(RRO_IND_DATA1_MAGIC_CNT_MASK,
+ le32_to_cpu(cmd->data1));
+ if (magic_cnt != q->magic_cnt)
+ return NULL;
+
+ if (q->tail == q->ndesc - 1)
+ q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_WED_IND_CMD_CNT;
+ } else if (mt76_queue_is_wed_rro_rxdmad_c(q)) {
+ struct mt76_rro_rxdmad_c *dmad;
+ u16 magic_cnt;
+
+ if (flush)
+ goto done;
+
+ dmad = q->entry[idx].buf;
+ magic_cnt = FIELD_GET(RRO_RXDMAD_DATA3_MAGIC_CNT_MASK,
+ le32_to_cpu(dmad->data3));
+ if (magic_cnt != q->magic_cnt)
+ return NULL;
+
+ if (q->tail == q->ndesc - 1)
+ q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_MAGIC_CNT;
+ } else {
if (flush)
q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
return NULL;
}
-
+done:
q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
- return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
+ return mt76_dma_get_buf(dev, q, idx, len, info, more, drop, flush);
}
static int
@@ -557,6 +659,10 @@ mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
if (test_bit(MT76_RESET, &phy->state))
goto free_skb;
+ /* TODO: take non-linear skbs into account */
+ if (mt76_npu_device_active(dev) && skb_linearize(skb))
+ goto free_skb;
+
t = mt76_get_txwi(dev);
if (!t)
goto free_skb;
@@ -604,6 +710,9 @@ mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
if (ret < 0)
goto unmap;
+ if (mt76_npu_device_active(dev))
+ return mt76_npu_dma_add_buf(phy, q, skb, &tx_info.buf[1], txwi);
+
return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
tx_info.info, tx_info.skb, t);
@@ -650,7 +759,8 @@ mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q,
void *buf = NULL;
int offset;
- if (mt76_queue_is_wed_rro_ind(q))
+ if (mt76_queue_is_wed_rro_ind(q) ||
+ mt76_queue_is_wed_rro_rxdmad_c(q))
goto done;
buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
@@ -680,9 +790,6 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
{
int frames;
- if (!q->ndesc)
- return 0;
-
spin_lock_bh(&q->lock);
frames = mt76_dma_rx_fill_buf(dev, q, allow_direct);
spin_unlock_bh(&q->lock);
@@ -708,27 +815,23 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
q->ndesc = n_desc;
q->buf_size = bufsize;
q->hw_idx = idx;
+ q->dev = dev;
+
+ if (mt76_queue_is_wed_rro_ind(q))
+ size = sizeof(struct mt76_wed_rro_desc);
+ else if (mt76_queue_is_npu_tx(q))
+ size = sizeof(struct airoha_npu_tx_dma_desc);
+ else if (mt76_queue_is_npu_rx(q))
+ size = sizeof(struct airoha_npu_rx_dma_desc);
+ else
+ size = sizeof(struct mt76_desc);
- size = mt76_queue_is_wed_rro_ind(q) ? sizeof(struct mt76_wed_rro_desc)
- : sizeof(struct mt76_desc);
q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size,
&q->desc_dma, GFP_KERNEL);
if (!q->desc)
return -ENOMEM;
- if (mt76_queue_is_wed_rro_ind(q)) {
- struct mt76_wed_rro_desc *rro_desc;
- int i;
-
- rro_desc = (struct mt76_wed_rro_desc *)q->desc;
- for (i = 0; i < q->ndesc; i++) {
- struct mt76_wed_rro_ind *cmd;
-
- cmd = (struct mt76_wed_rro_ind *)&rro_desc[i];
- cmd->magic_cnt = MT_DMA_WED_IND_CMD_CNT - 1;
- }
- }
-
+ mt76_dma_queue_magic_cnt_init(dev, q);
size = q->ndesc * sizeof(*q->entry);
q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
if (!q->entry)
@@ -738,6 +841,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
if (ret)
return ret;
+ mt76_npu_queue_setup(dev, q);
ret = mt76_wed_dma_setup(dev, q, false);
if (ret)
return ret;
@@ -748,7 +852,10 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
return 0;
}
- mt76_dma_queue_reset(dev, q);
+ /* The HW-specific driver is expected to reset brand-new EMI queues,
+ * since it needs to set the CPU index pointer.
+ */
+ mt76_dma_queue_reset(dev, q, !mt76_queue_is_emi(q));
return 0;
}
@@ -762,6 +869,11 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
if (!q->ndesc)
return;
+ if (mt76_queue_is_npu(q)) {
+ mt76_npu_queue_cleanup(dev, q);
+ return;
+ }
+
do {
spin_lock_bh(&q->lock);
buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
@@ -791,7 +903,8 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
if (!q->ndesc)
return;
- if (!mt76_queue_is_wed_rro_ind(q)) {
+ if (!mt76_queue_is_wed_rro_ind(q) &&
+ !mt76_queue_is_wed_rro_rxdmad_c(q) && !mt76_queue_is_npu(q)) {
int i;
for (i = 0; i < q->ndesc; i++)
@@ -811,7 +924,10 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
return;
mt76_dma_sync_idx(dev, q);
- mt76_dma_rx_fill_buf(dev, q, false);
+ if (mt76_queue_is_npu(q))
+ mt76_npu_fill_rx_queue(dev, q);
+ else
+ mt76_dma_rx_fill(dev, q, false);
}
static void
@@ -855,8 +971,9 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
bool allow_direct = !mt76_queue_is_wed_rx(q);
bool more;
- if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
- mt76_queue_is_wed_tx_free(q)) {
+ if ((q->flags & MT_QFLAG_WED_RRO_EN) ||
+ (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
+ mt76_queue_is_wed_tx_free(q))) {
dma_idx = Q_READ(q, dma_idx);
check_ddone = true;
}
@@ -878,6 +995,20 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
if (!data)
break;
+ if (PTR_ERR(data) == -EAGAIN) {
+ done++;
+ continue;
+ }
+
+ if (mt76_queue_is_wed_rro_ind(q) && dev->drv->rx_rro_ind_process)
+ dev->drv->rx_rro_ind_process(dev, data);
+
+ if (mt76_queue_is_wed_rro(q) &&
+ !mt76_queue_is_wed_rro_rxdmad_c(q)) {
+ done++;
+ continue;
+ }
+
if (drop)
goto free_frag;
@@ -955,6 +1086,15 @@ int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);
+static void
+mt76_dma_rx_queue_init(struct mt76_dev *dev, enum mt76_rxq_id qid,
+ int (*poll)(struct napi_struct *napi, int budget))
+{
+ netif_napi_add(dev->napi_dev, &dev->napi[qid], poll);
+ mt76_dma_rx_fill_buf(dev, &dev->q_rx[qid], false);
+ napi_enable(&dev->napi[qid]);
+}
+
static int
mt76_dma_init(struct mt76_dev *dev,
int (*poll)(struct napi_struct *napi, int budget))
@@ -987,9 +1127,10 @@ mt76_dma_init(struct mt76_dev *dev,
init_completion(&dev->mmio.wed_reset_complete);
mt76_for_each_q_rx(dev, i) {
- netif_napi_add(dev->napi_dev, &dev->napi[i], poll);
- mt76_dma_rx_fill_buf(dev, &dev->q_rx[i], false);
- napi_enable(&dev->napi[i]);
+ if (mt76_queue_is_wed_rro(&dev->q_rx[i]))
+ continue;
+
+ mt76_dma_rx_queue_init(dev, i, poll);
}
return 0;
@@ -1002,6 +1143,7 @@ static const struct mt76_queue_ops mt76_dma_ops = {
.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
.tx_queue_skb = mt76_dma_tx_queue_skb,
.tx_cleanup = mt76_dma_tx_cleanup,
+ .rx_queue_init = mt76_dma_rx_queue_init,
.rx_cleanup = mt76_dma_rx_cleanup,
.rx_reset = mt76_dma_rx_reset,
.kick = mt76_dma_kick_queue,
diff --git a/sys/contrib/dev/mediatek/mt76/dma.h b/sys/contrib/dev/mediatek/mt76/dma.h
index e3ddc7a83757..4a63de6c5bf5 100644
--- a/sys/contrib/dev/mediatek/mt76/dma.h
+++ b/sys/contrib/dev/mediatek/mt76/dma.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
@@ -31,7 +31,12 @@
#define MT_DMA_CTL_PN_CHK_FAIL BIT(13)
#define MT_DMA_CTL_VER_MASK BIT(7)
-#define MT_DMA_RRO_EN BIT(13)
+#define MT_DMA_SDP0 GENMASK(15, 0)
+#define MT_DMA_TOKEN_ID GENMASK(31, 16)
+#define MT_DMA_MAGIC_MASK GENMASK(31, 28)
+#define MT_DMA_RRO_EN BIT(13)
+
+#define MT_DMA_MAGIC_CNT 16
#define MT_DMA_WED_IND_CMD_CNT 8
#define MT_DMA_WED_IND_REASON GENMASK(15, 12)
@@ -41,6 +46,73 @@
#define MT_FCE_INFO_LEN 4
#define MT_RX_RXWI_LEN 32
+#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)
+
+#define Q_READ(_q, _field) ({ \
+ u32 _offset = offsetof(struct mt76_queue_regs, _field); \
+ u32 _val; \
+ if ((_q)->flags & MT_QFLAG_WED) \
+ _val = mtk_wed_device_reg_read((_q)->wed, \
+ ((_q)->wed_regs + \
+ _offset)); \
+ else \
+ _val = readl(&(_q)->regs->_field); \
+ _val; \
+})
+
+#define Q_WRITE(_q, _field, _val) do { \
+ u32 _offset = offsetof(struct mt76_queue_regs, _field); \
+ if ((_q)->flags & MT_QFLAG_WED) \
+ mtk_wed_device_reg_write((_q)->wed, \
+ ((_q)->wed_regs + _offset), \
+ _val); \
+ else \
+ writel(_val, &(_q)->regs->_field); \
+} while (0)
+
+#elif IS_ENABLED(CONFIG_MT76_NPU)
+
+#define Q_READ(_q, _field) ({ \
+ u32 _offset = offsetof(struct mt76_queue_regs, _field); \
+ u32 _val = 0; \
+ if ((_q)->flags & MT_QFLAG_NPU) { \
+ struct airoha_npu *npu; \
+ \
+ rcu_read_lock(); \
+ npu = rcu_dereference(q->dev->mmio.npu); \
+ if (npu) \
+ regmap_read(npu->regmap, \
+ ((_q)->wed_regs + _offset), &_val); \
+ rcu_read_unlock(); \
+ } else { \
+ _val = readl(&(_q)->regs->_field); \
+ } \
+ _val; \
+})
+
+#define Q_WRITE(_q, _field, _val) do { \
+ u32 _offset = offsetof(struct mt76_queue_regs, _field); \
+ if ((_q)->flags & MT_QFLAG_NPU) { \
+ struct airoha_npu *npu; \
+ \
+ rcu_read_lock(); \
+ npu = rcu_dereference(q->dev->mmio.npu); \
+ if (npu) \
+ regmap_write(npu->regmap, \
+ ((_q)->wed_regs + _offset), _val); \
+ rcu_read_unlock(); \
+ } else { \
+ writel(_val, &(_q)->regs->_field); \
+ } \
+} while (0)
+
+#else
+
+#define Q_READ(_q, _field) readl(&(_q)->regs->_field)
+#define Q_WRITE(_q, _field, _val) writel(_val, &(_q)->regs->_field)
+
+#endif
+
struct mt76_desc {
__le32 buf0;
__le32 ctrl;
@@ -53,6 +125,21 @@ struct mt76_wed_rro_desc {
__le32 buf1;
} __packed __aligned(4);
+/* data1 */
+#define RRO_RXDMAD_DATA1_LS_MASK BIT(30)
+#define RRO_RXDMAD_DATA1_SDL0_MASK GENMASK(29, 16)
+/* data2 */
+#define RRO_RXDMAD_DATA2_RX_TOKEN_ID_MASK GENMASK(31, 16)
+#define RRO_RXDMAD_DATA2_IND_REASON_MASK GENMASK(15, 12)
+/* data3 */
+#define RRO_RXDMAD_DATA3_MAGIC_CNT_MASK GENMASK(31, 28)
+struct mt76_rro_rxdmad_c {
+ __le32 data0;
+ __le32 data1;
+ __le32 data2;
+ __le32 data3;
+};
+
enum mt76_qsel {
MT_QSEL_MGMT,
MT_QSEL_HCCA,
@@ -81,14 +168,13 @@ void mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);
int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
bool allow_direct);
-void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
- bool reset_idx);
-void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q);
+void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
+ bool reset_idx);
static inline void
mt76_dma_reset_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- dev->queue_ops->reset_q(dev, q);
+ dev->queue_ops->reset_q(dev, q, true);
if (mtk_wed_device_active(&dev->mmio.wed))
mt76_wed_dma_setup(dev, q, true);
}
diff --git a/sys/contrib/dev/mediatek/mt76/eeprom.c b/sys/contrib/dev/mediatek/mt76/eeprom.c
index f2eb2cd6e509..1d5b95226a16 100644
--- a/sys/contrib/dev/mediatek/mt76/eeprom.c
+++ b/sys/contrib/dev/mediatek/mt76/eeprom.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
@@ -175,14 +175,17 @@ static int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int len)
#endif
}
-void
+int
mt76_eeprom_override(struct mt76_phy *phy)
{
struct mt76_dev *dev = phy->dev;
#if defined(CONFIG_OF)
struct device_node *np = dev->dev->of_node;
+ int err;
- of_get_mac_address(np, phy->macaddr);
+ err = of_get_mac_address(np, phy->macaddr);
+ if (err == -EPROBE_DEFER)
+ return err;
if (!is_valid_ether_addr(phy->macaddr)) {
#endif
@@ -193,6 +196,8 @@ mt76_eeprom_override(struct mt76_phy *phy)
#if defined(CONFIG_OF)
}
#endif
+
+ return 0;
}
EXPORT_SYMBOL_GPL(mt76_eeprom_override);
@@ -269,6 +274,19 @@ mt76_get_of_array(struct device_node *np, char *name, size_t *len, int min)
return prop->value;
}
+
+static const s8 *
+mt76_get_of_array_s8(struct device_node *np, char *name, size_t *len, int min)
+{
+ struct property *prop = of_find_property(np, name, NULL);
+
+ if (!prop || !prop->value || prop->length < min)
+ return NULL;
+
+ *len = prop->length;
+
+ return prop->value;
+}
#endif
struct device_node *
@@ -313,7 +331,7 @@ mt76_get_txs_delta(struct device_node *np, u8 nss)
}
static void
-mt76_apply_array_limit(s8 *pwr, size_t pwr_len, const __be32 *data,
+mt76_apply_array_limit(s8 *pwr, size_t pwr_len, const s8 *data,
s8 target_power, s8 nss_delta, s8 *max_power)
{
int i;
@@ -322,30 +340,29 @@ mt76_apply_array_limit(s8 *pwr, size_t pwr_len, const __be32 *data,
return;
for (i = 0; i < pwr_len; i++) {
- pwr[i] = min_t(s8, target_power,
- be32_to_cpu(data[i]) + nss_delta);
+ pwr[i] = min_t(s8, target_power, data[i] + nss_delta);
*max_power = max(*max_power, pwr[i]);
}
}
static void
mt76_apply_multi_array_limit(s8 *pwr, size_t pwr_len, s8 pwr_num,
- const __be32 *data, size_t len, s8 target_power,
- s8 nss_delta, s8 *max_power)
+ const s8 *data, size_t len, s8 target_power,
+ s8 nss_delta)
{
int i, cur;
+ s8 max_power = -128;
if (!data)
return;
- len /= 4;
- cur = be32_to_cpu(data[0]);
+ cur = data[0];
for (i = 0; i < pwr_num; i++) {
if (len < pwr_len + 1)
break;
mt76_apply_array_limit(pwr + pwr_len * i, pwr_len, data + 1,
- target_power, nss_delta, max_power);
+ target_power, nss_delta, &max_power);
if (--cur > 0)
continue;
@@ -354,7 +371,7 @@ mt76_apply_multi_array_limit(s8 *pwr, size_t pwr_len, s8 pwr_num,
if (!len)
break;
- cur = be32_to_cpu(data[0]);
+ cur = data[0];
}
}
#endif
@@ -367,7 +384,7 @@ s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
struct mt76_dev *dev = phy->dev;
#if defined(CONFIG_OF)
struct device_node *np;
- const __be32 *val;
+ const s8 *val;
char name[16];
#endif
u32 mcs_rates = dev->drv->mcs_rates;
@@ -379,12 +396,16 @@ s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
 s8 max_power = 0;
 #if defined(CONFIG_OF)
- s8 txs_delta;
+ s8 max_power_backoff = -127;
+ s8 txs_delta;
+ int n_chains = hweight16(phy->chainmask);
+ s8 target_power_combine = target_power + mt76_tx_power_path_delta(n_chains);
#endif
if (!mcs_rates)
mcs_rates = 10;
- memset(dest, target_power, sizeof(*dest));
+ memset(dest, target_power, sizeof(*dest) - sizeof(dest->path));
+ memset(&dest->path, 0, sizeof(dest->path));
if (!IS_ENABLED(CONFIG_OF))
return target_power;
@@ -419,24 +441,47 @@ s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
txs_delta = mt76_get_txs_delta(np, hweight16(phy->chainmask));
- val = mt76_get_of_array(np, "rates-cck", &len, ARRAY_SIZE(dest->cck));
+ val = mt76_get_of_array_s8(np, "rates-cck", &len, ARRAY_SIZE(dest->cck));
mt76_apply_array_limit(dest->cck, ARRAY_SIZE(dest->cck), val,
target_power, txs_delta, &max_power);
- val = mt76_get_of_array(np, "rates-ofdm",
- &len, ARRAY_SIZE(dest->ofdm));
+ val = mt76_get_of_array_s8(np, "rates-ofdm",
+ &len, ARRAY_SIZE(dest->ofdm));
mt76_apply_array_limit(dest->ofdm, ARRAY_SIZE(dest->ofdm), val,
target_power, txs_delta, &max_power);
- val = mt76_get_of_array(np, "rates-mcs", &len, mcs_rates + 1);
+ val = mt76_get_of_array_s8(np, "rates-mcs", &len, mcs_rates + 1);
mt76_apply_multi_array_limit(dest->mcs[0], ARRAY_SIZE(dest->mcs[0]),
ARRAY_SIZE(dest->mcs), val, len,
- target_power, txs_delta, &max_power);
+ target_power, txs_delta);
- val = mt76_get_of_array(np, "rates-ru", &len, ru_rates + 1);
+ val = mt76_get_of_array_s8(np, "rates-ru", &len, ru_rates + 1);
mt76_apply_multi_array_limit(dest->ru[0], ARRAY_SIZE(dest->ru[0]),
ARRAY_SIZE(dest->ru), val, len,
- target_power, txs_delta, &max_power);
+ target_power, txs_delta);
+
+ max_power_backoff = max_power;
+ val = mt76_get_of_array_s8(np, "paths-cck", &len, ARRAY_SIZE(dest->path.cck));
+ mt76_apply_array_limit(dest->path.cck, ARRAY_SIZE(dest->path.cck), val,
+ target_power_combine, txs_delta, &max_power_backoff);
+
+ val = mt76_get_of_array_s8(np, "paths-ofdm", &len, ARRAY_SIZE(dest->path.ofdm));
+ mt76_apply_array_limit(dest->path.ofdm, ARRAY_SIZE(dest->path.ofdm), val,
+ target_power_combine, txs_delta, &max_power_backoff);
+
+ val = mt76_get_of_array_s8(np, "paths-ofdm-bf", &len, ARRAY_SIZE(dest->path.ofdm_bf));
+ mt76_apply_array_limit(dest->path.ofdm_bf, ARRAY_SIZE(dest->path.ofdm_bf), val,
+ target_power_combine, txs_delta, &max_power_backoff);
+
+ val = mt76_get_of_array_s8(np, "paths-ru", &len, ARRAY_SIZE(dest->path.ru[0]) + 1);
+ mt76_apply_multi_array_limit(dest->path.ru[0], ARRAY_SIZE(dest->path.ru[0]),
+ ARRAY_SIZE(dest->path.ru), val, len,
+ target_power_combine, txs_delta);
+
+ val = mt76_get_of_array_s8(np, "paths-ru-bf", &len, ARRAY_SIZE(dest->path.ru_bf[0]) + 1);
+ mt76_apply_multi_array_limit(dest->path.ru_bf[0], ARRAY_SIZE(dest->path.ru_bf[0]),
+ ARRAY_SIZE(dest->path.ru_bf), val, len,
+ target_power_combine, txs_delta);
#endif
return max_power;
diff --git a/sys/contrib/dev/mediatek/mt76/mac80211.c b/sys/contrib/dev/mediatek/mt76/mac80211.c
index d2ace8870451..bd7aaeb4398e 100644
--- a/sys/contrib/dev/mediatek/mt76/mac80211.c
+++ b/sys/contrib/dev/mediatek/mt76/mac80211.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
@@ -652,6 +652,8 @@ int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
case MT_RXQ_MAIN:
case MT_RXQ_BAND1:
case MT_RXQ_BAND2:
+ case MT_RXQ_NPU0:
+ case MT_RXQ_NPU1:
pp_params.pool_size = 256;
break;
default:
@@ -846,6 +848,7 @@ void mt76_free_device(struct mt76_dev *dev)
destroy_workqueue(dev->wq);
dev->wq = NULL;
}
+ mt76_npu_deinit(dev);
ieee80211_free_hw(dev->hw);
}
EXPORT_SYMBOL_GPL(mt76_free_device);
@@ -856,6 +859,9 @@ static void mt76_reset_phy(struct mt76_phy *phy)
return;
INIT_LIST_HEAD(&phy->tx_list);
+ phy->num_sta = 0;
+ phy->chanctx = NULL;
+ mt76_roc_complete(phy);
}
void mt76_reset_device(struct mt76_dev *dev)
@@ -1267,6 +1273,8 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
mstat = *((struct mt76_rx_status *)skb->cb);
memset(status, 0, sizeof(*status));
+ skb->priority = mstat.qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
+
status->flag = mstat.flag;
status->freq = mstat.freq;
status->enc_flags = mstat.enc_flags;
@@ -1582,7 +1590,8 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
mt76_check_sta(dev, skb);
- if (mtk_wed_device_active(&dev->mmio.wed))
+ if (mtk_wed_device_active(&dev->mmio.wed) ||
+ mt76_npu_device_active(dev))
__skb_queue_tail(&frames, skb);
else
mt76_rx_aggr_reorder(skb, &frames);
@@ -2096,3 +2105,55 @@ void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif)
mt76_abort_roc(mvif->roc_phy);
}
EXPORT_SYMBOL_GPL(mt76_vif_cleanup);
+
+u16 mt76_select_links(struct ieee80211_vif *vif, int max_active_links)
+{
+ unsigned long usable_links = ieee80211_vif_usable_links(vif);
+ struct {
+ u8 link_id;
+ enum nl80211_band band;
+ } data[IEEE80211_MLD_MAX_NUM_LINKS];
+ unsigned int link_id;
+ int i, n_data = 0;
+ u16 sel_links = 0;
+
+ if (!ieee80211_vif_is_mld(vif))
+ return 0;
+
+ if (vif->active_links == usable_links)
+ return vif->active_links;
+
+ rcu_read_lock();
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf;
+
+ link_conf = rcu_dereference(vif->link_conf[link_id]);
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ data[n_data].link_id = link_id;
+ data[n_data].band = link_conf->chanreq.oper.chan->band;
+ n_data++;
+ }
+ rcu_read_unlock();
+
+ for (i = 0; i < n_data; i++) {
+ int j;
+
+ if (!(BIT(data[i].link_id) & vif->active_links))
+ continue;
+
+ sel_links = BIT(data[i].link_id);
+ for (j = 0; j < n_data; j++) {
+ if (data[i].band != data[j].band) {
+ sel_links |= BIT(data[j].link_id);
+ if (hweight16(sel_links) == max_active_links)
+ break;
+ }
+ }
+ break;
+ }
+
+ return sel_links;
+}
+EXPORT_SYMBOL_GPL(mt76_select_links);
diff --git a/sys/contrib/dev/mediatek/mt76/mcu.c b/sys/contrib/dev/mediatek/mt76/mcu.c
index d554eed10986..acf532c0bfe9 100644
--- a/sys/contrib/dev/mediatek/mt76/mcu.c
+++ b/sys/contrib/dev/mediatek/mt76/mcu.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2019 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mmio.c b/sys/contrib/dev/mediatek/mt76/mmio.c
index 48943686003c..15fb8f436ab6 100644
--- a/sys/contrib/dev/mediatek/mt76/mmio.c
+++ b/sys/contrib/dev/mediatek/mt76/mmio.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
@@ -41,20 +41,30 @@ static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
static void mt76_mmio_write_copy(struct mt76_dev *dev, u32 offset,
const void *data, int len)
{
+ int i;
+
+ for (i = 0; i < ALIGN(len, 4); i += 4)
#if defined(__linux__)
- __iowrite32_copy(dev->mmio.regs + offset, data, DIV_ROUND_UP(len, 4));
+ writel(get_unaligned_le32(data + i),
+ dev->mmio.regs + offset + i);
#elif defined(__FreeBSD__)
- __iowrite32_copy((u8 *)dev->mmio.regs + offset, data, DIV_ROUND_UP(len, 4));
+ writel(get_unaligned_le32((const u8 *)data + i),
+ (u8 *)dev->mmio.regs + offset + i);
#endif
}
static void mt76_mmio_read_copy(struct mt76_dev *dev, u32 offset,
void *data, int len)
{
+ int i;
+
+ for (i = 0; i < ALIGN(len, 4); i += 4)
#if defined(__linux__)
- __ioread32_copy(data, dev->mmio.regs + offset, DIV_ROUND_UP(len, 4));
+ put_unaligned_le32(readl(dev->mmio.regs + offset + i),
+ data + i);
#elif defined(__FreeBSD__)
- __ioread32_copy(data, (u8 *)dev->mmio.regs + offset, DIV_ROUND_UP(len, 4));
+ put_unaligned_le32(readl((u8 *)dev->mmio.regs + offset + i),
+ (u8 *)data + i);
#endif
}
diff --git a/sys/contrib/dev/mediatek/mt76/mt76.h b/sys/contrib/dev/mediatek/mt76/mt76.h
index 0b7686e6c36e..840725b8b020 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76.h
+++ b/sys/contrib/dev/mediatek/mt76/mt76.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
@@ -13,6 +13,7 @@
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
+#include <linux/soc/airoha/airoha_offload.h>
#include <linux/soc/mediatek/mtk_wed.h>
#if defined(__FreeBSD__)
#include <linux/wait.h>
@@ -40,6 +41,8 @@
#define MT_QFLAG_WED BIT(5)
#define MT_QFLAG_WED_RRO BIT(6)
#define MT_QFLAG_WED_RRO_EN BIT(7)
+#define MT_QFLAG_EMI_EN BIT(8)
+#define MT_QFLAG_NPU BIT(9)
#define __MT_WED_Q(_type, _n) (MT_QFLAG_WED | \
FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
@@ -52,6 +55,13 @@
#define MT_WED_RRO_Q_DATA(_n) __MT_WED_RRO_Q(MT76_WED_RRO_Q_DATA, _n)
#define MT_WED_RRO_Q_MSDU_PG(_n) __MT_WED_RRO_Q(MT76_WED_RRO_Q_MSDU_PG, _n)
#define MT_WED_RRO_Q_IND __MT_WED_RRO_Q(MT76_WED_RRO_Q_IND, 0)
+#define MT_WED_RRO_Q_RXDMAD_C __MT_WED_RRO_Q(MT76_WED_RRO_Q_RXDMAD_C, 0)
+
+#define __MT_NPU_Q(_type, _n) (MT_QFLAG_NPU | \
+ FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
+ FIELD_PREP(MT_QFLAG_WED_RING, _n))
+#define MT_NPU_Q_TX(_n) __MT_NPU_Q(MT76_WED_Q_TX, _n)
+#define MT_NPU_Q_RX(_n) __MT_NPU_Q(MT76_WED_Q_RX, _n)
struct mt76_dev;
struct mt76_phy;
@@ -78,6 +88,13 @@ enum mt76_wed_type {
MT76_WED_RRO_Q_DATA,
MT76_WED_RRO_Q_MSDU_PG,
MT76_WED_RRO_Q_IND,
+ MT76_WED_RRO_Q_RXDMAD_C,
+};
+
+enum mt76_hwrro_mode {
+ MT76_HWRRO_OFF,
+ MT76_HWRRO_V3,
+ MT76_HWRRO_V3_1,
};
struct mt76_bus_ops {
@@ -136,6 +153,9 @@ enum mt76_rxq_id {
MT_RXQ_TXFREE_BAND1,
MT_RXQ_TXFREE_BAND2,
MT_RXQ_RRO_IND,
+ MT_RXQ_RRO_RXDMAD_C,
+ MT_RXQ_NPU0,
+ MT_RXQ_NPU1,
__MT_RXQ_MAX
};
@@ -239,8 +259,12 @@ struct mt76_queue {
u8 buf_offset;
u16 flags;
+ u8 magic_cnt;
+
+ __le16 *emi_cpu_idx;
struct mtk_wed_device *wed;
+ struct mt76_dev *dev;
u32 wed_regs;
dma_addr_t desc_dma;
@@ -293,11 +317,15 @@ struct mt76_queue_ops {
void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
bool flush);
+ void (*rx_queue_init)(struct mt76_dev *dev, enum mt76_rxq_id qid,
+ int (*poll)(struct napi_struct *napi, int budget));
+
void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q);
void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
- void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
+ void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q,
+ bool reset_idx);
};
enum mt76_phy_type {
@@ -405,15 +433,16 @@ struct mt76_txq {
bool aggr;
};
+/* data0 */
+#define RRO_IND_DATA0_IND_REASON_MASK GENMASK(31, 28)
+#define RRO_IND_DATA0_START_SEQ_MASK GENMASK(27, 16)
+#define RRO_IND_DATA0_SEQ_ID_MASK GENMASK(11, 0)
+/* data1 */
+#define RRO_IND_DATA1_MAGIC_CNT_MASK GENMASK(31, 29)
+#define RRO_IND_DATA1_IND_COUNT_MASK GENMASK(12, 0)
struct mt76_wed_rro_ind {
- u32 se_id : 12;
- u32 rsv : 4;
- u32 start_sn : 12;
- u32 ind_reason : 4;
- u32 ind_cnt : 13;
- u32 win_sz : 3;
- u32 rsv2 : 13;
- u32 magic_cnt : 3;
+ __le32 data0;
+ __le32 data1;
};
struct mt76_txwi_cache {
@@ -424,6 +453,8 @@ struct mt76_txwi_cache {
struct sk_buff *skb;
void *ptr;
};
+
+ u8 qid;
};
struct mt76_rx_tid {
@@ -540,6 +571,10 @@ struct mt76_driver_ops {
void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
+ void (*rx_rro_ind_process)(struct mt76_dev *dev, void *data);
+ int (*rx_rro_add_msdu_page)(struct mt76_dev *dev, struct mt76_queue *q,
+ dma_addr_t p, void *data);
+
void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
bool ps);
@@ -689,6 +724,11 @@ struct mt76_mmio {
struct mtk_wed_device wed_hif2;
struct completion wed_reset;
struct completion wed_reset_complete;
+
+ struct airoha_ppe_dev __rcu *ppe_dev;
+ struct airoha_npu __rcu *npu;
+ phys_addr_t phy_addr;
+ int npu_type;
};
struct mt76_rx_status {
@@ -917,6 +957,7 @@ struct mt76_dev {
struct mt76_queue q_rx[__MT_RXQ_MAX];
const struct mt76_queue_ops *queue_ops;
int tx_dma_idx[4];
+ enum mt76_hwrro_mode hwrro_mode;
struct mt76_worker tx_worker;
struct napi_struct tx_napi;
@@ -925,6 +966,7 @@ struct mt76_dev {
struct idr token;
u16 wed_token_count;
u16 token_count;
+ u16 token_start;
u16 token_size;
spinlock_t rx_token_lock;
@@ -1095,6 +1137,14 @@ struct mt76_power_limits {
s8 mcs[4][10];
s8 ru[7][12];
s8 eht[16][16];
+
+ struct {
+ s8 cck[4];
+ s8 ofdm[4];
+ s8 ofdm_bf[4];
+ s8 ru[7][10];
+ s8 ru_bf[7][10];
+ } path;
};
struct mt76_ethtool_worker_info {
@@ -1221,6 +1271,7 @@ static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
#define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mphy), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_rx_init(dev, ...) (dev)->mt76.queue_ops->rx_queue_init(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_cleanup(dev, ...) (dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_reset(dev, ...) (dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)
@@ -1233,6 +1284,15 @@ static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
#define mt76_dereference(p, dev) \
rcu_dereference_protected(p, lockdep_is_held(&(dev)->mutex))
+static inline struct mt76_dev *mt76_wed_to_dev(struct mtk_wed_device *wed)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ if (wed->wlan.hif2)
+ return container_of(wed, struct mt76_dev, mmio.wed_hif2);
+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
+ return container_of(wed, struct mt76_dev, mmio.wed);
+}
+
static inline struct mt76_wcid *
__mt76_wcid_ptr(struct mt76_dev *dev, u16 idx)
{
@@ -1275,7 +1335,7 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str,
s8 *val, int len);
int mt76_eeprom_init(struct mt76_dev *dev, int len);
-void mt76_eeprom_override(struct mt76_phy *phy);
+int mt76_eeprom_override(struct mt76_phy *phy);
int mt76_get_of_data_from_mtd(struct mt76_dev *dev, void *eep, int offset, int len);
int mt76_get_of_data_from_nvmem(struct mt76_dev *dev, void *eep,
const char *cell_name, int len);
@@ -1579,6 +1639,109 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);
+#ifdef CONFIG_MT76_NPU
+void mt76_npu_check_ppe(struct mt76_dev *dev, struct sk_buff *skb,
+ u32 info);
+int mt76_npu_dma_add_buf(struct mt76_phy *phy, struct mt76_queue *q,
+ struct sk_buff *skb, struct mt76_queue_buf *buf,
+ void *txwi_ptr);
+int mt76_npu_rx_queue_init(struct mt76_dev *dev, struct mt76_queue *q);
+int mt76_npu_fill_rx_queue(struct mt76_dev *dev, struct mt76_queue *q);
+void mt76_npu_queue_cleanup(struct mt76_dev *dev, struct mt76_queue *q);
+void mt76_npu_disable_irqs(struct mt76_dev *dev);
+int mt76_npu_init(struct mt76_dev *dev, phys_addr_t phy_addr, int type);
+void mt76_npu_deinit(struct mt76_dev *dev);
+void mt76_npu_queue_setup(struct mt76_dev *dev, struct mt76_queue *q);
+void mt76_npu_txdesc_cleanup(struct mt76_queue *q, int index);
+int mt76_npu_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct net_device *dev, enum tc_setup_type type,
+ void *type_data);
+#else
+static inline void mt76_npu_check_ppe(struct mt76_dev *dev,
+ struct sk_buff *skb, u32 info)
+{
+}
+
+static inline int mt76_npu_dma_add_buf(struct mt76_phy *phy,
+ struct mt76_queue *q,
+ struct sk_buff *skb,
+ struct mt76_queue_buf *buf,
+ void *txwi_ptr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mt76_npu_fill_rx_queue(struct mt76_dev *dev,
+ struct mt76_queue *q)
+{
+ return 0;
+}
+
+static inline void mt76_npu_queue_cleanup(struct mt76_dev *dev,
+ struct mt76_queue *q)
+{
+}
+
+static inline void mt76_npu_disable_irqs(struct mt76_dev *dev)
+{
+}
+
+static inline int mt76_npu_init(struct mt76_dev *dev, phys_addr_t phy_addr,
+ int type)
+{
+ return 0;
+}
+
+static inline void mt76_npu_deinit(struct mt76_dev *dev)
+{
+}
+
+static inline void mt76_npu_queue_setup(struct mt76_dev *dev,
+ struct mt76_queue *q)
+{
+}
+
+static inline void mt76_npu_txdesc_cleanup(struct mt76_queue *q,
+ int index)
+{
+}
+
+static inline int mt76_npu_net_setup_tc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct net_device *dev,
+ enum tc_setup_type type,
+ void *type_data)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_MT76_NPU */
+
+static inline bool mt76_npu_device_active(struct mt76_dev *dev)
+{
+ return !!rcu_access_pointer(dev->mmio.npu);
+}
+
+static inline bool mt76_ppe_device_active(struct mt76_dev *dev)
+{
+ return !!rcu_access_pointer(dev->mmio.ppe_dev);
+}
+
+static inline int mt76_npu_send_msg(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_set_cmd cmd,
+ u32 val, gfp_t gfp)
+{
+ return airoha_npu_wlan_send_msg(npu, ifindex, cmd, &val, sizeof(val),
+ gfp);
+}
+
+static inline int mt76_npu_get_msg(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_get_cmd cmd,
+ u32 *val, gfp_t gfp)
+{
+ return airoha_npu_wlan_get_msg(npu, ifindex, cmd, val, sizeof(*val),
+ gfp);
+}
+
static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
{
#ifdef CONFIG_NL80211_TESTMODE
@@ -1624,6 +1787,7 @@ int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
void mt76_scan_work(struct work_struct *work);
void mt76_abort_scan(struct mt76_dev *dev);
void mt76_roc_complete_work(struct work_struct *work);
+void mt76_roc_complete(struct mt76_phy *phy);
void mt76_abort_roc(struct mt76_phy *phy);
struct mt76_vif_link *mt76_get_vif_phy_link(struct mt76_phy *phy,
struct ieee80211_vif *vif);
@@ -1797,21 +1961,51 @@ static inline bool mt76_queue_is_wed_rro_ind(struct mt76_queue *q)
FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_IND;
}
+static inline bool mt76_queue_is_wed_rro_rxdmad_c(struct mt76_queue *q)
+{
+ return mt76_queue_is_wed_rro(q) &&
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_RXDMAD_C;
+}
+
static inline bool mt76_queue_is_wed_rro_data(struct mt76_queue *q)
{
return mt76_queue_is_wed_rro(q) &&
- (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_DATA ||
- FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_MSDU_PG);
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_DATA;
+}
+
+static inline bool mt76_queue_is_wed_rro_msdu_pg(struct mt76_queue *q)
+{
+ return mt76_queue_is_wed_rro(q) &&
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) ==
+ MT76_WED_RRO_Q_MSDU_PG;
}
static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
{
- if (!(q->flags & MT_QFLAG_WED))
- return false;
+ return (q->flags & MT_QFLAG_WED) &&
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
+}
+
+static inline bool mt76_queue_is_emi(struct mt76_queue *q)
+{
+ return q->flags & MT_QFLAG_EMI_EN;
+}
+
+static inline bool mt76_queue_is_npu(struct mt76_queue *q)
+{
+ return q->flags & MT_QFLAG_NPU;
+}
- return FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX ||
- mt76_queue_is_wed_rro_ind(q) || mt76_queue_is_wed_rro_data(q);
+static inline bool mt76_queue_is_npu_tx(struct mt76_queue *q)
+{
+ return mt76_queue_is_npu(q) &&
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TX;
+}
+static inline bool mt76_queue_is_npu_rx(struct mt76_queue *q)
+{
+ return mt76_queue_is_npu(q) &&
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
}
struct mt76_txwi_cache *
@@ -1835,7 +2029,8 @@ mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
{
struct page *page;
- page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
+ page = page_pool_alloc_frag(q->page_pool, offset, size,
+ GFP_ATOMIC | __GFP_NOWARN | GFP_DMA32);
if (!page)
return NULL;
@@ -1891,6 +2086,7 @@ mt76_vif_init(struct ieee80211_vif *vif, struct mt76_vif_data *mvif)
}
void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif);
+u16 mt76_select_links(struct ieee80211_vif *vif, int max_active_links);
static inline struct mt76_vif_link *
mt76_vif_link(struct mt76_dev *dev, struct ieee80211_vif *vif, int link_id)
diff --git a/sys/contrib/dev/mediatek/mt76/mt7603/Kconfig b/sys/contrib/dev/mediatek/mt76/mt7603/Kconfig
new file mode 100644
index 000000000000..ae40a596e49c
--- /dev/null
+++ b/sys/contrib/dev/mediatek/mt76/mt7603/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+config MT7603E
+ tristate "MediaTek MT7603E (PCIe) and MT76x8 WLAN support"
+ select MT76_CORE
+ depends on MAC80211
+ depends on PCI
+ help
+ This adds support for MT7603E PCIe wireless devices and the WLAN core
+ on MT7628/MT7688 SoC devices. This family supports IEEE 802.11n 2x2
+ to 300Mbps PHY rate.
+
+ To compile this driver as a module, choose M here.
diff --git a/sys/contrib/dev/mediatek/mt76/mt7603/Makefile b/sys/contrib/dev/mediatek/mt76/mt7603/Makefile
new file mode 100644
index 000000000000..e954165ee133
--- /dev/null
+++ b/sys/contrib/dev/mediatek/mt76/mt7603/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+obj-$(CONFIG_MT7603E) += mt7603e.o
+
+mt7603e-y := \
+ pci.o soc.o main.o init.o mcu.o \
+ core.o dma.o mac.o eeprom.o \
+ beacon.o debugfs.o
diff --git a/sys/contrib/dev/mediatek/mt76/mt7603/beacon.c b/sys/contrib/dev/mediatek/mt76/mt7603/beacon.c
index 6457ee06bb5a..300a7f9c2ef1 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7603/beacon.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7603/beacon.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
#include "mt7603.h"
diff --git a/sys/contrib/dev/mediatek/mt76/mt7603/core.c b/sys/contrib/dev/mediatek/mt76/mt7603/core.c
index 915b8349146a..9c2943fd904e 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7603/core.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7603/core.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
#include "mt7603.h"
#include "../trace.h"
diff --git a/sys/contrib/dev/mediatek/mt76/mt7603/debugfs.c b/sys/contrib/dev/mediatek/mt76/mt7603/debugfs.c
index 3967f2f05774..c891ad5498e6 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7603/debugfs.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7603/debugfs.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
#include "mt7603.h"
diff --git a/sys/contrib/dev/mediatek/mt76/mt7603/dma.c b/sys/contrib/dev/mediatek/mt76/mt7603/dma.c
index e26cc78fff94..3a16851524a0 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7603/dma.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7603/dma.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
#include "mt7603.h"
#include "mac.h"
diff --git a/sys/contrib/dev/mediatek/mt76/mt7603/eeprom.c b/sys/contrib/dev/mediatek/mt76/mt7603/eeprom.c
index f5a6b03bc61d..b89db2db6573 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7603/eeprom.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7603/eeprom.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
#include <linux/of.h>
#include "mt7603.h"
@@ -182,7 +182,6 @@ int mt7603_eeprom_init(struct mt7603_dev *dev)
dev->mphy.antenna_mask = 1;
dev->mphy.chainmask = dev->mphy.antenna_mask;
- mt76_eeprom_override(&dev->mphy);
- return 0;
+ return mt76_eeprom_override(&dev->mphy);
}
diff --git a/sys/contrib/dev/mediatek/mt76/mt7603/eeprom.h b/sys/contrib/dev/mediatek/mt76/mt7603/eeprom.h
index 4687d6dc00dc..b6b746d1e56f 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7603/eeprom.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7603/eeprom.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
#ifndef __MT7603_EEPROM_H
#define __MT7603_EEPROM_H
diff --git a/sys/contrib/dev/mediatek/mt76/mt7603/init.c b/sys/contrib/dev/mediatek/mt76/mt7603/init.c
index 86617a3e4328..10f2ec70c792 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7603/init.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7603/init.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
#include <linux/etherdevice.h>
#include "mt7603.h"
diff --git a/sys/contrib/dev/mediatek/mt76/mt7603/mac.c b/sys/contrib/dev/mediatek/mt76/mt7603/mac.c
index 6387f9e61060..d3110eeb45d7 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7603/mac.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7603/mac.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
diff --git a/sys/contrib/dev/mediatek/mt76/mt7603/mac.h b/sys/contrib/dev/mediatek/mt76/mt7603/mac.h
index 17e34ecf2bfb..9f5fab51ff83 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7603/mac.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7603/mac.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
#ifndef __MT7603_MAC_H
#define __MT7603_MAC_H
diff --git a/sys/contrib/dev/mediatek/mt76/mt7603/main.c b/sys/contrib/dev/mediatek/mt76/mt7603/main.c
index 0d7c84941cd0..0f3a7508996c 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7603/main.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7603/main.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
diff --git a/sys/contrib/dev/mediatek/mt76/mt7603/mcu.c b/sys/contrib/dev/mediatek/mt76/mt7603/mcu.c
index 301668c3cc92..e432cce97640 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7603/mcu.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7603/mcu.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
#include <linux/firmware.h>
#include "mt7603.h"
diff --git a/sys/contrib/dev/mediatek/mt76/mt7603/mcu.h b/sys/contrib/dev/mediatek/mt76/mt7603/mcu.h
index 30df8a3fd11a..7debe76cd092 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7603/mcu.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7603/mcu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
#ifndef __MT7603_MCU_H
#define __MT7603_MCU_H
diff --git a/sys/contrib/dev/mediatek/mt76/mt7603/mt7603.h b/sys/contrib/dev/mediatek/mt76/mt7603/mt7603.h
index 55a034ccbacd..071bfab3af7c 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7603/mt7603.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7603/mt7603.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
#ifndef __MT7603_H
#define __MT7603_H
diff --git a/sys/contrib/dev/mediatek/mt76/mt7603/pci.c b/sys/contrib/dev/mediatek/mt76/mt7603/pci.c
index 3d94cdb4314a..5fee610597a4 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7603/pci.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7603/pci.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/sys/contrib/dev/mediatek/mt76/mt7603/regs.h b/sys/contrib/dev/mediatek/mt76/mt7603/regs.h
index 524bceb8e958..97942f5ebdb4 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7603/regs.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7603/regs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
#ifndef __MT7603_REGS_H
#define __MT7603_REGS_H
diff --git a/sys/contrib/dev/mediatek/mt76/mt7603/soc.c b/sys/contrib/dev/mediatek/mt76/mt7603/soc.c
index 08590aa68356..b74256efba55 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7603/soc.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7603/soc.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
#include <linux/kernel.h>
#include <linux/module.h>
@@ -48,7 +48,7 @@ mt76_wmac_probe(struct platform_device *pdev)
return 0;
error:
- ieee80211_free_hw(mt76_hw(dev));
+ mt76_free_device(mdev);
return ret;
}
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/Kconfig b/sys/contrib/dev/mediatek/mt76/mt7615/Kconfig
new file mode 100644
index 000000000000..8cc0c1b5c24e
--- /dev/null
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/Kconfig
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+
+config MT7615_COMMON
+ tristate
+ select WANT_DEV_COREDUMP
+ select MT76_CONNAC_LIB
+
+config MT7615E
+ tristate "MediaTek MT7615E and MT7663E (PCIe) support"
+ select MT7615_COMMON
+ depends on MAC80211
+ depends on PCI
+ help
+ This adds support for MT7615-based PCIe wireless devices,
+ which support concurrent dual-band operation at both 5GHz
+ and 2.4GHz, IEEE 802.11ac 4x4:4SS 1733Mbps PHY rate, wave2
+ MU-MIMO up to 4 users/group and 160MHz channels.
+
+ To compile this driver as a module, choose M here.
+
+config MT7622_WMAC
+ bool "MT7622 (SoC) WMAC support"
+ depends on MT7615E
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select REGMAP
+ default y
+ help
+ This adds support for the built-in WMAC on MT7622 SoC devices
+ which has the same feature set as a MT7615, but limited to
+ 2.4 GHz only.
+
+config MT7663_USB_SDIO_COMMON
+ tristate
+ select MT7615_COMMON
+
+config MT7663U
+ tristate "MediaTek MT7663U (USB) support"
+ select MT76_USB
+ select MT7663_USB_SDIO_COMMON
+ depends on MAC80211
+ depends on USB
+ help
+ This adds support for MT7663U 802.11ac 2x2:2 wireless devices.
+
+ To compile this driver as a module, choose M here.
+
+config MT7663S
+ tristate "MediaTek MT7663S (SDIO) support"
+ select MT76_SDIO
+ select MT7663_USB_SDIO_COMMON
+ depends on MAC80211
+ depends on MMC
+ help
+ This adds support for MT7663S 802.11ac 2x2:2 wireless devices.
+
+ To compile this driver as a module, choose M here.
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/Makefile b/sys/contrib/dev/mediatek/mt76/mt7615/Makefile
index 2b97b9dde477..4def3b13eae1 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/Makefile
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: ISC
+# SPDX-License-Identifier: BSD-3-Clause-Clear
obj-$(CONFIG_MT7615_COMMON) += mt7615-common.o
obj-$(CONFIG_MT7615E) += mt7615e.o
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/debugfs.c b/sys/contrib/dev/mediatek/mt76/mt7615/debugfs.c
index 2a6d317db5e0..0f7b20152279 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/debugfs.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/debugfs.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
#include "mt7615.h"
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/dma.c b/sys/contrib/dev/mediatek/mt76/mt7615/dma.c
index bcf7864312d7..59d2b3e8696b 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/dma.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/dma.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2019 MediaTek Inc.
*
* Author: Ryder Lee <ryder.lee@mediatek.com>
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/eeprom.c b/sys/contrib/dev/mediatek/mt76/mt7615/eeprom.c
index d5ec498aa9ef..740c4acba124 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/eeprom.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/eeprom.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2019 MediaTek Inc.
*
* Author: Ryder Lee <ryder.lee@mediatek.com>
@@ -366,8 +366,6 @@ int mt7615_eeprom_init(struct mt7615_dev *dev, u32 addr)
#endif
ETH_ALEN);
- mt76_eeprom_override(&dev->mphy);
-
- return 0;
+ return mt76_eeprom_override(&dev->mphy);
}
EXPORT_SYMBOL_GPL(mt7615_eeprom_init);
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/eeprom.h b/sys/contrib/dev/mediatek/mt76/mt7615/eeprom.h
index a67fbb90f5b3..6aed52e14181 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/eeprom.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/eeprom.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2019 MediaTek Inc. */
#ifndef __MT7615_EEPROM_H
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/init.c b/sys/contrib/dev/mediatek/mt76/mt7615/init.c
index 06d5a3f2fa67..f3f07401e2b7 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/init.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/init.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2019 MediaTek Inc.
*
* Author: Roy Luo <royluo@google.com>
@@ -576,7 +576,10 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
ETH_ALEN);
mphy->macaddr[0] |= 2;
mphy->macaddr[0] ^= BIT(7);
- mt76_eeprom_override(mphy);
+
+ ret = mt76_eeprom_override(mphy);
+ if (ret)
+ return ret;
/* second phy can only handle 5 GHz */
mphy->cap.has_5ghz = true;
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/mac.c b/sys/contrib/dev/mediatek/mt76/mt7615/mac.c
index 10bf7e5b3acb..a4a252dc0186 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/mac.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/mac.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2019 MediaTek Inc.
*
* Author: Ryder Lee <ryder.lee@mediatek.com>
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/mac.h b/sys/contrib/dev/mediatek/mt76/mt7615/mac.h
index d08fbe64c262..336ebce5db5f 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/mac.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/mac.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2019 MediaTek Inc. */
#ifndef __MT7615_MAC_H
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/main.c b/sys/contrib/dev/mediatek/mt76/mt7615/main.c
index 15fe155ac3f3..727266892c3d 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/main.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/main.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2019 MediaTek Inc.
*
* Author: Roy Luo <royluo@google.com>
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/mcu.c b/sys/contrib/dev/mediatek/mt76/mt7615/mcu.c
index ec2f759d407f..7f48b502effe 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/mcu.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/mcu.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2019 MediaTek Inc.
*
* Author: Roy Luo <royluo@google.com>
@@ -878,8 +878,10 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
WTBL_RESET_AND_SET, NULL,
&wskb);
- if (IS_ERR(wtbl_hdr))
+ if (IS_ERR(wtbl_hdr)) {
+ dev_kfree_skb(sskb);
return PTR_ERR(wtbl_hdr);
+ }
if (enable) {
mt76_connac_mcu_wtbl_generic_tlv(&dev->mt76, wskb, vif, sta,
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/mcu.h b/sys/contrib/dev/mediatek/mt76/mt7615/mcu.h
index 8e9604be0792..851b0e4839b5 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/mcu.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/mcu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2019 MediaTek Inc. */
#ifndef __MT7615_MCU_H
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/mmio.c b/sys/contrib/dev/mediatek/mt76/mt7615/mmio.c
index dbb2c82407df..da7edd48ce2c 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/mmio.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/mmio.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc. */
#include <linux/kernel.h>
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/mt7615.h b/sys/contrib/dev/mediatek/mt76/mt7615/mt7615.h
index 9bdd29e8d25e..e16865dd8e52 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/mt7615.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/mt7615.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2019 MediaTek Inc. */
#ifndef __MT7615_H
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/mt7615_trace.h b/sys/contrib/dev/mediatek/mt76/mt7615/mt7615_trace.h
index 9be5a58a4e6d..697fc5d225de 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/mt7615_trace.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/mt7615_trace.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/pci.c b/sys/contrib/dev/mediatek/mt76/mt7615/pci.c
index 053faacd715a..fe181d269419 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/pci.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/pci.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2019 MediaTek Inc.
*
* Author: Ryder Lee <ryder.lee@mediatek.com>
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/pci_init.c b/sys/contrib/dev/mediatek/mt76/mt7615/pci_init.c
index ec1ae85f8de1..f608d560027a 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/pci_init.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/pci_init.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2019 MediaTek Inc.
*
* Author: Roy Luo <royluo@google.com>
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/pci_mac.c b/sys/contrib/dev/mediatek/mt76/mt7615/pci_mac.c
index fe8a3d852dbf..3d471c6f8a98 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/pci_mac.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/pci_mac.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc.
*
* Author: Ryder Lee <ryder.lee@mediatek.com>
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/regs.h b/sys/contrib/dev/mediatek/mt76/mt7615/regs.h
index 806b3887c541..eb3c24d51987 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/regs.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/regs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2019 MediaTek Inc. */
#ifndef __MT7615_REGS_H
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/sdio.c b/sys/contrib/dev/mediatek/mt76/mt7615/sdio.c
index f56038cd4d3a..46188951ad19 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/sdio.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/sdio.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc.
*
* Author: Felix Fietkau <nbd@nbd.name>
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/soc.c b/sys/contrib/dev/mediatek/mt76/mt7615/soc.c
index 06a0f2a141e8..4bd189dd67e3 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/soc.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/soc.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2019 MediaTek Inc.
*
* Author: Ryder Lee <ryder.lee@mediatek.com>
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/testmode.c b/sys/contrib/dev/mediatek/mt76/mt7615/testmode.c
index 03f5af84424b..6eb97b7eba2d 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/testmode.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/testmode.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
#include "mt7615.h"
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/trace.c b/sys/contrib/dev/mediatek/mt76/mt7615/trace.c
index 6c02d5aff68f..7ec39e0b3fb2 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/trace.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/trace.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/usb.c b/sys/contrib/dev/mediatek/mt76/mt7615/usb.c
index d96e06b4fee1..d91feffadda9 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/usb.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/usb.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2019 MediaTek Inc.
*
* Author: Felix Fietkau <nbd@nbd.name>
diff --git a/sys/contrib/dev/mediatek/mt76/mt7615/usb_sdio.c b/sys/contrib/dev/mediatek/mt76/mt7615/usb_sdio.c
index 722418e9863c..c43f3ebb4e99 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7615/usb_sdio.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7615/usb_sdio.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc.
*
* Author: Lorenzo Bianconi <lorenzo@kernel.org>
diff --git a/sys/contrib/dev/mediatek/mt76/mt76_connac.h b/sys/contrib/dev/mediatek/mt76/mt76_connac.h
index 192dcc374a64..813d61bffc2c 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76_connac.h
+++ b/sys/contrib/dev/mediatek/mt76/mt76_connac.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2020 MediaTek Inc. */
#ifndef __MT76_CONNAC_H
diff --git a/sys/contrib/dev/mediatek/mt76/mt76_connac2_mac.h b/sys/contrib/dev/mediatek/mt76/mt76_connac2_mac.h
index eb4765365b8c..d4e2c3140441 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76_connac2_mac.h
+++ b/sys/contrib/dev/mediatek/mt76/mt76_connac2_mac.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2022 MediaTek Inc. */
#ifndef __MT76_CONNAC2_MAC_H
diff --git a/sys/contrib/dev/mediatek/mt76/mt76_connac3_mac.c b/sys/contrib/dev/mediatek/mt76/mt76_connac3_mac.c
index 2d300948308d..651fcd4169f4 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76_connac3_mac.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76_connac3_mac.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2023 MediaTek Inc. */
#include "mt76_connac.h"
diff --git a/sys/contrib/dev/mediatek/mt76/mt76_connac3_mac.h b/sys/contrib/dev/mediatek/mt76/mt76_connac3_mac.h
index 1013cad57a7f..247e2e7a47d8 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76_connac3_mac.h
+++ b/sys/contrib/dev/mediatek/mt76/mt76_connac3_mac.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2023 MediaTek Inc. */
#ifndef __MT76_CONNAC3_MAC_H
@@ -294,6 +294,13 @@ enum tx_frag_idx {
#define MT_TXP_BUF_LEN GENMASK(11, 0)
#define MT_TXP_DMA_ADDR_H GENMASK(15, 12)
+#define MT_TXP0_TOKEN_ID0 GENMASK(14, 0)
+#define MT_TXP0_TOKEN_ID0_VALID_MASK BIT(15)
+
+#define MT_TXP1_TID_ADDBA GENMASK(14, 12)
+#define MT_TXP3_ML0_MASK BIT(15)
+#define MT_TXP3_DMA_ADDR_H GENMASK(13, 12)
+
#define MT_TX_RATE_STBC BIT(14)
#define MT_TX_RATE_NSS GENMASK(13, 10)
#define MT_TX_RATE_MODE GENMASK(9, 6)
diff --git a/sys/contrib/dev/mediatek/mt76/mt76_connac_mac.c b/sys/contrib/dev/mediatek/mt76/mt76_connac_mac.c
index 0db00efe88b0..3304b5971be0 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76_connac_mac.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76_connac_mac.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc. */
#include "mt76_connac.h"
@@ -297,16 +297,18 @@ u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy,
struct ieee80211_bss_conf *conf,
bool beacon, bool mcast)
{
- struct mt76_vif_link *mvif = mt76_vif_conf_link(mphy->dev, conf->vif, conf);
- struct cfg80211_chan_def *chandef = mvif->ctx ?
- &mvif->ctx->def : &mphy->chandef;
- u8 nss = 0, mode = 0, band = chandef->chan->band;
- int rateidx = 0, mcast_rate;
- int offset = 0;
+ u8 nss = 0, mode = 0, band = NL80211_BAND_2GHZ;
+ int rateidx = 0, offset = 0, mcast_rate;
+ struct cfg80211_chan_def *chandef;
+ struct mt76_vif_link *mvif;
if (!conf)
goto legacy;
+ mvif = mt76_vif_conf_link(mphy->dev, conf->vif, conf);
+ chandef = mvif->ctx ? &mvif->ctx->def : &mphy->chandef;
+ band = chandef->chan->band;
+
if (is_mt7921(mphy->dev)) {
rateidx = ffs(conf->basic_rates) - 1;
goto legacy;
@@ -584,8 +586,9 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bool multicast = ieee80211_is_data(hdr->frame_control) &&
is_multicast_ether_addr(hdr->addr1);
- u16 rate = mt76_connac2_mac_tx_rate_val(mphy, &vif->bss_conf, beacon,
- multicast);
+ u16 rate = mt76_connac2_mac_tx_rate_val(mphy,
+ vif ? &vif->bss_conf : NULL,
+ beacon, multicast);
u32 val = MT_TXD6_FIXED_BW;
/* hardware won't add HTC for mgmt/ctrl frame */
diff --git a/sys/contrib/dev/mediatek/mt76/mt76_connac_mcu.c b/sys/contrib/dev/mediatek/mt76/mt76_connac_mcu.c
index ca0d022668e9..2a9af9f01388 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76_connac_mcu.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76_connac_mcu.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc. */
#include <linux/firmware.h>
@@ -1662,6 +1662,31 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
return err;
}
+ if (enable && vif->bss_conf.bssid_indicator) {
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct bss_info_uni_mbssid mbssid;
+ } mbssid_req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .mbssid = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_11V_MBSSID),
+ .len = cpu_to_le16(sizeof(struct bss_info_uni_mbssid)),
+ .max_indicator = vif->bss_conf.bssid_indicator,
+ .mbss_idx = vif->bss_conf.bssid_index,
+ },
+ };
+
+ err = mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE),
+ &mbssid_req, sizeof(mbssid_req), true);
+ if (err < 0)
+ return err;
+ }
+
return mt76_connac_mcu_uni_set_chctx(phy, mvif, ctx);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_uni_add_bss);
@@ -1949,7 +1974,11 @@ int mt76_connac_mcu_chip_config(struct mt76_dev *dev)
.resp_type = 0,
};
- memcpy(req.data, "assert", 7);
+#if defined(__linux__)
+ strscpy(req.data, "assert");
+#elif defined(__FreeBSD__)
+ strscpy(req.data, "assert", sizeof(req.data));
+#endif
return mt76_mcu_send_msg(dev, MCU_CE_CMD(CHIP_CONFIG),
&req, sizeof(req), false);
@@ -2994,7 +3023,7 @@ int mt76_connac2_load_ram(struct mt76_dev *dev, const char *fw_wm,
}
hdr = (const void *)(fw->data + fw->size - sizeof(*hdr));
- dev_info(dev->dev, "WM Firmware Version: %.10s, Build Time: %.15s\n",
+ dev_info(dev->dev, "WM Firmware Version: %.10s, Build Time: %.15s",
hdr->fw_ver, hdr->build_date);
ret = mt76_connac_mcu_send_ram_firmware(dev, hdr, fw->data, false);
@@ -3023,7 +3052,7 @@ int mt76_connac2_load_ram(struct mt76_dev *dev, const char *fw_wm,
}
hdr = (const void *)(fw->data + fw->size - sizeof(*hdr));
- dev_info(dev->dev, "WA Firmware Version: %.10s, Build Time: %.15s\n",
+ dev_info(dev->dev, "WA Firmware Version: %.10s, Build Time: %.15s",
hdr->fw_ver, hdr->build_date);
ret = mt76_connac_mcu_send_ram_firmware(dev, hdr, fw->data, true);
@@ -3099,7 +3128,7 @@ int mt76_connac2_load_patch(struct mt76_dev *dev, const char *fw_name)
}
hdr = (const void *)fw->data;
- dev_info(dev->dev, "HW/SW Version: 0x%x, Build Time: %.16s\n",
+ dev_info(dev->dev, "HW/SW Version: 0x%x, Build Time: %.16s",
be32_to_cpu(hdr->hw_sw_ver), hdr->build_date);
for (i = 0; i < be32_to_cpu(hdr->desc.n_region); i++) {
diff --git a/sys/contrib/dev/mediatek/mt76/mt76_connac_mcu.h b/sys/contrib/dev/mediatek/mt76/mt76_connac_mcu.h
index 3ba81ab77e99..5f6c7455b5ce 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76_connac_mcu.h
+++ b/sys/contrib/dev/mediatek/mt76/mt76_connac_mcu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2020 MediaTek Inc. */
#ifndef __MT76_CONNAC_MCU_H
@@ -1066,6 +1066,7 @@ enum {
MCU_UNI_EVENT_ROC = 0x27,
MCU_UNI_EVENT_TX_DONE = 0x2d,
MCU_UNI_EVENT_THERMAL = 0x35,
+ MCU_UNI_EVENT_RSSI_MONITOR = 0x41,
MCU_UNI_EVENT_NIC_CAPAB = 0x43,
MCU_UNI_EVENT_WED_RRO = 0x57,
MCU_UNI_EVENT_PER_STA_INFO = 0x6d,
@@ -1304,6 +1305,7 @@ enum {
MCU_UNI_CMD_THERMAL = 0x35,
MCU_UNI_CMD_VOW = 0x37,
MCU_UNI_CMD_FIXED_RATE_TABLE = 0x40,
+ MCU_UNI_CMD_RSSI_MONITOR = 0x41,
MCU_UNI_CMD_TESTMODE_CTRL = 0x46,
MCU_UNI_CMD_RRO = 0x57,
MCU_UNI_CMD_OFFCH_SCAN_CTRL = 0x58,
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x0/pci.c b/sys/contrib/dev/mediatek/mt76/mt76x0/pci.c
index 11c16d1fc70f..f8d206a07f99 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x0/pci.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x0/pci.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x0/pci_mcu.c b/sys/contrib/dev/mediatek/mt76/mt76x0/pci_mcu.c
index f0962507f72f..efa549dc68ec 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x0/pci_mcu.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x0/pci_mcu.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x0/usb_mcu.c b/sys/contrib/dev/mediatek/mt76/mt76x0/usb_mcu.c
index 6dc1f51f5658..20a8f3659490 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x0/usb_mcu.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x0/usb_mcu.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02.h b/sys/contrib/dev/mediatek/mt76/mt76x02.h
index 8d06ef8c7c62..3c98808ccf26 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02.h
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
* Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02_beacon.c b/sys/contrib/dev/mediatek/mt76/mt76x02_beacon.c
index d570b99bccb9..7c9b16942275 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02_beacon.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02_beacon.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02_debugfs.c b/sys/contrib/dev/mediatek/mt76/mt76x02_debugfs.c
index 8ce4bf44733d..d81f696b32c7 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02_debugfs.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02_debugfs.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02_dfs.c b/sys/contrib/dev/mediatek/mt76/mt76x02_dfs.c
index 7a07636d09c6..21c99ad7ef59 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02_dfs.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02_dfs.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02_dfs.h b/sys/contrib/dev/mediatek/mt76/mt76x02_dfs.h
index 491010a32247..d40051992586 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02_dfs.h
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02_dfs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02_dma.h b/sys/contrib/dev/mediatek/mt76/mt76x02_dma.h
index 23b0e7d10d57..2f6ba8cf51e8 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02_dma.h
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02_dma.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02_eeprom.c b/sys/contrib/dev/mediatek/mt76/mt76x02_eeprom.c
index a5e3392c0b48..d16be0cb0dc7 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02_eeprom.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02_eeprom.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02_eeprom.h b/sys/contrib/dev/mediatek/mt76/mt76x02_eeprom.h
index 13fa70853b0d..3cbb2977f375 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02_eeprom.h
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02_eeprom.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02_mac.c b/sys/contrib/dev/mediatek/mt76/mt76x02_mac.c
index 83488b2d6efb..14ee5b3b94d3 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02_mac.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02_mac.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
* Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02_mac.h b/sys/contrib/dev/mediatek/mt76/mt76x02_mac.h
index 5dc6c834111e..778454ac8974 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02_mac.h
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02_mac.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
* Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02_mcu.c b/sys/contrib/dev/mediatek/mt76/mt76x02_mcu.c
index 75978820a260..e16f06a284ca 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02_mcu.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02_mcu.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02_mcu.h b/sys/contrib/dev/mediatek/mt76/mt76x02_mcu.h
index e187ed52968e..a422cdc520cb 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02_mcu.h
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02_mcu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02_mmio.c b/sys/contrib/dev/mediatek/mt76/mt76x02_mmio.c
index a683d53c7ceb..dd71c1c95cc9 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02_mmio.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02_mmio.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02_phy.c b/sys/contrib/dev/mediatek/mt76/mt76x02_phy.c
index cbe7e6f0c29a..557380c9bfab 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02_phy.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02_phy.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02_phy.h b/sys/contrib/dev/mediatek/mt76/mt76x02_phy.h
index 84d8a6138b3e..09e8edee2195 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02_phy.h
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02_phy.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02_regs.h b/sys/contrib/dev/mediatek/mt76/mt76x02_regs.h
index fe0c5e3298bc..e87d3f8a1de9 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02_regs.h
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02_regs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02_trace.c b/sys/contrib/dev/mediatek/mt76/mt76x02_trace.c
index a812c3a1e258..a92b2b7391ff 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02_trace.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02_trace.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02_trace.h b/sys/contrib/dev/mediatek/mt76/mt76x02_trace.h
index 11d119cd0f6f..56eea2f68983 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02_trace.h
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02_trace.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02_txrx.c b/sys/contrib/dev/mediatek/mt76/mt76x02_txrx.c
index d8bc4ae185f5..301b43180006 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02_txrx.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02_txrx.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02_usb.h b/sys/contrib/dev/mediatek/mt76/mt76x02_usb.h
index b5be884b3549..49ab05c1fe73 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02_usb.h
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02_usb.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02_usb_core.c b/sys/contrib/dev/mediatek/mt76/mt76x02_usb_core.c
index 4840d0b500b3..3a28a8cc1338 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02_usb_core.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02_usb_core.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02_usb_mcu.c b/sys/contrib/dev/mediatek/mt76/mt76x02_usb_mcu.c
index b2cc44914294..968c73e06a5f 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02_usb_mcu.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02_usb_mcu.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x02_util.c b/sys/contrib/dev/mediatek/mt76/mt76x02_util.c
index 7dfcb20c692c..e5d9d1bc9415 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x02_util.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x02_util.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/Kconfig b/sys/contrib/dev/mediatek/mt76/mt76x2/Kconfig
new file mode 100644
index 000000000000..d820510cb4bb
--- /dev/null
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/Kconfig
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+config MT76x2_COMMON
+ tristate
+ select MT76x02_LIB
+
+config MT76x2E
+ tristate "MediaTek MT76x2E (PCIe) support"
+ select MT76x2_COMMON
+ depends on MAC80211
+ depends on PCI
+ help
+ This adds support for MT7612/MT7602/MT7662-based PCIe wireless
+ devices, which comply with IEEE 802.11ac standards and support
+ 2SS to 866Mbit/s PHY rate.
+
+ To compile this driver as a module, choose M here.
+
+config MT76x2U
+ tristate "MediaTek MT76x2U (USB) support"
+ select MT76x2_COMMON
+ select MT76x02_USB
+ depends on MAC80211
+ depends on USB
+ help
+ This adds support for MT7612U-based USB 3.0 wireless dongles,
+ which comply with IEEE 802.11ac standards and support 2SS to
+ 866Mbit/s PHY rate.
+
+ To compile this driver as a module, choose M here.
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/Makefile b/sys/contrib/dev/mediatek/mt76/mt76x2/Makefile
new file mode 100644
index 000000000000..cbc90a9616a6
--- /dev/null
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o
+obj-$(CONFIG_MT76x2E) += mt76x2e.o
+obj-$(CONFIG_MT76x2U) += mt76x2u.o
+
+mt76x2-common-y := \
+ eeprom.o mac.o init.o phy.o mcu.o
+
+mt76x2e-y := \
+ pci.o pci_main.o pci_init.o pci_mcu.o \
+ pci_phy.o
+
+mt76x2u-y := \
+ usb.o usb_init.o usb_main.o usb_mac.o usb_mcu.o \
+ usb_phy.o
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/eeprom.c b/sys/contrib/dev/mediatek/mt76/mt76x2/eeprom.c
index 156b16c17b2b..782813aadc0a 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x2/eeprom.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/eeprom.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
@@ -499,7 +499,9 @@ int mt76x2_eeprom_init(struct mt76x02_dev *dev)
mt76x02_eeprom_parse_hw_cap(dev);
mt76x2_eeprom_get_macaddr(dev);
- mt76_eeprom_override(&dev->mphy);
+ ret = mt76_eeprom_override(&dev->mphy);
+ if (ret)
+ return ret;
dev->mphy.macaddr[0] &= ~BIT(1);
return 0;
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/eeprom.h b/sys/contrib/dev/mediatek/mt76/mt76x2/eeprom.h
index 43430ef98b11..1ee8be389b24 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x2/eeprom.h
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/eeprom.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/init.c b/sys/contrib/dev/mediatek/mt76/mt76x2/init.c
index 19c139290adb..408dc08b6457 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x2/init.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/init.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/mac.c b/sys/contrib/dev/mediatek/mt76/mt76x2/mac.c
index e08740ca3d0c..2fa34ca69095 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x2/mac.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/mac.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/mac.h b/sys/contrib/dev/mediatek/mt76/mt76x2/mac.h
index d5c3d26b94c1..f8ea70074c41 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x2/mac.h
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/mac.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/mcu.c b/sys/contrib/dev/mediatek/mt76/mt76x2/mcu.c
index ac83ce5f3e8b..769d924220e3 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x2/mcu.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/mcu.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/mcu.h b/sys/contrib/dev/mediatek/mt76/mt76x2/mcu.h
index 41fd66563e82..16a4386aa754 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x2/mcu.h
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/mcu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/mt76x2.h b/sys/contrib/dev/mediatek/mt76/mt76x2/mt76x2.h
index f051721bb00e..984756c81bdc 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x2/mt76x2.h
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/mt76x2.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/mt76x2u.h b/sys/contrib/dev/mediatek/mt76/mt76x2/mt76x2u.h
index f9d37c6cf1f0..27e478ab5b15 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x2/mt76x2u.h
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/mt76x2u.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/pci.c b/sys/contrib/dev/mediatek/mt76/mt76x2/pci.c
index 2303019670e2..491a32921a06 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x2/pci.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/pci.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/pci_init.c b/sys/contrib/dev/mediatek/mt76/mt76x2/pci_init.c
index e38e8e5685c2..bec84f932311 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x2/pci_init.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/pci_init.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/pci_main.c b/sys/contrib/dev/mediatek/mt76/mt76x2/pci_main.c
index c5dfb06d81e8..550644676201 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x2/pci_main.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/pci_main.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/pci_mcu.c b/sys/contrib/dev/mediatek/mt76/mt76x2/pci_mcu.c
index e5b6282d1a6c..daba163802b6 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x2/pci_mcu.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/pci_mcu.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/pci_phy.c b/sys/contrib/dev/mediatek/mt76/mt76x2/pci_phy.c
index 8831337df23e..dcf4328c1cac 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x2/pci_phy.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/pci_phy.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/phy.c b/sys/contrib/dev/mediatek/mt76/mt76x2/phy.c
index e2b4cf30dc44..a5efa13a892f 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x2/phy.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/phy.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/usb.c b/sys/contrib/dev/mediatek/mt76/mt76x2/usb.c
index 96cecc576a98..01cb3b2830f3 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x2/usb.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/usb.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/usb_init.c b/sys/contrib/dev/mediatek/mt76/mt76x2/usb_init.c
index 3b5562811511..41778a8ef02c 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x2/usb_init.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/usb_init.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/usb_mac.c b/sys/contrib/dev/mediatek/mt76/mt76x2/usb_mac.c
index eaa622833f85..d0cb511ac6a2 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x2/usb_mac.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/usb_mac.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/usb_main.c b/sys/contrib/dev/mediatek/mt76/mt76x2/usb_main.c
index 6671c53faf9f..66b06a493d95 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x2/usb_main.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/usb_main.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/usb_mcu.c b/sys/contrib/dev/mediatek/mt76/mt76x2/usb_mcu.c
index dd22d8af0901..9102be1803b7 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x2/usb_mcu.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/usb_mcu.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt76x2/usb_phy.c b/sys/contrib/dev/mediatek/mt76/mt76x2/usb_phy.c
index a04a98f5ce1e..b63dd7f3ee80 100644
--- a/sys/contrib/dev/mediatek/mt76/mt76x2/usb_phy.c
+++ b/sys/contrib/dev/mediatek/mt76/mt76x2/usb_phy.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt7915/Kconfig b/sys/contrib/dev/mediatek/mt76/mt7915/Kconfig
index 193112c49bd1..c24be8227f11 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7915/Kconfig
+++ b/sys/contrib/dev/mediatek/mt76/mt7915/Kconfig
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: ISC
+# SPDX-License-Identifier: BSD-3-Clause-Clear
config MT7915E
tristate "MediaTek MT7915E (PCIe) support"
select MT76_CONNAC_LIB
diff --git a/sys/contrib/dev/mediatek/mt76/mt7915/Makefile b/sys/contrib/dev/mediatek/mt76/mt7915/Makefile
index e0ca638c91a5..963fb3109af3 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7915/Makefile
+++ b/sys/contrib/dev/mediatek/mt76/mt7915/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: ISC
+# SPDX-License-Identifier: BSD-3-Clause-Clear
obj-$(CONFIG_MT7915E) += mt7915e.o
diff --git a/sys/contrib/dev/mediatek/mt76/mt7915/coredump.c b/sys/contrib/dev/mediatek/mt76/mt7915/coredump.c
index 8c9a69837c86..e3dcf4c5557d 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7915/coredump.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7915/coredump.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2022 MediaTek Inc. */
#if defined(__FreeBSD__)
diff --git a/sys/contrib/dev/mediatek/mt76/mt7915/coredump.h b/sys/contrib/dev/mediatek/mt76/mt7915/coredump.h
index a7284b391daf..251b045b5aca 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7915/coredump.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7915/coredump.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2022 MediaTek Inc. */
#ifndef _COREDUMP_H_
diff --git a/sys/contrib/dev/mediatek/mt76/mt7915/debugfs.c b/sys/contrib/dev/mediatek/mt76/mt7915/debugfs.c
index b287b7d9394e..26ed3745af43 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7915/debugfs.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7915/debugfs.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc. */
#include <linux/relay.h>
@@ -1008,7 +1008,7 @@ mt7915_rate_txpower_get(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
- ret = mt7915_mcu_get_txpower_sku(phy, txpwr, sizeof(txpwr));
+ ret = mt7915_mcu_get_txpower_sku(phy, txpwr, sizeof(txpwr), TX_POWER_INFO_RATE);
if (ret)
goto out;
@@ -1118,7 +1118,7 @@ mt7915_rate_txpower_set(struct file *file, const char __user *user_buf,
mutex_lock(&dev->mt76.mutex);
ret = mt7915_mcu_get_txpower_sku(phy, req.txpower_sku,
- sizeof(req.txpower_sku));
+ sizeof(req.txpower_sku), TX_POWER_INFO_RATE);
if (ret)
goto out;
@@ -1160,7 +1160,7 @@ out:
return ret ? ret : count;
}
-static const struct file_operations mt7915_rate_txpower_fops = {
+static const struct file_operations mt7915_txpower_fops = {
.write = mt7915_rate_txpower_set,
.read = mt7915_rate_txpower_get,
.open = simple_open,
@@ -1169,6 +1169,70 @@ static const struct file_operations mt7915_rate_txpower_fops = {
};
static int
+mt7915_path_txpower_show(struct seq_file *file)
+{
+ struct mt7915_phy *phy = file->private;
+ s8 txpower[MT7915_SKU_PATH_NUM], *buf = txpower;
+ int ret;
+
+#define PATH_POWER_SHOW(_name, _len, _skip) do { \
+ size_t __len = (_len); \
+ if (_skip) { \
+ buf -= 1; \
+ *buf = 0; \
+ } \
+ mt76_seq_puts_array(file, _name, buf, __len); \
+ buf += __len; \
+ } while (0)
+
+ seq_printf(file, "\n%*c", 18, ' ');
+ seq_puts(file, "1T1S/2T1S/3T1S/4T1S/2T2S/3T2S/4T2S/3T3S/4T3S/4T4S\n");
+ ret = mt7915_mcu_get_txpower_sku(phy, txpower, sizeof(txpower),
+ TX_POWER_INFO_PATH);
+ if (ret)
+ return ret;
+
+ PATH_POWER_SHOW("CCK", 4, 0);
+ PATH_POWER_SHOW("OFDM", 4, 0);
+ PATH_POWER_SHOW("BF-OFDM", 4, 1);
+
+ PATH_POWER_SHOW("HT/VHT20", 10, 0);
+ PATH_POWER_SHOW("BF-HT/VHT20", 10, 1);
+ PATH_POWER_SHOW("HT/VHT40", 10, 0);
+ PATH_POWER_SHOW("BF-HT/VHT40", 10, 1);
+
+ PATH_POWER_SHOW("BW20/RU242", 10, 0);
+ PATH_POWER_SHOW("BF-BW20/RU242", 10, 1);
+ PATH_POWER_SHOW("BW40/RU484", 10, 0);
+ PATH_POWER_SHOW("BF-BW40/RU484", 10, 1);
+ PATH_POWER_SHOW("BW80/RU996", 10, 0);
+ PATH_POWER_SHOW("BF-BW80/RU996", 10, 1);
+ PATH_POWER_SHOW("BW160/RU2x996", 10, 0);
+ PATH_POWER_SHOW("BF-BW160/RU2x996", 10, 1);
+ PATH_POWER_SHOW("RU26", 10, 0);
+ PATH_POWER_SHOW("BF-RU26", 10, 0);
+ PATH_POWER_SHOW("RU52", 10, 0);
+ PATH_POWER_SHOW("BF-RU52", 10, 0);
+ PATH_POWER_SHOW("RU106", 10, 0);
+ PATH_POWER_SHOW("BF-RU106", 10, 0);
+#undef PATH_POWER_SHOW
+
+ return 0;
+}
+
+static int
+mt7915_txpower_path_show(struct seq_file *file, void *data)
+{
+ struct mt7915_phy *phy = file->private;
+
+ seq_printf(file, "\nBand %d\n", phy != &phy->dev->phy);
+
+ return mt7915_path_txpower_show(file);
+}
+
+DEFINE_SHOW_ATTRIBUTE(mt7915_txpower_path);
+
+static int
mt7915_twt_stats(struct seq_file *s, void *data)
{
struct mt7915_dev *dev = dev_get_drvdata(s->private);
@@ -1254,7 +1318,9 @@ int mt7915_init_debugfs(struct mt7915_phy *phy)
debugfs_create_file("implicit_txbf", 0600, dir, dev,
&fops_implicit_txbf);
debugfs_create_file("txpower_sku", 0400, dir, phy,
- &mt7915_rate_txpower_fops);
+ &mt7915_txpower_fops);
+ debugfs_create_file("txpower_path", 0400, dir, phy,
+ &mt7915_txpower_path_fops);
debugfs_create_devm_seqfile(dev->mt76.dev, "twt_stats", dir,
mt7915_twt_stats);
debugfs_create_file("rf_regval", 0600, dir, dev, &fops_rf_regval);
diff --git a/sys/contrib/dev/mediatek/mt76/mt7915/dma.c b/sys/contrib/dev/mediatek/mt76/mt7915/dma.c
index 0c62272fe7d0..aabd37366e86 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7915/dma.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7915/dma.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc. */
#include "mt7915.h"
@@ -624,13 +624,13 @@ int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
}
for (i = 0; i < __MT_MCUQ_MAX; i++)
- mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+ mt76_queue_reset(dev, dev->mt76.q_mcu[i], true);
mt76_for_each_q_rx(&dev->mt76, i) {
if (mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
continue;
- mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+ mt76_queue_reset(dev, &dev->mt76.q_rx[i], true);
}
mt76_tx_status_check(&dev->mt76, true);
diff --git a/sys/contrib/dev/mediatek/mt76/mt7915/eeprom.c b/sys/contrib/dev/mediatek/mt76/mt7915/eeprom.c
index 9214286ca000..0d2358e400d8 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7915/eeprom.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7915/eeprom.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc. */
#if defined(__FreeBSD__)
@@ -292,9 +292,7 @@ int mt7915_eeprom_init(struct mt7915_dev *dev)
#endif
ETH_ALEN);
- mt76_eeprom_override(&dev->mphy);
-
- return 0;
+ return mt76_eeprom_override(&dev->mphy);
}
int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
diff --git a/sys/contrib/dev/mediatek/mt76/mt7915/eeprom.h b/sys/contrib/dev/mediatek/mt76/mt7915/eeprom.h
index 31aec0f40232..1dc285c72991 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7915/eeprom.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7915/eeprom.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2020 MediaTek Inc. */
#ifndef __MT7915_EEPROM_H
@@ -50,9 +50,9 @@ enum mt7915_eeprom_field {
#define MT_EE_CAL_GROUP_SIZE_7975 (54 * MT_EE_CAL_UNIT + 16)
#define MT_EE_CAL_GROUP_SIZE_7976 (94 * MT_EE_CAL_UNIT + 16)
#define MT_EE_CAL_GROUP_SIZE_7916_6G (94 * MT_EE_CAL_UNIT + 16)
+#define MT_EE_CAL_GROUP_SIZE_7981 (144 * MT_EE_CAL_UNIT + 16)
#define MT_EE_CAL_DPD_SIZE_V1 (54 * MT_EE_CAL_UNIT)
#define MT_EE_CAL_DPD_SIZE_V2 (300 * MT_EE_CAL_UNIT)
-#define MT_EE_CAL_DPD_SIZE_V2_7981 (102 * MT_EE_CAL_UNIT) /* no 6g dpd data */
#define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0)
#define MT_EE_WIFI_CONF0_RX_PATH GENMASK(5, 3)
@@ -180,6 +180,8 @@ mt7915_get_cal_group_size(struct mt7915_dev *dev)
val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val);
return (val == MT_EE_V2_BAND_SEL_6GHZ) ? MT_EE_CAL_GROUP_SIZE_7916_6G :
MT_EE_CAL_GROUP_SIZE_7916;
+ } else if (is_mt7981(&dev->mt76)) {
+ return MT_EE_CAL_GROUP_SIZE_7981;
} else if (mt7915_check_adie(dev, false)) {
return MT_EE_CAL_GROUP_SIZE_7976;
} else {
@@ -192,8 +194,6 @@ mt7915_get_cal_dpd_size(struct mt7915_dev *dev)
{
if (is_mt7915(&dev->mt76))
return MT_EE_CAL_DPD_SIZE_V1;
- else if (is_mt7981(&dev->mt76))
- return MT_EE_CAL_DPD_SIZE_V2_7981;
else
return MT_EE_CAL_DPD_SIZE_V2;
}
diff --git a/sys/contrib/dev/mediatek/mt76/mt7915/init.c b/sys/contrib/dev/mediatek/mt76/mt7915/init.c
index 6c0f1cdc4987..4128942361ff 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7915/init.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7915/init.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc. */
#include <linux/etherdevice.h>
@@ -304,6 +304,8 @@ static void __mt7915_init_txpower(struct mt7915_phy *phy,
int pwr_delta = mt7915_eeprom_get_power_delta(dev, sband->band);
struct mt76_power_limits limits;
+ phy->sku_limit_en = true;
+ phy->sku_path_en = true;
for (i = 0; i < sband->n_channels; i++) {
struct ieee80211_channel *chan = &sband->channels[i];
u32 target_power = 0;
@@ -320,6 +322,11 @@ static void __mt7915_init_txpower(struct mt7915_phy *phy,
target_power = mt76_get_rate_power_limits(phy->mt76, chan,
&limits,
target_power);
+
+ /* MT7915N can not enable Backoff table without setting value in dts */
+ if (!limits.path.ofdm[0])
+ phy->sku_path_en = false;
+
target_power += path_delta;
target_power = DIV_ROUND_UP(target_power, 2);
chan->max_power = min_t(int, chan->max_reg_power,
@@ -729,7 +736,9 @@ mt7915_register_ext_phy(struct mt7915_dev *dev, struct mt7915_phy *phy)
mphy->macaddr[0] |= 2;
mphy->macaddr[0] ^= BIT(7);
}
- mt76_eeprom_override(mphy);
+ ret = mt76_eeprom_override(mphy);
+ if (ret)
+ return ret;
/* init wiphy according to mphy and phy */
mt7915_init_wiphy(phy);
diff --git a/sys/contrib/dev/mediatek/mt76/mt7915/mac.c b/sys/contrib/dev/mediatek/mt76/mt7915/mac.c
index dcdbde3ee5b7..fb6e03e6f34f 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7915/mac.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7915/mac.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc. */
#include <linux/etherdevice.h>
@@ -1485,6 +1485,8 @@ mt7915_mac_full_reset(struct mt7915_dev *dev)
if (ext_phy)
cancel_delayed_work_sync(&ext_phy->mac_work);
+ mt76_abort_scan(&dev->mt76);
+
mutex_lock(&dev->mt76.mutex);
for (i = 0; i < 10; i++) {
if (!mt7915_mac_restart(dev))
diff --git a/sys/contrib/dev/mediatek/mt76/mt7915/mac.h b/sys/contrib/dev/mediatek/mt76/mt7915/mac.h
index 448b1b380190..e39f96e00ba4 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7915/mac.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7915/mac.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2020 MediaTek Inc. */
#ifndef __MT7915_MAC_H
diff --git a/sys/contrib/dev/mediatek/mt76/mt7915/main.c b/sys/contrib/dev/mediatek/mt76/mt7915/main.c
index fe0639c14bf9..90d5e79fbf74 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7915/main.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7915/main.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc. */
#include <linux/etherdevice.h>
@@ -73,7 +73,7 @@ int mt7915_run(struct ieee80211_hw *hw)
if (ret)
goto out;
- ret = mt7915_mcu_set_sku_en(phy, true);
+ ret = mt7915_mcu_set_sku_en(phy);
if (ret)
goto out;
diff --git a/sys/contrib/dev/mediatek/mt76/mt7915/mcu.c b/sys/contrib/dev/mediatek/mt76/mt7915/mcu.c
index 17c9c261960f..87ea528915c6 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7915/mcu.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7915/mcu.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc. */
#if defined(__FreeBSD__)
@@ -3066,30 +3066,15 @@ static int mt7915_dpd_freq_idx(struct mt7915_dev *dev, u16 freq, u8 bw)
/* 5G BW160 */
5250, 5570, 5815
};
- static const u16 freq_list_v2_7981[] = {
- /* 5G BW20 */
- 5180, 5200, 5220, 5240,
- 5260, 5280, 5300, 5320,
- 5500, 5520, 5540, 5560,
- 5580, 5600, 5620, 5640,
- 5660, 5680, 5700, 5720,
- 5745, 5765, 5785, 5805,
- 5825, 5845, 5865, 5885,
- /* 5G BW160 */
- 5250, 5570, 5815
- };
- const u16 *freq_list = freq_list_v1;
- int n_freqs = ARRAY_SIZE(freq_list_v1);
- int idx;
+ const u16 *freq_list;
+ int idx, n_freqs;
if (!is_mt7915(&dev->mt76)) {
- if (is_mt7981(&dev->mt76)) {
- freq_list = freq_list_v2_7981;
- n_freqs = ARRAY_SIZE(freq_list_v2_7981);
- } else {
- freq_list = freq_list_v2;
- n_freqs = ARRAY_SIZE(freq_list_v2);
- }
+ freq_list = freq_list_v2;
+ n_freqs = ARRAY_SIZE(freq_list_v2);
+ } else {
+ freq_list = freq_list_v1;
+ n_freqs = ARRAY_SIZE(freq_list_v1);
}
if (freq < 4000) {
@@ -3365,7 +3350,8 @@ int mt7915_mcu_set_txpower_frame(struct mt7915_phy *phy,
int ret;
s8 txpower_sku[MT7915_SKU_RATE_NUM];
- ret = mt7915_mcu_get_txpower_sku(phy, txpower_sku, sizeof(txpower_sku));
+ ret = mt7915_mcu_get_txpower_sku(phy, txpower_sku, sizeof(txpower_sku),
+ TX_POWER_INFO_RATE);
if (ret)
return ret;
@@ -3405,51 +3391,136 @@ int mt7915_mcu_set_txpower_frame(struct mt7915_phy *phy,
sizeof(req), true);
}
+static void
+mt7915_update_txpower(struct mt7915_phy *phy, int tx_power)
+{
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_channel *chan = mphy->main_chandef.chan;
+ int chain_idx, val, e2p_power_limit = 0;
+
+ if (!chan) {
+ mphy->txpower_cur = tx_power;
+ return;
+ }
+
+ for (chain_idx = 0; chain_idx < hweight16(mphy->chainmask); chain_idx++) {
+ val = mt7915_eeprom_get_target_power(phy->dev, chan, chain_idx);
+ val += mt7915_eeprom_get_power_delta(phy->dev, chan->band);
+
+ e2p_power_limit = max_t(int, e2p_power_limit, val);
+ }
+
+ if (phy->sku_limit_en)
+ mphy->txpower_cur = min_t(int, e2p_power_limit, tx_power);
+ else
+ mphy->txpower_cur = e2p_power_limit;
+}
+
int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy)
{
+#define TX_POWER_LIMIT_TABLE_RATE 0
+#define TX_POWER_LIMIT_TABLE_PATH 1
struct mt7915_dev *dev = phy->dev;
struct mt76_phy *mphy = phy->mt76;
struct ieee80211_hw *hw = mphy->hw;
- struct mt7915_mcu_txpower_sku req = {
+ struct mt7915_sku_val {
+ u8 format_id;
+ u8 limit_type;
+ u8 band_idx;
+ } __packed hdr = {
.format_id = TX_POWER_LIMIT_TABLE,
+ .limit_type = TX_POWER_LIMIT_TABLE_RATE,
.band_idx = phy->mt76->band_idx,
};
- struct mt76_power_limits limits_array;
- s8 *la = (s8 *)&limits_array;
- int i, idx;
- int tx_power;
+ int i, ret, tx_power;
+ const u8 *len = mt7915_sku_group_len;
+ struct mt76_power_limits la = {};
+ struct sk_buff *skb;
tx_power = mt76_get_power_bound(mphy, hw->conf.power_level);
- tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
- &limits_array, tx_power);
- mphy->txpower_cur = tx_power;
+ if (phy->sku_limit_en) {
+ tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
+ &la, tx_power);
+ mt7915_update_txpower(phy, tx_power);
+ } else {
+ mt7915_update_txpower(phy, tx_power);
+ return 0;
+ }
- for (i = 0, idx = 0; i < ARRAY_SIZE(mt7915_sku_group_len); i++) {
- u8 mcs_num, len = mt7915_sku_group_len[i];
- int j;
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
+ sizeof(hdr) + MT7915_SKU_RATE_NUM);
+ if (!skb)
+ return -ENOMEM;
- if (i >= SKU_HT_BW20 && i <= SKU_VHT_BW160) {
- mcs_num = 10;
+ skb_put_data(skb, &hdr, sizeof(hdr));
+ skb_put_data(skb, &la.cck, len[SKU_CCK] + len[SKU_OFDM]);
+ skb_put_data(skb, &la.mcs[0], len[SKU_HT_BW20]);
+ skb_put_data(skb, &la.mcs[1], len[SKU_HT_BW40]);
- if (i == SKU_HT_BW20 || i == SKU_VHT_BW20)
- la = (s8 *)&limits_array + 12;
- } else {
- mcs_num = len;
- }
+ /* vht */
+ for (i = 0; i < 4; i++) {
+ skb_put_data(skb, &la.mcs[i], sizeof(la.mcs[i]));
+ skb_put_zero(skb, 2); /* padding */
+ }
- for (j = 0; j < min_t(u8, mcs_num, len); j++)
- req.txpower_sku[idx + j] = la[j];
+ /* he */
+ skb_put_data(skb, &la.ru[0], sizeof(la.ru));
+ ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), true);
+ if (ret)
+ return ret;
- la += mcs_num;
- idx += len;
+ /* only set per-path power table when it's configured */
+ if (!phy->sku_path_en)
+ return 0;
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
+ sizeof(hdr) + MT7915_SKU_PATH_NUM);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr.limit_type = TX_POWER_LIMIT_TABLE_PATH;
+ skb_put_data(skb, &hdr, sizeof(hdr));
+ skb_put_data(skb, &la.path.cck, sizeof(la.path.cck));
+ skb_put_data(skb, &la.path.ofdm, sizeof(la.path.ofdm));
+ skb_put_data(skb, &la.path.ofdm_bf[1], sizeof(la.path.ofdm_bf) - 1);
+
+ /* HT20 and HT40 */
+ skb_put_data(skb, &la.path.ru[3], sizeof(la.path.ru[3]));
+ skb_put_data(skb, &la.path.ru_bf[3][1], sizeof(la.path.ru_bf[3]) - 1);
+ skb_put_data(skb, &la.path.ru[4], sizeof(la.path.ru[4]));
+ skb_put_data(skb, &la.path.ru_bf[4][1], sizeof(la.path.ru_bf[4]) - 1);
+
+ /* start from non-bf and bf fields of
+ * BW20/RU242, BW40/RU484, BW80/RU996, BW160/RU2x996,
+ * RU26, RU52, and RU106
+ */
+
+ for (i = 0; i < 8; i++) {
+ bool bf = i % 2;
+ u8 idx = (i + 6) / 2;
+ s8 *buf = bf ? la.path.ru_bf[idx] : la.path.ru[idx];
+ /* The non-bf fields of RU26 to RU106 are special cases */
+ if (bf)
+ skb_put_data(skb, buf + 1, 9);
+ else
+ skb_put_data(skb, buf, 10);
}
- return mt76_mcu_send_msg(&dev->mt76,
- MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req,
- sizeof(req), true);
+ for (i = 0; i < 6; i++) {
+ bool bf = i % 2;
+ u8 idx = i / 2;
+ s8 *buf = bf ? la.path.ru_bf[idx] : la.path.ru[idx];
+
+ skb_put_data(skb, buf, 10);
+ }
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), true);
}
-int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len)
+int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len,
+ u8 category)
{
#define RATE_POWER_INFO 2
struct mt7915_dev *dev = phy->dev;
@@ -3460,10 +3531,9 @@ int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len)
u8 _rsv;
} __packed req = {
.format_id = TX_POWER_LIMIT_INFO,
- .category = RATE_POWER_INFO,
+ .category = category,
.band_idx = phy->mt76->band_idx,
};
- s8 txpower_sku[MT7915_SKU_RATE_NUM][2];
struct sk_buff *skb;
int ret, i;
@@ -3473,9 +3543,15 @@ int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len)
if (ret)
return ret;
- memcpy(txpower_sku, skb->data + 4, sizeof(txpower_sku));
- for (i = 0; i < len; i++)
- txpower[i] = txpower_sku[i][req.band_idx];
+ if (category == TX_POWER_INFO_RATE) {
+ s8 res[MT7915_SKU_RATE_NUM][2];
+
+ memcpy(res, skb->data + 4, sizeof(res));
+ for (i = 0; i < len; i++)
+ txpower[i] = res[i][req.band_idx];
+ } else if (category == TX_POWER_INFO_PATH) {
+ memcpy(txpower, skb->data + 4, len);
+ }
dev_kfree_skb(skb);
@@ -3504,7 +3580,7 @@ int mt7915_mcu_set_test_param(struct mt7915_dev *dev, u8 param, bool test_mode,
sizeof(req), false);
}
-int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable)
+int mt7915_mcu_set_sku_en(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
struct mt7915_sku {
@@ -3513,10 +3589,21 @@ int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable)
u8 band_idx;
u8 rsv;
} __packed req = {
- .format_id = TX_POWER_LIMIT_ENABLE,
.band_idx = phy->mt76->band_idx,
- .sku_enable = enable,
};
+ int ret;
+
+ req.sku_enable = phy->sku_limit_en;
+ req.format_id = TX_POWER_LIMIT_ENABLE;
+
+ ret = mt76_mcu_send_msg(&dev->mt76,
+ MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req,
+ sizeof(req), true);
+ if (ret)
+ return ret;
+
+ req.sku_enable = phy->sku_path_en;
+ req.format_id = TX_POWER_LIMIT_PATH_ENABLE;
return mt76_mcu_send_msg(&dev->mt76,
MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req,
diff --git a/sys/contrib/dev/mediatek/mt76/mt7915/mcu.h b/sys/contrib/dev/mediatek/mt76/mt7915/mcu.h
index 086ad89ecd91..3af11a075a2f 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7915/mcu.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7915/mcu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2020 MediaTek Inc. */
#ifndef __MT7915_MCU_H
@@ -429,6 +429,7 @@ enum {
enum {
TX_POWER_LIMIT_ENABLE,
+ TX_POWER_LIMIT_PATH_ENABLE = 0x3,
TX_POWER_LIMIT_TABLE = 0x4,
TX_POWER_LIMIT_INFO = 0x7,
TX_POWER_LIMIT_FRAME = 0x11,
@@ -436,6 +437,11 @@ enum {
};
enum {
+ TX_POWER_INFO_PATH = 1,
+ TX_POWER_INFO_RATE,
+};
+
+enum {
SPR_ENABLE = 0x1,
SPR_ENABLE_SD = 0x3,
SPR_ENABLE_MODE = 0x5,
diff --git a/sys/contrib/dev/mediatek/mt76/mt7915/mmio.c b/sys/contrib/dev/mediatek/mt76/mt7915/mmio.c
index ace2441d41a9..eb79c066fc2b 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7915/mmio.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7915/mmio.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc. */
#if defined(__FreeBSD__)
@@ -676,8 +676,8 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
MT_RXQ_WED_RING_BASE;
wed->wlan.wpdma_rx_glo = pci_resource_start(pci_dev, 0) +
MT_WPDMA_GLO_CFG;
- wed->wlan.wpdma_rx = pci_resource_start(pci_dev, 0) +
- MT_RXQ_WED_DATA_RING_BASE;
+ wed->wlan.wpdma_rx[0] = pci_resource_start(pci_dev, 0) +
+ MT_RXQ_WED_DATA_RING_BASE;
} else {
struct platform_device *plat_dev = pdev_ptr;
struct resource *res;
@@ -699,7 +699,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
wed->wlan.wpdma_tx = res->start + MT_TXQ_WED_RING_BASE;
wed->wlan.wpdma_txfree = res->start + MT_RXQ_WED_RING_BASE;
wed->wlan.wpdma_rx_glo = res->start + MT_WPDMA_GLO_CFG;
- wed->wlan.wpdma_rx = res->start + MT_RXQ_WED_DATA_RING_BASE;
+ wed->wlan.wpdma_rx[0] = res->start + MT_RXQ_WED_DATA_RING_BASE;
}
wed->wlan.nbuf = MT7915_HW_TOKEN_SIZE;
wed->wlan.tx_tbit[0] = is_mt7915(&dev->mt76) ? 4 : 30;
diff --git a/sys/contrib/dev/mediatek/mt76/mt7915/mt7915.h b/sys/contrib/dev/mediatek/mt76/mt7915/mt7915.h
index d674186488f8..71f417468f48 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7915/mt7915.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7915/mt7915.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2020 MediaTek Inc. */
#ifndef __MT7915_H
@@ -73,6 +73,7 @@
#define MT7915_CDEV_THROTTLE_MAX 99
#define MT7915_SKU_RATE_NUM 161
+#define MT7915_SKU_PATH_NUM 185
#define MT7915_MAX_TWT_AGRT 16
#define MT7915_MAX_STA_TWT_AGRT 8
@@ -226,6 +227,9 @@ struct mt7915_phy {
struct mt76_mib_stats mib;
struct mt76_channel_state state_ts;
+ bool sku_limit_en:1;
+ bool sku_path_en:1;
+
#ifdef CONFIG_NL80211_TESTMODE
struct {
u32 *reg_backup;
@@ -494,9 +498,10 @@ int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable,
int mt7915_mcu_set_test_param(struct mt7915_dev *dev, u8 param, bool test_mode,
u8 en);
int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band);
-int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable);
+int mt7915_mcu_set_sku_en(struct mt7915_phy *phy);
int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy);
-int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len);
+int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len,
+ u8 category);
int mt7915_mcu_set_txpower_frame_min(struct mt7915_phy *phy, s8 txpower);
int mt7915_mcu_set_txpower_frame(struct mt7915_phy *phy,
struct ieee80211_vif *vif,
diff --git a/sys/contrib/dev/mediatek/mt76/mt7915/pci.c b/sys/contrib/dev/mediatek/mt76/mt7915/pci.c
index 26acac3e59fd..4e94a137c077 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7915/pci.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7915/pci.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc.
*
* Author: Ryder Lee <ryder.lee@mediatek.com>
diff --git a/sys/contrib/dev/mediatek/mt76/mt7915/regs.h b/sys/contrib/dev/mediatek/mt76/mt7915/regs.h
index c5ec63a25a42..307bf6a75674 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7915/regs.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7915/regs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2020 MediaTek Inc. */
#ifndef __MT7915_REGS_H
diff --git a/sys/contrib/dev/mediatek/mt76/mt7915/soc.c b/sys/contrib/dev/mediatek/mt76/mt7915/soc.c
index c823a7554a3a..54ff6de96f3e 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7915/soc.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7915/soc.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2022 MediaTek Inc. */
#include <linux/kernel.h>
@@ -284,20 +284,15 @@ static int mt798x_wmac_coninfra_check(struct mt7915_dev *dev)
static int mt798x_wmac_coninfra_setup(struct mt7915_dev *dev)
{
struct device *pdev = dev->mt76.dev;
- struct reserved_mem *rmem;
- struct device_node *np;
+ struct resource res;
u32 val;
+ int ret;
- np = of_parse_phandle(pdev->of_node, "memory-region", 0);
- if (!np)
- return -EINVAL;
-
- rmem = of_reserved_mem_lookup(np);
- of_node_put(np);
- if (!rmem)
- return -EINVAL;
+ ret = of_reserved_mem_region_to_resource(pdev->of_node, 0, &res);
+ if (ret)
+ return ret;
- val = (rmem->base >> 16) & MT_TOP_MCU_EMI_BASE_MASK;
+ val = (res.start >> 16) & MT_TOP_MCU_EMI_BASE_MASK;
if (is_mt7986(&dev->mt76)) {
/* Set conninfra subsys PLL check */
@@ -318,8 +313,8 @@ static int mt798x_wmac_coninfra_setup(struct mt7915_dev *dev)
MT_TOP_EFUSE_BASE_MASK, 0x11f20000 >> 16);
}
- mt76_wr(dev, MT_INFRA_BUS_EMI_START, rmem->base);
- mt76_wr(dev, MT_INFRA_BUS_EMI_END, rmem->size);
+ mt76_wr(dev, MT_INFRA_BUS_EMI_START, res.start);
+ mt76_wr(dev, MT_INFRA_BUS_EMI_END, resource_size(&res));
mt76_rr(dev, MT_CONN_INFRA_EFUSE);
diff --git a/sys/contrib/dev/mediatek/mt76/mt7915/testmode.c b/sys/contrib/dev/mediatek/mt76/mt7915/testmode.c
index d534fff5c952..618a5c2bdd29 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7915/testmode.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7915/testmode.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc. */
#include "mt7915.h"
@@ -409,7 +409,7 @@ mt7915_tm_init(struct mt7915_phy *phy, bool en)
if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
return;
- mt7915_mcu_set_sku_en(phy, !en);
+ mt7915_mcu_set_sku_en(phy);
mt7915_tm_mode_ctrl(dev, en);
mt7915_tm_reg_backup_restore(phy);
diff --git a/sys/contrib/dev/mediatek/mt76/mt7915/testmode.h b/sys/contrib/dev/mediatek/mt76/mt7915/testmode.h
index 5573ac309363..bb1bc568751b 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7915/testmode.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7915/testmode.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2020 MediaTek Inc. */
#ifndef __MT7915_TESTMODE_H
diff --git a/sys/contrib/dev/mediatek/mt76/mt7921/Kconfig b/sys/contrib/dev/mediatek/mt76/mt7921/Kconfig
index 7ed51e057857..37b5f46e76f4 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7921/Kconfig
+++ b/sys/contrib/dev/mediatek/mt76/mt7921/Kconfig
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: ISC
+# SPDX-License-Identifier: BSD-3-Clause-Clear
config MT7921_COMMON
tristate
select MT792x_LIB
diff --git a/sys/contrib/dev/mediatek/mt76/mt7921/Makefile b/sys/contrib/dev/mediatek/mt76/mt7921/Makefile
index 849be9e848e0..2ad3c1cc3779 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7921/Makefile
+++ b/sys/contrib/dev/mediatek/mt76/mt7921/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: ISC
+# SPDX-License-Identifier: BSD-3-Clause-Clear
obj-$(CONFIG_MT7921_COMMON) += mt7921-common.o
obj-$(CONFIG_MT7921E) += mt7921e.o
diff --git a/sys/contrib/dev/mediatek/mt76/mt7921/debugfs.c b/sys/contrib/dev/mediatek/mt76/mt7921/debugfs.c
index 616b66a3fde2..4333005b3ad9 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7921/debugfs.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7921/debugfs.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc. */
#include "mt7921.h"
diff --git a/sys/contrib/dev/mediatek/mt76/mt7921/init.c b/sys/contrib/dev/mediatek/mt76/mt7921/init.c
index 2498f6d30f4f..dbbc9e945fe2 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7921/init.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7921/init.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc. */
#include <linux/etherdevice.h>
@@ -199,7 +199,9 @@ static int __mt7921_init_hardware(struct mt792x_dev *dev)
if (ret)
goto out;
- mt76_eeprom_override(&dev->mphy);
+ ret = mt76_eeprom_override(&dev->mphy);
+ if (ret)
+ goto out;
ret = mt7921_mcu_set_eeprom(dev);
if (ret)
@@ -355,7 +357,7 @@ int mt7921_register_device(struct mt792x_dev *dev)
dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask;
dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask;
- queue_work(system_wq, &dev->init_work);
+ queue_work(system_percpu_wq, &dev->init_work);
return 0;
}
diff --git a/sys/contrib/dev/mediatek/mt76/mt7921/mac.c b/sys/contrib/dev/mediatek/mt76/mt7921/mac.c
index 577b27d9faa0..36da611f9bfc 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7921/mac.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7921/mac.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc. */
#include <linux/devcoredump.h>
diff --git a/sys/contrib/dev/mediatek/mt76/mt7921/main.c b/sys/contrib/dev/mediatek/mt76/mt7921/main.c
index 5881040ac195..5fae9a6e273c 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7921/main.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7921/main.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc. */
#include <linux/etherdevice.h>
@@ -135,6 +135,8 @@ mt7921_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band,
if (is_mt7922(phy->mt76->dev)) {
he_cap_elem->phy_cap_info[0] |=
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+ he_cap_elem->phy_cap_info[4] |=
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
he_cap_elem->phy_cap_info[8] |=
IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
diff --git a/sys/contrib/dev/mediatek/mt76/mt7921/mcu.c b/sys/contrib/dev/mediatek/mt76/mt7921/mcu.c
index 57b9de7a6659..c553423e178c 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7921/mcu.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7921/mcu.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc. */
#if defined(__FreeBSD__)
@@ -659,10 +659,10 @@ int mt7921_run_firmware(struct mt792x_dev *dev)
if (err)
return err;
- set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
err = mt7921_load_clc(dev, mt792x_ram_name(dev));
if (err)
return err;
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
return mt7921_mcu_fw_log_2_host(dev, 1);
}
diff --git a/sys/contrib/dev/mediatek/mt76/mt7921/mcu.h b/sys/contrib/dev/mediatek/mt76/mt7921/mcu.h
index 2834c6c53e58..de676b83b89c 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7921/mcu.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7921/mcu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2020 MediaTek Inc. */
#ifndef __MT7921_MCU_H
diff --git a/sys/contrib/dev/mediatek/mt76/mt7921/mt7921.h b/sys/contrib/dev/mediatek/mt76/mt7921/mt7921.h
index c88793fcec64..83fc7f49ff84 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7921/mt7921.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7921/mt7921.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2020 MediaTek Inc. */
#ifndef __MT7921_H
diff --git a/sys/contrib/dev/mediatek/mt76/mt7921/pci.c b/sys/contrib/dev/mediatek/mt76/mt7921/pci.c
index bde91b0f0a59..30660837077f 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7921/pci.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7921/pci.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc.
*
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt7921/pci_mac.c b/sys/contrib/dev/mediatek/mt76/mt7921/pci_mac.c
index 881812ba03ff..5ec084432ae3 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7921/pci_mac.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7921/pci_mac.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2021 MediaTek Inc. */
#include "mt7921.h"
diff --git a/sys/contrib/dev/mediatek/mt76/mt7921/pci_mcu.c b/sys/contrib/dev/mediatek/mt76/mt7921/pci_mcu.c
index 4cf1f2f0f968..8439c849a7a6 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7921/pci_mcu.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7921/pci_mcu.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2021 MediaTek Inc. */
#include "mt7921.h"
diff --git a/sys/contrib/dev/mediatek/mt76/mt7921/regs.h b/sys/contrib/dev/mediatek/mt76/mt7921/regs.h
index 43427a3a48af..4d9eaf1e0692 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7921/regs.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7921/regs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2020 MediaTek Inc. */
#ifndef __MT7921_REGS_H
diff --git a/sys/contrib/dev/mediatek/mt76/mt7921/sdio.c b/sys/contrib/dev/mediatek/mt76/mt7921/sdio.c
index d8d36b3c3068..3421e53dc948 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7921/sdio.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7921/sdio.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2021 MediaTek Inc.
*
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt7921/sdio_mac.c b/sys/contrib/dev/mediatek/mt76/mt7921/sdio_mac.c
index a9eb6252a904..416d49e53499 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7921/sdio_mac.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7921/sdio_mac.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2021 MediaTek Inc. */
#include <linux/iopoll.h>
diff --git a/sys/contrib/dev/mediatek/mt76/mt7921/sdio_mcu.c b/sys/contrib/dev/mediatek/mt76/mt7921/sdio_mcu.c
index 5e4501d7f1c0..14e66f3f5aad 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7921/sdio_mcu.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7921/sdio_mcu.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2021 MediaTek Inc. */
#include <linux/kernel.h>
diff --git a/sys/contrib/dev/mediatek/mt76/mt7921/testmode.c b/sys/contrib/dev/mediatek/mt76/mt7921/testmode.c
index e838d93477c1..e60ee992edf8 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7921/testmode.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7921/testmode.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
#include "mt7921.h"
#include "mcu.h"
diff --git a/sys/contrib/dev/mediatek/mt76/mt7921/usb.c b/sys/contrib/dev/mediatek/mt76/mt7921/usb.c
index fe9751851ff7..17057e68bf21 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7921/usb.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7921/usb.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2022 MediaTek Inc.
*
* Author: Lorenzo Bianconi <lorenzo@kernel.org>
@@ -21,6 +21,9 @@ static const struct usb_device_id mt7921u_device_table[] = {
/* Netgear, Inc. [A8000,AXE3000] */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9060, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ /* Netgear, Inc. A7500 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9065, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
/* TP-Link TXE50UH */
{ USB_DEVICE_AND_INTERFACE_INFO(0x35bc, 0x0107, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
diff --git a/sys/contrib/dev/mediatek/mt76/mt7925/Kconfig b/sys/contrib/dev/mediatek/mt76/mt7925/Kconfig
index 5854e95e68a5..f4f7c93c2ea7 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7925/Kconfig
+++ b/sys/contrib/dev/mediatek/mt76/mt7925/Kconfig
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: ISC
+# SPDX-License-Identifier: BSD-3-Clause-Clear
config MT7925_COMMON
tristate
select MT792x_LIB
diff --git a/sys/contrib/dev/mediatek/mt76/mt7925/Makefile b/sys/contrib/dev/mediatek/mt76/mt7925/Makefile
index ade5e647c941..8f1078ce3231 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7925/Makefile
+++ b/sys/contrib/dev/mediatek/mt76/mt7925/Makefile
@@ -1,10 +1,10 @@
-# SPDX-License-Identifier: ISC
+# SPDX-License-Identifier: BSD-3-Clause-Clear
obj-$(CONFIG_MT7925_COMMON) += mt7925-common.o
obj-$(CONFIG_MT7925E) += mt7925e.o
obj-$(CONFIG_MT7925U) += mt7925u.o
-mt7925-common-y := mac.o mcu.o main.o init.o debugfs.o
+mt7925-common-y := mac.o mcu.o regd.o main.o init.o debugfs.o
mt7925-common-$(CONFIG_NL80211_TESTMODE) += testmode.o
mt7925e-y := pci.o pci_mac.o pci_mcu.o
mt7925u-y := usb.o
diff --git a/sys/contrib/dev/mediatek/mt76/mt7925/debugfs.c b/sys/contrib/dev/mediatek/mt76/mt7925/debugfs.c
index 1e2fc6577e78..e2498659c884 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7925/debugfs.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7925/debugfs.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2023 MediaTek Inc. */
#include "mt7925.h"
diff --git a/sys/contrib/dev/mediatek/mt76/mt7925/init.c b/sys/contrib/dev/mediatek/mt76/mt7925/init.c
index a5b893b39568..31592775feb3 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7925/init.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7925/init.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2023 MediaTek Inc. */
#include <linux/etherdevice.h>
@@ -7,6 +7,7 @@
#include <linux/thermal.h>
#include <linux/firmware.h>
#include "mt7925.h"
+#include "regd.h"
#include "mac.h"
#include "mcu.h"
@@ -62,151 +63,6 @@ static int mt7925_thermal_init(struct mt792x_phy *phy)
}
#endif
-void mt7925_regd_be_ctrl(struct mt792x_dev *dev, u8 *alpha2)
-{
- struct mt792x_phy *phy = &dev->phy;
- struct mt7925_clc_rule_v2 *rule;
- struct mt7925_clc *clc;
- bool old = dev->has_eht, new = true;
- u32 mtcl_conf = mt792x_acpi_get_mtcl_conf(&dev->phy, alpha2);
- u8 *pos;
-
- if (mtcl_conf != MT792X_ACPI_MTCL_INVALID &&
- (((mtcl_conf >> 4) & 0x3) == 0)) {
- new = false;
- goto out;
- }
-
- if (!phy->clc[MT792x_CLC_BE_CTRL])
- goto out;
-
- clc = (struct mt7925_clc *)phy->clc[MT792x_CLC_BE_CTRL];
- pos = clc->data;
-
- while (1) {
- rule = (struct mt7925_clc_rule_v2 *)pos;
-
- if (rule->alpha2[0] == alpha2[0] &&
- rule->alpha2[1] == alpha2[1]) {
- new = false;
- break;
- }
-
- /* Check the last one */
- if (rule->flag & BIT(0))
- break;
-
- pos += sizeof(*rule);
- }
-
-out:
- if (old == new)
- return;
-
- dev->has_eht = new;
- mt7925_set_stream_he_eht_caps(phy);
-}
-
-static void
-mt7925_regd_channel_update(struct wiphy *wiphy, struct mt792x_dev *dev)
-{
-#define IS_UNII_INVALID(idx, sfreq, efreq, cfreq) \
- (!(dev->phy.clc_chan_conf & BIT(idx)) && (cfreq) >= (sfreq) && (cfreq) <= (efreq))
-#define MT7925_UNII_59G_IS_VALID 0x1
-#define MT7925_UNII_6G_IS_VALID 0x1e
- struct ieee80211_supported_band *sband;
- struct mt76_dev *mdev = &dev->mt76;
- struct ieee80211_channel *ch;
- u32 mtcl_conf = mt792x_acpi_get_mtcl_conf(&dev->phy, mdev->alpha2);
- int i;
-
- if (mtcl_conf != MT792X_ACPI_MTCL_INVALID) {
- if ((mtcl_conf & 0x3) == 0)
- dev->phy.clc_chan_conf &= ~MT7925_UNII_59G_IS_VALID;
- if (((mtcl_conf >> 2) & 0x3) == 0)
- dev->phy.clc_chan_conf &= ~MT7925_UNII_6G_IS_VALID;
- }
-
- sband = wiphy->bands[NL80211_BAND_5GHZ];
- if (!sband)
- return;
-
- for (i = 0; i < sband->n_channels; i++) {
- ch = &sband->channels[i];
-
- /* UNII-4 */
- if (IS_UNII_INVALID(0, 5845, 5925, ch->center_freq))
- ch->flags |= IEEE80211_CHAN_DISABLED;
- }
-
- sband = wiphy->bands[NL80211_BAND_6GHZ];
- if (!sband)
- return;
-
- for (i = 0; i < sband->n_channels; i++) {
- ch = &sband->channels[i];
-
- /* UNII-5/6/7/8 */
- if (IS_UNII_INVALID(1, 5925, 6425, ch->center_freq) ||
- IS_UNII_INVALID(2, 6425, 6525, ch->center_freq) ||
- IS_UNII_INVALID(3, 6525, 6875, ch->center_freq) ||
- IS_UNII_INVALID(4, 6875, 7125, ch->center_freq))
- ch->flags |= IEEE80211_CHAN_DISABLED;
- }
-}
-
-void mt7925_regd_update(struct mt792x_dev *dev)
-{
- struct mt76_dev *mdev = &dev->mt76;
- struct ieee80211_hw *hw = mdev->hw;
- struct wiphy *wiphy = hw->wiphy;
-
- if (!dev->regd_change)
- return;
-
- mt7925_mcu_set_clc(dev, mdev->alpha2, dev->country_ie_env);
- mt7925_regd_channel_update(wiphy, dev);
- mt7925_mcu_set_channel_domain(hw->priv);
- mt7925_set_tx_sar_pwr(hw, NULL);
- dev->regd_change = false;
-}
-EXPORT_SYMBOL_GPL(mt7925_regd_update);
-
-static void
-mt7925_regd_notifier(struct wiphy *wiphy,
- struct regulatory_request *req)
-{
- struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
- struct mt792x_dev *dev = mt792x_hw_dev(hw);
- struct mt76_dev *mdev = &dev->mt76;
- struct mt76_connac_pm *pm = &dev->pm;
-
- /* allow world regdom at the first boot only */
- if (!memcmp(req->alpha2, "00", 2) &&
- mdev->alpha2[0] && mdev->alpha2[1])
- return;
-
- /* do not need to update the same country twice */
- if (!memcmp(req->alpha2, mdev->alpha2, 2) &&
- dev->country_ie_env == req->country_ie_env)
- return;
-
- memcpy(mdev->alpha2, req->alpha2, 2);
- mdev->region = req->dfs_region;
- dev->country_ie_env = req->country_ie_env;
- dev->regd_change = true;
-
- if (pm->suspended)
- return;
-
- dev->regd_in_progress = true;
- mt792x_mutex_acquire(dev);
- mt7925_regd_update(dev);
- mt792x_mutex_release(dev);
- dev->regd_in_progress = false;
- wake_up(&dev->wait);
-}
-
static void mt7925_mac_init_basic_rates(struct mt792x_dev *dev)
{
int i;
@@ -237,8 +93,6 @@ int mt7925_mac_init(struct mt792x_dev *dev)
mt7925_mac_init_basic_rates(dev);
- memzero_explicit(&dev->mt76.alpha2, sizeof(dev->mt76.alpha2));
-
return 0;
}
EXPORT_SYMBOL_GPL(mt7925_mac_init);
@@ -251,7 +105,9 @@ static int __mt7925_init_hardware(struct mt792x_dev *dev)
if (ret)
goto out;
- mt76_eeprom_override(&dev->mphy);
+ ret = mt76_eeprom_override(&dev->mphy);
+ if (ret)
+ goto out;
ret = mt7925_mcu_set_eeprom(dev);
if (ret)
@@ -424,7 +280,7 @@ int mt7925_register_device(struct mt792x_dev *dev)
dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask;
dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask;
- queue_work(system_wq, &dev->init_work);
+ queue_work(system_percpu_wq, &dev->init_work);
return 0;
}
diff --git a/sys/contrib/dev/mediatek/mt76/mt7925/mac.c b/sys/contrib/dev/mediatek/mt76/mt7925/mac.c
index 63995bf9c5d4..1b1a23198b9f 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7925/mac.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7925/mac.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2023 MediaTek Inc. */
#include <linux/devcoredump.h>
@@ -6,6 +6,7 @@
#include <linux/timekeeping.h>
#include "mt7925.h"
#include "../dma.h"
+#include "regd.h"
#include "mac.h"
#include "mcu.h"
@@ -1304,7 +1305,6 @@ void mt7925_mac_reset_work(struct work_struct *work)
cancel_delayed_work_sync(&dev->mphy.mac_work);
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
- dev->sar_inited = false;
for (i = 0; i < 10; i++) {
mutex_lock(&dev->mt76.mutex);
@@ -1333,6 +1333,8 @@ void mt7925_mac_reset_work(struct work_struct *work)
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7925_vif_connect_iter, NULL);
mt76_connac_power_save_sched(&dev->mt76.phy, pm);
+
+ mt7925_regd_change(&dev->phy, "00");
}
void mt7925_coredump_work(struct work_struct *work)
diff --git a/sys/contrib/dev/mediatek/mt76/mt7925/mac.h b/sys/contrib/dev/mediatek/mt76/mt7925/mac.h
index b10a993326b9..83ea9021daea 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7925/mac.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7925/mac.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2023 MediaTek Inc. */
#ifndef __MT7925_MAC_H
diff --git a/sys/contrib/dev/mediatek/mt76/mt7925/main.c b/sys/contrib/dev/mediatek/mt76/mt7925/main.c
index b0e053b15227..2d358a96640c 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7925/main.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7925/main.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2023 MediaTek Inc. */
#include <linux/etherdevice.h>
@@ -8,6 +8,7 @@
#include <linux/ctype.h>
#include <net/ipv6.h>
#include "mt7925.h"
+#include "regd.h"
#include "mcu.h"
#include "mac.h"
@@ -138,10 +139,14 @@ mt7925_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band,
}
if (band == NL80211_BAND_6GHZ) {
+ struct ieee80211_supported_band *sband =
+ &phy->mt76->sband_5g.sband;
+ struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
+
u16 cap = IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS |
IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS;
- cap |= u16_encode_bits(IEEE80211_HT_MPDU_DENSITY_0_5,
+ cap |= u16_encode_bits(ht_cap->ampdu_density,
IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
u16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
@@ -240,6 +245,7 @@ int mt7925_init_mlo_caps(struct mt792x_phy *phy)
{
struct wiphy *wiphy = phy->mt76->hw->wiphy;
static const u8 ext_capa_sta[] = {
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
};
static struct wiphy_iftype_ext_capab ext_capab[] = {
@@ -310,7 +316,6 @@ void mt7925_set_stream_he_eht_caps(struct mt792x_phy *phy)
int __mt7925_start(struct mt792x_phy *phy)
{
struct mt76_phy *mphy = phy->mt76;
- struct mt792x_dev *dev = phy->dev;
int err;
err = mt7925_mcu_set_channel_domain(mphy);
@@ -321,13 +326,6 @@ int __mt7925_start(struct mt792x_phy *phy)
if (err)
return err;
- if (!dev->sar_inited) {
- err = mt7925_set_tx_sar_pwr(mphy->hw, NULL);
- if (err)
- return err;
- dev->sar_inited = true;
- }
-
mt792x_mac_reset_counters(phy);
set_bit(MT76_STATE_RUNNING, &mphy->state);
@@ -437,6 +435,9 @@ mt7925_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
goto out;
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+ if (phy->chip_cap & MT792x_CHIP_CAP_RSSI_NOTIFY_EVT_EN)
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+
out:
mt792x_mutex_release(dev);
@@ -987,56 +988,6 @@ int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt7925_mac_sta_add);
-static u16
-mt7925_mac_select_links(struct mt76_dev *mdev, struct ieee80211_vif *vif)
-{
- unsigned long usable_links = ieee80211_vif_usable_links(vif);
- struct {
- u8 link_id;
- enum nl80211_band band;
- } data[IEEE80211_MLD_MAX_NUM_LINKS];
- u8 link_id, i, j, n_data = 0;
- u16 sel_links = 0;
-
- if (!ieee80211_vif_is_mld(vif))
- return 0;
-
- if (vif->active_links == usable_links)
- return vif->active_links;
-
- rcu_read_lock();
- for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
- struct ieee80211_bss_conf *link_conf =
- rcu_dereference(vif->link_conf[link_id]);
-
- if (WARN_ON_ONCE(!link_conf))
- continue;
-
- data[n_data].link_id = link_id;
- data[n_data].band = link_conf->chanreq.oper.chan->band;
- n_data++;
- }
- rcu_read_unlock();
-
- for (i = 0; i < n_data; i++) {
- if (!(BIT(data[i].link_id) & vif->active_links))
- continue;
-
- sel_links = BIT(data[i].link_id);
-
- for (j = 0; j < n_data; j++) {
- if (data[i].band != data[j].band) {
- sel_links |= BIT(data[j].link_id);
- break;
- }
- }
-
- break;
- }
-
- return sel_links;
-}
-
static void
mt7925_mac_set_links(struct mt76_dev *mdev, struct ieee80211_vif *vif)
{
@@ -1047,7 +998,7 @@ mt7925_mac_set_links(struct mt76_dev *mdev, struct ieee80211_vif *vif)
struct cfg80211_chan_def *chandef = &link_conf->chanreq.oper;
enum nl80211_band band = chandef->chan->band, secondary_band;
- u16 sel_links = mt7925_mac_select_links(mdev, vif);
+ u16 sel_links = mt76_select_links(vif, 2);
u8 secondary_link_id = __ffs(~BIT(mvif->deflink_id) & sel_links);
if (!ieee80211_vif_is_mld(vif) || hweight16(sel_links) < 2)
@@ -1369,20 +1320,6 @@ void mt7925_mlo_pm_work(struct work_struct *work)
mt7925_mlo_pm_iter, dev);
}
-static bool is_valid_alpha2(const char *alpha2)
-{
- if (!alpha2)
- return false;
-
- if (alpha2[0] == '0' && alpha2[1] == '0')
- return true;
-
- if (isalpha(alpha2[0]) && isalpha(alpha2[1]))
- return true;
-
- return false;
-}
-
void mt7925_scan_work(struct work_struct *work)
{
struct mt792x_phy *phy;
@@ -1391,7 +1328,6 @@ void mt7925_scan_work(struct work_struct *work)
scan_work.work);
while (true) {
- struct mt76_dev *mdev = &phy->dev->mt76;
struct sk_buff *skb;
struct tlv *tlv;
int tlv_len;
@@ -1422,15 +1358,7 @@ void mt7925_scan_work(struct work_struct *work)
case UNI_EVENT_SCAN_DONE_CHNLINFO:
evt = (struct mt7925_mcu_scan_chinfo_event *)tlv->data;
- if (!is_valid_alpha2(evt->alpha2))
- break;
-
- mt7925_regd_be_ctrl(phy->dev, evt->alpha2);
-
- if (mdev->alpha2[0] != '0' && mdev->alpha2[1] != '0')
- break;
-
- mt7925_mcu_set_clc(phy->dev, evt->alpha2, ENVIRON_INDOOR);
+ mt7925_regd_change(phy, evt->alpha2);
break;
case UNI_EVENT_SCAN_DONE_NLO:
@@ -1731,13 +1659,7 @@ static int mt7925_set_sar_specs(struct ieee80211_hw *hw,
int err;
mt792x_mutex_acquire(dev);
- err = mt7925_mcu_set_clc(dev, dev->mt76.alpha2,
- dev->country_ie_env);
- if (err < 0)
- goto out;
-
err = mt7925_set_tx_sar_pwr(hw, sar);
-out:
mt792x_mutex_release(dev);
return err;
@@ -2021,6 +1943,9 @@ static void mt7925_link_info_changed(struct ieee80211_hw *hw,
mt7925_mcu_set_eht_pp(mvif->phy->mt76, &mconf->mt76,
link_conf, NULL);
+ if (changed & BSS_CHANGED_CQM)
+ mt7925_mcu_set_rssimonitor(dev, vif);
+
mt792x_mutex_release(dev);
}
diff --git a/sys/contrib/dev/mediatek/mt76/mt7925/mcu.c b/sys/contrib/dev/mediatek/mt76/mt7925/mcu.c
index 77e494a4ece0..987251a1e5c3 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7925/mcu.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7925/mcu.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2023 MediaTek Inc. */
#if defined(__FreeBSD__)
@@ -8,16 +8,13 @@
#include <linux/fs.h>
#include <linux/firmware.h>
#include "mt7925.h"
+#include "regd.h"
#include "mcu.h"
#include "mac.h"
#define MT_STA_BFER BIT(0)
#define MT_STA_BFEE BIT(1)
-static bool mt7925_disable_clc;
-module_param_named(disable_clc, mt7925_disable_clc, bool, 0644);
-MODULE_PARM_DESC(disable_clc, "disable CLC support");
-
int mt7925_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq)
{
@@ -455,6 +452,56 @@ mt7925_mcu_tx_done_event(struct mt792x_dev *dev, struct sk_buff *skb)
}
static void
+mt7925_mcu_rssi_monitor_iter(void *priv, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct mt7925_uni_rssi_monitor_event *event = priv;
+ enum nl80211_cqm_rssi_threshold_event nl_event;
+ s32 rssi = le32_to_cpu(event->rssi);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
+ return;
+
+ if (rssi > vif->bss_conf.cqm_rssi_thold)
+ nl_event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
+ else
+ nl_event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
+
+ ieee80211_cqm_rssi_notify(vif, nl_event, rssi, GFP_KERNEL);
+}
+
+static void
+mt7925_mcu_rssi_monitor_event(struct mt792x_dev *dev, struct sk_buff *skb)
+{
+ struct tlv *tlv;
+ u32 tlv_len;
+ struct mt7925_uni_rssi_monitor_event *event;
+
+ skb_pull(skb, sizeof(struct mt7925_mcu_rxd) + 4);
+ tlv = (struct tlv *)skb->data;
+ tlv_len = skb->len;
+
+ while (tlv_len > 0 && le16_to_cpu(tlv->len) <= tlv_len) {
+ switch (le16_to_cpu(tlv->tag)) {
+ case UNI_EVENT_RSSI_MONITOR_INFO:
+ event = (struct mt7925_uni_rssi_monitor_event *)skb->data;
+ ieee80211_iterate_active_interfaces_atomic(dev->mt76.hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7925_mcu_rssi_monitor_iter,
+ event);
+ break;
+ default:
+ break;
+ }
+ tlv_len -= le16_to_cpu(tlv->len);
+ tlv = (struct tlv *)((char *)(tlv) + le16_to_cpu(tlv->len));
+ }
+}
+
+static void
mt7925_mcu_uni_debug_msg_event(struct mt792x_dev *dev, struct sk_buff *skb)
{
struct mt7925_uni_debug_msg {
@@ -550,6 +597,9 @@ mt7925_mcu_uni_rx_unsolicited_event(struct mt792x_dev *dev,
case MCU_UNI_EVENT_BSS_BEACON_LOSS:
mt7925_mcu_connection_loss_event(dev, skb);
break;
+ case MCU_UNI_EVENT_RSSI_MONITOR:
+ mt7925_mcu_rssi_monitor_event(dev, skb);
+ break;
case MCU_UNI_EVENT_COREDUMP:
dev->fw_assert = true;
mt76_connac_mcu_coredump_event(&dev->mt76, skb, &dev->coredump);
@@ -697,8 +747,8 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
int ret, i, len, offset = 0;
dev->phy.clc_chan_conf = 0xff;
- if (mt7925_disable_clc ||
- mt76_is_usb(&dev->mt76))
+ dev->regd_user = false;
+ if (!mt7925_regd_clc_supported(dev))
return 0;
if (mt76_is_mmio(&dev->mt76)) {
@@ -772,7 +822,7 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
}
}
- ret = mt7925_mcu_set_clc(dev, "00", ENVIRON_INDOOR);
+ ret = mt7925_regd_init(phy);
out:
release_firmware(fw);
@@ -1017,10 +1067,10 @@ int mt7925_run_firmware(struct mt792x_dev *dev)
if (err)
return err;
- set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
err = mt7925_load_clc(dev, mt792x_ram_name(dev));
if (err)
return err;
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
return mt7925_mcu_fw_log_2_host(dev, 1);
}
@@ -2635,6 +2685,25 @@ mt7925_mcu_bss_qos_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf
}
static void
+mt7925_mcu_bss_mbssid_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf,
+ bool enable)
+{
+ struct bss_info_uni_mbssid *mbssid;
+ struct tlv *tlv;
+
+ if (!enable && !link_conf->bssid_indicator)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_11V_MBSSID,
+ sizeof(*mbssid));
+
+ mbssid = (struct bss_info_uni_mbssid *)tlv;
+ mbssid->max_indicator = link_conf->bssid_indicator;
+ mbssid->mbss_idx = link_conf->bssid_index;
+ mbssid->tx_bss_omac_idx = 0;
+}
+
+static void
mt7925_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf,
struct mt792x_phy *phy)
{
@@ -2800,8 +2869,10 @@ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
mt7925_mcu_bss_color_tlv(skb, link_conf, enable);
}
- if (enable)
+ if (enable) {
mt7925_mcu_bss_rlm_tlv(skb, phy->mt76, link_conf, ctx);
+ mt7925_mcu_bss_mbssid_tlv(skb, link_conf, enable);
+ }
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_UNI_CMD(BSS_INFO_UPDATE), true);
@@ -3376,6 +3447,9 @@ int mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
struct mt792x_phy *phy = (struct mt792x_phy *)&dev->phy;
int i, ret;
+ if (!ARRAY_SIZE(phy->clc))
+ return -ESRCH;
+
/* submit all clc config */
for (i = 0; i < ARRAY_SIZE(phy->clc); i++) {
if (i == MT792x_CLC_BE_CTRL)
@@ -3716,6 +3790,8 @@ out:
int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy)
{
+ struct mt76_dev *mdev = phy->dev;
+ struct mt792x_dev *dev = mt792x_hw_dev(mdev->hw);
int err;
if (phy->cap.has_2ghz) {
@@ -3732,7 +3808,7 @@ int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy)
return err;
}
- if (phy->cap.has_6ghz) {
+ if (phy->cap.has_6ghz && dev->phy.clc_chan_conf) {
err = mt7925_mcu_rate_txpower_band(phy,
NL80211_BAND_6GHZ);
if (err < 0)
@@ -3809,3 +3885,32 @@ int mt7925_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif,
return mt76_mcu_send_msg(&phy->dev->mt76, MCU_UNI_CMD(BAND_CONFIG),
&req, sizeof(req), true);
}
+
+int mt7925_mcu_set_rssimonitor(struct mt792x_dev *dev, struct ieee80211_vif *vif)
+{
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(&vif->bss_conf);
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ __le16 tag;
+ __le16 len;
+ u8 enable;
+ s8 cqm_rssi_high;
+ s8 cqm_rssi_low;
+ u8 rsv;
+ } req = {
+ .hdr = {
+ .bss_idx = mconf->mt76.idx,
+ },
+ .tag = cpu_to_le16(UNI_CMD_RSSI_MONITOR_SET),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .enable = vif->cfg.assoc,
+ .cqm_rssi_high = (s8)(vif->bss_conf.cqm_rssi_thold + vif->bss_conf.cqm_rssi_hyst),
+ .cqm_rssi_low = (s8)(vif->bss_conf.cqm_rssi_thold - vif->bss_conf.cqm_rssi_hyst),
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(RSSI_MONITOR), &req,
+ sizeof(req), false);
+}
diff --git a/sys/contrib/dev/mediatek/mt76/mt7925/mcu.h b/sys/contrib/dev/mediatek/mt76/mt7925/mcu.h
index a40764d89a1f..e09e0600534a 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7925/mcu.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7925/mcu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2023 MediaTek Inc. */
#ifndef __MT7925_MCU_H
@@ -152,6 +152,14 @@ enum {
UNI_EVENT_SCAN_DONE_NLO = 3,
};
+enum {
+ UNI_CMD_RSSI_MONITOR_SET = 0,
+};
+
+enum {
+ UNI_EVENT_RSSI_MONITOR_INFO = 0,
+};
+
enum connac3_mcu_cipher_type {
CONNAC3_CIPHER_NONE = 0,
CONNAC3_CIPHER_WEP40 = 1,
diff --git a/sys/contrib/dev/mediatek/mt76/mt7925/mt7925.h b/sys/contrib/dev/mediatek/mt76/mt7925/mt7925.h
index 1b165d0d8bd3..6b9bf1b89032 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7925/mt7925.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7925/mt7925.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2023 MediaTek Inc. */
#ifndef __MT7925_H
@@ -103,6 +103,12 @@ struct mt7925_uni_beacon_loss_event {
struct mt7925_beacon_loss_tlv beacon_loss;
} __packed;
+struct mt7925_uni_rssi_monitor_event {
+ __le16 tag;
+ __le16 len;
+ __le32 rssi;
+} __packed;
+
#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
#define to_rcpi(rssi) (2 * (rssi) + 220)
@@ -257,8 +263,6 @@ int mt7925_mcu_chip_config(struct mt792x_dev *dev, const char *cmd);
int mt7925_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif,
u8 bit_op, u32 bit_map);
-void mt7925_regd_be_ctrl(struct mt792x_dev *dev, u8 *alpha2);
-void mt7925_regd_update(struct mt792x_dev *dev);
int mt7925_mac_init(struct mt792x_dev *dev);
int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -372,4 +376,5 @@ int mt7925_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int mt7925_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
struct netlink_callback *cb, void *data, int len);
+int mt7925_mcu_set_rssimonitor(struct mt792x_dev *dev, struct ieee80211_vif *vif);
#endif
diff --git a/sys/contrib/dev/mediatek/mt76/mt7925/pci.c b/sys/contrib/dev/mediatek/mt76/mt7925/pci.c
index e34f99abd16c..021f431c9ded 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7925/pci.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7925/pci.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2023 MediaTek Inc. */
#if defined(__FreeBSD__)
@@ -12,6 +12,7 @@
#include "mt7925.h"
#include "mac.h"
#include "mcu.h"
+#include "regd.h"
#include "../dma.h"
static const struct pci_device_id mt7925_pci_device_table[] = {
@@ -444,9 +445,9 @@ static void mt7925_pci_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pdev);
}
-#if !defined(__FreeBSD__) || defined(CONFIG_PM_SLEEP)
static int mt7925_pci_suspend(struct device *device)
{
+#if !defined(__FreeBSD__) || defined(CONFIG_PM_SLEEP)
struct pci_dev *pdev = to_pci_dev(device);
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
@@ -532,10 +533,14 @@ restore_suspend:
mt792x_reset(&dev->mt76);
return err;
+#else
+ return (-EOPNOTSUPP);
+#endif
}
-static int mt7925_pci_resume(struct device *device)
+static int _mt7925_pci_resume(struct device *device, bool restore)
{
+#if !defined(__FreeBSD__) || defined(CONFIG_PM_SLEEP)
struct pci_dev *pdev = to_pci_dev(device);
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
@@ -574,6 +579,9 @@ static int mt7925_pci_resume(struct device *device)
napi_schedule(&mdev->tx_napi);
local_bh_enable();
+ if (restore)
+ goto failed;
+
mt76_connac_mcu_set_hif_suspend(mdev, false, false);
ret = wait_event_timeout(dev->wait,
dev->hif_resumed, 3 * HZ);
@@ -586,23 +594,42 @@ static int mt7925_pci_resume(struct device *device)
if (!pm->ds_enable)
mt7925_mcu_set_deep_sleep(dev, false);
- mt7925_regd_update(dev);
+ mt7925_mcu_regd_update(dev, mdev->alpha2, dev->country_ie_env);
failed:
pm->suspended = false;
- if (err < 0)
+ if (err < 0 || restore)
mt792x_reset(&dev->mt76);
return err;
-}
+#else
+ return (-EOPNOTSUPP);
#endif
+}
static void mt7925_pci_shutdown(struct pci_dev *pdev)
{
mt7925_pci_remove(pdev);
}
-static DEFINE_SIMPLE_DEV_PM_OPS(mt7925_pm_ops, mt7925_pci_suspend, mt7925_pci_resume);
+static int mt7925_pci_resume(struct device *device)
+{
+ return _mt7925_pci_resume(device, false);
+}
+
+static int mt7925_pci_restore(struct device *device)
+{
+ return _mt7925_pci_resume(device, true);
+}
+
+static const struct dev_pm_ops mt7925_pm_ops = {
+ .suspend = pm_sleep_ptr(mt7925_pci_suspend),
+ .resume = pm_sleep_ptr(mt7925_pci_resume),
+ .freeze = pm_sleep_ptr(mt7925_pci_suspend),
+ .thaw = pm_sleep_ptr(mt7925_pci_resume),
+ .poweroff = pm_sleep_ptr(mt7925_pci_suspend),
+ .restore = pm_sleep_ptr(mt7925_pci_restore),
+};
static struct pci_driver mt7925_pci_driver = {
.name = KBUILD_MODNAME,
diff --git a/sys/contrib/dev/mediatek/mt76/mt7925/pci_mac.c b/sys/contrib/dev/mediatek/mt76/mt7925/pci_mac.c
index 4578d16bf456..3072850c2752 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7925/pci_mac.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7925/pci_mac.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2023 MediaTek Inc. */
#include "mt7925.h"
diff --git a/sys/contrib/dev/mediatek/mt76/mt7925/pci_mcu.c b/sys/contrib/dev/mediatek/mt76/mt7925/pci_mcu.c
index f95bc5dcd830..6cceff88c656 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7925/pci_mcu.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7925/pci_mcu.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2023 MediaTek Inc. */
#include "mt7925.h"
diff --git a/sys/contrib/dev/mediatek/mt76/mt7925/regd.c b/sys/contrib/dev/mediatek/mt76/mt7925/regd.c
new file mode 100644
index 000000000000..292087e882d1
--- /dev/null
+++ b/sys/contrib/dev/mediatek/mt76/mt7925/regd.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/* Copyright (C) 2025 MediaTek Inc. */
+
+#include "mt7925.h"
+#include "regd.h"
+#include "mcu.h"
+
+static bool mt7925_disable_clc;
+module_param_named(disable_clc, mt7925_disable_clc, bool, 0644);
+MODULE_PARM_DESC(disable_clc, "disable CLC support");
+
+bool mt7925_regd_clc_supported(struct mt792x_dev *dev)
+{
+ if (mt7925_disable_clc ||
+ mt76_is_usb(&dev->mt76))
+ return false;
+
+ return true;
+}
+
+void mt7925_regd_be_ctrl(struct mt792x_dev *dev, u8 *alpha2)
+{
+ struct mt792x_phy *phy = &dev->phy;
+ struct mt7925_clc_rule_v2 *rule;
+ struct mt7925_clc *clc;
+ bool old = dev->has_eht, new = true;
+ u32 mtcl_conf = mt792x_acpi_get_mtcl_conf(&dev->phy, alpha2);
+ u8 *pos;
+
+ if (mtcl_conf != MT792X_ACPI_MTCL_INVALID &&
+ (((mtcl_conf >> 4) & 0x3) == 0)) {
+ new = false;
+ goto out;
+ }
+
+ if (!phy->clc[MT792x_CLC_BE_CTRL])
+ goto out;
+
+ clc = (struct mt7925_clc *)phy->clc[MT792x_CLC_BE_CTRL];
+ pos = clc->data;
+
+ while (1) {
+ rule = (struct mt7925_clc_rule_v2 *)pos;
+
+ if (rule->alpha2[0] == alpha2[0] &&
+ rule->alpha2[1] == alpha2[1]) {
+ new = false;
+ break;
+ }
+
+ /* Check the last one */
+ if (rule->flag & BIT(0))
+ break;
+
+ pos += sizeof(*rule);
+ }
+
+out:
+ if (old == new)
+ return;
+
+ dev->has_eht = new;
+ mt7925_set_stream_he_eht_caps(phy);
+}
+
+static void
+mt7925_regd_channel_update(struct wiphy *wiphy, struct mt792x_dev *dev)
+{
+#define IS_UNII_INVALID(idx, sfreq, efreq, cfreq) \
+ (!(dev->phy.clc_chan_conf & BIT(idx)) && (cfreq) >= (sfreq) && (cfreq) <= (efreq))
+#define MT7925_UNII_59G_IS_VALID 0x1
+#define MT7925_UNII_6G_IS_VALID 0x1e
+ struct ieee80211_supported_band *sband;
+ struct mt76_dev *mdev = &dev->mt76;
+ struct ieee80211_channel *ch;
+ u32 mtcl_conf = mt792x_acpi_get_mtcl_conf(&dev->phy, mdev->alpha2);
+ int i;
+
+ if (mtcl_conf != MT792X_ACPI_MTCL_INVALID) {
+ if ((mtcl_conf & 0x3) == 0)
+ dev->phy.clc_chan_conf &= ~MT7925_UNII_59G_IS_VALID;
+ if (((mtcl_conf >> 2) & 0x3) == 0)
+ dev->phy.clc_chan_conf &= ~MT7925_UNII_6G_IS_VALID;
+ }
+
+ sband = wiphy->bands[NL80211_BAND_2GHZ];
+ if (!sband)
+ return;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+
+ if (!dev->has_eht)
+ ch->flags |= IEEE80211_CHAN_NO_EHT;
+ }
+
+ sband = wiphy->bands[NL80211_BAND_5GHZ];
+ if (!sband)
+ return;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+
+ /* UNII-4 */
+ if (IS_UNII_INVALID(0, 5845, 5925, ch->center_freq))
+ ch->flags |= IEEE80211_CHAN_DISABLED;
+
+ if (!dev->has_eht)
+ ch->flags |= IEEE80211_CHAN_NO_EHT;
+ }
+
+ sband = wiphy->bands[NL80211_BAND_6GHZ];
+ if (!sband)
+ return;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+
+ /* UNII-5/6/7/8 */
+ if (IS_UNII_INVALID(1, 5925, 6425, ch->center_freq) ||
+ IS_UNII_INVALID(2, 6425, 6525, ch->center_freq) ||
+ IS_UNII_INVALID(3, 6525, 6875, ch->center_freq) ||
+ IS_UNII_INVALID(4, 6875, 7125, ch->center_freq))
+ ch->flags |= IEEE80211_CHAN_DISABLED;
+
+ if (!dev->has_eht)
+ ch->flags |= IEEE80211_CHAN_NO_EHT;
+ }
+}
+
+int mt7925_mcu_regd_update(struct mt792x_dev *dev, u8 *alpha2,
+ enum environment_cap country_ie_env)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct wiphy *wiphy = hw->wiphy;
+ int ret = 0;
+
+ dev->regd_in_progress = true;
+
+ mt792x_mutex_acquire(dev);
+ if (!dev->regd_change)
+ goto err;
+
+ ret = mt7925_mcu_set_clc(dev, alpha2, country_ie_env);
+ if (ret < 0)
+ goto err;
+
+ mt7925_regd_be_ctrl(dev, alpha2);
+ mt7925_regd_channel_update(wiphy, dev);
+
+ ret = mt7925_mcu_set_channel_domain(hw->priv);
+ if (ret < 0)
+ goto err;
+
+ ret = mt7925_set_tx_sar_pwr(hw, NULL);
+ if (ret < 0)
+ goto err;
+
+err:
+ mt792x_mutex_release(dev);
+ dev->regd_change = false;
+ dev->regd_in_progress = false;
+ wake_up(&dev->wait);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt7925_mcu_regd_update);
+
+void mt7925_regd_notifier(struct wiphy *wiphy, struct regulatory_request *req)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ struct mt76_connac_pm *pm = &dev->pm;
+ struct mt76_dev *mdev = &dev->mt76;
+
+ if (req->initiator == NL80211_REGDOM_SET_BY_USER &&
+ !dev->regd_user)
+ dev->regd_user = true;
+
+ /* allow world regdom at the first boot only */
+ if (!memcmp(req->alpha2, "00", 2) &&
+ mdev->alpha2[0] && mdev->alpha2[1])
+ return;
+
+ /* do not need to update the same country twice */
+ if (!memcmp(req->alpha2, mdev->alpha2, 2) &&
+ dev->country_ie_env == req->country_ie_env)
+ return;
+
+ memcpy(mdev->alpha2, req->alpha2, 2);
+ mdev->region = req->dfs_region;
+ dev->country_ie_env = req->country_ie_env;
+
+ dev->regd_change = true;
+
+ if (pm->suspended)
+ /* postpone the mcu update to resume */
+ return;
+
+ mt7925_mcu_regd_update(dev, req->alpha2,
+ req->country_ie_env);
+ return;
+}
+
+static bool
+mt7925_regd_is_valid_alpha2(const char *alpha2)
+{
+ if (!alpha2)
+ return false;
+
+ if (alpha2[0] == '0' && alpha2[1] == '0')
+ return true;
+
+ if (isalpha(alpha2[0]) && isalpha(alpha2[1]))
+ return true;
+
+ return false;
+}
+
+int mt7925_regd_change(struct mt792x_phy *phy, char *alpha2)
+{
+ struct wiphy *wiphy = phy->mt76->hw->wiphy;
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ struct mt76_dev *mdev = &dev->mt76;
+
+ if (dev->hw_full_reset)
+ return 0;
+
+ if (!mt7925_regd_is_valid_alpha2(alpha2) ||
+ !mt7925_regd_clc_supported(dev) ||
+ dev->regd_user)
+ return -EINVAL;
+
+ if (mdev->alpha2[0] != '0' && mdev->alpha2[1] != '0')
+ return 0;
+
+ /* do not need to update the same country twice */
+ if (!memcmp(alpha2, mdev->alpha2, 2))
+ return 0;
+
+ if (phy->chip_cap & MT792x_CHIP_CAP_11D_EN) {
+ return regulatory_hint(wiphy, alpha2);
+ } else {
+ return mt7925_mcu_set_clc(dev, alpha2, ENVIRON_INDOOR);
+ }
+}
+EXPORT_SYMBOL_GPL(mt7925_regd_change);
+
+int mt7925_regd_init(struct mt792x_phy *phy)
+{
+ struct wiphy *wiphy = phy->mt76->hw->wiphy;
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ struct mt76_dev *mdev = &dev->mt76;
+
+ if (phy->chip_cap & MT792x_CHIP_CAP_11D_EN) {
+ wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE |
+ REGULATORY_DISABLE_BEACON_HINTS;
+ } else {
+ memzero_explicit(&mdev->alpha2, sizeof(mdev->alpha2));
+ }
+
+ return 0;
+}
diff --git a/sys/contrib/dev/mediatek/mt76/mt7925/regd.h b/sys/contrib/dev/mediatek/mt76/mt7925/regd.h
new file mode 100644
index 000000000000..0767f078862e
--- /dev/null
+++ b/sys/contrib/dev/mediatek/mt76/mt7925/regd.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/* Copyright (C) 2025 MediaTek Inc. */
+
+#ifndef __MT7925_REGD_H
+#define __MT7925_REGD_H
+
+#include "mt7925.h"
+
+int mt7925_mcu_regd_update(struct mt792x_dev *dev, u8 *alpha2,
+ enum environment_cap country_ie_env);
+
+void mt7925_regd_be_ctrl(struct mt792x_dev *dev, u8 *alpha2);
+void mt7925_regd_notifier(struct wiphy *wiphy, struct regulatory_request *req);
+bool mt7925_regd_clc_supported(struct mt792x_dev *dev);
+int mt7925_regd_change(struct mt792x_phy *phy, char *alpha2);
+int mt7925_regd_init(struct mt792x_phy *phy);
+
+#endif
+
diff --git a/sys/contrib/dev/mediatek/mt76/mt7925/regs.h b/sys/contrib/dev/mediatek/mt76/mt7925/regs.h
index 341987e47f67..24985bba1b90 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7925/regs.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7925/regs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2023 MediaTek Inc. */
#ifndef __MT7925_REGS_H
diff --git a/sys/contrib/dev/mediatek/mt76/mt7925/testmode.c b/sys/contrib/dev/mediatek/mt76/mt7925/testmode.c
index a3c97164ba21..3d40aacfc011 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7925/testmode.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7925/testmode.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
#include "mt7925.h"
#include "mcu.h"
diff --git a/sys/contrib/dev/mediatek/mt76/mt7925/usb.c b/sys/contrib/dev/mediatek/mt76/mt7925/usb.c
index 4dfbc1b6cfdd..d9968f03856d 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7925/usb.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7925/usb.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2023 MediaTek Inc. */
#include <linux/kernel.h>
@@ -12,6 +12,9 @@
static const struct usb_device_id mt7925u_device_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7925, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)MT7925_FIRMWARE_WM },
+ /* Netgear, Inc. A9000 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9072, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)MT7925_FIRMWARE_WM },
{ },
};
diff --git a/sys/contrib/dev/mediatek/mt76/mt792x.h b/sys/contrib/dev/mediatek/mt76/mt792x.h
index 443d397d9961..8388638ed550 100644
--- a/sys/contrib/dev/mediatek/mt76/mt792x.h
+++ b/sys/contrib/dev/mediatek/mt76/mt792x.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2023 MediaTek Inc. */
#ifndef __MT792X_H
@@ -28,6 +28,7 @@
#define MT792x_CHIP_CAP_CLC_EVT_EN BIT(0)
#define MT792x_CHIP_CAP_RSSI_NOTIFY_EVT_EN BIT(1)
#define MT792x_CHIP_CAP_WF_RF_PIN_CTRL_EVT_EN BIT(3)
+#define MT792x_CHIP_CAP_11D_EN BIT(4)
#define MT792x_CHIP_CAP_MLO_EN BIT(8)
#define MT792x_CHIP_CAP_MLO_EML_EN BIT(9)
@@ -230,11 +231,11 @@ struct mt792x_dev {
bool hw_init_done:1;
bool fw_assert:1;
bool has_eht:1;
+ bool regd_user:1;
bool regd_in_progress:1;
bool aspm_supported:1;
bool hif_idle:1;
bool hif_resumed:1;
- bool sar_inited:1;
bool regd_change:1;
wait_queue_head_t wait;
diff --git a/sys/contrib/dev/mediatek/mt76/mt792x_acpi_sar.c b/sys/contrib/dev/mediatek/mt76/mt792x_acpi_sar.c
index d1aebadd50aa..946dd7956e4a 100644
--- a/sys/contrib/dev/mediatek/mt76/mt792x_acpi_sar.c
+++ b/sys/contrib/dev/mediatek/mt76/mt792x_acpi_sar.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2023 MediaTek Inc. */
#include <linux/acpi.h>
diff --git a/sys/contrib/dev/mediatek/mt76/mt792x_acpi_sar.h b/sys/contrib/dev/mediatek/mt76/mt792x_acpi_sar.h
index e45dcd7fbdb1..474033073831 100644
--- a/sys/contrib/dev/mediatek/mt76/mt792x_acpi_sar.h
+++ b/sys/contrib/dev/mediatek/mt76/mt792x_acpi_sar.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2023 MediaTek Inc. */
#ifndef __MT7921_ACPI_SAR_H
diff --git a/sys/contrib/dev/mediatek/mt76/mt792x_core.c b/sys/contrib/dev/mediatek/mt76/mt792x_core.c
index 6ce282d7bd50..c33c141b859e 100644
--- a/sys/contrib/dev/mediatek/mt76/mt792x_core.c
+++ b/sys/contrib/dev/mediatek/mt76/mt792x_core.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2023 MediaTek Inc. */
#include <linux/module.h>
@@ -691,9 +691,12 @@ int mt792x_init_wiphy(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
ieee80211_hw_set(hw, CONNECTION_MONITOR);
- ieee80211_hw_set(hw, NO_VIRTUAL_MONITOR);
- if (is_mt7921(&dev->mt76))
+ ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
+ ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID);
+
+ if (is_mt7921(&dev->mt76)) {
ieee80211_hw_set(hw, CHANCTX_STA_CSA);
+ }
if (dev->pm.enable)
ieee80211_hw_set(hw, CONNECTION_MONITOR);
diff --git a/sys/contrib/dev/mediatek/mt76/mt792x_debugfs.c b/sys/contrib/dev/mediatek/mt76/mt792x_debugfs.c
index 9858d9a93851..65c37e0cef8f 100644
--- a/sys/contrib/dev/mediatek/mt76/mt792x_debugfs.c
+++ b/sys/contrib/dev/mediatek/mt76/mt792x_debugfs.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2023 MediaTek Inc. */
#include "mt792x.h"
diff --git a/sys/contrib/dev/mediatek/mt76/mt792x_dma.c b/sys/contrib/dev/mediatek/mt76/mt792x_dma.c
index d68f814b3581..2405ac51a7c0 100644
--- a/sys/contrib/dev/mediatek/mt76/mt792x_dma.c
+++ b/sys/contrib/dev/mediatek/mt76/mt792x_dma.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2023 MediaTek Inc. */
#include <linux/module.h>
@@ -184,13 +184,13 @@ mt792x_dma_reset(struct mt792x_dev *dev, bool force)
/* reset hw queues */
for (i = 0; i < __MT_TXQ_MAX; i++)
- mt76_queue_reset(dev, dev->mphy.q_tx[i]);
+ mt76_queue_reset(dev, dev->mphy.q_tx[i], true);
for (i = 0; i < __MT_MCUQ_MAX; i++)
- mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+ mt76_queue_reset(dev, dev->mt76.q_mcu[i], true);
mt76_for_each_q_rx(&dev->mt76, i)
- mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+ mt76_queue_reset(dev, &dev->mt76.q_rx[i], true);
mt76_tx_status_check(&dev->mt76, true);
diff --git a/sys/contrib/dev/mediatek/mt76/mt792x_mac.c b/sys/contrib/dev/mediatek/mt76/mt792x_mac.c
index d72cdb0215e6..830a2e0d6878 100644
--- a/sys/contrib/dev/mediatek/mt76/mt792x_mac.c
+++ b/sys/contrib/dev/mediatek/mt76/mt792x_mac.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2023 MediaTek Inc. */
#include <linux/module.h>
diff --git a/sys/contrib/dev/mediatek/mt76/mt792x_regs.h b/sys/contrib/dev/mediatek/mt76/mt792x_regs.h
index 458cfd0260b1..acf627aed609 100644
--- a/sys/contrib/dev/mediatek/mt76/mt792x_regs.h
+++ b/sys/contrib/dev/mediatek/mt76/mt792x_regs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2023 MediaTek Inc. */
#ifndef __MT792X_REGS_H
diff --git a/sys/contrib/dev/mediatek/mt76/mt792x_trace.c b/sys/contrib/dev/mediatek/mt76/mt792x_trace.c
index b6f284fb929d..ffc77d3944bd 100644
--- a/sys/contrib/dev/mediatek/mt76/mt792x_trace.c
+++ b/sys/contrib/dev/mediatek/mt76/mt792x_trace.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2023 Lorenzo Bianconi <lorenzo@kernel.org>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt792x_trace.h b/sys/contrib/dev/mediatek/mt76/mt792x_trace.h
index 61f2aa260656..7b0e3f00b194 100644
--- a/sys/contrib/dev/mediatek/mt76/mt792x_trace.h
+++ b/sys/contrib/dev/mediatek/mt76/mt792x_trace.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2023 Lorenzo Bianconi <lorenzo@kernel.org>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt792x_usb.c b/sys/contrib/dev/mediatek/mt76/mt792x_usb.c
index 76272a03b22e..552808458138 100644
--- a/sys/contrib/dev/mediatek/mt76/mt792x_usb.c
+++ b/sys/contrib/dev/mediatek/mt76/mt792x_usb.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2023 MediaTek Inc.
*
* Author: Lorenzo Bianconi <lorenzo@kernel.org>
diff --git a/sys/contrib/dev/mediatek/mt76/mt7996/Kconfig b/sys/contrib/dev/mediatek/mt76/mt7996/Kconfig
index bb44d4a5e2dc..5503d03bf62c 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7996/Kconfig
+++ b/sys/contrib/dev/mediatek/mt76/mt7996/Kconfig
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: ISC
+# SPDX-License-Identifier: BSD-3-Clause-Clear
config MT7996E
tristate "MediaTek MT7996 (PCIe) support"
select MT76_CONNAC_LIB
@@ -12,3 +12,10 @@ config MT7996E
and 2.4GHz IEEE 802.11be 4x4:4SS 4096-QAM, 320MHz channels.
To compile this driver as a module, choose M here.
+
+config MT7996_NPU
+ bool "MT7996 (PCIe) NPU support"
+ depends on MT7996E
+ depends on NET_AIROHA_NPU=y || MT7996E=NET_AIROHA_NPU
+ select MT76_NPU
+ default n
diff --git a/sys/contrib/dev/mediatek/mt76/mt7996/Makefile b/sys/contrib/dev/mediatek/mt76/mt7996/Makefile
index 07c8b555c1ac..69d2d4bb9e69 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7996/Makefile
+++ b/sys/contrib/dev/mediatek/mt76/mt7996/Makefile
@@ -1,8 +1,9 @@
-# SPDX-License-Identifier: ISC
+# SPDX-License-Identifier: BSD-3-Clause-Clear
obj-$(CONFIG_MT7996E) += mt7996e.o
mt7996e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \
debugfs.o mmio.o
+mt7996e-$(CONFIG_MT7996_NPU) += npu.o
mt7996e-$(CONFIG_DEV_COREDUMP) += coredump.o
diff --git a/sys/contrib/dev/mediatek/mt76/mt7996/coredump.c b/sys/contrib/dev/mediatek/mt76/mt7996/coredump.c
index 433846d2319e..098fb8d5e323 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7996/coredump.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7996/coredump.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2023 MediaTek Inc. */
#if defined(__FreeBSD__)
diff --git a/sys/contrib/dev/mediatek/mt76/mt7996/coredump.h b/sys/contrib/dev/mediatek/mt76/mt7996/coredump.h
index af2ba219b1b5..baa2f6f50832 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7996/coredump.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7996/coredump.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2023 MediaTek Inc. */
#ifndef _COREDUMP_H_
diff --git a/sys/contrib/dev/mediatek/mt76/mt7996/debugfs.c b/sys/contrib/dev/mediatek/mt76/mt7996/debugfs.c
index 0ab827f52fd7..76d623b2cafb 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7996/debugfs.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7996/debugfs.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2022 MediaTek Inc.
*/
@@ -953,16 +953,34 @@ bool mt7996_debugfs_rx_log(struct mt7996_dev *dev, const void *data, int len)
#ifdef CONFIG_MAC80211_DEBUGFS
/** per-station debugfs **/
-static ssize_t mt7996_sta_fixed_rate_set(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
+static int
+mt7996_queues_show(struct seq_file *s, void *data)
+{
+ struct ieee80211_sta *sta = s->private;
+
+ mt7996_sta_hw_queue_read(s, sta);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mt7996_queues);
+
+void mt7996_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir)
+{
+ debugfs_create_file("hw-queues", 0400, dir, sta, &mt7996_queues_fops);
+}
+
+static ssize_t mt7996_link_sta_fixed_rate_set(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
#define SHORT_PREAMBLE 0
#define LONG_PREAMBLE 1
- struct ieee80211_sta *sta = file->private_data;
- struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct ieee80211_link_sta *link_sta = file->private_data;
+ struct mt7996_sta *msta = (struct mt7996_sta *)link_sta->sta->drv_priv;
struct mt7996_dev *dev = msta->vif->deflink.phy->dev;
- struct mt7996_sta_link *msta_link = &msta->deflink;
+ struct mt7996_sta_link *msta_link;
struct ra_rate phy = {};
char buf[100];
int ret;
@@ -981,12 +999,13 @@ static ssize_t mt7996_sta_fixed_rate_set(struct file *file,
/* mode - cck: 0, ofdm: 1, ht: 2, gf: 3, vht: 4, he_su: 8, he_er: 9 EHT: 15
* bw - bw20: 0, bw40: 1, bw80: 2, bw160: 3, BW320: 4
- * nss - vht: 1~4, he: 1~4, eht: 1~4, others: ignore
* mcs - cck: 0~4, ofdm: 0~7, ht: 0~32, vht: 0~9, he_su: 0~11, he_er: 0~2, eht: 0~13
+ * nss - vht: 1~4, he: 1~4, eht: 1~4, others: ignore
* gi - (ht/vht) lgi: 0, sgi: 1; (he) 0.8us: 0, 1.6us: 1, 3.2us: 2
* preamble - short: 1, long: 0
- * ldpc - off: 0, on: 1
* stbc - off: 0, on: 1
+ * ldpc - off: 0, on: 1
+ * spe - off: 0, on: 1
* ltf - 1xltf: 0, 2xltf: 1, 4xltf: 2
*/
if (sscanf(buf, "%hhu %hhu %hhu %hhu %hu %hhu %hhu %hhu %hhu %hu",
@@ -994,9 +1013,16 @@ static ssize_t mt7996_sta_fixed_rate_set(struct file *file,
&phy.preamble, &phy.stbc, &phy.ldpc, &phy.spe, &ltf) != 10) {
dev_warn(dev->mt76.dev,
"format: Mode BW MCS NSS GI Preamble STBC LDPC SPE ltf\n");
- goto out;
+ return -EINVAL;
}
+ mutex_lock(&dev->mt76.mutex);
+
+ msta_link = mt76_dereference(msta->link[link_sta->link_id], &dev->mt76);
+ if (!msta_link) {
+ ret = -EINVAL;
+ goto out;
+ }
phy.wlan_idx = cpu_to_le16(msta_link->wcid.idx);
phy.gi = cpu_to_le16(gi);
phy.ltf = cpu_to_le16(ltf);
@@ -1005,36 +1031,26 @@ static ssize_t mt7996_sta_fixed_rate_set(struct file *file,
ret = mt7996_mcu_set_fixed_rate_ctrl(dev, &phy, 0);
if (ret)
- return -EFAULT;
+ goto out;
+ ret = count;
out:
- return count;
+ mutex_unlock(&dev->mt76.mutex);
+ return ret;
}
static const struct file_operations fops_fixed_rate = {
- .write = mt7996_sta_fixed_rate_set,
+ .write = mt7996_link_sta_fixed_rate_set,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
-static int
-mt7996_queues_show(struct seq_file *s, void *data)
-{
- struct ieee80211_sta *sta = s->private;
-
- mt7996_sta_hw_queue_read(s, sta);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(mt7996_queues);
-
-void mt7996_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, struct dentry *dir)
+void mt7996_link_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct dentry *dir)
{
- debugfs_create_file("fixed_rate", 0600, dir, sta, &fops_fixed_rate);
- debugfs_create_file("hw-queues", 0400, dir, sta, &mt7996_queues_fops);
+ debugfs_create_file("fixed_rate", 0600, dir, link_sta, &fops_fixed_rate);
}
#endif
diff --git a/sys/contrib/dev/mediatek/mt76/mt7996/dma.c b/sys/contrib/dev/mediatek/mt76/mt7996/dma.c
index 304bb5a2318c..98ab62d76d24 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7996/dma.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7996/dma.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2022 MediaTek Inc.
*/
@@ -20,12 +20,15 @@ int mt7996_init_tx_queues(struct mt7996_phy *phy, int idx, int n_desc,
ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
idx -= MT_TXQ_ID(0);
- if (phy->mt76->band_idx == MT_BAND2)
+ if (wed == &dev->mt76.mmio.wed_hif2)
flags = MT_WED_Q_TX(0);
else
flags = MT_WED_Q_TX(idx);
}
+ if (mt76_npu_device_active(&dev->mt76))
+ flags = MT_NPU_Q_TX(phy->mt76->band_idx);
+
return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc,
ring_base, wed, flags);
}
@@ -86,36 +89,74 @@ static void mt7996_dma_config(struct mt7996_dev *dev)
break;
}
- if (dev->has_rro) {
+ if (mt7996_has_hwrro(dev)) {
/* band0 */
RXQ_CONFIG(MT_RXQ_RRO_BAND0, WFDMA0, MT_INT_RX_DONE_RRO_BAND0,
MT7996_RXQ_RRO_BAND0);
- RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND0,
- MT7996_RXQ_MSDU_PG_BAND0);
- RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0, MT_INT_RX_TXFREE_MAIN,
- MT7996_RXQ_TXFREE0);
- /* band1 */
- RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND1,
- MT7996_RXQ_MSDU_PG_BAND1);
- /* band2 */
- RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0, MT_INT_RX_DONE_RRO_BAND2,
- MT7996_RXQ_RRO_BAND2);
- RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND2,
- MT7996_RXQ_MSDU_PG_BAND2);
- RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0, MT_INT_RX_TXFREE_TRI,
- MT7996_RXQ_TXFREE2);
-
- RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0, MT_INT_RX_DONE_RRO_IND,
- MT7996_RXQ_RRO_IND);
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3)
+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0,
+ MT_INT_RX_DONE_MSDU_PG_BAND0,
+ MT7996_RXQ_MSDU_PG_BAND0);
+ if (is_mt7996(&dev->mt76)) {
+ RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0,
+ MT_INT_RX_TXFREE_MAIN, MT7996_RXQ_TXFREE0);
+ /* band1 */
+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0,
+ MT_INT_RX_DONE_MSDU_PG_BAND1,
+ MT7996_RXQ_MSDU_PG_BAND1);
+ /* band2 */
+ RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0,
+ MT_INT_RX_DONE_RRO_BAND2,
+ MT7996_RXQ_RRO_BAND2);
+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0,
+ MT_INT_RX_DONE_MSDU_PG_BAND2,
+ MT7996_RXQ_MSDU_PG_BAND2);
+ RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0,
+ MT_INT_RX_TXFREE_TRI, MT7996_RXQ_TXFREE2);
+ } else {
+ RXQ_CONFIG(MT_RXQ_RRO_BAND1, WFDMA0,
+ MT_INT_RX_DONE_RRO_BAND1,
+ MT7996_RXQ_RRO_BAND1);
+ }
+
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3)
+ RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0,
+ MT_INT_RX_DONE_RRO_IND,
+ MT7996_RXQ_RRO_IND);
+ else
+ RXQ_CONFIG(MT_RXQ_RRO_RXDMAD_C, WFDMA0,
+ MT_INT_RX_DONE_RRO_RXDMAD_C,
+ MT7996_RXQ_RRO_RXDMAD_C);
}
/* data tx queue */
- TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
if (is_mt7996(&dev->mt76)) {
- TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
- TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2, MT7996_TXQ_BAND2);
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
+ if (dev->hif2) {
+ /* default bn1:ring19 bn2:ring21 */
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1,
+ MT7996_TXQ_BAND1);
+ TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2,
+ MT7996_TXQ_BAND2);
+ } else {
+ /* single pcie bn0/1:ring18 bn2:ring19 */
+ TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND1,
+ MT7996_TXQ_BAND1);
+ }
} else {
- TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
+ if (dev->hif2) {
+ /* bn0:ring18 bn1:ring21 */
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0,
+ MT7996_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND2,
+ MT7996_TXQ_BAND2);
+ } else {
+ /* single pcie bn0:ring18 bn1:ring19 */
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0,
+ MT7996_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1,
+ MT7996_TXQ_BAND1);
+ }
}
/* mcu tx queue */
@@ -169,11 +210,12 @@ static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
/* Rx TxFreeDone From MAC Rings */
val = is_mt7996(&dev->mt76) ? 4 : 8;
- if (is_mt7990(&dev->mt76) || (is_mt7996(&dev->mt76) && dev->has_rro))
+ if ((is_mt7996(&dev->mt76) && mt7996_has_hwrro(dev)) ||
+ is_mt7990(&dev->mt76))
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND0) + ofs, PREFETCH(val));
if (is_mt7990(&dev->mt76) && dev->hif2)
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND1) + ofs, PREFETCH(val));
- else if (is_mt7996(&dev->mt76) && dev->has_rro)
+ else if (is_mt7996(&dev->mt76) && mt7996_has_hwrro(dev))
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND2) + ofs, PREFETCH(val));
/* Rx Data Rings */
@@ -182,7 +224,7 @@ static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10));
/* Rx RRO Rings */
- if (dev->has_rro) {
+ if (mt7996_has_hwrro(dev)) {
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_RRO_BAND0) + ofs, PREFETCH(0x10));
queue = is_mt7996(&dev->mt76) ? MT_RXQ_RRO_BAND2 : MT_RXQ_RRO_BAND1;
mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10));
@@ -291,11 +333,14 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
if (mt7996_band_valid(dev, MT_BAND0))
irq_mask |= MT_INT_BAND0_RX_DONE;
- if (mt7996_band_valid(dev, MT_BAND1))
+ if (mt7996_band_valid(dev, MT_BAND1)) {
irq_mask |= MT_INT_BAND1_RX_DONE;
+ if (is_mt7992(&dev->mt76) && dev->hif2)
+ irq_mask |= MT_INT_RX_TXFREE_BAND1_EXT;
+ }
if (mt7996_band_valid(dev, MT_BAND2))
- irq_mask |= MT_INT_BAND2_RX_DONE;
+ irq_mask |= MT_INT_BAND2_RX_DONE | MT_INT_TX_RX_DONE_EXT;
if (mtk_wed_device_active(wed) && wed_reset) {
u32 wed_irq_mask = irq_mask;
@@ -305,7 +350,7 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
mtk_wed_device_start(wed, wed_irq_mask);
}
- if (!mt7996_has_wa(dev))
+ if (!mt7996_has_wa(dev) || mt76_npu_device_active(&dev->mt76))
irq_mask &= ~(MT_INT_RX(MT_RXQ_MAIN_WA) |
MT_INT_RX(MT_RXQ_BAND1_WA));
irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;
@@ -381,13 +426,48 @@ static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);
mt76_set(dev, MT_WFDMA_HOST_CONFIG,
- MT_WFDMA_HOST_CONFIG_PDMA_BAND |
- MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);
+ MT_WFDMA_HOST_CONFIG_PDMA_BAND);
+
+ mt76_clear(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_BAND0_PCIE1 |
+ MT_WFDMA_HOST_CONFIG_BAND1_PCIE1 |
+ MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);
+
+ if (is_mt7996(&dev->mt76))
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);
+ else
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_BAND1_PCIE1);
/* AXI read outstanding number */
mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL,
MT_WFDMA_AXI_R2A_CTRL_OUTSTAND_MASK, 0x14);
+ if (dev->hif2->speed < PCIE_SPEED_5_0GT ||
+ (dev->hif2->speed == PCIE_SPEED_5_0GT &&
+ dev->hif2->width < PCIE_LNK_X2)) {
+ mt76_rmw(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
+ WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
+ FIELD_PREP(WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
+ 0x1));
+ mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL2,
+ MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
+ FIELD_PREP(MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
+ 0x1));
+ } else if (dev->hif2->speed < PCIE_SPEED_8_0GT ||
+ (dev->hif2->speed == PCIE_SPEED_8_0GT &&
+ dev->hif2->width < PCIE_LNK_X2)) {
+ mt76_rmw(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
+ WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
+ FIELD_PREP(WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
+ 0x2));
+ mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL2,
+ MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
+ FIELD_PREP(MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
+ 0x2));
+ }
+
/* WFDMA rx threshold */
mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH + hif1_ofs, 0xc000c);
mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH + hif1_ofs, 0x10008);
@@ -400,27 +480,61 @@ static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
* so, redirect pcie0 rx ring3 interrupt to pcie1
*/
if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
- dev->has_rro)
+ mt7996_has_hwrro(dev)) {
+ u32 intr = is_mt7996(&dev->mt76) ?
+ MT_WFDMA0_RX_INT_SEL_RING6 :
+ MT_WFDMA0_RX_INT_SEL_RING9 |
+ MT_WFDMA0_RX_INT_SEL_RING5;
+
mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL + hif1_ofs,
- MT_WFDMA0_RX_INT_SEL_RING6);
- else
+ intr);
+ } else {
mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
MT_WFDMA0_RX_INT_SEL_RING3);
+ }
}
mt7996_dma_start(dev, reset, true);
}
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
int mt7996_dma_rro_init(struct mt7996_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
u32 irq_mask;
int ret;
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) {
+ /* rxdmad_c */
+ mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].flags = MT_WED_RRO_Q_RXDMAD_C;
+ if (mtk_wed_device_active(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].wed = &mdev->mmio.wed;
+ else if (!mt76_npu_device_active(&dev->mt76))
+ mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].flags |= MT_QFLAG_EMI_EN;
+ ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_RXDMAD_C],
+ MT_RXQ_ID(MT_RXQ_RRO_RXDMAD_C),
+ MT7996_RX_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RRO_AP_RING_BASE);
+ if (ret)
+ return ret;
+
+ if (!mtk_wed_device_active(&mdev->mmio.wed)) {
+ /* We need to set cpu idx pointer before resetting the
+ * EMI queues.
+ */
+ mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].emi_cpu_idx =
+ &dev->wed_rro.emi_rings_cpu.ptr->ring[0].idx;
+ mt76_queue_reset(dev, &mdev->q_rx[MT_RXQ_RRO_RXDMAD_C],
+ true);
+ }
+ goto start_hw_rro;
+ }
+
/* ind cmd */
mdev->q_rx[MT_RXQ_RRO_IND].flags = MT_WED_RRO_Q_IND;
- mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_IND],
MT_RXQ_ID(MT_RXQ_RRO_IND),
MT7996_RX_RING_SIZE,
@@ -431,7 +545,9 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
/* rx msdu page queue for band0 */
mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags =
MT_WED_RRO_Q_MSDU_PG(0) | MT_QFLAG_WED_RRO_EN;
- mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0],
MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0),
MT7996_RX_RING_SIZE,
@@ -440,11 +556,13 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
if (ret)
return ret;
- if (mt7996_band_valid(dev, MT_BAND1)) {
+ if (mt7996_band_valid(dev, MT_BAND1) && is_mt7996(&dev->mt76)) {
/* rx msdu page queue for band1 */
mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags =
MT_WED_RRO_Q_MSDU_PG(1) | MT_QFLAG_WED_RRO_EN;
- mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1],
MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1),
MT7996_RX_RING_SIZE,
@@ -458,7 +576,9 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
/* rx msdu page queue for band2 */
mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags =
MT_WED_RRO_Q_MSDU_PG(2) | MT_QFLAG_WED_RRO_EN;
- mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2],
MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2),
MT7996_RX_RING_SIZE,
@@ -468,15 +588,44 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
return ret;
}
- irq_mask = mdev->mmio.irqmask | MT_INT_RRO_RX_DONE |
- MT_INT_TX_DONE_BAND2;
- mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
- mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
- mt7996_irq_enable(dev, irq_mask);
+start_hw_rro:
+ if (mtk_wed_device_active(&mdev->mmio.wed)) {
+ irq_mask = mdev->mmio.irqmask |
+ MT_INT_TX_DONE_BAND2;
+
+ mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
+ mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
+ mt7996_irq_enable(dev, irq_mask);
+ } else {
+ if (is_mt7996(&dev->mt76)) {
+ mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND1,
+ mt76_dma_rx_poll);
+ mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND2,
+ mt76_dma_rx_poll);
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND2,
+ mt76_dma_rx_poll);
+ } else {
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND1,
+ mt76_dma_rx_poll);
+ }
+
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND0, mt76_dma_rx_poll);
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) {
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_RXDMAD_C,
+ mt76_dma_rx_poll);
+ } else {
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_IND,
+ mt76_dma_rx_poll);
+ mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND0,
+ mt76_dma_rx_poll);
+ }
+
+ if (!mt76_npu_device_active(&dev->mt76))
+ mt7996_irq_enable(dev, MT_INT_RRO_RX_DONE);
+ }
return 0;
}
-#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
int mt7996_dma_init(struct mt7996_dev *dev)
{
@@ -563,7 +712,9 @@ int mt7996_dma_init(struct mt7996_dev *dev)
return ret;
/* tx free notify event from WA for band0 */
- if (mtk_wed_device_active(wed) && !dev->has_rro) {
+ if (mtk_wed_device_active(wed) &&
+ ((is_mt7996(&dev->mt76) && !mt7996_has_hwrro(dev)) ||
+ (is_mt7992(&dev->mt76)))) {
dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
dev->mt76.q_rx[MT_RXQ_MAIN_WA].wed = wed;
}
@@ -618,7 +769,7 @@ int mt7996_dma_init(struct mt7996_dev *dev)
/* tx free notify event from WA for mt7996 band2
* use pcie0's rx ring3, but, redirect pcie0 rx ring3 interrupt to pcie1
*/
- if (mtk_wed_device_active(wed_hif2) && !dev->has_rro) {
+ if (mtk_wed_device_active(wed_hif2) && !mt7996_has_hwrro(dev)) {
dev->mt76.q_rx[MT_RXQ_BAND2_WA].flags = MT_WED_Q_TXFREE;
dev->mt76.q_rx[MT_RXQ_BAND2_WA].wed = wed_hif2;
}
@@ -633,6 +784,11 @@ int mt7996_dma_init(struct mt7996_dev *dev)
} else if (mt7996_band_valid(dev, MT_BAND1)) {
/* rx data queue for mt7992 band1 */
rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs;
+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
+ dev->mt76.q_rx[MT_RXQ_BAND1].flags = MT_WED_Q_RX(1);
+ dev->mt76.q_rx[MT_RXQ_BAND1].wed = wed;
+ }
+
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
MT_RXQ_ID(MT_RXQ_BAND1),
MT7996_RX_RING_SIZE,
@@ -644,6 +800,12 @@ int mt7996_dma_init(struct mt7996_dev *dev)
/* tx free notify event from WA for mt7992 band1 */
if (mt7996_has_wa(dev)) {
rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs;
+ if (mtk_wed_device_active(wed_hif2)) {
+ dev->mt76.q_rx[MT_RXQ_BAND1_WA].flags =
+ MT_WED_Q_TXFREE;
+ dev->mt76.q_rx[MT_RXQ_BAND1_WA].wed = wed_hif2;
+ }
+
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
MT_RXQ_ID(MT_RXQ_BAND1_WA),
MT7996_RX_MCU_RING_SIZE,
@@ -654,12 +816,12 @@ int mt7996_dma_init(struct mt7996_dev *dev)
}
}
- if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) &&
- dev->has_rro) {
+ if (mt7996_has_hwrro(dev)) {
/* rx rro data queue for band0 */
dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags =
MT_WED_RRO_Q_DATA(0) | MT_QFLAG_WED_RRO_EN;
- dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
MT_RXQ_ID(MT_RXQ_RRO_BAND0),
MT7996_RX_RING_SIZE,
@@ -668,23 +830,44 @@ int mt7996_dma_init(struct mt7996_dev *dev)
if (ret)
return ret;
- /* tx free notify event from WA for band0 */
- dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
- dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
+ if (is_mt7992(&dev->mt76)) {
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND1].flags =
+ MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
+ if (mtk_wed_device_active(wed) &&
+ mtk_wed_get_rx_capa(wed))
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND1].wed = wed;
+ ret = mt76_queue_alloc(dev,
+ &dev->mt76.q_rx[MT_RXQ_RRO_BAND1],
+ MT_RXQ_ID(MT_RXQ_RRO_BAND1),
+ MT7996_RX_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND1) + hif1_ofs);
+ if (ret)
+ return ret;
+ } else {
+ if (mtk_wed_device_active(wed)) {
+ /* tx free notify event from WA for band0 */
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
+ }
- ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
- MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
- MT7996_RX_MCU_RING_SIZE,
- MT7996_RX_BUF_SIZE,
- MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
- if (ret)
- return ret;
+ ret = mt76_queue_alloc(dev,
+ &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
+ MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
+ MT7996_RX_MCU_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
+ if (ret)
+ return ret;
+ }
if (mt7996_band_valid(dev, MT_BAND2)) {
/* rx rro data queue for band2 */
dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags =
MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
- dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
+ if (mtk_wed_device_active(wed) &&
+ mtk_wed_get_rx_capa(wed))
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
MT_RXQ_ID(MT_RXQ_RRO_BAND2),
MT7996_RX_RING_SIZE,
@@ -712,6 +895,10 @@ int mt7996_dma_init(struct mt7996_dev *dev)
if (ret < 0)
return ret;
+ ret = mt7996_npu_rx_queues_init(dev);
+ if (ret)
+ return ret;
+
netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
mt7996_poll_tx);
napi_enable(&dev->mt76.tx_napi);
@@ -755,6 +942,10 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
mt76_tx_status_check(&dev->mt76, true);
+ if (mt7996_has_hwrro(dev) &&
+ !mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mt7996_rro_msdu_page_map_free(dev);
+
/* reset wfsys */
if (force)
mt7996_wfsys_reset(dev);
@@ -765,6 +956,7 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
if (mtk_wed_device_active(&dev->mt76.mmio.wed))
mtk_wed_device_dma_reset(&dev->mt76.mmio.wed);
+ mt76_npu_disable_irqs(&dev->mt76);
mt7996_dma_disable(dev, force);
mt76_wed_dma_reset(&dev->mt76);
@@ -778,21 +970,32 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
}
for (i = 0; i < __MT_MCUQ_MAX; i++)
- mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+ mt76_queue_reset(dev, dev->mt76.q_mcu[i], true);
mt76_for_each_q_rx(&dev->mt76, i) {
- if (mtk_wed_device_active(&dev->mt76.mmio.wed))
- if (mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]) ||
- mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
- continue;
+ struct mt76_queue *q = &dev->mt76.q_rx[i];
- mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ if (mt76_queue_is_wed_rro(q) ||
+ mt76_queue_is_wed_tx_free(q)) {
+ if (force && mt76_queue_is_wed_rro_data(q))
+ mt76_queue_reset(dev, q, false);
+ continue;
+ }
+ }
+ mt76_queue_reset(dev, q, true);
}
mt76_tx_status_check(&dev->mt76, true);
- mt76_for_each_q_rx(&dev->mt76, i)
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) && force &&
+ (mt76_queue_is_wed_rro_ind(&dev->mt76.q_rx[i]) ||
+ mt76_queue_is_wed_rro_msdu_pg(&dev->mt76.q_rx[i])))
+ continue;
+
mt76_queue_rx_reset(dev, i);
+ }
mt7996_dma_enable(dev, !force);
}
diff --git a/sys/contrib/dev/mediatek/mt76/mt7996/eeprom.c b/sys/contrib/dev/mediatek/mt76/mt7996/eeprom.c
index 6354173427a5..3d165202a5fd 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7996/eeprom.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7996/eeprom.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2022 MediaTek Inc.
*/
@@ -338,9 +338,8 @@ int mt7996_eeprom_init(struct mt7996_dev *dev)
#elif defined(__FreeBSD__)
memcpy(dev->mphy.macaddr, (u8 *)dev->mt76.eeprom.data + MT_EE_MAC_ADDR, ETH_ALEN);
#endif
- mt76_eeprom_override(&dev->mphy);
- return 0;
+ return mt76_eeprom_override(&dev->mphy);
}
int mt7996_eeprom_get_target_power(struct mt7996_dev *dev,
diff --git a/sys/contrib/dev/mediatek/mt76/mt7996/eeprom.h b/sys/contrib/dev/mediatek/mt76/mt7996/eeprom.h
index 7a771ca2434c..9e6f0e04caf9 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7996/eeprom.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7996/eeprom.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2022 MediaTek Inc.
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt7996/init.c b/sys/contrib/dev/mediatek/mt76/mt7996/init.c
index c0c827e1a80b..34f1df207f19 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7996/init.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7996/init.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2022 MediaTek Inc.
*/
@@ -66,6 +66,33 @@ static const struct ieee80211_iface_combination if_comb = {
.beacon_int_min_gcd = 100,
};
+static const u8 if_types_ext_capa_ap[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+};
+
+static const struct wiphy_iftype_ext_capab iftypes_ext_capa[] = {
+ {
+ .iftype = NL80211_IFTYPE_AP,
+ .extended_capabilities = if_types_ext_capa_ap,
+ .extended_capabilities_mask = if_types_ext_capa_ap,
+ .extended_capabilities_len = sizeof(if_types_ext_capa_ap),
+ .eml_capabilities = IEEE80211_EML_CAP_EMLSR_SUPP,
+ .mld_capa_and_ops =
+ FIELD_PREP_CONST(IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS,
+ MT7996_MAX_RADIOS - 1),
+ }, {
+ .iftype = NL80211_IFTYPE_STATION,
+ .extended_capabilities = if_types_ext_capa_ap,
+ .extended_capabilities_mask = if_types_ext_capa_ap,
+ .extended_capabilities_len = sizeof(if_types_ext_capa_ap),
+ .mld_capa_and_ops =
+ FIELD_PREP_CONST(IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS,
+ MT7996_MAX_RADIOS - 1),
+ },
+};
+
#if defined(CONFIG_HWMON)
static ssize_t mt7996_thermal_temp_show(struct device *dev,
struct device_attribute *attr,
@@ -388,6 +415,7 @@ mt7996_init_wiphy_band(struct ieee80211_hw *hw, struct mt7996_phy *phy)
phy->slottime = 9;
phy->beacon_rate = -1;
+ phy->rxfilter = MT_WF_RFCR_DROP_OTHER_UC;
if (phy->mt76->cap.has_2ghz) {
phy->mt76->sband_2g.sband.ht_cap.cap |=
@@ -452,7 +480,7 @@ mt7996_init_wiphy(struct ieee80211_hw *hw, struct mtk_wed_device *wed)
hw->max_tx_aggregation_subframes = 512;
hw->netdev_features = NETIF_F_RXCSUM;
- if (mtk_wed_device_active(wed))
+ if (mtk_wed_device_active(wed) || mt76_npu_device_active(mdev))
hw->netdev_features |= NETIF_F_HW_TC;
hw->radiotap_timestamp.units_pos =
@@ -468,8 +496,11 @@ mt7996_init_wiphy(struct ieee80211_hw *hw, struct mtk_wed_device *wed)
wiphy->radio = dev->radios;
wiphy->reg_notifier = mt7996_regd_notifier;
- wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
+ WIPHY_FLAG_SUPPORTS_MLO;
wiphy->mbssid_max_interfaces = 16;
+ wiphy->iftype_ext_capab = iftypes_ext_capa;
+ wiphy->num_iftype_ext_capab = ARRAY_SIZE(iftypes_ext_capa);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BSS_COLOR);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
@@ -584,19 +615,21 @@ void mt7996_mac_init(struct mt7996_dev *dev)
}
/* rro module init */
- if (is_mt7996(&dev->mt76))
- mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE, 2);
- else
+ if (dev->hif2)
mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE,
- dev->hif2 ? 7 : 0);
+ is_mt7996(&dev->mt76) ? 2 : 7);
+ else
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE, 0);
- if (dev->has_rro) {
+ if (mt7996_has_hwrro(dev)) {
u16 timeout;
timeout = mt76_rr(dev, MT_HW_REV) == MT_HW_REV1 ? 512 : 128;
mt7996_mcu_set_rro(dev, UNI_RRO_SET_FLUSH_TIMEOUT, timeout);
- mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 1);
- mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 0);
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE,
+ is_mt7996(&dev->mt76) ? 1 : 2);
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH,
+ !is_mt7996(&dev->mt76));
} else {
mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 3);
mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 1);
@@ -643,7 +676,9 @@ static int mt7996_register_phy(struct mt7996_dev *dev, enum mt76_band_id band)
if (!mt7996_band_valid(dev, band))
return 0;
- if (is_mt7996(&dev->mt76) && band == MT_BAND2 && dev->hif2) {
+ if (dev->hif2 &&
+ ((is_mt7996(&dev->mt76) && band == MT_BAND2) ||
+ (is_mt7992(&dev->mt76) && band == MT_BAND1))) {
hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
wed = &dev->mt76.mmio.wed_hif2;
}
@@ -684,17 +719,26 @@ static int mt7996_register_phy(struct mt7996_dev *dev, enum mt76_band_id band)
if (band == MT_BAND2)
mphy->macaddr[0] ^= BIT(6);
}
- mt76_eeprom_override(mphy);
+ ret = mt76_eeprom_override(mphy);
+ if (ret)
+ goto error;
/* init wiphy according to mphy and phy */
mt7996_init_wiphy_band(mphy->hw, phy);
- ret = mt7996_init_tx_queues(mphy->priv,
- MT_TXQ_ID(band),
- MT7996_TX_RING_SIZE,
- MT_TXQ_RING_BASE(band) + hif1_ofs,
- wed);
- if (ret)
- goto error;
+
+ if (is_mt7996(&dev->mt76) && !dev->hif2 && band == MT_BAND1) {
+ int i;
+
+ for (i = 0; i <= MT_TXQ_PSD; i++)
+ mphy->q_tx[i] = dev->mt76.phys[MT_BAND0]->q_tx[0];
+ } else {
+ ret = mt7996_init_tx_queues(mphy->priv, MT_TXQ_ID(band),
+ MT7996_TX_RING_SIZE,
+ MT_TXQ_RING_BASE(band) + hif1_ofs,
+ wed);
+ if (ret)
+ goto error;
+ }
ret = mt76_register_phy(mphy, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
@@ -702,10 +746,9 @@ static int mt7996_register_phy(struct mt7996_dev *dev, enum mt76_band_id band)
goto error;
if (wed == &dev->mt76.mmio.wed_hif2 && mtk_wed_device_active(wed)) {
- u32 irq_mask = dev->mt76.mmio.irqmask | MT_INT_TX_DONE_BAND2;
-
- mt76_wr(dev, MT_INT1_MASK_CSR, irq_mask);
- mtk_wed_device_start(&dev->mt76.mmio.wed_hif2, irq_mask);
+ mt76_wr(dev, MT_INT_PCIE1_MASK_CSR, MT_INT_TX_RX_DONE_EXT);
+ mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
+ MT_INT_TX_RX_DONE_EXT);
}
return 0;
@@ -743,30 +786,152 @@ void mt7996_wfsys_reset(struct mt7996_dev *dev)
msleep(20);
}
-static int mt7996_wed_rro_init(struct mt7996_dev *dev)
+static void mt7996_rro_hw_init_v3(struct mt7996_dev *dev)
{
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ u32 session_id;
+
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1)
+ return;
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
+ wed->wlan.ind_cmd.win_size =
+ ffs(MT7996_RRO_WINDOW_MAX_LEN) - 6;
+ if (is_mt7996(&dev->mt76))
+ wed->wlan.ind_cmd.particular_sid =
+ MT7996_RRO_MAX_SESSION;
+ else
+ wed->wlan.ind_cmd.particular_sid = 1;
+ wed->wlan.ind_cmd.particular_se_phys =
+ dev->wed_rro.session.phy_addr;
+ wed->wlan.ind_cmd.se_group_nums = MT7996_RRO_ADDR_ELEM_LEN;
+ wed->wlan.ind_cmd.ack_sn_addr = MT_RRO_ACK_SN_CTRL;
+ }
+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
+
+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
+ mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0x15010e00);
+ mt76_set(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1,
+ MT_RRO_IND_CMD_SIGNATURE_BASE1_EN);
+ } else {
+ mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0);
+ mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1, 0);
+ }
+
+ /* particular session configure */
+ /* use max session idx + 1 as particular session id */
+ mt76_wr(dev, MT_RRO_PARTICULAR_CFG0, dev->wed_rro.session.phy_addr);
+
+ session_id = is_mt7996(&dev->mt76) ? MT7996_RRO_MAX_SESSION : 1;
+ mt76_wr(dev, MT_RRO_PARTICULAR_CFG1,
+ MT_RRO_PARTICULAR_CONFG_EN |
+ FIELD_PREP(MT_RRO_PARTICULAR_SID, session_id));
+}
+
+void mt7996_rro_hw_init(struct mt7996_dev *dev)
+{
u32 reg = MT_RRO_ADDR_ELEM_SEG_ADDR0;
+ int i;
+
+ if (!mt7996_has_hwrro(dev))
+ return;
+
+ INIT_LIST_HEAD(&dev->wed_rro.page_cache);
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.page_map); i++)
+ INIT_LIST_HEAD(&dev->wed_rro.page_map[i]);
+
+ if (!is_mt7996(&dev->mt76)) {
+ reg = MT_RRO_MSDU_PG_SEG_ADDR0;
+
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) {
+ mt76_clear(dev, MT_RRO_3_0_EMU_CONF,
+ MT_RRO_3_0_EMU_CONF_EN_MASK);
+ mt76_set(dev, MT_RRO_3_1_GLOBAL_CONFIG,
+ MT_RRO_3_1_GLOBAL_CONFIG_RXDMAD_SEL);
+ if (!mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+ !mt76_npu_device_active(&dev->mt76)) {
+ mt76_set(dev, MT_RRO_3_1_GLOBAL_CONFIG,
+ MT_RRO_3_1_GLOBAL_CONFIG_RX_DIDX_WR_EN |
+ MT_RRO_3_1_GLOBAL_CONFIG_RX_CIDX_RD_EN);
+ mt76_wr(dev, MT_RRO_RX_RING_AP_CIDX_ADDR,
+ dev->wed_rro.emi_rings_cpu.phy_addr >> 4);
+ mt76_wr(dev, MT_RRO_RX_RING_AP_DIDX_ADDR,
+ dev->wed_rro.emi_rings_dma.phy_addr >> 4);
+ }
+ } else {
+ /* set emul 3.0 function */
+ mt76_wr(dev, MT_RRO_3_0_EMU_CONF,
+ MT_RRO_3_0_EMU_CONF_EN_MASK);
+
+ mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE0,
+ dev->wed_rro.addr_elem[0].phy_addr);
+ }
+
+ mt76_set(dev, MT_RRO_3_1_GLOBAL_CONFIG,
+ MT_RRO_3_1_GLOBAL_CONFIG_INTERLEAVE_EN);
+
+ /* setup Msdu page address */
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.msdu_pg); i++) {
+ mt76_wr(dev, reg,
+ dev->wed_rro.msdu_pg[i].phy_addr >> 4);
+ reg += 4;
+ }
+ } else {
+ /* TODO: remove line after WM has set */
+ mt76_clear(dev, WF_RRO_AXI_MST_CFG,
+ WF_RRO_AXI_MST_CFG_DIDX_OK);
+
+ /* setup BA bitmap cache address */
+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE0,
+ dev->wed_rro.ba_bitmap[0].phy_addr);
+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE1, 0);
+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT0,
+ dev->wed_rro.ba_bitmap[1].phy_addr);
+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT1, 0);
+
+ /* Setup Address element address */
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.addr_elem); i++) {
+ mt76_wr(dev, reg,
+ dev->wed_rro.addr_elem[i].phy_addr >> 4);
+ reg += 4;
+ }
+
+ /* Setup Address element address - separate address segment
+ * mode.
+ */
+ mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE1,
+ MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE);
+ }
+
+ mt7996_rro_hw_init_v3(dev);
+
+ /* interrupt enable */
+ mt76_wr(dev, MT_RRO_HOST_INT_ENA,
+ MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA);
+}
+
+static int mt7996_wed_rro_init(struct mt7996_dev *dev)
+{
+ u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK, 0xff);
struct mt7996_wed_rro_addr *addr;
void *ptr;
int i;
- if (!dev->has_rro)
+ if (!mt7996_has_hwrro(dev))
return 0;
- if (!mtk_wed_device_active(wed))
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(dev->wed_rro.ba_bitmap); i++) {
- ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
- MT7996_RRO_BA_BITMAP_CR_SIZE,
- &dev->wed_rro.ba_bitmap[i].phy_addr,
- GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3) {
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.ba_bitmap); i++) {
+ ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
+ MT7996_RRO_BA_BITMAP_CR_SIZE,
+ &dev->wed_rro.ba_bitmap[i].phy_addr,
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
- dev->wed_rro.ba_bitmap[i].ptr = ptr;
+ dev->wed_rro.ba_bitmap[i].ptr = ptr;
+ }
}
for (i = 0; i < ARRAY_SIZE(dev->wed_rro.addr_elem); i++) {
@@ -785,12 +950,54 @@ static int mt7996_wed_rro_init(struct mt7996_dev *dev)
addr = dev->wed_rro.addr_elem[i].ptr;
for (j = 0; j < MT7996_RRO_WINDOW_MAX_SIZE; j++) {
- addr->signature = 0xff;
+ addr->data = cpu_to_le32(val);
addr++;
}
- wed->wlan.ind_cmd.addr_elem_phys[i] =
- dev->wed_rro.addr_elem[i].phy_addr;
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+ mtk_wed_get_rx_capa(&dev->mt76.mmio.wed)) {
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+
+ wed->wlan.ind_cmd.addr_elem_phys[i] =
+ dev->wed_rro.addr_elem[i].phy_addr;
+ }
+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.msdu_pg); i++) {
+ ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
+ MT7996_RRO_MSDU_PG_SIZE_PER_CR,
+ &dev->wed_rro.msdu_pg[i].phy_addr,
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ dev->wed_rro.msdu_pg[i].ptr = ptr;
+
+ memset(dev->wed_rro.msdu_pg[i].ptr, 0,
+ MT7996_RRO_MSDU_PG_SIZE_PER_CR);
+ }
+
+ if (!mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+ dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) {
+ ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
+ sizeof(*dev->wed_rro.emi_rings_cpu.ptr),
+ &dev->wed_rro.emi_rings_cpu.phy_addr,
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ dev->wed_rro.emi_rings_cpu.ptr = ptr;
+
+ ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
+ sizeof(*dev->wed_rro.emi_rings_dma.ptr),
+ &dev->wed_rro.emi_rings_dma.phy_addr,
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ dev->wed_rro.emi_rings_dma.ptr = ptr;
}
ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
@@ -803,69 +1010,20 @@ static int mt7996_wed_rro_init(struct mt7996_dev *dev)
dev->wed_rro.session.ptr = ptr;
addr = dev->wed_rro.session.ptr;
for (i = 0; i < MT7996_RRO_WINDOW_MAX_LEN; i++) {
- addr->signature = 0xff;
+ addr->data = cpu_to_le32(val);
addr++;
}
- /* rro hw init */
- /* TODO: remove line after WM has set */
- mt76_clear(dev, WF_RRO_AXI_MST_CFG, WF_RRO_AXI_MST_CFG_DIDX_OK);
-
- /* setup BA bitmap cache address */
- mt76_wr(dev, MT_RRO_BA_BITMAP_BASE0,
- dev->wed_rro.ba_bitmap[0].phy_addr);
- mt76_wr(dev, MT_RRO_BA_BITMAP_BASE1, 0);
- mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT0,
- dev->wed_rro.ba_bitmap[1].phy_addr);
- mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT1, 0);
+ mt7996_rro_hw_init(dev);
- /* setup Address element address */
- for (i = 0; i < ARRAY_SIZE(dev->wed_rro.addr_elem); i++) {
- mt76_wr(dev, reg, dev->wed_rro.addr_elem[i].phy_addr >> 4);
- reg += 4;
- }
-
- /* setup Address element address - separate address segment mode */
- mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE1,
- MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE);
-
- wed->wlan.ind_cmd.win_size = ffs(MT7996_RRO_WINDOW_MAX_LEN) - 6;
- wed->wlan.ind_cmd.particular_sid = MT7996_RRO_MAX_SESSION;
- wed->wlan.ind_cmd.particular_se_phys = dev->wed_rro.session.phy_addr;
- wed->wlan.ind_cmd.se_group_nums = MT7996_RRO_ADDR_ELEM_LEN;
- wed->wlan.ind_cmd.ack_sn_addr = MT_RRO_ACK_SN_CTRL;
-
- mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0x15010e00);
- mt76_set(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1,
- MT_RRO_IND_CMD_SIGNATURE_BASE1_EN);
-
- /* particular session configure */
- /* use max session idx + 1 as particular session id */
- mt76_wr(dev, MT_RRO_PARTICULAR_CFG0, dev->wed_rro.session.phy_addr);
- mt76_wr(dev, MT_RRO_PARTICULAR_CFG1,
- MT_RRO_PARTICULAR_CONFG_EN |
- FIELD_PREP(MT_RRO_PARTICULAR_SID, MT7996_RRO_MAX_SESSION));
-
- /* interrupt enable */
- mt76_wr(dev, MT_RRO_HOST_INT_ENA,
- MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA);
-
- /* rro ind cmd queue init */
return mt7996_dma_rro_init(dev);
-#else
- return 0;
-#endif
}
static void mt7996_wed_rro_free(struct mt7996_dev *dev)
{
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
int i;
- if (!dev->has_rro)
- return;
-
- if (!mtk_wed_device_active(&dev->mt76.mmio.wed))
+ if (!mt7996_has_hwrro(dev))
return;
for (i = 0; i < ARRAY_SIZE(dev->wed_rro.ba_bitmap); i++) {
@@ -889,6 +1047,28 @@ static void mt7996_wed_rro_free(struct mt7996_dev *dev)
dev->wed_rro.addr_elem[i].phy_addr);
}
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.msdu_pg); i++) {
+ if (!dev->wed_rro.msdu_pg[i].ptr)
+ continue;
+
+ dmam_free_coherent(dev->mt76.dma_dev,
+ MT7996_RRO_MSDU_PG_SIZE_PER_CR,
+ dev->wed_rro.msdu_pg[i].ptr,
+ dev->wed_rro.msdu_pg[i].phy_addr);
+ }
+
+ if (dev->wed_rro.emi_rings_cpu.ptr)
+ dmam_free_coherent(dev->mt76.dma_dev,
+ sizeof(*dev->wed_rro.emi_rings_cpu.ptr),
+ dev->wed_rro.emi_rings_cpu.ptr,
+ dev->wed_rro.emi_rings_cpu.phy_addr);
+
+ if (dev->wed_rro.emi_rings_dma.ptr)
+ dmam_free_coherent(dev->mt76.dma_dev,
+ sizeof(*dev->wed_rro.emi_rings_dma.ptr),
+ dev->wed_rro.emi_rings_dma.ptr,
+ dev->wed_rro.emi_rings_dma.phy_addr);
+
if (!dev->wed_rro.session.ptr)
return;
@@ -897,12 +1077,11 @@ static void mt7996_wed_rro_free(struct mt7996_dev *dev)
sizeof(struct mt7996_wed_rro_addr),
dev->wed_rro.session.ptr,
dev->wed_rro.session.phy_addr);
-#endif
}
static void mt7996_wed_rro_work(struct work_struct *work)
{
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK, 0xff);
struct mt7996_dev *dev;
LIST_HEAD(list);
@@ -921,8 +1100,15 @@ static void mt7996_wed_rro_work(struct work_struct *work)
list);
list_del_init(&e->list);
+ if (mt76_npu_device_active(&dev->mt76))
+ goto reset_session;
+
for (i = 0; i < MT7996_RRO_WINDOW_MAX_LEN; i++) {
+#if defined(__linux__)
void *ptr = dev->wed_rro.session.ptr;
+#elif defined(__FreeBSD__)
+ u8 *ptr = dev->wed_rro.session.ptr;
+#endif
struct mt7996_wed_rro_addr *elem;
u32 idx, elem_id = i;
@@ -938,14 +1124,18 @@ static void mt7996_wed_rro_work(struct work_struct *work)
(e->id % MT7996_RRO_BA_BITMAP_SESSION_SIZE) *
MT7996_RRO_WINDOW_MAX_LEN;
reset:
+#if defined(__linux__)
elem = ptr + elem_id * sizeof(*elem);
- elem->signature = 0xff;
+#elif defined(__FreeBSD__)
+ elem = (void *)(ptr + elem_id * sizeof(*elem));
+#endif
+ elem->data |= cpu_to_le32(val);
}
+reset_session:
mt7996_mcu_wed_rro_reset_sessions(dev, e->id);
out:
kfree(e);
}
-#endif
}
static int mt7996_variant_type_init(struct mt7996_dev *dev)
@@ -1340,7 +1530,6 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
eht_cap->has_eht = true;
eht_cap_elem->mac_cap_info[0] =
- IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454,
IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
@@ -1532,6 +1721,10 @@ int mt7996_register_device(struct mt7996_dev *dev)
if (ret)
return ret;
+ ret = mt7996_npu_hw_init(dev);
+ if (ret)
+ return ret;
+
ret = mt76_register_device(&dev->mt76, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
if (ret)
@@ -1578,6 +1771,9 @@ void mt7996_unregister_device(struct mt7996_dev *dev)
mt7996_mcu_exit(dev);
mt7996_tx_token_put(dev);
mt7996_dma_cleanup(dev);
+ if (mt7996_has_hwrro(dev) &&
+ !mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mt7996_rro_msdu_page_map_free(dev);
tasklet_disable(&dev->mt76.irq_tasklet);
mt76_free_device(&dev->mt76);
diff --git a/sys/contrib/dev/mediatek/mt76/mt7996/mac.c b/sys/contrib/dev/mediatek/mt76/mt7996/mac.c
index 13ea5b55d619..b884f4475021 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7996/mac.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7996/mac.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2022 MediaTek Inc.
*/
@@ -11,6 +11,7 @@
#include "mac.h"
#include "mcu.h"
#if defined(__FreeBSD__)
+#include <linux/cache.h>
#include <linux/delay.h>
#endif
@@ -232,7 +233,9 @@ static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
- struct mt7996_sta *msta = (struct mt7996_sta *)status->wcid;
+ struct mt7996_sta_link *msta_link = (void *)status->wcid;
+ struct mt7996_sta *msta = msta_link->sta;
+ struct ieee80211_bss_conf *link_conf;
__le32 *rxd = (__le32 *)skb->data;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
@@ -249,8 +252,11 @@ static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
if (!msta || !msta->vif)
return -EINVAL;
- sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
+ sta = wcid_to_sta(status->wcid);
vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+ link_conf = rcu_dereference(vif->link_conf[msta_link->wcid.link_id]);
+ if (!link_conf)
+ return -EINVAL;
/* store the info from RXD and ethhdr to avoid being overridden */
frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
@@ -263,7 +269,7 @@ static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
switch (frame_control & (IEEE80211_FCTL_TODS |
IEEE80211_FCTL_FROMDS)) {
case 0:
- ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
+ ether_addr_copy(hdr.addr3, link_conf->bssid);
break;
case IEEE80211_FCTL_FROMDS:
ether_addr_copy(hdr.addr3, eth_hdr->h_source);
@@ -716,6 +722,7 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
status->flag |= RX_FLAG_8023;
mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
*info);
+ mt76_npu_check_ppe(&dev->mt76, skb, *info);
}
if (rxv && !(status->flag & RX_FLAG_8023)) {
@@ -792,6 +799,7 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
__le16 fc = hdr->frame_control, sc = hdr->seq_ctrl;
u16 seqno = le16_to_cpu(sc);
+ bool hw_bigtk = false;
u8 fc_type, fc_stype;
u32 val;
@@ -800,6 +808,9 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
if (is_mt7990(&dev->mt76))
txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TID_ADDBA, tid));
+ else
+ txwi[7] |= cpu_to_le32(MT_TXD7_MAC_TXD);
+
tid = MT_TX_ADDBA;
} else if (ieee80211_is_mgmt(hdr->frame_control)) {
tid = MT_TX_NORMAL;
@@ -814,7 +825,11 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
info->flags & IEEE80211_TX_CTL_USE_MINRATE)
val |= MT_TXD1_FIXED_RATE;
- if (key && multicast && ieee80211_is_robust_mgmt_frame(skb)) {
+ if (is_mt7990(&dev->mt76) && ieee80211_is_beacon(fc) &&
+ (wcid->hw_key_idx2 == 6 || wcid->hw_key_idx2 == 7))
+ hw_bigtk = true;
+
+ if ((key && multicast && ieee80211_is_robust_mgmt_frame(skb)) || hw_bigtk) {
val |= MT_TXD1_BIP;
txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
}
@@ -963,8 +978,9 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
val |= MT_TXD5_TX_STATUS_HOST;
txwi[5] = cpu_to_le32(val);
- val = MT_TXD6_DAS;
- if (q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
+ val = MT_TXD6_DAS | MT_TXD6_VTA;
+ if ((q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0) ||
+ skb->protocol == cpu_to_be16(ETH_P_PAE))
val |= MT_TXD6_DIS_MAT;
if (is_mt7996(&dev->mt76))
@@ -1028,15 +1044,20 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info)
{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
struct ieee80211_key_conf *key = info->control.hw_key;
struct ieee80211_vif *vif = info->control.vif;
- struct mt76_connac_txp_common *txp;
+ struct mt7996_vif *mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL;
+ struct mt7996_sta *msta = sta ? (struct mt7996_sta *)sta->drv_priv : NULL;
+ struct mt76_vif_link *mlink = NULL;
struct mt76_txwi_cache *t;
int id, i, pid, nbuf = tx_info->nbuf - 1;
bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
+ __le32 *ptr = (__le32 *)txwi_ptr;
u8 *txwi = (u8 *)txwi_ptr;
+ u8 link_id;
if (unlikely(tx_info->skb->len <= ETH_HLEN))
return -EINVAL;
@@ -1044,6 +1065,30 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (!wcid)
wcid = &dev->mt76.global_wcid;
+ if ((is_8023 || ieee80211_is_data_qos(hdr->frame_control)) && sta->mlo &&
+ likely(tx_info->skb->protocol != cpu_to_be16(ETH_P_PAE))) {
+ u8 tid = tx_info->skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+
+ link_id = (tid % 2) ? msta->seclink_id : msta->deflink_id;
+ } else {
+ link_id = u32_get_bits(info->control.flags,
+ IEEE80211_TX_CTRL_MLO_LINK);
+ }
+
+ if (link_id != wcid->link_id && link_id != IEEE80211_LINK_UNSPECIFIED) {
+ if (msta) {
+ struct mt7996_sta_link *msta_link =
+ rcu_dereference(msta->link[link_id]);
+
+ if (msta_link)
+ wcid = &msta_link->wcid;
+ } else if (mvif) {
+ mlink = rcu_dereference(mvif->mt76.link[link_id]);
+ if (mlink && mlink->wcid)
+ wcid = mlink->wcid;
+ }
+ }
+
t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
t->skb = tx_info->skb;
@@ -1051,6 +1096,41 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (id < 0)
return id;
+ /* Since the rules of HW MLD address translation are not fully
+ * compatible with 802.11 EAPOL frame, we do the translation by
+ * software
+ */
+ if (tx_info->skb->protocol == cpu_to_be16(ETH_P_PAE) && sta->mlo) {
+ struct ieee80211_hdr *hdr = (void *)tx_info->skb->data;
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_sta *link_sta;
+
+ link_conf = rcu_dereference(vif->link_conf[wcid->link_id]);
+ if (!link_conf)
+ return -EINVAL;
+
+ link_sta = rcu_dereference(sta->link[wcid->link_id]);
+ if (!link_sta)
+ return -EINVAL;
+
+ dma_sync_single_for_cpu(mdev->dma_dev, tx_info->buf[1].addr,
+ tx_info->buf[1].len, DMA_TO_DEVICE);
+
+ memcpy(hdr->addr1, link_sta->addr, ETH_ALEN);
+ memcpy(hdr->addr2, link_conf->addr, ETH_ALEN);
+ if (ieee80211_has_a4(hdr->frame_control)) {
+ memcpy(hdr->addr3, sta->addr, ETH_ALEN);
+ memcpy(hdr->addr4, vif->addr, ETH_ALEN);
+ } else if (ieee80211_has_tods(hdr->frame_control)) {
+ memcpy(hdr->addr3, sta->addr, ETH_ALEN);
+ } else if (ieee80211_has_fromds(hdr->frame_control)) {
+ memcpy(hdr->addr3, vif->addr, ETH_ALEN);
+ }
+
+ dma_sync_single_for_device(mdev->dma_dev, tx_info->buf[1].addr,
+ tx_info->buf[1].len, DMA_TO_DEVICE);
+ }
+
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
memset(txwi_ptr, 0, MT_TXD_SIZE);
/* Transmit non qos data by 802.11 header and need to fill txd by host*/
@@ -1058,46 +1138,73 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
pid, qid, 0);
- txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
- for (i = 0; i < nbuf; i++) {
- u16 len;
+ /* MT7996 and MT7992 require driver to provide the MAC TXP for AddBA
+ * req
+ */
+ if (le32_to_cpu(ptr[7]) & MT_TXD7_MAC_TXD) {
+ u32 val;
+
+ ptr = (__le32 *)(txwi + MT_TXD_SIZE);
+ memset((void *)ptr, 0, sizeof(struct mt76_connac_fw_txp));
+
+ val = FIELD_PREP(MT_TXP0_TOKEN_ID0, id) |
+ MT_TXP0_TOKEN_ID0_VALID_MASK;
+ ptr[0] = cpu_to_le32(val);
- len = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[i + 1].len);
+ val = FIELD_PREP(MT_TXP1_TID_ADDBA,
+ tx_info->skb->priority &
+ IEEE80211_QOS_CTL_TID_MASK);
+ ptr[1] = cpu_to_le32(val);
+ ptr[2] = cpu_to_le32(tx_info->buf[1].addr & 0xFFFFFFFF);
+
+ val = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[1].len) |
+ MT_TXP3_ML0_MASK;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- len |= FIELD_PREP(MT_TXP_DMA_ADDR_H,
- tx_info->buf[i + 1].addr >> 32);
+ val |= FIELD_PREP(MT_TXP3_DMA_ADDR_H,
+ tx_info->buf[1].addr >> 32);
#endif
+ ptr[3] = cpu_to_le32(val);
+ } else {
+ struct mt76_connac_txp_common *txp;
- txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
- txp->fw.len[i] = cpu_to_le16(len);
- }
- txp->fw.nbuf = nbuf;
+ txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
+ for (i = 0; i < nbuf; i++) {
+ u16 len;
- txp->fw.flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);
+ len = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[i + 1].len);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ len |= FIELD_PREP(MT_TXP_DMA_ADDR_H,
+ tx_info->buf[i + 1].addr >> 32);
+#endif
- if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
- txp->fw.flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);
+ txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
+ txp->fw.len[i] = cpu_to_le16(len);
+ }
+ txp->fw.nbuf = nbuf;
- if (!key)
- txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
+ txp->fw.flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);
- if (!is_8023 && mt7996_tx_use_mgmt(dev, tx_info->skb))
- txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
+ if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
+ txp->fw.flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);
- if (vif) {
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt76_vif_link *mlink = NULL;
+ if (!key)
+ txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
- if (wcid->offchannel)
- mlink = rcu_dereference(mvif->mt76.offchannel_link);
- if (!mlink)
- mlink = rcu_dereference(mvif->mt76.link[wcid->link_id]);
+ if (!is_8023 && mt7996_tx_use_mgmt(dev, tx_info->skb))
+ txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
- txp->fw.bss_idx = mlink ? mlink->idx : mvif->deflink.mt76.idx;
- }
+ if (mvif) {
+ if (wcid->offchannel)
+ mlink = rcu_dereference(mvif->mt76.offchannel_link);
+ if (!mlink)
+ mlink = rcu_dereference(mvif->mt76.link[wcid->link_id]);
- txp->fw.token = cpu_to_le16(id);
- txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);
+ txp->fw.bss_idx = mlink ? mlink->idx : mvif->deflink.mt76.idx;
+ }
+
+ txp->fw.token = cpu_to_le16(id);
+ txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);
+ }
tx_info->skb = NULL;
@@ -1190,8 +1297,14 @@ mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
if (link_sta) {
wcid_idx = wcid->idx;
- if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7996_tx_check_aggr(link_sta, wcid, t->skb);
+ if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) {
+ struct mt7996_sta *msta;
+
+ /* AMPDU state is stored in the primary link */
+ msta = (void *)link_sta->sta->drv_priv;
+ mt7996_tx_check_aggr(link_sta, &msta->deflink.wcid,
+ t->skb);
+ }
} else {
wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX);
}
@@ -1253,6 +1366,9 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
info = le32_to_cpu(*cur_info);
if (info & MT_TXFREE_INFO_PAIR) {
struct ieee80211_sta *sta;
+ unsigned long valid_links;
+ struct mt7996_sta *msta;
+ unsigned int id;
u16 idx;
idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
@@ -1267,7 +1383,21 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
if (!link_sta)
goto next;
- mt76_wcid_add_poll(&dev->mt76, wcid);
+ msta = (struct mt7996_sta *)sta->drv_priv;
+ valid_links = sta->valid_links ?: BIT(0);
+
+ /* For MLD STA, add all link's wcid to sta_poll_list */
+ for_each_set_bit(id, &valid_links,
+ IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct mt7996_sta_link *msta_link;
+
+ msta_link = rcu_dereference(msta->link[id]);
+ if (!msta_link)
+ continue;
+
+ mt76_wcid_add_poll(&dev->mt76,
+ &msta_link->wcid);
+ }
next:
/* ver 7 has a new DW with pair = 1, skip it */
if (ver == 7 && ((void *)(cur_info + 1) < end) &&
@@ -1589,6 +1719,366 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
}
}
+static struct mt7996_msdu_page *
+mt7996_msdu_page_get_from_cache(struct mt7996_dev *dev)
+{
+ struct mt7996_msdu_page *p = NULL;
+
+ spin_lock(&dev->wed_rro.lock);
+
+ if (!list_empty(&dev->wed_rro.page_cache)) {
+ p = list_first_entry(&dev->wed_rro.page_cache,
+ struct mt7996_msdu_page, list);
+ list_del(&p->list);
+ }
+
+ spin_unlock(&dev->wed_rro.lock);
+
+ return p;
+}
+
+static struct mt7996_msdu_page *mt7996_msdu_page_get(struct mt7996_dev *dev)
+{
+ struct mt7996_msdu_page *p;
+
+ p = mt7996_msdu_page_get_from_cache(dev);
+ if (!p) {
+ p = kzalloc(L1_CACHE_ALIGN(sizeof(*p)), GFP_ATOMIC);
+ if (p)
+ INIT_LIST_HEAD(&p->list);
+ }
+
+ return p;
+}
+
+static void mt7996_msdu_page_put_to_cache(struct mt7996_dev *dev,
+ struct mt7996_msdu_page *p)
+{
+ if (p->buf) {
+ mt76_put_page_pool_buf(p->buf, false);
+ p->buf = NULL;
+ }
+
+ spin_lock(&dev->wed_rro.lock);
+ list_add(&p->list, &dev->wed_rro.page_cache);
+ spin_unlock(&dev->wed_rro.lock);
+}
+
+static void mt7996_msdu_page_free_cache(struct mt7996_dev *dev)
+{
+ while (true) {
+ struct mt7996_msdu_page *p;
+
+ p = mt7996_msdu_page_get_from_cache(dev);
+ if (!p)
+ break;
+
+ if (p->buf)
+ mt76_put_page_pool_buf(p->buf, false);
+
+ kfree(p);
+ }
+}
+
+static u32 mt7996_msdu_page_hash_from_addr(dma_addr_t dma_addr)
+{
+ u32 val = 0;
+ int i = 0;
+
+ while (dma_addr) {
+ val += (u32)((dma_addr & 0xff) + i) % MT7996_RRO_MSDU_PG_HASH_SIZE;
+ dma_addr >>= 8;
+ i += 13;
+ }
+
+ return val % MT7996_RRO_MSDU_PG_HASH_SIZE;
+}
+
+static struct mt7996_msdu_page *
+mt7996_rro_msdu_page_get(struct mt7996_dev *dev, dma_addr_t dma_addr)
+{
+ u32 hash = mt7996_msdu_page_hash_from_addr(dma_addr);
+ struct mt7996_msdu_page *p, *tmp, *addr = NULL;
+
+ spin_lock(&dev->wed_rro.lock);
+
+ list_for_each_entry_safe(p, tmp, &dev->wed_rro.page_map[hash],
+ list) {
+ if (p->dma_addr == dma_addr) {
+ list_del(&p->list);
+ addr = p;
+ break;
+ }
+ }
+
+ spin_unlock(&dev->wed_rro.lock);
+
+ return addr;
+}
+
+static void mt7996_rx_token_put(struct mt7996_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->mt76.rx_token_size; i++) {
+ struct mt76_txwi_cache *t;
+
+ t = mt76_rx_token_release(&dev->mt76, i);
+ if (!t || !t->ptr)
+ continue;
+
+ mt76_put_page_pool_buf(t->ptr, false);
+ t->dma_addr = 0;
+ t->ptr = NULL;
+
+ mt76_put_rxwi(&dev->mt76, t);
+ }
+}
+
+void mt7996_rro_msdu_page_map_free(struct mt7996_dev *dev)
+{
+ struct mt7996_msdu_page *p, *tmp;
+ int i;
+
+ local_bh_disable();
+
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.page_map); i++) {
+ list_for_each_entry_safe(p, tmp, &dev->wed_rro.page_map[i],
+ list) {
+ list_del_init(&p->list);
+ if (p->buf)
+ mt76_put_page_pool_buf(p->buf, false);
+ kfree(p);
+ }
+ }
+ mt7996_msdu_page_free_cache(dev);
+
+ local_bh_enable();
+
+ mt7996_rx_token_put(dev);
+}
+
+int mt7996_rro_msdu_page_add(struct mt76_dev *mdev, struct mt76_queue *q,
+ dma_addr_t dma_addr, void *data)
+{
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ struct mt7996_msdu_page_info *pinfo = data;
+ struct mt7996_msdu_page *p;
+ u32 hash;
+
+ pinfo->data |= cpu_to_le32(FIELD_PREP(MSDU_PAGE_INFO_OWNER_MASK, 1));
+ p = mt7996_msdu_page_get(dev);
+ if (!p)
+ return -ENOMEM;
+
+ p->buf = data;
+ p->dma_addr = dma_addr;
+ p->q = q;
+
+ hash = mt7996_msdu_page_hash_from_addr(dma_addr);
+
+ spin_lock(&dev->wed_rro.lock);
+ list_add_tail(&p->list, &dev->wed_rro.page_map[hash]);
+ spin_unlock(&dev->wed_rro.lock);
+
+ return 0;
+}
+
+static struct mt7996_wed_rro_addr *
+mt7996_rro_addr_elem_get(struct mt7996_dev *dev, u16 session_id, u16 seq_num)
+{
+ u32 idx = 0;
+#if defined(__linux__)
+ void *addr;
+#elif defined(__FreeBSD__)
+ u8 *addr;
+#endif
+
+ if (session_id == MT7996_RRO_MAX_SESSION) {
+ addr = dev->wed_rro.session.ptr;
+ } else {
+ idx = session_id / MT7996_RRO_BA_BITMAP_SESSION_SIZE;
+ addr = dev->wed_rro.addr_elem[idx].ptr;
+
+ idx = session_id % MT7996_RRO_BA_BITMAP_SESSION_SIZE;
+ idx = idx * MT7996_RRO_WINDOW_MAX_LEN;
+ }
+ idx += seq_num % MT7996_RRO_WINDOW_MAX_LEN;
+
+ return (void *)(addr + idx * sizeof(struct mt7996_wed_rro_addr));
+}
+
+#define MT996_RRO_SN_MASK GENMASK(11, 0)
+
+void mt7996_rro_rx_process(struct mt76_dev *mdev, void *data)
+{
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ struct mt76_wed_rro_ind *cmd = (struct mt76_wed_rro_ind *)data;
+ u32 cmd_data0 = le32_to_cpu(cmd->data0);
+ u32 cmd_data1 = le32_to_cpu(cmd->data1);
+ u8 ind_reason = FIELD_GET(RRO_IND_DATA0_IND_REASON_MASK, cmd_data0);
+ u16 start_seq = FIELD_GET(RRO_IND_DATA0_START_SEQ_MASK, cmd_data0);
+ u16 seq_id = FIELD_GET(RRO_IND_DATA0_SEQ_ID_MASK, cmd_data0);
+ u16 ind_count = FIELD_GET(RRO_IND_DATA1_IND_COUNT_MASK, cmd_data1);
+ struct mt7996_msdu_page_info *pinfo = NULL;
+ struct mt7996_msdu_page *p = NULL;
+ int i, seq_num = 0;
+
+ for (i = 0; i < ind_count; i++) {
+ struct mt7996_wed_rro_addr *e;
+ struct mt76_rx_status *status;
+ struct mt7996_rro_hif *rxd;
+ int j, len, qid, data_len;
+ struct mt76_txwi_cache *t;
+ dma_addr_t dma_addr = 0;
+ u16 rx_token_id, count;
+ struct mt76_queue *q;
+ struct sk_buff *skb;
+ u32 info = 0, data;
+ u8 signature;
+ void *buf;
+ bool ls;
+
+ seq_num = FIELD_GET(MT996_RRO_SN_MASK, start_seq + i);
+ e = mt7996_rro_addr_elem_get(dev, seq_id, seq_num);
+ data = le32_to_cpu(e->data);
+ signature = FIELD_GET(WED_RRO_ADDR_SIGNATURE_MASK, data);
+ if (signature != (seq_num / MT7996_RRO_WINDOW_MAX_LEN)) {
+ u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK,
+ 0xff);
+
+ e->data |= cpu_to_le32(val);
+ goto update_ack_seq_num;
+ }
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ dma_addr = FIELD_GET(WED_RRO_ADDR_HEAD_HIGH_MASK, data);
+ dma_addr <<= 32;
+#endif
+ dma_addr |= le32_to_cpu(e->head_low);
+
+ count = FIELD_GET(WED_RRO_ADDR_COUNT_MASK, data);
+ for (j = 0; j < count; j++) {
+ if (!p) {
+ p = mt7996_rro_msdu_page_get(dev, dma_addr);
+ if (!p)
+ continue;
+
+ dma_sync_single_for_cpu(mdev->dma_dev, p->dma_addr,
+ SKB_WITH_OVERHEAD(p->q->buf_size),
+ page_pool_get_dma_dir(p->q->page_pool));
+ pinfo = (struct mt7996_msdu_page_info *)p->buf;
+ }
+
+ rxd = &pinfo->rxd[j % MT7996_MAX_HIF_RXD_IN_PG];
+ len = FIELD_GET(RRO_HIF_DATA1_SDL_MASK,
+ le32_to_cpu(rxd->data1));
+
+ rx_token_id = FIELD_GET(RRO_HIF_DATA4_RX_TOKEN_ID_MASK,
+ le32_to_cpu(rxd->data4));
+ t = mt76_rx_token_release(mdev, rx_token_id);
+ if (!t)
+ goto next_page;
+
+ qid = t->qid;
+ buf = t->ptr;
+ q = &mdev->q_rx[qid];
+ dma_sync_single_for_cpu(mdev->dma_dev, t->dma_addr,
+ SKB_WITH_OVERHEAD(q->buf_size),
+ page_pool_get_dma_dir(q->page_pool));
+
+ t->dma_addr = 0;
+ t->ptr = NULL;
+ mt76_put_rxwi(mdev, t);
+ if (!buf)
+ goto next_page;
+
+ if (q->rx_head)
+ data_len = q->buf_size;
+ else
+ data_len = SKB_WITH_OVERHEAD(q->buf_size);
+
+ if (data_len < len + q->buf_offset) {
+ dev_kfree_skb(q->rx_head);
+ mt76_put_page_pool_buf(buf, false);
+ q->rx_head = NULL;
+ goto next_page;
+ }
+
+ ls = FIELD_GET(RRO_HIF_DATA1_LS_MASK,
+ le32_to_cpu(rxd->data1));
+ if (q->rx_head) {
+ /* TODO: Take into account non-linear skb. */
+ mt76_put_page_pool_buf(buf, false);
+ if (ls) {
+ dev_kfree_skb(q->rx_head);
+ q->rx_head = NULL;
+ }
+ goto next_page;
+ }
+
+ if (ls && !mt7996_rx_check(mdev, buf, len))
+ goto next_page;
+
+ skb = build_skb(buf, q->buf_size);
+ if (!skb)
+ goto next_page;
+
+ skb_reserve(skb, q->buf_offset);
+ skb_mark_for_recycle(skb);
+ __skb_put(skb, len);
+
+ if (ind_reason == 1 || ind_reason == 2) {
+ dev_kfree_skb(skb);
+ goto next_page;
+ }
+
+ if (!ls) {
+ q->rx_head = skb;
+ goto next_page;
+ }
+
+ status = (struct mt76_rx_status *)skb->cb;
+ if (seq_id != MT7996_RRO_MAX_SESSION)
+ status->aggr = true;
+
+ mt7996_queue_rx_skb(mdev, qid, skb, &info);
+next_page:
+ if ((j + 1) % MT7996_MAX_HIF_RXD_IN_PG == 0) {
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ dma_addr =
+ FIELD_GET(MSDU_PAGE_INFO_PG_HIGH_MASK,
+ le32_to_cpu(pinfo->data));
+ dma_addr <<= 32;
+ dma_addr |= le32_to_cpu(pinfo->pg_low);
+#else
+ dma_addr = le32_to_cpu(pinfo->pg_low);
+#endif
+ mt7996_msdu_page_put_to_cache(dev, p);
+ p = NULL;
+ }
+ }
+
+update_ack_seq_num:
+ if ((i + 1) % 4 == 0)
+ mt76_wr(dev, MT_RRO_ACK_SN_CTRL,
+ FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK,
+ seq_id) |
+ FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK,
+ seq_num));
+ if (p) {
+ mt7996_msdu_page_put_to_cache(dev, p);
+ p = NULL;
+ }
+ }
+
+ /* Update ack_seq_num for remaining addr_elem */
+ if (i % 4)
+ mt76_wr(dev, MT_RRO_ACK_SN_CTRL,
+ FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK, seq_id) |
+ FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK, seq_num));
+}
+
void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
struct mt7996_dev *dev = phy->dev;
@@ -1737,7 +2227,8 @@ mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!link || link->phy != phy)
continue;
- mt7996_mcu_add_beacon(dev->mt76.hw, vif, link_conf);
+ mt7996_mcu_add_beacon(dev->mt76.hw, vif, link_conf,
+ link_conf->enable_beacon);
}
}
@@ -1781,13 +2272,10 @@ void mt7996_tx_token_put(struct mt7996_dev *dev)
static int
mt7996_mac_restart(struct mt7996_dev *dev)
{
- struct mt7996_phy *phy2, *phy3;
struct mt76_dev *mdev = &dev->mt76;
+ struct mt7996_phy *phy;
int i, ret;
- phy2 = mt7996_phy2(dev);
- phy3 = mt7996_phy3(dev);
-
if (dev->hif2) {
mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
@@ -1799,20 +2287,14 @@ mt7996_mac_restart(struct mt7996_dev *dev)
mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
}
- set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ mt7996_for_each_phy(dev, phy)
+ set_bit(MT76_RESET, &phy->mt76->state);
wake_up(&dev->mt76.mcu.wait);
- if (phy2)
- set_bit(MT76_RESET, &phy2->mt76->state);
- if (phy3)
- set_bit(MT76_RESET, &phy3->mt76->state);
/* lock/unlock all queues to ensure that no tx is pending */
- mt76_txq_schedule_all(&dev->mphy);
- if (phy2)
- mt76_txq_schedule_all(phy2->mt76);
- if (phy3)
- mt76_txq_schedule_all(phy3->mt76);
+ mt7996_for_each_phy(dev, phy)
+ mt76_txq_schedule_all(phy->mt76);
/* disable all tx/rx napi */
mt76_worker_disable(&dev->mt76.tx_worker);
@@ -1864,42 +2346,57 @@ mt7996_mac_restart(struct mt7996_dev *dev)
if (ret)
goto out;
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+ mt7996_has_hwrro(dev)) {
+ u32 wed_irq_mask = dev->mt76.mmio.irqmask |
+ MT_INT_TX_DONE_BAND2;
+
+ mt7996_rro_hw_init(dev);
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ if (mt76_queue_is_wed_rro_ind(&dev->mt76.q_rx[i]) ||
+ mt76_queue_is_wed_rro_msdu_pg(&dev->mt76.q_rx[i]))
+ mt76_queue_rx_reset(dev, i);
+ }
+
+ mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
+ mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
+ false);
+ mt7996_irq_enable(dev, wed_irq_mask);
+ mt7996_irq_disable(dev, 0);
+ }
+
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
+ mt76_wr(dev, MT_INT_PCIE1_MASK_CSR,
+ MT_INT_TX_RX_DONE_EXT);
+ mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
+ MT_INT_TX_RX_DONE_EXT);
+ }
+
/* set the necessary init items */
ret = mt7996_mcu_set_eeprom(dev);
if (ret)
goto out;
mt7996_mac_init(dev);
- mt7996_init_txpower(&dev->phy);
- mt7996_init_txpower(phy2);
- mt7996_init_txpower(phy3);
+ mt7996_for_each_phy(dev, phy)
+ mt7996_init_txpower(phy);
ret = mt7996_txbf_init(dev);
+ if (ret)
+ goto out;
- if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
- ret = mt7996_run(&dev->phy);
- if (ret)
- goto out;
- }
-
- if (phy2 && test_bit(MT76_STATE_RUNNING, &phy2->mt76->state)) {
- ret = mt7996_run(phy2);
- if (ret)
- goto out;
- }
+ mt7996_for_each_phy(dev, phy) {
+ if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
+ continue;
- if (phy3 && test_bit(MT76_STATE_RUNNING, &phy3->mt76->state)) {
- ret = mt7996_run(phy3);
+ ret = mt7996_run(phy);
if (ret)
goto out;
}
out:
/* reset done */
- clear_bit(MT76_RESET, &dev->mphy.state);
- if (phy2)
- clear_bit(MT76_RESET, &phy2->mt76->state);
- if (phy3)
- clear_bit(MT76_RESET, &phy3->mt76->state);
+ mt7996_for_each_phy(dev, phy)
+ clear_bit(MT76_RESET, &phy->mt76->state);
napi_enable(&dev->mt76.tx_napi);
local_bh_disable();
@@ -1911,74 +2408,123 @@ out:
}
static void
+mt7996_mac_reset_sta_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_dev *dev = data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(msta->link); i++) {
+ struct mt7996_sta_link *msta_link = NULL;
+
+ msta_link = rcu_replace_pointer(msta->link[i], msta_link,
+ lockdep_is_held(&dev->mt76.mutex));
+ if (!msta_link)
+ continue;
+
+ mt7996_mac_sta_deinit_link(dev, msta_link);
+
+ if (msta->deflink_id == i) {
+ msta->deflink_id = IEEE80211_LINK_UNSPECIFIED;
+ continue;
+ }
+
+ kfree_rcu(msta_link, rcu_head);
+ }
+}
+
+static void
+mt7996_mac_reset_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+ struct mt76_vif_data *mvif = mlink->mvif;
+ struct mt7996_dev *dev = data;
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < ARRAY_SIZE(mvif->link); i++) {
+
+ mlink = mt76_dereference(mvif->link[i], &dev->mt76);
+ if (!mlink || mlink == (struct mt76_vif_link *)vif->drv_priv)
+ continue;
+
+ rcu_assign_pointer(mvif->link[i], NULL);
+ kfree_rcu(mlink, rcu_head);
+ }
+ rcu_read_unlock();
+}
+
+static void
mt7996_mac_full_reset(struct mt7996_dev *dev)
{
- struct mt7996_phy *phy2, *phy3;
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt7996_phy *phy;
+ LIST_HEAD(list);
int i;
- phy2 = mt7996_phy2(dev);
- phy3 = mt7996_phy3(dev);
dev->recovery.hw_full_reset = true;
wake_up(&dev->mt76.mcu.wait);
- ieee80211_stop_queues(mt76_hw(dev));
- if (phy2)
- ieee80211_stop_queues(phy2->mt76->hw);
- if (phy3)
- ieee80211_stop_queues(phy3->mt76->hw);
+ ieee80211_stop_queues(hw);
cancel_work_sync(&dev->wed_rro.work);
- cancel_delayed_work_sync(&dev->mphy.mac_work);
- if (phy2)
- cancel_delayed_work_sync(&phy2->mt76->mac_work);
- if (phy3)
- cancel_delayed_work_sync(&phy3->mt76->mac_work);
+ mt7996_for_each_phy(dev, phy)
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
+
+ mt76_abort_scan(&dev->mt76);
mutex_lock(&dev->mt76.mutex);
for (i = 0; i < 10; i++) {
if (!mt7996_mac_restart(dev))
break;
}
- mutex_unlock(&dev->mt76.mutex);
if (i == 10)
dev_err(dev->mt76.dev, "chip full reset failed\n");
- ieee80211_restart_hw(mt76_hw(dev));
- if (phy2)
- ieee80211_restart_hw(phy2->mt76->hw);
- if (phy3)
- ieee80211_restart_hw(phy3->mt76->hw);
+ mt7996_for_each_phy(dev, phy)
+ phy->omac_mask = 0;
- ieee80211_wake_queues(mt76_hw(dev));
- if (phy2)
- ieee80211_wake_queues(phy2->mt76->hw);
- if (phy3)
- ieee80211_wake_queues(phy3->mt76->hw);
+ ieee80211_iterate_stations_atomic(hw, mt7996_mac_reset_sta_iter, dev);
+ ieee80211_iterate_active_interfaces_atomic(hw,
+ IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
+ mt7996_mac_reset_vif_iter, dev);
+ mt76_reset_device(&dev->mt76);
+
+ INIT_LIST_HEAD(&dev->sta_rc_list);
+ INIT_LIST_HEAD(&dev->twt_list);
+
+ spin_lock_bh(&dev->wed_rro.lock);
+ list_splice_init(&dev->wed_rro.poll_list, &list);
+ spin_unlock_bh(&dev->wed_rro.lock);
+
+ while (!list_empty(&list)) {
+ struct mt7996_wed_rro_session_id *e;
+
+ e = list_first_entry(&list, struct mt7996_wed_rro_session_id,
+ list);
+ list_del_init(&e->list);
+ kfree(e);
+ }
+ i = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7996_WTBL_STA);
+ dev->mt76.global_wcid.idx = i;
dev->recovery.hw_full_reset = false;
- ieee80211_queue_delayed_work(mt76_hw(dev),
- &dev->mphy.mac_work,
- MT7996_WATCHDOG_TIME);
- if (phy2)
- ieee80211_queue_delayed_work(phy2->mt76->hw,
- &phy2->mt76->mac_work,
- MT7996_WATCHDOG_TIME);
- if (phy3)
- ieee80211_queue_delayed_work(phy3->mt76->hw,
- &phy3->mt76->mac_work,
- MT7996_WATCHDOG_TIME);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ ieee80211_restart_hw(mt76_hw(dev));
}
void mt7996_mac_reset_work(struct work_struct *work)
{
- struct mt7996_phy *phy2, *phy3;
+ struct ieee80211_hw *hw;
struct mt7996_dev *dev;
+ struct mt7996_phy *phy;
int i;
dev = container_of(work, struct mt7996_dev, reset_work);
- phy2 = mt7996_phy2(dev);
- phy3 = mt7996_phy3(dev);
+ hw = mt76_hw(dev);
/* chip full reset */
if (dev->recovery.restart) {
@@ -2009,7 +2555,7 @@ void mt7996_mac_reset_work(struct work_struct *work)
return;
dev_info(dev->mt76.dev,"\n%s L1 SER recovery start.",
- wiphy_name(dev->mt76.hw->wiphy));
+ wiphy_name(hw->wiphy));
if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
mtk_wed_device_stop(&dev->mt76.mmio.wed_hif2);
@@ -2018,25 +2564,19 @@ void mt7996_mac_reset_work(struct work_struct *work)
mtk_wed_device_stop(&dev->mt76.mmio.wed);
ieee80211_stop_queues(mt76_hw(dev));
- if (phy2)
- ieee80211_stop_queues(phy2->mt76->hw);
- if (phy3)
- ieee80211_stop_queues(phy3->mt76->hw);
set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ mt76_abort_scan(&dev->mt76);
wake_up(&dev->mt76.mcu.wait);
cancel_work_sync(&dev->wed_rro.work);
- cancel_delayed_work_sync(&dev->mphy.mac_work);
- if (phy2) {
- set_bit(MT76_RESET, &phy2->mt76->state);
- cancel_delayed_work_sync(&phy2->mt76->mac_work);
- }
- if (phy3) {
- set_bit(MT76_RESET, &phy3->mt76->state);
- cancel_delayed_work_sync(&phy3->mt76->mac_work);
+ mt7996_for_each_phy(dev, phy) {
+ mt76_abort_roc(phy->mt76);
+ set_bit(MT76_RESET, &phy->mt76->state);
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
}
+
mt76_worker_disable(&dev->mt76.tx_worker);
mt76_for_each_q_rx(&dev->mt76, i) {
if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
@@ -2049,6 +2589,8 @@ void mt7996_mac_reset_work(struct work_struct *work)
mutex_lock(&dev->mt76.mutex);
+ mt7996_npu_hw_stop(dev);
+
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
@@ -2064,18 +2606,17 @@ void mt7996_mac_reset_work(struct work_struct *work)
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
- /* enable DMA Tx/Tx and interrupt */
+ /* enable DMA Rx/Tx and interrupt */
mt7996_dma_start(dev, false, false);
+ if (!is_mt7996(&dev->mt76) && dev->mt76.hwrro_mode == MT76_HWRRO_V3)
+ mt76_wr(dev, MT_RRO_3_0_EMU_CONF, MT_RRO_3_0_EMU_CONF_EN_MASK);
+
if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
- u32 wed_irq_mask = MT_INT_RRO_RX_DONE | MT_INT_TX_DONE_BAND2 |
+ u32 wed_irq_mask = MT_INT_TX_DONE_BAND2 |
dev->mt76.mmio.irqmask;
- if (mtk_wed_get_rx_capa(&dev->mt76.mmio.wed))
- wed_irq_mask &= ~MT_INT_RX_DONE_RRO_IND;
-
mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
-
mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
true);
mt7996_irq_enable(dev, wed_irq_mask);
@@ -2089,11 +2630,8 @@ void mt7996_mac_reset_work(struct work_struct *work)
}
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
- clear_bit(MT76_RESET, &dev->mphy.state);
- if (phy2)
- clear_bit(MT76_RESET, &phy2->mt76->state);
- if (phy3)
- clear_bit(MT76_RESET, &phy3->mt76->state);
+ mt7996_for_each_phy(dev, phy)
+ clear_bit(MT76_RESET, &phy->mt76->state);
mt76_for_each_q_rx(&dev->mt76, i) {
if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
@@ -2115,25 +2653,15 @@ void mt7996_mac_reset_work(struct work_struct *work)
napi_schedule(&dev->mt76.tx_napi);
local_bh_enable();
- ieee80211_wake_queues(mt76_hw(dev));
- if (phy2)
- ieee80211_wake_queues(phy2->mt76->hw);
- if (phy3)
- ieee80211_wake_queues(phy3->mt76->hw);
+ ieee80211_wake_queues(hw);
+ mt7996_update_beacons(dev);
mutex_unlock(&dev->mt76.mutex);
- mt7996_update_beacons(dev);
+ mt7996_npu_hw_init(dev);
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
- MT7996_WATCHDOG_TIME);
- if (phy2)
- ieee80211_queue_delayed_work(phy2->mt76->hw,
- &phy2->mt76->mac_work,
- MT7996_WATCHDOG_TIME);
- if (phy3)
- ieee80211_queue_delayed_work(phy3->mt76->hw,
- &phy3->mt76->mac_work,
+ mt7996_for_each_phy(dev, phy)
+ ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
MT7996_WATCHDOG_TIME);
dev_info(dev->mt76.dev,"\n%s L1 SER recovery completed.",
wiphy_name(dev->mt76.hw->wiphy));
@@ -2382,6 +2910,8 @@ void mt7996_mac_sta_rc_work(struct work_struct *work)
LIST_HEAD(list);
u32 changed;
+ mutex_lock(&dev->mt76.mutex);
+
spin_lock_bh(&dev->mt76.sta_poll_lock);
list_splice_init(&dev->sta_rc_list, &list);
@@ -2414,6 +2944,8 @@ void mt7996_mac_sta_rc_work(struct work_struct *work)
}
spin_unlock_bh(&dev->mt76.sta_poll_lock);
+
+ mutex_unlock(&dev->mt76.mutex);
}
void mt7996_mac_work(struct work_struct *work)
diff --git a/sys/contrib/dev/mediatek/mt76/mt7996/mac.h b/sys/contrib/dev/mediatek/mt76/mt7996/mac.h
index e629324a5617..4eca37b013fc 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7996/mac.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7996/mac.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2022 MediaTek Inc.
*/
diff --git a/sys/contrib/dev/mediatek/mt76/mt7996/main.c b/sys/contrib/dev/mediatek/mt76/mt7996/main.c
index 84f731b387d2..beed795edb24 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7996/main.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7996/main.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2022 MediaTek Inc.
*/
@@ -90,9 +90,11 @@ static void mt7996_stop(struct ieee80211_hw *hw, bool suspend)
{
}
-static inline int get_free_idx(u32 mask, u8 start, u8 end)
+static inline int get_free_idx(u64 mask, u8 start, u8 end)
{
- return ffs(~mask & GENMASK(end, start));
+ if (~mask & GENMASK_ULL(end, start))
+ return __ffs64(~mask & GENMASK_ULL(end, start)) + 1;
+ return 0;
}
static int get_omac_idx(enum nl80211_iftype type, u64 mask)
@@ -138,6 +140,28 @@ static int get_omac_idx(enum nl80211_iftype type, u64 mask)
return -1;
}
+static int get_own_mld_idx(u64 mask, bool group_mld)
+{
+ u8 start = group_mld ? 0 : 16;
+ u8 end = group_mld ? 15 : 63;
+ int idx;
+
+ idx = get_free_idx(mask, start, end);
+ if (idx)
+ return idx - 1;
+
+ /* If the 16-63 range is not available, perform another lookup in the
+ * range 0-15
+ */
+ if (!group_mld) {
+ idx = get_free_idx(mask, 0, 15);
+ if (idx)
+ return idx - 1;
+ }
+
+ return -EINVAL;
+}
+
static void
mt7996_init_bitrate_mask(struct ieee80211_vif *vif, struct mt7996_vif_link *mlink)
{
@@ -160,112 +184,106 @@ mt7996_init_bitrate_mask(struct ieee80211_vif *vif, struct mt7996_vif_link *mlin
static int
mt7996_set_hw_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
+ unsigned int link_id, struct ieee80211_key_conf *key)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct ieee80211_bss_conf *link_conf;
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
int idx = key->keyidx;
- unsigned int link_id;
- unsigned long links;
+ u8 *wcid_keyidx;
+ bool is_bigtk;
+ int err;
- if (key->link_id >= 0)
- links = BIT(key->link_id);
- else if (sta && sta->valid_links)
- links = sta->valid_links;
- else if (vif->valid_links)
- links = vif->valid_links;
- else
- links = BIT(0);
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ return 0;
- for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
- struct mt7996_sta_link *msta_link;
- struct mt7996_vif_link *link;
- u8 *wcid_keyidx;
- int err;
+ if (!mt7996_vif_link_phy(link))
+ return 0;
- link = mt7996_vif_link(dev, vif, link_id);
- if (!link)
- continue;
+ if (sta) {
+ struct mt7996_sta *msta;
- if (sta) {
- struct mt7996_sta *msta;
+ msta = (struct mt7996_sta *)sta->drv_priv;
+ msta_link = mt76_dereference(msta->link[link_id],
+ &dev->mt76);
+ if (!msta_link)
+ return 0;
- msta = (struct mt7996_sta *)sta->drv_priv;
- msta_link = mt76_dereference(msta->link[link_id],
- &dev->mt76);
- if (!msta_link)
- continue;
+ if (!msta_link->wcid.sta)
+ return -EOPNOTSUPP;
+ } else {
+ msta_link = &link->msta_link;
+ }
+ wcid_keyidx = &msta_link->wcid.hw_key_idx;
- if (!msta_link->wcid.sta)
- return -EOPNOTSUPP;
- } else {
- msta_link = &link->msta_link;
- }
- wcid_keyidx = &msta_link->wcid.hw_key_idx;
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_AES_CMAC:
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- if (key->keyidx == 6 || key->keyidx == 7) {
- wcid_keyidx = &msta_link->wcid.hw_key_idx2;
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
- }
- break;
- default:
- break;
+ is_bigtk = key->keyidx == 6 || key->keyidx == 7;
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ if (is_bigtk) {
+ wcid_keyidx = &msta_link->wcid.hw_key_idx2;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
}
+ break;
+ default:
+ break;
+ }
- if (cmd == SET_KEY && !sta && !link->mt76.cipher) {
- struct ieee80211_bss_conf *link_conf;
+ link_conf = link_conf_dereference_protected(vif, link_id);
+ if (!link_conf)
+ link_conf = &vif->bss_conf;
- link_conf = link_conf_dereference_protected(vif,
- link_id);
- if (!link_conf)
- link_conf = &vif->bss_conf;
+ if (cmd == SET_KEY && !sta && !link->mt76.cipher) {
+ link->mt76.cipher =
+ mt76_connac_mcu_get_cipher(key->cipher);
+ mt7996_mcu_add_bss_info(link->phy, vif, link_conf,
+ &link->mt76, msta_link, true);
+ }
- link->mt76.cipher =
- mt76_connac_mcu_get_cipher(key->cipher);
- mt7996_mcu_add_bss_info(link->phy, vif, link_conf,
- &link->mt76, msta_link, true);
- }
+ if (cmd == SET_KEY)
+ *wcid_keyidx = idx;
+ else if (idx == *wcid_keyidx)
+ *wcid_keyidx = -1;
- if (cmd == SET_KEY) {
- *wcid_keyidx = idx;
- } else {
- if (idx == *wcid_keyidx)
- *wcid_keyidx = -1;
- continue;
- }
+ /* only do remove key for BIGTK */
+ if (cmd != SET_KEY && !is_bigtk)
+ return 0;
- mt76_wcid_key_setup(&dev->mt76, &msta_link->wcid, key);
+ mt76_wcid_key_setup(&dev->mt76, &msta_link->wcid, key);
- if (key->keyidx == 6 || key->keyidx == 7) {
- err = mt7996_mcu_bcn_prot_enable(dev, link,
- msta_link, key);
- if (err)
- return err;
- }
+ err = mt7996_mcu_add_key(&dev->mt76, link, key,
+ MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
+ &msta_link->wcid, cmd);
- err = mt7996_mcu_add_key(&dev->mt76, vif, key,
- MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
- &msta_link->wcid, cmd);
- if (err)
- return err;
+ /* remove and add beacon in order to enable beacon protection */
+ if (cmd == SET_KEY && is_bigtk && link_conf->enable_beacon) {
+ mt7996_mcu_add_beacon(hw, vif, link_conf, false);
+ mt7996_mcu_add_beacon(hw, vif, link_conf, true);
}
- return 0;
+ return err;
}
+struct mt7996_key_iter_data {
+ enum set_key_cmd cmd;
+ unsigned int link_id;
+};
+
static void
mt7996_key_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct ieee80211_key_conf *key,
void *data)
{
+ struct mt7996_key_iter_data *it = data;
+
if (sta)
return;
- WARN_ON(mt7996_set_hw_key(hw, SET_KEY, vif, NULL, key));
+ WARN_ON(mt7996_set_hw_key(hw, it->cmd, vif, NULL, it->link_id, key));
}
int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
@@ -278,8 +296,12 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
struct mt7996_phy *phy = mphy->priv;
struct mt7996_dev *dev = phy->dev;
u8 band_idx = phy->mt76->band_idx;
+ struct mt7996_key_iter_data it = {
+ .cmd = SET_KEY,
+ .link_id = link_conf->link_id,
+ };
struct mt76_txq *mtxq;
- int idx, ret;
+ int mld_idx, idx, ret;
mlink->idx = __ffs64(~dev->mt76.vif_mask);
if (mlink->idx >= mt7996_max_interface_num(dev))
@@ -289,6 +311,11 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
if (idx < 0)
return -ENOSPC;
+ mld_idx = get_own_mld_idx(dev->mld_idx_mask, false);
+ if (mld_idx < 0)
+ return -ENOSPC;
+
+ link->mld_idx = mld_idx;
link->phy = phy;
mlink->omac_idx = idx;
mlink->band_idx = band_idx;
@@ -301,6 +328,7 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
return ret;
dev->mt76.vif_mask |= BIT_ULL(mlink->idx);
+ dev->mld_idx_mask |= BIT_ULL(link->mld_idx);
phy->omac_mask |= BIT_ULL(mlink->omac_idx);
idx = MT7996_WTBL_RESERVED - mlink->idx;
@@ -308,6 +336,7 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
INIT_LIST_HEAD(&msta_link->rc_list);
msta_link->wcid.idx = idx;
msta_link->wcid.link_id = link_conf->link_id;
+ msta_link->wcid.link_valid = ieee80211_vif_is_mld(vif);
msta_link->wcid.tx_info |= MT_WCID_TX_INFO_SET;
mt76_wcid_init(&msta_link->wcid, band_idx);
@@ -339,9 +368,10 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
CONN_STATE_PORT_SECURE, true);
rcu_assign_pointer(dev->mt76.wcid[idx], &msta_link->wcid);
- ieee80211_iter_keys(mphy->hw, vif, mt7996_key_iter, NULL);
+ ieee80211_iter_keys(mphy->hw, vif, mt7996_key_iter, &it);
- if (mvif->mt76.deflink_id == IEEE80211_LINK_UNSPECIFIED)
+ if (!mlink->wcid->offchannel &&
+ mvif->mt76.deflink_id == IEEE80211_LINK_UNSPECIFIED)
mvif->mt76.deflink_id = link_conf->link_id;
return 0;
@@ -356,8 +386,15 @@ void mt7996_vif_link_remove(struct mt76_phy *mphy, struct ieee80211_vif *vif,
struct mt7996_sta_link *msta_link = &link->msta_link;
struct mt7996_phy *phy = mphy->priv;
struct mt7996_dev *dev = phy->dev;
+ struct mt7996_key_iter_data it = {
+ .cmd = SET_KEY,
+ .link_id = link_conf->link_id,
+ };
int idx = msta_link->wcid.idx;
+ if (!mlink->wcid->offchannel)
+ ieee80211_iter_keys(mphy->hw, vif, mt7996_key_iter, &it);
+
mt7996_mcu_add_sta(dev, link_conf, NULL, link, NULL,
CONN_STATE_DISCONNECT, false);
mt7996_mcu_add_bss_info(phy, vif, link_conf, mlink, msta_link, false);
@@ -366,7 +403,8 @@ void mt7996_vif_link_remove(struct mt76_phy *mphy, struct ieee80211_vif *vif,
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
- if (mvif->mt76.deflink_id == link_conf->link_id) {
+ if (!mlink->wcid->offchannel &&
+ mvif->mt76.deflink_id == link_conf->link_id) {
struct ieee80211_bss_conf *iter;
unsigned int link_id;
@@ -380,6 +418,7 @@ void mt7996_vif_link_remove(struct mt76_phy *mphy, struct ieee80211_vif *vif,
}
dev->mt76.vif_mask &= ~BIT_ULL(mlink->idx);
+ dev->mld_idx_mask &= ~BIT_ULL(link->mld_idx);
phy->omac_mask &= ~BIT_ULL(mlink->omac_idx);
spin_lock_bh(&dev->mt76.sta_poll_lock);
@@ -551,8 +590,9 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- int err;
+ unsigned int link_id;
+ unsigned long links;
+ int err = 0;
/* The hardware does not support per-STA RX GTK, fallback
* to software mode for these.
@@ -586,11 +626,22 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
- if (!mt7996_vif_link_phy(&mvif->deflink))
- return 0; /* defer until after link add */
-
mutex_lock(&dev->mt76.mutex);
- err = mt7996_set_hw_key(hw, cmd, vif, sta, key);
+
+ if (key->link_id >= 0)
+ links = BIT(key->link_id);
+ else if (sta && sta->valid_links)
+ links = sta->valid_links;
+ else if (vif->valid_links)
+ links = vif->valid_links;
+ else
+ links = BIT(0);
+
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ err = mt7996_set_hw_key(hw, cmd, vif, sta, link_id, key);
+ if (err)
+ break;
+ }
mutex_unlock(&dev->mt76.mutex);
return err;
@@ -606,8 +657,8 @@ mt7996_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
- struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_vif_link *mlink = mt7996_vif_link(dev, vif, link_id);
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_vif_link_info *link_info = &mvif->link_info[link_id];
static const u8 mq_to_aci[] = {
[IEEE80211_AC_VO] = 3,
[IEEE80211_AC_VI] = 2,
@@ -616,7 +667,7 @@ mt7996_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
};
/* firmware uses access class index */
- mlink->queue_params[mq_to_aci[queue]] = *params;
+ link_info->queue_params[mq_to_aci[queue]] = *params;
/* no need to update right away, we'll get BSS_CHANGED_QOS */
return 0;
@@ -850,7 +901,7 @@ mt7996_link_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
link->mt76.beacon_rates_idx =
mt7996_get_rates_table(phy, info, true, false);
- mt7996_mcu_add_beacon(hw, vif, info);
+ mt7996_mcu_add_beacon(hw, vif, info, info->enable_beacon);
}
if (changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
@@ -878,7 +929,7 @@ mt7996_channel_switch_beacon(struct ieee80211_hw *hw,
struct mt7996_dev *dev = mt7996_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
- mt7996_mcu_add_beacon(hw, vif, &vif->bss_conf);
+ mt7996_mcu_add_beacon(hw, vif, &vif->bss_conf, vif->bss_conf.enable_beacon);
mutex_unlock(&dev->mt76.mutex);
}
@@ -903,6 +954,7 @@ mt7996_mac_sta_init_link(struct mt7996_dev *dev,
msta_link = &msta->deflink;
msta->deflink_id = link_id;
+ msta->seclink_id = msta->deflink_id;
for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
struct mt76_txq *mtxq;
@@ -917,6 +969,11 @@ mt7996_mac_sta_init_link(struct mt7996_dev *dev,
msta_link = kzalloc(sizeof(*msta_link), GFP_KERNEL);
if (!msta_link)
return -ENOMEM;
+
+ if (msta->seclink_id == msta->deflink_id &&
+ (sta->valid_links & ~BIT(msta->deflink_id)))
+ msta->seclink_id = __ffs(sta->valid_links &
+ ~BIT(msta->deflink_id));
}
INIT_LIST_HEAD(&msta_link->rc_list);
@@ -925,6 +982,8 @@ mt7996_mac_sta_init_link(struct mt7996_dev *dev,
msta_link->wcid.sta = 1;
msta_link->wcid.idx = idx;
msta_link->wcid.link_id = link_id;
+ msta_link->wcid.link_valid = !!sta->valid_links;
+ msta_link->wcid.def_wcid = &msta->deflink.wcid;
ewma_avg_signal_init(&msta_link->avg_ack_signal);
ewma_signal_init(&msta_link->wcid.rssi);
@@ -941,18 +1000,9 @@ mt7996_mac_sta_init_link(struct mt7996_dev *dev,
return 0;
}
-static void
-mt7996_mac_sta_deinit_link(struct mt7996_dev *dev,
- struct mt7996_sta_link *msta_link)
+void mt7996_mac_sta_deinit_link(struct mt7996_dev *dev,
+ struct mt7996_sta_link *msta_link)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(msta_link->wcid.aggr); i++)
- mt76_rx_aggr_stop(&dev->mt76, &msta_link->wcid, i);
-
- mt7996_mac_wtbl_update(dev, msta_link->wcid.idx,
- MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
-
spin_lock_bh(&dev->mt76.sta_poll_lock);
if (!list_empty(&msta_link->wcid.poll_list))
list_del_init(&msta_link->wcid.poll_list);
@@ -982,6 +1032,9 @@ mt7996_mac_sta_remove_links(struct mt7996_dev *dev, struct ieee80211_vif *vif,
if (!msta_link)
continue;
+ mt7996_mac_wtbl_update(dev, msta_link->wcid.idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
mt7996_mac_sta_deinit_link(dev, msta_link);
link = mt7996_vif_link(dev, vif, link_id);
if (!link)
@@ -995,6 +1048,8 @@ mt7996_mac_sta_remove_links(struct mt7996_dev *dev, struct ieee80211_vif *vif,
if (msta->deflink_id == link_id) {
msta->deflink_id = IEEE80211_LINK_UNSPECIFIED;
continue;
+ } else if (msta->seclink_id == link_id) {
+ msta->seclink_id = IEEE80211_LINK_UNSPECIFIED;
}
kfree_rcu(msta_link, rcu_head);
@@ -1036,16 +1091,17 @@ mt7996_mac_sta_add_links(struct mt7996_dev *dev, struct ieee80211_vif *vif,
goto error_unlink;
}
- err = mt7996_mac_sta_init_link(dev, link_conf, link_sta, link,
- link_id);
- if (err)
- goto error_unlink;
-
mphy = mt76_vif_link_phy(&link->mt76);
if (!mphy) {
err = -EINVAL;
goto error_unlink;
}
+
+ err = mt7996_mac_sta_init_link(dev, link_conf, link_sta, link,
+ link_id);
+ if (err)
+ goto error_unlink;
+
mphy->num_sta++;
}
@@ -1089,6 +1145,7 @@ mt7996_mac_sta_add(struct mt7996_dev *dev, struct ieee80211_vif *vif,
mutex_lock(&dev->mt76.mutex);
msta->deflink_id = IEEE80211_LINK_UNSPECIFIED;
+ msta->seclink_id = IEEE80211_LINK_UNSPECIFIED;
msta->vif = mvif;
err = mt7996_mac_sta_add_links(dev, vif, sta, links);
@@ -1105,12 +1162,15 @@ mt7996_mac_sta_event(struct mt7996_dev *dev, struct ieee80211_vif *vif,
unsigned long links = sta->valid_links;
struct ieee80211_link_sta *link_sta;
unsigned int link_id;
+ int err = 0;
+
+ mutex_lock(&dev->mt76.mutex);
for_each_sta_active_link(vif, sta, link_sta, link_id) {
struct ieee80211_bss_conf *link_conf;
struct mt7996_sta_link *msta_link;
struct mt7996_vif_link *link;
- int i, err;
+ int i;
link_conf = link_conf_dereference_protected(vif, link_id);
if (!link_conf)
@@ -1130,12 +1190,12 @@ mt7996_mac_sta_event(struct mt7996_dev *dev, struct ieee80211_vif *vif,
link, msta_link,
CONN_STATE_CONNECT, true);
if (err)
- return err;
+ goto unlock;
err = mt7996_mcu_add_rate_ctrl(dev, msta_link->sta, vif,
link_id, false);
if (err)
- return err;
+ goto unlock;
msta_link->wcid.tx_info |= MT_WCID_TX_INFO_SET;
break;
@@ -1144,28 +1204,30 @@ mt7996_mac_sta_event(struct mt7996_dev *dev, struct ieee80211_vif *vif,
link, msta_link,
CONN_STATE_PORT_SECURE, false);
if (err)
- return err;
+ goto unlock;
break;
case MT76_STA_EVENT_DISASSOC:
for (i = 0; i < ARRAY_SIZE(msta_link->twt.flow); i++)
mt7996_mac_twt_teardown_flow(dev, link,
msta_link, i);
- if (sta->mlo && links == BIT(link_id)) /* last link */
- mt7996_mcu_teardown_mld_sta(dev, link,
- msta_link);
- else
+ if (!sta->mlo)
mt7996_mcu_add_sta(dev, link_conf, link_sta,
link, msta_link,
CONN_STATE_DISCONNECT, false);
+ else if (sta->mlo && links == BIT(link_id)) /* last link */
+ mt7996_mcu_teardown_mld_sta(dev, link,
+ msta_link);
msta_link->wcid.sta_disabled = 1;
msta_link->wcid.sta = 0;
links = links & ~BIT(link_id);
break;
}
}
+unlock:
+ mutex_unlock(&dev->mt76.mutex);
- return 0;
+ return err;
}
static void
@@ -1179,6 +1241,24 @@ mt7996_mac_sta_remove(struct mt7996_dev *dev, struct ieee80211_vif *vif,
mutex_unlock(&dev->mt76.mutex);
}
+static void
+mt7996_set_active_links(struct ieee80211_vif *vif)
+{
+ u16 active_links;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!ieee80211_vif_is_mld(vif))
+ return;
+
+ active_links = mt76_select_links(vif, MT7996_MAX_RADIOS);
+ if (hweight16(active_links) < 2)
+ return;
+
+ ieee80211_set_active_links_async(vif, active_links);
+}
+
static int
mt7996_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, enum ieee80211_sta_state old_state,
@@ -1196,16 +1276,18 @@ mt7996_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt7996_mac_sta_remove(dev, vif, sta);
if (old_state == IEEE80211_STA_AUTH &&
- new_state == IEEE80211_STA_ASSOC)
+ new_state == IEEE80211_STA_ASSOC) {
+ mt7996_set_active_links(vif);
ev = MT76_STA_EVENT_ASSOC;
- else if (old_state == IEEE80211_STA_ASSOC &&
- new_state == IEEE80211_STA_AUTHORIZED)
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
ev = MT76_STA_EVENT_AUTHORIZE;
- else if (old_state == IEEE80211_STA_ASSOC &&
- new_state == IEEE80211_STA_AUTH)
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH) {
ev = MT76_STA_EVENT_DISASSOC;
- else
+ } else {
return 0;
+ }
return mt7996_mac_sta_event(dev, vif, sta, ev);
}
@@ -1214,29 +1296,60 @@ static void mt7996_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct ieee80211_sta *sta = control->sta;
+ struct mt7996_sta *msta = sta ? (void *)sta->drv_priv : NULL;
struct mt76_phy *mphy = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
+ struct mt7996_vif *mvif = vif ? (void *)vif->drv_priv : NULL;
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
u8 link_id = u32_get_bits(info->control.flags,
IEEE80211_TX_CTRL_MLO_LINK);
rcu_read_lock();
- if (vif) {
- struct mt7996_vif *mvif = (void *)vif->drv_priv;
- struct mt76_vif_link *mlink = &mvif->deflink.mt76;
+ /* Use primary link_id if the value from mac80211 is set to
+ * IEEE80211_LINK_UNSPECIFIED.
+ */
+ if (link_id == IEEE80211_LINK_UNSPECIFIED) {
+ if (msta)
+ link_id = msta->deflink_id;
+ else if (mvif)
+ link_id = mvif->mt76.deflink_id;
+ }
+
+ if (vif && ieee80211_vif_is_mld(vif)) {
+ struct ieee80211_bss_conf *link_conf;
+
+ if (msta) {
+ struct ieee80211_link_sta *link_sta;
- if (link_id < IEEE80211_LINK_UNSPECIFIED)
- mlink = rcu_dereference(mvif->mt76.link[link_id]);
+ link_sta = rcu_dereference(sta->link[link_id]);
+ if (!link_sta)
+ link_sta = rcu_dereference(sta->link[msta->deflink_id]);
- if (!mlink) {
- ieee80211_free_txskb(hw, skb);
- goto unlock;
+ if (link_sta) {
+ memcpy(hdr->addr1, link_sta->addr, ETH_ALEN);
+ if (ether_addr_equal(sta->addr, hdr->addr3))
+ memcpy(hdr->addr3, link_sta->addr, ETH_ALEN);
+ }
+ }
+
+ link_conf = rcu_dereference(vif->link_conf[link_id]);
+ if (link_conf) {
+ memcpy(hdr->addr2, link_conf->addr, ETH_ALEN);
+ if (ether_addr_equal(vif->addr, hdr->addr3))
+ memcpy(hdr->addr3, link_conf->addr, ETH_ALEN);
}
+ }
- if (mlink->wcid)
+ if (mvif) {
+ struct mt76_vif_link *mlink;
+
+ mlink = rcu_dereference(mvif->mt76.link[link_id]);
+ if (mlink && mlink->wcid)
wcid = mlink->wcid;
if (mvif->mt76.roc_phy &&
@@ -1244,7 +1357,7 @@ static void mt7996_tx(struct ieee80211_hw *hw,
mphy = mvif->mt76.roc_phy;
if (mphy->roc_link)
wcid = mphy->roc_link->wcid;
- } else {
+ } else if (mlink) {
mphy = mt76_vif_link_phy(mlink);
}
}
@@ -1254,8 +1367,7 @@ static void mt7996_tx(struct ieee80211_hw *hw,
goto unlock;
}
- if (control->sta && link_id < IEEE80211_LINK_UNSPECIFIED) {
- struct mt7996_sta *msta = (void *)control->sta->drv_priv;
+ if (msta) {
struct mt7996_sta_link *msta_link;
msta_link = rcu_dereference(msta->link[link_id]);
@@ -1292,16 +1404,13 @@ static int
mt7996_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
{
- enum ieee80211_ampdu_mlme_action action = params->action;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct ieee80211_sta *sta = params->sta;
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct ieee80211_txq *txq = sta->txq[params->tid];
- struct ieee80211_link_sta *link_sta;
u16 tid = params->tid;
u16 ssn = params->ssn;
struct mt76_txq *mtxq;
- unsigned int link_id;
int ret = 0;
if (!txq)
@@ -1311,59 +1420,42 @@ mt7996_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&dev->mt76.mutex);
- for_each_sta_active_link(vif, sta, link_sta, link_id) {
- struct mt7996_sta_link *msta_link;
- struct mt7996_vif_link *link;
-
- msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
- if (!msta_link)
- continue;
-
- link = mt7996_vif_link(dev, vif, link_id);
- if (!link)
- continue;
-
- switch (action) {
- case IEEE80211_AMPDU_RX_START:
- mt76_rx_aggr_start(&dev->mt76, &msta_link->wcid, tid,
- ssn, params->buf_size);
- ret = mt7996_mcu_add_rx_ba(dev, params, link, true);
- break;
- case IEEE80211_AMPDU_RX_STOP:
- mt76_rx_aggr_stop(&dev->mt76, &msta_link->wcid, tid);
- ret = mt7996_mcu_add_rx_ba(dev, params, link, false);
- break;
- case IEEE80211_AMPDU_TX_OPERATIONAL:
- mtxq->aggr = true;
- mtxq->send_bar = false;
- ret = mt7996_mcu_add_tx_ba(dev, params, link,
- msta_link, true);
- break;
- case IEEE80211_AMPDU_TX_STOP_FLUSH:
- case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
- mtxq->aggr = false;
- clear_bit(tid, &msta_link->wcid.ampdu_state);
- ret = mt7996_mcu_add_tx_ba(dev, params, link,
- msta_link, false);
- break;
- case IEEE80211_AMPDU_TX_START:
- set_bit(tid, &msta_link->wcid.ampdu_state);
- ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
- break;
- case IEEE80211_AMPDU_TX_STOP_CONT:
- mtxq->aggr = false;
- clear_bit(tid, &msta_link->wcid.ampdu_state);
- ret = mt7996_mcu_add_tx_ba(dev, params, link,
- msta_link, false);
- break;
- }
-
- if (ret)
- break;
- }
-
- if (action == IEEE80211_AMPDU_TX_STOP_CONT)
+ switch (params->action) {
+ case IEEE80211_AMPDU_RX_START:
+ /* Since packets belonging to the same TID can be split over
+ * multiple links, store the AMPDU state for reordering in the
+ * primary link
+ */
+ mt76_rx_aggr_start(&dev->mt76, &msta->deflink.wcid, tid,
+ ssn, params->buf_size);
+ ret = mt7996_mcu_add_rx_ba(dev, params, vif, true);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta->deflink.wcid, tid);
+ ret = mt7996_mcu_add_rx_ba(dev, params, vif, false);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ ret = mt7996_mcu_add_tx_ba(dev, params, vif, true);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ clear_bit(tid, &msta->deflink.wcid.ampdu_state);
+ ret = mt7996_mcu_add_tx_ba(dev, params, vif, false);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ set_bit(tid, &msta->deflink.wcid.ampdu_state);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ clear_bit(tid, &msta->deflink.wcid.ampdu_state);
+ ret = mt7996_mcu_add_tx_ba(dev, params, vif, false);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
mutex_unlock(&dev->mt76.mutex);
@@ -1617,19 +1709,13 @@ static void mt7996_sta_statistics(struct ieee80211_hw *hw,
}
}
-static void mt7996_link_rate_ctrl_update(void *data, struct ieee80211_sta *sta)
+static void mt7996_link_rate_ctrl_update(void *data,
+ struct mt7996_sta_link *msta_link)
{
- struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_sta *msta = msta_link->sta;
struct mt7996_dev *dev = msta->vif->deflink.phy->dev;
- struct mt7996_sta_link *msta_link;
u32 *changed = data;
- rcu_read_lock();
-
- msta_link = rcu_dereference(msta->link[msta->deflink_id]);
- if (!msta_link)
- goto out;
-
spin_lock_bh(&dev->mt76.sta_poll_lock);
msta_link->changed |= *changed;
@@ -1637,8 +1723,6 @@ static void mt7996_link_rate_ctrl_update(void *data, struct ieee80211_sta *sta)
list_add_tail(&msta_link->rc_list, &dev->sta_rc_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
-out:
- rcu_read_unlock();
}
static void mt7996_link_sta_rc_update(struct ieee80211_hw *hw,
@@ -1646,11 +1730,32 @@ static void mt7996_link_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_link_sta *link_sta,
u32 changed)
{
- struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct ieee80211_sta *sta = link_sta->sta;
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_sta_link *msta_link;
- mt7996_link_rate_ctrl_update(&changed, sta);
- ieee80211_queue_work(hw, &dev->rc_work);
+ rcu_read_lock();
+
+ msta_link = rcu_dereference(msta->link[link_sta->link_id]);
+ if (msta_link) {
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+
+ mt7996_link_rate_ctrl_update(&changed, msta_link);
+ ieee80211_queue_work(hw, &dev->rc_work);
+ }
+
+ rcu_read_unlock();
+}
+
+static void mt7996_sta_rate_ctrl_update(void *data, struct ieee80211_sta *sta)
+{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_sta_link *msta_link;
+ u32 *changed = data;
+
+ msta_link = rcu_dereference(msta->link[msta->deflink_id]);
+ if (msta_link)
+ mt7996_link_rate_ctrl_update(&changed, msta_link);
}
static int
@@ -1671,7 +1776,7 @@ mt7996_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
* - multiple rates: if it's not in range format i.e 0-{7,8,9} for VHT
* then multiple MCS setting (MCS 4,5,6) is not supported.
*/
- ieee80211_iterate_stations_atomic(hw, mt7996_link_rate_ctrl_update,
+ ieee80211_iterate_stations_atomic(hw, mt7996_sta_rate_ctrl_update,
&changed);
ieee80211_queue_work(hw, &dev->rc_work);
@@ -2059,7 +2164,6 @@ out:
return ret;
}
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
static int
mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -2067,17 +2171,14 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
struct net_device_path_ctx *ctx,
struct net_device_path *path)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
struct mt7996_sta_link *msta_link;
struct mt7996_vif_link *link;
- struct mt76_vif_link *mlink;
- struct mt7996_phy *phy;
- mlink = rcu_dereference(mvif->mt76.link[msta->deflink_id]);
- if (!mlink)
+ link = mt7996_vif_link(dev, vif, msta->deflink_id);
+ if (!link)
return -EIO;
msta_link = rcu_dereference(msta->link[msta->deflink_id]);
@@ -2087,38 +2188,91 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
if (!msta_link->wcid.sta || msta_link->wcid.idx > MT7996_WTBL_STA)
return -EIO;
- link = (struct mt7996_vif_link *)mlink;
- phy = mt7996_vif_link_phy(link);
- if (!phy)
- return -ENODEV;
-
- if (phy != &dev->phy && phy->mt76->band_idx == MT_BAND2)
+ if (dev->hif2 &&
+ ((is_mt7996(&dev->mt76) && msta_link->wcid.phy_idx == MT_BAND2) ||
+ (is_mt7992(&dev->mt76) && msta_link->wcid.phy_idx == MT_BAND1)))
wed = &dev->mt76.mmio.wed_hif2;
- if (!mtk_wed_device_active(wed))
+ if (!mtk_wed_device_active(wed) &&
+ !mt76_npu_device_active(&dev->mt76))
return -ENODEV;
path->type = DEV_PATH_MTK_WDMA;
path->dev = ctx->dev;
- path->mtk_wdma.wdma_idx = wed->wdma_idx;
- path->mtk_wdma.bss = mlink->idx;
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ if (mtk_wed_device_active(wed))
+ path->mtk_wdma.wdma_idx = wed->wdma_idx;
+ else
+#endif
+ path->mtk_wdma.wdma_idx = link->mt76.band_idx;
+ path->mtk_wdma.bss = link->mt76.idx;
path->mtk_wdma.queue = 0;
path->mtk_wdma.wcid = msta_link->wcid.idx;
- path->mtk_wdma.amsdu = mtk_wed_is_amsdu_supported(wed);
+ if (ieee80211_hw_check(hw, SUPPORTS_AMSDU_IN_AMPDU) &&
+ mtk_wed_is_amsdu_supported(wed))
+ path->mtk_wdma.amsdu = msta_link->wcid.amsdu;
+ else
+ path->mtk_wdma.amsdu = 0;
ctx->dev = NULL;
return 0;
}
-#endif
-
static int
mt7996_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u16 old_links, u16 new_links,
struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
{
- return 0;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (!old_links) {
+ int idx;
+
+ idx = get_own_mld_idx(dev->mld_idx_mask, true);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+ mvif->mld_group_idx = idx;
+ dev->mld_idx_mask |= BIT_ULL(mvif->mld_group_idx);
+
+ idx = get_free_idx(dev->mld_remap_idx_mask, 0, 15) - 1;
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+ mvif->mld_remap_idx = idx;
+ dev->mld_remap_idx_mask |= BIT_ULL(mvif->mld_remap_idx);
+ }
+
+ if (new_links)
+ goto out;
+
+ dev->mld_idx_mask &= ~BIT_ULL(mvif->mld_group_idx);
+ dev->mld_remap_idx_mask &= ~BIT_ULL(mvif->mld_remap_idx);
+
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static void
+mt7996_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy;
+
+ ieee80211_wake_queues(hw);
+ mt7996_for_each_phy(dev, phy)
+ ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
+ MT7996_WATCHDOG_TIME);
}
const struct ieee80211_ops mt7996_ops = {
@@ -2171,12 +2325,16 @@ const struct ieee80211_ops mt7996_ops = {
.twt_teardown_request = mt7996_twt_teardown_request,
#ifdef CONFIG_MAC80211_DEBUGFS
.sta_add_debugfs = mt7996_sta_add_debugfs,
+ .link_sta_add_debugfs = mt7996_link_sta_add_debugfs,
#endif
.set_radar_background = mt7996_set_radar_background,
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
.net_fill_forward_path = mt7996_net_fill_forward_path,
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
.net_setup_tc = mt76_wed_net_setup_tc,
+#elif defined(CONFIG_MT7996_NPU)
+ .net_setup_tc = mt76_npu_net_setup_tc,
#endif
.change_vif_links = mt7996_change_vif_links,
.change_sta_links = mt7996_mac_sta_change_links,
+ .reconfig_complete = mt7996_reconfig_complete,
};
diff --git a/sys/contrib/dev/mediatek/mt76/mt7996/mcu.c b/sys/contrib/dev/mediatek/mt76/mt7996/mcu.c
index 443cd21a32b8..1e21c05bae28 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7996/mcu.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7996/mcu.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2022 MediaTek Inc.
*/
@@ -246,6 +246,30 @@ mt7996_mcu_parse_response(struct mt76_dev *mdev, int cmd,
return ret;
}
+static void
+mt7996_mcu_set_timeout(struct mt76_dev *mdev, int cmd)
+{
+ mdev->mcu.timeout = 5 * HZ;
+
+ if (!(cmd & __MCU_CMD_FIELD_UNI))
+ return;
+
+ switch (FIELD_GET(__MCU_CMD_FIELD_ID, cmd)) {
+ case MCU_UNI_CMD_THERMAL:
+ case MCU_UNI_CMD_TWT:
+ case MCU_UNI_CMD_GET_MIB_INFO:
+ case MCU_UNI_CMD_STA_REC_UPDATE:
+ case MCU_UNI_CMD_BSS_INFO_UPDATE:
+ mdev->mcu.timeout = 2 * HZ;
+ return;
+ case MCU_UNI_CMD_EFUSE_CTRL:
+ mdev->mcu.timeout = 20 * HZ;
+ return;
+ default:
+ break;
+ }
+}
+
static int
mt7996_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, int *wait_seq)
@@ -259,7 +283,7 @@ mt7996_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
u32 val;
u8 seq;
- mdev->mcu.timeout = 20 * HZ;
+ mt7996_mcu_set_timeout(mdev, cmd);
seq = ++dev->mt76.mcu.msg_seq & 0xf;
if (!seq)
@@ -298,6 +322,9 @@ mt7996_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
else
uni_txd->option = MCU_CMD_UNI_EXT_ACK;
+ if (mcu_cmd == MCU_UNI_CMD_SDO)
+ uni_txd->option &= ~MCU_CMD_ACK;
+
if ((cmd & __MCU_CMD_FIELD_WA) && (cmd & __MCU_CMD_FIELD_WM))
uni_txd->s2d_index = MCU_S2D_H2CN;
else if (cmd & __MCU_CMD_FIELD_WA)
@@ -679,7 +706,7 @@ mt7996_mcu_wed_rro_event(struct mt7996_dev *dev, struct sk_buff *skb)
{
struct mt7996_mcu_wed_rro_event *event = (void *)skb->data;
- if (!dev->has_rro)
+ if (!mt7996_has_hwrro(dev))
return;
skb_pull(skb, sizeof(struct mt7996_mcu_rxd) + 4);
@@ -918,17 +945,28 @@ mt7996_mcu_bss_txcmd_tlv(struct sk_buff *skb, bool en)
}
static void
-mt7996_mcu_bss_mld_tlv(struct sk_buff *skb, struct mt76_vif_link *mlink)
+mt7996_mcu_bss_mld_tlv(struct sk_buff *skb,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt7996_vif_link *link)
{
+ struct ieee80211_vif *vif = link_conf->vif;
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct bss_mld_tlv *mld;
struct tlv *tlv;
tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_MLD, sizeof(*mld));
-
mld = (struct bss_mld_tlv *)tlv;
- mld->group_mld_id = 0xff;
- mld->own_mld_id = mlink->idx;
- mld->remap_idx = 0xff;
+ mld->own_mld_id = link->mld_idx;
+ mld->link_id = link_conf->link_id;
+
+ if (ieee80211_vif_is_mld(vif)) {
+ mld->group_mld_id = mvif->mld_group_idx;
+ mld->remap_idx = mvif->mld_remap_idx;
+ memcpy(mld->mac_addr, vif->addr, ETH_ALEN);
+ } else {
+ mld->group_mld_id = 0xff;
+ mld->remap_idx = 0xff;
+ }
}
static void
@@ -1018,7 +1056,6 @@ mt7996_mcu_bss_basic_tlv(struct sk_buff *skb,
struct mt76_connac_bss_basic_tlv *bss;
u32 type = CONNECTION_INFRA_AP;
u16 sta_wlan_idx = wlan_idx;
- struct ieee80211_sta *sta;
struct tlv *tlv;
int idx;
@@ -1029,14 +1066,18 @@ mt7996_mcu_bss_basic_tlv(struct sk_buff *skb,
break;
case NL80211_IFTYPE_STATION:
if (enable) {
+ struct ieee80211_sta *sta;
+
rcu_read_lock();
- sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
- /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
+ sta = ieee80211_find_sta(vif, link_conf->bssid);
if (sta) {
- struct mt76_wcid *wcid;
+ struct mt7996_sta *msta = (void *)sta->drv_priv;
+ struct mt7996_sta_link *msta_link;
+ int link_id = link_conf->link_id;
- wcid = (struct mt76_wcid *)sta->drv_priv;
- sta_wlan_idx = wcid->idx;
+ msta_link = rcu_dereference(msta->link[link_id]);
+ if (msta_link)
+ sta_wlan_idx = msta_link->wcid.idx;
}
rcu_read_unlock();
}
@@ -1053,8 +1094,6 @@ mt7996_mcu_bss_basic_tlv(struct sk_buff *skb,
tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_BASIC, sizeof(*bss));
bss = (struct mt76_connac_bss_basic_tlv *)tlv;
- bss->bcn_interval = cpu_to_le16(link_conf->beacon_int);
- bss->dtim_period = link_conf->dtim_period;
bss->bmc_tx_wlan_idx = cpu_to_le16(wlan_idx);
bss->sta_idx = cpu_to_le16(sta_wlan_idx);
bss->conn_type = cpu_to_le32(type);
@@ -1074,10 +1113,10 @@ mt7996_mcu_bss_basic_tlv(struct sk_buff *skb,
memcpy(bss->bssid, link_conf->bssid, ETH_ALEN);
bss->bcn_interval = cpu_to_le16(link_conf->beacon_int);
- bss->dtim_period = vif->bss_conf.dtim_period;
+ bss->dtim_period = link_conf->dtim_period;
bss->phymode = mt76_connac_get_phy_mode(phy, vif,
chandef->chan->band, NULL);
- bss->phymode_ext = mt76_connac_get_phy_mode_ext(phy, &vif->bss_conf,
+ bss->phymode_ext = mt76_connac_get_phy_mode_ext(phy, link_conf,
chandef->chan->band);
return 0;
@@ -1127,6 +1166,8 @@ int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
goto out;
if (enable) {
+ struct mt7996_vif_link *link;
+
mt7996_mcu_bss_rfch_tlv(skb, phy);
mt7996_mcu_bss_bmc_tlv(skb, mlink, phy);
mt7996_mcu_bss_ra_tlv(skb, phy);
@@ -1137,7 +1178,8 @@ int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
mt7996_mcu_bss_he_tlv(skb, vif, link_conf, phy);
/* this tag is necessary no matter if the vif is MLD */
- mt7996_mcu_bss_mld_tlv(skb, mlink);
+ link = container_of(mlink, struct mt7996_vif_link, mt76);
+ mt7996_mcu_bss_mld_tlv(skb, link_conf, link);
}
mt7996_mcu_bss_mbssid_tlv(skb, link_conf, enable);
@@ -1168,9 +1210,8 @@ int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif,
static int
mt7996_mcu_sta_ba(struct mt7996_dev *dev, struct mt76_vif_link *mvif,
struct ieee80211_ampdu_params *params,
- bool enable, bool tx)
+ struct mt76_wcid *wcid, bool enable, bool tx)
{
- struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
struct sta_rec_ba_uni *ba;
struct sk_buff *skb;
struct tlv *tlv;
@@ -1189,7 +1230,7 @@ mt7996_mcu_sta_ba(struct mt7996_dev *dev, struct mt76_vif_link *mvif,
ba->ba_en = enable << params->tid;
ba->amsdu = params->amsdu;
ba->tid = params->tid;
- ba->ba_rdd_rro = !tx && enable && dev->has_rro;
+ ba->ba_rdd_rro = !tx && enable && mt7996_has_hwrro(dev);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
@@ -1198,20 +1239,67 @@ mt7996_mcu_sta_ba(struct mt7996_dev *dev, struct mt76_vif_link *mvif,
/** starec & wtbl **/
int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
- struct mt7996_vif_link *link,
- struct mt7996_sta_link *msta_link, bool enable)
+ struct ieee80211_vif *vif, bool enable)
{
- if (enable && !params->amsdu)
- msta_link->wcid.amsdu = false;
+ struct ieee80211_sta *sta = params->sta;
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+ int ret = 0;
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
+
+ msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
+ if (!msta_link)
+ continue;
+
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ continue;
+
+ if (enable && !params->amsdu)
+ msta_link->wcid.amsdu = false;
+
+ ret = mt7996_mcu_sta_ba(dev, &link->mt76, params,
+ &msta_link->wcid, enable, true);
+ if (ret)
+ break;
+ }
- return mt7996_mcu_sta_ba(dev, &link->mt76, params, enable, true);
+ return ret;
}
int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
- struct mt7996_vif_link *link, bool enable)
+ struct ieee80211_vif *vif, bool enable)
{
- return mt7996_mcu_sta_ba(dev, &link->mt76, params, enable, false);
+ struct ieee80211_sta *sta = params->sta;
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+ int ret = 0;
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
+
+ msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
+ if (!msta_link)
+ continue;
+
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ continue;
+
+ ret = mt7996_mcu_sta_ba(dev, &link->mt76, params,
+ &msta_link->wcid, enable, false);
+ if (ret)
+ break;
+ }
+
+ return ret;
}
static void
@@ -1757,8 +1845,8 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
bf->ibf_nrow = tx_ant;
if (link_sta->eht_cap.has_eht || link_sta->he_cap.has_he)
- bf->ibf_timeout = is_mt7996(&dev->mt76) ? MT7996_IBF_TIMEOUT :
- MT7992_IBF_TIMEOUT;
+ bf->ibf_timeout = is_mt7992(&dev->mt76) ? MT7992_IBF_TIMEOUT :
+ MT7996_IBF_TIMEOUT;
else if (!ebf && link_sta->bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol)
bf->ibf_timeout = MT7996_IBF_TIMEOUT_LEGACY;
else
@@ -1772,19 +1860,6 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
bf->mem_20m = bf->nrow < BF_MAT_ORDER ?
matrix[bf->nrow][bf->ncol] : 0;
}
-
- switch (link_sta->bandwidth) {
- case IEEE80211_STA_RX_BW_160:
- case IEEE80211_STA_RX_BW_80:
- bf->mem_total = bf->mem_20m * 2;
- break;
- case IEEE80211_STA_RX_BW_40:
- bf->mem_total = bf->mem_20m;
- break;
- case IEEE80211_STA_RX_BW_20:
- default:
- break;
- }
}
static void
@@ -2284,13 +2359,10 @@ error_unlock:
}
static int
-mt7996_mcu_add_group(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+mt7996_mcu_add_group(struct mt7996_dev *dev, struct mt7996_vif_link *link,
+ struct mt76_wcid *wcid)
{
#define MT_STA_BSS_GROUP 1
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_sta_link *msta_link;
- struct mt7996_sta *msta;
struct {
u8 __rsv1[4];
@@ -2305,13 +2377,10 @@ mt7996_mcu_add_group(struct mt7996_dev *dev, struct ieee80211_vif *vif,
.tag = cpu_to_le16(UNI_VOW_DRR_CTRL),
.len = cpu_to_le16(sizeof(req) - 4),
.action = cpu_to_le32(MT_STA_BSS_GROUP),
- .val = cpu_to_le32(mvif->deflink.mt76.idx % 16),
+ .val = cpu_to_le32(link->mt76.idx % 16),
+ .wlan_idx = cpu_to_le16(wcid->idx),
};
- msta = sta ? (struct mt7996_sta *)sta->drv_priv : NULL;
- msta_link = msta ? &msta->deflink : &mvif->deflink.msta_link;
- req.wlan_idx = cpu_to_le16(msta_link->wcid.idx);
-
return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(VOW), &req,
sizeof(req), true);
}
@@ -2344,8 +2413,8 @@ mt7996_mcu_sta_mld_setup_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
mld_setup->primary_id = cpu_to_le16(msta_link->wcid.idx);
if (nlinks > 1) {
- link_id = __ffs(sta->valid_links & ~BIT(msta->deflink_id));
- msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
+ msta_link = mt76_dereference(msta->link[msta->seclink_id],
+ &dev->mt76);
if (!msta_link)
return;
}
@@ -2451,7 +2520,7 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev,
}
}
- ret = mt7996_mcu_add_group(dev, link_conf->vif, sta);
+ ret = mt7996_mcu_add_group(dev, link, wcid);
if (ret) {
dev_kfree_skb(skb);
return ret;
@@ -2480,157 +2549,94 @@ int mt7996_mcu_teardown_mld_sta(struct mt7996_dev *dev,
}
static int
-mt7996_mcu_sta_key_tlv(struct mt76_wcid *wcid,
+mt7996_mcu_sta_key_tlv(struct mt76_dev *dev, struct mt76_wcid *wcid,
struct sk_buff *skb,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd)
{
struct sta_rec_sec_uni *sec;
+ struct sec_key_uni *sec_key;
struct tlv *tlv;
+ u8 cipher;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
sec = (struct sta_rec_sec_uni *)tlv;
- sec->add = cmd;
-
- if (cmd == SET_KEY) {
- struct sec_key_uni *sec_key;
- u8 cipher;
+ /* due to connac3 FW design, we only do remove key for BIGTK; even for
+ * removal, the field should be filled with SET_KEY
+ */
+ sec->add = SET_KEY;
+ sec->n_cipher = 1;
+ sec_key = &sec->key[0];
+ sec_key->wlan_idx = cpu_to_le16(wcid->idx);
+ sec_key->key_id = key->keyidx;
- cipher = mt76_connac_mcu_get_cipher(key->cipher);
- if (cipher == MCU_CIPHER_NONE)
- return -EOPNOTSUPP;
+ if (cmd != SET_KEY)
+ return 0;
- sec_key = &sec->key[0];
- sec_key->wlan_idx = cpu_to_le16(wcid->idx);
- sec_key->mgmt_prot = 0;
- sec_key->cipher_id = cipher;
- sec_key->cipher_len = sizeof(*sec_key);
- sec_key->key_id = key->keyidx;
- sec_key->key_len = key->keylen;
- sec_key->need_resp = 0;
- memcpy(sec_key->key, key->key, key->keylen);
-
- if (cipher == MCU_CIPHER_TKIP) {
- /* Rx/Tx MIC keys are swapped */
- memcpy(sec_key->key + 16, key->key + 24, 8);
- memcpy(sec_key->key + 24, key->key + 16, 8);
- }
+ cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ if (cipher == MCU_CIPHER_NONE)
+ return -EOPNOTSUPP;
- sec->n_cipher = 1;
- } else {
- sec->n_cipher = 0;
+ sec_key->mgmt_prot = 0;
+ sec_key->cipher_id = cipher;
+ sec_key->cipher_len = sizeof(*sec_key);
+ sec_key->key_len = key->keylen;
+ sec_key->need_resp = 0;
+ memcpy(sec_key->key, key->key, key->keylen);
+
+ if (cipher == MCU_CIPHER_TKIP) {
+ /* Rx/Tx MIC keys are swapped */
+ memcpy(sec_key->key + 16, key->key + 24, 8);
+ memcpy(sec_key->key + 24, key->key + 16, 8);
+ return 0;
}
- return 0;
-}
-
-int mt7996_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_key_conf *key, int mcu_cmd,
- struct mt76_wcid *wcid, enum set_key_cmd cmd)
-{
- struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
- struct sk_buff *skb;
- int ret;
-
- skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid,
- MT7996_STA_UPDATE_MAX_SIZE);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- ret = mt7996_mcu_sta_key_tlv(wcid, skb, key, cmd);
- if (ret)
- return ret;
-
- return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
-}
-
-static int mt7996_mcu_get_pn(struct mt7996_dev *dev,
- struct mt7996_vif_link *link,
- struct mt7996_sta_link *msta_link, u8 *pn)
-{
-#define TSC_TYPE_BIGTK_PN 2
- struct sta_rec_pn_info *pn_info;
- struct sk_buff *skb, *rskb;
- struct tlv *tlv;
- int ret;
-
- skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76,
- &msta_link->wcid);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PN_INFO, sizeof(*pn_info));
- pn_info = (struct sta_rec_pn_info *)tlv;
-
- pn_info->tsc_type = TSC_TYPE_BIGTK_PN;
- ret = mt76_mcu_skb_send_and_get_msg(&dev->mt76, skb,
- MCU_WM_UNI_CMD_QUERY(STA_REC_UPDATE),
- true, &rskb);
- if (ret)
- return ret;
+ if (sec_key->key_id != 6 && sec_key->key_id != 7)
+ return 0;
- skb_pull(rskb, 4);
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ sec_key->cipher_id = MCU_CIPHER_BCN_PROT_CMAC_128;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ sec_key->cipher_id = MCU_CIPHER_BCN_PROT_GMAC_128;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ sec_key->cipher_id = MCU_CIPHER_BCN_PROT_GMAC_256;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ if (!is_mt7990(dev))
+ return -EOPNOTSUPP;
+ sec_key->cipher_id = MCU_CIPHER_BCN_PROT_CMAC_256;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
- pn_info = (struct sta_rec_pn_info *)rskb->data;
- if (le16_to_cpu(pn_info->tag) == STA_REC_PN_INFO)
- memcpy(pn, pn_info->pn, 6);
+ sec_key->bcn_mode = is_mt7990(dev) ? BP_HW_MODE : BP_SW_MODE;
- dev_kfree_skb(rskb);
return 0;
}
-int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev,
- struct mt7996_vif_link *link,
- struct mt7996_sta_link *msta_link,
- struct ieee80211_key_conf *key)
+int mt7996_mcu_add_key(struct mt76_dev *dev, struct mt7996_vif_link *link,
+ struct ieee80211_key_conf *key, int mcu_cmd,
+ struct mt76_wcid *wcid, enum set_key_cmd cmd)
{
- struct mt7996_mcu_bcn_prot_tlv *bcn_prot;
struct sk_buff *skb;
- struct tlv *tlv;
- u8 pn[6] = {};
- int len = sizeof(struct bss_req_hdr) +
- sizeof(struct mt7996_mcu_bcn_prot_tlv);
int ret;
- skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &link->mt76, len);
+ skb = __mt76_connac_mcu_alloc_sta_req(dev, (struct mt76_vif_link *)link,
+ wcid, MT7996_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
- tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_BCN_PROT, sizeof(*bcn_prot));
-
- bcn_prot = (struct mt7996_mcu_bcn_prot_tlv *)tlv;
-
- ret = mt7996_mcu_get_pn(dev, link, msta_link, pn);
+ ret = mt7996_mcu_sta_key_tlv(dev, wcid, skb, key, cmd);
if (ret) {
dev_kfree_skb(skb);
return ret;
}
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_AES_CMAC:
- bcn_prot->cipher_id = MCU_CIPHER_BCN_PROT_CMAC_128;
- break;
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- bcn_prot->cipher_id = MCU_CIPHER_BCN_PROT_GMAC_128;
- break;
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- bcn_prot->cipher_id = MCU_CIPHER_BCN_PROT_GMAC_256;
- break;
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- default:
- dev_err(dev->mt76.dev, "Not supported Bigtk Cipher\n");
- dev_kfree_skb(skb);
- return -EOPNOTSUPP;
- }
-
- pn[0]++;
- memcpy(bcn_prot->pn, pn, 6);
- bcn_prot->enable = BP_SW_MODE;
- memcpy(bcn_prot->key, key->key, WLAN_MAX_KEY_LEN);
- bcn_prot->key_id = key->keyidx;
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true);
+ return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
}
int mt7996_mcu_add_dev_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
@@ -2748,12 +2754,18 @@ mt7996_mcu_beacon_mbss(struct sk_buff *rskb, struct sk_buff *skb,
static void
mt7996_mcu_beacon_cont(struct mt7996_dev *dev,
struct ieee80211_bss_conf *link_conf,
+ struct mt7996_vif_link *link,
struct sk_buff *rskb, struct sk_buff *skb,
struct bss_bcn_content_tlv *bcn,
struct ieee80211_mutable_offsets *offs)
{
- struct mt76_wcid *wcid = &dev->mt76.global_wcid;
- u8 *buf;
+ u8 *buf, keyidx = link->msta_link.wcid.hw_key_idx2;
+ struct mt76_wcid *wcid;
+
+ if (is_mt7990(&dev->mt76) && (keyidx == 6 || keyidx == 7))
+ wcid = &link->msta_link.wcid;
+ else
+ wcid = &dev->mt76.global_wcid;
bcn->pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
bcn->tim_ie_pos = cpu_to_le16(offs->tim_offset);
@@ -2775,7 +2787,7 @@ mt7996_mcu_beacon_cont(struct mt7996_dev *dev,
}
int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *link_conf)
+ struct ieee80211_bss_conf *link_conf, bool enabled)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_vif_link *link = mt7996_vif_conf_link(dev, vif, link_conf);
@@ -2786,7 +2798,6 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct tlv *tlv;
struct bss_bcn_content_tlv *bcn;
int len, extra_len = 0;
- bool enabled = link_conf->enable_beacon;
if (link_conf->nontransmitted)
return 0;
@@ -2829,7 +2840,7 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
info = IEEE80211_SKB_CB(skb);
info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, mlink->band_idx);
- mt7996_mcu_beacon_cont(dev, link_conf, rskb, skb, bcn, &offs);
+ mt7996_mcu_beacon_cont(dev, link_conf, link, rskb, skb, bcn, &offs);
if (link_conf->bssid_indicator)
mt7996_mcu_beacon_mbss(rskb, skb, bcn, &offs);
mt7996_mcu_beacon_cntdwn(rskb, skb, &offs, link_conf->csa_active);
@@ -3451,6 +3462,9 @@ int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif,
#define WMM_PARAM_SET (WMM_AIFS_SET | WMM_CW_MIN_SET | \
WMM_CW_MAX_SET | WMM_TXOP_SET)
struct mt7996_vif_link *link = mt7996_vif_conf_link(dev, vif, link_conf);
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ unsigned int link_id = link_conf->link_id;
+ struct mt7996_vif_link_info *link_info = &mvif->link_info[link_id];
struct {
u8 bss_idx;
u8 __rsv[3];
@@ -3468,7 +3482,7 @@ int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif,
skb_put_data(skb, &hdr, sizeof(hdr));
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- struct ieee80211_tx_queue_params *q = &link->queue_params[ac];
+ struct ieee80211_tx_queue_params *q = &link_info->queue_params[ac];
struct edca *e;
struct tlv *tlv;
diff --git a/sys/contrib/dev/mediatek/mt76/mt7996/mcu.h b/sys/contrib/dev/mediatek/mt76/mt7996/mcu.h
index 130ea95626d5..e0b83ac9f5e2 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7996/mcu.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7996/mcu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2022 MediaTek Inc.
*/
@@ -351,17 +351,6 @@ enum {
BP_HW_MODE,
};
-struct mt7996_mcu_bcn_prot_tlv {
- __le16 tag;
- __le16 len;
- u8 pn[6];
- u8 enable;
- u8 cipher_id;
- u8 key[WLAN_MAX_KEY_LEN];
- u8 key_id;
- u8 __rsv[3];
-} __packed;
-
struct bss_ra_tlv {
__le16 tag;
__le16 len;
@@ -481,7 +470,8 @@ struct bss_mld_tlv {
u8 own_mld_id;
u8 mac_addr[ETH_ALEN];
u8 remap_idx;
- u8 __rsv[3];
+ u8 link_id;
+ u8 __rsv[2];
} __packed;
struct sta_rec_ht_uni {
@@ -530,6 +520,9 @@ struct sec_key_uni {
u8 key_len;
u8 need_resp;
u8 key[32];
+ u8 pn[6];
+ u8 bcn_mode;
+ u8 _rsv;
} __packed;
struct sta_rec_sec_uni {
diff --git a/sys/contrib/dev/mediatek/mt76/mt7996/mmio.c b/sys/contrib/dev/mediatek/mt76/mt7996/mmio.c
index 05a3d9019167..59a2a5f7534a 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7996/mmio.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7996/mmio.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2022 MediaTek Inc.
*/
@@ -474,14 +474,15 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
struct pci_dev *pci_dev = pdev_ptr;
- u32 hif1_ofs = 0;
+ u32 hif1_ofs;
if (!wed_enable)
return 0;
- dev->has_rro = true;
+ dev->mt76.hwrro_mode = is_mt7996(&dev->mt76) ? MT76_HWRRO_V3
+ : MT76_HWRRO_V3_1;
- hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+ hif1_ofs = dev->hif2 ? MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0) : 0;
if (hif2)
wed = &dev->mt76.mmio.wed_hif2;
@@ -505,11 +506,18 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.wpdma_tx = wed->wlan.phy_base + hif1_ofs +
MT_TXQ_RING_BASE(0) +
MT7996_TXQ_BAND2 * MT_RING_SIZE;
- if (dev->has_rro) {
- wed->wlan.wpdma_txfree = wed->wlan.phy_base + hif1_ofs +
- MT_RXQ_RING_BASE(0) +
- MT7996_RXQ_TXFREE2 * MT_RING_SIZE;
- wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_EXT) - 1;
+ if (mt7996_has_hwrro(dev)) {
+ if (is_mt7996(&dev->mt76)) {
+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_EXT) - 1;
+ wed->wlan.wpdma_txfree = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(0) +
+ MT7996_RXQ_TXFREE2 * MT_RING_SIZE;
+ } else {
+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_BAND1_EXT) - 1;
+ wed->wlan.wpdma_txfree = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(0) +
+ MT7996_RXQ_MCU_WA_EXT * MT_RING_SIZE;
+ }
} else {
wed->wlan.wpdma_txfree = wed->wlan.phy_base + hif1_ofs +
MT_RXQ_RING_BASE(0) +
@@ -518,14 +526,14 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
}
wed->wlan.wpdma_rx_glo = wed->wlan.phy_base + hif1_ofs + MT_WFDMA0_GLO_CFG;
- wed->wlan.wpdma_rx = wed->wlan.phy_base + hif1_ofs +
- MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) +
- MT7996_RXQ_BAND0 * MT_RING_SIZE;
+ wed->wlan.wpdma_rx[0] = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(MT7996_RXQ_BAND2) +
+ MT7996_RXQ_BAND2 * MT_RING_SIZE;
wed->wlan.id = MT7996_DEVICE_ID_2;
wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND2) - 1;
} else {
- wed->wlan.hw_rro = dev->has_rro; /* default on */
+ wed->wlan.hw_rro = mt7996_has_hwrro(dev);
wed->wlan.wpdma_int = wed->wlan.phy_base + MT_INT_SOURCE_CSR;
wed->wlan.wpdma_mask = wed->wlan.phy_base + MT_INT_MASK_CSR;
wed->wlan.wpdma_tx = wed->wlan.phy_base + MT_TXQ_RING_BASE(0) +
@@ -533,16 +541,26 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.wpdma_rx_glo = wed->wlan.phy_base + MT_WFDMA0_GLO_CFG;
- wed->wlan.wpdma_rx = wed->wlan.phy_base +
- MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) +
- MT7996_RXQ_BAND0 * MT_RING_SIZE;
+ wed->wlan.wpdma_rx[0] = wed->wlan.phy_base +
+ MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) +
+ MT7996_RXQ_BAND0 * MT_RING_SIZE;
wed->wlan.wpdma_rx_rro[0] = wed->wlan.phy_base +
MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND0) +
MT7996_RXQ_RRO_BAND0 * MT_RING_SIZE;
- wed->wlan.wpdma_rx_rro[1] = wed->wlan.phy_base + hif1_ofs +
- MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND2) +
- MT7996_RXQ_RRO_BAND2 * MT_RING_SIZE;
+ if (is_mt7996(&dev->mt76)) {
+ wed->wlan.wpdma_rx_rro[1] = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND2) +
+ MT7996_RXQ_RRO_BAND2 * MT_RING_SIZE;
+ } else {
+ wed->wlan.wpdma_rx_rro[1] = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND1) +
+ MT7996_RXQ_RRO_BAND1 * MT_RING_SIZE;
+ wed->wlan.wpdma_rx[1] = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(MT7996_RXQ_BAND1) +
+ MT7996_RXQ_BAND1 * MT_RING_SIZE;
+ }
+
wed->wlan.wpdma_rx_pg = wed->wlan.phy_base +
MT_RXQ_RING_BASE(MT7996_RXQ_MSDU_PG_BAND0) +
MT7996_RXQ_MSDU_PG_BAND0 * MT_RING_SIZE;
@@ -552,10 +570,14 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.rx_size = SKB_WITH_OVERHEAD(MT_RX_BUF_SIZE);
wed->wlan.rx_tbit[0] = ffs(MT_INT_RX_DONE_BAND0) - 1;
- wed->wlan.rx_tbit[1] = ffs(MT_INT_RX_DONE_BAND2) - 1;
-
wed->wlan.rro_rx_tbit[0] = ffs(MT_INT_RX_DONE_RRO_BAND0) - 1;
- wed->wlan.rro_rx_tbit[1] = ffs(MT_INT_RX_DONE_RRO_BAND2) - 1;
+ if (is_mt7996(&dev->mt76)) {
+ wed->wlan.rx_tbit[1] = ffs(MT_INT_RX_DONE_BAND2) - 1;
+ wed->wlan.rro_rx_tbit[1] = ffs(MT_INT_RX_DONE_RRO_BAND2) - 1;
+ } else {
+ wed->wlan.rx_tbit[1] = ffs(MT_INT_RX_DONE_BAND1) - 1;
+ wed->wlan.rro_rx_tbit[1] = ffs(MT_INT_RX_DONE_RRO_BAND1) - 1;
+ }
wed->wlan.rx_pg_tbit[0] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND0) - 1;
wed->wlan.rx_pg_tbit[1] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND1) - 1;
@@ -563,20 +585,32 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND0) - 1;
wed->wlan.tx_tbit[1] = ffs(MT_INT_TX_DONE_BAND1) - 1;
- if (dev->has_rro) {
- wed->wlan.wpdma_txfree = wed->wlan.phy_base + MT_RXQ_RING_BASE(0) +
- MT7996_RXQ_TXFREE0 * MT_RING_SIZE;
- wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_MAIN) - 1;
+ if (is_mt7996(&dev->mt76)) {
+ if (mt7996_has_hwrro(dev)) {
+ wed->wlan.wpdma_txfree = wed->wlan.phy_base +
+ MT_RXQ_RING_BASE(0) +
+ MT7996_RXQ_TXFREE0 * MT_RING_SIZE;
+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_MAIN) - 1;
+ } else {
+ wed->wlan.wpdma_txfree = wed->wlan.phy_base +
+ MT_RXQ_RING_BASE(0) +
+ MT7996_RXQ_MCU_WA_MAIN * MT_RING_SIZE;
+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_DONE_WA_MAIN) - 1;
+ }
} else {
wed->wlan.txfree_tbit = ffs(MT_INT_RX_DONE_WA_MAIN) - 1;
wed->wlan.wpdma_txfree = wed->wlan.phy_base + MT_RXQ_RING_BASE(0) +
MT7996_RXQ_MCU_WA_MAIN * MT_RING_SIZE;
}
dev->mt76.rx_token_size = MT7996_TOKEN_SIZE + wed->wlan.rx_npkt;
+
+ if (dev->hif2 && is_mt7992(&dev->mt76))
+ wed->wlan.id = 0x7992;
}
wed->wlan.nbuf = MT7996_HW_TOKEN_SIZE;
wed->wlan.token_start = MT7996_TOKEN_SIZE - wed->wlan.nbuf;
+ wed->wlan.hif2 = hif2;
wed->wlan.amsdu_max_subframes = 8;
wed->wlan.amsdu_max_len = 1536;
@@ -591,8 +625,10 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.reset_complete = mt76_wed_reset_complete;
}
- if (mtk_wed_device_attach(wed))
+ if (mtk_wed_device_attach(wed)) {
+ dev->mt76.hwrro_mode = MT76_HWRRO_OFF;
return 0;
+ }
*irq = wed->irq;
dev->mt76.dma_dev = wed->dev;
@@ -686,9 +722,18 @@ void mt7996_dual_hif_set_irq_mask(struct mt7996_dev *dev, bool write_reg,
static void mt7996_rx_poll_complete(struct mt76_dev *mdev,
enum mt76_rxq_id q)
{
- struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ if (q == MT_RXQ_NPU0 || q == MT_RXQ_NPU1) {
+ struct airoha_npu *npu;
- mt7996_irq_enable(dev, MT_INT_RX(q));
+ npu = rcu_dereference(mdev->mmio.npu);
+ if (npu)
+ airoha_npu_wlan_enable_irq(npu, q - MT_RXQ_NPU0);
+ } else {
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev,
+ mt76);
+
+ mt7996_irq_enable(dev, MT_INT_RX(q));
+ }
}
/* TODO: support 2/4/6/8 MSI-X vectors */
@@ -705,12 +750,18 @@ static void mt7996_irq_tasklet(struct tasklet_struct *t)
dev->mt76.mmio.irqmask);
if (intr1 & MT_INT_RX_TXFREE_EXT)
napi_schedule(&dev->mt76.napi[MT_RXQ_TXFREE_BAND2]);
+
+ if (intr1 & MT_INT_RX_DONE_BAND2_EXT)
+ napi_schedule(&dev->mt76.napi[MT_RXQ_BAND2]);
+
+ if (intr1 & MT_INT_RX_TXFREE_BAND1_EXT)
+ napi_schedule(&dev->mt76.napi[MT_RXQ_BAND1_WA]);
}
if (mtk_wed_device_active(wed)) {
mtk_wed_device_irq_set_mask(wed, 0);
intr = mtk_wed_device_irq_get(wed, dev->mt76.mmio.irqmask);
- intr |= (intr1 & ~MT_INT_RX_TXFREE_EXT);
+ intr |= (intr1 & ~MT_INT_TX_RX_DONE_EXT);
} else {
mt76_wr(dev, MT_INT_MASK_CSR, 0);
if (dev->hif2)
@@ -796,6 +847,8 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
.rx_skb = mt7996_queue_rx_skb,
.rx_check = mt7996_rx_check,
.rx_poll_complete = mt7996_rx_poll_complete,
+ .rx_rro_ind_process = mt7996_rro_rx_process,
+ .rx_rro_add_msdu_page = mt7996_rro_msdu_page_add,
.update_survey = mt7996_update_channel,
.set_channel = mt7996_set_channel,
.vif_link_add = mt7996_vif_link_add,
diff --git a/sys/contrib/dev/mediatek/mt76/mt7996/mt7996.h b/sys/contrib/dev/mediatek/mt76/mt7996/mt7996.h
index 81cc1ca22cc6..b4b07d1514a4 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7996/mt7996.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7996/mt7996.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2022 MediaTek Inc.
*/
@@ -115,6 +115,8 @@
#define MT7996_CRIT_TEMP 110
#define MT7996_MAX_TEMP 120
+#define MT7996_MAX_HIF_RXD_IN_PG 5
+#define MT7996_RRO_MSDU_PG_HASH_SIZE 127
#define MT7996_RRO_MAX_SESSION 1024
#define MT7996_RRO_WINDOW_MAX_LEN 1024
#define MT7996_RRO_ADDR_ELEM_LEN 128
@@ -131,6 +133,10 @@
#define MT7996_RX_MSDU_PAGE_SIZE (128 + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+/* RRO 3.1 */
+#define MT7996_RRO_MSDU_PG_CR_CNT 8
+#define MT7996_RRO_MSDU_PG_SIZE_PER_CR 0x10000
+
struct mt7996_vif;
struct mt7996_sta;
struct mt7996_dfs_pulse;
@@ -181,7 +187,7 @@ enum mt7996_rxq_id {
MT7996_RXQ_BAND1 = 5, /* for mt7992 */
MT7996_RXQ_BAND2 = 5,
MT7996_RXQ_RRO_BAND0 = 8,
- MT7996_RXQ_RRO_BAND1 = 8,/* unused */
+ MT7996_RXQ_RRO_BAND1 = 9,
MT7996_RXQ_RRO_BAND2 = 6,
MT7996_RXQ_MSDU_PG_BAND0 = 10,
MT7996_RXQ_MSDU_PG_BAND1 = 11,
@@ -190,6 +196,7 @@ enum mt7996_rxq_id {
MT7996_RXQ_TXFREE1 = 9,
MT7996_RXQ_TXFREE2 = 7,
MT7996_RXQ_RRO_IND = 0,
+ MT7996_RXQ_RRO_RXDMAD_C = 0,
MT7990_RXQ_TXFREE0 = 6,
MT7990_RXQ_TXFREE1 = 7,
};
@@ -239,6 +246,7 @@ struct mt7996_sta {
struct mt7996_sta_link deflink; /* must be first */
struct mt7996_sta_link __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
u8 deflink_id;
+ u8 seclink_id;
struct mt7996_vif *vif;
};
@@ -249,13 +257,23 @@ struct mt7996_vif_link {
struct mt7996_sta_link msta_link;
struct mt7996_phy *phy;
- struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
struct cfg80211_bitrate_mask bitrate_mask;
+
+ u8 mld_idx;
+};
+
+struct mt7996_vif_link_info {
+ struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
};
struct mt7996_vif {
struct mt7996_vif_link deflink; /* must be first */
struct mt76_vif_data mt76;
+
+ struct mt7996_vif_link_info link_info[IEEE80211_MLD_MAX_NUM_LINKS];
+
+ u8 mld_group_idx;
+ u8 mld_remap_idx;
};
/* crash-dump */
@@ -273,15 +291,17 @@ struct mt7996_hif {
struct device *dev;
void __iomem *regs;
int irq;
+
+ enum pci_bus_speed speed;
+ enum pcie_link_width width;
};
+#define WED_RRO_ADDR_SIGNATURE_MASK GENMASK(31, 24)
+#define WED_RRO_ADDR_COUNT_MASK GENMASK(14, 4)
+#define WED_RRO_ADDR_HEAD_HIGH_MASK GENMASK(3, 0)
struct mt7996_wed_rro_addr {
- u32 head_low;
- u32 head_high : 4;
- u32 count: 11;
- u32 oor: 1;
- u32 rsv : 8;
- u32 signature : 8;
+ __le32 head_low;
+ __le32 data;
};
struct mt7996_wed_rro_session_id {
@@ -289,6 +309,44 @@ struct mt7996_wed_rro_session_id {
u16 id;
};
+struct mt7996_msdu_page {
+ struct list_head list;
+
+ struct mt76_queue *q;
+ dma_addr_t dma_addr;
+ void *buf;
+};
+
+/* data1 */
+#define RRO_HIF_DATA1_LS_MASK BIT(30)
+#define RRO_HIF_DATA1_SDL_MASK GENMASK(29, 16)
+/* data4 */
+#define RRO_HIF_DATA4_RX_TOKEN_ID_MASK GENMASK(15, 0)
+struct mt7996_rro_hif {
+ __le32 data0;
+ __le32 data1;
+ __le32 data2;
+ __le32 data3;
+ __le32 data4;
+ __le32 data5;
+};
+
+#define MSDU_PAGE_INFO_OWNER_MASK BIT(31)
+#define MSDU_PAGE_INFO_PG_HIGH_MASK GENMASK(3, 0)
+struct mt7996_msdu_page_info {
+ struct mt7996_rro_hif rxd[MT7996_MAX_HIF_RXD_IN_PG];
+ __le32 pg_low;
+ __le32 data;
+};
+
+#define MT7996_MAX_RRO_RRS_RING 4
+struct mt7996_rro_queue_regs_emi {
+ struct {
+ __le16 idx;
+ __le16 rsv;
+ } ring[MT7996_MAX_RRO_RRS_RING];
+};
+
struct mt7996_phy {
struct mt76_phy *mt76;
struct mt7996_dev *dev;
@@ -340,6 +398,9 @@ struct mt7996_dev {
u32 q_int_mask[MT7996_MAX_QUEUE];
u32 q_wfdma_mask;
+ u64 mld_idx_mask;
+ u64 mld_remap_idx_mask;
+
const struct mt76_bus_ops *bus_ops;
struct mt7996_phy phy;
@@ -380,7 +441,6 @@ struct mt7996_dev {
bool flash_mode:1;
bool has_eht:1;
- bool has_rro:1;
struct {
struct {
@@ -395,10 +455,25 @@ struct mt7996_dev {
void *ptr;
dma_addr_t phy_addr;
} session;
+ struct {
+ void *ptr;
+ dma_addr_t phy_addr;
+ } msdu_pg[MT7996_RRO_MSDU_PG_CR_CNT];
+ struct {
+ struct mt7996_rro_queue_regs_emi *ptr;
+ dma_addr_t phy_addr;
+ } emi_rings_cpu;
+ struct {
+ struct mt7996_rro_queue_regs_emi *ptr;
+ dma_addr_t phy_addr;
+ } emi_rings_dma;
struct work_struct work;
struct list_head poll_list;
spinlock_t lock;
+
+ struct list_head page_cache;
+ struct list_head page_map[MT7996_RRO_MSDU_PG_HASH_SIZE];
} wed_rro;
bool ibf;
@@ -555,6 +630,7 @@ extern struct pci_driver mt7996_hif_driver;
struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
void __iomem *mem_base, u32 device_id);
+void mt7996_rro_hw_init(struct mt7996_dev *dev);
void mt7996_wfsys_reset(struct mt7996_dev *dev);
irqreturn_t mt7996_irq_handler(int irq, void *dev_instance);
u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif_link *link);
@@ -607,16 +683,15 @@ int mt7996_mcu_teardown_mld_sta(struct mt7996_dev *dev,
struct mt7996_sta_link *msta_link);
int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
- struct mt7996_vif_link *link,
- struct mt7996_sta_link *msta_link, bool enable);
+ struct ieee80211_vif *vif, bool enable);
int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
- struct mt7996_vif_link *link, bool enable);
+ struct ieee80211_vif *vif, bool enable);
int mt7996_mcu_update_bss_color(struct mt7996_dev *dev,
struct mt76_vif_link *mlink,
struct cfg80211_he_bss_color *he_bss_color);
int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *link_conf);
+ struct ieee80211_bss_conf *link_conf, bool enabled);
int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
struct ieee80211_bss_conf *link_conf,
struct mt7996_vif_link *link, u32 changed);
@@ -672,6 +747,11 @@ int mt7996_mcu_get_all_sta_info(struct mt7996_phy *phy, u16 tag);
int mt7996_mcu_wed_rro_reset_sessions(struct mt7996_dev *dev, u16 id);
int mt7996_mcu_set_sniffer_mode(struct mt7996_phy *phy, bool enabled);
+static inline bool mt7996_has_hwrro(struct mt7996_dev *dev)
+{
+ return dev->mt76.hwrro_mode != MT76_HWRRO_OFF;
+}
+
static inline u8 mt7996_max_interface_num(struct mt7996_dev *dev)
{
return min(MT7996_MAX_INTERFACES * (1 + mt7996_band_valid(dev, MT_BAND1) +
@@ -710,7 +790,7 @@ void mt7996_memcpy_fromio(struct mt7996_dev *dev, void *buf, u32 offset,
static inline u16 mt7996_rx_chainmask(struct mt7996_phy *phy)
{
- int max_nss = hweight8(phy->mt76->hw->wiphy->available_antennas_tx);
+ int max_nss = hweight16(phy->orig_antenna_mask);
int cur_nss = hweight8(phy->mt76->antenna_mask);
u16 tx_chainmask = phy->mt76->chainmask;
@@ -746,6 +826,8 @@ void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
struct mt7996_vif_link *link,
struct mt7996_sta_link *msta_link,
u8 flowid);
+void mt7996_mac_sta_deinit_link(struct mt7996_dev *dev,
+ struct mt7996_sta_link *msta_link);
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct ieee80211_twt_setup *twt);
@@ -756,6 +838,10 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
void mt7996_tx_token_put(struct mt7996_dev *dev);
void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb, u32 *info);
+void mt7996_rro_msdu_page_map_free(struct mt7996_dev *dev);
+int mt7996_rro_msdu_page_add(struct mt76_dev *mdev, struct mt76_queue *q,
+ dma_addr_t dma_addr, void *data);
+void mt7996_rro_rx_process(struct mt76_dev *mdev, void *data);
bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7996_stats_work(struct work_struct *work);
int mt76_dfs_start_rdd(struct mt7996_dev *dev, bool force);
@@ -766,7 +852,7 @@ void mt7996_update_channel(struct mt76_phy *mphy);
int mt7996_init_debugfs(struct mt7996_dev *dev);
void mt7996_debugfs_rx_fw_monitor(struct mt7996_dev *dev, const void *data, int len);
bool mt7996_debugfs_rx_log(struct mt7996_dev *dev, const void *data, int len);
-int mt7996_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
+int mt7996_mcu_add_key(struct mt76_dev *dev, struct mt7996_vif_link *link,
struct ieee80211_key_conf *key, int mcu_cmd,
struct mt76_wcid *wcid, enum set_key_cmd cmd);
int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev,
@@ -781,6 +867,9 @@ int mt7996_mcu_cp_support(struct mt7996_dev *dev, u8 mode);
#ifdef CONFIG_MAC80211_DEBUGFS
void mt7996_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir);
+void mt7996_link_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct dentry *dir);
#endif
int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
bool hif2, int *irq);
@@ -790,8 +879,27 @@ u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id);
int mt7996_mtk_init_debugfs(struct mt7996_phy *phy, struct dentry *dir);
#endif
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
int mt7996_dma_rro_init(struct mt7996_dev *dev);
-#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
+
+#ifdef CONFIG_MT7996_NPU
+int mt7996_npu_hw_init(struct mt7996_dev *dev);
+int mt7996_npu_hw_stop(struct mt7996_dev *dev);
+int mt7996_npu_rx_queues_init(struct mt7996_dev *dev);
+#else
+static inline int mt7996_npu_hw_init(struct mt7996_dev *dev)
+{
+ return 0;
+}
+
+static inline int mt7996_npu_hw_stop(struct mt7996_dev *dev)
+{
+ return 0;
+}
+
+static inline int mt7996_npu_rx_queues_init(struct mt7996_dev *dev)
+{
+ return 0;
+}
+#endif /* CONFIG_MT7996_NPU */
#endif
diff --git a/sys/contrib/dev/mediatek/mt76/mt7996/pci.c b/sys/contrib/dev/mediatek/mt76/mt7996/pci.c
index ed32ff3089b9..241599dcbc72 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7996/pci.c
+++ b/sys/contrib/dev/mediatek/mt76/mt7996/pci.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2022 MediaTek Inc.
*/
@@ -105,6 +105,7 @@ static int mt7996_pci_hif2_probe(struct pci_dev *pdev)
hif->dev = &pdev->dev;
hif->regs = pcim_iomap_table(pdev)[0];
hif->irq = pdev->irq;
+ pcie_bandwidth_available(pdev, NULL, &hif->speed, &hif->width);
spin_lock_bh(&hif_lock);
list_add(&hif->list, &hif_list);
spin_unlock_bh(&hif_lock);
@@ -155,6 +156,10 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
mdev = &dev->mt76;
mt7996_wfsys_reset(dev);
hif2 = mt7996_pci_init_hif2(pdev);
+ dev->hif2 = hif2;
+
+ mt76_npu_init(mdev, pci_resource_start(pdev, 0),
+ pdev->bus && pci_domain_nr(pdev->bus) ? 3 : 2);
ret = mt7996_mmio_wed_init(dev, pdev, false, &irq);
if (ret < 0)
@@ -174,12 +179,11 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
goto free_wed_or_irq_vector;
mt76_wr(dev, MT_INT_MASK_CSR, 0);
- /* master switch of PCIe tnterrupt enable */
+ /* master switch of PCIe interrupt enable */
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
if (hif2) {
hif2_dev = container_of(hif2->dev, struct pci_dev, dev);
- dev->hif2 = hif2;
ret = mt7996_mmio_wed_init(dev, hif2_dev, true, &hif2_irq);
if (ret < 0)
diff --git a/sys/contrib/dev/mediatek/mt76/mt7996/regs.h b/sys/contrib/dev/mediatek/mt76/mt7996/regs.h
index e942c0058731..e48e0e575b64 100644
--- a/sys/contrib/dev/mediatek/mt76/mt7996/regs.h
+++ b/sys/contrib/dev/mediatek/mt76/mt7996/regs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2022 MediaTek Inc.
*/
@@ -88,6 +88,8 @@ enum offs_rev {
#define MT_RRO_BA_BITMAP_BASE1 MT_RRO_TOP(0xC)
#define WF_RRO_AXI_MST_CFG MT_RRO_TOP(0xB8)
#define WF_RRO_AXI_MST_CFG_DIDX_OK BIT(12)
+
+#define MT_RRO_ADDR_ARRAY_BASE0 MT_RRO_TOP(0x30)
#define MT_RRO_ADDR_ARRAY_BASE1 MT_RRO_TOP(0x34)
#define MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE BIT(31)
@@ -108,6 +110,19 @@ enum offs_rev {
#define MT_RRO_ADDR_ELEM_SEG_ADDR0 MT_RRO_TOP(0x400)
+#define MT_RRO_3_0_EMU_CONF MT_RRO_TOP(0x600)
+#define MT_RRO_3_0_EMU_CONF_EN_MASK BIT(11)
+
+#define MT_RRO_3_1_GLOBAL_CONFIG MT_RRO_TOP(0x604)
+#define MT_RRO_3_1_GLOBAL_CONFIG_RXDMAD_SEL BIT(6)
+#define MT_RRO_3_1_GLOBAL_CONFIG_RX_CIDX_RD_EN BIT(3)
+#define MT_RRO_3_1_GLOBAL_CONFIG_RX_DIDX_WR_EN BIT(2)
+#define MT_RRO_3_1_GLOBAL_CONFIG_INTERLEAVE_EN BIT(0)
+
+#define MT_RRO_MSDU_PG_SEG_ADDR0 MT_RRO_TOP(0x620)
+#define MT_RRO_RX_RING_AP_CIDX_ADDR MT_RRO_TOP(0x6f0)
+#define MT_RRO_RX_RING_AP_DIDX_ADDR MT_RRO_TOP(0x6f4)
+
#define MT_RRO_ACK_SN_CTRL MT_RRO_TOP(0x50)
#define MT_RRO_ACK_SN_CTRL_SN_MASK GENMASK(27, 16)
#define MT_RRO_ACK_SN_CTRL_SESSION_MASK GENMASK(11, 0)
@@ -412,7 +427,9 @@ enum offs_rev {
#define MT_WFDMA0_RX_INT_PCIE_SEL MT_WFDMA0(0x154)
#define MT_WFDMA0_RX_INT_SEL_RING3 BIT(3)
+#define MT_WFDMA0_RX_INT_SEL_RING5 BIT(5)
#define MT_WFDMA0_RX_INT_SEL_RING6 BIT(6)
+#define MT_WFDMA0_RX_INT_SEL_RING9 BIT(9)
#define MT_WFDMA0_MCU_HOST_INT_ENA MT_WFDMA0(0x1f4)
@@ -430,6 +447,7 @@ enum offs_rev {
#define MT_WFDMA0_PAUSE_RX_Q_RRO_TH MT_WFDMA0(0x27c)
#define WF_WFDMA0_GLO_CFG_EXT0 MT_WFDMA0(0x2b0)
+#define WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK GENMASK(27, 24)
#define WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD BIT(18)
#define WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE BIT(14)
@@ -451,6 +469,8 @@ enum offs_rev {
#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR(0x30)
#define MT_WFDMA_HOST_CONFIG_PDMA_BAND BIT(0)
+#define MT_WFDMA_HOST_CONFIG_BAND0_PCIE1 BIT(20)
+#define MT_WFDMA_HOST_CONFIG_BAND1_PCIE1 BIT(21)
#define MT_WFDMA_HOST_CONFIG_BAND2_PCIE1 BIT(22)
#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44)
@@ -459,6 +479,9 @@ enum offs_rev {
#define MT_WFDMA_AXI_R2A_CTRL MT_WFDMA_EXT_CSR(0x500)
#define MT_WFDMA_AXI_R2A_CTRL_OUTSTAND_MASK GENMASK(4, 0)
+#define MT_WFDMA_AXI_R2A_CTRL2 MT_WFDMA_EXT_CSR(0x508)
+#define MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK GENMASK(31, 28)
+
#define MT_PCIE_RECOG_ID 0xd7090
#define MT_PCIE_RECOG_ID_MASK GENMASK(30, 0)
#define MT_PCIE_RECOG_ID_SEM BIT(31)
@@ -492,6 +515,8 @@ enum offs_rev {
#define MT_RXQ_RING_BASE(q) (MT_Q_BASE(__RXQ(q)) + 0x500)
#define MT_RXQ_RRO_IND_RING_BASE MT_RRO_TOP(0x40)
+#define MT_RXQ_RRO_AP_RING_BASE MT_RRO_TOP(0x650)
+
#define MT_MCUQ_EXT_CTRL(q) (MT_Q_BASE(q) + 0x600 + \
MT_MCUQ_ID(q) * 0x4)
#define MT_RXQ_EXT_CTRL(q) (MT_Q_BASE(__RXQ(q)) + 0x680 + \
@@ -514,7 +539,9 @@ enum offs_rev {
#define MT_INT_RX_DONE_WA_EXT BIT(3) /* for mt7992 */
#define MT_INT_RX_DONE_WA_TRI BIT(3)
#define MT_INT_RX_TXFREE_MAIN BIT(17)
+#define MT_INT_RX_TXFREE_BAND1 BIT(15)
#define MT_INT_RX_TXFREE_TRI BIT(15)
+#define MT_INT_RX_TXFREE_BAND1_EXT BIT(19) /* for mt7992 two PCIE*/
#define MT_INT_RX_TXFREE_BAND0_MT7990 BIT(14)
#define MT_INT_RX_TXFREE_BAND1_MT7990 BIT(15)
#define MT_INT_RX_DONE_BAND2_EXT BIT(23)
@@ -522,9 +549,10 @@ enum offs_rev {
#define MT_INT_MCU_CMD BIT(29)
#define MT_INT_RX_DONE_RRO_BAND0 BIT(16)
-#define MT_INT_RX_DONE_RRO_BAND1 BIT(16)
+#define MT_INT_RX_DONE_RRO_BAND1 BIT(17)
#define MT_INT_RX_DONE_RRO_BAND2 BIT(14)
#define MT_INT_RX_DONE_RRO_IND BIT(11)
+#define MT_INT_RX_DONE_RRO_RXDMAD_C BIT(11)
#define MT_INT_RX_DONE_MSDU_PG_BAND0 BIT(18)
#define MT_INT_RX_DONE_MSDU_PG_BAND1 BIT(19)
#define MT_INT_RX_DONE_MSDU_PG_BAND2 BIT(23)
@@ -552,6 +580,8 @@ enum offs_rev {
#define MT_INT_RRO_RX_DONE (MT_INT_RX(MT_RXQ_RRO_BAND0) | \
MT_INT_RX(MT_RXQ_RRO_BAND1) | \
MT_INT_RX(MT_RXQ_RRO_BAND2) | \
+ MT_INT_RX(MT_RXQ_RRO_IND) | \
+ MT_INT_RX(MT_RXQ_RRO_RXDMAD_C) | \
MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND0) | \
MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND1) | \
MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND2))
diff --git a/sys/contrib/dev/mediatek/mt76/pci.c b/sys/contrib/dev/mediatek/mt76/pci.c
index b5031ca7f73f..833923ab2483 100644
--- a/sys/contrib/dev/mediatek/mt76/pci.c
+++ b/sys/contrib/dev/mediatek/mt76/pci.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/scan.c b/sys/contrib/dev/mediatek/mt76/scan.c
index 9b20ccbeb8cf..ff9176cdee3d 100644
--- a/sys/contrib/dev/mediatek/mt76/scan.c
+++ b/sys/contrib/dev/mediatek/mt76/scan.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2024 Felix Fietkau <nbd@nbd.name>
*/
@@ -16,11 +16,13 @@ static void mt76_scan_complete(struct mt76_dev *dev, bool abort)
clear_bit(MT76_SCANNING, &phy->state);
- if (dev->scan.chan && phy->main_chandef.chan)
+ if (dev->scan.chan && phy->main_chandef.chan &&
+ !test_bit(MT76_MCU_RESET, &dev->phy.state))
mt76_set_channel(phy, &phy->main_chandef, false);
mt76_put_vif_phy_link(phy, dev->scan.vif, dev->scan.mlink);
memset(&dev->scan, 0, sizeof(dev->scan));
- ieee80211_scan_completed(phy->hw, &info);
+ if (!test_bit(MT76_MCU_RESET, &dev->phy.state))
+ ieee80211_scan_completed(phy->hw, &info);
}
void mt76_abort_scan(struct mt76_dev *dev)
@@ -28,6 +30,7 @@ void mt76_abort_scan(struct mt76_dev *dev)
cancel_delayed_work_sync(&dev->scan_work);
mt76_scan_complete(dev, true);
}
+EXPORT_SYMBOL_GPL(mt76_abort_scan);
static void
mt76_scan_send_probe(struct mt76_dev *dev, struct cfg80211_ssid *ssid)
@@ -112,9 +115,6 @@ void mt76_scan_work(struct work_struct *work)
local_bh_enable();
out:
- if (!duration)
- return;
-
if (dev->scan.chan)
duration = max_t(int, duration,
msecs_to_jiffies(req->duration +
@@ -139,7 +139,8 @@ int mt76_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&dev->mutex);
- if (dev->scan.req || phy->roc_vif) {
+ if (dev->scan.req || phy->roc_vif ||
+ test_bit(MT76_MCU_RESET, &dev->phy.state)) {
ret = -EBUSY;
goto out;
}
diff --git a/sys/contrib/dev/mediatek/mt76/sdio.c b/sys/contrib/dev/mediatek/mt76/sdio.c
index 8e9576747052..8bae77c761be 100644
--- a/sys/contrib/dev/mediatek/mt76/sdio.c
+++ b/sys/contrib/dev/mediatek/mt76/sdio.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc.
*
* This file is written based on mt76/usb.c.
diff --git a/sys/contrib/dev/mediatek/mt76/sdio.h b/sys/contrib/dev/mediatek/mt76/sdio.h
index 27d5d2077eba..41b89f3de86b 100644
--- a/sys/contrib/dev/mediatek/mt76/sdio.h
+++ b/sys/contrib/dev/mediatek/mt76/sdio.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* Copyright (C) 2020 MediaTek Inc.
*
* Author: Sean Wang <sean.wang@mediatek.com>
diff --git a/sys/contrib/dev/mediatek/mt76/sdio_txrx.c b/sys/contrib/dev/mediatek/mt76/sdio_txrx.c
index f882d21c9f63..3f314e8e1e69 100644
--- a/sys/contrib/dev/mediatek/mt76/sdio_txrx.c
+++ b/sys/contrib/dev/mediatek/mt76/sdio_txrx.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc.
*
* Author: Felix Fietkau <nbd@nbd.name>
diff --git a/sys/contrib/dev/mediatek/mt76/testmode.c b/sys/contrib/dev/mediatek/mt76/testmode.c
index ca4feccf38ca..6ee160bda882 100644
--- a/sys/contrib/dev/mediatek/mt76/testmode.c
+++ b/sys/contrib/dev/mediatek/mt76/testmode.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
#include <linux/random.h>
diff --git a/sys/contrib/dev/mediatek/mt76/testmode.h b/sys/contrib/dev/mediatek/mt76/testmode.h
index 0590c35c7126..bed1ba40ba94 100644
--- a/sys/contrib/dev/mediatek/mt76/testmode.h
+++ b/sys/contrib/dev/mediatek/mt76/testmode.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/trace.c b/sys/contrib/dev/mediatek/mt76/trace.c
index f199fcd2a63d..f17cc01017f3 100644
--- a/sys/contrib/dev/mediatek/mt76/trace.c
+++ b/sys/contrib/dev/mediatek/mt76/trace.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/trace.h b/sys/contrib/dev/mediatek/mt76/trace.h
index 109a07f9733a..794b957ac79d 100644
--- a/sys/contrib/dev/mediatek/mt76/trace.h
+++ b/sys/contrib/dev/mediatek/mt76/trace.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/tx.c b/sys/contrib/dev/mediatek/mt76/tx.c
index 8ab5840fee57..9ec6d0b53a84 100644
--- a/sys/contrib/dev/mediatek/mt76/tx.c
+++ b/sys/contrib/dev/mediatek/mt76/tx.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
@@ -618,7 +618,8 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid,
!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
!ieee80211_is_data(hdr->frame_control) &&
(!ieee80211_is_bufferable_mmpdu(skb) ||
- ieee80211_is_deauth(hdr->frame_control)))
+ ieee80211_is_deauth(hdr->frame_control) ||
+ head == &wcid->tx_offchannel))
qid = MT_TXQ_PSD;
q = phy->q_tx[qid];
@@ -846,8 +847,10 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
spin_lock_bh(&dev->token_lock);
- token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
- if (token >= 0)
+ token = idr_alloc(&dev->token, *ptxwi, dev->token_start,
+ dev->token_start + dev->token_size,
+ GFP_ATOMIC);
+ if (token >= dev->token_start)
dev->token_count++;
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
diff --git a/sys/contrib/dev/mediatek/mt76/usb.c b/sys/contrib/dev/mediatek/mt76/usb.c
index 11f9d2808f15..67c976bb9c22 100644
--- a/sys/contrib/dev/mediatek/mt76/usb.c
+++ b/sys/contrib/dev/mediatek/mt76/usb.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/usb_trace.c b/sys/contrib/dev/mediatek/mt76/usb_trace.c
index 9942bdd6177b..a04585b4b778 100644
--- a/sys/contrib/dev/mediatek/mt76/usb_trace.c
+++ b/sys/contrib/dev/mediatek/mt76/usb_trace.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/usb_trace.h b/sys/contrib/dev/mediatek/mt76/usb_trace.h
index 7b261ddb2ac6..93bb69c65a4f 100644
--- a/sys/contrib/dev/mediatek/mt76/usb_trace.h
+++ b/sys/contrib/dev/mediatek/mt76/usb_trace.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/util.c b/sys/contrib/dev/mediatek/mt76/util.c
index 37cfa133010f..4ca478fb1acd 100644
--- a/sys/contrib/dev/mediatek/mt76/util.c
+++ b/sys/contrib/dev/mediatek/mt76/util.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
diff --git a/sys/contrib/dev/mediatek/mt76/util.h b/sys/contrib/dev/mediatek/mt76/util.h
index 73a784fe2707..596fcf18c3c2 100644
--- a/sys/contrib/dev/mediatek/mt76/util.h
+++ b/sys/contrib/dev/mediatek/mt76/util.h
@@ -1,132 +1,118 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
- * Copyright (c) 2020-2025 Bjoern A. Zeeb <bz@FreeBSD.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
-#ifndef _MT76_UTIL_H
-#define _MT76_UTIL_H
+#ifndef __MT76_UTIL_H
+#define __MT76_UTIL_H
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <net/mac80211.h>
+#if defined(__FreeBSD__)
#include <linux/kthread.h>
+#endif
struct mt76_worker
{
- void(*fn)(struct mt76_worker *);
struct task_struct *task;
+ void (*fn)(struct mt76_worker *);
unsigned long state;
};
-enum mt76_worker_state {
+enum {
MT76_WORKER_SCHEDULED,
MT76_WORKER_RUNNING,
};
-#if 0
-bool __mt76_poll(struct mt76_dev *, u32, u32, u32, int);
-bool __mt76_poll_msec(struct mt76_dev *, u32, u32, u32, int);
-int mt76_get_min_avg_rssi(struct mt76_dev *, bool);
-#endif
-int mt76_wcid_alloc(u32 *, int);
-int __mt76_worker_fn(void *);
+#define MT76_INCR(_var, _size) \
+ (_var = (((_var) + 1) % (_size)))
+
+int mt76_wcid_alloc(u32 *mask, int size);
-/* wcid_phy_mask is [32] */
static inline void
-mt76_wcid_mask_set(u32 *mask, u16 bit)
+mt76_wcid_mask_set(u32 *mask, int idx)
{
+ mask[idx / 32] |= BIT(idx % 32);
+}
- mask[bit / 32] |= BIT(bit % 32);
+static inline void
+mt76_wcid_mask_clear(u32 *mask, int idx)
+{
+ mask[idx / 32] &= ~BIT(idx % 32);
}
static inline void
-mt76_wcid_mask_clear(u32 *mask, u16 bit)
+mt76_skb_set_moredata(struct sk_buff *skb, bool enable)
{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- mask[bit / 32] &= ~BIT(bit % 32);
+ if (enable)
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ else
+ hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
}
-/* See, e.g., __mt76_worker_fn for some details. */
+int __mt76_worker_fn(void *ptr);
+
static inline int
-mt76_worker_setup(struct ieee80211_hw *hw __unused, struct mt76_worker *w,
- void (*wfunc)(struct mt76_worker *), const char *name)
+mt76_worker_setup(struct ieee80211_hw *hw, struct mt76_worker *w,
+ void (*fn)(struct mt76_worker *),
+ const char *name)
{
- int error;
-
- if (wfunc)
- w->fn = wfunc;
+ const char *dev_name = wiphy_name(hw->wiphy);
+ int ret;
+ if (fn)
+ w->fn = fn;
w->task = kthread_run(__mt76_worker_fn, w,
- "mt76-%s", name);
+ "mt76-%s %s", name, dev_name);
- if (!IS_ERR(w->task))
- return (0);
+ if (IS_ERR(w->task)) {
+ ret = PTR_ERR(w->task);
+ w->task = NULL;
+ return ret;
+ }
- error = PTR_ERR(w->task);
- w->task = NULL;
- return (error);
+ return 0;
}
-static inline void
-mt76_worker_schedule(struct mt76_worker *w)
+static inline void mt76_worker_schedule(struct mt76_worker *w)
{
-
- if (w->task == NULL)
+ if (!w->task)
return;
- if (!test_and_set_bit(MT76_WORKER_SCHEDULED, &w->state) ||
+ if (!test_and_set_bit(MT76_WORKER_SCHEDULED, &w->state) &&
!test_bit(MT76_WORKER_RUNNING, &w->state))
wake_up_process(w->task);
}
-static inline void
-mt76_worker_enable(struct mt76_worker *w)
+static inline void mt76_worker_disable(struct mt76_worker *w)
{
-
- if (w->task == NULL)
+ if (!w->task)
return;
- kthread_unpark(w->task);
- mt76_worker_schedule(w);
+ kthread_park(w->task);
+ WRITE_ONCE(w->state, 0);
}
-static inline void
-mt76_worker_disable(struct mt76_worker *w)
+static inline void mt76_worker_enable(struct mt76_worker *w)
{
-
- if (w->task == NULL)
+ if (!w->task)
return;
- kthread_park(w->task);
- WRITE_ONCE(w->state, 0);
+ kthread_unpark(w->task);
+ mt76_worker_schedule(w);
}
-static inline void
-mt76_worker_teardown(struct mt76_worker *w)
+static inline void mt76_worker_teardown(struct mt76_worker *w)
{
-
- if (w->task == NULL)
+ if (!w->task)
return;
kthread_stop(w->task);
w->task = NULL;
}
-static inline void
-mt76_skb_set_moredata(struct sk_buff *skb, bool moredata)
-{
- /*
- * This would be net80211::IEEE80211_FC1_MORE_DATA
- * Implement it as mostly LinuxKPI 802.11 to avoid
- * further header pollution and possible conflicts.
- */
- struct ieee80211_hdr *hdr;
- uint16_t val;
-
- hdr = (struct ieee80211_hdr *)skb->data;
- val = cpu_to_le16(IEEE80211_FC1_MORE_DATA << 8);
- if (!moredata)
- hdr->frame_control &= ~val;
- else
- hdr->frame_control |= val;
-}
-
-#endif /* _MT76_UTIL_H */
+#endif
diff --git a/sys/contrib/dev/mediatek/mt76/wed.c b/sys/contrib/dev/mediatek/mt76/wed.c
index 63f69e152b1c..ed657d952de2 100644
--- a/sys/contrib/dev/mediatek/mt76/wed.c
+++ b/sys/contrib/dev/mediatek/mt76/wed.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2023 Lorenzo Bianconi <lorenzo@kernel.org>
*/
@@ -8,7 +8,7 @@
void mt76_wed_release_rx_buf(struct mtk_wed_device *wed)
{
- struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+ struct mt76_dev *dev = mt76_wed_to_dev(wed);
int i;
for (i = 0; i < dev->rx_token_size; i++) {
@@ -31,8 +31,8 @@ EXPORT_SYMBOL_GPL(mt76_wed_release_rx_buf);
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
{
- struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
+ struct mt76_dev *dev = mt76_wed_to_dev(wed);
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
struct mt76_txwi_cache *t = NULL;
int i;
@@ -80,7 +80,7 @@ EXPORT_SYMBOL_GPL(mt76_wed_init_rx_buf);
int mt76_wed_offload_enable(struct mtk_wed_device *wed)
{
- struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+ struct mt76_dev *dev = mt76_wed_to_dev(wed);
spin_lock_bh(&dev->token_lock);
dev->token_size = wed->wlan.token_start;
@@ -118,7 +118,7 @@ int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
case MT76_WED_Q_TXFREE:
/* WED txfree queue needs ring to be initialized before setup */
q->flags = 0;
- mt76_dma_queue_reset(dev, q);
+ mt76_dma_queue_reset(dev, q, true);
mt76_dma_rx_fill(dev, q, false);
ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
@@ -133,21 +133,21 @@ int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
break;
case MT76_WED_RRO_Q_DATA:
q->flags &= ~MT_QFLAG_WED;
- __mt76_dma_queue_reset(dev, q, false);
+ mt76_dma_queue_reset(dev, q, false);
mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
q->head = q->ndesc - 1;
q->queued = q->head;
break;
case MT76_WED_RRO_Q_MSDU_PG:
q->flags &= ~MT_QFLAG_WED;
- __mt76_dma_queue_reset(dev, q, false);
+ mt76_dma_queue_reset(dev, q, false);
mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs);
q->head = q->ndesc - 1;
q->queued = q->head;
break;
case MT76_WED_RRO_Q_IND:
q->flags &= ~MT_QFLAG_WED;
- mt76_dma_queue_reset(dev, q);
+ mt76_dma_queue_reset(dev, q, true);
mt76_dma_rx_fill(dev, q, false);
mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
break;
@@ -164,7 +164,7 @@ EXPORT_SYMBOL_GPL(mt76_wed_dma_setup);
void mt76_wed_offload_disable(struct mtk_wed_device *wed)
{
- struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+ struct mt76_dev *dev = mt76_wed_to_dev(wed);
spin_lock_bh(&dev->token_lock);
dev->token_size = dev->drv->token_size;
@@ -174,7 +174,7 @@ EXPORT_SYMBOL_GPL(mt76_wed_offload_disable);
void mt76_wed_reset_complete(struct mtk_wed_device *wed)
{
- struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+ struct mt76_dev *dev = mt76_wed_to_dev(wed);
complete(&dev->mmio.wed_reset_complete);
}
diff --git a/sys/contrib/dev/rtw88/bf.c b/sys/contrib/dev/rtw88/bf.c
index 16c6a1d972e5..8969a1e0cb47 100644
--- a/sys/contrib/dev/rtw88/bf.c
+++ b/sys/contrib/dev/rtw88/bf.c
@@ -129,8 +129,11 @@ void rtw_bf_init_bfer_entry_mu(struct rtw_dev *rtwdev,
void rtw_bf_cfg_sounding(struct rtw_dev *rtwdev, struct rtw_vif *vif,
enum rtw_trx_desc_rate rate)
{
+ u8 csi_rsc = CSI_RSC_FOLLOW_RX_PACKET_BW;
u32 psf_ctl = 0;
- u8 csi_rsc = 0x1;
+
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C)
+ csi_rsc = CSI_RSC_PRIMARY_20M_BW;
psf_ctl = rtw_read32(rtwdev, REG_BBPSF_CTRL) |
BIT_WMAC_USE_NDPARATE |
@@ -392,6 +395,9 @@ void rtw_bf_cfg_csi_rate(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate,
csi_cfg = rtw_read32(rtwdev, REG_BBPSF_CTRL) & ~BIT_MASK_CSI_RATE;
cur_rrsr = rtw_read16(rtwdev, REG_RRSR);
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C)
+ csi_cfg |= BIT_CSI_FORCE_RATE;
+
if (rssi >= 40) {
if (cur_rate != DESC_RATE54M) {
cur_rrsr |= BIT(DESC_RATE54M);
diff --git a/sys/contrib/dev/rtw88/bf.h b/sys/contrib/dev/rtw88/bf.h
index 7b40c2c03856..a5d3010e6be6 100644
--- a/sys/contrib/dev/rtw88/bf.h
+++ b/sys/contrib/dev/rtw88/bf.h
@@ -33,6 +33,7 @@
#define BIT_SHIFT_R_MU_RL 12
#define BIT_SHIFT_WMAC_TXMU_ACKPOLICY 4
#define BIT_SHIFT_CSI_RATE 24
+#define BIT_CSI_FORCE_RATE BIT(15)
#define BIT_MASK_R_MU_RL (R_MU_RL << BIT_SHIFT_R_MU_RL)
#define BIT_MASK_R_MU_TABLE_VALID 0x3f
@@ -48,6 +49,12 @@
#define RTW_SND_CTRL_REMOVE 0x98
#define RTW_SND_CTRL_SOUNDING 0x9B
+enum csi_rsc {
+ CSI_RSC_PRIMARY_20M_BW = 0,
+ CSI_RSC_FOLLOW_RX_PACKET_BW = 1,
+ CSI_RSC_DUPLICATE_MODE = 2,
+};
+
enum csi_seg_len {
HAL_CSI_SEG_4K = 0,
HAL_CSI_SEG_8K = 1,
diff --git a/sys/contrib/dev/rtw88/rtw8822bu.c b/sys/contrib/dev/rtw88/rtw8822bu.c
index efda9887cc41..2886f470df71 100644
--- a/sys/contrib/dev/rtw88/rtw8822bu.c
+++ b/sys/contrib/dev/rtw88/rtw8822bu.c
@@ -79,6 +79,8 @@ static const struct usb_device_id rtw_8822bu_id_table[] = {
.driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* D-Link DWA-T185 rev. A1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0411, 0x03d1, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* BUFFALO WI-U2-866DM */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0411, 0x03d0, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* BUFFALO WI-U3-866DHP */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8822bu_id_table);
diff --git a/sys/contrib/dev/rtw88/rtw8822cu.c b/sys/contrib/dev/rtw88/rtw8822cu.c
index 90fcbb8ec629..a5f8311c022b 100644
--- a/sys/contrib/dev/rtw88/rtw8822cu.c
+++ b/sys/contrib/dev/rtw88/rtw8822cu.c
@@ -21,6 +21,8 @@ static const struct usb_device_id rtw_8822cu_id_table[] = {
.driver_info = (kernel_ulong_t)&(rtw8822c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x13b1, 0x0043, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8822c_hw_spec) }, /* Alpha - Alpha */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3329, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8822c_hw_spec) }, /* D-Link AC13U rev. A1 */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8822cu_id_table);
diff --git a/sys/contrib/dev/rtw88/sdio.c b/sys/contrib/dev/rtw88/sdio.c
index 99d7c629eac6..e35de52d8eb4 100644
--- a/sys/contrib/dev/rtw88/sdio.c
+++ b/sys/contrib/dev/rtw88/sdio.c
@@ -144,8 +144,10 @@ static u32 rtw_sdio_to_io_address(struct rtw_dev *rtwdev, u32 addr,
static bool rtw_sdio_use_direct_io(struct rtw_dev *rtwdev, u32 addr)
{
+ bool might_indirect_under_power_off = rtwdev->chip->id == RTW_CHIP_TYPE_8822C;
+
if (!test_bit(RTW_FLAG_POWERON, rtwdev->flags) &&
- !rtw_sdio_is_bus_addr(addr))
+ !rtw_sdio_is_bus_addr(addr) && might_indirect_under_power_off)
return false;
return !rtw_sdio_is_sdio30_supported(rtwdev) ||
diff --git a/sys/contrib/openzfs/module/nvpair/nvpair.c b/sys/contrib/openzfs/module/nvpair/nvpair.c
index eb8c14b4a783..cb3a024ec95c 100644
--- a/sys/contrib/openzfs/module/nvpair/nvpair.c
+++ b/sys/contrib/openzfs/module/nvpair/nvpair.c
@@ -3246,7 +3246,8 @@ nvs_xdr_nvl_fini(nvstream_t *nvs)
* xdrproc_t-compatible callbacks for xdr_array()
*/
-#if defined(_KERNEL) && defined(__linux__) /* Linux kernel */
+#if (defined(__FreeBSD_version) && __FreeBSD_version >= 1600010) || \
+ defined(_KERNEL) && defined(__linux__) /* Linux kernel */
#define NVS_BUILD_XDRPROC_T(type) \
static bool_t \
diff --git a/sys/crypto/sha2/sha256c.c b/sys/crypto/sha2/sha256c.c
index b7f7295c5c85..c7f9bbf119a5 100644
--- a/sys/crypto/sha2/sha256c.c
+++ b/sys/crypto/sha2/sha256c.c
@@ -206,12 +206,8 @@ SHA256_Transform_arm64(uint32_t * state, const unsigned char block[64])
DEFINE_UIFUNC(static, void, SHA256_Transform,
(uint32_t * state, const unsigned char block[64]))
{
- u_long hwcap;
-
- if (elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap)) == 0) {
- if ((hwcap & HWCAP_SHA2) != 0)
- return (SHA256_Transform_arm64);
- }
+ if ((at_hwcap & HWCAP_SHA2) != 0)
+ return (SHA256_Transform_arm64);
return (SHA256_Transform_c);
}
diff --git a/sys/crypto/sha2/sha512c.c b/sys/crypto/sha2/sha512c.c
index 076cb30a1dfa..335dae08abb7 100644
--- a/sys/crypto/sha2/sha512c.c
+++ b/sys/crypto/sha2/sha512c.c
@@ -236,13 +236,8 @@ SHA512_Transform_arm64(uint64_t * state,
DEFINE_UIFUNC(static, void, SHA512_Transform,
(uint64_t * state, const unsigned char block[SHA512_BLOCK_LENGTH]))
{
- u_long hwcap;
-
- if (elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap)) == 0) {
- if ((hwcap & HWCAP_SHA512) != 0) {
- return (SHA512_Transform_arm64);
- }
- }
+ if ((at_hwcap & HWCAP_SHA512) != 0)
+ return (SHA512_Transform_arm64);
return (SHA512_Transform_c);
}
diff --git a/sys/dev/acpi_support/acpi_ibm.c b/sys/dev/acpi_support/acpi_ibm.c
index c1302508b8a2..f895d48bb6d0 100644
--- a/sys/dev/acpi_support/acpi_ibm.c
+++ b/sys/dev/acpi_support/acpi_ibm.c
@@ -568,14 +568,14 @@ acpi_ibm_attach(device_t dev)
SYSCTL_ADD_PROC(sc->sysctl_ctx,
SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO,
acpi_ibm_sysctls[i].name,
- CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE,
sc, i, acpi_ibm_sysctl, "I",
acpi_ibm_sysctls[i].description);
} else {
SYSCTL_ADD_PROC(sc->sysctl_ctx,
SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO,
acpi_ibm_sysctls[i].name,
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
sc, i, acpi_ibm_sysctl, "I",
acpi_ibm_sysctls[i].description);
}
diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c
index 8380f701d226..e43ef72ca9d2 100644
--- a/sys/dev/acpica/acpi.c
+++ b/sys/dev/acpica/acpi.c
@@ -58,6 +58,7 @@
#if defined(__i386__) || defined(__amd64__)
#include <machine/clock.h>
+#include <machine/intr_machdep.h>
#include <machine/pci_cfgreg.h>
#include <x86/cputypes.h>
#include <x86/x86_var.h>
@@ -679,15 +680,19 @@ acpi_attach(device_t dev)
#endif
/*
- * Probe all supported ACPI sleep states. Awake (S0) is always supported.
+ * Probe all supported ACPI sleep states. Awake (S0) is always supported,
+ * and suspend-to-idle is always supported on x86 only (at the moment).
*/
- acpi_supported_sstates[ACPI_STATE_S0] = TRUE;
+ acpi_supported_sstates[ACPI_STATE_S0] = true;
acpi_supported_stypes[POWER_STYPE_AWAKE] = true;
+#if defined(__i386__) || defined(__amd64__)
+ acpi_supported_stypes[POWER_STYPE_SUSPEND_TO_IDLE] = true;
+#endif
for (state = ACPI_STATE_S1; state <= ACPI_STATE_S5; state++)
if (ACPI_SUCCESS(AcpiEvaluateObject(ACPI_ROOT_OBJECT,
__DECONST(char *, AcpiGbl_SleepStateNames[state]), NULL, NULL)) &&
ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB))) {
- acpi_supported_sstates[state] = TRUE;
+ acpi_supported_sstates[state] = true;
acpi_supported_stypes[acpi_sstate_to_stype(state)] = true;
}
@@ -705,13 +710,24 @@ acpi_attach(device_t dev)
else if (acpi_supported_sstates[ACPI_STATE_S2])
sc->acpi_standby_sx = ACPI_STATE_S2;
- /* Pick the first valid sleep type for the sleep button default. */
+ /*
+ * Pick the first valid sleep type for the sleep button default. If that
+ * type was hibernate and we support s2idle, set it to that. The sleep
+ * button prefers s2mem instead of s2idle at the moment as s2idle may not
+ * yet work reliably on all machines. In the future, we should set this to
+ * s2idle when ACPI_FADT_LOW_POWER_S0 is set.
+ */
sc->acpi_sleep_button_stype = POWER_STYPE_UNKNOWN;
for (stype = POWER_STYPE_STANDBY; stype <= POWER_STYPE_HIBERNATE; stype++)
if (acpi_supported_stypes[stype]) {
sc->acpi_sleep_button_stype = stype;
break;
}
+ if (sc->acpi_sleep_button_stype == POWER_STYPE_HIBERNATE ||
+ sc->acpi_sleep_button_stype == POWER_STYPE_UNKNOWN) {
+ if (acpi_supported_stypes[POWER_STYPE_SUSPEND_TO_IDLE])
+ sc->acpi_sleep_button_stype = POWER_STYPE_SUSPEND_TO_IDLE;
+ }
acpi_enable_fixed_events(sc);
@@ -3315,7 +3331,8 @@ acpi_ReqSleepState(struct acpi_softc *sc, enum power_stype stype)
return (0);
#else
- /* This platform does not support acpi suspend/resume. */
+ device_printf(sc->acpi_dev, "ACPI suspend not supported on this platform "
+ "(TODO suspend to idle should be, however)\n");
return (EOPNOTSUPP);
#endif
}
@@ -3330,13 +3347,13 @@ acpi_ReqSleepState(struct acpi_softc *sc, enum power_stype stype)
int
acpi_AckSleepState(struct apm_clone_data *clone, int error)
{
+ struct acpi_softc *sc = clone->acpi_sc;
+
#if defined(__amd64__) || defined(__i386__)
- struct acpi_softc *sc;
int ret, sleeping;
/* If no pending sleep type, return an error. */
ACPI_LOCK(acpi);
- sc = clone->acpi_sc;
if (sc->acpi_next_stype == POWER_STYPE_AWAKE) {
ACPI_UNLOCK(acpi);
return (ENXIO);
@@ -3379,7 +3396,8 @@ acpi_AckSleepState(struct apm_clone_data *clone, int error)
}
return (ret);
#else
- /* This platform does not support acpi suspend/resume. */
+ device_printf(sc->acpi_dev, "ACPI suspend not supported on this platform "
+ "(TODO suspend to idle should be, however)\n");
return (EOPNOTSUPP);
#endif
}
@@ -3418,27 +3436,133 @@ acpi_sleep_disable(struct acpi_softc *sc)
}
enum acpi_sleep_state {
- ACPI_SS_NONE,
- ACPI_SS_GPE_SET,
- ACPI_SS_DEV_SUSPEND,
- ACPI_SS_SLP_PREP,
- ACPI_SS_SLEPT,
+ ACPI_SS_NONE = 0,
+ ACPI_SS_GPE_SET = 1 << 0,
+ ACPI_SS_DEV_SUSPEND = 1 << 1,
+ ACPI_SS_SLP_PREP = 1 << 2,
+ ACPI_SS_SLEPT = 1 << 3,
};
+static void
+do_standby(struct acpi_softc *sc, enum acpi_sleep_state *slp_state,
+ register_t rflags)
+{
+ ACPI_STATUS status;
+
+ status = AcpiEnterSleepState(sc->acpi_standby_sx);
+ intr_restore(rflags);
+ AcpiLeaveSleepStatePrep(sc->acpi_standby_sx);
+ if (ACPI_FAILURE(status)) {
+ device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n",
+ AcpiFormatException(status));
+ return;
+ }
+ *slp_state |= ACPI_SS_SLEPT;
+}
+
+static void
+do_sleep(struct acpi_softc *sc, enum acpi_sleep_state *slp_state,
+ register_t rflags, int state)
+{
+ int sleep_result;
+ ACPI_EVENT_STATUS power_button_status;
+
+ MPASS(state == ACPI_STATE_S3 || state == ACPI_STATE_S4);
+
+ sleep_result = acpi_sleep_machdep(sc, state);
+ acpi_wakeup_machdep(sc, state, sleep_result, 0);
+
+ if (sleep_result == 1 && state == ACPI_STATE_S3) {
+ /*
+ * XXX According to ACPI specification SCI_EN bit should be restored
+ * by ACPI platform (BIOS, firmware) to its pre-sleep state.
+ * Unfortunately some BIOSes fail to do that and that leads to
+ * unexpected and serious consequences during wake up like a system
+ * getting stuck in SMI handlers.
+ * This hack is picked up from Linux, which claims that it follows
+ * Windows behavior.
+ */
+ AcpiWriteBitRegister(ACPI_BITREG_SCI_ENABLE, ACPI_ENABLE_EVENT);
+
+ /*
+ * Prevent misinterpretation of the wakeup by power button
+ * as a request for power off.
+ * Ideally we should post an appropriate wakeup event,
+ * perhaps using acpi_event_power_button_wake or alike.
+ *
+ * Clearing of power button status after wakeup is mandated
+ * by ACPI specification in section "Fixed Power Button".
+ *
+ * XXX As of ACPICA 20121114 AcpiGetEventStatus provides
+ * status as 0/1 corresponding to inactive/active despite
+ * its type being ACPI_EVENT_STATUS. In other words,
+ * we should not test for ACPI_EVENT_FLAG_SET for time being.
+ */
+ if (ACPI_SUCCESS(AcpiGetEventStatus(ACPI_EVENT_POWER_BUTTON,
+ &power_button_status)) && power_button_status != 0) {
+ AcpiClearEvent(ACPI_EVENT_POWER_BUTTON);
+ device_printf(sc->acpi_dev, "cleared fixed power button status\n");
+ }
+ }
+
+ intr_restore(rflags);
+
+ /* call acpi_wakeup_machdep() again with interrupt enabled */
+ acpi_wakeup_machdep(sc, state, sleep_result, 1);
+
+ AcpiLeaveSleepStatePrep(state);
+
+ if (sleep_result == -1)
+ return;
+
+ /* Re-enable ACPI hardware on wakeup from sleep state 4. */
+ if (state == ACPI_STATE_S4)
+ AcpiEnable();
+ *slp_state |= ACPI_SS_SLEPT;
+}
+
+#if defined(__i386__) || defined(__amd64__)
+static void
+do_idle(struct acpi_softc *sc, enum acpi_sleep_state *slp_state,
+ register_t rflags)
+{
+
+ intr_suspend();
+
+ /*
+ * The CPU will exit idle when interrupted, so we want to minimize the
+ * number of interrupts it can receive while idle. We do this by only
+ * allowing SCI (system control interrupt) interrupts, which are used by
+ * the ACPI firmware to send wake GPEs to the OS.
+ *
+ * XXX We might still receive other spurious non-wake GPEs from noisy
+ * devices that can't be disabled, so this will need to end up being a
+ * suspend-to-idle loop which, when breaking out of idle, will check the
+ * reason for the wakeup and immediately idle the CPU again if it was not a
+ * proper wake event.
+ */
+ intr_enable_src(AcpiGbl_FADT.SciInterrupt);
+
+ cpu_idle(0);
+
+ intr_resume(false);
+ intr_restore(rflags);
+ *slp_state |= ACPI_SS_SLEPT;
+}
+#endif
+
/*
* Enter the desired system sleep state.
*
- * Currently we support S1-S5 but S4 is only S4BIOS
+ * Currently we support S1-S5 and suspend-to-idle, but S4 is only S4BIOS.
*/
static ACPI_STATUS
acpi_EnterSleepState(struct acpi_softc *sc, enum power_stype stype)
{
register_t intr;
ACPI_STATUS status;
- ACPI_EVENT_STATUS power_button_status;
enum acpi_sleep_state slp_state;
int acpi_sstate;
- int sleep_result;
ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, stype);
@@ -3498,7 +3622,7 @@ acpi_EnterSleepState(struct acpi_softc *sc, enum power_stype stype)
/* Enable any GPEs as appropriate and requested by the user. */
acpi_wake_prep_walk(sc, stype);
- slp_state = ACPI_SS_GPE_SET;
+ slp_state |= ACPI_SS_GPE_SET;
/*
* Inform all devices that we are going to sleep. If at least one
@@ -3509,113 +3633,78 @@ acpi_EnterSleepState(struct acpi_softc *sc, enum power_stype stype)
* bus interface does not provide for this.
*/
if (DEVICE_SUSPEND(root_bus) != 0) {
- device_printf(sc->acpi_dev, "device_suspend failed\n");
- goto backout;
+ device_printf(sc->acpi_dev, "device_suspend failed\n");
+ goto backout;
}
- slp_state = ACPI_SS_DEV_SUSPEND;
+ slp_state |= ACPI_SS_DEV_SUSPEND;
- status = AcpiEnterSleepStatePrep(acpi_sstate);
- if (ACPI_FAILURE(status)) {
- device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
- AcpiFormatException(status));
- goto backout;
+ if (stype != POWER_STYPE_SUSPEND_TO_IDLE) {
+ status = AcpiEnterSleepStatePrep(acpi_sstate);
+ if (ACPI_FAILURE(status)) {
+ device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
+ AcpiFormatException(status));
+ goto backout;
+ }
+ slp_state |= ACPI_SS_SLP_PREP;
}
- slp_state = ACPI_SS_SLP_PREP;
if (sc->acpi_sleep_delay > 0)
DELAY(sc->acpi_sleep_delay * 1000000);
suspendclock();
intr = intr_disable();
- if (stype != POWER_STYPE_STANDBY) {
- sleep_result = acpi_sleep_machdep(sc, acpi_sstate);
- acpi_wakeup_machdep(sc, acpi_sstate, sleep_result, 0);
-
- /*
- * XXX According to ACPI specification SCI_EN bit should be restored
- * by ACPI platform (BIOS, firmware) to its pre-sleep state.
- * Unfortunately some BIOSes fail to do that and that leads to
- * unexpected and serious consequences during wake up like a system
- * getting stuck in SMI handlers.
- * This hack is picked up from Linux, which claims that it follows
- * Windows behavior.
- */
- if (sleep_result == 1 && stype != POWER_STYPE_HIBERNATE)
- AcpiWriteBitRegister(ACPI_BITREG_SCI_ENABLE, ACPI_ENABLE_EVENT);
-
- if (sleep_result == 1 && stype == POWER_STYPE_SUSPEND_TO_MEM) {
- /*
- * Prevent mis-interpretation of the wakeup by power button
- * as a request for power off.
- * Ideally we should post an appropriate wakeup event,
- * perhaps using acpi_event_power_button_wake or alike.
- *
- * Clearing of power button status after wakeup is mandated
- * by ACPI specification in section "Fixed Power Button".
- *
- * XXX As of ACPICA 20121114 AcpiGetEventStatus provides
- * status as 0/1 corressponding to inactive/active despite
- * its type being ACPI_EVENT_STATUS. In other words,
- * we should not test for ACPI_EVENT_FLAG_SET for time being.
- */
- if (ACPI_SUCCESS(AcpiGetEventStatus(ACPI_EVENT_POWER_BUTTON,
- &power_button_status)) && power_button_status != 0) {
- AcpiClearEvent(ACPI_EVENT_POWER_BUTTON);
- device_printf(sc->acpi_dev,
- "cleared fixed power button status\n");
- }
- }
-
- intr_restore(intr);
-
- /* call acpi_wakeup_machdep() again with interrupt enabled */
- acpi_wakeup_machdep(sc, acpi_sstate, sleep_result, 1);
-
- AcpiLeaveSleepStatePrep(acpi_sstate);
-
- if (sleep_result == -1)
- goto backout;
-
- /* Re-enable ACPI hardware on wakeup from hibernate. */
- if (stype == POWER_STYPE_HIBERNATE)
- AcpiEnable();
- } else {
- status = AcpiEnterSleepState(acpi_sstate);
- intr_restore(intr);
- AcpiLeaveSleepStatePrep(acpi_sstate);
- if (ACPI_FAILURE(status)) {
- device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n",
- AcpiFormatException(status));
- goto backout;
- }
+ switch (stype) {
+ case POWER_STYPE_STANDBY:
+ do_standby(sc, &slp_state, intr);
+ break;
+ case POWER_STYPE_SUSPEND_TO_MEM:
+ case POWER_STYPE_HIBERNATE:
+ do_sleep(sc, &slp_state, intr, acpi_sstate);
+ break;
+ case POWER_STYPE_SUSPEND_TO_IDLE:
+#if defined(__i386__) || defined(__amd64__)
+ do_idle(sc, &slp_state, intr);
+ break;
+#endif
+ case POWER_STYPE_AWAKE:
+ case POWER_STYPE_POWEROFF:
+ case POWER_STYPE_COUNT:
+ case POWER_STYPE_UNKNOWN:
+ __unreachable();
}
- slp_state = ACPI_SS_SLEPT;
+ resumeclock();
/*
* Back out state according to how far along we got in the suspend
* process. This handles both the error and success cases.
*/
backout:
- if (slp_state >= ACPI_SS_SLP_PREP)
- resumeclock();
- if (slp_state >= ACPI_SS_GPE_SET) {
+ if ((slp_state & ACPI_SS_GPE_SET) != 0) {
acpi_wake_prep_walk(sc, stype);
sc->acpi_stype = POWER_STYPE_AWAKE;
+ slp_state &= ~ACPI_SS_GPE_SET;
}
- if (slp_state >= ACPI_SS_DEV_SUSPEND)
+ if ((slp_state & ACPI_SS_DEV_SUSPEND) != 0) {
DEVICE_RESUME(root_bus);
- if (slp_state >= ACPI_SS_SLP_PREP)
+ slp_state &= ~ACPI_SS_DEV_SUSPEND;
+ }
+ if ((slp_state & ACPI_SS_SLP_PREP) != 0) {
AcpiLeaveSleepState(acpi_sstate);
- if (slp_state >= ACPI_SS_SLEPT) {
+ slp_state &= ~ACPI_SS_SLP_PREP;
+ }
+ if ((slp_state & ACPI_SS_SLEPT) != 0) {
#if defined(__i386__) || defined(__amd64__)
/* NB: we are still using ACPI timecounter at this point. */
resume_TSC();
#endif
acpi_resync_clock(sc);
acpi_enable_fixed_events(sc);
+ slp_state &= ~ACPI_SS_SLEPT;
}
sc->acpi_next_stype = POWER_STYPE_AWAKE;
+ MPASS(slp_state == ACPI_SS_NONE);
+
bus_topo_unlock();
#ifdef EARLY_AP_STARTUP
@@ -4242,6 +4331,21 @@ acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn)
ACPI_UNLOCK(acpi);
}
+void
+acpi_deregister_ioctls(acpi_ioctl_fn fn)
+{
+ struct acpi_ioctl_hook *hp, *thp;
+
+ ACPI_LOCK(acpi);
+ TAILQ_FOREACH_SAFE(hp, &acpi_ioctl_hooks, link, thp) {
+ if (hp->fn == fn) {
+ TAILQ_REMOVE(&acpi_ioctl_hooks, hp, link);
+ free(hp, M_ACPIDEV);
+ }
+ }
+ ACPI_UNLOCK(acpi);
+}
+
static int
acpiopen(struct cdev *dev, int flag, int fmt, struct thread *td)
{
@@ -4514,6 +4618,7 @@ static struct debugtag dbg_layer[] = {
{"ACPI_FAN", ACPI_FAN},
{"ACPI_POWERRES", ACPI_POWERRES},
{"ACPI_PROCESSOR", ACPI_PROCESSOR},
+ {"ACPI_SPMC", ACPI_SPMC},
{"ACPI_THERMAL", ACPI_THERMAL},
{"ACPI_TIMER", ACPI_TIMER},
{"ACPI_ALL_DRIVERS", ACPI_ALL_DRIVERS},
diff --git a/sys/dev/acpica/acpi_battery.c b/sys/dev/acpica/acpi_battery.c
index cfd8261d5eab..f1eebda705c1 100644
--- a/sys/dev/acpica/acpi_battery.c
+++ b/sys/dev/acpica/acpi_battery.c
@@ -531,13 +531,7 @@ acpi_battery_init(void)
out:
if (error) {
- acpi_deregister_ioctl(ACPIIO_BATT_GET_UNITS, acpi_battery_ioctl);
- acpi_deregister_ioctl(ACPIIO_BATT_GET_BATTINFO, acpi_battery_ioctl);
- acpi_deregister_ioctl(ACPIIO_BATT_GET_BATTINFO_V1, acpi_battery_ioctl);
- acpi_deregister_ioctl(ACPIIO_BATT_GET_BIF, acpi_battery_ioctl);
- acpi_deregister_ioctl(ACPIIO_BATT_GET_BIX, acpi_battery_ioctl);
- acpi_deregister_ioctl(ACPIIO_BATT_GET_BST, acpi_battery_ioctl);
- acpi_deregister_ioctl(ACPIIO_BATT_GET_BST_V1, acpi_battery_ioctl);
+ acpi_deregister_ioctls(acpi_battery_ioctl);
}
return (error);
}
diff --git a/sys/dev/acpica/acpi_spmc.c b/sys/dev/acpica/acpi_spmc.c
new file mode 100644
index 000000000000..57593d9ccae1
--- /dev/null
+++ b/sys/dev/acpica/acpi_spmc.c
@@ -0,0 +1,618 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024-2025 The FreeBSD Foundation
+ *
+ * This software was developed by Aymeric Wibo <obiwac@freebsd.org>
+ * under sponsorship from the FreeBSD Foundation.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/malloc.h>
+#include <sys/uuid.h>
+#include <sys/kdb.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <dev/acpica/acpivar.h>
+
+/* Hooks for the ACPI CA debugging infrastructure */
+#define _COMPONENT ACPI_SPMC
+ACPI_MODULE_NAME("SPMC")
+
+static SYSCTL_NODE(_debug_acpi, OID_AUTO, spmc, CTLFLAG_RD | CTLFLAG_MPSAFE,
+ NULL, "SPMC debugging");
+
+static char *spmc_ids[] = {
+ "PNP0D80",
+ NULL
+};
+
+enum intel_dsm_index {
+ DSM_ENUM_FUNCTIONS = 0,
+ DSM_GET_DEVICE_CONSTRAINTS = 1,
+ DSM_GET_CRASH_DUMP_DEVICE = 2,
+ DSM_DISPLAY_OFF_NOTIF = 3,
+ DSM_DISPLAY_ON_NOTIF = 4,
+ DSM_ENTRY_NOTIF = 5,
+ DSM_EXIT_NOTIF = 6,
+ /* Only for Microsoft DSM set. */
+ DSM_MODERN_ENTRY_NOTIF = 7,
+ DSM_MODERN_EXIT_NOTIF = 8,
+};
+
+enum amd_dsm_index {
+ AMD_DSM_ENUM_FUNCTIONS = 0,
+ AMD_DSM_GET_DEVICE_CONSTRAINTS = 1,
+ AMD_DSM_ENTRY_NOTIF = 2,
+ AMD_DSM_EXIT_NOTIF = 3,
+ AMD_DSM_DISPLAY_OFF_NOTIF = 4,
+ AMD_DSM_DISPLAY_ON_NOTIF = 5,
+};
+
+enum dsm_set_flags {
+ DSM_SET_INTEL = 1 << 0,
+ DSM_SET_MS = 1 << 1,
+ DSM_SET_AMD = 1 << 2,
+};
+
+struct dsm_set {
+ enum dsm_set_flags flag;
+ const char *name;
+ int revision;
+ struct uuid uuid;
+ uint64_t dsms_expected;
+};
+
+static struct dsm_set intel_dsm_set = {
+ .flag = DSM_SET_INTEL,
+ .name = "Intel",
+ /*
+ * XXX Linux uses 1 for the revision on Intel DSMs, but doesn't explain
+ * why. The commit that introduces this links to a document mentioning
+ * revision 0, so default this to 0.
+ *
+ * The debug.acpi.spmc.intel_dsm_revision sysctl may be used to configure
+ * this just in case.
+ */
+ .revision = 0,
+ .uuid = { /* c4eb40a0-6cd2-11e2-bcfd-0800200c9a66 */
+ 0xc4eb40a0, 0x6cd2, 0x11e2, 0xbc, 0xfd,
+ {0x08, 0x00, 0x20, 0x0c, 0x9a, 0x66},
+ },
+ .dsms_expected = DSM_GET_DEVICE_CONSTRAINTS | DSM_DISPLAY_OFF_NOTIF |
+ DSM_DISPLAY_ON_NOTIF | DSM_ENTRY_NOTIF | DSM_EXIT_NOTIF,
+};
+
+SYSCTL_INT(_debug_acpi_spmc, OID_AUTO, intel_dsm_revision, CTLFLAG_RW,
+ &intel_dsm_set.revision, 0,
+ "Revision to use when evaluating Intel SPMC DSMs");
+
+static struct dsm_set ms_dsm_set = {
+ .flag = DSM_SET_MS,
+ .name = "Microsoft",
+ .revision = 0,
+ .uuid = { /* 11e00d56-ce64-47ce-837b-1f898f9aa461 */
+ 0x11e00d56, 0xce64, 0x47ce, 0x83, 0x7b,
+ {0x1f, 0x89, 0x8f, 0x9a, 0xa4, 0x61},
+ },
+ .dsms_expected = DSM_DISPLAY_OFF_NOTIF | DSM_DISPLAY_ON_NOTIF |
+ DSM_ENTRY_NOTIF | DSM_EXIT_NOTIF | DSM_MODERN_ENTRY_NOTIF |
+ DSM_MODERN_EXIT_NOTIF,
+};
+
+static struct dsm_set amd_dsm_set = {
+ .flag = DSM_SET_AMD,
+ .name = "AMD",
+ /*
+ * XXX Linux uses 0 for the revision on AMD DSMs, but at least on the
+ * Framework 13 AMD 7040 series, the enum functions DSM only returns a
+ * function mask that covers all the DSMs we need to call when called
+ * with revision 2.
+ *
+ * The debug.acpi.spmc.amd_dsm_revision sysctl may be used to configure
+ * this just in case.
+ */
+ .revision = 2,
+ .uuid = { /* e3f32452-febc-43ce-9039-932122d37721 */
+ 0xe3f32452, 0xfebc, 0x43ce, 0x90, 0x39,
+ {0x93, 0x21, 0x22, 0xd3, 0x77, 0x21},
+ },
+ .dsms_expected = AMD_DSM_GET_DEVICE_CONSTRAINTS | AMD_DSM_ENTRY_NOTIF |
+ AMD_DSM_EXIT_NOTIF | AMD_DSM_DISPLAY_OFF_NOTIF |
+ AMD_DSM_DISPLAY_ON_NOTIF,
+};
+
+SYSCTL_INT(_debug_acpi_spmc, OID_AUTO, amd_dsm_revision, CTLFLAG_RW,
+ &amd_dsm_set.revision, 0, "Revision to use when evaluating AMD SPMC DSMs");
+
+union dsm_index {
+ int i;
+ enum intel_dsm_index regular;
+ enum amd_dsm_index amd;
+};
+
+struct acpi_spmc_constraint {
+ bool enabled;
+ char *name;
+ int min_d_state;
+ ACPI_HANDLE handle;
+
+ /* Unused, spec only. */
+ uint64_t lpi_uid;
+ uint64_t min_dev_specific_state;
+
+ /* Unused, AMD only. */
+ uint64_t function_states;
+};
+
+struct acpi_spmc_softc {
+ device_t dev;
+ ACPI_HANDLE handle;
+ ACPI_OBJECT *obj;
+ enum dsm_set_flags dsm_sets;
+
+ bool constraints_populated;
+ size_t constraint_count;
+ struct acpi_spmc_constraint *constraints;
+};
+
+static void acpi_spmc_check_dsm_set(struct acpi_spmc_softc *sc,
+ ACPI_HANDLE handle, struct dsm_set *dsm_set);
+static int acpi_spmc_get_constraints(device_t dev);
+static void acpi_spmc_free_constraints(struct acpi_spmc_softc *sc);
+
+static int
+acpi_spmc_probe(device_t dev)
+{
+ char *name;
+ ACPI_HANDLE handle;
+ struct acpi_spmc_softc *sc;
+
+ /* Check that this is an enabled device. */
+ if (acpi_get_type(dev) != ACPI_TYPE_DEVICE || acpi_disabled("spmc"))
+ return (ENXIO);
+
+ if (ACPI_ID_PROBE(device_get_parent(dev), dev, spmc_ids, &name) > 0)
+ return (ENXIO);
+
+ handle = acpi_get_handle(dev);
+ if (handle == NULL)
+ return (ENXIO);
+
+ sc = device_get_softc(dev);
+
+	/* Check which sets of DSMs are supported. */
+ sc->dsm_sets = 0;
+
+ acpi_spmc_check_dsm_set(sc, handle, &intel_dsm_set);
+ acpi_spmc_check_dsm_set(sc, handle, &ms_dsm_set);
+ acpi_spmc_check_dsm_set(sc, handle, &amd_dsm_set);
+
+ if (sc->dsm_sets == 0)
+ return (ENXIO);
+
+ device_set_descf(dev, "Low Power S0 Idle (DSM sets 0x%x)",
+ sc->dsm_sets);
+
+ return (0);
+}
+
+static int
+acpi_spmc_attach(device_t dev)
+{
+ struct acpi_spmc_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ sc->handle = acpi_get_handle(dev);
+ if (sc->handle == NULL)
+ return (ENXIO);
+
+ sc->constraints_populated = false;
+ sc->constraint_count = 0;
+ sc->constraints = NULL;
+
+ /* Get device constraints. We can only call this once so do this now. */
+ acpi_spmc_get_constraints(sc->dev);
+
+ return (0);
+}
+
+static int
+acpi_spmc_detach(device_t dev)
+{
+ acpi_spmc_free_constraints(device_get_softc(dev));
+ return (0);
+}
+
+static void
+acpi_spmc_check_dsm_set(struct acpi_spmc_softc *sc, ACPI_HANDLE handle,
+ struct dsm_set *dsm_set)
+{
+ const uint64_t dsms_supported = acpi_DSMQuery(handle,
+ (uint8_t *)&dsm_set->uuid, dsm_set->revision);
+
+	/*
+	 * Check whether this DSM set is supported at all. We do this by
+	 * checking for the existence of the "enum functions" DSM.
+	 */
+ if ((dsms_supported & 1) == 0)
+ return;
+ if ((dsms_supported & dsm_set->dsms_expected)
+ != dsm_set->dsms_expected) {
+ device_printf(sc->dev, "DSM set %s does not support expected "
+ "DSMs (0x%lx vs 0x%lx). Some methods may fail.\n",
+ dsm_set->name, dsms_supported, dsm_set->dsms_expected);
+ }
+ sc->dsm_sets |= dsm_set->flag;
+}
+
+static void
+acpi_spmc_free_constraints(struct acpi_spmc_softc *sc)
+{
+ if (sc->constraints == NULL)
+ return;
+
+ for (size_t i = 0; i < sc->constraint_count; i++) {
+ if (sc->constraints[i].name != NULL)
+ free(sc->constraints[i].name, M_TEMP);
+ }
+
+ free(sc->constraints, M_TEMP);
+ sc->constraints = NULL;
+}
+
+static int
+acpi_spmc_get_constraints_spec(struct acpi_spmc_softc *sc, ACPI_OBJECT *object)
+{
+ struct acpi_spmc_constraint *constraint;
+ int revision;
+ ACPI_OBJECT *constraint_obj;
+ ACPI_OBJECT *name_obj;
+ ACPI_OBJECT *detail;
+ ACPI_OBJECT *constraint_package;
+
+ KASSERT(sc->constraints_populated == false,
+ ("constraints already populated"));
+
+ sc->constraint_count = object->Package.Count;
+ sc->constraints = malloc(sc->constraint_count * sizeof *sc->constraints,
+ M_TEMP, M_WAITOK | M_ZERO);
+
+	/*
+	 * The value of sc->constraint_count can change during the loop, so
+	 * iterate up to object->Package.Count to make sure we actually visit
+	 * every element in the package.
+	 */
+ for (size_t i = 0; i < object->Package.Count; i++) {
+ constraint_obj = &object->Package.Elements[i];
+ constraint = &sc->constraints[i];
+
+ constraint->enabled =
+ constraint_obj->Package.Elements[1].Integer.Value;
+
+ name_obj = &constraint_obj->Package.Elements[0];
+ constraint->name = strdup(name_obj->String.Pointer, M_TEMP);
+ if (constraint->name == NULL) {
+ acpi_spmc_free_constraints(sc);
+ return (ENOMEM);
+ }
+
+ /*
+ * The first element in the device constraint detail package is
+ * the revision, and should always be zero.
+ */
+ revision = constraint_obj->Package.Elements[0].Integer.Value;
+ if (revision != 0) {
+ device_printf(sc->dev, "Unknown revision %d for "
+ "device constraint detail package\n", revision);
+ sc->constraint_count--;
+ continue;
+ }
+
+ detail = &constraint_obj->Package.Elements[2];
+ constraint_package = &detail->Package.Elements[1];
+
+ constraint->lpi_uid =
+ constraint_package->Package.Elements[0].Integer.Value;
+ constraint->min_d_state =
+ constraint_package->Package.Elements[1].Integer.Value;
+ constraint->min_dev_specific_state =
+ constraint_package->Package.Elements[2].Integer.Value;
+ }
+
+ sc->constraints_populated = true;
+ return (0);
+}
+
+static int
+acpi_spmc_get_constraints_amd(struct acpi_spmc_softc *sc, ACPI_OBJECT *object)
+{
+ size_t constraint_count;
+ ACPI_OBJECT *constraint_obj;
+ ACPI_OBJECT *constraints;
+ struct acpi_spmc_constraint *constraint;
+ ACPI_OBJECT *name_obj;
+
+ KASSERT(sc->constraints_populated == false,
+ ("constraints already populated"));
+
+ /*
+ * First element in the package is unknown.
+ * Second element is the number of device constraints.
+ * Third element is the list of device constraints itself.
+ */
+ constraint_count = object->Package.Elements[1].Integer.Value;
+ constraints = &object->Package.Elements[2];
+
+ if (constraints->Package.Count != constraint_count) {
+ device_printf(sc->dev, "constraint count mismatch (%d to %zu)\n",
+ constraints->Package.Count, constraint_count);
+ return (ENXIO);
+ }
+
+ sc->constraint_count = constraint_count;
+ sc->constraints = malloc(constraint_count * sizeof *sc->constraints,
+ M_TEMP, M_WAITOK | M_ZERO);
+
+ for (size_t i = 0; i < constraint_count; i++) {
+ /* Parse the constraint package. */
+ constraint_obj = &constraints->Package.Elements[i];
+ if (constraint_obj->Package.Count != 4) {
+ device_printf(sc->dev, "constraint %zu has %d elements\n",
+ i, constraint_obj->Package.Count);
+ acpi_spmc_free_constraints(sc);
+ return (ENXIO);
+ }
+
+ constraint = &sc->constraints[i];
+ constraint->enabled =
+ constraint_obj->Package.Elements[0].Integer.Value;
+
+ name_obj = &constraint_obj->Package.Elements[1];
+ constraint->name = strdup(name_obj->String.Pointer, M_TEMP);
+ if (constraint->name == NULL) {
+ acpi_spmc_free_constraints(sc);
+ return (ENOMEM);
+ }
+
+ constraint->function_states =
+ constraint_obj->Package.Elements[2].Integer.Value;
+ constraint->min_d_state =
+ constraint_obj->Package.Elements[3].Integer.Value;
+ }
+
+ sc->constraints_populated = true;
+ return (0);
+}
+
+static int
+acpi_spmc_get_constraints(device_t dev)
+{
+ struct acpi_spmc_softc *sc;
+ union dsm_index dsm_index;
+ struct dsm_set *dsm_set;
+ ACPI_STATUS status;
+ ACPI_BUFFER result;
+ ACPI_OBJECT *object;
+ bool is_amd;
+ int rv;
+ struct acpi_spmc_constraint *constraint;
+
+ sc = device_get_softc(dev);
+ if (sc->constraints_populated)
+ return (0);
+
+ /* The Microsoft DSM set doesn't have this DSM. */
+ is_amd = (sc->dsm_sets & DSM_SET_AMD) != 0;
+ if (is_amd) {
+ dsm_set = &amd_dsm_set;
+ dsm_index.amd = AMD_DSM_GET_DEVICE_CONSTRAINTS;
+ } else {
+ dsm_set = &intel_dsm_set;
+ dsm_index.regular = DSM_GET_DEVICE_CONSTRAINTS;
+ }
+
+ /* XXX It seems like this DSM fails if called more than once. */
+ status = acpi_EvaluateDSMTyped(sc->handle, (uint8_t *)&dsm_set->uuid,
+ dsm_set->revision, dsm_index.i, NULL, &result,
+ ACPI_TYPE_PACKAGE);
+ if (ACPI_FAILURE(status)) {
+ device_printf(dev, "%s failed to call %s DSM %d (rev %d)\n",
+ __func__, dsm_set->name, dsm_index.i, dsm_set->revision);
+ return (ENXIO);
+ }
+
+ object = (ACPI_OBJECT *)result.Pointer;
+ if (is_amd)
+ rv = acpi_spmc_get_constraints_amd(sc, object);
+ else
+ rv = acpi_spmc_get_constraints_spec(sc, object);
+ AcpiOsFree(object);
+ if (rv != 0)
+ return (rv);
+
+ /* Get handles for each constraint device. */
+ for (size_t i = 0; i < sc->constraint_count; i++) {
+ constraint = &sc->constraints[i];
+
+ status = acpi_GetHandleInScope(sc->handle,
+ __DECONST(char *, constraint->name), &constraint->handle);
+ if (ACPI_FAILURE(status)) {
+ device_printf(dev, "failed to get handle for %s\n",
+ constraint->name);
+ constraint->handle = NULL;
+ }
+ }
+ return (0);
+}
+
+static void
+acpi_spmc_check_constraints(struct acpi_spmc_softc *sc)
+{
+ bool violation = false;
+
+ KASSERT(sc->constraints_populated, ("constraints not populated"));
+ for (size_t i = 0; i < sc->constraint_count; i++) {
+ struct acpi_spmc_constraint *constraint = &sc->constraints[i];
+
+ if (!constraint->enabled)
+ continue;
+ if (constraint->handle == NULL)
+ continue;
+
+ ACPI_STATUS status = acpi_GetHandleInScope(sc->handle,
+ __DECONST(char *, constraint->name), &constraint->handle);
+ if (ACPI_FAILURE(status)) {
+ device_printf(sc->dev, "failed to get handle for %s\n",
+ constraint->name);
+ constraint->handle = NULL;
+ }
+ if (constraint->handle == NULL)
+ continue;
+
+#ifdef notyet
+ int d_state;
+ if (ACPI_FAILURE(acpi_pwr_get_state(constraint->handle, &d_state)))
+ continue;
+ if (d_state < constraint->min_d_state) {
+ device_printf(sc->dev, "constraint for device %s"
+ " violated (minimum D-state required was %s, actual"
+ " D-state is %s), might fail to enter LPI state\n",
+ constraint->name,
+ acpi_d_state_to_str(constraint->min_d_state),
+ acpi_d_state_to_str(d_state));
+ violation = true;
+ }
+#endif
+ }
+ if (!violation)
+ device_printf(sc->dev,
+ "all device power constraints respected!\n");
+}
+
+static void
+acpi_spmc_run_dsm(device_t dev, struct dsm_set *dsm_set, int index)
+{
+ struct acpi_spmc_softc *sc;
+ ACPI_STATUS status;
+ ACPI_BUFFER result;
+
+ sc = device_get_softc(dev);
+
+ status = acpi_EvaluateDSMTyped(sc->handle, (uint8_t *)&dsm_set->uuid,
+ dsm_set->revision, index, NULL, &result, ACPI_TYPE_ANY);
+
+ if (ACPI_FAILURE(status)) {
+ device_printf(dev, "%s failed to call %s DSM %d (rev %d)\n",
+ __func__, dsm_set->name, index, dsm_set->revision);
+ return;
+ }
+
+ AcpiOsFree(result.Pointer);
+}
+
+/*
+ * Try running the DSMs from all the DSM sets we have, since a failing DSM
+ * costs us nothing, and it seems that on AMD platforms both the AMD entry
+ * and the Microsoft "modern" DSMs are required to enter modern standby.
+ *
+ * This is what Linux does too.
+ */
+static void
+acpi_spmc_display_off_notif(device_t dev)
+{
+ struct acpi_spmc_softc *sc = device_get_softc(dev);
+
+ if ((sc->dsm_sets & DSM_SET_INTEL) != 0)
+ acpi_spmc_run_dsm(dev, &intel_dsm_set, DSM_DISPLAY_OFF_NOTIF);
+ if ((sc->dsm_sets & DSM_SET_MS) != 0)
+ acpi_spmc_run_dsm(dev, &ms_dsm_set, DSM_DISPLAY_OFF_NOTIF);
+ if ((sc->dsm_sets & DSM_SET_AMD) != 0)
+ acpi_spmc_run_dsm(dev, &amd_dsm_set, AMD_DSM_DISPLAY_OFF_NOTIF);
+}
+
+static void
+acpi_spmc_display_on_notif(device_t dev)
+{
+ struct acpi_spmc_softc *sc = device_get_softc(dev);
+
+ if ((sc->dsm_sets & DSM_SET_INTEL) != 0)
+ acpi_spmc_run_dsm(dev, &intel_dsm_set, DSM_DISPLAY_ON_NOTIF);
+ if ((sc->dsm_sets & DSM_SET_MS) != 0)
+ acpi_spmc_run_dsm(dev, &ms_dsm_set, DSM_DISPLAY_ON_NOTIF);
+ if ((sc->dsm_sets & DSM_SET_AMD) != 0)
+ acpi_spmc_run_dsm(dev, &amd_dsm_set, AMD_DSM_DISPLAY_ON_NOTIF);
+}
+
+static void
+acpi_spmc_entry_notif(device_t dev)
+{
+ struct acpi_spmc_softc *sc = device_get_softc(dev);
+
+ acpi_spmc_check_constraints(sc);
+
+ if ((sc->dsm_sets & DSM_SET_AMD) != 0)
+ acpi_spmc_run_dsm(dev, &amd_dsm_set, AMD_DSM_ENTRY_NOTIF);
+ if ((sc->dsm_sets & DSM_SET_MS) != 0) {
+ acpi_spmc_run_dsm(dev, &ms_dsm_set, DSM_MODERN_ENTRY_NOTIF);
+ acpi_spmc_run_dsm(dev, &ms_dsm_set, DSM_ENTRY_NOTIF);
+ }
+ if ((sc->dsm_sets & DSM_SET_INTEL) != 0)
+ acpi_spmc_run_dsm(dev, &intel_dsm_set, DSM_ENTRY_NOTIF);
+}
+
+static void
+acpi_spmc_exit_notif(device_t dev)
+{
+ struct acpi_spmc_softc *sc = device_get_softc(dev);
+
+ if ((sc->dsm_sets & DSM_SET_INTEL) != 0)
+ acpi_spmc_run_dsm(dev, &intel_dsm_set, DSM_EXIT_NOTIF);
+ if ((sc->dsm_sets & DSM_SET_AMD) != 0)
+ acpi_spmc_run_dsm(dev, &amd_dsm_set, AMD_DSM_EXIT_NOTIF);
+ if ((sc->dsm_sets & DSM_SET_MS) != 0) {
+ acpi_spmc_run_dsm(dev, &ms_dsm_set, DSM_EXIT_NOTIF);
+ acpi_spmc_run_dsm(dev, &ms_dsm_set, DSM_MODERN_EXIT_NOTIF);
+ }
+}
+
+static int
+acpi_spmc_suspend(device_t dev)
+{
+ acpi_spmc_display_off_notif(dev);
+ acpi_spmc_entry_notif(dev);
+
+ return (0);
+}
+
+static int
+acpi_spmc_resume(device_t dev)
+{
+ acpi_spmc_exit_notif(dev);
+ acpi_spmc_display_on_notif(dev);
+
+ return (0);
+}
+
+static device_method_t acpi_spmc_methods[] = {
+ DEVMETHOD(device_probe, acpi_spmc_probe),
+ DEVMETHOD(device_attach, acpi_spmc_attach),
+ DEVMETHOD(device_detach, acpi_spmc_detach),
+ DEVMETHOD_END
+};
+
+static driver_t acpi_spmc_driver = {
+ "acpi_spmc",
+ acpi_spmc_methods,
+ sizeof(struct acpi_spmc_softc),
+};
+
+DRIVER_MODULE_ORDERED(acpi_spmc, acpi, acpi_spmc_driver, NULL, NULL, SI_ORDER_ANY);
+MODULE_DEPEND(acpi_spmc, acpi, 1, 1, 1);
diff --git a/sys/dev/acpica/acpiio.h b/sys/dev/acpica/acpiio.h
index 63779d309951..4df049ed196a 100644
--- a/sys/dev/acpica/acpiio.h
+++ b/sys/dev/acpica/acpiio.h
@@ -205,6 +205,7 @@ union acpi_battery_ioctl_arg {
typedef int (*acpi_ioctl_fn)(u_long cmd, caddr_t addr, void *arg);
extern int acpi_register_ioctl(u_long cmd, acpi_ioctl_fn fn, void *arg);
extern void acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn);
+extern void acpi_deregister_ioctls(acpi_ioctl_fn fn);
#endif
#endif /* !_ACPIIO_H_ */
diff --git a/sys/dev/acpica/acpivar.h b/sys/dev/acpica/acpivar.h
index 6db55b10570d..0ffb9f7c7cc3 100644
--- a/sys/dev/acpica/acpivar.h
+++ b/sys/dev/acpica/acpivar.h
@@ -191,6 +191,7 @@ extern struct mtx acpi_mutex;
#define ACPI_THERMAL 0x01000000
#define ACPI_TIMER 0x02000000
#define ACPI_OEM 0x04000000
+#define ACPI_SPMC 0x08000000
/*
* Constants for different interrupt models used with acpi_SetIntrModel().
diff --git a/sys/dev/aq/aq_common.h b/sys/dev/aq/aq_common.h
index af59ecf7af1e..f31567480ec0 100644
--- a/sys/dev/aq/aq_common.h
+++ b/sys/dev/aq/aq_common.h
@@ -67,5 +67,4 @@ do { \
#define AQ_VER "0.0.5"
-#endif //_AQ_COMMON_H_
-
+#endif // _AQ_COMMON_H_
diff --git a/sys/dev/aq/aq_dbg.c b/sys/dev/aq/aq_dbg.c
index 495991fafbdc..bed29b3d0755 100644
--- a/sys/dev/aq/aq_dbg.c
+++ b/sys/dev/aq/aq_dbg.c
@@ -56,7 +56,8 @@ const uint32_t dbg_categories_ = dbg_init | dbg_config | dbg_fw;
#define __field(TYPE, VAR) TYPE VAR;
void
-trace_aq_tx_descr(int ring_idx, unsigned int pointer, volatile uint64_t descr[2])
+trace_aq_tx_descr(int ring_idx, unsigned int pointer,
+ volatile uint64_t descr[2])
{
#if AQ_CFG_DEBUG_LVL > 2
struct __entry{
@@ -211,7 +212,8 @@ DumpHex(const void* data, size_t size) {
for (i = 0; i < size; ++i) {
sprintf(buf, "%02X ", ((const unsigned char*)data)[i]);
strcat(line, buf);
- if (((const unsigned char*)data)[i] >= ' ' && ((const unsigned char*)data)[i] <= '~') {
+ if (((const unsigned char*)data)[i] >= ' ' &&
+ ((const unsigned char*)data)[i] <= '~') {
ascii[i % 16] = ((const unsigned char*)data)[i];
} else {
ascii[i % 16] = '.';
diff --git a/sys/dev/aq/aq_device.h b/sys/dev/aq/aq_device.h
index 2b170f710840..64edbd138b3a 100644
--- a/sys/dev/aq/aq_device.h
+++ b/sys/dev/aq/aq_device.h
@@ -147,4 +147,4 @@ void aq_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr);
int aq_mediachange(struct ifnet *ifp);
void aq_if_update_admin_status(if_ctx_t ctx);
-#endif
+#endif // _AQ_DEVICE_H_
diff --git a/sys/dev/aq/aq_fw.c b/sys/dev/aq/aq_fw.c
index fac720ba2327..3ab5909882e1 100644
--- a/sys/dev/aq/aq_fw.c
+++ b/sys/dev/aq/aq_fw.c
@@ -62,6 +62,7 @@ typedef enum aq_fw_bootloader_mode
} aq_fw_bootloader_mode;
#define AQ_CFG_HOST_BOOT_DISABLE 0
+
// Timeouts
#define RBL_TIMEOUT_MS 10000
#define MAC_FW_START_TIMEOUT_MS 10000
@@ -215,12 +216,14 @@ mac_soft_reset_flb_(struct aq_hw* hw)
int k;
reg_global_ctl2_set(hw, 0x40e1);
- // Let Felicity hardware to complete SMBUS transaction before Global software reset.
+ // Let Felicity hardware complete SMBUS transaction before Global
+ // software reset.
msec_delay(50);
/*
- * If SPI burst transaction was interrupted(before running the script), global software
- * reset may not clear SPI interface. Clean it up manually before global reset.
+ * If SPI burst transaction was interrupted (before running the script),
+ * global software reset may not clear SPI interface. Clean it up
+ * manually before global reset.
*/
reg_glb_nvr_provisioning2_set(hw, 0xa0);
reg_glb_nvr_interface1_set(hw, 0x9f);
@@ -236,8 +239,8 @@ mac_soft_reset_flb_(struct aq_hw* hw)
reg_glb_general_provisioning9_set(hw, 1);
/*
- * For the case SPI burst transaction was interrupted (by MCP reset above),
- * wait until it is completed by hardware.
+ * For the case SPI burst transaction was interrupted (by MCP reset
+ * above), wait until it is completed by hardware.
*/
msec_delay(50); // Sleep for 10 ms.
@@ -263,7 +266,8 @@ mac_soft_reset_flb_(struct aq_hw* hw)
trace(dbg_init, "FLB> MAC kickstart done, %d ms", k);
/* FW reset */
reg_global_ctl2_set(hw, 0x80e0);
- // Let Felicity hardware complete SMBUS transaction before Global software reset.
+ // Let Felicity hardware complete SMBUS transaction before
+ // Global software reset.
msec_delay(50);
}
reg_glb_cpu_sem_set(hw, 1, 0);
diff --git a/sys/dev/aq/aq_fw1x.c b/sys/dev/aq/aq_fw1x.c
index 10328fdbb01c..5a409eef15f4 100644
--- a/sys/dev/aq/aq_fw1x.c
+++ b/sys/dev/aq/aq_fw1x.c
@@ -171,7 +171,8 @@ fw1x_reset(struct aq_hw* hal)
const int retryCount = 1000;
for (int i = 0; i < retryCount; ++i) {
- // Read the beginning of Statistics structure to capture the Transaction ID.
+ // Read the beginning of Statistics structure to capture the
+ // Transaction ID.
aq_hw_fw_downld_dwords(hal, hal->mbox_addr, (uint32_t*)&mbox,
(uint32_t)((char*)&mbox.stats - (char*)&mbox) / sizeof(uint32_t));
@@ -185,7 +186,6 @@ fw1x_reset(struct aq_hw* hal)
* Compare transaction ID to initial value.
* If it's different means f/w is alive. We're done.
*/
-
return (EOK);
}
@@ -321,4 +321,3 @@ struct aq_firmware_ops aq_fw1x_ops =
.get_mac_addr = fw1x_get_mac_addr,
.get_stats = fw1x_get_stats,
};
-
diff --git a/sys/dev/aq/aq_fw2x.c b/sys/dev/aq/aq_fw2x.c
index 7a39a0ddc7e6..004121242ac1 100644
--- a/sys/dev/aq/aq_fw2x.c
+++ b/sys/dev/aq/aq_fw2x.c
@@ -193,7 +193,6 @@ typedef struct fw2x_mailbox // struct fwHostInterface
#define FW2X_LED_DEFAULT 0x0U
// Firmware v2-3.x specific functions.
-//@{
int fw2x_reset(struct aq_hw* hw);
int fw2x_set_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e mode,
@@ -203,8 +202,6 @@ int fw2x_get_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e* mode,
int fw2x_get_mac_addr(struct aq_hw* hw, uint8_t* mac);
int fw2x_get_stats(struct aq_hw* hw, struct aq_hw_stats_s* stats);
-//@}
-
static uint64_t
@@ -474,7 +471,7 @@ fw2x_get_stats(struct aq_hw* hw, struct aq_hw_stats_s* stats)
return (-ENOTSUP);
}
- // Say to F/W to update the statistics
+ // Tell F/W to update the statistics.
if (!toggle_mpi_ctrl_and_wait_(hw, FW2X_CAP_STATISTICS, 1, 25)) {
trace_error(dbg_fw, "fw2x> statistics update timeout");
AQ_DBG_EXIT(-ETIME);
diff --git a/sys/dev/aq/aq_hw.c b/sys/dev/aq/aq_hw.c
index fe48e255254a..f73805a939cd 100644
--- a/sys/dev/aq/aq_hw.c
+++ b/sys/dev/aq/aq_hw.c
@@ -796,9 +796,11 @@ hw_atl_b0_hw_vlan_promisc_set(struct aq_hw_s *self, bool promisc)
void
-aq_hw_set_promisc(struct aq_hw_s *self, bool l2_promisc, bool vlan_promisc, bool mc_promisc)
+aq_hw_set_promisc(struct aq_hw_s *self, bool l2_promisc, bool vlan_promisc,
+ bool mc_promisc)
{
- AQ_DBG_ENTERA("promisc %d, vlan_promisc %d, allmulti %d", l2_promisc, vlan_promisc, mc_promisc);
+ AQ_DBG_ENTERA("promisc %d, vlan_promisc %d, allmulti %d", l2_promisc,
+ vlan_promisc, mc_promisc);
rpfl2promiscuous_mode_en_set(self, l2_promisc);
@@ -811,7 +813,8 @@ aq_hw_set_promisc(struct aq_hw_s *self, bool l2_promisc, bool vlan_promisc, bool
}
int
-aq_hw_rss_hash_set(struct aq_hw_s *self, uint8_t rss_key[HW_ATL_RSS_HASHKEY_SIZE])
+aq_hw_rss_hash_set(struct aq_hw_s *self,
+ uint8_t rss_key[HW_ATL_RSS_HASHKEY_SIZE])
{
uint32_t rss_key_dw[HW_ATL_RSS_HASHKEY_SIZE / 4];
uint32_t addr = 0U;
@@ -841,7 +844,8 @@ err_exit:
}
int
-aq_hw_rss_hash_get(struct aq_hw_s *self, uint8_t rss_key[HW_ATL_RSS_HASHKEY_SIZE])
+aq_hw_rss_hash_get(struct aq_hw_s *self,
+ uint8_t rss_key[HW_ATL_RSS_HASHKEY_SIZE])
{
uint32_t rss_key_dw[HW_ATL_RSS_HASHKEY_SIZE / 4];
uint32_t addr = 0U;
@@ -863,7 +867,8 @@ aq_hw_rss_hash_get(struct aq_hw_s *self, uint8_t rss_key[HW_ATL_RSS_HASHKEY_SIZE
}
int
-aq_hw_rss_set(struct aq_hw_s *self, uint8_t rss_table[HW_ATL_RSS_INDIRECTION_TABLE_MAX])
+aq_hw_rss_set(struct aq_hw_s *self,
+ uint8_t rss_table[HW_ATL_RSS_INDIRECTION_TABLE_MAX])
{
uint16_t bitary[(HW_ATL_RSS_INDIRECTION_TABLE_MAX *
3 / 16U)];
diff --git a/sys/dev/aq/aq_hw.h b/sys/dev/aq/aq_hw.h
index fb07f7c8b838..a4d4dbb3a512 100644
--- a/sys/dev/aq/aq_hw.h
+++ b/sys/dev/aq/aq_hw.h
@@ -356,5 +356,4 @@ int aq_hw_rss_hash_get(struct aq_hw_s *self, uint8_t rss_key[HW_ATL_RSS_HASHKEY_
int aq_hw_rss_set(struct aq_hw_s *self, uint8_t rss_table[HW_ATL_RSS_INDIRECTION_TABLE_MAX]);
int aq_hw_udp_rss_enable(struct aq_hw_s *self, bool enable);
-#endif //_AQ_HW_H_
-
+#endif // _AQ_HW_H_
diff --git a/sys/dev/aq/aq_hw_llh.c b/sys/dev/aq/aq_hw_llh.c
index 87384ad46618..43f966becf50 100644
--- a/sys/dev/aq/aq_hw_llh.c
+++ b/sys/dev/aq/aq_hw_llh.c
@@ -89,7 +89,8 @@ reg_global_ctl2_get(struct aq_hw* hw)
}
void
-reg_glb_daisy_chain_status1_set(struct aq_hw* hw, uint32_t glb_daisy_chain_status1)
+reg_glb_daisy_chain_status1_set(struct aq_hw* hw,
+ uint32_t glb_daisy_chain_status1)
{
AQ_WRITE_REG(hw, glb_daisy_chain_status1_adr, glb_daisy_chain_status1);
}
@@ -459,7 +460,8 @@ itr_mif_int_map_en_get(struct aq_hw *aq_hw, uint32_t mif)
}
void
-itr_mif_int_map_set(struct aq_hw *aq_hw, uint32_t mifInterruptMapping, uint32_t mif)
+itr_mif_int_map_set(struct aq_hw *aq_hw, uint32_t mifInterruptMapping,
+ uint32_t mif)
{
AQ_WRITE_REG_BIT(aq_hw, itrImrMifM_ADR(mif), itrImrMifM_MSK(mif),
itrImrMifM_SHIFT(mif), mifInterruptMapping);
@@ -516,8 +518,8 @@ rdm_rx_dca_mode_set(struct aq_hw *aq_hw, uint32_t rx_dca_mode)
}
void
-rdm_rx_desc_data_buff_size_set(struct aq_hw *aq_hw, uint32_t rx_desc_data_buff_size,
- uint32_t descriptor)
+rdm_rx_desc_data_buff_size_set(struct aq_hw *aq_hw,
+ uint32_t rx_desc_data_buff_size, uint32_t descriptor)
{
AQ_WRITE_REG_BIT(aq_hw, rdm_descddata_size_adr(descriptor),
rdm_descddata_size_msk, rdm_descddata_size_shift,
@@ -525,14 +527,16 @@ rdm_rx_desc_data_buff_size_set(struct aq_hw *aq_hw, uint32_t rx_desc_data_buff_s
}
void
-rdm_rx_desc_dca_en_set(struct aq_hw *aq_hw, uint32_t rx_desc_dca_en, uint32_t dca)
+rdm_rx_desc_dca_en_set(struct aq_hw *aq_hw, uint32_t rx_desc_dca_en,
+ uint32_t dca)
{
AQ_WRITE_REG_BIT(aq_hw, rdm_dcaddesc_en_adr(dca), rdm_dcaddesc_en_msk,
rdm_dcaddesc_en_shift, rx_desc_dca_en);
}
void
-rdm_rx_desc_en_set(struct aq_hw *aq_hw, uint32_t rx_desc_en, uint32_t descriptor)
+rdm_rx_desc_en_set(struct aq_hw *aq_hw, uint32_t rx_desc_en,
+ uint32_t descriptor)
{
AQ_WRITE_REG_BIT(aq_hw, rdm_descden_adr(descriptor), rdm_descden_msk,
rdm_descden_shift, rx_desc_en);
@@ -564,14 +568,16 @@ rdm_rx_desc_head_ptr_get(struct aq_hw *aq_hw, uint32_t descriptor)
}
void
-rdm_rx_desc_len_set(struct aq_hw *aq_hw, uint32_t rx_desc_len, uint32_t descriptor)
+rdm_rx_desc_len_set(struct aq_hw *aq_hw, uint32_t rx_desc_len,
+ uint32_t descriptor)
{
AQ_WRITE_REG_BIT(aq_hw, rdm_descdlen_adr(descriptor), rdm_descdlen_msk,
rdm_descdlen_shift, rx_desc_len);
}
void
-rdm_rx_desc_res_set(struct aq_hw *aq_hw, uint32_t rx_desc_res, uint32_t descriptor)
+rdm_rx_desc_res_set(struct aq_hw *aq_hw, uint32_t rx_desc_res,
+ uint32_t descriptor)
{
AQ_WRITE_REG_BIT(aq_hw, rdm_descdreset_adr(descriptor),
rdm_descdreset_msk, rdm_descdreset_shift, rx_desc_res);
@@ -587,7 +593,8 @@ rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw *aq_hw,
}
void
-rdm_rx_head_dca_en_set(struct aq_hw *aq_hw, uint32_t rx_head_dca_en, uint32_t dca)
+rdm_rx_head_dca_en_set(struct aq_hw *aq_hw, uint32_t rx_head_dca_en,
+ uint32_t dca)
{
AQ_WRITE_REG_BIT(aq_hw, rdm_dcadhdr_en_adr(dca), rdm_dcadhdr_en_msk,
rdm_dcadhdr_en_shift, rx_head_dca_en);
@@ -960,7 +967,8 @@ rpf_rss_redir_tbl_addr_set(struct aq_hw *aq_hw, uint32_t rss_redir_tbl_addr)
}
void
-rpf_rss_redir_tbl_wr_data_set(struct aq_hw *aq_hw, uint32_t rss_redir_tbl_wr_data)
+rpf_rss_redir_tbl_wr_data_set(struct aq_hw *aq_hw,
+ uint32_t rss_redir_tbl_wr_data)
{
AQ_WRITE_REG_BIT(aq_hw, rpf_rss_redir_wr_data_adr,
rpf_rss_redir_wr_data_msk, rpf_rss_redir_wr_data_shift,
@@ -1010,7 +1018,8 @@ hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, uint32_t vlan_outer_etht)
}
void
-hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, uint32_t vlan_prom_mode_en)
+hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
+ uint32_t vlan_prom_mode_en)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_PROMIS_MODE_ADR,
HW_ATL_RPF_VL_PROMIS_MODE_MSK,
@@ -1029,7 +1038,8 @@ hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
}
void
-hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, uint32_t vlan_untagged_act)
+hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
+ uint32_t vlan_untagged_act)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_UNTAGGED_ACT_ADR,
HW_ATL_RPF_VL_UNTAGGED_ACT_MSK,
@@ -1038,7 +1048,8 @@ hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, uint32_t vlan_untagged_a
}
void
-hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, uint32_t vlan_flr_en, uint32_t filter)
+hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, uint32_t vlan_flr_en,
+ uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_EN_F_ADR(filter),
HW_ATL_RPF_VL_EN_F_MSK,
@@ -1047,7 +1058,8 @@ hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, uint32_t vlan_flr_en, uint32_t
}
void
-hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, uint32_t vlan_flr_act, uint32_t filter)
+hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, uint32_t vlan_flr_act,
+ uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACT_F_ADR(filter),
HW_ATL_RPF_VL_ACT_F_MSK,
@@ -1056,7 +1068,8 @@ hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, uint32_t vlan_flr_act, uint32
}
void
-hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, uint32_t vlan_id_flr, uint32_t filter)
+hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, uint32_t vlan_id_flr,
+ uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ID_F_ADR(filter),
HW_ATL_RPF_VL_ID_F_MSK,
@@ -1075,7 +1088,8 @@ hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, uint32_t vlan_rxq_en,
}
void
-hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, uint32_t vlan_rxq, uint32_t filter)
+hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, uint32_t vlan_rxq,
+ uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_F_ADR(filter),
HW_ATL_RPF_VL_RXQ_F_MSK,
@@ -1084,7 +1098,8 @@ hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, uint32_t vlan_rxq, uint32_t f
};
void
-hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, uint32_t etht_flr_en, uint32_t filter)
+hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, uint32_t etht_flr_en,
+ uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ENF_ADR(filter),
HW_ATL_RPF_ET_ENF_MSK,
@@ -1101,8 +1116,8 @@ hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
}
void
-hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, uint32_t etht_rx_queue_en,
- uint32_t filter)
+hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
+ uint32_t etht_rx_queue_en, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQFEN_ADR(filter),
HW_ATL_RPF_ET_RXQFEN_MSK,
@@ -1111,8 +1126,8 @@ hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, uint32_t etht_rx_queue_en
}
void
-hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, uint32_t etht_user_priority,
- uint32_t filter)
+hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
+ uint32_t etht_user_priority, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPF_ADR(filter),
HW_ATL_RPF_ET_UPF_MSK,
@@ -1148,7 +1163,8 @@ hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, uint32_t etht_flr_act,
}
void
-hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, uint32_t etht_flr, uint32_t filter)
+hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, uint32_t etht_flr,
+ uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_VALF_ADR(filter),
HW_ATL_RPF_ET_VALF_MSK,
@@ -1220,7 +1236,8 @@ hw_atl_rpf_l3_arpf_en_set(struct aq_hw_s *aq_hw, uint32_t val, uint32_t filter)
}
void
-hw_atl_rpf_l3_l4_rxqf_en_set(struct aq_hw_s *aq_hw, uint32_t val, uint32_t filter)
+hw_atl_rpf_l3_l4_rxqf_en_set(struct aq_hw_s *aq_hw, uint32_t val,
+ uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L3_L4_RXQF_EN_ADR(filter),
HW_ATL_RPF_L3_L4_RXQF_EN_MSK,
@@ -1228,7 +1245,8 @@ hw_atl_rpf_l3_l4_rxqf_en_set(struct aq_hw_s *aq_hw, uint32_t val, uint32_t filte
}
void
-hw_atl_rpf_l3_l4_mng_rxqf_set(struct aq_hw_s *aq_hw, uint32_t val, uint32_t filter)
+hw_atl_rpf_l3_l4_mng_rxqf_set(struct aq_hw_s *aq_hw, uint32_t val,
+ uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L3_L4_MNG_RXQF_ADR(filter),
HW_ATL_RPF_L3_L4_MNG_RXQF_MSK,
@@ -1321,7 +1339,8 @@ rpf_vlan_flr_en_set(struct aq_hw *aq_hw, uint32_t vlan_flr_en, uint32_t filter)
}
void
-rpf_vlan_flr_act_set(struct aq_hw *aq_hw, uint32_t vlan_flr_act, uint32_t filter)
+rpf_vlan_flr_act_set(struct aq_hw *aq_hw, uint32_t vlan_flr_act,
+ uint32_t filter)
{
AQ_WRITE_REG_BIT(aq_hw, rpf_vl_act_f_adr(filter), rpf_vl_act_f_msk,
rpf_vl_act_f_shift, vlan_flr_act);
@@ -1366,21 +1385,24 @@ rpf_etht_user_priority_set(struct aq_hw *aq_hw, uint32_t etht_user_priority,
}
void
-rpf_etht_rx_queue_set(struct aq_hw *aq_hw, uint32_t etht_rx_queue, uint32_t filter)
+rpf_etht_rx_queue_set(struct aq_hw *aq_hw, uint32_t etht_rx_queue,
+ uint32_t filter)
{
AQ_WRITE_REG_BIT(aq_hw, rpf_et_rxqf_adr(filter), rpf_et_rxqf_msk,
rpf_et_rxqf_shift, etht_rx_queue);
}
void
-rpf_etht_mgt_queue_set(struct aq_hw *aq_hw, uint32_t etht_mgt_queue, uint32_t filter)
+rpf_etht_mgt_queue_set(struct aq_hw *aq_hw, uint32_t etht_mgt_queue,
+ uint32_t filter)
{
AQ_WRITE_REG_BIT(aq_hw, rpf_et_mng_rxqf_adr(filter),
rpf_et_mng_rxqf_msk, rpf_et_mng_rxqf_shift, etht_mgt_queue);
}
void
-rpf_etht_flr_act_set(struct aq_hw *aq_hw, uint32_t etht_flr_act, uint32_t filter)
+rpf_etht_flr_act_set(struct aq_hw *aq_hw, uint32_t etht_flr_act,
+ uint32_t filter)
{
AQ_WRITE_REG_BIT(aq_hw, rpf_et_actf_adr(filter), rpf_et_actf_msk,
rpf_et_actf_shift, etht_flr_act);
@@ -1565,14 +1587,16 @@ tdm_tx_dca_mode_set(struct aq_hw *aq_hw, uint32_t tx_dca_mode)
}
void
-tdm_tx_desc_dca_en_set(struct aq_hw *aq_hw, uint32_t tx_desc_dca_en, uint32_t dca)
+tdm_tx_desc_dca_en_set(struct aq_hw *aq_hw, uint32_t tx_desc_dca_en,
+ uint32_t dca)
{
AQ_WRITE_REG_BIT(aq_hw, tdm_dcaddesc_en_adr(dca), tdm_dcaddesc_en_msk,
tdm_dcaddesc_en_shift, tx_desc_dca_en);
}
void
-tdm_tx_desc_en_set(struct aq_hw *aq_hw, uint32_t tx_desc_en, uint32_t descriptor)
+tdm_tx_desc_en_set(struct aq_hw *aq_hw, uint32_t tx_desc_en,
+ uint32_t descriptor)
{
AQ_WRITE_REG_BIT(aq_hw, tdm_descden_adr(descriptor), tdm_descden_msk,
tdm_descden_shift, tx_desc_en);
@@ -1586,7 +1610,8 @@ tdm_tx_desc_head_ptr_get(struct aq_hw *aq_hw, uint32_t descriptor)
}
void
-tdm_tx_desc_len_set(struct aq_hw *aq_hw, uint32_t tx_desc_len, uint32_t descriptor)
+tdm_tx_desc_len_set(struct aq_hw *aq_hw, uint32_t tx_desc_len,
+ uint32_t descriptor)
{
AQ_WRITE_REG_BIT(aq_hw, tdm_descdlen_adr(descriptor), tdm_descdlen_msk,
tdm_descdlen_shift, tx_desc_len);
@@ -1721,7 +1746,8 @@ tpo_ipv4header_crc_offload_en_set(struct aq_hw *aq_hw,
}
void
-tpo_tcp_udp_crc_offload_en_set(struct aq_hw *aq_hw, uint32_t tcp_udp_crc_offload_en)
+tpo_tcp_udp_crc_offload_en_set(struct aq_hw *aq_hw,
+ uint32_t tcp_udp_crc_offload_en)
{
AQ_WRITE_REG_BIT(aq_hw, tpol4chk_en_adr, tpol4chk_en_msk,
tpol4chk_en_shift, tcp_udp_crc_offload_en);
@@ -1876,7 +1902,8 @@ pci_pci_reg_res_dis_set(struct aq_hw *aq_hw, uint32_t pci_reg_res_dis)
uint32_t
reg_glb_cpu_scratch_scp_get(struct aq_hw *hw, uint32_t glb_cpu_scratch_scp_idx)
{
- return AQ_READ_REG(hw, glb_cpu_scratch_scp_adr(glb_cpu_scratch_scp_idx));
+ return AQ_READ_REG(hw,
+ glb_cpu_scratch_scp_adr(glb_cpu_scratch_scp_idx));
}
void
reg_glb_cpu_scratch_scp_set(struct aq_hw *aq_hw, uint32_t glb_cpu_scratch_scp,
@@ -1892,7 +1919,8 @@ reg_glb_cpu_no_reset_scratchpad_get(struct aq_hw *hw, uint32_t index)
return AQ_READ_REG(hw, glb_cpu_no_reset_scratchpad_adr(index));
}
void
-reg_glb_cpu_no_reset_scratchpad_set(struct aq_hw* hw, uint32_t value, uint32_t index)
+reg_glb_cpu_no_reset_scratchpad_set(struct aq_hw* hw, uint32_t value,
+ uint32_t index)
{
AQ_WRITE_REG(hw, glb_cpu_no_reset_scratchpad_adr(index), value);
}
diff --git a/sys/dev/aq/aq_media.c b/sys/dev/aq/aq_media.c
index 9cdc0236bb60..f961f3bb5f0f 100644
--- a/sys/dev/aq/aq_media.c
+++ b/sys/dev/aq/aq_media.c
@@ -119,7 +119,9 @@ aq_mediachange(if_t ifp)
AQ_DBG_ENTERA("media 0x%x", user_media);
if (!(ifm->ifm_media & IFM_ETHER)) {
- device_printf(aq_dev->dev, "%s(): aq_dev interface - bad media: 0x%X", __FUNCTION__, ifm->ifm_media);
+ device_printf(aq_dev->dev,
+ "%s(): aq_dev interface - bad media: 0x%X", __FUNCTION__,
+ ifm->ifm_media);
return (0); // should never happen
}
diff --git a/sys/dev/aq/aq_ring.c b/sys/dev/aq/aq_ring.c
index 40128cbb72b8..51014ae0a9d7 100644
--- a/sys/dev/aq/aq_ring.c
+++ b/sys/dev/aq/aq_ring.c
@@ -107,9 +107,11 @@ aq_ring_rx_init(struct aq_hw *hw, struct aq_ring *ring)
rdm_rx_desc_len_set(hw, ring->rx_size / 8U, ring->index);
- device_printf(ring->dev->dev, "ring %d: __PAGESIZE=%d MCLBYTES=%d hw->max_frame_size=%d\n",
- ring->index, PAGE_SIZE, MCLBYTES, ring->rx_max_frame_size);
- rdm_rx_desc_data_buff_size_set(hw, ring->rx_max_frame_size / 1024U, ring->index);
+ device_printf(ring->dev->dev,
+ "ring %d: __PAGESIZE=%d MCLBYTES=%d hw->max_frame_size=%d\n",
+ ring->index, PAGE_SIZE, MCLBYTES, ring->rx_max_frame_size);
+ rdm_rx_desc_data_buff_size_set(hw, ring->rx_max_frame_size / 1024U,
+ ring->index);
rdm_rx_desc_head_buff_size_set(hw, 0U, ring->index);
rdm_rx_desc_head_splitting_set(hw, 0U, ring->index);
@@ -279,7 +281,8 @@ aq_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
for (iter = 0, cnt = 0, i = idx;
iter < ring->rx_size && cnt <= budget;) {
- trace_aq_rx_descr(ring->index, i, (volatile uint64_t*)&rx_desc[i]);
+ trace_aq_rx_descr(ring->index, i,
+ (volatile uint64_t*)&rx_desc[i]);
if (!rx_desc[i].wb.dd)
break;
@@ -309,8 +312,8 @@ aq_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
static void
aq_rx_set_cso_flags(aq_rx_desc_t *rx_desc, if_rxd_info_t ri)
{
- if ((rx_desc->wb.pkt_type & 0x3) == 0) { //IPv4
- if (rx_desc->wb.rx_cntl & BIT(0)){ // IPv4 csum checked
+ if ((rx_desc->wb.pkt_type & 0x3) == 0) { // IPv4
+ if (rx_desc->wb.rx_cntl & BIT(0)) { // IPv4 csum checked
ri->iri_csum_flags |= CSUM_IP_CHECKED;
if (!(rx_desc->wb.rx_stat & BIT(1)))
ri->iri_csum_flags |= CSUM_IP_VALID;
@@ -355,7 +358,8 @@ aq_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
do {
rx_desc = (aq_rx_desc_t *) &ring->rx_descs[cidx];
- trace_aq_rx_descr(ring->index, cidx, (volatile uint64_t*)rx_desc);
+ trace_aq_rx_descr(ring->index, cidx,
+ (volatile uint64_t *)rx_desc);
if ((rx_desc->wb.rx_stat & BIT(0)) != 0) {
ring->stats.rx_err++;
diff --git a/sys/dev/asmc/asmc.c b/sys/dev/asmc/asmc.c
index 5d3b97a065c9..5b2a901328d3 100644
--- a/sys/dev/asmc/asmc.c
+++ b/sys/dev/asmc/asmc.c
@@ -287,6 +287,13 @@ static const struct asmc_model asmc_models[] = {
ASMC_MBP114_TEMPS, ASMC_MBP114_TEMPNAMES, ASMC_MBP114_TEMPDESCS
},
+ {
+ "MacBookPro11,5",
+ "Apple SMC MacBook Pro Retina Core i7 (mid 2015, 15-inch, AMD GPU)",
+ ASMC_SMS_FUNCS_DISABLED, ASMC_FAN_FUNCS2, ASMC_LIGHT_FUNCS,
+ ASMC_MBP115_TEMPS, ASMC_MBP115_TEMPNAMES, ASMC_MBP115_TEMPDESCS
+ },
+
/* The Mac Mini has no SMS */
{
"Macmini1,1", "Apple SMC Mac Mini",
diff --git a/sys/dev/asmc/asmcvar.h b/sys/dev/asmc/asmcvar.h
index b6d8686d9670..102bee8a15b7 100644
--- a/sys/dev/asmc/asmcvar.h
+++ b/sys/dev/asmc/asmcvar.h
@@ -467,6 +467,41 @@ struct asmc_softc {
"Pbus", "Ambient Light", "Leftside", "Rightside", "CPU Package Core", \
"CPU Package GPU", "CPU Package Total", "System Total", "DC In" }
+/* MacBookPro11,5 - same as 11,4 but without IBLC, ICMC, and IC2C keys */
+#define ASMC_MBP115_TEMPS { "IC0C", "ID0R", "IHDC", "IPBR", "IC0R", \
+ "IO3R", "IO5R", "IM0C", "IC1C", \
+ "IC3C", "ILDC", "IAPC", "IHSC", \
+ "TC0P", "TP0P", "TM0P", \
+ "Ta0P", "Th2H", "Th1H", "TW0P", "Ts0P", \
+ "Ts1P", "TB0T", "TB1T", "TB2T", "TH0A", "TH0B", \
+ "TC1C", "TC2C", "TC3C", "TC4C", "TCXC", \
+ "TCGC", "TPCD", "TCSA", "VC0C", "VD0R", \
+ "VP0R", "ALSL", "F0Ac", "F1Ac", "PCPC", \
+ "PCPG", "PCPT", "PSTR", "PDTR", NULL }
+
+
+#define ASMC_MBP115_TEMPNAMES { "IC0C", "ID0R", "IHDC", "IPBR", "IC0R", \
+ "IO3R", "IO5R", "IM0C", "IC1C", \
+ "IC3C", "ILDC", "IAPC", "IHSC", \
+ "TC0P", "TP0P", "TM0P", \
+ "Ta0P", "Th2H", "Th1H", "TW0P", "Ts0P", \
+ "Ts1P", "TB0T", "TB1T", "TB2T", "TH0A", "TH0B", \
+ "TC1C", "TC2C", "TC3C", "TC4C", "TCXC", \
+ "TCGC", "TPCD", "TCSA", "VC0C", "VD0R", \
+ "VP0R", "ALSL", "F0Ac", "F1Ac", "PCPC", \
+ "PCPG", "PCPT", "PSTR", "PDTR" }
+
+#define ASMC_MBP115_TEMPDESCS { "CPU High (CPU, I/O)", "DC In", "SSD", "Charger (BMON)", "CPU", \
+ "Other 3.3V", "Other 5V", "Memory", "Platform Controller Hub Core", \
+ "CPU DDR", "LCD Panel", "Airport", "Thunderbolt", \
+ "CPU Proximity", "Platform Controller Hub", "Memory Proximity", "Air Flow Proximity", \
+ "Left Fin Stack", "Right Fin Stack", "Airport Proximity", "Palm Rest", "Palm Rest Actuator", \
+ "Battery Max", "Battery Sensor 1", "Battery Sensor 2", "SSD A", "SSD B", \
+ "CPU Core 1", "CPU Core 2", "CPU Core 3", "CPU Core 4", "CPU PECI Die", \
+ "Intel GPU", "Platform Controller Hub PECI", "CPU System Agent Core", "CPU VCore", "DC In", \
+ "Pbus", "Ambient Light", "Leftside", "Rightside", "CPU Package Core", \
+ "CPU Package GPU", "CPU Package Total", "System Total", "DC In" }
+
#define ASMC_MM_TEMPS { "TN0P", "TN1P", NULL }
#define ASMC_MM_TEMPNAMES { "northbridge1", "northbridge2" }
#define ASMC_MM_TEMPDESCS { "Northbridge Point 1", \
diff --git a/sys/dev/ata/ata-pci.h b/sys/dev/ata/ata-pci.h
index 630d0184c820..5ff3e344c411 100644
--- a/sys/dev/ata/ata-pci.h
+++ b/sys/dev/ata/ata-pci.h
@@ -430,8 +430,8 @@ struct ata_pci_controller {
#define ATA_HT1000_S1 0x024b1166
#define ATA_HT1000_S2 0x024a1166
#define ATA_K2 0x02401166
-#define ATA_FRODO4 0x02411166
-#define ATA_FRODO8 0x02421166
+#define ATA_FRODO8 0x02411166
+#define ATA_FRODO4 0x02421166
#define ATA_SILICON_IMAGE_ID 0x1095
#define ATA_SII3114 0x31141095
diff --git a/sys/dev/bge/if_bge.c b/sys/dev/bge/if_bge.c
index cf3084f9b768..2feb19bff677 100644
--- a/sys/dev/bge/if_bge.c
+++ b/sys/dev/bge/if_bge.c
@@ -3721,7 +3721,12 @@ bge_attach(device_t dev)
if_setgetcounterfn(ifp, bge_get_counter);
if_setsendqlen(ifp, BGE_TX_RING_CNT - 1);
if_setsendqready(ifp);
- if_sethwassist(ifp, sc->bge_csum_features);
+ /* Initially enable checksum offloading either for all of IPv4, TCP/IPv4
+ * and UDP/IPv4, or for none. This avoids problems when the interface
+ * is added to a bridge.
+ */
+ if (sc->bge_csum_features & CSUM_UDP)
+ if_sethwassist(ifp, sc->bge_csum_features);
if_setcapabilities(ifp, IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
IFCAP_VLAN_MTU);
if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
@@ -3732,6 +3737,13 @@ bge_attach(device_t dev)
if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
#endif
if_setcapenable(ifp, if_getcapabilities(ifp));
+ /*
+ * Disable TXCSUM capability initially, if UDP checksum offloading is
+ * not enabled. This avoids problems when the interface is added to a
+ * bridge.
+ */
+ if ((sc->bge_csum_features & CSUM_UDP) == 0)
+ if_setcapenablebit(ifp, 0, IFCAP_TXCSUM);
#ifdef DEVICE_POLLING
if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif
diff --git a/sys/dev/bnxt/bnxt_en/bnxt.h b/sys/dev/bnxt/bnxt_en/bnxt.h
index 0ba7b5723b91..64482a656e9d 100644
--- a/sys/dev/bnxt/bnxt_en/bnxt.h
+++ b/sys/dev/bnxt/bnxt_en/bnxt.h
@@ -455,6 +455,7 @@ struct bnxt_link_info {
uint16_t req_link_speed;
uint8_t module_status;
struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+ uint8_t active_lanes;
};
enum bnxt_phy_type {
@@ -1167,6 +1168,7 @@ struct bnxt_softc {
struct iflib_dma_info def_nq_ring_mem;
struct task def_cp_task;
int db_size;
+ int db_offset;
int legacy_db_size;
struct bnxt_doorbell_ops db_ops;
@@ -1249,6 +1251,10 @@ struct bnxt_softc {
#define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(47)
#define BNXT_FW_CAP_ENABLE_RDMA_SRIOV BIT_ULL(48)
#define BNXT_FW_CAP_RSS_TCAM BIT_ULL(49)
+
+ #define BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS BIT_ULL(61)
+ #define BNXT_SW_RES_LMT(bp) ((bp)->fw_cap & BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS)
+
uint32_t lpi_tmr_lo;
uint32_t lpi_tmr_hi;
/* copied from flags and flags2 in hwrm_port_phy_qcaps_output */
@@ -1264,6 +1270,7 @@ struct bnxt_softc {
#define BNXT_PHY_FL_NO_PAUSE (HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS2_PAUSE_UNSUPPORTED << 8)
#define BNXT_PHY_FL_NO_PFC (HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS2_PFC_UNSUPPORTED << 8)
#define BNXT_PHY_FL_BANK_SEL (HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS2_BANK_ADDR_SUPPORTED << 8)
+#define BNXT_PHY_FL_SPEEDS2 (HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS2_SPEEDS2_SUPPORTED << 8)
struct bnxt_aux_dev *aux_dev;
struct net_device *net_dev;
struct mtx en_ops_lock;
@@ -1333,6 +1340,7 @@ struct bnxt_softc {
unsigned long fw_reset_timestamp;
struct bnxt_fw_health *fw_health;
+ char board_partno[64];
};
struct bnxt_filter_info {
diff --git a/sys/dev/bnxt/bnxt_en/bnxt_hwrm.c b/sys/dev/bnxt/bnxt_en/bnxt_hwrm.c
index 9e7f4614d9f9..2a79b418fe62 100644
--- a/sys/dev/bnxt/bnxt_en/bnxt_hwrm.c
+++ b/sys/dev/bnxt/bnxt_en/bnxt_hwrm.c
@@ -1218,6 +1218,9 @@ bnxt_hwrm_func_qcaps(struct bnxt_softc *softc)
flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_DBR_PACING_SUPPORTED)
softc->fw_cap |= BNXT_FW_CAP_DBR_PACING_SUPPORTED;
+ if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
+
if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_GENERIC_STATS_SUPPORTED)
softc->fw_cap |= BNXT_FW_CAP_GENERIC_STATS;
func->fw_fid = le16toh(resp->fid);
@@ -1309,6 +1312,7 @@ bnxt_hwrm_func_qcfg(struct bnxt_softc *softc)
goto end;
softc->legacy_db_size = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
+ softc->db_offset = le16toh(resp->legacy_l2_db_size_kb) * 1024;
if (BNXT_CHIP_P5(softc)) {
if (BNXT_PF(softc))
@@ -1316,6 +1320,7 @@ bnxt_hwrm_func_qcfg(struct bnxt_softc *softc)
else
min_db_offset = DB_VF_OFFSET_P5;
softc->legacy_db_size = min_db_offset;
+ softc->db_offset = min_db_offset;
}
softc->db_size = roundup2(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
@@ -2912,10 +2917,14 @@ bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *softc)
}
link_info->duplex_setting = resp->duplex_cfg;
- if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK)
+ if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) {
link_info->link_speed = le16toh(resp->link_speed);
- else
+ if (softc->phy_flags & BNXT_PHY_FL_SPEEDS2)
+ link_info->active_lanes = resp->active_lanes;
+ } else {
link_info->link_speed = 0;
+ link_info->active_lanes = 0;
+ }
link_info->force_link_speed = le16toh(resp->force_link_speed);
link_info->auto_link_speeds = le16toh(resp->auto_link_speed);
link_info->support_speeds = le16toh(resp->support_speeds);
diff --git a/sys/dev/bnxt/bnxt_en/bnxt_mgmt.c b/sys/dev/bnxt/bnxt_en/bnxt_mgmt.c
index bbc12b96d8c6..98ae9848c42b 100644
--- a/sys/dev/bnxt/bnxt_en/bnxt_mgmt.c
+++ b/sys/dev/bnxt/bnxt_en/bnxt_mgmt.c
@@ -387,15 +387,18 @@ bnxt_mgmt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag,
int ret = 0;
switch(cmd) {
- case BNXT_MGMT_OPCODE_GET_DEV_INFO:
+ case IO_BNXT_MGMT_OPCODE_GET_DEV_INFO:
+ case IOW_BNXT_MGMT_OPCODE_GET_DEV_INFO:
ret = bnxt_mgmt_get_dev_info(dev, cmd, data, flag, td);
break;
- case BNXT_MGMT_OPCODE_PASSTHROUGH_HWRM:
+ case IO_BNXT_MGMT_OPCODE_PASSTHROUGH_HWRM:
+ case IOW_BNXT_MGMT_OPCODE_PASSTHROUGH_HWRM:
mtx_lock(&mgmt_lock);
ret = bnxt_mgmt_process_hwrm(dev, cmd, data, flag, td);
mtx_unlock(&mgmt_lock);
break;
- case BNXT_MGMT_OPCODE_DCB_OPS:
+ case IO_BNXT_MGMT_OPCODE_DCB_OPS:
+ case IOW_BNXT_MGMT_OPCODE_DCB_OPS:
ret = bnxt_mgmt_process_dcb(dev, cmd, data, flag, td);
break;
default:
diff --git a/sys/dev/bnxt/bnxt_en/bnxt_mgmt.h b/sys/dev/bnxt/bnxt_en/bnxt_mgmt.h
index 8489a223adef..5b94184b1646 100644
--- a/sys/dev/bnxt/bnxt_en/bnxt_mgmt.h
+++ b/sys/dev/bnxt/bnxt_en/bnxt_mgmt.h
@@ -39,9 +39,14 @@
#define DRIVER_NAME "if_bnxt"
-#define BNXT_MGMT_OPCODE_GET_DEV_INFO 0x80000000
-#define BNXT_MGMT_OPCODE_PASSTHROUGH_HWRM 0x80000001
-#define BNXT_MGMT_OPCODE_DCB_OPS 0x80000002
+
+#define IOW_BNXT_MGMT_OPCODE_GET_DEV_INFO _IOW(0, 0, 0)
+#define IOW_BNXT_MGMT_OPCODE_PASSTHROUGH_HWRM _IOW(0, 1, 0)
+#define IOW_BNXT_MGMT_OPCODE_DCB_OPS _IOW(0, 2, 0)
+
+#define IO_BNXT_MGMT_OPCODE_GET_DEV_INFO _IO(0, 0)
+#define IO_BNXT_MGMT_OPCODE_PASSTHROUGH_HWRM _IO(0, 1)
+#define IO_BNXT_MGMT_OPCODE_DCB_OPS _IO(0, 2)
#define BNXT_MGMT_MAX_HWRM_REQ_LENGTH HWRM_MAX_REQ_LEN
#define BNXT_MGMT_MAX_HWRM_RESP_LENGTH (512)
diff --git a/sys/dev/bnxt/bnxt_en/bnxt_ulp.c b/sys/dev/bnxt/bnxt_en/bnxt_ulp.c
index 3c1f62cb4da3..c6d862a36a9a 100644
--- a/sys/dev/bnxt/bnxt_en/bnxt_ulp.c
+++ b/sys/dev/bnxt/bnxt_en/bnxt_ulp.c
@@ -125,7 +125,7 @@ static void bnxt_fill_msix_vecs(struct bnxt_softc *bp, struct bnxt_msix_entry *e
ent[i].vector = bp->irq_tbl[idx + i].vector;
ent[i].ring_idx = idx + i;
if (BNXT_CHIP_P5_PLUS(bp))
- ent[i].db_offset = DB_PF_OFFSET_P5;
+ ent[i].db_offset = bp->db_offset;
else
ent[i].db_offset = (idx + i) * 0x80;
@@ -449,6 +449,7 @@ static inline void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt_soft
edev->pdev = bp->pdev;
edev->softc = bp;
edev->l2_db_size = bp->db_size;
+ edev->l2_db_offset = bp->db_offset;
mtx_init(&bp->en_ops_lock, "Ethernet ops lock", NULL, MTX_DEF);
if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
@@ -457,9 +458,12 @@ static inline void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt_soft
edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
if (bp->is_asym_q)
edev->flags |= BNXT_EN_FLAG_ASYM_Q;
+ if (BNXT_SW_RES_LMT(bp))
+ edev->flags |= BNXT_EN_FLAG_SW_RES_LMT;
edev->hwrm_bar = bp->hwrm_bar;
edev->port_partition_type = bp->port_partition_type;
edev->ulp_version = BNXT_ULP_VERSION;
+ memcpy(edev->board_part_number, bp->board_partno, BNXT_VPD_PN_FLD_LEN - 1);
}
int bnxt_rdma_aux_device_del(struct bnxt_softc *softc)
diff --git a/sys/dev/bnxt/bnxt_en/bnxt_ulp.h b/sys/dev/bnxt/bnxt_en/bnxt_ulp.h
index 0108293046d7..53bb51b07135 100644
--- a/sys/dev/bnxt/bnxt_en/bnxt_ulp.h
+++ b/sys/dev/bnxt/bnxt_en/bnxt_ulp.h
@@ -90,10 +90,15 @@ struct bnxt_en_dev {
#define BNXT_EN_FLAG_ULP_STOPPED 0x8
#define BNXT_EN_FLAG_ASYM_Q 0x10
#define BNXT_EN_FLAG_MULTI_HOST 0x20
+ #define BNXT_EN_FLAG_SW_RES_LMT 0x400
#define BNXT_EN_ASYM_Q(edev) ((edev)->flags & BNXT_EN_FLAG_ASYM_Q)
#define BNXT_EN_MH(edev) ((edev)->flags & BNXT_EN_FLAG_MULTI_HOST)
+#define BNXT_EN_SW_RES_LMT(edev) ((edev)->flags & BNXT_EN_FLAG_SW_RES_LMT)
const struct bnxt_en_ops *en_ops;
struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
+ int l2_db_offset; /* Doorbell BAR offset
+ * of the non-cacheable region.
+ */
int l2_db_size; /* Doorbell BAR size in
* bytes mapped by L2
* driver.
@@ -121,6 +126,9 @@ struct bnxt_en_dev {
struct bnxt_dbr *en_dbr;
struct bnxt_bar_info hwrm_bar;
u32 espeed;
+ uint8_t lanes;
+ #define BNXT_VPD_PN_FLD_LEN 32
+ char board_part_number[BNXT_VPD_PN_FLD_LEN];
};
struct bnxt_en_ops {
diff --git a/sys/dev/bnxt/bnxt_en/if_bnxt.c b/sys/dev/bnxt/bnxt_en/if_bnxt.c
index 471e26a4b252..dea6fd68181e 100644
--- a/sys/dev/bnxt/bnxt_en/if_bnxt.c
+++ b/sys/dev/bnxt/bnxt_en/if_bnxt.c
@@ -1198,8 +1198,13 @@ static int bnxt_alloc_ctx_mem(struct bnxt_softc *softc)
max_srqs = ctxm->max_entries;
if (softc->flags & BNXT_FLAG_ROCE_CAP) {
pg_lvl = 2;
- extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
- extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ if (BNXT_SW_RES_LMT(softc)) {
+ extra_qps = max_qps - l2_qps - qp1_qps;
+ extra_srqs = max_srqs - srqs;
+ } else {
+ extra_qps = min_t(uint32_t, 65536, max_qps - l2_qps - qp1_qps);
+ extra_srqs = min_t(uint32_t, 8192, max_srqs - srqs);
+ }
}
ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
@@ -2669,6 +2674,13 @@ bnxt_attach_pre(if_ctx_t ctx)
softc->state_bv = bit_alloc(BNXT_STATE_MAX, M_DEVBUF,
M_WAITOK|M_ZERO);
+ if (BNXT_PF(softc)) {
+ const char *part_num;
+
+ if (pci_get_vpd_readonly(softc->dev, "PN", &part_num) == 0)
+ snprintf(softc->board_partno, sizeof(softc->board_partno), "%s", part_num);
+ }
+
return (rc);
failed:
@@ -3280,11 +3292,10 @@ bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
- if (link_info->link_up)
- ifmr->ifm_status |= IFM_ACTIVE;
- else
- ifmr->ifm_status &= ~IFM_ACTIVE;
+ if (!link_info->link_up)
+ return;
+ ifmr->ifm_status |= IFM_ACTIVE;
if (link_info->duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL)
ifmr->ifm_active |= IFM_FDX;
else
@@ -4295,7 +4306,7 @@ bnxt_i2c_req(if_ctx_t ctx, struct ifi2creq *i2c)
return -EOPNOTSUPP;
- rc = bnxt_read_sfp_module_eeprom_info(softc, I2C_DEV_ADDR_A0, 0, 0, 0,
+ rc = bnxt_read_sfp_module_eeprom_info(softc, i2c->dev_addr, 0, 0, 0,
i2c->offset, i2c->len, data);
return rc;
@@ -4790,9 +4801,11 @@ bnxt_report_link(struct bnxt_softc *softc)
const char *duplex = NULL, *flow_ctrl = NULL;
const char *signal_mode = "";
- if(softc->edev)
+ if(softc->edev) {
softc->edev->espeed =
bnxt_fw_to_ethtool_speed(link_info->link_speed);
+ softc->edev->lanes = link_info->active_lanes;
+ }
if (link_info->link_up == link_info->last_link_up) {
if (!link_info->link_up)
diff --git a/sys/dev/bnxt/bnxt_re/bnxt_re-abi.h b/sys/dev/bnxt/bnxt_re/bnxt_re-abi.h
index 8f48609e7f6f..c7ca19f29046 100644
--- a/sys/dev/bnxt/bnxt_re/bnxt_re-abi.h
+++ b/sys/dev/bnxt/bnxt_re/bnxt_re-abi.h
@@ -34,7 +34,7 @@
#include <asm/types.h>
#include <linux/types.h>
-#define BNXT_RE_ABI_VERSION 6
+#define BNXT_RE_ABI_VERSION 7
enum {
BNXT_RE_COMP_MASK_UCNTX_WC_DPI_ENABLED = 0x01,
@@ -43,12 +43,14 @@ enum {
BNXT_RE_COMP_MASK_UCNTX_MQP_EX_SUPPORTED = 0x08,
BNXT_RE_COMP_MASK_UCNTX_DBR_PACING_ENABLED = 0x10,
BNXT_RE_COMP_MASK_UCNTX_DBR_RECOVERY_ENABLED = 0x20,
- BNXT_RE_COMP_MASK_UCNTX_HW_RETX_ENABLED = 0x40
+ BNXT_RE_COMP_MASK_UCNTX_HW_RETX_ENABLED = 0x40,
+ BNXT_RE_COMP_MASK_UCNTX_CMASK_HAVE_MODE = 0x80,
};
enum {
BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT = 0x01,
- BNXT_RE_COMP_MASK_REQ_UCNTX_RSVD_WQE = 0x02
+ BNXT_RE_COMP_MASK_REQ_UCNTX_RSVD_WQE = 0x02,
+ BNXT_RE_COMP_MASK_REQ_UCNTX_VAR_WQE_SUPPORT = 0x03
};
struct bnxt_re_uctx_req {
@@ -66,7 +68,7 @@ struct bnxt_re_uctx_resp {
__u32 max_cqd;
__u32 chip_id0;
__u32 chip_id1;
- __u32 modes;
+ __u32 mode;
__aligned_u64 comp_mask;
} __attribute__((packed));
@@ -134,6 +136,8 @@ struct bnxt_re_qp_req {
__u64 qpsva;
__u64 qprva;
__u64 qp_handle;
+ __u64 comp_mask;
+ __u32 sq_slots;
} __attribute__((packed));
struct bnxt_re_qp_resp {
diff --git a/sys/dev/bnxt/bnxt_re/bnxt_re.h b/sys/dev/bnxt/bnxt_re/bnxt_re.h
index fe7a27f4e216..0afc8566c020 100644
--- a/sys/dev/bnxt/bnxt_re/bnxt_re.h
+++ b/sys/dev/bnxt/bnxt_re/bnxt_re.h
@@ -535,6 +535,7 @@ struct bnxt_re_dev {
bool is_virtfn;
u32 num_vfs;
u32 espeed;
+ u8 lanes;
/*
* For storing the speed of slave interfaces.
* Same as espeed when bond is not configured
@@ -716,7 +717,7 @@ void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 removal_type,
void bnxt_re_destroy_lag(struct bnxt_re_dev **rdev);
int bnxt_re_add_device(struct bnxt_re_dev **rdev,
struct ifnet *netdev,
- u8 qp_mode, u8 op_type, u8 wqe_mode, u32 num_msix_requested,
+ u8 qp_mode, u8 op_type, u32 num_msix_requested,
struct auxiliary_device *aux_dev);
void bnxt_re_create_base_interface(bool primary);
int bnxt_re_schedule_work(struct bnxt_re_dev *rdev, unsigned long event,
@@ -1069,6 +1070,15 @@ static inline void bnxt_re_set_def_do_pacing(struct bnxt_re_dev *rdev)
rdev->qplib_res.pacing_data->do_pacing = rdev->dbr_def_do_pacing;
}
+static inline bool bnxt_re_is_var_size_supported(struct bnxt_re_dev *rdev,
+ struct bnxt_re_ucontext *uctx)
+{
+ if (uctx)
+ return uctx->cmask & BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED;
+ else
+ return rdev->chip_ctx->modes.wqe_mode;
+}
+
static inline void bnxt_re_set_pacing_dev_state(struct bnxt_re_dev *rdev)
{
rdev->qplib_res.pacing_data->dev_err_state =
diff --git a/sys/dev/bnxt/bnxt_re/ib_verbs.c b/sys/dev/bnxt/bnxt_re/ib_verbs.c
index 0383a16757aa..32899abab9ff 100644
--- a/sys/dev/bnxt/bnxt_re/ib_verbs.c
+++ b/sys/dev/bnxt/bnxt_re/ib_verbs.c
@@ -241,46 +241,99 @@ int bnxt_re_modify_device(struct ib_device *ibdev,
return 0;
}
-static void __to_ib_speed_width(u32 espeed, u8 *speed, u8 *width)
+static void __to_ib_speed_width(u32 espeed, u8 lanes, u8 *speed, u8 *width)
{
- switch (espeed) {
- case SPEED_1000:
- *speed = IB_SPEED_SDR;
+ if (!lanes) {
+ switch (espeed) {
+ case SPEED_1000:
+ *speed = IB_SPEED_SDR;
+ *width = IB_WIDTH_1X;
+ break;
+ case SPEED_10000:
+ *speed = IB_SPEED_QDR;
+ *width = IB_WIDTH_1X;
+ break;
+ case SPEED_20000:
+ *speed = IB_SPEED_DDR;
+ *width = IB_WIDTH_4X;
+ break;
+ case SPEED_25000:
+ *speed = IB_SPEED_EDR;
+ *width = IB_WIDTH_1X;
+ break;
+ case SPEED_40000:
+ *speed = IB_SPEED_QDR;
+ *width = IB_WIDTH_4X;
+ break;
+ case SPEED_50000:
+ *speed = IB_SPEED_EDR;
+ *width = IB_WIDTH_2X;
+ break;
+ case SPEED_100000:
+ *speed = IB_SPEED_EDR;
+ *width = IB_WIDTH_4X;
+ break;
+ case SPEED_200000:
+ *speed = IB_SPEED_HDR;
+ *width = IB_WIDTH_4X;
+ break;
+ case SPEED_400000:
+ *speed = IB_SPEED_NDR;
+ *width = IB_WIDTH_4X;
+ break;
+ default:
+ *speed = IB_SPEED_SDR;
+ *width = IB_WIDTH_1X;
+ break;
+ }
+ return;
+ }
+
+ switch (lanes) {
+ case 1:
*width = IB_WIDTH_1X;
break;
- case SPEED_10000:
- *speed = IB_SPEED_QDR;
- *width = IB_WIDTH_1X;
+ case 2:
+ *width = IB_WIDTH_2X;
break;
- case SPEED_20000:
- *speed = IB_SPEED_DDR;
+ case 4:
*width = IB_WIDTH_4X;
break;
- case SPEED_25000:
- *speed = IB_SPEED_EDR;
+ case 8:
+ *width = IB_WIDTH_8X;
+ break;
+ case 12:
+ *width = IB_WIDTH_12X;
+ break;
+ default:
*width = IB_WIDTH_1X;
+ }
+
+ switch (espeed / lanes) {
+ case SPEED_2500:
+ *speed = IB_SPEED_SDR;
break;
- case SPEED_40000:
- *speed = IB_SPEED_QDR;
- *width = IB_WIDTH_4X;
+ case SPEED_5000:
+ *speed = IB_SPEED_DDR;
break;
- case SPEED_50000:
- *speed = IB_SPEED_EDR;
- *width = IB_WIDTH_2X;
+ case SPEED_10000:
+ *speed = IB_SPEED_FDR10;
break;
- case SPEED_100000:
+ case SPEED_14000:
+ *speed = IB_SPEED_FDR;
+ break;
+ case SPEED_25000:
*speed = IB_SPEED_EDR;
- *width = IB_WIDTH_4X;
break;
- case SPEED_200000:
+ case SPEED_50000:
*speed = IB_SPEED_HDR;
- *width = IB_WIDTH_4X;
+ break;
+ case SPEED_100000:
+ *speed = IB_SPEED_NDR;
break;
default:
*speed = IB_SPEED_SDR;
- *width = IB_WIDTH_1X;
- break;
- }
+ }
}
/* Port */
@@ -318,9 +371,10 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
port_attr->subnet_timeout = 0;
port_attr->init_type_reply = 0;
rdev->espeed = rdev->en_dev->espeed;
+ rdev->lanes = rdev->en_dev->lanes;
if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
- __to_ib_speed_width(rdev->espeed, &active_speed,
+ __to_ib_speed_width(rdev->espeed, rdev->lanes, &active_speed,
&active_width);
port_attr->active_speed = active_speed;
@@ -1613,15 +1667,18 @@ static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
align = sizeof(struct sq_send_hdr);
ilsize = ALIGN(init_attr->cap.max_inline_data, align);
- sq->wqe_size = bnxt_re_get_swqe_size(ilsize, sq->max_sge);
- if (sq->wqe_size > _get_swqe_sz(dev_attr->max_qp_sges))
- return -EINVAL;
- /* For Cu/Wh and gen p5 backward compatibility mode
- * wqe size is fixed to 128 bytes
+ /* For gen p4 and gen p5 fixed wqe compatibility mode
+ * wqe size is fixed to 128 bytes - ie 6 SGEs
*/
- if (sq->wqe_size < _get_swqe_sz(dev_attr->max_qp_sges) &&
- qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
- sq->wqe_size = _get_swqe_sz(dev_attr->max_qp_sges);
+ if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) {
+ sq->wqe_size = _get_swqe_sz(BNXT_STATIC_MAX_SGE);
+ sq->max_sge = BNXT_STATIC_MAX_SGE;
+ } else {
+ sq->wqe_size = bnxt_re_get_swqe_size(ilsize, sq->max_sge);
+ if (sq->wqe_size > _get_swqe_sz(dev_attr->max_qp_sges))
+ return -EINVAL;
+ }
+
if (init_attr->cap.max_inline_data) {
qplqp->max_inline_data = sq->wqe_size -
@@ -1666,23 +1723,28 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev,
return rc;
bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
+ bytes = PAGE_ALIGN(bytes);
/* Consider mapping PSN search memory only for RC QPs. */
if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
psn_sz = _is_chip_gen_p5_p7(rdev->chip_ctx) ?
sizeof(struct sq_psn_search_ext) :
sizeof(struct sq_psn_search);
- if (rdev->dev_attr && BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags))
+ if (rdev->dev_attr && _is_host_msn_table(rdev->dev_attr->dev_cap_ext_flags2))
psn_sz = sizeof(struct sq_msn_search);
- psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
- qplib_qp->sq.max_wqe :
- ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
- sizeof(struct bnxt_qplib_sge));
- if (BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags))
+ if (cntx && bnxt_re_is_var_size_supported(rdev, cntx)) {
+ psn_nume = ureq.sq_slots;
+ } else {
+ psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
+ qplib_qp->sq.max_wqe :
+ ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
+ sizeof(struct bnxt_qplib_sge));
+ }
+ if (rdev->dev_attr && _is_host_msn_table(rdev->dev_attr->dev_cap_ext_flags2))
psn_nume = roundup_pow_of_two(psn_nume);
bytes += (psn_nume * psn_sz);
+ bytes = PAGE_ALIGN(bytes);
}
- bytes = PAGE_ALIGN(bytes);
umem = ib_umem_get_compat(rdev, context, udata, ureq.qpsva, bytes,
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(umem)) {
@@ -1857,6 +1919,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp(struct bnxt_re_pd *pd,
/* Shadow QP SQ depth should be same as QP1 RQ depth */
qp->qplib_qp.sq.wqe_size = bnxt_re_get_swqe_size(0, 6);
qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
+ qp->qplib_qp.sq.max_sw_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.sq.max_sge = 2;
/* Q full delta can be 1 since it is internal QP */
qp->qplib_qp.sq.q_full_delta = 1;
@@ -1868,6 +1931,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp(struct bnxt_re_pd *pd,
qp->qplib_qp.rq.wqe_size = _max_rwqe_sz(6); /* 128 Byte wqe size */
qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
+ qp->qplib_qp.rq.max_sw_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
qp->qplib_qp.rq.sginfo.pgsize = PAGE_SIZE;
qp->qplib_qp.rq.sginfo.pgshft = PAGE_SHIFT;
@@ -1940,6 +2004,7 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
entries = init_attr->cap.max_recv_wr + 1;
entries = bnxt_re_init_depth(entries, cntx);
rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+ rq->max_sw_wqe = rq->max_wqe;
rq->q_full_delta = 0;
rq->sginfo.pgsize = PAGE_SIZE;
rq->sginfo.pgshft = PAGE_SHIFT;
@@ -1964,10 +2029,11 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
struct ib_qp_init_attr *init_attr,
- void *cntx)
+ void *cntx, struct ib_udata *udata)
{
struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_qp_req ureq;
struct bnxt_re_dev *rdev;
struct bnxt_qplib_q *sq;
int diff = 0;
@@ -1979,35 +2045,53 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
sq = &qplqp->sq;
dev_attr = rdev->dev_attr;
- sq->max_sge = init_attr->cap.max_send_sge;
- if (sq->max_sge > dev_attr->max_qp_sges) {
- sq->max_sge = dev_attr->max_qp_sges;
- init_attr->cap.max_send_sge = sq->max_sge;
+ if (udata) {
+ rc = ib_copy_from_udata(&ureq, udata,
+ min(udata->inlen, sizeof(ureq)));
+ if (rc)
+ return -EINVAL;
}
- rc = bnxt_re_setup_swqe_size(qp, init_attr);
- if (rc)
- return rc;
- /*
- * Change the SQ depth if user has requested minimum using
- * configfs. Only supported for kernel consumers. Setting
- * min_tx_depth to 4096 to handle iser SQ full condition
- * in most of the newer OS distros
- */
+
+ sq->max_sge = init_attr->cap.max_send_sge;
entries = init_attr->cap.max_send_wr;
- if (!cntx && rdev->min_tx_depth && init_attr->qp_type != IB_QPT_GSI) {
+ if (cntx && udata && qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) {
+ sq->max_wqe = ureq.sq_slots;
+ sq->max_sw_wqe = ureq.sq_slots;
+ sq->wqe_size = sizeof(struct sq_sge);
+ } else {
+ if (sq->max_sge > dev_attr->max_qp_sges) {
+ sq->max_sge = dev_attr->max_qp_sges;
+ init_attr->cap.max_send_sge = sq->max_sge;
+ }
+ rc = bnxt_re_setup_swqe_size(qp, init_attr);
+ if (rc)
+ return rc;
/*
- * If users specify any value greater than 1 use min_tx_depth
- * provided by user for comparison. Else, compare it with the
- * BNXT_RE_MIN_KERNEL_QP_TX_DEPTH and adjust it accordingly.
+ * Change the SQ depth if user has requested minimum using
+ * configfs. Only supported for kernel consumers. Setting
+ * min_tx_depth to 4096 to handle iser SQ full condition
+ * in most of the newer OS distros
*/
- if (rdev->min_tx_depth > 1 && entries < rdev->min_tx_depth)
- entries = rdev->min_tx_depth;
- else if (entries < BNXT_RE_MIN_KERNEL_QP_TX_DEPTH)
- entries = BNXT_RE_MIN_KERNEL_QP_TX_DEPTH;
- }
- diff = bnxt_re_get_diff(cntx, rdev->chip_ctx);
- entries = bnxt_re_init_depth(entries + diff + 1, cntx);
- sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
+
+ if (!cntx && rdev->min_tx_depth && init_attr->qp_type != IB_QPT_GSI) {
+ /*
+ * If users specify any value greater than 1 use min_tx_depth
+ * provided by user for comparison. Else, compare it with the
+ * BNXT_RE_MIN_KERNEL_QP_TX_DEPTH and adjust it accordingly.
+ */
+ if (rdev->min_tx_depth > 1 && entries < rdev->min_tx_depth)
+ entries = rdev->min_tx_depth;
+ else if (entries < BNXT_RE_MIN_KERNEL_QP_TX_DEPTH)
+ entries = BNXT_RE_MIN_KERNEL_QP_TX_DEPTH;
+ }
+ diff = bnxt_re_get_diff(cntx, rdev->chip_ctx);
+ entries = bnxt_re_init_depth(entries + diff + 1, cntx);
+ sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
+ if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
+ sq->max_sw_wqe = bnxt_qplib_get_depth(sq, qplqp->wqe_mode, true);
+ else
+ sq->max_sw_wqe = sq->max_wqe;
+ }
sq->q_full_delta = diff + 1;
/*
* Reserving one slot for Phantom WQE. Application can
@@ -2073,11 +2157,6 @@ out:
return qptype;
}
-static int bnxt_re_init_qp_wqe_mode(struct bnxt_re_dev *rdev)
-{
- return rdev->chip_ctx->modes.wqe_mode;
-}
-
static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
@@ -2111,7 +2190,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
goto out;
}
qplqp->type = (u8)qptype;
- qplqp->wqe_mode = bnxt_re_init_qp_wqe_mode(rdev);
+ qplqp->wqe_mode = bnxt_re_is_var_size_supported(rdev, cntx);
ether_addr_copy(qplqp->smac, rdev->dev_addr);
if (init_attr->qp_type == IB_QPT_RC) {
@@ -2158,7 +2237,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
bnxt_re_adjust_gsi_rq_attr(qp);
/* Setup SQ */
- rc = bnxt_re_init_sq_attr(qp, init_attr, cntx);
+ rc = bnxt_re_init_sq_attr(qp, init_attr, cntx, udata);
if (rc)
goto out;
if (init_attr->qp_type == IB_QPT_GSI)
@@ -2794,6 +2873,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
if (entries > dev_attr->max_qp_wqes)
entries = dev_attr->max_qp_wqes;
qp->qplib_qp.rq.max_wqe = entries;
+ qp->qplib_qp.rq.max_sw_wqe = qp->qplib_qp.rq.max_wqe;
qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
qp_attr->cap.max_recv_wr;
qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
@@ -5294,11 +5374,9 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *uctx_in,
}
genp5 = _is_chip_gen_p5_p7(cctx);
- if (BNXT_RE_ABI_VERSION > 5) {
- resp.modes = genp5 ? cctx->modes.wqe_mode : 0;
- if (rdev->dev_attr && BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags))
- resp.comp_mask = BNXT_RE_COMP_MASK_UCNTX_HW_RETX_ENABLED;
- }
+ resp.mode = genp5 ? cctx->modes.wqe_mode : 0;
+ if (rdev->dev_attr && _is_host_msn_table(rdev->dev_attr->dev_cap_ext_flags2))
+ resp.comp_mask = BNXT_RE_COMP_MASK_UCNTX_HW_RETX_ENABLED;
resp.pg_size = PAGE_SIZE;
resp.cqe_sz = sizeof(struct cq_base);
@@ -5331,6 +5409,12 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *uctx_in,
if (bnxt_re_init_rsvd_wqe_flag(&ureq, &resp, genp5))
dev_warn(rdev_to_dev(rdev),
"Rsvd wqe in use! Try the updated library.\n");
+ if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_VAR_WQE_SUPPORT) {
+ resp.comp_mask |= BNXT_RE_COMP_MASK_UCNTX_CMASK_HAVE_MODE;
+ resp.mode = rdev->chip_ctx->modes.wqe_mode;
+ if (resp.mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
+ resp.comp_mask |= BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED;
+ }
} else {
dev_warn(rdev_to_dev(rdev),
"Enabled roundup logic. Update the library!\n");
diff --git a/sys/dev/bnxt/bnxt_re/ib_verbs.h b/sys/dev/bnxt/bnxt_re/ib_verbs.h
index cb9f7974e92d..66d15dd2b767 100644
--- a/sys/dev/bnxt/bnxt_re/ib_verbs.h
+++ b/sys/dev/bnxt/bnxt_re/ib_verbs.h
@@ -49,10 +49,22 @@ struct bnxt_re_dev;
#define SPEED_1000 1000
#endif
+#ifndef SPEED_2500
+#define SPEED_2500 2500
+#endif
+
+#ifndef SPEED_5000
+#define SPEED_5000 5000
+#endif
+
#ifndef SPEED_10000
#define SPEED_10000 10000
#endif
+#ifndef SPEED_14000
+#define SPEED_14000 14000
+#endif
+
#ifndef SPEED_20000
#define SPEED_20000 20000
#endif
@@ -77,10 +89,18 @@ struct bnxt_re_dev;
#define SPEED_200000 200000
#endif
+#ifndef SPEED_400000
+#define SPEED_400000 400000
+#endif
+
#ifndef IB_SPEED_HDR
#define IB_SPEED_HDR 64
#endif
+#ifndef IB_SPEED_NDR
+#define IB_SPEED_NDR 128
+#endif
+
#define RDMA_NETWORK_IPV4 1
#define RDMA_NETWORK_IPV6 2
@@ -488,6 +508,11 @@ static inline int bnxt_re_init_pow2_flag(struct bnxt_re_uctx_req *req,
return 0;
}
+enum {
+ BNXT_RE_UCNTX_CAP_POW2_DISABLED = 0x1ULL,
+ BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED = 0x2ULL,
+};
+
static inline u32 bnxt_re_init_depth(u32 ent, struct bnxt_re_ucontext *uctx)
{
return uctx ? (uctx->cmask & BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED) ?
diff --git a/sys/dev/bnxt/bnxt_re/main.c b/sys/dev/bnxt/bnxt_re/main.c
index 3d26d21f3fc7..01c2710bc3ea 100644
--- a/sys/dev/bnxt/bnxt_re/main.c
+++ b/sys/dev/bnxt/bnxt_re/main.c
@@ -1101,7 +1101,6 @@ static int bnxt_re_handle_start(struct auxiliary_device *adev)
rc = bnxt_re_add_device(&rdev, real_dev,
en_info->gsi_mode,
BNXT_RE_POST_RECOVERY_INIT,
- en_info->wqe_mode,
en_info->num_msix_requested, adev);
if (rc) {
/* Add device failed. Unregister the device.
@@ -1411,12 +1410,14 @@ static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
dev_info(rdev_to_dev(rdev),
"Couldn't get DB bar size, Low latency framework is disabled\n");
/* set register offsets for both UC and WC */
- if (_is_chip_p7(cctx))
- res->dpi_tbl.ucreg.offset = offset;
- else
+ if (_is_chip_p7(cctx)) {
+ res->dpi_tbl.ucreg.offset = en_dev->l2_db_offset;
+ res->dpi_tbl.wcreg.offset = en_dev->l2_db_size;
+ } else {
res->dpi_tbl.ucreg.offset = res->is_vf ? BNXT_QPLIB_DBR_VF_DB_OFFSET :
BNXT_QPLIB_DBR_PF_DB_OFFSET;
- res->dpi_tbl.wcreg.offset = res->dpi_tbl.ucreg.offset;
+ res->dpi_tbl.wcreg.offset = res->dpi_tbl.ucreg.offset;
+ }
/* If WC mapping is disabled by L2 driver then en_dev->l2_db_size
* is equal to the DB-Bar actual size. This indicates that L2
@@ -1433,15 +1434,15 @@ static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
return;
}
-static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
+static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_chip_ctx *cctx;
struct bnxt_en_dev *en_dev;
en_dev = rdev->en_dev;
cctx = rdev->chip_ctx;
- cctx->modes.wqe_mode = _is_chip_gen_p5_p7(rdev->chip_ctx) ?
- mode : BNXT_QPLIB_WQE_MODE_STATIC;
+ cctx->modes.wqe_mode = _is_chip_p7(rdev->chip_ctx) ?
+ BNXT_QPLIB_WQE_MODE_VARIABLE : BNXT_QPLIB_WQE_MODE_STATIC;
cctx->modes.te_bypass = false;
if (bnxt_re_hwrm_qcaps(rdev))
dev_err(rdev_to_dev(rdev),
@@ -1490,7 +1491,7 @@ static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
kfree(chip_ctx);
}
-static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
+static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
@@ -1525,7 +1526,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
rc = -ENOMEM;
goto fail;
}
- bnxt_re_set_drv_mode(rdev, wqe_mode);
+ bnxt_re_set_drv_mode(rdev);
bnxt_re_set_db_offset(rdev);
rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
@@ -2029,11 +2030,30 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc);
}
+static ssize_t show_board_id(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
+ char buffer[BNXT_VPD_PN_FLD_LEN] = {};
+
+ if (!rdev->is_virtfn)
+ memcpy(buffer, rdev->en_dev->board_part_number,
+ BNXT_VPD_PN_FLD_LEN - 1);
+ else
+ scnprintf(buffer, BNXT_VPD_PN_FLD_LEN,
+ "0x%x-VF", rdev->en_dev->pdev->device);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", buffer);
+}
+
static DEVICE_ATTR(hw_rev, 0444, show_rev, NULL);
static DEVICE_ATTR(hca_type, 0444, show_hca, NULL);
+static DEVICE_ATTR(board_id, 0444, show_board_id, NULL);
+
static struct device_attribute *bnxt_re_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_hca_type
+ &dev_attr_hca_type,
+ &dev_attr_board_id
};
int ib_register_device_compat(struct bnxt_re_dev *rdev)
@@ -3530,7 +3550,7 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
}
}
-static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type, u8 wqe_mode)
+static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
{
struct bnxt_re_ring_attr rattr = {};
struct bnxt_qplib_creq_ctx *creq;
@@ -3545,7 +3565,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type, u8 wqe_mode)
}
set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
- rc = bnxt_re_setup_chip_ctx(rdev, wqe_mode);
+ rc = bnxt_re_setup_chip_ctx(rdev);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to get chip context rc 0x%x", rc);
bnxt_re_unregister_netdev(rdev);
@@ -3592,19 +3612,24 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type, u8 wqe_mode)
goto release_rtnl;
}
+ set_bit(BNXT_RE_FLAG_NET_RING_ALLOC, &rdev->flags);
+
if (!rdev->chip_ctx)
goto release_rtnl;
- /* Program the NQ ID for DBQ notification */
- if (rdev->chip_ctx->modes.dbr_pacing_v0 ||
- bnxt_qplib_dbr_pacing_en(rdev->chip_ctx) ||
- bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx)) {
- rc = bnxt_re_initialize_dbr_pacing(rdev);
- if (!rc)
- rdev->dbr_pacing = true;
- else
- rdev->dbr_pacing = false;
- dev_dbg(rdev_to_dev(rdev), "%s: initialize db pacing ret %d\n",
- __func__, rc);
+
+ if (!(_is_chip_p7(rdev->chip_ctx))) {
+ /* Program the NQ ID for DBQ notification */
+ if (rdev->chip_ctx->modes.dbr_pacing_v0 ||
+ bnxt_qplib_dbr_pacing_en(rdev->chip_ctx) ||
+ bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx)) {
+ rc = bnxt_re_initialize_dbr_pacing(rdev);
+ if (!rc)
+ rdev->dbr_pacing = true;
+ else
+ rdev->dbr_pacing = false;
+ dev_dbg(rdev_to_dev(rdev), "%s: initialize db pacing ret %d\n",
+ __func__, rc);
+ }
}
vec = rdev->nqr.msix_entries[BNXT_RE_AEQ_IDX].vector;
@@ -3811,6 +3836,7 @@ static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct ifnet *netdev,
void bnxt_re_get_link_speed(struct bnxt_re_dev *rdev)
{
rdev->espeed = rdev->en_dev->espeed;
+ rdev->lanes = rdev->en_dev->lanes;
return;
}
@@ -3852,7 +3878,7 @@ void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 op_type,
int bnxt_re_add_device(struct bnxt_re_dev **rdev,
struct ifnet *netdev,
- u8 qp_mode, u8 op_type, u8 wqe_mode,
+ u8 qp_mode, u8 op_type,
u32 num_msix_requested,
struct auxiliary_device *aux_dev)
{
@@ -3925,7 +3951,7 @@ int bnxt_re_add_device(struct bnxt_re_dev **rdev,
rtnl_lock();
en_info->rdev = *rdev;
rtnl_unlock();
- rc = bnxt_re_dev_init(*rdev, op_type, wqe_mode);
+ rc = bnxt_re_dev_init(*rdev, op_type);
if (rc) {
ref_error:
bnxt_re_dev_unreg(*rdev);
@@ -4374,7 +4400,6 @@ static int bnxt_re_probe(struct auxiliary_device *adev,
rc = bnxt_re_add_device(&rdev, en_dev->net,
BNXT_RE_GSI_MODE_ALL,
BNXT_RE_COMPLETE_INIT,
- BNXT_QPLIB_WQE_MODE_STATIC,
BNXT_RE_MSIX_FROM_MOD_PARAM, adev);
if (rc) {
mutex_unlock(&bnxt_re_mutex);
diff --git a/sys/dev/bnxt/bnxt_re/qplib_fp.c b/sys/dev/bnxt/bnxt_re/qplib_fp.c
index 3f1b02406f7f..19708302198b 100644
--- a/sys/dev/bnxt/bnxt_re/qplib_fp.c
+++ b/sys/dev/bnxt/bnxt_re/qplib_fp.c
@@ -388,10 +388,14 @@ static void bnxt_qplib_service_nq(unsigned long data)
struct bnxt_qplib_srq *srq;
struct nq_srq_event *nqsrqe =
(struct nq_srq_event *)nqe;
+ u8 toggle;
q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high) << 32;
srq = (struct bnxt_qplib_srq *)q_handle;
+ toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK)
+ >> NQ_CN_TOGGLE_SFT;
+ srq->dbinfo.toggle = toggle;
bnxt_qplib_armen_db(&srq->dbinfo,
DBC_DBC_TYPE_SRQ_ARMENA);
if (!nq->srqn_handler(nq,
@@ -838,15 +842,15 @@ static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
int rc = 0;
int indx;
- que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
+ que->swq = kcalloc(que->max_sw_wqe, sizeof(*que->swq), GFP_KERNEL);
if (!que->swq) {
rc = -ENOMEM;
goto out;
}
que->swq_start = 0;
- que->swq_last = que->max_wqe - 1;
- for (indx = 0; indx < que->max_wqe; indx++)
+ que->swq_last = que->max_sw_wqe - 1;
+ for (indx = 0; indx < que->max_sw_wqe; indx++)
que->swq[indx].next_idx = indx + 1;
que->swq[que->swq_last].next_idx = 0; /* Make it circular */
que->swq_last = 0;
@@ -875,19 +879,23 @@ static u32 bnxt_qplib_get_stride(void)
return sizeof(struct sq_sge);
}
-static u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que)
+u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que, u8 wqe_mode, bool is_sq)
{
- u8 stride;
+ u32 slots;
- stride = bnxt_qplib_get_stride();
- return (que->wqe_size * que->max_wqe) / stride;
+ /* Queue depth is the number of slots. */
+ slots = (que->wqe_size * que->max_wqe) / sizeof(struct sq_sge);
+ /* For variable WQE mode, need to align the slots to 256 */
+ if (wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE && is_sq)
+ slots = ALIGN(slots, BNXT_VAR_MAX_SLOT_ALIGN);
+ return slots;
}
static u32 _set_sq_size(struct bnxt_qplib_q *que, u8 wqe_mode)
{
/* For Variable mode supply number of 16B slots */
return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
- que->max_wqe : bnxt_qplib_get_depth(que);
+ que->max_wqe : bnxt_qplib_get_depth(que, wqe_mode, true);
}
static u32 _set_sq_max_slot(u8 wqe_mode)
@@ -925,7 +933,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &sq->sginfo;
hwq_attr.stride = bnxt_qplib_get_stride();
- hwq_attr.depth = bnxt_qplib_get_depth(sq);
+ hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, false);
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
if (rc)
@@ -949,7 +957,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &rq->sginfo;
hwq_attr.stride = bnxt_qplib_get_stride();
- hwq_attr.depth = bnxt_qplib_get_depth(rq);
+ hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
if (rc)
@@ -1075,8 +1083,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
u32 sqsz;
qp->cctx = res->cctx;
- if (res->dattr)
+ if (res->dattr) {
qp->dev_cap_flags = res->dattr->dev_cap_flags;
+ qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_ext_flags2);
+ }
+
/* General */
req.type = qp->type;
req.dpi = cpu_to_le32(qp->dpi->dpi);
@@ -1087,7 +1098,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
psn_sz = _is_chip_gen_p5_p7(qp->cctx) ?
sizeof(struct sq_psn_search_ext) :
sizeof(struct sq_psn_search);
- if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
+ if (qp->is_host_msn_tbl) {
psn_sz = sizeof(struct sq_msn_search);
qp->msn = 0;
}
@@ -1098,12 +1109,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &sq->sginfo;
hwq_attr.stride = bnxt_qplib_get_stride();
- hwq_attr.depth = bnxt_qplib_get_depth(sq);
+ hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true);
hwq_attr.aux_stride = psn_sz;
hwq_attr.aux_depth = (psn_sz) ?
_set_sq_size(sq, qp->wqe_mode) : 0;
/* Update msn tbl size */
- if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) {
+ if (qp->is_host_msn_tbl && psn_sz) {
if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
hwq_attr.aux_depth = roundup_pow_of_two(_set_sq_size(sq, qp->wqe_mode));
else
@@ -1131,8 +1142,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
req.sq_pg_size_sq_lvl = pg_sz_lvl;
req.sq_fwo_sq_sge = cpu_to_le16(((0 << CMDQ_CREATE_QP_SQ_FWO_SFT) &
CMDQ_CREATE_QP_SQ_FWO_MASK) |
- ((BNXT_RE_HW_RETX(qp->dev_cap_flags)) ?
- BNXT_MSN_TBLE_SGE : sq->max_sge &
+ (sq->max_sge &
CMDQ_CREATE_QP_SQ_SGE_MASK));
req.scq_cid = cpu_to_le32(qp->scq->id);
@@ -1141,7 +1151,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &rq->sginfo;
hwq_attr.stride = bnxt_qplib_get_stride();
- hwq_attr.depth = bnxt_qplib_get_depth(rq);
+ hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
hwq_attr.aux_stride = 0;
hwq_attr.aux_depth = 0;
hwq_attr.type = HWQ_TYPE_QUEUE;
@@ -1764,7 +1774,7 @@ static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
return;
/* Handle MSN differently on cap flags */
- if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
+ if (qp->is_host_msn_tbl) {
bnxt_qplib_fill_msn_search(qp, wqe, swq);
return;
}
@@ -1897,7 +1907,7 @@ static u16 _translate_q_full_delta(struct bnxt_qplib_q *que, u16 wqe_bytes)
}
static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
- struct bnxt_qplib_swq *swq, bool hw_retx)
+ struct bnxt_qplib_swq *swq, bool is_host_msn_tbl)
{
struct bnxt_qplib_hwq *sq_hwq;
u32 pg_num, pg_indx;
@@ -1909,8 +1919,11 @@ static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib
return;
tail = swq->slot_idx / sq->dbinfo.max_slot;
- if (hw_retx)
+ if (is_host_msn_tbl) {
+ /* For HW retx use qp msn index */
+ tail = qp->msn;
tail %= qp->msn_tbl_sz;
+ }
pg_num = (tail + sq_hwq->pad_pgofft) / (PAGE_SIZE / sq_hwq->pad_stride);
pg_indx = (tail + sq_hwq->pad_pgofft) % (PAGE_SIZE / sq_hwq->pad_stride);
buff = (void *)(sq_hwq->pad_pg[pg_num] + pg_indx * sq_hwq->pad_stride);
@@ -1935,6 +1948,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_swq *swq;
bool sch_handler = false;
u16 slots_needed;
+ bool msn_update;
void *base_hdr;
void *ext_hdr;
__le32 temp32;
@@ -1976,7 +1990,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
sw_prod = sq_hwq->prod;
swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
swq->slot_idx = sw_prod;
- bnxt_qplib_pull_psn_buff(qp, sq, swq, BNXT_RE_HW_RETX(qp->dev_cap_flags));
+ bnxt_qplib_pull_psn_buff(qp, sq, swq, qp->is_host_msn_tbl);
swq->wr_id = wqe->wr_id;
swq->type = wqe->type;
@@ -2010,6 +2024,9 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
wqe->num_sge, &sw_prod);
if (data_len < 0)
goto queue_err;
+ /* Make sure we update MSN table only for wired wqes */
+ msn_update = true;
+
/* Specifics */
switch (wqe->type) {
case BNXT_QPLIB_SWQE_TYPE_SEND:
@@ -2064,6 +2081,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
ext_sqe->avid = cpu_to_le32(wqe->send.avid &
SQ_SEND_AVID_MASK);
sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
+ msn_update = false;
} else {
sqe->length = cpu_to_le32(data_len);
if (qp->mtu)
@@ -2157,6 +2175,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
"\tflags = 0x%x\n"
"\tinv_l_key = 0x%x\n",
sqe->wqe_type, sqe->flags, sqe->inv_l_key);
+ msn_update = false;
break;
}
case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
@@ -2207,6 +2226,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
sqe->zero_based_page_size_log, sqe->l_key,
*(u32 *)sqe->length, sqe->numlevels_pbl_page_size_log,
ext_sqe->pblptr, ext_sqe->va);
+ msn_update = false;
break;
}
case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
@@ -2236,6 +2256,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
sqe->wqe_type, sqe->flags, sqe->access_cntl,
sqe->mw_type_zero_based, sqe->parent_l_key,
sqe->l_key, sqe->va, ext_sqe->length_lo);
+ msn_update = false;
break;
}
default:
@@ -2243,8 +2264,10 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
rc = -EINVAL;
goto done;
}
- swq->next_psn = sq->psn & BTH_PSN_MASK;
- bnxt_qplib_fill_psn_search(qp, wqe, swq);
+ if (!qp->is_host_msn_tbl || msn_update) {
+ swq->next_psn = sq->psn & BTH_PSN_MASK;
+ bnxt_qplib_fill_psn_search(qp, wqe, swq);
+ }
queue_err:
bnxt_qplib_swq_mod_start(sq, wqe_idx);
@@ -2859,6 +2882,32 @@ out:
return rc;
}
+static int bnxt_qplib_get_cqe_sq_cons(struct bnxt_qplib_q *sq, u32 cqe_slot)
+{
+ struct bnxt_qplib_hwq *sq_hwq;
+ struct bnxt_qplib_swq *swq;
+ int cqe_sq_cons = -1;
+ u32 start, last;
+
+ sq_hwq = &sq->hwq;
+
+ start = sq->swq_start;
+ last = sq->swq_last;
+
+ while (last != start) {
+ swq = &sq->swq[last];
+ if (swq->slot_idx == cqe_slot) {
+ cqe_sq_cons = swq->next_idx;
+ dev_err(&sq_hwq->pdev->dev, "%s: Found cons wqe = %d slot = %d\n",
+ __func__, cqe_sq_cons, cqe_slot);
+ break;
+ }
+
+ last = swq->next_idx;
+ }
+ return cqe_sq_cons;
+}
+
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
struct cq_req *hwcqe,
struct bnxt_qplib_cqe **pcqe, int *budget,
@@ -2867,8 +2916,9 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *sq;
struct bnxt_qplib_cqe *cqe;
- u32 cqe_sq_cons;
+ u32 cqe_sq_cons, slot_num;
struct bnxt_qplib_swq *swq;
+ int cqe_cons;
int rc = 0;
qp = (struct bnxt_qplib_qp *)le64_to_cpu(hwcqe->qp_handle);
@@ -2880,13 +2930,26 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
}
sq = &qp->sq;
- cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
+ cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_sw_wqe;
if (qp->sq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
goto done;
}
+ if (__is_err_cqe_for_var_wqe(qp, hwcqe->status)) {
+ slot_num = le16_to_cpu(hwcqe->sq_cons_idx);
+ cqe_cons = bnxt_qplib_get_cqe_sq_cons(sq, slot_num);
+ if (cqe_cons < 0) {
+ dev_dbg(&cq->hwq.pdev->dev, "%s: Wrong SQ cons cqe_slot_indx = %d\n",
+ __func__, slot_num);
+ goto done;
+ }
+ cqe_sq_cons = cqe_cons;
+ dev_dbg(&cq->hwq.pdev->dev, "%s: cqe_sq_cons = %d swq_last = %d swq_start = %d\n",
+ __func__, cqe_sq_cons, sq->swq_last, sq->swq_start);
+ }
+
/* Require to walk the sq's swq to fabricate CQEs for all previously
* signaled SWQEs due to CQE aggregation from the current sq cons
* to the cqe_sq_cons
@@ -3329,7 +3392,7 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
if (cqe_cons == 0xFFFF)
goto do_rq;
- cqe_cons %= sq->max_wqe;
+ cqe_cons %= sq->max_sw_wqe;
if (qp->sq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
diff --git a/sys/dev/bnxt/bnxt_re/qplib_fp.h b/sys/dev/bnxt/bnxt_re/qplib_fp.h
index 527c377f0aa5..542a26782c62 100644
--- a/sys/dev/bnxt/bnxt_re/qplib_fp.h
+++ b/sys/dev/bnxt/bnxt_re/qplib_fp.h
@@ -300,6 +300,7 @@ struct bnxt_qplib_q {
struct bnxt_qplib_sg_info sginfo;
struct bnxt_qplib_hwq hwq;
u32 max_wqe;
+ u32 max_sw_wqe;
u16 max_sge;
u16 wqe_size;
u16 q_full_delta;
@@ -390,6 +391,7 @@ struct bnxt_qplib_qp {
u32 msn_tbl_sz;
/* get devflags in PI code */
u16 dev_cap_flags;
+ bool is_host_msn_tbl;
};
@@ -634,5 +636,16 @@ static inline uint64_t bnxt_re_update_msn_tbl(uint32_t st_idx, uint32_t npsn, ui
SQ_MSN_SEARCH_START_PSN_MASK));
}
+static inline bool __is_var_wqe(struct bnxt_qplib_qp *qp)
+{
+ return (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE);
+}
+
+static inline bool __is_err_cqe_for_var_wqe(struct bnxt_qplib_qp *qp, u8 status)
+{
+ return (status != CQ_REQ_STATUS_OK) && __is_var_wqe(qp);
+}
+
void bnxt_re_schedule_dbq_event(struct bnxt_qplib_res *res);
+u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que, u8 wqe_mode, bool is_sq);
#endif
diff --git a/sys/dev/bnxt/bnxt_re/qplib_res.c b/sys/dev/bnxt/bnxt_re/qplib_res.c
index f527af031176..9051f4c9f2b7 100644
--- a/sys/dev/bnxt/bnxt_re/qplib_res.c
+++ b/sys/dev/bnxt/bnxt_re/qplib_res.c
@@ -1139,7 +1139,8 @@ int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res)
ucreg->bar_id = RCFW_DBR_PCI_BAR_REGION;
ucreg->bar_base = pci_resource_start(res->pdev, ucreg->bar_id);
- ucreg->offset = 65536;
+ if (_is_chip_gen_p5(res->cctx))
+ ucreg->offset = 65536;
ucreg->len = ucreg->offset + PAGE_SIZE;
diff --git a/sys/dev/bnxt/bnxt_re/qplib_res.h b/sys/dev/bnxt/bnxt_re/qplib_res.h
index 6468207a49aa..59a8a43ecef6 100644
--- a/sys/dev/bnxt/bnxt_re/qplib_res.h
+++ b/sys/dev/bnxt/bnxt_re/qplib_res.h
@@ -616,6 +616,12 @@ static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
/* Disable HW_RETX */
#define BNXT_RE_HW_RETX(a) _is_hw_retx_supported((a))
+static inline bool _is_host_msn_table(u16 dev_cap_ext_flags2)
+{
+ return (dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_MASK) ==
+ CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_MSN_TABLE;
+}
+
static inline bool _is_cqe_v2_supported(u16 dev_cap_flags)
{
return dev_cap_flags &
@@ -650,7 +656,7 @@ static inline void bnxt_qplib_ring_db32(struct bnxt_qplib_db_info *info,
#define BNXT_QPLIB_INIT_DBHDR(xid, type, indx, toggle) \
(((u64)(((xid) & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | \
(type) | BNXT_QPLIB_DBR_VALID) << 32) | (indx) | \
- ((toggle) << (BNXT_QPLIB_DBR_TOGGLE_SHIFT)))
+ (((u32)(toggle)) << (BNXT_QPLIB_DBR_TOGGLE_SHIFT)))
static inline void bnxt_qplib_write_db(struct bnxt_qplib_db_info *info,
u64 key, void __iomem *db,
@@ -724,7 +730,7 @@ static inline void bnxt_qplib_armen_db(struct bnxt_qplib_db_info *info,
u64 key = 0;
u8 toggle = 0;
- if (type == DBC_DBC_TYPE_CQ_ARMENA)
+ if (type == DBC_DBC_TYPE_CQ_ARMENA || type == DBC_DBC_TYPE_SRQ_ARMENA)
toggle = info->toggle;
/* Index always at 0 */
key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, 0, toggle);
@@ -746,7 +752,7 @@ static inline void bnxt_qplib_srq_arm_db(struct bnxt_qplib_db_info *info)
u64 key = 0;
/* Index always at 0 */
- key = BNXT_QPLIB_INIT_DBHDR(info->xid, DBC_DBC_TYPE_SRQ_ARM, 0, 0);
+ key = BNXT_QPLIB_INIT_DBHDR(info->xid, DBC_DBC_TYPE_SRQ_ARM, 0, info->toggle);
bnxt_qplib_write_db(info, key, info->priv_db, &info->shadow_key);
}
@@ -837,4 +843,11 @@ static inline void bnxt_qplib_max_res_supported(struct bnxt_qplib_chip_ctx *cctx
break;
}
}
+
+static inline u32 bnxt_re_cap_fw_res(u32 fw_val, u32 drv_cap, bool sw_max_en)
+{
+ if (sw_max_en)
+ return fw_val;
+ return min_t(u32, fw_val, drv_cap);
+}
#endif
diff --git a/sys/dev/bnxt/bnxt_re/qplib_sp.c b/sys/dev/bnxt/bnxt_re/qplib_sp.c
index c414718a816f..f876573ce69f 100644
--- a/sys/dev/bnxt/bnxt_re/qplib_sp.c
+++ b/sys/dev/bnxt/bnxt_re/qplib_sp.c
@@ -40,6 +40,7 @@
#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
+#include "bnxt_ulp.h"
const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0 }};
@@ -79,6 +80,7 @@ static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw, char *fw_ver)
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
{
+ struct bnxt_qplib_max_res dev_res = {};
struct creq_query_func_resp resp = {};
struct bnxt_qplib_cmdqmsg msg = {};
struct creq_query_func_resp_sb *sb;
@@ -86,10 +88,10 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
struct bnxt_qplib_dev_attr *attr;
struct bnxt_qplib_chip_ctx *cctx;
struct cmdq_query_func req = {};
+ bool sw_max_en;
u8 *tqm_alloc;
int i, rc = 0;
u32 temp;
- u8 chip_gen = BNXT_RE_DEFAULT;
cctx = rcfw->res->cctx;
attr = rcfw->res->dattr;
@@ -110,10 +112,11 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc)
goto bail;
+ bnxt_qplib_max_res_supported(cctx, rcfw->res, &dev_res, false);
+ sw_max_en = BNXT_EN_SW_RES_LMT(rcfw->res->en_dev);
/* Extract the context from the side buffer */
- chip_gen = _get_chip_gen_p5_type(cctx);
- attr->max_qp = le32_to_cpu(sb->max_qp);
- attr->max_qp = min_t(u32, attr->max_qp, BNXT_RE_MAX_QP_SUPPORTED(chip_gen));
+ attr->max_qp = bnxt_re_cap_fw_res(le32_to_cpu(sb->max_qp),
+ dev_res.max_qp, sw_max_en);
/* max_qp value reported by FW does not include the QP1 */
attr->max_qp += 1;
attr->max_qp_rd_atom =
@@ -126,11 +129,6 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
* one extra entry while creating the qp
*/
attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr) - 1;
- /* Adjust for max_qp_wqes for variable wqe */
- if (cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) {
- attr->max_qp_wqes = (BNXT_MAX_SQ_SIZE) /
- (BNXT_MAX_VAR_WQE_SIZE / BNXT_SGE_SIZE) - 1;
- }
if (!_is_chip_gen_p5_p7(cctx)) {
/*
* 128 WQEs needs to be reserved for the HW (8916). Prevent
@@ -138,33 +136,36 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
*/
attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS;
}
- attr->max_qp_sges = sb->max_sge;
- if (_is_chip_gen_p5_p7(cctx) &&
- cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
- attr->max_qp_sges = sb->max_sge_var_wqe;
- attr->max_cq = le32_to_cpu(sb->max_cq);
- attr->max_cq = min_t(u32, attr->max_cq, BNXT_RE_MAX_CQ_SUPPORTED(chip_gen));
+
+ /* Adjust for max_qp_wqes for variable wqe */
+ if (cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
+ attr->max_qp_wqes = BNXT_VAR_MAX_WQE - 1;
+
+ attr->max_qp_sges = cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE ?
+ min_t(u32, sb->max_sge_var_wqe, BNXT_VAR_MAX_SGE) : sb->max_sge;
+ attr->max_cq = bnxt_re_cap_fw_res(le32_to_cpu(sb->max_cq),
+ dev_res.max_cq, sw_max_en);
attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
attr->max_cq_wqes = min_t(u32, BNXT_QPLIB_MAX_CQ_WQES, attr->max_cq_wqes);
attr->max_cq_sges = attr->max_qp_sges;
- attr->max_mr = le32_to_cpu(sb->max_mr);
- attr->max_mr = min_t(u32, attr->max_mr, BNXT_RE_MAX_MRW_SUPPORTED(chip_gen));
- attr->max_mw = le32_to_cpu(sb->max_mw);
- attr->max_mw = min_t(u32, attr->max_mw, BNXT_RE_MAX_MRW_SUPPORTED(chip_gen));
+ attr->max_mr = bnxt_re_cap_fw_res(le32_to_cpu(sb->max_mr),
+ dev_res.max_mr, sw_max_en);
+ attr->max_mw = bnxt_re_cap_fw_res(le32_to_cpu(sb->max_mw),
+ dev_res.max_mr, sw_max_en);
attr->max_mr_size = le64_to_cpu(sb->max_mr_size);
attr->max_pd = BNXT_QPLIB_MAX_PD;
attr->max_raw_ethy_qp = le32_to_cpu(sb->max_raw_eth_qp);
- attr->max_ah = le32_to_cpu(sb->max_ah);
- attr->max_ah = min_t(u32, attr->max_ah, BNXT_RE_MAX_AH_SUPPORTED(chip_gen));
+ attr->max_ah = bnxt_re_cap_fw_res(le32_to_cpu(sb->max_ah),
+ dev_res.max_ah, sw_max_en);
attr->max_fmr = le32_to_cpu(sb->max_fmr);
attr->max_map_per_fmr = sb->max_map_per_fmr;
- attr->max_srq = le16_to_cpu(sb->max_srq);
- attr->max_srq = min_t(u32, attr->max_srq, BNXT_RE_MAX_SRQ_SUPPORTED(chip_gen));
+ attr->max_srq = bnxt_re_cap_fw_res(le16_to_cpu(sb->max_srq),
+ dev_res.max_srq, sw_max_en);
attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
attr->max_srq_sges = sb->max_srq_sge;
attr->max_pkey = 1;
@@ -185,6 +186,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
attr->page_size_cap = BIT_ULL(28) | BIT_ULL(21) | BIT_ULL(12);
bnxt_qplib_query_version(rcfw, attr->fw_ver);
+ attr->dev_cap_ext_flags2 = le16_to_cpu(sb->dev_cap_ext_flags_2);
for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
diff --git a/sys/dev/bnxt/bnxt_re/qplib_sp.h b/sys/dev/bnxt/bnxt_re/qplib_sp.h
index e306db3b9d8e..5a5485dc5250 100644
--- a/sys/dev/bnxt/bnxt_re/qplib_sp.h
+++ b/sys/dev/bnxt/bnxt_re/qplib_sp.h
@@ -32,6 +32,7 @@
#define __BNXT_QPLIB_SP_H__
#include <rdma/ib_verbs.h>
+#include "bnxt_re-abi.h"
#define BNXT_QPLIB_RESERVED_QP_WRS 128
@@ -71,6 +72,7 @@ struct bnxt_qplib_dev_attr {
u32 l2_db_size;
u8 tqm_alloc_reqs[MAX_TQM_ALLOC_REQ];
u8 is_atomic;
+ u16 dev_cap_ext_flags2;
u16 dev_cap_flags;
u64 page_size_cap;
u32 max_dpi;
@@ -394,6 +396,13 @@ bool ib_modify_qp_is_ok_compat(enum ib_qp_state cur_state, enum ib_qp_state next
#define BNXT_MAX_VAR_WQE_SIZE 512
#define BNXT_SGE_SIZE 16
+#define BNXT_VAR_MAX_WQE 4352
+#define BNXT_VAR_MAX_SLOT_ALIGN 256
+#define BNXT_VAR_MAX_SGE 13
+#define BNXT_RE_MAX_RQ_WQES 65536
+
+#define BNXT_STATIC_MAX_SGE 6
+
/* PF defines */
#define BNXT_RE_MAX_QP_SUPPORTED(chip_gen) \
chip_gen == BNXT_RE_DEFAULT ? (64 * 1024) : 0
diff --git a/sys/dev/clk/rockchip/rk_clk_fract.c b/sys/dev/clk/rockchip/rk_clk_fract.c
index aa7084c90d76..b06b13146e5f 100644
--- a/sys/dev/clk/rockchip/rk_clk_fract.c
+++ b/sys/dev/clk/rockchip/rk_clk_fract.c
@@ -80,7 +80,7 @@ DEFINE_CLASS_1(rk_clk_fract, rk_clk_fract_class, rk_clk_fract_methods,
* http://en.wikipedia.org/wiki/Continued_fraction
*
* - n_input, d_input Given input fraction
- * - n_max, d_max Maximum vaues of divider registers
+ * - n_max, d_max Maximum values of divider registers
* - n_out, d_out Computed approximation
*/
diff --git a/sys/dev/cxgbe/common/t4_hw.c b/sys/dev/cxgbe/common/t4_hw.c
index 65292486cbc8..494f83a47135 100644
--- a/sys/dev/cxgbe/common/t4_hw.c
+++ b/sys/dev/cxgbe/common/t4_hw.c
@@ -3282,7 +3282,9 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
0x477d4, 0x477fc,
0x48000, 0x48004,
0x48018, 0x4801c,
- 0x49304, 0x493f0,
+ 0x49304, 0x49320,
+ 0x4932c, 0x4932c,
+ 0x49334, 0x493f0,
0x49400, 0x49410,
0x49460, 0x494f4,
0x50000, 0x50084,
@@ -3305,7 +3307,9 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
0x515f0, 0x515f4,
0x58000, 0x58004,
0x58018, 0x5801c,
- 0x59304, 0x593f0,
+ 0x59304, 0x59320,
+ 0x5932c, 0x5932c,
+ 0x59334, 0x593f0,
0x59400, 0x59410,
0x59460, 0x594f4,
};
@@ -6177,11 +6181,6 @@ static bool mem_intr_handler(struct adapter *adap, int idx, int flags)
ii.cause_reg = MC_T7_REG(A_MC_P_DDRCTL_INT_CAUSE, i);
ii.enable_reg = MC_T7_REG(A_MC_P_DDRCTL_INT_ENABLE, i);
fatal |= t4_handle_intr(adap, &ii, 0, flags);
-
- snprintf(rname, sizeof(rname), "MC%u_ECC_UE_INT_CAUSE", i);
- ii.cause_reg = MC_T7_REG(A_MC_P_ECC_UE_INT_CAUSE, i);
- ii.enable_reg = MC_T7_REG(A_MC_P_ECC_UE_INT_ENABLE, i);
- fatal |= t4_handle_intr(adap, &ii, 0, flags);
}
break;
}
diff --git a/sys/dev/cxgbe/common/t4_regs.h b/sys/dev/cxgbe/common/t4_regs.h
index 51f150443261..09d0d4aa2c08 100644
--- a/sys/dev/cxgbe/common/t4_regs.h
+++ b/sys/dev/cxgbe/common/t4_regs.h
@@ -27,11 +27,11 @@
*/
/* This file is automatically generated --- changes will be lost */
-/* Generation Date : Tue Oct 28 05:23:45 PM IST 2025 */
+/* Generation Date : Thu Dec 11 08:42:50 PM IST 2025 */
/* Directory name: t4_reg.txt, Date: Not specified */
-/* Directory name: t5_reg.txt, Changeset: 6945:54ba4ba7ee8b */
+/* Directory name: t5_reg.txt, Changeset: 6946:9d3868c42009 */
/* Directory name: t6_reg.txt, Changeset: 4277:9c165d0f4899 */
-/* Directory name: t7_sw_reg.txt, Changeset: 5946:0b60ff298e7d */
+/* Directory name: t7_sw_reg.txt, Changeset: 5950:7c934148528c */
#define MYPF_BASE 0x1b000
#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr))
@@ -6195,15 +6195,15 @@
#define A_PCIE_PF_INT_CFG 0x3140
-#define S_T7_VECNUM 12
-#define M_T7_VECNUM 0x7ffU
-#define V_T7_VECNUM(x) ((x) << S_T7_VECNUM)
-#define G_T7_VECNUM(x) (((x) >> S_T7_VECNUM) & M_T7_VECNUM)
+#define S_T7_PF_INT_VECNUM 12
+#define M_T7_PF_INT_VECNUM 0x7ffU
+#define V_T7_PF_INT_VECNUM(x) ((x) << S_T7_PF_INT_VECNUM)
+#define G_T7_PF_INT_VECNUM(x) (((x) >> S_T7_PF_INT_VECNUM) & M_T7_PF_INT_VECNUM)
-#define S_T7_VECBASE 0
-#define M_T7_VECBASE 0xfffU
-#define V_T7_VECBASE(x) ((x) << S_T7_VECBASE)
-#define G_T7_VECBASE(x) (((x) >> S_T7_VECBASE) & M_T7_VECBASE)
+#define S_T7_PF_INT_VECBASE 0
+#define M_T7_PF_INT_VECBASE 0xfffU
+#define V_T7_PF_INT_VECBASE(x) ((x) << S_T7_PF_INT_VECBASE)
+#define G_T7_PF_INT_VECBASE(x) (((x) >> S_T7_PF_INT_VECBASE) & M_T7_PF_INT_VECBASE)
#define A_PCIE_PF_INT_CFG2 0x3144
#define A_PCIE_VF_INT_CFG 0x3180
@@ -10636,6 +10636,12 @@
#define G_VFID_PCIE(x) (((x) >> S_VFID_PCIE) & M_VFID_PCIE)
#define A_PCIE_VF_INT_INDIR_DATA 0x5c48
+
+#define S_T7_VECBASE 0
+#define M_T7_VECBASE 0xfffU
+#define V_T7_VECBASE(x) ((x) << S_T7_VECBASE)
+#define G_T7_VECBASE(x) (((x) >> S_T7_VECBASE) & M_T7_VECBASE)
+
#define A_PCIE_VF_256_INT_CFG2 0x5c4c
#define A_PCIE_VF_MSI_EN_4 0x5e50
#define A_PCIE_VF_MSI_EN_5 0x5e54
@@ -17723,6 +17729,22 @@
#define V_GPIO20_PE_EN(x) ((x) << S_GPIO20_PE_EN)
#define F_GPIO20_PE_EN V_GPIO20_PE_EN(1U)
+#define S_T7_GPIO19_PE_EN 19
+#define V_T7_GPIO19_PE_EN(x) ((x) << S_T7_GPIO19_PE_EN)
+#define F_T7_GPIO19_PE_EN V_T7_GPIO19_PE_EN(1U)
+
+#define S_T7_GPIO18_PE_EN 18
+#define V_T7_GPIO18_PE_EN(x) ((x) << S_T7_GPIO18_PE_EN)
+#define F_T7_GPIO18_PE_EN V_T7_GPIO18_PE_EN(1U)
+
+#define S_T7_GPIO17_PE_EN 17
+#define V_T7_GPIO17_PE_EN(x) ((x) << S_T7_GPIO17_PE_EN)
+#define F_T7_GPIO17_PE_EN V_T7_GPIO17_PE_EN(1U)
+
+#define S_T7_GPIO16_PE_EN 16
+#define V_T7_GPIO16_PE_EN(x) ((x) << S_T7_GPIO16_PE_EN)
+#define F_T7_GPIO16_PE_EN V_T7_GPIO16_PE_EN(1U)
+
#define A_DBG_PVT_REG_THRESHOLD 0x611c
#define S_PVT_CALIBRATION_DONE 8
@@ -17859,6 +17881,22 @@
#define V_GPIO20_PS_EN(x) ((x) << S_GPIO20_PS_EN)
#define F_GPIO20_PS_EN V_GPIO20_PS_EN(1U)
+#define S_T7_GPIO19_PS_EN 19
+#define V_T7_GPIO19_PS_EN(x) ((x) << S_T7_GPIO19_PS_EN)
+#define F_T7_GPIO19_PS_EN V_T7_GPIO19_PS_EN(1U)
+
+#define S_T7_GPIO18_PS_EN 18
+#define V_T7_GPIO18_PS_EN(x) ((x) << S_T7_GPIO18_PS_EN)
+#define F_T7_GPIO18_PS_EN V_T7_GPIO18_PS_EN(1U)
+
+#define S_T7_GPIO17_PS_EN 17
+#define V_T7_GPIO17_PS_EN(x) ((x) << S_T7_GPIO17_PS_EN)
+#define F_T7_GPIO17_PS_EN V_T7_GPIO17_PS_EN(1U)
+
+#define S_T7_GPIO16_PS_EN 16
+#define V_T7_GPIO16_PS_EN(x) ((x) << S_T7_GPIO16_PS_EN)
+#define F_T7_GPIO16_PS_EN V_T7_GPIO16_PS_EN(1U)
+
#define A_DBG_PVT_REG_IN_TERMP 0x6120
#define S_REG_IN_TERMP_B 4
@@ -21825,10 +21863,6 @@
#define V_FUTURE_DEXPANSION_WTS(x) ((x) << S_FUTURE_DEXPANSION_WTS)
#define G_FUTURE_DEXPANSION_WTS(x) (((x) >> S_FUTURE_DEXPANSION_WTS) & M_FUTURE_DEXPANSION_WTS)
-#define S_T7_FUTURE_CEXPANSION_WTS 31
-#define V_T7_FUTURE_CEXPANSION_WTS(x) ((x) << S_T7_FUTURE_CEXPANSION_WTS)
-#define F_T7_FUTURE_CEXPANSION_WTS V_T7_FUTURE_CEXPANSION_WTS(1U)
-
#define S_CL14_WR_CMD_TO_ERROR 30
#define V_CL14_WR_CMD_TO_ERROR(x) ((x) << S_CL14_WR_CMD_TO_ERROR)
#define F_CL14_WR_CMD_TO_ERROR V_CL14_WR_CMD_TO_ERROR(1U)
@@ -21837,10 +21871,6 @@
#define V_CL13_WR_CMD_TO_ERROR(x) ((x) << S_CL13_WR_CMD_TO_ERROR)
#define F_CL13_WR_CMD_TO_ERROR V_CL13_WR_CMD_TO_ERROR(1U)
-#define S_T7_FUTURE_DEXPANSION_WTS 15
-#define V_T7_FUTURE_DEXPANSION_WTS(x) ((x) << S_T7_FUTURE_DEXPANSION_WTS)
-#define F_T7_FUTURE_DEXPANSION_WTS V_T7_FUTURE_DEXPANSION_WTS(1U)
-
#define S_CL14_WR_DATA_TO_ERROR 14
#define V_CL14_WR_DATA_TO_ERROR(x) ((x) << S_CL14_WR_DATA_TO_ERROR)
#define F_CL14_WR_DATA_TO_ERROR V_CL14_WR_DATA_TO_ERROR(1U)
@@ -21965,10 +21995,6 @@
#define V_FUTURE_DEXPANSION_RTE(x) ((x) << S_FUTURE_DEXPANSION_RTE)
#define G_FUTURE_DEXPANSION_RTE(x) (((x) >> S_FUTURE_DEXPANSION_RTE) & M_FUTURE_DEXPANSION_RTE)
-#define S_T7_FUTURE_CEXPANSION_RTE 31
-#define V_T7_FUTURE_CEXPANSION_RTE(x) ((x) << S_T7_FUTURE_CEXPANSION_RTE)
-#define F_T7_FUTURE_CEXPANSION_RTE V_T7_FUTURE_CEXPANSION_RTE(1U)
-
#define S_CL14_RD_CMD_TO_EN 30
#define V_CL14_RD_CMD_TO_EN(x) ((x) << S_CL14_RD_CMD_TO_EN)
#define F_CL14_RD_CMD_TO_EN V_CL14_RD_CMD_TO_EN(1U)
@@ -21977,10 +22003,6 @@
#define V_CL13_RD_CMD_TO_EN(x) ((x) << S_CL13_RD_CMD_TO_EN)
#define F_CL13_RD_CMD_TO_EN V_CL13_RD_CMD_TO_EN(1U)
-#define S_T7_FUTURE_DEXPANSION_RTE 15
-#define V_T7_FUTURE_DEXPANSION_RTE(x) ((x) << S_T7_FUTURE_DEXPANSION_RTE)
-#define F_T7_FUTURE_DEXPANSION_RTE V_T7_FUTURE_DEXPANSION_RTE(1U)
-
#define S_CL14_RD_DATA_TO_EN 14
#define V_CL14_RD_DATA_TO_EN(x) ((x) << S_CL14_RD_DATA_TO_EN)
#define F_CL14_RD_DATA_TO_EN V_CL14_RD_DATA_TO_EN(1U)
@@ -22105,10 +22127,6 @@
#define V_FUTURE_DEXPANSION_RTS(x) ((x) << S_FUTURE_DEXPANSION_RTS)
#define G_FUTURE_DEXPANSION_RTS(x) (((x) >> S_FUTURE_DEXPANSION_RTS) & M_FUTURE_DEXPANSION_RTS)
-#define S_T7_FUTURE_CEXPANSION_RTS 31
-#define V_T7_FUTURE_CEXPANSION_RTS(x) ((x) << S_T7_FUTURE_CEXPANSION_RTS)
-#define F_T7_FUTURE_CEXPANSION_RTS V_T7_FUTURE_CEXPANSION_RTS(1U)
-
#define S_CL14_RD_CMD_TO_ERROR 30
#define V_CL14_RD_CMD_TO_ERROR(x) ((x) << S_CL14_RD_CMD_TO_ERROR)
#define F_CL14_RD_CMD_TO_ERROR V_CL14_RD_CMD_TO_ERROR(1U)
@@ -22117,10 +22135,9 @@
#define V_CL13_RD_CMD_TO_ERROR(x) ((x) << S_CL13_RD_CMD_TO_ERROR)
#define F_CL13_RD_CMD_TO_ERROR V_CL13_RD_CMD_TO_ERROR(1U)
-#define S_T7_FUTURE_DEXPANSION_RTS 14
-#define M_T7_FUTURE_DEXPANSION_RTS 0x3U
-#define V_T7_FUTURE_DEXPANSION_RTS(x) ((x) << S_T7_FUTURE_DEXPANSION_RTS)
-#define G_T7_FUTURE_DEXPANSION_RTS(x) (((x) >> S_T7_FUTURE_DEXPANSION_RTS) & M_T7_FUTURE_DEXPANSION_RTS)
+#define S_CL14_RD_DATA_TO_ERROR 14
+#define V_CL14_RD_DATA_TO_ERROR(x) ((x) << S_CL14_RD_DATA_TO_ERROR)
+#define F_CL14_RD_DATA_TO_ERROR V_CL14_RD_DATA_TO_ERROR(1U)
#define S_CL13_RD_DATA_TO_ERROR 13
#define V_CL13_RD_DATA_TO_ERROR(x) ((x) << S_CL13_RD_DATA_TO_ERROR)
@@ -22224,10 +22241,9 @@
#define V_FUTURE_DEXPANSION_IPE(x) ((x) << S_FUTURE_DEXPANSION_IPE)
#define G_FUTURE_DEXPANSION_IPE(x) (((x) >> S_FUTURE_DEXPANSION_IPE) & M_FUTURE_DEXPANSION_IPE)
-#define S_T7_FUTURE_DEXPANSION_IPE 14
-#define M_T7_FUTURE_DEXPANSION_IPE 0x3ffffU
-#define V_T7_FUTURE_DEXPANSION_IPE(x) ((x) << S_T7_FUTURE_DEXPANSION_IPE)
-#define G_T7_FUTURE_DEXPANSION_IPE(x) (((x) >> S_T7_FUTURE_DEXPANSION_IPE) & M_T7_FUTURE_DEXPANSION_IPE)
+#define S_CL14_IF_PAR_EN 14
+#define V_CL14_IF_PAR_EN(x) ((x) << S_CL14_IF_PAR_EN)
+#define F_CL14_IF_PAR_EN V_CL14_IF_PAR_EN(1U)
#define S_CL13_IF_PAR_EN 13
#define V_CL13_IF_PAR_EN(x) ((x) << S_CL13_IF_PAR_EN)
@@ -22292,10 +22308,9 @@
#define V_FUTURE_DEXPANSION_IPS(x) ((x) << S_FUTURE_DEXPANSION_IPS)
#define G_FUTURE_DEXPANSION_IPS(x) (((x) >> S_FUTURE_DEXPANSION_IPS) & M_FUTURE_DEXPANSION_IPS)
-#define S_T7_FUTURE_DEXPANSION_IPS 14
-#define M_T7_FUTURE_DEXPANSION_IPS 0x3ffffU
-#define V_T7_FUTURE_DEXPANSION_IPS(x) ((x) << S_T7_FUTURE_DEXPANSION_IPS)
-#define G_T7_FUTURE_DEXPANSION_IPS(x) (((x) >> S_T7_FUTURE_DEXPANSION_IPS) & M_T7_FUTURE_DEXPANSION_IPS)
+#define S_CL14_IF_PAR_ERROR 14
+#define V_CL14_IF_PAR_ERROR(x) ((x) << S_CL14_IF_PAR_ERROR)
+#define F_CL14_IF_PAR_ERROR V_CL14_IF_PAR_ERROR(1U)
#define S_CL13_IF_PAR_ERROR 13
#define V_CL13_IF_PAR_ERROR(x) ((x) << S_CL13_IF_PAR_ERROR)
@@ -39030,21 +39045,21 @@
#define A_PM_TX_CH1_OSPI_DEFICIT_THRSHLD 0x10028
#define A_PM_TX_PERR_ENABLE 0x10028
-#define S_T7_1_OSPI_OVERFLOW3 23
-#define V_T7_1_OSPI_OVERFLOW3(x) ((x) << S_T7_1_OSPI_OVERFLOW3)
-#define F_T7_1_OSPI_OVERFLOW3 V_T7_1_OSPI_OVERFLOW3(1U)
+#define S_OSPI_OVERFLOW3_TX 23
+#define V_OSPI_OVERFLOW3_TX(x) ((x) << S_OSPI_OVERFLOW3_TX)
+#define F_OSPI_OVERFLOW3_TX V_OSPI_OVERFLOW3_TX(1U)
-#define S_T7_1_OSPI_OVERFLOW2 22
-#define V_T7_1_OSPI_OVERFLOW2(x) ((x) << S_T7_1_OSPI_OVERFLOW2)
-#define F_T7_1_OSPI_OVERFLOW2 V_T7_1_OSPI_OVERFLOW2(1U)
+#define S_OSPI_OVERFLOW2_TX 22
+#define V_OSPI_OVERFLOW2_TX(x) ((x) << S_OSPI_OVERFLOW2_TX)
+#define F_OSPI_OVERFLOW2_TX V_OSPI_OVERFLOW2_TX(1U)
-#define S_T7_1_OSPI_OVERFLOW1 21
-#define V_T7_1_OSPI_OVERFLOW1(x) ((x) << S_T7_1_OSPI_OVERFLOW1)
-#define F_T7_1_OSPI_OVERFLOW1 V_T7_1_OSPI_OVERFLOW1(1U)
+#define S_OSPI_OVERFLOW1_TX 21
+#define V_OSPI_OVERFLOW1_TX(x) ((x) << S_OSPI_OVERFLOW1_TX)
+#define F_OSPI_OVERFLOW1_TX V_OSPI_OVERFLOW1_TX(1U)
-#define S_T7_1_OSPI_OVERFLOW0 20
-#define V_T7_1_OSPI_OVERFLOW0(x) ((x) << S_T7_1_OSPI_OVERFLOW0)
-#define F_T7_1_OSPI_OVERFLOW0 V_T7_1_OSPI_OVERFLOW0(1U)
+#define S_OSPI_OVERFLOW0_TX 20
+#define V_OSPI_OVERFLOW0_TX(x) ((x) << S_OSPI_OVERFLOW0_TX)
+#define F_OSPI_OVERFLOW0_TX V_OSPI_OVERFLOW0_TX(1U)
#define S_T7_BUNDLE_LEN_OVFL_EN 18
#define V_T7_BUNDLE_LEN_OVFL_EN(x) ((x) << S_T7_BUNDLE_LEN_OVFL_EN)
@@ -41390,15 +41405,65 @@
#define V_T7_BUBBLE(x) ((x) << S_T7_BUBBLE)
#define F_T7_BUBBLE V_T7_BUBBLE(1U)
-#define S_TXTOKENFIFO 15
-#define M_TXTOKENFIFO 0x3ffU
-#define V_TXTOKENFIFO(x) ((x) << S_TXTOKENFIFO)
-#define G_TXTOKENFIFO(x) (((x) >> S_TXTOKENFIFO) & M_TXTOKENFIFO)
+#define S_TX_TF_FIFO_PERR 19
+#define V_TX_TF_FIFO_PERR(x) ((x) << S_TX_TF_FIFO_PERR)
+#define F_TX_TF_FIFO_PERR V_TX_TF_FIFO_PERR(1U)
-#define S_PERR_TP2MPS_TFIFO 13
-#define M_PERR_TP2MPS_TFIFO 0x3U
-#define V_PERR_TP2MPS_TFIFO(x) ((x) << S_PERR_TP2MPS_TFIFO)
-#define G_PERR_TP2MPS_TFIFO(x) (((x) >> S_PERR_TP2MPS_TFIFO) & M_PERR_TP2MPS_TFIFO)
+#define S_TX_FIFO_PERR 18
+#define V_TX_FIFO_PERR(x) ((x) << S_TX_FIFO_PERR)
+#define F_TX_FIFO_PERR V_TX_FIFO_PERR(1U)
+
+#define S_NON_IPSEC_TX_FIFO3_PERR 17
+#define V_NON_IPSEC_TX_FIFO3_PERR(x) ((x) << S_NON_IPSEC_TX_FIFO3_PERR)
+#define F_NON_IPSEC_TX_FIFO3_PERR V_NON_IPSEC_TX_FIFO3_PERR(1U)
+
+#define S_NON_IPSEC_TX_FIFO2_PERR 16
+#define V_NON_IPSEC_TX_FIFO2_PERR(x) ((x) << S_NON_IPSEC_TX_FIFO2_PERR)
+#define F_NON_IPSEC_TX_FIFO2_PERR V_NON_IPSEC_TX_FIFO2_PERR(1U)
+
+#define S_NON_IPSEC_TX_FIFO1_PERR 15
+#define V_NON_IPSEC_TX_FIFO1_PERR(x) ((x) << S_NON_IPSEC_TX_FIFO1_PERR)
+#define F_NON_IPSEC_TX_FIFO1_PERR V_NON_IPSEC_TX_FIFO1_PERR(1U)
+
+#define S_NON_IPSEC_TX_FIFO0_PERR 14
+#define V_NON_IPSEC_TX_FIFO0_PERR(x) ((x) << S_NON_IPSEC_TX_FIFO0_PERR)
+#define F_NON_IPSEC_TX_FIFO0_PERR V_NON_IPSEC_TX_FIFO0_PERR(1U)
+
+#define S_TP2MPS_TX0 13
+#define V_TP2MPS_TX0(x) ((x) << S_TP2MPS_TX0)
+#define F_TP2MPS_TX0 V_TP2MPS_TX0(1U)
+
+#define S_CRYPTO2MPS_TX0 12
+#define V_CRYPTO2MPS_TX0(x) ((x) << S_CRYPTO2MPS_TX0)
+#define F_CRYPTO2MPS_TX0 V_CRYPTO2MPS_TX0(1U)
+
+#define S_TP2MPS_TX1 11
+#define V_TP2MPS_TX1(x) ((x) << S_TP2MPS_TX1)
+#define F_TP2MPS_TX1 V_TP2MPS_TX1(1U)
+
+#define S_CRYPTO2MPS_TX1 10
+#define V_CRYPTO2MPS_TX1(x) ((x) << S_CRYPTO2MPS_TX1)
+#define F_CRYPTO2MPS_TX1 V_CRYPTO2MPS_TX1(1U)
+
+#define S_TP2MPS_TX2 9
+#define V_TP2MPS_TX2(x) ((x) << S_TP2MPS_TX2)
+#define F_TP2MPS_TX2 V_TP2MPS_TX2(1U)
+
+#define S_CRYPTO2MPS_TX2 8
+#define V_CRYPTO2MPS_TX2(x) ((x) << S_CRYPTO2MPS_TX2)
+#define F_CRYPTO2MPS_TX2 V_CRYPTO2MPS_TX2(1U)
+
+#define S_TP2MPS_TX3 7
+#define V_TP2MPS_TX3(x) ((x) << S_TP2MPS_TX3)
+#define F_TP2MPS_TX3 V_TP2MPS_TX3(1U)
+
+#define S_CRYPTO2MPS_TX3 6
+#define V_CRYPTO2MPS_TX3(x) ((x) << S_CRYPTO2MPS_TX3)
+#define F_CRYPTO2MPS_TX3 V_CRYPTO2MPS_TX3(1U)
+
+#define S_NCSI2MPS 5
+#define V_NCSI2MPS(x) ((x) << S_NCSI2MPS)
+#define F_NCSI2MPS V_NCSI2MPS(1U)
#define A_MPS_TX_INT_CAUSE 0x9408
#define A_MPS_TX_NCSI2MPS_CNT 0x940c
@@ -41420,6 +41485,16 @@
#define V_BUBBLEERRINT(x) ((x) << S_BUBBLEERRINT)
#define F_BUBBLEERRINT V_BUBBLEERRINT(1U)
+#define S_TXTOKENFIFO 15
+#define M_TXTOKENFIFO 0x3ffU
+#define V_TXTOKENFIFO(x) ((x) << S_TXTOKENFIFO)
+#define G_TXTOKENFIFO(x) (((x) >> S_TXTOKENFIFO) & M_TXTOKENFIFO)
+
+#define S_PERR_TP2MPS_TFIFO 13
+#define M_PERR_TP2MPS_TFIFO 0x3U
+#define V_PERR_TP2MPS_TFIFO(x) ((x) << S_PERR_TP2MPS_TFIFO)
+#define G_PERR_TP2MPS_TFIFO(x) (((x) >> S_PERR_TP2MPS_TFIFO) & M_PERR_TP2MPS_TFIFO)
+
#define A_MPS_TX_PERR_INJECT 0x9414
#define S_MPSTXMEMSEL 1
@@ -42174,7 +42249,45 @@
#define A_MPS_TX_DBG_CNT 0x947c
#define A_MPS_TX_INT2_ENABLE 0x9498
+
+#define S_T7_TX_FIFO_PERR 4
+#define V_T7_TX_FIFO_PERR(x) ((x) << S_T7_TX_FIFO_PERR)
+#define F_T7_TX_FIFO_PERR V_T7_TX_FIFO_PERR(1U)
+
+#define S_NON_IPSEC_TX_FIFO3 3
+#define V_NON_IPSEC_TX_FIFO3(x) ((x) << S_NON_IPSEC_TX_FIFO3)
+#define F_NON_IPSEC_TX_FIFO3 V_NON_IPSEC_TX_FIFO3(1U)
+
+#define S_NON_IPSEC_TX_FIFO2 2
+#define V_NON_IPSEC_TX_FIFO2(x) ((x) << S_NON_IPSEC_TX_FIFO2)
+#define F_NON_IPSEC_TX_FIFO2 V_NON_IPSEC_TX_FIFO2(1U)
+
+#define S_NON_IPSEC_TX_FIFO1 1
+#define V_NON_IPSEC_TX_FIFO1(x) ((x) << S_NON_IPSEC_TX_FIFO1)
+#define F_NON_IPSEC_TX_FIFO1 V_NON_IPSEC_TX_FIFO1(1U)
+
+#define S_NON_IPSEC_TX_FIFO0 0
+#define V_NON_IPSEC_TX_FIFO0(x) ((x) << S_NON_IPSEC_TX_FIFO0)
+#define F_NON_IPSEC_TX_FIFO0 V_NON_IPSEC_TX_FIFO0(1U)
+
#define A_MPS_TX_INT2_CAUSE 0x949c
+
+#define S_T7_NON_IPSEC_TX_FIFO3_PERR 3
+#define V_T7_NON_IPSEC_TX_FIFO3_PERR(x) ((x) << S_T7_NON_IPSEC_TX_FIFO3_PERR)
+#define F_T7_NON_IPSEC_TX_FIFO3_PERR V_T7_NON_IPSEC_TX_FIFO3_PERR(1U)
+
+#define S_T7_NON_IPSEC_TX_FIFO2_PERR 2
+#define V_T7_NON_IPSEC_TX_FIFO2_PERR(x) ((x) << S_T7_NON_IPSEC_TX_FIFO2_PERR)
+#define F_T7_NON_IPSEC_TX_FIFO2_PERR V_T7_NON_IPSEC_TX_FIFO2_PERR(1U)
+
+#define S_T7_NON_IPSEC_TX_FIFO1_PERR 1
+#define V_T7_NON_IPSEC_TX_FIFO1_PERR(x) ((x) << S_T7_NON_IPSEC_TX_FIFO1_PERR)
+#define F_T7_NON_IPSEC_TX_FIFO1_PERR V_T7_NON_IPSEC_TX_FIFO1_PERR(1U)
+
+#define S_T7_NON_IPSEC_TX_FIFO0_PERR 0
+#define V_T7_NON_IPSEC_TX_FIFO0_PERR(x) ((x) << S_T7_NON_IPSEC_TX_FIFO0_PERR)
+#define F_T7_NON_IPSEC_TX_FIFO0_PERR V_T7_NON_IPSEC_TX_FIFO0_PERR(1U)
+
#define A_MPS_TX_PERR2_ENABLE 0x94a0
#define A_MPS_TX_INT3_ENABLE 0x94a4
#define A_MPS_TX_INT3_CAUSE 0x94a8
@@ -42307,6 +42420,12 @@
#define G_T5_TXPORT(x) (((x) >> S_T5_TXPORT) & M_T5_TXPORT)
#define A_MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614
+
+#define S_T5_RXPP 29
+#define M_T5_RXPP 0x3U
+#define V_T5_RXPP(x) ((x) << S_T5_RXPP)
+#define G_T5_RXPP(x) (((x) >> S_T5_RXPP) & M_T5_RXPP)
+
#define A_MPS_STAT_PERR_ENABLE_SRAM 0x9618
#define A_MPS_STAT_PERR_INT_ENABLE_TX_FIFO 0x961c
@@ -42429,6 +42548,26 @@
#define V_T5_TXVF(x) ((x) << S_T5_TXVF)
#define G_T5_TXVF(x) (((x) >> S_T5_TXVF) & M_T5_TXVF)
+#define S_RXVF_CERR 12
+#define M_RXVF_CERR 0xfU
+#define V_RXVF_CERR(x) ((x) << S_RXVF_CERR)
+#define G_RXVF_CERR(x) (((x) >> S_RXVF_CERR) & M_RXVF_CERR)
+
+#define S_TXVF_CERR 8
+#define M_TXVF_CERR 0xfU
+#define V_TXVF_CERR(x) ((x) << S_TXVF_CERR)
+#define G_TXVF_CERR(x) (((x) >> S_TXVF_CERR) & M_TXVF_CERR)
+
+#define S_RXVF_PERR 5
+#define M_RXVF_PERR 0x7U
+#define V_RXVF_PERR(x) ((x) << S_RXVF_PERR)
+#define G_RXVF_PERR(x) (((x) >> S_RXVF_PERR) & M_RXVF_PERR)
+
+#define S_TXVF_PERR 0
+#define M_TXVF_PERR 0x1fU
+#define V_TXVF_PERR(x) ((x) << S_TXVF_PERR)
+#define G_TXVF_PERR(x) (((x) >> S_TXVF_PERR) & M_TXVF_PERR)
+
#define A_MPS_STAT_PERR_INT_CAUSE_SRAM1 0x96c4
#define A_MPS_STAT_PERR_ENABLE_SRAM1 0x96c8
#define A_MPS_STAT_STOP_UPD_BG 0x96cc
@@ -42641,6 +42780,10 @@
#define V_FILTMEM(x) ((x) << S_FILTMEM)
#define G_FILTMEM(x) (((x) >> S_FILTMEM) & M_FILTMEM)
+#define S_T7_TRCPLERRENB 17
+#define V_T7_TRCPLERRENB(x) ((x) << S_T7_TRCPLERRENB)
+#define F_T7_TRCPLERRENB V_T7_TRCPLERRENB(1U)
+
#define S_T7_MISCPERR 16
#define V_T7_MISCPERR(x) ((x) << S_T7_MISCPERR)
#define F_T7_MISCPERR V_T7_MISCPERR(1U)
@@ -42814,11 +42957,6 @@
#define A_T7_MPS_TRC_FILTER_RUNT_CTL 0xa4a0
#define A_T7_MPS_TRC_FILTER_DROP 0xa4c0
#define A_T7_MPS_TRC_INT_ENABLE 0xa4e0
-
-#define S_T7_TRCPLERRENB 17
-#define V_T7_TRCPLERRENB(x) ((x) << S_T7_TRCPLERRENB)
-#define F_T7_TRCPLERRENB V_T7_TRCPLERRENB(1U)
-
#define A_T7_MPS_TRC_INT_CAUSE 0xa4e4
#define A_T7_MPS_TRC_TIMESTAMP_L 0xa4e8
#define A_T7_MPS_TRC_TIMESTAMP_H 0xa4ec
@@ -42885,13 +43023,72 @@
#define G_PERR_TF_IN_CTL(x) (((x) >> S_PERR_TF_IN_CTL) & M_PERR_TF_IN_CTL)
#define A_MPS_TRC_INT_ENABLE2 0xa4f4
-#define A_MPS_TRC_INT_CAUSE2 0xa4f8
-#define S_T7_TRC_TF_ECC 22
-#define M_T7_TRC_TF_ECC 0xffU
-#define V_T7_TRC_TF_ECC(x) ((x) << S_T7_TRC_TF_ECC)
-#define G_T7_TRC_TF_ECC(x) (((x) >> S_T7_TRC_TF_ECC) & M_T7_TRC_TF_ECC)
+#define S_TX2RX_DWN_CONV_PERR_PT3_CERR 16
+#define V_TX2RX_DWN_CONV_PERR_PT3_CERR(x) ((x) << S_TX2RX_DWN_CONV_PERR_PT3_CERR)
+#define F_TX2RX_DWN_CONV_PERR_PT3_CERR V_TX2RX_DWN_CONV_PERR_PT3_CERR(1U)
+
+#define S_TX2RX_DWN_CONV_PERR_PT2_CERR 15
+#define V_TX2RX_DWN_CONV_PERR_PT2_CERR(x) ((x) << S_TX2RX_DWN_CONV_PERR_PT2_CERR)
+#define F_TX2RX_DWN_CONV_PERR_PT2_CERR V_TX2RX_DWN_CONV_PERR_PT2_CERR(1U)
+
+#define S_TX2RX_DWN_CONV_PERR_PT1_CERR 14
+#define V_TX2RX_DWN_CONV_PERR_PT1_CERR(x) ((x) << S_TX2RX_DWN_CONV_PERR_PT1_CERR)
+#define F_TX2RX_DWN_CONV_PERR_PT1_CERR V_TX2RX_DWN_CONV_PERR_PT1_CERR(1U)
+
+#define S_TX2RX_DWN_CONV_PERR_PT0_CERR 13
+#define V_TX2RX_DWN_CONV_PERR_PT0_CERR(x) ((x) << S_TX2RX_DWN_CONV_PERR_PT0_CERR)
+#define F_TX2RX_DWN_CONV_PERR_PT0_CERR V_TX2RX_DWN_CONV_PERR_PT0_CERR(1U)
+#define S_MPS2MAC_DWN_CONV_PERR_PT1_CERR 12
+#define V_MPS2MAC_DWN_CONV_PERR_PT1_CERR(x) ((x) << S_MPS2MAC_DWN_CONV_PERR_PT1_CERR)
+#define F_MPS2MAC_DWN_CONV_PERR_PT1_CERR V_MPS2MAC_DWN_CONV_PERR_PT1_CERR(1U)
+
+#define S_MPS2MAC_DWN_CONV_PERR_PT0_CERR 11
+#define V_MPS2MAC_DWN_CONV_PERR_PT0_CERR(x) ((x) << S_MPS2MAC_DWN_CONV_PERR_PT0_CERR)
+#define F_MPS2MAC_DWN_CONV_PERR_PT0_CERR V_MPS2MAC_DWN_CONV_PERR_PT0_CERR(1U)
+
+#define S_MAC2MPS_DWN_CONV_PERR_PT1_CERR 10
+#define V_MAC2MPS_DWN_CONV_PERR_PT1_CERR(x) ((x) << S_MAC2MPS_DWN_CONV_PERR_PT1_CERR)
+#define F_MAC2MPS_DWN_CONV_PERR_PT1_CERR V_MAC2MPS_DWN_CONV_PERR_PT1_CERR(1U)
+
+#define S_MAC2MPS_DWN_CONV_PERR_PT0_CERR 9
+#define V_MAC2MPS_DWN_CONV_PERR_PT0_CERR(x) ((x) << S_MAC2MPS_DWN_CONV_PERR_PT0_CERR)
+#define F_MAC2MPS_DWN_CONV_PERR_PT0_CERR V_MAC2MPS_DWN_CONV_PERR_PT0_CERR(1U)
+
+#define S_TX2RX_DWN_CONV_PERR_PT3_PERR 8
+#define V_TX2RX_DWN_CONV_PERR_PT3_PERR(x) ((x) << S_TX2RX_DWN_CONV_PERR_PT3_PERR)
+#define F_TX2RX_DWN_CONV_PERR_PT3_PERR V_TX2RX_DWN_CONV_PERR_PT3_PERR(1U)
+
+#define S_TX2RX_DWN_CONV_PERR_PT2_PERR 7
+#define V_TX2RX_DWN_CONV_PERR_PT2_PERR(x) ((x) << S_TX2RX_DWN_CONV_PERR_PT2_PERR)
+#define F_TX2RX_DWN_CONV_PERR_PT2_PERR V_TX2RX_DWN_CONV_PERR_PT2_PERR(1U)
+
+#define S_TX2RX_DWN_CONV_PERR_PT1_PERR 6
+#define V_TX2RX_DWN_CONV_PERR_PT1_PERR(x) ((x) << S_TX2RX_DWN_CONV_PERR_PT1_PERR)
+#define F_TX2RX_DWN_CONV_PERR_PT1_PERR V_TX2RX_DWN_CONV_PERR_PT1_PERR(1U)
+
+#define S_TX2RX_DWN_CONV_PERR_PT0_PERR 5
+#define V_TX2RX_DWN_CONV_PERR_PT0_PERR(x) ((x) << S_TX2RX_DWN_CONV_PERR_PT0_PERR)
+#define F_TX2RX_DWN_CONV_PERR_PT0_PERR V_TX2RX_DWN_CONV_PERR_PT0_PERR(1U)
+
+#define S_MAC2MPS_DWN_CONV_PERR_PT1_PERR 4
+#define V_MAC2MPS_DWN_CONV_PERR_PT1_PERR(x) ((x) << S_MAC2MPS_DWN_CONV_PERR_PT1_PERR)
+#define F_MAC2MPS_DWN_CONV_PERR_PT1_PERR V_MAC2MPS_DWN_CONV_PERR_PT1_PERR(1U)
+
+#define S_MAC2MPS_DWN_CONV_PERR_PT0_PERR 3
+#define V_MAC2MPS_DWN_CONV_PERR_PT0_PERR(x) ((x) << S_MAC2MPS_DWN_CONV_PERR_PT0_PERR)
+#define F_MAC2MPS_DWN_CONV_PERR_PT0_PERR V_MAC2MPS_DWN_CONV_PERR_PT0_PERR(1U)
+
+#define S_MPS2MAC_DWN_CONV_PERR_PT1_PERR 2
+#define V_MPS2MAC_DWN_CONV_PERR_PT1_PERR(x) ((x) << S_MPS2MAC_DWN_CONV_PERR_PT1_PERR)
+#define F_MPS2MAC_DWN_CONV_PERR_PT1_PERR V_MPS2MAC_DWN_CONV_PERR_PT1_PERR(1U)
+
+#define S_MPS2MAC_DWN_CONV_PERR_PT0_PERR 1
+#define V_MPS2MAC_DWN_CONV_PERR_PT0_PERR(x) ((x) << S_MPS2MAC_DWN_CONV_PERR_PT0_PERR)
+#define F_MPS2MAC_DWN_CONV_PERR_PT0_PERR V_MPS2MAC_DWN_CONV_PERR_PT0_PERR(1U)
+
+#define A_MPS_TRC_INT_CAUSE2 0xa4f8
#define A_MPS_CLS_CTL 0xd000
#define S_MEMWRITEFAULT 4
@@ -43743,9 +43940,9 @@
#define A_MPS_RX_CHMN_CNT 0x11070
#define A_MPS_CTL_STAT 0x11070
-#define S_T7_CTL 0
-#define V_T7_CTL(x) ((x) << S_T7_CTL)
-#define F_T7_CTL V_T7_CTL(1U)
+#define S_T7_MPS_CTL 0
+#define V_T7_MPS_CTL(x) ((x) << S_T7_MPS_CTL)
+#define F_T7_MPS_CTL V_T7_MPS_CTL(1U)
#define A_MPS_RX_PERR_INT_CAUSE 0x11074
@@ -43849,54 +44046,60 @@
#define V_T6_INT_ERR_INT(x) ((x) << S_T6_INT_ERR_INT)
#define F_T6_INT_ERR_INT V_T6_INT_ERR_INT(1U)
-#define S_MAC_IN_FIFO_768B 30
-#define V_MAC_IN_FIFO_768B(x) ((x) << S_MAC_IN_FIFO_768B)
-#define F_MAC_IN_FIFO_768B V_MAC_IN_FIFO_768B(1U)
-
-#define S_T7_1_INT_ERR_INT 29
-#define V_T7_1_INT_ERR_INT(x) ((x) << S_T7_1_INT_ERR_INT)
-#define F_T7_1_INT_ERR_INT V_T7_1_INT_ERR_INT(1U)
+#define S_T7_INT_ERR_INT 30
+#define V_T7_INT_ERR_INT(x) ((x) << S_T7_INT_ERR_INT)
+#define F_T7_INT_ERR_INT V_T7_INT_ERR_INT(1U)
#define S_FLOP_PERR 28
#define V_FLOP_PERR(x) ((x) << S_FLOP_PERR)
#define F_FLOP_PERR V_FLOP_PERR(1U)
-#define S_RPLC_MAP 13
-#define M_RPLC_MAP 0x1fU
-#define V_RPLC_MAP(x) ((x) << S_RPLC_MAP)
-#define G_RPLC_MAP(x) (((x) >> S_RPLC_MAP) & M_RPLC_MAP)
+#define S_MPS_RX_ATRB_MAP_PERR 23
+#define V_MPS_RX_ATRB_MAP_PERR(x) ((x) << S_MPS_RX_ATRB_MAP_PERR)
+#define F_MPS_RX_ATRB_MAP_PERR V_MPS_RX_ATRB_MAP_PERR(1U)
-#define S_TKN_RUNT_DROP_FIFO 12
-#define V_TKN_RUNT_DROP_FIFO(x) ((x) << S_TKN_RUNT_DROP_FIFO)
-#define F_TKN_RUNT_DROP_FIFO V_TKN_RUNT_DROP_FIFO(1U)
+#define S_RPLC_MAP_VNI_PERR 18
+#define M_RPLC_MAP_VNI_PERR 0x1fU
+#define V_RPLC_MAP_VNI_PERR(x) ((x) << S_RPLC_MAP_VNI_PERR)
+#define G_RPLC_MAP_VNI_PERR(x) (((x) >> S_RPLC_MAP_VNI_PERR) & M_RPLC_MAP_VNI_PERR)
-#define S_T7_PPM3 9
-#define M_T7_PPM3 0x7U
-#define V_T7_PPM3(x) ((x) << S_T7_PPM3)
-#define G_T7_PPM3(x) (((x) >> S_T7_PPM3) & M_T7_PPM3)
+#define S_RPLC_MAP_MCAST_PERR 13
+#define M_RPLC_MAP_MCAST_PERR 0x1fU
+#define V_RPLC_MAP_MCAST_PERR(x) ((x) << S_RPLC_MAP_MCAST_PERR)
+#define G_RPLC_MAP_MCAST_PERR(x) (((x) >> S_RPLC_MAP_MCAST_PERR) & M_RPLC_MAP_MCAST_PERR)
-#define S_T7_PPM2 6
-#define M_T7_PPM2 0x7U
-#define V_T7_PPM2(x) ((x) << S_T7_PPM2)
-#define G_T7_PPM2(x) (((x) >> S_T7_PPM2) & M_T7_PPM2)
+#define S_PPM3_PERR 9
+#define M_PPM3_PERR 0x7U
+#define V_PPM3_PERR(x) ((x) << S_PPM3_PERR)
+#define G_PPM3_PERR(x) (((x) >> S_PPM3_PERR) & M_PPM3_PERR)
-#define S_T7_PPM1 3
-#define M_T7_PPM1 0x7U
-#define V_T7_PPM1(x) ((x) << S_T7_PPM1)
-#define G_T7_PPM1(x) (((x) >> S_T7_PPM1) & M_T7_PPM1)
+#define S_PPM2_PERR 6
+#define M_PPM2_PERR 0x7U
+#define V_PPM2_PERR(x) ((x) << S_PPM2_PERR)
+#define G_PPM2_PERR(x) (((x) >> S_PPM2_PERR) & M_PPM2_PERR)
-#define S_T7_PPM0 0
-#define M_T7_PPM0 0x7U
-#define V_T7_PPM0(x) ((x) << S_T7_PPM0)
-#define G_T7_PPM0(x) (((x) >> S_T7_PPM0) & M_T7_PPM0)
+#define S_PPM1_PERR 3
+#define M_PPM1_PERR 0x7U
+#define V_PPM1_PERR(x) ((x) << S_PPM1_PERR)
+#define G_PPM1_PERR(x) (((x) >> S_PPM1_PERR) & M_PPM1_PERR)
+
+#define S_PPM0_PERR 0
+#define M_PPM0_PERR 0x7U
+#define V_PPM0_PERR(x) ((x) << S_PPM0_PERR)
+#define G_PPM0_PERR(x) (((x) >> S_PPM0_PERR) & M_PPM0_PERR)
#define A_MPS_RX_PERR_INT_ENABLE 0x11078
+#define A_MPS_RX_PERR_ENABLE 0x1107c
-#define S_T7_2_INT_ERR_INT 30
-#define V_T7_2_INT_ERR_INT(x) ((x) << S_T7_2_INT_ERR_INT)
-#define F_T7_2_INT_ERR_INT V_T7_2_INT_ERR_INT(1U)
+#define S_MPS_RX_ATRB_MA_PERRP 23
+#define V_MPS_RX_ATRB_MA_PERRP(x) ((x) << S_MPS_RX_ATRB_MA_PERRP)
+#define F_MPS_RX_ATRB_MA_PERRP V_MPS_RX_ATRB_MA_PERRP(1U)
+
+#define S_RPLC_MAP_VN_PERRI 18
+#define M_RPLC_MAP_VN_PERRI 0x1fU
+#define V_RPLC_MAP_VN_PERRI(x) ((x) << S_RPLC_MAP_VN_PERRI)
+#define G_RPLC_MAP_VN_PERRI(x) (((x) >> S_RPLC_MAP_VN_PERRI) & M_RPLC_MAP_VN_PERRI)
-#define A_MPS_RX_PERR_ENABLE 0x1107c
#define A_MPS_RX_PERR_INJECT 0x11080
#define A_MPS_RX_FUNC_INT_CAUSE 0x11084
@@ -43965,6 +44168,38 @@
#define V_LEN_ERR_INT(x) ((x) << S_LEN_ERR_INT)
#define F_LEN_ERR_INT V_LEN_ERR_INT(1U)
+#define S_MTU_ERR3 19
+#define V_MTU_ERR3(x) ((x) << S_MTU_ERR3)
+#define F_MTU_ERR3 V_MTU_ERR3(1U)
+
+#define S_MTU_ERR2 18
+#define V_MTU_ERR2(x) ((x) << S_MTU_ERR2)
+#define F_MTU_ERR2 V_MTU_ERR2(1U)
+
+#define S_MTU_ERR1 17
+#define V_MTU_ERR1(x) ((x) << S_MTU_ERR1)
+#define F_MTU_ERR1 V_MTU_ERR1(1U)
+
+#define S_MTU_ERR0 16
+#define V_MTU_ERR0(x) ((x) << S_MTU_ERR0)
+#define F_MTU_ERR0 V_MTU_ERR0(1U)
+
+#define S_DBG_LEN_ERR 15
+#define V_DBG_LEN_ERR(x) ((x) << S_DBG_LEN_ERR)
+#define F_DBG_LEN_ERR V_DBG_LEN_ERR(1U)
+
+#define S_DBG_SPI_ERR 14
+#define V_DBG_SPI_ERR(x) ((x) << S_DBG_SPI_ERR)
+#define F_DBG_SPI_ERR V_DBG_SPI_ERR(1U)
+
+#define S_DBG_SE_CNT_ERR 13
+#define V_DBG_SE_CNT_ERR(x) ((x) << S_DBG_SE_CNT_ERR)
+#define F_DBG_SE_CNT_ERR V_DBG_SE_CNT_ERR(1U)
+
+#define S_DBG_SPI_LEN_SE_CNT_ERR 12
+#define V_DBG_SPI_LEN_SE_CNT_ERR(x) ((x) << S_DBG_SPI_LEN_SE_CNT_ERR)
+#define F_DBG_SPI_LEN_SE_CNT_ERR V_DBG_SPI_LEN_SE_CNT_ERR(1U)
+
#define A_MPS_RX_FUNC_INT_ENABLE 0x11088
#define A_MPS_RX_PAUSE_GEN_TH_0 0x1108c
@@ -43980,59 +44215,59 @@
#define A_MPS_RX_PERR_INT_CAUSE2 0x1108c
-#define S_CRYPT2MPS_RX_INTF_FIFO 28
-#define M_CRYPT2MPS_RX_INTF_FIFO 0xfU
-#define V_CRYPT2MPS_RX_INTF_FIFO(x) ((x) << S_CRYPT2MPS_RX_INTF_FIFO)
-#define G_CRYPT2MPS_RX_INTF_FIFO(x) (((x) >> S_CRYPT2MPS_RX_INTF_FIFO) & M_CRYPT2MPS_RX_INTF_FIFO)
+#define S_CRYPTO2MPS_RX0_PERR 31
+#define V_CRYPTO2MPS_RX0_PERR(x) ((x) << S_CRYPTO2MPS_RX0_PERR)
+#define F_CRYPTO2MPS_RX0_PERR V_CRYPTO2MPS_RX0_PERR(1U)
-#define S_INIC2MPS_TX0_PERR 27
-#define V_INIC2MPS_TX0_PERR(x) ((x) << S_INIC2MPS_TX0_PERR)
-#define F_INIC2MPS_TX0_PERR V_INIC2MPS_TX0_PERR(1U)
+#define S_CRYPTO2MPS_RX1_PERR 30
+#define V_CRYPTO2MPS_RX1_PERR(x) ((x) << S_CRYPTO2MPS_RX1_PERR)
+#define F_CRYPTO2MPS_RX1_PERR V_CRYPTO2MPS_RX1_PERR(1U)
+
+#define S_CRYPTO2MPS_RX2_PERR 29
+#define V_CRYPTO2MPS_RX2_PERR(x) ((x) << S_CRYPTO2MPS_RX2_PERR)
+#define F_CRYPTO2MPS_RX2_PERR V_CRYPTO2MPS_RX2_PERR(1U)
-#define S_INIC2MPS_TX1_PERR 26
+#define S_CRYPTO2MPS_RX3_PERR 28
+#define V_CRYPTO2MPS_RX3_PERR(x) ((x) << S_CRYPTO2MPS_RX3_PERR)
+#define F_CRYPTO2MPS_RX3_PERR V_CRYPTO2MPS_RX3_PERR(1U)
+
+#define S_INIC2MPS_TX1_PERR 27
#define V_INIC2MPS_TX1_PERR(x) ((x) << S_INIC2MPS_TX1_PERR)
#define F_INIC2MPS_TX1_PERR V_INIC2MPS_TX1_PERR(1U)
-#define S_XGMAC2MPS_RX0_PERR 25
-#define V_XGMAC2MPS_RX0_PERR(x) ((x) << S_XGMAC2MPS_RX0_PERR)
-#define F_XGMAC2MPS_RX0_PERR V_XGMAC2MPS_RX0_PERR(1U)
+#define S_INIC2MPS_TX0_PERR 26
+#define V_INIC2MPS_TX0_PERR(x) ((x) << S_INIC2MPS_TX0_PERR)
+#define F_INIC2MPS_TX0_PERR V_INIC2MPS_TX0_PERR(1U)
-#define S_XGMAC2MPS_RX1_PERR 24
+#define S_XGMAC2MPS_RX1_PERR 25
#define V_XGMAC2MPS_RX1_PERR(x) ((x) << S_XGMAC2MPS_RX1_PERR)
#define F_XGMAC2MPS_RX1_PERR V_XGMAC2MPS_RX1_PERR(1U)
-#define S_MPS2CRYPTO_RX_INTF_FIFO 20
-#define M_MPS2CRYPTO_RX_INTF_FIFO 0xfU
-#define V_MPS2CRYPTO_RX_INTF_FIFO(x) ((x) << S_MPS2CRYPTO_RX_INTF_FIFO)
-#define G_MPS2CRYPTO_RX_INTF_FIFO(x) (((x) >> S_MPS2CRYPTO_RX_INTF_FIFO) & M_MPS2CRYPTO_RX_INTF_FIFO)
-
-#define S_MAC_RX_PPROC_MPS2TP_TF 19
-#define V_MAC_RX_PPROC_MPS2TP_TF(x) ((x) << S_MAC_RX_PPROC_MPS2TP_TF)
-#define F_MAC_RX_PPROC_MPS2TP_TF V_MAC_RX_PPROC_MPS2TP_TF(1U)
-
-#define S_MAC_RX_PPROC_LB_CH3 18
-#define V_MAC_RX_PPROC_LB_CH3(x) ((x) << S_MAC_RX_PPROC_LB_CH3)
-#define F_MAC_RX_PPROC_LB_CH3 V_MAC_RX_PPROC_LB_CH3(1U)
+#define S_XGMAC2MPS_RX0_PERR 24
+#define V_XGMAC2MPS_RX0_PERR(x) ((x) << S_XGMAC2MPS_RX0_PERR)
+#define F_XGMAC2MPS_RX0_PERR V_XGMAC2MPS_RX0_PERR(1U)
-#define S_MAC_RX_PPROC_LB_CH2 17
-#define V_MAC_RX_PPROC_LB_CH2(x) ((x) << S_MAC_RX_PPROC_LB_CH2)
-#define F_MAC_RX_PPROC_LB_CH2 V_MAC_RX_PPROC_LB_CH2(1U)
+#define S_MPS2CRYPTO_CH0_INTF_FIFO_PERR 20
+#define M_MPS2CRYPTO_CH0_INTF_FIFO_PERR 0xfU
+#define V_MPS2CRYPTO_CH0_INTF_FIFO_PERR(x) ((x) << S_MPS2CRYPTO_CH0_INTF_FIFO_PERR)
+#define G_MPS2CRYPTO_CH0_INTF_FIFO_PERR(x) (((x) >> S_MPS2CRYPTO_CH0_INTF_FIFO_PERR) & M_MPS2CRYPTO_CH0_INTF_FIFO_PERR)
-#define S_MAC_RX_PPROC_LB_CH1 16
-#define V_MAC_RX_PPROC_LB_CH1(x) ((x) << S_MAC_RX_PPROC_LB_CH1)
-#define F_MAC_RX_PPROC_LB_CH1 V_MAC_RX_PPROC_LB_CH1(1U)
+#define S_RX_FINAL_TF_FIFO_PERR 19
+#define V_RX_FINAL_TF_FIFO_PERR(x) ((x) << S_RX_FINAL_TF_FIFO_PERR)
+#define F_RX_FINAL_TF_FIFO_PERR V_RX_FINAL_TF_FIFO_PERR(1U)
-#define S_MAC_RX_PPROC_LB_CH0 15
-#define V_MAC_RX_PPROC_LB_CH0(x) ((x) << S_MAC_RX_PPROC_LB_CH0)
-#define F_MAC_RX_PPROC_LB_CH0 V_MAC_RX_PPROC_LB_CH0(1U)
+#define S_MPS_LB_FIFO_PERR 15
+#define M_MPS_LB_FIFO_PERR 0xfU
+#define V_MPS_LB_FIFO_PERR(x) ((x) << S_MPS_LB_FIFO_PERR)
+#define G_MPS_LB_FIFO_PERR(x) (((x) >> S_MPS_LB_FIFO_PERR) & M_MPS_LB_FIFO_PERR)
-#define S_MAC_RX_PPROC_DWRR_CH0_3 14
-#define V_MAC_RX_PPROC_DWRR_CH0_3(x) ((x) << S_MAC_RX_PPROC_DWRR_CH0_3)
-#define F_MAC_RX_PPROC_DWRR_CH0_3 V_MAC_RX_PPROC_DWRR_CH0_3(1U)
+#define S_MPS_DWRR_FIFO_PERR 14
+#define V_MPS_DWRR_FIFO_PERR(x) ((x) << S_MPS_DWRR_FIFO_PERR)
+#define F_MPS_DWRR_FIFO_PERR V_MPS_DWRR_FIFO_PERR(1U)
-#define S_MAC_RX_FIFO_PERR 13
-#define V_MAC_RX_FIFO_PERR(x) ((x) << S_MAC_RX_FIFO_PERR)
-#define F_MAC_RX_FIFO_PERR V_MAC_RX_FIFO_PERR(1U)
+#define S_MAC_TF_FIFO_PERR 13
+#define V_MAC_TF_FIFO_PERR(x) ((x) << S_MAC_TF_FIFO_PERR)
+#define F_MAC_TF_FIFO_PERR V_MAC_TF_FIFO_PERR(1U)
#define S_MAC2MPS_PT3_PERR 12
#define V_MAC2MPS_PT3_PERR(x) ((x) << S_MAC2MPS_PT3_PERR)
@@ -44050,13 +44285,18 @@
#define V_MAC2MPS_PT0_PERR(x) ((x) << S_MAC2MPS_PT0_PERR)
#define F_MAC2MPS_PT0_PERR V_MAC2MPS_PT0_PERR(1U)
-#define S_LPBK_FIFO_PERR 8
-#define V_LPBK_FIFO_PERR(x) ((x) << S_LPBK_FIFO_PERR)
-#define F_LPBK_FIFO_PERR V_LPBK_FIFO_PERR(1U)
+#define S_TP_LPBK_FIFO_PERR 8
+#define V_TP_LPBK_FIFO_PERR(x) ((x) << S_TP_LPBK_FIFO_PERR)
+#define F_TP_LPBK_FIFO_PERR V_TP_LPBK_FIFO_PERR(1U)
-#define S_TP2MPS_TF_FIFO_PERR 7
-#define V_TP2MPS_TF_FIFO_PERR(x) ((x) << S_TP2MPS_TF_FIFO_PERR)
-#define F_TP2MPS_TF_FIFO_PERR V_TP2MPS_TF_FIFO_PERR(1U)
+#define S_TP_LPBK_TF_PERR 7
+#define V_TP_LPBK_TF_PERR(x) ((x) << S_TP_LPBK_TF_PERR)
+#define F_TP_LPBK_TF_PERR V_TP_LPBK_TF_PERR(1U)
+
+#define S_RSDV1 0
+#define M_RSDV1 0x7fU
+#define V_RSDV1(x) ((x) << S_RSDV1)
+#define G_RSDV1(x) (((x) >> S_RSDV1) & M_RSDV1)
#define A_MPS_RX_PAUSE_GEN_TH_1 0x11090
#define A_MPS_RX_PERR_INT_ENABLE2 0x11090
@@ -44978,67 +45218,407 @@
#define A_MPS_VF_RPLCT_MAP6 0x11308
#define A_MPS_VF_RPLCT_MAP7 0x1130c
#define A_MPS_RX_PERR_INT_CAUSE3 0x11310
+
+#define S_FIFO_REPL_CH3_CERR 28
+#define V_FIFO_REPL_CH3_CERR(x) ((x) << S_FIFO_REPL_CH3_CERR)
+#define F_FIFO_REPL_CH3_CERR V_FIFO_REPL_CH3_CERR(1U)
+
+#define S_FIFO_REPL_CH2_CERR 27
+#define V_FIFO_REPL_CH2_CERR(x) ((x) << S_FIFO_REPL_CH2_CERR)
+#define F_FIFO_REPL_CH2_CERR V_FIFO_REPL_CH2_CERR(1U)
+
+#define S_FIFO_REPL_CH1_CERR 26
+#define V_FIFO_REPL_CH1_CERR(x) ((x) << S_FIFO_REPL_CH1_CERR)
+#define F_FIFO_REPL_CH1_CERR V_FIFO_REPL_CH1_CERR(1U)
+
+#define S_FIFO_REPL_CH0_CERR 25
+#define V_FIFO_REPL_CH0_CERR(x) ((x) << S_FIFO_REPL_CH0_CERR)
+#define F_FIFO_REPL_CH0_CERR V_FIFO_REPL_CH0_CERR(1U)
+
+#define S_VLAN_FILTER_RAM_CERR 24
+#define V_VLAN_FILTER_RAM_CERR(x) ((x) << S_VLAN_FILTER_RAM_CERR)
+#define F_VLAN_FILTER_RAM_CERR V_VLAN_FILTER_RAM_CERR(1U)
+
+#define S_MPS_RX_TD_STAT_FIFO_PERR_CH3 23
+#define V_MPS_RX_TD_STAT_FIFO_PERR_CH3(x) ((x) << S_MPS_RX_TD_STAT_FIFO_PERR_CH3)
+#define F_MPS_RX_TD_STAT_FIFO_PERR_CH3 V_MPS_RX_TD_STAT_FIFO_PERR_CH3(1U)
+
+#define S_RPLCT_HDR_FIFO_IN_PERR_CH3 22
+#define V_RPLCT_HDR_FIFO_IN_PERR_CH3(x) ((x) << S_RPLCT_HDR_FIFO_IN_PERR_CH3)
+#define F_RPLCT_HDR_FIFO_IN_PERR_CH3 V_RPLCT_HDR_FIFO_IN_PERR_CH3(1U)
+
+#define S_ID_FIFO_IN_PERR_CH3 21
+#define V_ID_FIFO_IN_PERR_CH3(x) ((x) << S_ID_FIFO_IN_PERR_CH3)
+#define F_ID_FIFO_IN_PERR_CH3 V_ID_FIFO_IN_PERR_CH3(1U)
+
+#define S_DESC_HDR2_PERR_CH3 20
+#define V_DESC_HDR2_PERR_CH3(x) ((x) << S_DESC_HDR2_PERR_CH3)
+#define F_DESC_HDR2_PERR_CH3 V_DESC_HDR2_PERR_CH3(1U)
+
+#define S_FIFO_REPL_PERR_CH3 19
+#define V_FIFO_REPL_PERR_CH3(x) ((x) << S_FIFO_REPL_PERR_CH3)
+#define F_FIFO_REPL_PERR_CH3 V_FIFO_REPL_PERR_CH3(1U)
+
+#define S_MPS_RX_TD_PERR_CH3 18
+#define V_MPS_RX_TD_PERR_CH3(x) ((x) << S_MPS_RX_TD_PERR_CH3)
+#define F_MPS_RX_TD_PERR_CH3 V_MPS_RX_TD_PERR_CH3(1U)
+
+#define S_MPS_RX_TD_STAT_FIFO_PERR_CH2 17
+#define V_MPS_RX_TD_STAT_FIFO_PERR_CH2(x) ((x) << S_MPS_RX_TD_STAT_FIFO_PERR_CH2)
+#define F_MPS_RX_TD_STAT_FIFO_PERR_CH2 V_MPS_RX_TD_STAT_FIFO_PERR_CH2(1U)
+
+#define S_RPLCT_HDR_FIFO_IN_PERR_CH2 16
+#define V_RPLCT_HDR_FIFO_IN_PERR_CH2(x) ((x) << S_RPLCT_HDR_FIFO_IN_PERR_CH2)
+#define F_RPLCT_HDR_FIFO_IN_PERR_CH2 V_RPLCT_HDR_FIFO_IN_PERR_CH2(1U)
+
+#define S_ID_FIFO_IN_PERR_CH2 15
+#define V_ID_FIFO_IN_PERR_CH2(x) ((x) << S_ID_FIFO_IN_PERR_CH2)
+#define F_ID_FIFO_IN_PERR_CH2 V_ID_FIFO_IN_PERR_CH2(1U)
+
+#define S_DESC_HDR2_PERR_CH2 14
+#define V_DESC_HDR2_PERR_CH2(x) ((x) << S_DESC_HDR2_PERR_CH2)
+#define F_DESC_HDR2_PERR_CH2 V_DESC_HDR2_PERR_CH2(1U)
+
+#define S_FIFO_REPL_PERR_CH2 13
+#define V_FIFO_REPL_PERR_CH2(x) ((x) << S_FIFO_REPL_PERR_CH2)
+#define F_FIFO_REPL_PERR_CH2 V_FIFO_REPL_PERR_CH2(1U)
+
+#define S_MPS_RX_TD_PERR_CH2 12
+#define V_MPS_RX_TD_PERR_CH2(x) ((x) << S_MPS_RX_TD_PERR_CH2)
+#define F_MPS_RX_TD_PERR_CH2 V_MPS_RX_TD_PERR_CH2(1U)
+
+#define S_MPS_RX_TD_STAT_FIFO_PERR_CH1 11
+#define V_MPS_RX_TD_STAT_FIFO_PERR_CH1(x) ((x) << S_MPS_RX_TD_STAT_FIFO_PERR_CH1)
+#define F_MPS_RX_TD_STAT_FIFO_PERR_CH1 V_MPS_RX_TD_STAT_FIFO_PERR_CH1(1U)
+
+#define S_RPLCT_HDR_FIFO_IN_PERR_CH1 10
+#define V_RPLCT_HDR_FIFO_IN_PERR_CH1(x) ((x) << S_RPLCT_HDR_FIFO_IN_PERR_CH1)
+#define F_RPLCT_HDR_FIFO_IN_PERR_CH1 V_RPLCT_HDR_FIFO_IN_PERR_CH1(1U)
+
+#define S_ID_FIFO_IN_PERR_CH1 9
+#define V_ID_FIFO_IN_PERR_CH1(x) ((x) << S_ID_FIFO_IN_PERR_CH1)
+#define F_ID_FIFO_IN_PERR_CH1 V_ID_FIFO_IN_PERR_CH1(1U)
+
+#define S_DESC_HDR2_PERR_CH1 8
+#define V_DESC_HDR2_PERR_CH1(x) ((x) << S_DESC_HDR2_PERR_CH1)
+#define F_DESC_HDR2_PERR_CH1 V_DESC_HDR2_PERR_CH1(1U)
+
+#define S_FIFO_REPL_PERR_CH1 7
+#define V_FIFO_REPL_PERR_CH1(x) ((x) << S_FIFO_REPL_PERR_CH1)
+#define F_FIFO_REPL_PERR_CH1 V_FIFO_REPL_PERR_CH1(1U)
+
+#define S_MPS_RX_TD_PERR_CH1 6
+#define V_MPS_RX_TD_PERR_CH1(x) ((x) << S_MPS_RX_TD_PERR_CH1)
+#define F_MPS_RX_TD_PERR_CH1 V_MPS_RX_TD_PERR_CH1(1U)
+
+#define S_MPS_RX_TD_STAT_FIFO_PERR_CH0 5
+#define V_MPS_RX_TD_STAT_FIFO_PERR_CH0(x) ((x) << S_MPS_RX_TD_STAT_FIFO_PERR_CH0)
+#define F_MPS_RX_TD_STAT_FIFO_PERR_CH0 V_MPS_RX_TD_STAT_FIFO_PERR_CH0(1U)
+
+#define S_RPLCT_HDR_FIFO_IN_PERR_CH0 4
+#define V_RPLCT_HDR_FIFO_IN_PERR_CH0(x) ((x) << S_RPLCT_HDR_FIFO_IN_PERR_CH0)
+#define F_RPLCT_HDR_FIFO_IN_PERR_CH0 V_RPLCT_HDR_FIFO_IN_PERR_CH0(1U)
+
+#define S_ID_FIFO_IN_PERR_CH0 3
+#define V_ID_FIFO_IN_PERR_CH0(x) ((x) << S_ID_FIFO_IN_PERR_CH0)
+#define F_ID_FIFO_IN_PERR_CH0 V_ID_FIFO_IN_PERR_CH0(1U)
+
+#define S_DESC_HDR2_PERR_CH0 2
+#define V_DESC_HDR2_PERR_CH0(x) ((x) << S_DESC_HDR2_PERR_CH0)
+#define F_DESC_HDR2_PERR_CH0 V_DESC_HDR2_PERR_CH0(1U)
+
+#define S_FIFO_REPL_PERR_CH0 1
+#define V_FIFO_REPL_PERR_CH0(x) ((x) << S_FIFO_REPL_PERR_CH0)
+#define F_FIFO_REPL_PERR_CH0 V_FIFO_REPL_PERR_CH0(1U)
+
+#define S_MPS_RX_TD_PERR_CH0 0
+#define V_MPS_RX_TD_PERR_CH0(x) ((x) << S_MPS_RX_TD_PERR_CH0)
+#define F_MPS_RX_TD_PERR_CH0 V_MPS_RX_TD_PERR_CH0(1U)
+
#define A_MPS_RX_PERR_INT_ENABLE3 0x11314
#define A_MPS_RX_PERR_ENABLE3 0x11318
#define A_MPS_RX_PERR_INT_CAUSE4 0x1131c
-#define S_CLS 20
-#define M_CLS 0x3fU
-#define V_CLS(x) ((x) << S_CLS)
-#define G_CLS(x) (((x) >> S_CLS) & M_CLS)
+#define S_VNI_MULTICAST_FIFO_ECC_ERR_CH3 30
+#define V_VNI_MULTICAST_FIFO_ECC_ERR_CH3(x) ((x) << S_VNI_MULTICAST_FIFO_ECC_ERR_CH3)
+#define F_VNI_MULTICAST_FIFO_ECC_ERR_CH3 V_VNI_MULTICAST_FIFO_ECC_ERR_CH3(1U)
+
+#define S_VNI_MULTICAST_FIFO_ECC_ERR_CH2 29
+#define V_VNI_MULTICAST_FIFO_ECC_ERR_CH2(x) ((x) << S_VNI_MULTICAST_FIFO_ECC_ERR_CH2)
+#define F_VNI_MULTICAST_FIFO_ECC_ERR_CH2 V_VNI_MULTICAST_FIFO_ECC_ERR_CH2(1U)
+
+#define S_HASH_SRAM_CLS_ENG1 28
+#define V_HASH_SRAM_CLS_ENG1(x) ((x) << S_HASH_SRAM_CLS_ENG1)
+#define F_HASH_SRAM_CLS_ENG1 V_HASH_SRAM_CLS_ENG1(1U)
+
+#define S_HASH_SRAM_CLS_ENG0 27
+#define V_HASH_SRAM_CLS_ENG0(x) ((x) << S_HASH_SRAM_CLS_ENG0)
+#define F_HASH_SRAM_CLS_ENG0 V_HASH_SRAM_CLS_ENG0(1U)
+
+#define S_CLS_TCAM_SRAM_CLS_ENG1 26
+#define V_CLS_TCAM_SRAM_CLS_ENG1(x) ((x) << S_CLS_TCAM_SRAM_CLS_ENG1)
+#define F_CLS_TCAM_SRAM_CLS_ENG1 V_CLS_TCAM_SRAM_CLS_ENG1(1U)
+
+#define S_CLS_TCAM_CRC_SRAM_CLS_ENG1 25
+#define V_CLS_TCAM_CRC_SRAM_CLS_ENG1(x) ((x) << S_CLS_TCAM_CRC_SRAM_CLS_ENG1)
+#define F_CLS_TCAM_CRC_SRAM_CLS_ENG1 V_CLS_TCAM_CRC_SRAM_CLS_ENG1(1U)
+
+#define S_CLS_TCAM_SRAM_CLS_ENG0 24
+#define V_CLS_TCAM_SRAM_CLS_ENG0(x) ((x) << S_CLS_TCAM_SRAM_CLS_ENG0)
+#define F_CLS_TCAM_SRAM_CLS_ENG0 V_CLS_TCAM_SRAM_CLS_ENG0(1U)
+
+#define S_CLS_TCAM_CRC_SRAM_CLS_ENG0 23
+#define V_CLS_TCAM_CRC_SRAM_CLS_ENG0(x) ((x) << S_CLS_TCAM_CRC_SRAM_CLS_ENG0)
+#define F_CLS_TCAM_CRC_SRAM_CLS_ENG0 V_CLS_TCAM_CRC_SRAM_CLS_ENG0(1U)
+
+#define S_LB_FIFO_ECC_ERR 19
+#define M_LB_FIFO_ECC_ERR 0xfU
+#define V_LB_FIFO_ECC_ERR(x) ((x) << S_LB_FIFO_ECC_ERR)
+#define G_LB_FIFO_ECC_ERR(x) (((x) >> S_LB_FIFO_ECC_ERR) & M_LB_FIFO_ECC_ERR)
+
+#define S_DWRR_CH_FIFO_ECC_ERR 18
+#define V_DWRR_CH_FIFO_ECC_ERR(x) ((x) << S_DWRR_CH_FIFO_ECC_ERR)
+#define F_DWRR_CH_FIFO_ECC_ERR V_DWRR_CH_FIFO_ECC_ERR(1U)
+
+#define S_MAC_RX_FIFO_ECC_ERR 17
+#define V_MAC_RX_FIFO_ECC_ERR(x) ((x) << S_MAC_RX_FIFO_ECC_ERR)
+#define F_MAC_RX_FIFO_ECC_ERR V_MAC_RX_FIFO_ECC_ERR(1U)
+
+#define S_LPBK_RX_FIFO_ECC_ERR 16
+#define V_LPBK_RX_FIFO_ECC_ERR(x) ((x) << S_LPBK_RX_FIFO_ECC_ERR)
+#define F_LPBK_RX_FIFO_ECC_ERR V_LPBK_RX_FIFO_ECC_ERR(1U)
+
+#define S_CRS_DATA_STORE_N_FWD_CH3 15
+#define V_CRS_DATA_STORE_N_FWD_CH3(x) ((x) << S_CRS_DATA_STORE_N_FWD_CH3)
+#define F_CRS_DATA_STORE_N_FWD_CH3 V_CRS_DATA_STORE_N_FWD_CH3(1U)
+
+#define S_TRACE_FWD_FIFO_CERR_CH3 14
+#define V_TRACE_FWD_FIFO_CERR_CH3(x) ((x) << S_TRACE_FWD_FIFO_CERR_CH3)
+#define F_TRACE_FWD_FIFO_CERR_CH3 V_TRACE_FWD_FIFO_CERR_CH3(1U)
-#define S_RX_PRE_PROC 16
-#define M_RX_PRE_PROC 0xfU
-#define V_RX_PRE_PROC(x) ((x) << S_RX_PRE_PROC)
-#define G_RX_PRE_PROC(x) (((x) >> S_RX_PRE_PROC) & M_RX_PRE_PROC)
+#define S_TRANSPARENT_ENCAP_FWD_FIFO_CERR_CH3 13
+#define V_TRANSPARENT_ENCAP_FWD_FIFO_CERR_CH3(x) ((x) << S_TRANSPARENT_ENCAP_FWD_FIFO_CERR_CH3)
+#define F_TRANSPARENT_ENCAP_FWD_FIFO_CERR_CH3 V_TRANSPARENT_ENCAP_FWD_FIFO_CERR_CH3(1U)
-#define S_PPROC3 12
-#define M_PPROC3 0xfU
-#define V_PPROC3(x) ((x) << S_PPROC3)
-#define G_PPROC3(x) (((x) >> S_PPROC3) & M_PPROC3)
+#define S_PTP_TRACE_FWD_FIFO_CERR_CH3 12
+#define V_PTP_TRACE_FWD_FIFO_CERR_CH3(x) ((x) << S_PTP_TRACE_FWD_FIFO_CERR_CH3)
+#define F_PTP_TRACE_FWD_FIFO_CERR_CH3 V_PTP_TRACE_FWD_FIFO_CERR_CH3(1U)
-#define S_PPROC2 8
-#define M_PPROC2 0xfU
-#define V_PPROC2(x) ((x) << S_PPROC2)
-#define G_PPROC2(x) (((x) >> S_PPROC2) & M_PPROC2)
+#define S_CRS_DATA_STORE_N_FWD_CH2 11
+#define V_CRS_DATA_STORE_N_FWD_CH2(x) ((x) << S_CRS_DATA_STORE_N_FWD_CH2)
+#define F_CRS_DATA_STORE_N_FWD_CH2 V_CRS_DATA_STORE_N_FWD_CH2(1U)
-#define S_PPROC1 4
-#define M_PPROC1 0xfU
-#define V_PPROC1(x) ((x) << S_PPROC1)
-#define G_PPROC1(x) (((x) >> S_PPROC1) & M_PPROC1)
+#define S_TRACE_FWD_FIFO_CERR_CH2 10
+#define V_TRACE_FWD_FIFO_CERR_CH2(x) ((x) << S_TRACE_FWD_FIFO_CERR_CH2)
+#define F_TRACE_FWD_FIFO_CERR_CH2 V_TRACE_FWD_FIFO_CERR_CH2(1U)
-#define S_PPROC0 0
-#define M_PPROC0 0xfU
-#define V_PPROC0(x) ((x) << S_PPROC0)
-#define G_PPROC0(x) (((x) >> S_PPROC0) & M_PPROC0)
+#define S_TRANSPARENT_ENCAP_FWD_FIFO_CERR_CH2 9
+#define V_TRANSPARENT_ENCAP_FWD_FIFO_CERR_CH2(x) ((x) << S_TRANSPARENT_ENCAP_FWD_FIFO_CERR_CH2)
+#define F_TRANSPARENT_ENCAP_FWD_FIFO_CERR_CH2 V_TRANSPARENT_ENCAP_FWD_FIFO_CERR_CH2(1U)
+
+#define S_PTP_TRACE_FWD_FIFO_CERR_CH2 8
+#define V_PTP_TRACE_FWD_FIFO_CERR_CH2(x) ((x) << S_PTP_TRACE_FWD_FIFO_CERR_CH2)
+#define F_PTP_TRACE_FWD_FIFO_CERR_CH2 V_PTP_TRACE_FWD_FIFO_CERR_CH2(1U)
+
+#define S_CRS_DATA_STORE_N_FWD_CH1 7
+#define V_CRS_DATA_STORE_N_FWD_CH1(x) ((x) << S_CRS_DATA_STORE_N_FWD_CH1)
+#define F_CRS_DATA_STORE_N_FWD_CH1 V_CRS_DATA_STORE_N_FWD_CH1(1U)
+
+#define S_TRACE_FWD_FIFO_CERR_CH1 6
+#define V_TRACE_FWD_FIFO_CERR_CH1(x) ((x) << S_TRACE_FWD_FIFO_CERR_CH1)
+#define F_TRACE_FWD_FIFO_CERR_CH1 V_TRACE_FWD_FIFO_CERR_CH1(1U)
+
+#define S_TRANSPARENT_ENCAP_FWD_FIFO_CERR_CH1 5
+#define V_TRANSPARENT_ENCAP_FWD_FIFO_CERR_CH1(x) ((x) << S_TRANSPARENT_ENCAP_FWD_FIFO_CERR_CH1)
+#define F_TRANSPARENT_ENCAP_FWD_FIFO_CERR_CH1 V_TRANSPARENT_ENCAP_FWD_FIFO_CERR_CH1(1U)
+
+#define S_PTP_TRACE_FWD_FIFO_CERR_CH1 4
+#define V_PTP_TRACE_FWD_FIFO_CERR_CH1(x) ((x) << S_PTP_TRACE_FWD_FIFO_CERR_CH1)
+#define F_PTP_TRACE_FWD_FIFO_CERR_CH1 V_PTP_TRACE_FWD_FIFO_CERR_CH1(1U)
+
+#define S_CRS_DATA_STORE_N_FWD_CH0 3
+#define V_CRS_DATA_STORE_N_FWD_CH0(x) ((x) << S_CRS_DATA_STORE_N_FWD_CH0)
+#define F_CRS_DATA_STORE_N_FWD_CH0 V_CRS_DATA_STORE_N_FWD_CH0(1U)
+
+#define S_TRACE_FWD_FIFO_CERR_CH0 2
+#define V_TRACE_FWD_FIFO_CERR_CH0(x) ((x) << S_TRACE_FWD_FIFO_CERR_CH0)
+#define F_TRACE_FWD_FIFO_CERR_CH0 V_TRACE_FWD_FIFO_CERR_CH0(1U)
+
+#define S_TRANSPARENT_ENCAP_FWD_FIFO_CERR_CH0 1
+#define V_TRANSPARENT_ENCAP_FWD_FIFO_CERR_CH0(x) ((x) << S_TRANSPARENT_ENCAP_FWD_FIFO_CERR_CH0)
+#define F_TRANSPARENT_ENCAP_FWD_FIFO_CERR_CH0 V_TRANSPARENT_ENCAP_FWD_FIFO_CERR_CH0(1U)
+
+#define S_PTP_TRACE_FWD_FIFO_CERR_CH0 0
+#define V_PTP_TRACE_FWD_FIFO_CERR_CH0(x) ((x) << S_PTP_TRACE_FWD_FIFO_CERR_CH0)
+#define F_PTP_TRACE_FWD_FIFO_CERR_CH0 V_PTP_TRACE_FWD_FIFO_CERR_CH0(1U)
#define A_MPS_RX_PERR_INT_ENABLE4 0x11320
#define A_MPS_RX_PERR_ENABLE4 0x11324
#define A_MPS_RX_PERR_INT_CAUSE5 0x11328
-#define S_MPS2CRYP_RX_FIFO 26
-#define M_MPS2CRYP_RX_FIFO 0xfU
-#define V_MPS2CRYP_RX_FIFO(x) ((x) << S_MPS2CRYP_RX_FIFO)
-#define G_MPS2CRYP_RX_FIFO(x) (((x) >> S_MPS2CRYP_RX_FIFO) & M_MPS2CRYP_RX_FIFO)
+#define S_MPS2CRYP_RX_FIFO3_PERR 31
+#define V_MPS2CRYP_RX_FIFO3_PERR(x) ((x) << S_MPS2CRYP_RX_FIFO3_PERR)
+#define F_MPS2CRYP_RX_FIFO3_PERR V_MPS2CRYP_RX_FIFO3_PERR(1U)
+
+#define S_MPS2CRYP_RX_FIFO2_PERR 30
+#define V_MPS2CRYP_RX_FIFO2_PERR(x) ((x) << S_MPS2CRYP_RX_FIFO2_PERR)
+#define F_MPS2CRYP_RX_FIFO2_PERR V_MPS2CRYP_RX_FIFO2_PERR(1U)
+
+#define S_MPS2CRYP_RX_FIFO1_PERR 29
+#define V_MPS2CRYP_RX_FIFO1_PERR(x) ((x) << S_MPS2CRYP_RX_FIFO1_PERR)
+#define F_MPS2CRYP_RX_FIFO1_PERR V_MPS2CRYP_RX_FIFO1_PERR(1U)
+
+#define S_MPS2CRYP_RX_FIFO0_PERR 28
+#define V_MPS2CRYP_RX_FIFO0_PERR(x) ((x) << S_MPS2CRYP_RX_FIFO0_PERR)
+#define F_MPS2CRYP_RX_FIFO0_PERR V_MPS2CRYP_RX_FIFO0_PERR(1U)
+
+#define S_VNI_MULTICAST_SRAM2_PERR 27
+#define V_VNI_MULTICAST_SRAM2_PERR(x) ((x) << S_VNI_MULTICAST_SRAM2_PERR)
+#define F_VNI_MULTICAST_SRAM2_PERR V_VNI_MULTICAST_SRAM2_PERR(1U)
+
+#define S_VNI_MULTICAST_SRAM1_PERR 26
+#define V_VNI_MULTICAST_SRAM1_PERR(x) ((x) << S_VNI_MULTICAST_SRAM1_PERR)
+#define F_VNI_MULTICAST_SRAM1_PERR V_VNI_MULTICAST_SRAM1_PERR(1U)
+
+#define S_VNI_MULTICAST_SRAM0_PERR 25
+#define V_VNI_MULTICAST_SRAM0_PERR(x) ((x) << S_VNI_MULTICAST_SRAM0_PERR)
+#define F_VNI_MULTICAST_SRAM0_PERR V_VNI_MULTICAST_SRAM0_PERR(1U)
+
+#define S_MAC_MULTICAST_SRAM4_PERR 24
+#define V_MAC_MULTICAST_SRAM4_PERR(x) ((x) << S_MAC_MULTICAST_SRAM4_PERR)
+#define F_MAC_MULTICAST_SRAM4_PERR V_MAC_MULTICAST_SRAM4_PERR(1U)
+
+#define S_MAC_MULTICAST_SRAM3_PERR 23
+#define V_MAC_MULTICAST_SRAM3_PERR(x) ((x) << S_MAC_MULTICAST_SRAM3_PERR)
+#define F_MAC_MULTICAST_SRAM3_PERR V_MAC_MULTICAST_SRAM3_PERR(1U)
+
+#define S_MAC_MULTICAST_SRAM2_PERR 22
+#define V_MAC_MULTICAST_SRAM2_PERR(x) ((x) << S_MAC_MULTICAST_SRAM2_PERR)
+#define F_MAC_MULTICAST_SRAM2_PERR V_MAC_MULTICAST_SRAM2_PERR(1U)
+
+#define S_MAC_MULTICAST_SRAM1_PERR 21
+#define V_MAC_MULTICAST_SRAM1_PERR(x) ((x) << S_MAC_MULTICAST_SRAM1_PERR)
+#define F_MAC_MULTICAST_SRAM1_PERR V_MAC_MULTICAST_SRAM1_PERR(1U)
+
+#define S_MAC_MULTICAST_SRAM0_PERR 20
+#define V_MAC_MULTICAST_SRAM0_PERR(x) ((x) << S_MAC_MULTICAST_SRAM0_PERR)
+#define F_MAC_MULTICAST_SRAM0_PERR V_MAC_MULTICAST_SRAM0_PERR(1U)
+
+#define S_MEM_WRAP_IPSEC_HDR_UPD_FIFO3_PERR 19
+#define V_MEM_WRAP_IPSEC_HDR_UPD_FIFO3_PERR(x) ((x) << S_MEM_WRAP_IPSEC_HDR_UPD_FIFO3_PERR)
+#define F_MEM_WRAP_IPSEC_HDR_UPD_FIFO3_PERR V_MEM_WRAP_IPSEC_HDR_UPD_FIFO3_PERR(1U)
+
+#define S_MEM_WRAP_IPSEC_HDR_UPD_FIFO2_PERR 18
+#define V_MEM_WRAP_IPSEC_HDR_UPD_FIFO2_PERR(x) ((x) << S_MEM_WRAP_IPSEC_HDR_UPD_FIFO2_PERR)
+#define F_MEM_WRAP_IPSEC_HDR_UPD_FIFO2_PERR V_MEM_WRAP_IPSEC_HDR_UPD_FIFO2_PERR(1U)
+
+#define S_MEM_WRAP_IPSEC_HDR_UPD_FIFO1_PERR 17
+#define V_MEM_WRAP_IPSEC_HDR_UPD_FIFO1_PERR(x) ((x) << S_MEM_WRAP_IPSEC_HDR_UPD_FIFO1_PERR)
+#define F_MEM_WRAP_IPSEC_HDR_UPD_FIFO1_PERR V_MEM_WRAP_IPSEC_HDR_UPD_FIFO1_PERR(1U)
+
+#define S_MEM_WRAP_IPSEC_HDR_UPD_FIFO0_PERR 16
+#define V_MEM_WRAP_IPSEC_HDR_UPD_FIFO0_PERR(x) ((x) << S_MEM_WRAP_IPSEC_HDR_UPD_FIFO0_PERR)
+#define F_MEM_WRAP_IPSEC_HDR_UPD_FIFO0_PERR V_MEM_WRAP_IPSEC_HDR_UPD_FIFO0_PERR(1U)
+
+#define S_MEM_WRAP_CR2MPS_RX_FIFO3_PERR 15
+#define V_MEM_WRAP_CR2MPS_RX_FIFO3_PERR(x) ((x) << S_MEM_WRAP_CR2MPS_RX_FIFO3_PERR)
+#define F_MEM_WRAP_CR2MPS_RX_FIFO3_PERR V_MEM_WRAP_CR2MPS_RX_FIFO3_PERR(1U)
+
+#define S_MEM_WRAP_CR2MPS_RX_FIFO2_PERR 14
+#define V_MEM_WRAP_CR2MPS_RX_FIFO2_PERR(x) ((x) << S_MEM_WRAP_CR2MPS_RX_FIFO2_PERR)
+#define F_MEM_WRAP_CR2MPS_RX_FIFO2_PERR V_MEM_WRAP_CR2MPS_RX_FIFO2_PERR(1U)
+
+#define S_MEM_WRAP_CR2MPS_RX_FIFO1_PERR 13
+#define V_MEM_WRAP_CR2MPS_RX_FIFO1_PERR(x) ((x) << S_MEM_WRAP_CR2MPS_RX_FIFO1_PERR)
+#define F_MEM_WRAP_CR2MPS_RX_FIFO1_PERR V_MEM_WRAP_CR2MPS_RX_FIFO1_PERR(1U)
+
+#define S_MEM_WRAP_CR2MPS_RX_FIFO0_PERR 12
+#define V_MEM_WRAP_CR2MPS_RX_FIFO0_PERR(x) ((x) << S_MEM_WRAP_CR2MPS_RX_FIFO0_PERR)
+#define F_MEM_WRAP_CR2MPS_RX_FIFO0_PERR V_MEM_WRAP_CR2MPS_RX_FIFO0_PERR(1U)
-#define S_RX_OUT 20
-#define M_RX_OUT 0x3fU
-#define V_RX_OUT(x) ((x) << S_RX_OUT)
-#define G_RX_OUT(x) (((x) >> S_RX_OUT) & M_RX_OUT)
+#define S_MEM_WRAP_NON_IPSEC_FIFO3_PERR 11
+#define V_MEM_WRAP_NON_IPSEC_FIFO3_PERR(x) ((x) << S_MEM_WRAP_NON_IPSEC_FIFO3_PERR)
+#define F_MEM_WRAP_NON_IPSEC_FIFO3_PERR V_MEM_WRAP_NON_IPSEC_FIFO3_PERR(1U)
-#define S_MEM_WRAP 0
-#define M_MEM_WRAP 0xfffffU
-#define V_MEM_WRAP(x) ((x) << S_MEM_WRAP)
-#define G_MEM_WRAP(x) (((x) >> S_MEM_WRAP) & M_MEM_WRAP)
+#define S_MEM_WRAP_NON_IPSEC_FIFO2_PERR 10
+#define V_MEM_WRAP_NON_IPSEC_FIFO2_PERR(x) ((x) << S_MEM_WRAP_NON_IPSEC_FIFO2_PERR)
+#define F_MEM_WRAP_NON_IPSEC_FIFO2_PERR V_MEM_WRAP_NON_IPSEC_FIFO2_PERR(1U)
+
+#define S_MEM_WRAP_NON_IPSEC_FIFO1_PERR 9
+#define V_MEM_WRAP_NON_IPSEC_FIFO1_PERR(x) ((x) << S_MEM_WRAP_NON_IPSEC_FIFO1_PERR)
+#define F_MEM_WRAP_NON_IPSEC_FIFO1_PERR V_MEM_WRAP_NON_IPSEC_FIFO1_PERR(1U)
+
+#define S_MEM_WRAP_NON_IPSEC_FIFO0_PERR 8
+#define V_MEM_WRAP_NON_IPSEC_FIFO0_PERR(x) ((x) << S_MEM_WRAP_NON_IPSEC_FIFO0_PERR)
+#define F_MEM_WRAP_NON_IPSEC_FIFO0_PERR V_MEM_WRAP_NON_IPSEC_FIFO0_PERR(1U)
+
+#define S_MEM_WRAP_TP_DB_REQ_FIFO3_PERR 7
+#define V_MEM_WRAP_TP_DB_REQ_FIFO3_PERR(x) ((x) << S_MEM_WRAP_TP_DB_REQ_FIFO3_PERR)
+#define F_MEM_WRAP_TP_DB_REQ_FIFO3_PERR V_MEM_WRAP_TP_DB_REQ_FIFO3_PERR(1U)
+
+#define S_MEM_WRAP_TP_DB_REQ_FIFO2_PERR 6
+#define V_MEM_WRAP_TP_DB_REQ_FIFO2_PERR(x) ((x) << S_MEM_WRAP_TP_DB_REQ_FIFO2_PERR)
+#define F_MEM_WRAP_TP_DB_REQ_FIFO2_PERR V_MEM_WRAP_TP_DB_REQ_FIFO2_PERR(1U)
+
+#define S_MEM_WRAP_TP_DB_REQ_FIFO1_PERR 5
+#define V_MEM_WRAP_TP_DB_REQ_FIFO1_PERR(x) ((x) << S_MEM_WRAP_TP_DB_REQ_FIFO1_PERR)
+#define F_MEM_WRAP_TP_DB_REQ_FIFO1_PERR V_MEM_WRAP_TP_DB_REQ_FIFO1_PERR(1U)
+
+#define S_MEM_WRAP_TP_DB_REQ_FIFO0_PERR 4
+#define V_MEM_WRAP_TP_DB_REQ_FIFO0_PERR(x) ((x) << S_MEM_WRAP_TP_DB_REQ_FIFO0_PERR)
+#define F_MEM_WRAP_TP_DB_REQ_FIFO0_PERR V_MEM_WRAP_TP_DB_REQ_FIFO0_PERR(1U)
+
+#define S_MEM_WRAP_CNTRL_FIFO3_PERR 3
+#define V_MEM_WRAP_CNTRL_FIFO3_PERR(x) ((x) << S_MEM_WRAP_CNTRL_FIFO3_PERR)
+#define F_MEM_WRAP_CNTRL_FIFO3_PERR V_MEM_WRAP_CNTRL_FIFO3_PERR(1U)
+
+#define S_MEM_WRAP_CNTRL_FIFO2_PERR 2
+#define V_MEM_WRAP_CNTRL_FIFO2_PERR(x) ((x) << S_MEM_WRAP_CNTRL_FIFO2_PERR)
+#define F_MEM_WRAP_CNTRL_FIFO2_PERR V_MEM_WRAP_CNTRL_FIFO2_PERR(1U)
+
+#define S_MEM_WRAP_CNTRL_FIFO1_PERR 1
+#define V_MEM_WRAP_CNTRL_FIFO1_PERR(x) ((x) << S_MEM_WRAP_CNTRL_FIFO1_PERR)
+#define F_MEM_WRAP_CNTRL_FIFO1_PERR V_MEM_WRAP_CNTRL_FIFO1_PERR(1U)
+
+#define S_MEM_WRAP_CNTRL_FIFO0_PERR 0
+#define V_MEM_WRAP_CNTRL_FIFO0_PERR(x) ((x) << S_MEM_WRAP_CNTRL_FIFO0_PERR)
+#define F_MEM_WRAP_CNTRL_FIFO0_PERR V_MEM_WRAP_CNTRL_FIFO0_PERR(1U)
#define A_MPS_RX_PERR_INT_ENABLE5 0x1132c
#define A_MPS_RX_PERR_ENABLE5 0x11330
#define A_MPS_RX_PERR_INT_CAUSE6 0x11334
-#define S_MPS_RX_MEM_WRAP 0
-#define M_MPS_RX_MEM_WRAP 0x1ffffffU
-#define V_MPS_RX_MEM_WRAP(x) ((x) << S_MPS_RX_MEM_WRAP)
-#define G_MPS_RX_MEM_WRAP(x) (((x) >> S_MPS_RX_MEM_WRAP) & M_MPS_RX_MEM_WRAP)
+#define S_T7_MEM_WRAP_IPSEC_HDR_UPD_FIFO3_PERR 23
+#define V_T7_MEM_WRAP_IPSEC_HDR_UPD_FIFO3_PERR(x) ((x) << S_T7_MEM_WRAP_IPSEC_HDR_UPD_FIFO3_PERR)
+#define F_T7_MEM_WRAP_IPSEC_HDR_UPD_FIFO3_PERR V_T7_MEM_WRAP_IPSEC_HDR_UPD_FIFO3_PERR(1U)
+
+#define S_T7_MEM_WRAP_IPSEC_HDR_UPD_FIFO2_PERR 22
+#define V_T7_MEM_WRAP_IPSEC_HDR_UPD_FIFO2_PERR(x) ((x) << S_T7_MEM_WRAP_IPSEC_HDR_UPD_FIFO2_PERR)
+#define F_T7_MEM_WRAP_IPSEC_HDR_UPD_FIFO2_PERR V_T7_MEM_WRAP_IPSEC_HDR_UPD_FIFO2_PERR(1U)
+
+#define S_T7_MEM_WRAP_IPSEC_HDR_UPD_FIFO1_PERR 21
+#define V_T7_MEM_WRAP_IPSEC_HDR_UPD_FIFO1_PERR(x) ((x) << S_T7_MEM_WRAP_IPSEC_HDR_UPD_FIFO1_PERR)
+#define F_T7_MEM_WRAP_IPSEC_HDR_UPD_FIFO1_PERR V_T7_MEM_WRAP_IPSEC_HDR_UPD_FIFO1_PERR(1U)
+
+#define S_T7_MEM_WRAP_IPSEC_HDR_UPD_FIFO0_PERR 20
+#define V_T7_MEM_WRAP_IPSEC_HDR_UPD_FIFO0_PERR(x) ((x) << S_T7_MEM_WRAP_IPSEC_HDR_UPD_FIFO0_PERR)
+#define F_T7_MEM_WRAP_IPSEC_HDR_UPD_FIFO0_PERR V_T7_MEM_WRAP_IPSEC_HDR_UPD_FIFO0_PERR(1U)
+
+#define S_MEM_WRAP_CR2MPS_UPDTD_HDR_FIFO3_PERR 19
+#define V_MEM_WRAP_CR2MPS_UPDTD_HDR_FIFO3_PERR(x) ((x) << S_MEM_WRAP_CR2MPS_UPDTD_HDR_FIFO3_PERR)
+#define F_MEM_WRAP_CR2MPS_UPDTD_HDR_FIFO3_PERR V_MEM_WRAP_CR2MPS_UPDTD_HDR_FIFO3_PERR(1U)
+
+#define S_MEM_WRAP_CR2MPS_UPDTD_HDR_FIFO2_PERR 18
+#define V_MEM_WRAP_CR2MPS_UPDTD_HDR_FIFO2_PERR(x) ((x) << S_MEM_WRAP_CR2MPS_UPDTD_HDR_FIFO2_PERR)
+#define F_MEM_WRAP_CR2MPS_UPDTD_HDR_FIFO2_PERR V_MEM_WRAP_CR2MPS_UPDTD_HDR_FIFO2_PERR(1U)
+
+#define S_MEM_WRAP_CR2MPS_UPDTD_HDR_FIFO1_PERR 17
+#define V_MEM_WRAP_CR2MPS_UPDTD_HDR_FIFO1_PERR(x) ((x) << S_MEM_WRAP_CR2MPS_UPDTD_HDR_FIFO1_PERR)
+#define F_MEM_WRAP_CR2MPS_UPDTD_HDR_FIFO1_PERR V_MEM_WRAP_CR2MPS_UPDTD_HDR_FIFO1_PERR(1U)
+
+#define S_MEM_WRAP_CR2MPS_UPDTD_HDR_FIFO0_PERR 16
+#define V_MEM_WRAP_CR2MPS_UPDTD_HDR_FIFO0_PERR(x) ((x) << S_MEM_WRAP_CR2MPS_UPDTD_HDR_FIFO0_PERR)
+#define F_MEM_WRAP_CR2MPS_UPDTD_HDR_FIFO0_PERR V_MEM_WRAP_CR2MPS_UPDTD_HDR_FIFO0_PERR(1U)
#define A_MPS_RX_PERR_INT_ENABLE6 0x11338
#define A_MPS_RX_PERR_ENABLE6 0x1133c
@@ -45844,6 +46424,18 @@
#define V_SLVFIFO(x) ((x) << S_SLVFIFO)
#define F_SLVFIFO V_SLVFIFO(1U)
+#define S_T7_MSTTXFIFO 22
+#define V_T7_MSTTXFIFO(x) ((x) << S_T7_MSTTXFIFO)
+#define F_T7_MSTTXFIFO V_T7_MSTTXFIFO(1U)
+
+#define S_T7_MSTRXFIFO 21
+#define V_T7_MSTRXFIFO(x) ((x) << S_T7_MSTRXFIFO)
+#define F_T7_MSTRXFIFO V_T7_MSTRXFIFO(1U)
+
+#define S_T7_SLVFIFO 20
+#define V_T7_SLVFIFO(x) ((x) << S_T7_SLVFIFO)
+#define F_T7_SLVFIFO V_T7_SLVFIFO(1U)
+
#define A_SMB_PERR_INJ 0x1909c
#define S_MSTTXINJDATAERR 3
@@ -46167,20 +46759,20 @@
#define V_UART_CLKDIV(x) ((x) << S_UART_CLKDIV)
#define G_UART_CLKDIV(x) (((x) >> S_UART_CLKDIV) & M_UART_CLKDIV)
-#define S_T7_STOPBITS 25
-#define M_T7_STOPBITS 0x3U
-#define V_T7_STOPBITS(x) ((x) << S_T7_STOPBITS)
-#define G_T7_STOPBITS(x) (((x) >> S_T7_STOPBITS) & M_T7_STOPBITS)
+#define S_T7_UART_STOPBITS 25
+#define M_T7_UART_STOPBITS 0x3U
+#define V_T7_UART_STOPBITS(x) ((x) << S_T7_UART_STOPBITS)
+#define G_T7_UART_STOPBITS(x) (((x) >> S_T7_UART_STOPBITS) & M_T7_UART_STOPBITS)
-#define S_T7_PARITY 23
-#define M_T7_PARITY 0x3U
-#define V_T7_PARITY(x) ((x) << S_T7_PARITY)
-#define G_T7_PARITY(x) (((x) >> S_T7_PARITY) & M_T7_PARITY)
+#define S_T7_UART_PARITY 23
+#define M_T7_UART_PARITY 0x3U
+#define V_T7_UART_PARITY(x) ((x) << S_T7_UART_PARITY)
+#define G_T7_UART_PARITY(x) (((x) >> S_T7_UART_PARITY) & M_T7_UART_PARITY)
-#define S_T7_DATABITS 19
-#define M_T7_DATABITS 0xfU
-#define V_T7_DATABITS(x) ((x) << S_T7_DATABITS)
-#define G_T7_DATABITS(x) (((x) >> S_T7_DATABITS) & M_T7_DATABITS)
+#define S_T7_UART_DATABITS 19
+#define M_T7_UART_DATABITS 0xfU
+#define V_T7_UART_DATABITS(x) ((x) << S_T7_UART_DATABITS)
+#define G_T7_UART_DATABITS(x) (((x) >> S_T7_UART_DATABITS) & M_T7_UART_DATABITS)
#define S_T7_UART_CLKDIV 0
#define M_T7_UART_CLKDIV 0x3ffffU
@@ -46607,13 +47199,13 @@
#define V_T7_SE_CNT_MISMATCH_0(x) ((x) << S_T7_SE_CNT_MISMATCH_0)
#define F_T7_SE_CNT_MISMATCH_0 V_T7_SE_CNT_MISMATCH_0(1U)
-#define S_ENABLE_CTX_3 7
-#define V_ENABLE_CTX_3(x) ((x) << S_ENABLE_CTX_3)
-#define F_ENABLE_CTX_3 V_ENABLE_CTX_3(1U)
+#define S_T7_ENABLE_CTX_3 7
+#define V_T7_ENABLE_CTX_3(x) ((x) << S_T7_ENABLE_CTX_3)
+#define F_T7_ENABLE_CTX_3 V_T7_ENABLE_CTX_3(1U)
-#define S_ENABLE_CTX_2 6
-#define V_ENABLE_CTX_2(x) ((x) << S_ENABLE_CTX_2)
-#define F_ENABLE_CTX_2 V_ENABLE_CTX_2(1U)
+#define S_T7_ENABLE_CTX_2 6
+#define V_T7_ENABLE_CTX_2(x) ((x) << S_T7_ENABLE_CTX_2)
+#define F_T7_ENABLE_CTX_2 V_T7_ENABLE_CTX_2(1U)
#define S_T7_ENABLE_CTX_1 5
#define V_T7_ENABLE_CTX_1(x) ((x) << S_T7_ENABLE_CTX_1)
@@ -46623,13 +47215,13 @@
#define V_T7_ENABLE_CTX_0(x) ((x) << S_T7_ENABLE_CTX_0)
#define F_T7_ENABLE_CTX_0 V_T7_ENABLE_CTX_0(1U)
-#define S_ENABLE_ALN_SDC_ERR_3 3
-#define V_ENABLE_ALN_SDC_ERR_3(x) ((x) << S_ENABLE_ALN_SDC_ERR_3)
-#define F_ENABLE_ALN_SDC_ERR_3 V_ENABLE_ALN_SDC_ERR_3(1U)
+#define S_T7_ENABLE_ALN_SDC_ERR_3 3
+#define V_T7_ENABLE_ALN_SDC_ERR_3(x) ((x) << S_T7_ENABLE_ALN_SDC_ERR_3)
+#define F_T7_ENABLE_ALN_SDC_ERR_3 V_T7_ENABLE_ALN_SDC_ERR_3(1U)
-#define S_ENABLE_ALN_SDC_ERR_2 2
-#define V_ENABLE_ALN_SDC_ERR_2(x) ((x) << S_ENABLE_ALN_SDC_ERR_2)
-#define F_ENABLE_ALN_SDC_ERR_2 V_ENABLE_ALN_SDC_ERR_2(1U)
+#define S_T7_ENABLE_ALN_SDC_ERR_2 2
+#define V_T7_ENABLE_ALN_SDC_ERR_2(x) ((x) << S_T7_ENABLE_ALN_SDC_ERR_2)
+#define F_T7_ENABLE_ALN_SDC_ERR_2 V_T7_ENABLE_ALN_SDC_ERR_2(1U)
#define S_T7_ENABLE_ALN_SDC_ERR_1 1
#define V_T7_ENABLE_ALN_SDC_ERR_1(x) ((x) << S_T7_ENABLE_ALN_SDC_ERR_1)
@@ -78548,6 +79140,17 @@
#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_ADDR_1 0x38210
#define A_MAC_MTIP_MAC400G_0_MTIP_FRM_LENGTH 0x38214
#define A_MAC_MTIP_MAC400G_0_MTIP_RX_FIFO_SECTIONS 0x3821c
+
+#define S_T7_MAC_EMPTY 16
+#define M_T7_MAC_EMPTY 0xffffU
+#define V_T7_MAC_EMPTY(x) ((x) << S_T7_MAC_EMPTY)
+#define G_T7_MAC_EMPTY(x) (((x) >> S_T7_MAC_EMPTY) & M_T7_MAC_EMPTY)
+
+#define S_T7_MAC_AVAIL 0
+#define M_T7_MAC_AVAIL 0xffffU
+#define V_T7_MAC_AVAIL(x) ((x) << S_T7_MAC_AVAIL)
+#define G_T7_MAC_AVAIL(x) (((x) >> S_T7_MAC_AVAIL) & M_T7_MAC_AVAIL)
+
#define A_MAC_MTIP_MAC400G_0_MTIP_TX_FIFO_SECTIONS 0x38220
#define A_MAC_MTIP_MAC400G_0_MTIP_RX_FIFO_ALMOST_F_E 0x38224
#define A_MAC_MTIP_MAC400G_0_MTIP_TX_FIFO_ALMOST_F_E 0x38228
@@ -82543,7 +83146,6 @@
#define F_DMA_PL_RST_N V_DMA_PL_RST_N(1U)
#define A_ARM_PLM_RID_CFG 0x4703c
-#define A_ARM_PLM_EROM_CFG 0x47040
#define A_ARM_PL_ARM_HDR_CFG 0x4704c
#define A_ARM_RC_INT_STATUS 0x4705c
@@ -85733,20 +86335,7 @@
#define V_T7_ECC_UE_INT_CAUSE(x) ((x) << S_T7_ECC_UE_INT_CAUSE)
#define F_T7_ECC_UE_INT_CAUSE V_T7_ECC_UE_INT_CAUSE(1U)
-#define A_MC_P_ECC_UE_INT_ENABLE 0x49324
-
-#define S_BIST_RSP_SRAM_UERR_ENABLE 0
-#define V_BIST_RSP_SRAM_UERR_ENABLE(x) ((x) << S_BIST_RSP_SRAM_UERR_ENABLE)
-#define F_BIST_RSP_SRAM_UERR_ENABLE V_BIST_RSP_SRAM_UERR_ENABLE(1U)
-
-#define A_MC_P_ECC_UE_INT_CAUSE 0x49328
-
-#define S_BIST_RSP_SRAM_UERR_CAUSE 0
-#define V_BIST_RSP_SRAM_UERR_CAUSE(x) ((x) << S_BIST_RSP_SRAM_UERR_CAUSE)
-#define F_BIST_RSP_SRAM_UERR_CAUSE V_BIST_RSP_SRAM_UERR_CAUSE(1U)
-
#define A_T7_MC_P_ECC_STATUS 0x4932c
-#define A_T7_MC_P_PHY_CTRL 0x49330
#define A_T7_MC_P_STATIC_CFG_STATUS 0x49334
#define S_DFIFREQRATIO 27
@@ -86100,6 +86689,7 @@
#define V_FLIP_BIT_POS0(x) ((x) << S_FLIP_BIT_POS0)
#define G_FLIP_BIT_POS0(x) (((x) >> S_FLIP_BIT_POS0) & M_FLIP_BIT_POS0)
+#define A_MC_REGB_DDRC_CH1_ECCSTAT 0x11608
#define A_MC_REGB_DDRC_CH1_ECCCTL 0x1160c
#define A_MC_REGB_DDRC_CH1_ECCERRCNT 0x11610
#define A_MC_REGB_DDRC_CH1_ECCCADDR0 0x11614
diff --git a/sys/dev/cxgbe/firmware/t7fw_cfg.txt b/sys/dev/cxgbe/firmware/t7fw_cfg.txt
index 499af3675bd9..70b05da04a23 100644
--- a/sys/dev/cxgbe/firmware/t7fw_cfg.txt
+++ b/sys/dev/cxgbe/firmware/t7fw_cfg.txt
@@ -114,7 +114,8 @@
reg[0x7dc0] = 0x0e2f8849 # TP_SHIFT_CNT
- reg[0x46004] = 0x3/0x3 #Crypto core reset
+ reg[0x46004] = 0x3/0x3 # Crypto core reset
+ reg[0x46000] = 0xa/0xe # 16K ESH Hi Extraction window
#Tick granularities in kbps
tsch_ticks = 100000, 10000, 1000, 10
@@ -192,14 +193,15 @@
reg[0x19250] = 0x0/0x3 # Termimate_msg = 0
# Terminate_with_err = 0
- gc_disable = 3 # 3 - disable gc for hma/mc1 and mc0,
+ #Enabling GC for HMA
+ gc_disable = 1 # 3 - disable gc for hma/mc1 and mc0,
# 2 - disable gc for mc1/hma enable mc0,
# 1 - enable gc for mc1/hma disable mc0,
# 0 - enable gc for mc1/hma and for mc0,
# default gc enabled.
# HMA configuration (uncomment following lines to enable HMA)
- hma_size = 92 # Size (in MBs) of host memory expected
+ hma_size = 128 # Size (in MBs) of host memory expected
hma_regions = iscsi,rrq,tls,ddp,pmrx,stag,pbl,rq # What all regions to place in host memory
#mc[0]=0
@@ -429,7 +431,7 @@
nethofld = 1024 # number of user mode ethernet flow contexts
ncrypto_lookaside = 32
nclip = 320 # number of clip region entries
- nfilter = 480 # number of filter region entries
+ nfilter = 448 # number of filter region entries
nserver = 480 # number of server region entries
nhash = 12288 # number of hash region entries
nhpfilter = 64 # number of high priority filter region entries
@@ -505,12 +507,20 @@
nfilter = 16 # number of filter region entries
#nhpfilter = 16 # number of high priority filter region entries
niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
- nethctrl = 32 # NPORTS*NCPUS
- neq = 64 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
+ nethctrl = 128 # NPORTS*NCPUS
+ neq = 256 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
nserver = 16
nhash = 1024
tp_l2t = 512
protocol = nic_vm, ofld, rddp, rdmac, tlskeys, ipsec_inline, rocev2, nvme_tcp
+ tp_ddp = 1
+ tp_ddp_iscsi = 1
+ tp_tls_key = 1
+ tp_stag = 1
+ tp_pbl = 3
+ tp_rq = 4
+ tp_rrq = 2
+ tp_srq = 96
# The following function, 1023, is not an actual PCIE function but is used to
# configure and reserve firmware internal resources that come from the global
@@ -523,7 +533,7 @@
cmask = all # access to all channels
pmask = all # access to all four ports ...
nexactf = 8 # NPORTS + DCBX +
- nfilter = 16 # number of filter region entries
+ nfilter = 48 # number of filter region entries
#nhpfilter = 0 # number of high priority filter region entries
@@ -594,7 +604,7 @@
# Bytes)
#
[port "0"]
- #dcb = ppp, dcbx, b2b # configure for DCB PPP and enable DCBX offload
+ #dcb = ppp, dcbx # configure for DCB PPP and enable DCBX offload
hwm = 30
lwm = 15
dwm = 30
@@ -604,7 +614,7 @@
[port "1"]
- #dcb = ppp, dcbx, b2b
+ #dcb = ppp, dcbx
hwm = 30
lwm = 15
dwm = 30
@@ -613,7 +623,7 @@
dcb_app_tlv[2] = 3260, socketnum, 5
[port "2"]
- #dcb = ppp, dcbx, b2b # configure for DCB PPP and enable DCBX offload
+ #dcb = ppp, dcbx # configure for DCB PPP and enable DCBX offload
hwm = 30
lwm = 15
dwm = 30
@@ -623,7 +633,7 @@
[port "3"]
- #dcb = ppp, dcbx, b2b
+ #dcb = ppp, dcbx
hwm = 30
lwm = 15
dwm = 30
@@ -633,7 +643,7 @@
[fini]
version = 0x1425001d
- checksum = 0x684e23fb
+ checksum = 0x3671da3b
# Total resources used by above allocations:
# Virtual Interfaces: 104
diff --git a/sys/dev/cxgbe/firmware/t7fw_cfg_uwire.txt b/sys/dev/cxgbe/firmware/t7fw_cfg_uwire.txt
index 0bca1c194af8..b1f5129238eb 100644
--- a/sys/dev/cxgbe/firmware/t7fw_cfg_uwire.txt
+++ b/sys/dev/cxgbe/firmware/t7fw_cfg_uwire.txt
@@ -114,7 +114,8 @@
reg[0x7dc0] = 0x0e2f8849 # TP_SHIFT_CNT
- reg[0x46004] = 0x3/0x3 #Crypto core reset
+ reg[0x46004] = 0x3/0x3 # Crypto core reset
+ reg[0x46000] = 0xa/0xe # 16K ESH Hi Extraction window
#Tick granularities in kbps
tsch_ticks = 100000, 10000, 1000, 10
@@ -192,14 +193,15 @@
reg[0x19250] = 0x0/0x3 # Termimate_msg = 0
# Terminate_with_err = 0
- gc_disable = 3 # 3 - disable gc for hma/mc1 and mc0,
+ #Enabling GC for HMA
+ gc_disable = 1 # 3 - disable gc for hma/mc1 and mc0,
# 2 - disable gc for mc1/hma enable mc0,
# 1 - enable gc for mc1/hma disable mc0,
# 0 - enable gc for mc1/hma and for mc0,
# default gc enabled.
# HMA configuration (uncomment following lines to enable HMA)
- hma_size = 92 # Size (in MBs) of host memory expected
+ hma_size = 128 # Size (in MBs) of host memory expected
hma_regions = iscsi,rrq,tls,ddp,pmrx,stag,pbl,rq # What all regions to place in host memory
#mc[0]=0
@@ -429,7 +431,7 @@
nethofld = 1024 # number of user mode ethernet flow contexts
ncrypto_lookaside = 32
nclip = 320 # number of clip region entries
- nfilter = 480 # number of filter region entries
+ nfilter = 448 # number of filter region entries
nserver = 480 # number of server region entries
nhash = 12288 # number of hash region entries
nhpfilter = 64 # number of high priority filter region entries
@@ -505,12 +507,20 @@
nfilter = 16 # number of filter region entries
#nhpfilter = 16 # number of high priority filter region entries
niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
- nethctrl = 32 # NPORTS*NCPUS
- neq = 64 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
+ nethctrl = 128 # NPORTS*NCPUS
+ neq = 256 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
nserver = 16
nhash = 1024
tp_l2t = 512
protocol = nic_vm, ofld, rddp, rdmac, tlskeys, ipsec_inline, rocev2, nvme_tcp
+ tp_ddp = 1
+ tp_ddp_iscsi = 1
+ tp_tls_key = 1
+ tp_stag = 1
+ tp_pbl = 3
+ tp_rq = 4
+ tp_rrq = 2
+ tp_srq = 96
# The following function, 1023, is not an actual PCIE function but is used to
# configure and reserve firmware internal resources that come from the global
@@ -523,7 +533,7 @@
cmask = all # access to all channels
pmask = all # access to all four ports ...
nexactf = 8 # NPORTS + DCBX +
- nfilter = 16 # number of filter region entries
+ nfilter = 48 # number of filter region entries
#nhpfilter = 0 # number of high priority filter region entries
@@ -633,7 +643,7 @@
[fini]
version = 0x1425001d
- checksum = 0x5cab62d4
+ checksum = 0x96513217
# Total resources used by above allocations:
# Virtual Interfaces: 104
diff --git a/sys/dev/cxgbe/t4_netmap.c b/sys/dev/cxgbe/t4_netmap.c
index 0135bec6e2c1..a858867239c6 100644
--- a/sys/dev/cxgbe/t4_netmap.c
+++ b/sys/dev/cxgbe/t4_netmap.c
@@ -606,10 +606,8 @@ cxgbe_netmap_split_rss(struct adapter *sc, struct vi_info *vi,
(nm_state == NM_OFF && nm_kring_pending_on(kring))) {
MPASS(nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID);
nactive[j]++;
- if (dq[j] == -1) {
+ if (dq[j] == -1)
dq[j] = nm_rxq->iq_abs_id;
- break;
- }
}
}
diff --git a/sys/dev/cxgbe/t4_sge.c b/sys/dev/cxgbe/t4_sge.c
index e9754ace27c2..b6d44792dce4 100644
--- a/sys/dev/cxgbe/t4_sge.c
+++ b/sys/dev/cxgbe/t4_sge.c
@@ -1548,16 +1548,13 @@ sort_before_lro(struct lro_ctrl *lro)
}
#endif
-#define CGBE_SHIFT_SCALE 10
-
static inline uint64_t
-t4_tstmp_to_ns(struct adapter *sc, uint64_t lf)
+t4_tstmp_to_ns(struct adapter *sc, uint64_t hw_tstmp)
{
struct clock_sync *cur, dcur;
uint64_t hw_clocks;
uint64_t hw_clk_div;
sbintime_t sbt_cur_to_prev, sbt;
- uint64_t hw_tstmp = lf & 0xfffffffffffffffULL; /* 60b, not 64b. */
seqc_t gen;
for (;;) {
@@ -1967,25 +1964,12 @@ get_segment_len(struct adapter *sc, struct sge_fl *fl, int plen)
return (min(plen, len));
}
-static int
-eth_rx(struct adapter *sc, struct sge_rxq *rxq, const struct iq_desc *d,
- u_int plen)
+static void
+handle_cpl_rx_pkt(struct adapter *sc, struct sge_rxq *rxq,
+ const struct cpl_rx_pkt *cpl, struct mbuf *m0)
{
- struct mbuf *m0;
if_t ifp = rxq->ifp;
- struct sge_fl *fl = &rxq->fl;
- struct vi_info *vi = if_getsoftc(ifp);
- const struct cpl_rx_pkt *cpl;
-#if defined(INET) || defined(INET6)
- struct lro_ctrl *lro = &rxq->lro;
-#endif
uint16_t err_vec, tnl_type, tnlhdr_len;
- static const int sw_hashtype[4][2] = {
- {M_HASHTYPE_NONE, M_HASHTYPE_NONE},
- {M_HASHTYPE_RSS_IPV4, M_HASHTYPE_RSS_IPV6},
- {M_HASHTYPE_RSS_TCP_IPV4, M_HASHTYPE_RSS_TCP_IPV6},
- {M_HASHTYPE_RSS_UDP_IPV4, M_HASHTYPE_RSS_UDP_IPV6},
- };
static const int sw_csum_flags[2][2] = {
{
/* IP, inner IP */
@@ -2015,43 +1999,6 @@ eth_rx(struct adapter *sc, struct sge_rxq *rxq, const struct iq_desc *d,
},
};
- MPASS(plen > sc->params.sge.fl_pktshift);
- if (vi->pfil != NULL && PFIL_HOOKED_IN(vi->pfil) &&
- __predict_true((fl->flags & FL_BUF_RESUME) == 0)) {
- struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
- caddr_t frame;
- int rc, slen;
-
- slen = get_segment_len(sc, fl, plen) -
- sc->params.sge.fl_pktshift;
- frame = sd->cl + fl->rx_offset + sc->params.sge.fl_pktshift;
- CURVNET_SET_QUIET(if_getvnet(ifp));
- rc = pfil_mem_in(vi->pfil, frame, slen, ifp, &m0);
- CURVNET_RESTORE();
- if (rc == PFIL_DROPPED || rc == PFIL_CONSUMED) {
- skip_fl_payload(sc, fl, plen);
- return (0);
- }
- if (rc == PFIL_REALLOCED) {
- skip_fl_payload(sc, fl, plen);
- goto have_mbuf;
- }
- }
-
- m0 = get_fl_payload(sc, fl, plen);
- if (__predict_false(m0 == NULL))
- return (ENOMEM);
-
- m0->m_pkthdr.len -= sc->params.sge.fl_pktshift;
- m0->m_len -= sc->params.sge.fl_pktshift;
- m0->m_data += sc->params.sge.fl_pktshift;
-
-have_mbuf:
- m0->m_pkthdr.rcvif = ifp;
- M_HASHTYPE_SET(m0, sw_hashtype[d->rss.hash_type][d->rss.ipv6]);
- m0->m_pkthdr.flowid = be32toh(d->rss.hash_val);
-
- cpl = (const void *)(&d->rss + 1);
if (sc->params.tp.rx_pkt_encap) {
const uint16_t ev = be16toh(cpl->err_vec);
@@ -2136,23 +2083,79 @@ have_mbuf:
rxq->vlan_extraction++;
}
}
+}
+
+static int
+eth_rx(struct adapter *sc, struct sge_rxq *rxq, const struct iq_desc *d,
+ u_int plen)
+{
+ struct mbuf *m0;
+ if_t ifp = rxq->ifp;
+ struct sge_fl *fl = &rxq->fl;
+ struct vi_info *vi = if_getsoftc(ifp);
+#if defined(INET) || defined(INET6)
+ struct lro_ctrl *lro = &rxq->lro;
+#endif
+ int rc;
+ const uint8_t fl_pktshift = sc->params.sge.fl_pktshift;
+ static const uint8_t sw_hashtype[4][2] = {
+ {M_HASHTYPE_NONE, M_HASHTYPE_NONE},
+ {M_HASHTYPE_RSS_IPV4, M_HASHTYPE_RSS_IPV6},
+ {M_HASHTYPE_RSS_TCP_IPV4, M_HASHTYPE_RSS_TCP_IPV6},
+ {M_HASHTYPE_RSS_UDP_IPV4, M_HASHTYPE_RSS_UDP_IPV6},
+ };
+
+ MPASS(plen > fl_pktshift);
+ if (vi->pfil != NULL && PFIL_HOOKED_IN(vi->pfil) &&
+ __predict_true((fl->flags & FL_BUF_RESUME) == 0)) {
+ struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
+ caddr_t frame;
+ const int slen = get_segment_len(sc, fl, plen) - fl_pktshift;
+
+ frame = sd->cl + fl->rx_offset + fl_pktshift;
+ CURVNET_SET_QUIET(if_getvnet(ifp));
+ rc = pfil_mem_in(vi->pfil, frame, slen, ifp, &m0);
+ CURVNET_RESTORE();
+ if (rc == PFIL_DROPPED || rc == PFIL_CONSUMED) {
+ skip_fl_payload(sc, fl, plen);
+ return (0);
+ }
+ if (rc == PFIL_REALLOCED) {
+ skip_fl_payload(sc, fl, plen);
+ goto have_mbuf;
+ }
+ }
+
+ m0 = get_fl_payload(sc, fl, plen);
+ if (__predict_false(m0 == NULL))
+ return (ENOMEM);
+ m0->m_pkthdr.len -= fl_pktshift;
+ m0->m_len -= fl_pktshift;
+ m0->m_data += fl_pktshift;
+have_mbuf:
+ m0->m_pkthdr.rcvif = ifp;
+ M_HASHTYPE_SET(m0, sw_hashtype[d->rss.hash_type][d->rss.ipv6]);
+ m0->m_pkthdr.flowid = be32toh(d->rss.hash_val);
+#ifdef NUMA
+ m0->m_pkthdr.numa_domain = if_getnumadomain(ifp);
+#endif
if (rxq->iq.flags & IQ_RX_TIMESTAMP) {
/*
- * Fill up rcv_tstmp but do not set M_TSTMP as
- * long as we get a non-zero back from t4_tstmp_to_ns().
+ * Fill up rcv_tstmp and set M_TSTMP if we get a a non-zero back
+ * from t4_tstmp_to_ns(). The descriptor has a 60b timestamp.
*/
m0->m_pkthdr.rcv_tstmp = t4_tstmp_to_ns(sc,
- be64toh(d->rsp.u.last_flit));
+ be64toh(d->rsp.u.last_flit) & 0x0fffffffffffffffULL);
if (m0->m_pkthdr.rcv_tstmp != 0)
m0->m_flags |= M_TSTMP;
}
-#ifdef NUMA
- m0->m_pkthdr.numa_domain = if_getnumadomain(ifp);
-#endif
+ handle_cpl_rx_pkt(sc, rxq, (const void *)(&d->rss + 1), m0);
+
#if defined(INET) || defined(INET6)
- if (rxq->iq.flags & IQ_LRO_ENABLED && tnl_type == 0 &&
+ if (rxq->iq.flags & IQ_LRO_ENABLED &&
+ (m0->m_pkthdr.rsstype & M_HASHTYPE_INNER) == 0 &&
(M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV4 ||
M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV6)) {
if (sort_before_lro(lro)) {
diff --git a/sys/dev/dpaa/bman_fdt.c b/sys/dev/dpaa/bman_fdt.c
index dffec52d5248..34b29ad6e236 100644
--- a/sys/dev/dpaa/bman_fdt.c
+++ b/sys/dev/dpaa/bman_fdt.c
@@ -136,25 +136,6 @@ bman_portals_fdt_probe(device_t dev)
return (BUS_PROBE_DEFAULT);
}
-static phandle_t
-bman_portal_find_cpu(int cpu)
-{
- phandle_t node;
- pcell_t reg;
-
- node = OF_finddevice("/cpus");
- if (node == -1)
- return (node);
-
- for (node = OF_child(node); node != 0; node = OF_peer(node)) {
- if (OF_getprop(node, "reg", &reg, sizeof(reg)) <= 0)
- continue;
- if (reg == cpu)
- return (node);
- }
- return (-1);
-}
-
static int
bman_portals_fdt_attach(device_t dev)
{
@@ -185,17 +166,15 @@ bman_portals_fdt_attach(device_t dev)
}
/* Checkout related cpu */
if (OF_getprop(child, "cpu-handle", (void *)&cpu,
- sizeof(cpu)) <= 0) {
- cpu = bman_portal_find_cpu(cpus);
- if (cpu <= 0)
- continue;
- }
- /* Acquire cpu number */
- cpu_node = OF_instance_to_package(cpu);
- if (OF_getencprop(cpu_node, "reg", &cpu_num, sizeof(cpu_num)) <= 0) {
- device_printf(dev, "Could not retrieve CPU number.\n");
- return (ENXIO);
- }
+ sizeof(cpu)) > 0) {
+ cpu_node = OF_instance_to_package(cpu);
+ /* Acquire cpu number */
+ if (OF_getencprop(cpu_node, "reg", &cpu_num, sizeof(cpu_num)) <= 0) {
+ device_printf(dev, "Could not retrieve CPU number.\n");
+ return (ENXIO);
+ }
+ } else
+ cpu_num = cpus;
cpus++;
diff --git a/sys/dev/dpaa/qman_fdt.c b/sys/dev/dpaa/qman_fdt.c
index 3f22ea4d651a..35016073ba0e 100644
--- a/sys/dev/dpaa/qman_fdt.c
+++ b/sys/dev/dpaa/qman_fdt.c
@@ -136,25 +136,6 @@ qman_portals_fdt_probe(device_t dev)
return (BUS_PROBE_DEFAULT);
}
-static phandle_t
-qman_portal_find_cpu(int cpu)
-{
- phandle_t node;
- pcell_t reg;
-
- node = OF_finddevice("/cpus");
- if (node == -1)
- return (-1);
-
- for (node = OF_child(node); node != 0; node = OF_peer(node)) {
- if (OF_getprop(node, "reg", &reg, sizeof(reg)) <= 0)
- continue;
- if (reg == cpu)
- return (node);
- }
- return (-1);
-}
-
static int
qman_portals_fdt_attach(device_t dev)
{
@@ -213,18 +194,15 @@ qman_portals_fdt_attach(device_t dev)
}
/* Checkout related cpu */
if (OF_getprop(child, "cpu-handle", (void *)&cpu,
- sizeof(cpu)) <= 0) {
- cpu = qman_portal_find_cpu(cpus);
- if (cpu <= 0)
- continue;
- }
- /* Acquire cpu number */
- cpu_node = OF_instance_to_package(cpu);
- if (OF_getencprop(cpu_node, "reg", &cpu_num, sizeof(cpu_num)) <= 0) {
- device_printf(dev, "Could not retrieve CPU number.\n");
- return (ENXIO);
- }
-
+ sizeof(cpu)) > 0) {
+ cpu_node = OF_instance_to_package(cpu);
+ /* Acquire cpu number */
+ if (OF_getencprop(cpu_node, "reg", &cpu_num, sizeof(cpu_num)) <= 0) {
+ device_printf(dev, "Could not retrieve CPU number.\n");
+ return (ENXIO);
+ }
+ } else
+ cpu_num = cpus;
cpus++;
if (ofw_bus_gen_setup_devinfo(&ofw_di, child) != 0) {
diff --git a/sys/dev/dpaa2/dpaa2_ni.c b/sys/dev/dpaa2/dpaa2_ni.c
index 98a6c6047188..5f796e6e472b 100644
--- a/sys/dev/dpaa2/dpaa2_ni.c
+++ b/sys/dev/dpaa2/dpaa2_ni.c
@@ -220,6 +220,9 @@ MALLOC_DEFINE(M_DPAA2_TXB, "dpaa2_txb", "DPAA2 DMA-mapped buffer (Tx)");
#define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */
#define RXH_DISCARD (1 << 31)
+/* Transmit checksum offload */
+#define DPAA2_CSUM_TX_OFFLOAD (CSUM_IP | CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6)
+
/* Default Rx hash options, set during attaching. */
#define DPAA2_RXH_DEFAULT (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
@@ -559,6 +562,7 @@ dpaa2_ni_attach(device_t dev)
if_settransmitfn(ifp, dpaa2_ni_transmit);
if_setqflushfn(ifp, dpaa2_ni_qflush);
+ if_sethwassist(sc->ifp, DPAA2_CSUM_TX_OFFLOAD);
if_setcapabilities(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM |
IFCAP_HWCSUM_IPV6 | IFCAP_JUMBO_MTU);
if_setcapenable(ifp, if_getcapabilities(ifp));
@@ -2598,25 +2602,11 @@ dpaa2_ni_ioctl(if_t ifp, u_long c, caddr_t data)
break;
case SIOCSIFCAP:
changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
- if (changed & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
- if ((ifr->ifr_reqcap & changed) &
- (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
- if_setcapenablebit(ifp,
- IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6, 0);
- } else {
- if_setcapenablebit(ifp, 0,
- IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
- }
- }
- if (changed & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) {
- if ((ifr->ifr_reqcap & changed) &
- (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) {
- if_setcapenablebit(ifp,
- IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6, 0);
- } else {
- if_setcapenablebit(ifp, 0,
- IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
- }
+ if ((changed & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) != 0)
+ if_togglecapenable(ifp, IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
+ if ((changed & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) != 0) {
+ if_togglecapenable(ifp, IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
+ if_togglehwassist(ifp, DPAA2_CSUM_TX_OFFLOAD);
}
rc = dpaa2_ni_setup_if_caps(sc);
diff --git a/sys/dev/dwc/dwc1000_core.c b/sys/dev/dwc/dwc1000_core.c
index ba895f991b50..ab1d50c61150 100644
--- a/sys/dev/dwc/dwc1000_core.c
+++ b/sys/dev/dwc/dwc1000_core.c
@@ -238,7 +238,7 @@ dwc1000_enable_csum_offload(struct dwc_softc *sc)
DWC_ASSERT_LOCKED(sc);
reg = READ4(sc, MAC_CONFIGURATION);
- if ((if_getcapenable(sc->ifp) & IFCAP_RXCSUM) != 0)
+ if ((if_getcapenable(sc->ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) != 0)
reg |= CONF_IPC;
else
reg &= ~CONF_IPC;
diff --git a/sys/dev/dwc/dwc1000_dma.c b/sys/dev/dwc/dwc1000_dma.c
index 44b9f0d114bf..6457503d2a7f 100644
--- a/sys/dev/dwc/dwc1000_dma.c
+++ b/sys/dev/dwc/dwc1000_dma.c
@@ -248,7 +248,7 @@ dma1000_setup_txbuf(struct dwc_softc *sc, int idx, struct mbuf **mp)
struct bus_dma_segment segs[TX_MAP_MAX_SEGS];
int error, nsegs;
struct mbuf * m;
- uint32_t flags = 0;
+ uint32_t flags;
int i;
int last;
@@ -276,19 +276,12 @@ dma1000_setup_txbuf(struct dwc_softc *sc, int idx, struct mbuf **mp)
m = *mp;
- if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
- if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0) {
- if (!sc->dma_ext_desc)
- flags = NTDESC1_CIC_FULL;
- else
- flags = ETDESC0_CIC_FULL;
- } else {
- if (!sc->dma_ext_desc)
- flags = NTDESC1_CIC_HDR;
- else
- flags = ETDESC0_CIC_HDR;
- }
- }
+ if ((m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6)) != 0)
+ flags = sc->dma_ext_desc ? ETDESC0_CIC_SEG : NTDESC1_CIC_SEG;
+ else if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
+ flags = sc->dma_ext_desc ? ETDESC0_CIC_HDR : NTDESC1_CIC_HDR;
+ else
+ flags = sc->dma_ext_desc ? ETDESC0_CIC_NONE : NTDESC1_CIC_NONE;
bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map,
BUS_DMASYNC_PREWRITE);
@@ -397,8 +390,8 @@ dwc_rxfinish_one(struct dwc_softc *sc, struct dwc_hwdesc *desc,
m->m_len = len;
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
- if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
- (rdesc0 & RDESC0_FT) != 0) {
+ if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) != 0 &&
+ (rdesc0 & RDESC0_FT) != 0) {
m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
if ((rdesc0 & RDESC0_ICE) == 0)
m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
diff --git a/sys/dev/dwc/if_dwc.c b/sys/dev/dwc/if_dwc.c
index 1b4b4be68747..f57a19e8a112 100644
--- a/sys/dev/dwc/if_dwc.c
+++ b/sys/dev/dwc/if_dwc.c
@@ -263,14 +263,16 @@ dwc_ioctl(if_t ifp, u_long cmd, caddr_t data)
/* No work to do except acknowledge the change took */
if_togglecapenable(ifp, IFCAP_VLAN_MTU);
}
- if (mask & IFCAP_RXCSUM)
- if_togglecapenable(ifp, IFCAP_RXCSUM);
- if (mask & IFCAP_TXCSUM)
+ if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
+ if_togglecapenable(ifp, IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
+ if (mask & IFCAP_TXCSUM) {
if_togglecapenable(ifp, IFCAP_TXCSUM);
- if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
- if_sethwassistbits(ifp, CSUM_IP | CSUM_UDP | CSUM_TCP, 0);
- else
- if_sethwassistbits(ifp, 0, CSUM_IP | CSUM_UDP | CSUM_TCP);
+ if_togglehwassist(ifp, CSUM_IP | CSUM_DELAY_DATA);
+ }
+ if (mask & IFCAP_TXCSUM_IPV6) {
+ if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
+ if_togglehwassist(ifp, CSUM_DELAY_DATA_IPV6);
+ }
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
DWC_LOCK(sc);
@@ -607,8 +609,8 @@ dwc_attach(device_t dev)
if_setinitfn(ifp, dwc_init);
if_setsendqlen(ifp, TX_MAP_COUNT - 1);
if_setsendqready(sc->ifp);
- if_sethwassist(sc->ifp, CSUM_IP | CSUM_UDP | CSUM_TCP);
- if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM);
+ if_sethwassist(sc->ifp, CSUM_IP | CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6);
+ if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));
/* Attach the mii driver. */
diff --git a/sys/dev/hpt27xx/hpt27xx_osm_bsd.c b/sys/dev/hpt27xx/hpt27xx_osm_bsd.c
index e086a1554940..638efa9e2ae2 100644
--- a/sys/dev/hpt27xx/hpt27xx_osm_bsd.c
+++ b/sys/dev/hpt27xx/hpt27xx_osm_bsd.c
@@ -1271,13 +1271,9 @@ static driver_t hpt_pci_driver = {
#error "no TARGETNAME found"
#endif
-/* use this to make TARGETNAME be expanded */
-#define __DRIVER_MODULE(p1, p2, p3, p4, p5) DRIVER_MODULE(p1, p2, p3, p4, p5)
-#define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2)
-#define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5)
-__DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, 0, 0);
-__MODULE_VERSION(TARGETNAME, 1);
-__MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1);
+DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, NULL, NULL);
+MODULE_VERSION(TARGETNAME, 1);
+MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1);
static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td)
{
diff --git a/sys/dev/hptmv/entry.c b/sys/dev/hptmv/entry.c
index f3d58f285b39..a82ffb66bdc8 100644
--- a/sys/dev/hptmv/entry.c
+++ b/sys/dev/hptmv/entry.c
@@ -95,8 +95,7 @@ static driver_t hpt_pci_driver = {
sizeof(IAL_ADAPTER_T)
};
-#define __DRIVER_MODULE(p1, p2, p3, p4, p5) DRIVER_MODULE(p1, p2, p3, p4, p5)
-__DRIVER_MODULE(PROC_DIR_NAME, pci, hpt_pci_driver, 0, 0);
+DRIVER_MODULE(PROC_DIR_NAME, pci, hpt_pci_driver, NULL, NULL);
MODULE_DEPEND(PROC_DIR_NAME, cam, 1, 1, 1);
#define ccb_ccb_ptr spriv_ptr0
diff --git a/sys/dev/hptnr/hptnr_osm_bsd.c b/sys/dev/hptnr/hptnr_osm_bsd.c
index 7426873964fb..fa0f78a7e01a 100644
--- a/sys/dev/hptnr/hptnr_osm_bsd.c
+++ b/sys/dev/hptnr/hptnr_osm_bsd.c
@@ -1561,13 +1561,9 @@ static driver_t hpt_pci_driver = {
#error "no TARGETNAME found"
#endif
-/* use this to make TARGETNAME be expanded */
-#define __DRIVER_MODULE(p1, p2, p3, p4, p5) DRIVER_MODULE(p1, p2, p3, p4, p5)
-#define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2)
-#define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5)
-__DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, 0, 0);
-__MODULE_VERSION(TARGETNAME, 1);
-__MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1);
+DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, NULL, NULL);
+MODULE_VERSION(TARGETNAME, 1);
+MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1);
static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td)
{
diff --git a/sys/dev/hptrr/hptrr_osm_bsd.c b/sys/dev/hptrr/hptrr_osm_bsd.c
index 78a051d54cf8..f4ae6732ea5d 100644
--- a/sys/dev/hptrr/hptrr_osm_bsd.c
+++ b/sys/dev/hptrr/hptrr_osm_bsd.c
@@ -1204,13 +1204,9 @@ static driver_t hpt_pci_driver = {
#error "no TARGETNAME found"
#endif
-/* use this to make TARGETNAME be expanded */
-#define __DRIVER_MODULE(p1, p2, p3, p4, p5) DRIVER_MODULE(p1, p2, p3, p4, p5)
-#define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2)
-#define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5)
-__DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, 0, 0);
-__MODULE_VERSION(TARGETNAME, 1);
-__MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1);
+DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, NULL, NULL);
+MODULE_VERSION(TARGETNAME, 1);
+MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1);
static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td)
{
diff --git a/sys/dev/iicbus/adc/ads111x.c b/sys/dev/iicbus/adc/ads111x.c
index 21924627cc68..8baf9ff789ec 100644
--- a/sys/dev/iicbus/adc/ads111x.c
+++ b/sys/dev/iicbus/adc/ads111x.c
@@ -407,15 +407,15 @@ ads111x_setup_channel(struct ads111x_softc *sc, int chan, int gainidx, int ratei
chantree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(devtree), OID_AUTO,
chanstr, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "channel data");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(chantree), OID_AUTO,
- "gain_index", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT,
+ "gain_index", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
sc, chan, ads111x_sysctl_gainidx, "I",
"programmable gain amp setting, 0-7");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(chantree), OID_AUTO,
- "rate_index", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT,
+ "rate_index", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
sc, chan, ads111x_sysctl_rateidx, "I", "sample rate setting, 0-7");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(chantree), OID_AUTO,
"voltage",
- CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_NEEDGIANT, sc,
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, sc,
chan, ads111x_sysctl_voltage, "I", "sampled voltage in microvolts");
c->configured = true;
@@ -565,13 +565,13 @@ ads111x_attach(device_t dev)
ctx = device_get_sysctl_ctx(dev);
tree = device_get_sysctl_tree(dev);
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
- "config", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, sc, 0,
+ "config", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, sc, 0,
ads111x_sysctl_config, "I", "configuration register word");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
- "lo_thresh", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, sc, 0,
+ "lo_thresh", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, sc, 0,
ads111x_sysctl_lothresh, "I", "comparator low threshold");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
- "hi_thresh", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, sc, 0,
+ "hi_thresh", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, sc, 0,
ads111x_sysctl_hithresh, "I", "comparator high threshold");
/* Set up channels based on metadata or default config. */
diff --git a/sys/dev/isl/isl.c b/sys/dev/isl/isl.c
index 6a0d406aeeda..815c189adee5 100644
--- a/sys/dev/isl/isl.c
+++ b/sys/dev/isl/isl.c
@@ -202,7 +202,7 @@ isl_attach(device_t dev)
if (use_als) {
SYSCTL_ADD_PROC(sysctl_ctx,
SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "als",
- CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
ISL_METHOD_ALS, isl_sysctl, "I",
"Current ALS sensor read-out");
}
@@ -210,7 +210,7 @@ isl_attach(device_t dev)
if (use_ir) {
SYSCTL_ADD_PROC(sysctl_ctx,
SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "ir",
- CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
ISL_METHOD_IR, isl_sysctl, "I",
"Current IR sensor read-out");
}
@@ -218,20 +218,20 @@ isl_attach(device_t dev)
if (use_prox) {
SYSCTL_ADD_PROC(sysctl_ctx,
SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "prox",
- CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
ISL_METHOD_PROX, isl_sysctl, "I",
"Current proximity sensor read-out");
}
SYSCTL_ADD_PROC(sysctl_ctx,
SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "resolution",
- CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
ISL_METHOD_RESOLUTION, isl_sysctl, "I",
"Current proximity sensor resolution");
SYSCTL_ADD_PROC(sysctl_ctx,
SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "range",
- CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
ISL_METHOD_RANGE, isl_sysctl, "I",
"Current proximity sensor range");
diff --git a/sys/dev/jme/if_jme.c b/sys/dev/jme/if_jme.c
index d9982a2f031c..02e71d54851a 100644
--- a/sys/dev/jme/if_jme.c
+++ b/sys/dev/jme/if_jme.c
@@ -971,23 +971,23 @@ jme_sysctl_node(struct jme_softc *sc)
child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev));
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_to",
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_tx_coal_to,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->jme_tx_coal_to,
0, sysctl_hw_jme_tx_coal_to, "I", "jme tx coalescing timeout");
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_pkt",
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_tx_coal_pkt,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->jme_tx_coal_pkt,
0, sysctl_hw_jme_tx_coal_pkt, "I", "jme tx coalescing packet");
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_to",
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_rx_coal_to,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->jme_rx_coal_to,
0, sysctl_hw_jme_rx_coal_to, "I", "jme rx coalescing timeout");
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_pkt",
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_rx_coal_pkt,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->jme_rx_coal_pkt,
0, sysctl_hw_jme_rx_coal_pkt, "I", "jme rx coalescing packet");
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
&sc->jme_process_limit, 0, sysctl_hw_jme_proc_limit, "I",
"max number of Rx events to process");
diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c
index 9d246d7c78fd..8d2908264aac 100644
--- a/sys/dev/md/md.c
+++ b/sys/dev/md/md.c
@@ -1617,7 +1617,6 @@ mdresize(struct md_s *sc, struct md_req *mdr)
0, 0);
swap_release_by_cred(IDX_TO_OFF(oldpages -
newpages), sc->cred);
- sc->s_swap.object->charge = IDX_TO_OFF(newpages);
sc->s_swap.object->size = newpages;
VM_OBJECT_WUNLOCK(sc->s_swap.object);
} else if (newpages > oldpages) {
@@ -1637,7 +1636,6 @@ mdresize(struct md_s *sc, struct md_req *mdr)
}
}
VM_OBJECT_WLOCK(sc->s_swap.object);
- sc->s_swap.object->charge = IDX_TO_OFF(newpages);
sc->s_swap.object->size = newpages;
VM_OBJECT_WUNLOCK(sc->s_swap.object);
}
diff --git a/sys/dev/mpr/mpr.c b/sys/dev/mpr/mpr.c
index 262d6b58b705..9bceabe637b5 100644
--- a/sys/dev/mpr/mpr.c
+++ b/sys/dev/mpr/mpr.c
@@ -30,7 +30,6 @@
*
*/
-#include <sys/cdefs.h>
/* Communications core for Avago Technologies (LSI) MPT3 */
/* TODO Move headers to mprvar */
diff --git a/sys/dev/mpr/mpr_config.c b/sys/dev/mpr/mpr_config.c
index b7882feed158..8a36a12a225c 100644
--- a/sys/dev/mpr/mpr_config.c
+++ b/sys/dev/mpr/mpr_config.c
@@ -28,7 +28,6 @@
* Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
*/
-#include <sys/cdefs.h>
/* TODO Move headers to mprvar */
#include <sys/types.h>
#include <sys/param.h>
diff --git a/sys/dev/mpr/mpr_mapping.c b/sys/dev/mpr/mpr_mapping.c
index 38aa4dfc7ef2..a907ebafe2ff 100644
--- a/sys/dev/mpr/mpr_mapping.c
+++ b/sys/dev/mpr/mpr_mapping.c
@@ -28,7 +28,6 @@
* Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
*/
-#include <sys/cdefs.h>
/* TODO Move headers to mprvar */
#include <sys/types.h>
#include <sys/param.h>
diff --git a/sys/dev/mpr/mpr_pci.c b/sys/dev/mpr/mpr_pci.c
index 6b74f7e38811..a04ef7c6fb4d 100644
--- a/sys/dev/mpr/mpr_pci.c
+++ b/sys/dev/mpr/mpr_pci.c
@@ -24,7 +24,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/* PCI/PCI-X/PCIe bus interface for the Avago Tech (LSI) MPT3 controllers */
/* TODO Move headers to mprvar */
diff --git a/sys/dev/mpr/mpr_sas.c b/sys/dev/mpr/mpr_sas.c
index 5f3a27a468b0..f0470de3efcf 100644
--- a/sys/dev/mpr/mpr_sas.c
+++ b/sys/dev/mpr/mpr_sas.c
@@ -30,7 +30,6 @@
*
*/
-#include <sys/cdefs.h>
/* Communications core for Avago Technologies (LSI) MPT3 */
/* TODO Move headers to mprvar */
@@ -86,6 +85,12 @@
#define MPRSAS_DISCOVERY_TIMEOUT 20
#define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
+#include <sys/sdt.h>
+
+/* SDT Probes */
+SDT_PROBE_DEFINE4(cam, , mpr, complete, "union ccb *",
+ "struct mpr_command *", "u_int", "u32");
+
/*
* static array to check SCSI OpCode for EEDP protection bits
*/
@@ -2540,6 +2545,9 @@ mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
sc->SSU_refcount--;
}
+ SDT_PROBE4(cam, , mpr, complete, ccb, cm, sassc->flags,
+ sc->mapping_table[target_id].device_info);
+
/* Take the fast path to completion */
if (cm->cm_reply == NULL) {
if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
diff --git a/sys/dev/mpr/mpr_sas_lsi.c b/sys/dev/mpr/mpr_sas_lsi.c
index 9ba776e49e7a..f88dd6e2532f 100644
--- a/sys/dev/mpr/mpr_sas_lsi.c
+++ b/sys/dev/mpr/mpr_sas_lsi.c
@@ -28,7 +28,6 @@
* Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
*/
-#include <sys/cdefs.h>
/* Communications core for Avago Technologies (LSI) MPT3 */
/* TODO Move headers to mprvar */
@@ -1517,7 +1516,6 @@ mprsas_SSU_to_SATA_devices(struct mpr_softc *sc, int howto)
/*start*/FALSE,
/*load/eject*/0,
/*immediate*/FALSE,
- /*power_condition*/SSS_PC_START_VALID,
MPR_SENSE_LEN,
/*timeout*/10000);
xpt_action(ccb);
diff --git a/sys/dev/mpr/mpr_table.c b/sys/dev/mpr/mpr_table.c
index 910f47bb2937..b47b5259c63d 100644
--- a/sys/dev/mpr/mpr_table.c
+++ b/sys/dev/mpr/mpr_table.c
@@ -24,7 +24,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/* Debugging tables for MPT2 */
/* TODO Move headers to mprvar */
diff --git a/sys/dev/mpr/mpr_user.c b/sys/dev/mpr/mpr_user.c
index 5245129ce8c1..826528a3aeaf 100644
--- a/sys/dev/mpr/mpr_user.c
+++ b/sys/dev/mpr/mpr_user.c
@@ -59,7 +59,6 @@
* Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
*/
-#include <sys/cdefs.h>
/* TODO Move headers to mprvar */
#include <sys/types.h>
#include <sys/param.h>
diff --git a/sys/dev/mps/mps.c b/sys/dev/mps/mps.c
index 357eacf28925..cb82045356fc 100644
--- a/sys/dev/mps/mps.c
+++ b/sys/dev/mps/mps.c
@@ -30,7 +30,6 @@
* Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
*/
-#include <sys/cdefs.h>
/* Communications core for Avago Technologies (LSI) MPT2 */
/* TODO Move headers to mpsvar */
diff --git a/sys/dev/mps/mps_config.c b/sys/dev/mps/mps_config.c
index 47f9fe573a2b..cfb96c3e9bee 100644
--- a/sys/dev/mps/mps_config.c
+++ b/sys/dev/mps/mps_config.c
@@ -29,7 +29,6 @@
* Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
*/
-#include <sys/cdefs.h>
/* TODO Move headers to mpsvar */
#include <sys/types.h>
#include <sys/param.h>
diff --git a/sys/dev/mps/mps_mapping.c b/sys/dev/mps/mps_mapping.c
index 25d17d575970..9e069a3a924d 100644
--- a/sys/dev/mps/mps_mapping.c
+++ b/sys/dev/mps/mps_mapping.c
@@ -29,7 +29,6 @@
* Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
*/
-#include <sys/cdefs.h>
/* TODO Move headers to mpsvar */
#include <sys/types.h>
#include <sys/param.h>
diff --git a/sys/dev/mps/mps_pci.c b/sys/dev/mps/mps_pci.c
index 7a0c577eb72a..8855c186754a 100644
--- a/sys/dev/mps/mps_pci.c
+++ b/sys/dev/mps/mps_pci.c
@@ -26,7 +26,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/* PCI/PCI-X/PCIe bus interface for the Avago Tech (LSI) MPT2 controllers */
/* TODO Move headers to mpsvar */
diff --git a/sys/dev/mps/mps_sas.c b/sys/dev/mps/mps_sas.c
index fa0f817ed67b..fb591391f6a5 100644
--- a/sys/dev/mps/mps_sas.c
+++ b/sys/dev/mps/mps_sas.c
@@ -30,7 +30,6 @@
* Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
*/
-#include <sys/cdefs.h>
/* Communications core for Avago Technologies (LSI) MPT2 */
/* TODO Move headers to mpsvar */
@@ -81,6 +80,12 @@
#include <dev/mps/mps_table.h>
#include <dev/mps/mps_sas.h>
+#include <sys/sdt.h>
+
+/* SDT Probes */
+SDT_PROBE_DEFINE4(cam, , mps, complete, "union ccb *",
+ "struct mps_command *", "u_int", "u32");
+
/*
* static array to check SCSI OpCode for EEDP protection bits
*/
@@ -2077,6 +2082,9 @@ mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
sc->SSU_refcount--;
}
+ SDT_PROBE4(cam, , mps, complete, ccb, cm, sassc->flags,
+ sc->mapping_table[target_id].device_info);
+
/* Take the fast path to completion */
if (cm->cm_reply == NULL) {
if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
diff --git a/sys/dev/mps/mps_sas_lsi.c b/sys/dev/mps/mps_sas_lsi.c
index 42119b5c0a43..bcde5d69a021 100644
--- a/sys/dev/mps/mps_sas_lsi.c
+++ b/sys/dev/mps/mps_sas_lsi.c
@@ -29,7 +29,6 @@
* Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
*/
-#include <sys/cdefs.h>
/* Communications core for Avago Technologies (LSI) MPT2 */
/* TODO Move headers to mpsvar */
@@ -1153,7 +1152,6 @@ mpssas_SSU_to_SATA_devices(struct mps_softc *sc, int howto)
/*start*/FALSE,
/*load/eject*/0,
/*immediate*/FALSE,
- /*power_condition*/SSS_PC_START_VALID,
MPS_SENSE_LEN,
/*timeout*/10000);
xpt_action(ccb);
diff --git a/sys/dev/mps/mps_table.c b/sys/dev/mps/mps_table.c
index f2c6ed9b88c3..e97583ffa3f8 100644
--- a/sys/dev/mps/mps_table.c
+++ b/sys/dev/mps/mps_table.c
@@ -26,7 +26,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/* Debugging tables for MPT2 */
/* TODO Move headers to mpsvar */
diff --git a/sys/dev/mps/mps_user.c b/sys/dev/mps/mps_user.c
index 01edcbed2609..619eea6e9c69 100644
--- a/sys/dev/mps/mps_user.c
+++ b/sys/dev/mps/mps_user.c
@@ -60,7 +60,6 @@
* Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
*/
-#include <sys/cdefs.h>
/* TODO Move headers to mpsvar */
#include <sys/types.h>
#include <sys/param.h>
diff --git a/sys/dev/mxge/if_mxge.c b/sys/dev/mxge/if_mxge.c
index f36f41d53b40..ca2267098c4f 100644
--- a/sys/dev/mxge/if_mxge.c
+++ b/sys/dev/mxge/if_mxge.c
@@ -1804,7 +1804,7 @@ mxge_encap_tso(struct mxge_slice_state *ss, struct mbuf *m,
uint32_t low, high_swapped;
int len, seglen, cum_len, cum_len_next;
int next_is_first, chop, cnt, rdma_count, small;
- uint16_t pseudo_hdr_offset, cksum_offset, mss, sum;
+ uint16_t pseudo_hdr_offset, cksum_offset, mss, sum = 0;
uint8_t flags, flags_next;
static int once;
diff --git a/sys/dev/nvme/nvme_ctrlr.c b/sys/dev/nvme/nvme_ctrlr.c
index 7e1a3f02f326..b75033300061 100644
--- a/sys/dev/nvme/nvme_ctrlr.c
+++ b/sys/dev/nvme/nvme_ctrlr.c
@@ -782,6 +782,47 @@ nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
}
static void
+nvme_ctrlr_configure_apst(struct nvme_controller *ctrlr)
+{
+ struct nvme_completion_poll_status status;
+ uint64_t *data;
+ int data_size, i, read_size;
+ bool enable, error = true;
+
+ if (TUNABLE_BOOL_FETCH("hw.nvme.apst_enable", &enable) == 0 ||
+ ctrlr->cdata.apsta == 0)
+ return;
+
+ data_size = 32 * sizeof(*data);
+ data = malloc(data_size, M_NVME, M_WAITOK | M_ZERO);
+
+ if (getenv_array("hw.nvme.apst_data", data, data_size,
+ &read_size, sizeof(*data), GETENV_UNSIGNED) != 0) {
+ for (i = 0; i < read_size / sizeof(*data); ++i)
+ data[i] = htole64(data[i]);
+ } else {
+ status.done = 0;
+ nvme_ctrlr_cmd_get_feature(ctrlr,
+ NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION, 0,
+ data, data_size, nvme_completion_poll_cb, &status);
+ nvme_completion_poll(&status);
+ if (nvme_completion_is_error(&status.cpl))
+ goto out;
+ }
+
+ status.done = 0;
+ nvme_ctrlr_cmd_set_feature(ctrlr,
+ NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION, enable, 0, 0,
+ 0, 0, data, data_size, nvme_completion_poll_cb, &status);
+ nvme_completion_poll(&status);
+ error = nvme_completion_is_error(&status.cpl);
+out:
+ if (error && bootverbose)
+ nvme_printf(ctrlr, "failed to configure APST\n");
+ free(data, M_NVME);
+}
+
+static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{
ctrlr->int_coal_time = 0;
@@ -1047,6 +1088,7 @@ nvme_ctrlr_start(void *ctrlr_arg, bool resetting)
}
nvme_ctrlr_configure_aer(ctrlr);
+ nvme_ctrlr_configure_apst(ctrlr);
nvme_ctrlr_configure_int_coalescing(ctrlr);
for (i = 0; i < ctrlr->num_io_queues; i++)
@@ -1835,8 +1877,10 @@ nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
struct nvme_request *req)
{
struct nvme_qpair *qpair;
+ int32_t ioq;
- qpair = &ctrlr->ioq[QP(ctrlr, curcpu)];
+ ioq = req->ioq == NVME_IOQ_DEFAULT ? QP(ctrlr, curcpu) : req->ioq;
+ qpair = &ctrlr->ioq[ioq];
nvme_qpair_submit_request(qpair, req);
}
diff --git a/sys/dev/nvme/nvme_ctrlr_cmd.c b/sys/dev/nvme/nvme_ctrlr_cmd.c
index 5a44ed425acb..1a48a058edd8 100644
--- a/sys/dev/nvme/nvme_ctrlr_cmd.c
+++ b/sys/dev/nvme/nvme_ctrlr_cmd.c
@@ -171,7 +171,11 @@ nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, uint8_t feature,
struct nvme_request *req;
struct nvme_command *cmd;
- req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg);
+ if (payload != NULL)
+ req = nvme_allocate_request_vaddr(payload, payload_size,
+ M_WAITOK, cb_fn, cb_arg);
+ else
+ req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg);
cmd = &req->cmd;
cmd->opc = NVME_OPC_SET_FEATURES;
@@ -193,7 +197,11 @@ nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature,
struct nvme_request *req;
struct nvme_command *cmd;
- req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg);
+ if (payload != NULL)
+ req = nvme_allocate_request_vaddr(payload, payload_size,
+ M_WAITOK, cb_fn, cb_arg);
+ else
+ req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg);
cmd = &req->cmd;
cmd->opc = NVME_OPC_GET_FEATURES;
diff --git a/sys/dev/nvme/nvme_private.h b/sys/dev/nvme/nvme_private.h
index 8837275e2ed5..96ab5cc553e3 100644
--- a/sys/dev/nvme/nvme_private.h
+++ b/sys/dev/nvme/nvme_private.h
@@ -112,7 +112,9 @@ struct nvme_request {
struct memdesc payload;
nvme_cb_fn_t cb_fn;
void *cb_arg;
- int32_t retries;
+ int16_t retries;
+ uint16_t ioq;
+#define NVME_IOQ_DEFAULT 0xffff
bool payload_valid;
bool timeout;
bool spare[2]; /* Future use */
@@ -491,6 +493,7 @@ _nvme_allocate_request(const int how, nvme_cb_fn_t cb_fn, void *cb_arg)
req = malloc(sizeof(*req), M_NVME, how | M_ZERO);
if (req != NULL) {
+ req->ioq = NVME_IOQ_DEFAULT;
req->cb_fn = cb_fn;
req->cb_arg = cb_arg;
req->timeout = true;
@@ -551,6 +554,22 @@ nvme_allocate_request_ccb(union ccb *ccb, const int how, nvme_cb_fn_t cb_fn,
#define nvme_free_request(req) free(req, M_NVME)
+static __inline void
+nvme_request_set_ioq(struct nvme_controller *ctrlr, struct nvme_request *req,
+ uint16_t ioq)
+{
+ /*
+ * Note: NVMe queues are numbered 1-65535. The ioq here is numbered
+ * 0-65534 to avoid off-by-one bugs, with 65535 being reserved for
+ * DEFAULT.
+ */
+ KASSERT(ioq == NVME_IOQ_DEFAULT || ioq < ctrlr->num_io_queues,
+ ("ioq %d out of range 0..%d", ioq, ctrlr->num_io_queues));
+ if (ioq < 0 || ioq >= ctrlr->num_io_queues)
+ ioq = NVME_IOQ_DEFAULT;
+ req->ioq = ioq;
+}
+
void nvme_notify_async(struct nvme_controller *ctrlr,
const struct nvme_completion *async_cpl,
uint32_t log_page_id, void *log_page_buffer,
diff --git a/sys/dev/sdio/sdio_subr.c b/sys/dev/sdio/sdio_subr.c
index f234eb8bdc84..64b1145e2057 100644
--- a/sys/dev/sdio/sdio_subr.c
+++ b/sys/dev/sdio/sdio_subr.c
@@ -166,6 +166,36 @@ sdio_write_1(struct sdio_func *f, uint32_t addr, uint8_t val, int *err)
*err = error;
}
+uint16_t
+sdio_read_2(struct sdio_func *f, uint32_t addr, int *err)
+{
+ int error;
+ uint16_t v;
+
+ error = SDIO_READ_EXTENDED(device_get_parent(f->dev), f->fn, addr,
+ sizeof(v), (uint8_t *)&v, true);
+ if (error) {
+ if (err != NULL)
+ *err = error;
+ return (0xffff);
+ } else {
+ if (err != NULL)
+ *err = 0;
+ return (le16toh(v));
+ }
+}
+
+void
+sdio_write_2(struct sdio_func *f, uint32_t addr, uint16_t val, int *err)
+{
+ int error;
+
+ error = SDIO_WRITE_EXTENDED(device_get_parent(f->dev), f->fn, addr,
+ sizeof(val), (uint8_t *)&val, true);
+ if (err != NULL)
+ *err = error;
+}
+
uint32_t
sdio_read_4(struct sdio_func *f, uint32_t addr, int *err)
{
diff --git a/sys/dev/sdio/sdio_subr.h b/sys/dev/sdio/sdio_subr.h
index 2d2ae9b01230..96df2e7d658a 100644
--- a/sys/dev/sdio/sdio_subr.h
+++ b/sys/dev/sdio/sdio_subr.h
@@ -95,6 +95,8 @@ int sdio_set_block_size(struct sdio_func *, uint16_t);
uint8_t sdio_read_1(struct sdio_func *, uint32_t, int *);
void sdio_write_1(struct sdio_func *, uint32_t, uint8_t, int *);
+uint16_t sdio_read_2(struct sdio_func *, uint32_t, int *);
+void sdio_write_2(struct sdio_func *, uint32_t, uint16_t, int *);
uint32_t sdio_read_4(struct sdio_func *, uint32_t, int *);
void sdio_write_4(struct sdio_func *, uint32_t, uint32_t, int *);
diff --git a/sys/dev/smartpqi/smartpqi_cam.c b/sys/dev/smartpqi/smartpqi_cam.c
index ffdd9fd7da79..93043a296c5d 100644
--- a/sys/dev/smartpqi/smartpqi_cam.c
+++ b/sys/dev/smartpqi/smartpqi_cam.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2025 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -71,7 +71,6 @@ update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
cpi->hba_subvendor = pci_get_subvendor(dev);
cpi->hba_subdevice = pci_get_subdevice(dev);
-
DBG_FUNC("OUT\n");
}
@@ -154,10 +153,6 @@ os_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
}
xpt_async(AC_LOST_DEVICE, tmppath, NULL);
xpt_free_path(tmppath);
- /* softs->device_list[device->target][device->lun] = NULL; */
- int index = pqisrc_find_device_list_index(softs,device);
- if (index >= 0 && index < PQI_MAX_DEVICES)
- softs->dev_list[index] = NULL;
pqisrc_free_device(softs, device);
}
@@ -335,7 +330,7 @@ os_io_response_success(rcb_t *rcb)
static void
copy_sense_data_to_csio(struct ccb_scsiio *csio,
- uint8_t *sense_data, uint16_t sense_data_len)
+ uint8_t const *sense_data, uint16_t sense_data_len)
{
DBG_IO("IN csio = %p\n", csio);
@@ -740,7 +735,7 @@ smartpqi_target_rescan(struct pqisrc_softstate *softs)
/* if(softs->device_list[target][lun]){ */
if(softs->dev_list[index] != NULL) {
device = softs->dev_list[index];
- DBG_INFO("calling smartpqi_lun_rescan with TL = %d:%d\n",device->target,device->lun);
+ DBG_INFO("calling smartpqi_lun_rescan with T%d:L%d\n",device->target,device->lun);
smartpqi_lun_rescan(softs, device->target, device->lun);
}
}
@@ -821,7 +816,6 @@ pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
if (index == INVALID_ELEM) {
ccb->ccb_h.status = CAM_DEV_NOT_THERE;
- DBG_INFO("Invalid index/device!!!, Device BTL %u:%d:%d\n", softs->bus_id, target, lun);
return ENXIO;
}
@@ -850,7 +844,7 @@ pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
}
/* Check device reset */
if (DEVICE_RESET(dvp)) {
- ccb->ccb_h.status = CAM_SCSI_BUSY | CAM_REQ_INPROG | CAM_BUSY;
+ ccb->ccb_h.status = CAM_BUSY;
DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id);
return EBUSY;
}
@@ -915,7 +909,7 @@ pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
}
static inline int
-pqi_tmf_status_to_bsd_tmf_status(int pqi_status, rcb_t *rcb)
+pqi_tmf_status_to_bsd_tmf_status(int pqi_status, rcb_t const *rcb)
{
if (PQI_STATUS_SUCCESS == pqi_status &&
PQI_STATUS_SUCCESS == rcb->status)
@@ -931,7 +925,7 @@ static int
pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb)
{
rcb_t *rcb = NULL;
- struct ccb_hdr *ccb_h = &ccb->ccb_h;
+ struct ccb_hdr const *ccb_h = &ccb->ccb_h;
rcb_t *prcb = ccb->ccb_h.sim_priv.entries[0].ptr;
uint32_t tag;
int rval;
@@ -971,7 +965,7 @@ error_tmf:
static int
pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb)
{
- struct ccb_hdr *ccb_h = &ccb->ccb_h;
+ struct ccb_hdr const *ccb_h = &ccb->ccb_h;
rcb_t *rcb = NULL;
uint32_t tag;
int rval;
@@ -1013,7 +1007,7 @@ pqisrc_target_reset( pqisrc_softstate_t *softs, union ccb *ccb)
{
/* pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; */
- struct ccb_hdr *ccb_h = &ccb->ccb_h;
+ struct ccb_hdr const *ccb_h = &ccb->ccb_h;
rcb_t *rcb = NULL;
uint32_t tag;
int rval;
@@ -1069,7 +1063,7 @@ static void
smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
struct pqisrc_softstate *softs = cam_sim_softc(sim);
- struct ccb_hdr *ccb_h = &ccb->ccb_h;
+ struct ccb_hdr const *ccb_h = &ccb->ccb_h;
DBG_FUNC("IN\n");
@@ -1209,22 +1203,19 @@ smartpqi_async(void *callback_arg, u_int32_t code,
}
uint32_t t_id = cgd->ccb_h.target_id;
- /* if (t_id <= (PQI_CTLR_INDEX - 1)) { */
- if (t_id >= PQI_CTLR_INDEX) {
- if (softs != NULL) {
- /* pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun]; */
- int lun = cgd->ccb_h.target_lun;
- int index = pqisrc_find_btl_list_index(softs,softs->bus_id,t_id,lun);
- if (index != INVALID_ELEM) {
- pqi_scsi_dev_t *dvp = softs->dev_list[index];
- if (dvp == NULL) {
- DBG_ERR("Target is null, target id=%u\n", t_id);
- break;
- }
- smartpqi_adjust_queue_depth(path, dvp->queue_depth);
- }
- }
- }
+ if (softs != NULL) {
+ /* pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun]; */
+ int lun = cgd->ccb_h.target_lun;
+ int index = pqisrc_find_btl_list_index(softs,softs->bus_id,t_id,lun);
+ if (index != INVALID_ELEM) {
+ pqi_scsi_dev_t const *dvp = softs->dev_list[index];
+ if (dvp == NULL) {
+ DBG_ERR("Target is null, target id=%u\n", t_id);
+ break;
+ }
+ smartpqi_adjust_queue_depth(path, dvp->queue_depth);
+ }
+ }
break;
}
default:
diff --git a/sys/dev/smartpqi/smartpqi_cmd.c b/sys/dev/smartpqi/smartpqi_cmd.c
index 8486ac12df79..b71879aa81f6 100644
--- a/sys/dev/smartpqi/smartpqi_cmd.c
+++ b/sys/dev/smartpqi/smartpqi_cmd.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2025 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,7 +35,7 @@ pqisrc_submit_cmnd(pqisrc_softstate_t *softs, ib_queue_t *ib_q, void *req)
{
char *slot = NULL;
uint32_t offset;
- iu_header_t *hdr = (iu_header_t *)req;
+ iu_header_t const *hdr = (iu_header_t *)req;
/*TODO : Can be fixed a size copying of IU ? */
uint32_t iu_len = hdr->iu_length + 4 ; /* header size */
int i = 0;
diff --git a/sys/dev/smartpqi/smartpqi_controllers.h b/sys/dev/smartpqi/smartpqi_controllers.h
new file mode 100644
index 000000000000..6356159fd5f7
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_controllers.h
@@ -0,0 +1,371 @@
+/*-
+ * Copyright 2016-2025 Microchip Technology, Inc. and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Supported controllers
+ */
+struct pqi_ident
+{
+ u_int16_t vendor;
+ u_int16_t device;
+ u_int16_t subvendor;
+ u_int16_t subdevice;
+ int hwif;
+ char *desc;
+} pqi_identifiers[] = {
+ /* (MSCC PM8205 8x12G based) */
+ {0x9005, 0x028f, 0x103c, 0x600, PQI_HWIF_SRCV, "P408i-p SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x601, PQI_HWIF_SRCV, "P408e-p SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x602, PQI_HWIF_SRCV, "P408i-a SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x603, PQI_HWIF_SRCV, "P408i-c SR Gen10"},
+ {0x9005, 0x028f, 0x1028, 0x1FE0, PQI_HWIF_SRCV, "SmartRAID 3162-8i/eDell"},
+ {0x9005, 0x028f, 0x9005, 0x608, PQI_HWIF_SRCV, "SmartRAID 3162-8i/e"},
+ {0x9005, 0x028f, 0x103c, 0x609, PQI_HWIF_SRCV, "P408i-sb SR G10"},
+
+ /* (MSCC PM8225 8x12G based) */
+ {0x9005, 0x028f, 0x103c, 0x650, PQI_HWIF_SRCV, "E208i-p SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x651, PQI_HWIF_SRCV, "E208e-p SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x652, PQI_HWIF_SRCV, "E208i-c SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x654, PQI_HWIF_SRCV, "E208i-a SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x655, PQI_HWIF_SRCV, "P408e-m SR Gen10"},
+ {0x9005, 0x028f, 0x9005, 0x659, PQI_HWIF_SRCV, "2100C8iOXS"},
+
+ /* (MSCC PM8221 8x12G based) */
+ {0x9005, 0x028f, 0x103c, 0x700, PQI_HWIF_SRCV, "P204i-c SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x701, PQI_HWIF_SRCV, "P204i-b SR Gen10"},
+ {0x9005, 0x028f, 0x193d, 0x1104, PQI_HWIF_SRCV, "UN RAID P2404-Mf-4i-2GB"},
+ {0x9005, 0x028f, 0x193d, 0x1106, PQI_HWIF_SRCV, "UN RAID P2404-Mf-4i-1GB"},
+ {0x9005, 0x028f, 0x193d, 0x1108, PQI_HWIF_SRCV, "UN RAID P4408-Ma-8i-2GB"},
+ {0x9005, 0x028f, 0x193d, 0x1109, PQI_HWIF_SRCV, "UN RAID P4408-Mr-8i-2GB"},
+
+ /* (MSCC PM8204 8x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x800, PQI_HWIF_SRCV, "SmartRAID 3154-8i"},
+ {0x9005, 0x028f, 0x9005, 0x801, PQI_HWIF_SRCV, "SmartRAID 3152-8i"},
+ {0x9005, 0x028f, 0x9005, 0x802, PQI_HWIF_SRCV, "SmartRAID 3151-4i"},
+ {0x9005, 0x028f, 0x9005, 0x803, PQI_HWIF_SRCV, "SmartRAID 3101-4i"},
+ {0x9005, 0x028f, 0x9005, 0x804, PQI_HWIF_SRCV, "SmartRAID 3154-8e"},
+ {0x9005, 0x028f, 0x9005, 0x805, PQI_HWIF_SRCV, "SmartRAID 3102-8i"},
+ {0x9005, 0x028f, 0x9005, 0x806, PQI_HWIF_SRCV, "SmartRAID 3100"},
+ {0x9005, 0x028f, 0x9005, 0x807, PQI_HWIF_SRCV, "SmartRAID 3162-8i"},
+ {0x9005, 0x028f, 0x152d, 0x8a22, PQI_HWIF_SRCV, "QS-8204-8i"},
+ {0x9005, 0x028f, 0x193d, 0xf460, PQI_HWIF_SRCV, "UN RAID P460-M4"},
+ {0x9005, 0x028f, 0x193d, 0xf461, PQI_HWIF_SRCV, "UN RAID P460-B4"},
+ {0x9005, 0x028f, 0x1bd4, 0x004b, PQI_HWIF_SRCV, "PM8204-2GB"},
+ {0x9005, 0x028f, 0x1bd4, 0x004c, PQI_HWIF_SRCV, "PM8204-4GB"},
+ {0x9005, 0x028f, 0x1ff9, 0x004b, PQI_HWIF_SRCV, "RAID PM8204-2GB"},
+ {0x9005, 0x028f, 0x1ff9, 0x004c, PQI_HWIF_SRCV, "RAID PM8204-4GB"},
+ {0x9005, 0x028f, 0x193d, 0x0462, PQI_HWIF_SRCV, "UN RAID P460-Mr1-8i-4GB"},
+ {0x9005, 0x028f, 0x193d, 0x1105, PQI_HWIF_SRCV, "UN RAID P4408-Mf-8i-2GB"},
+ {0x9005, 0x028f, 0x193d, 0x1107, PQI_HWIF_SRCV, "UN RAID P4408-Mf-8i-4GB"},
+ {0x9005, 0x028f, 0x193d, 0x1110, PQI_HWIF_SRCV, "UN RAID P4408-Mr-2"},
+ {0x9005, 0x028f, 0x1d8d, 0x800, PQI_HWIF_SRCV, "Fiberhome SmartRAID AIS-8204-8i"},
+ {0x9005, 0x028f, 0x9005, 0x0808, PQI_HWIF_SRCV, "SmartRAID 3101E-4i"},
+ {0x9005, 0x028f, 0x9005, 0x0809, PQI_HWIF_SRCV, "SmartRAID 3102E-8i"},
+ {0x9005, 0x028f, 0x9005, 0x080a, PQI_HWIF_SRCV, "SmartRAID 3152-8i/N"},
+ {0x9005, 0x028f, 0x1cc4, 0x0101, PQI_HWIF_SRCV, "Ramaxel FBGF-RAD PM8204"},
+ {0x9005, 0x028f, 0x1f3a, 0x0104, PQI_HWIF_SRCV, "PL SmartROC PM8204"},
+ {0x9005, 0x028f, 0x1f51, 0x1043, PQI_HWIF_SRCV, "SmartRAID P7502-8i"},
+ {0x9005, 0x028f, 0x1f51, 0x1045, PQI_HWIF_SRCV, "SmartRAID P7504-8i"},
+ {0x9005, 0x028f, 0x1f51, 0x1011, PQI_HWIF_SRCV, "SmartRAID P7504N-8i"},
+ {0x9005, 0x028f, 0x207d, 0x4140, PQI_HWIF_SRCV, "HRDT TrustRAID D3152s-8i"},
+ {0x9005, 0x028f, 0x207d, 0x4240, PQI_HWIF_SRCV, "HRDT TrustRAID D3154s-8i"},
+
+ /* (MSCC PM8222 8x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x900, PQI_HWIF_SRCV, "SmartHBA 2100-8i"},
+ {0x9005, 0x028f, 0x9005, 0x901, PQI_HWIF_SRCV, "SmartHBA 2100-4i"},
+ {0x9005, 0x028f, 0x9005, 0x902, PQI_HWIF_SRCV, "HBA 1100-8i"},
+ {0x9005, 0x028f, 0x9005, 0x903, PQI_HWIF_SRCV, "HBA 1100-4i"},
+ {0x9005, 0x028f, 0x9005, 0x904, PQI_HWIF_SRCV, "SmartHBA 2100-8e"},
+ {0x9005, 0x028f, 0x9005, 0x905, PQI_HWIF_SRCV, "HBA 1100-8e"},
+ {0x9005, 0x028f, 0x9005, 0x906, PQI_HWIF_SRCV, "SmartHBA 2100-4i4e"},
+ {0x9005, 0x028f, 0x9005, 0x907, PQI_HWIF_SRCV, "HBA 1100"},
+ {0x9005, 0x028f, 0x9005, 0x908, PQI_HWIF_SRCV, "SmartHBA 2100"},
+ {0x9005, 0x028f, 0x9005, 0x90a, PQI_HWIF_SRCV, "SmartHBA 2100A-8i"},
+ {0x9005, 0x028f, 0x193d, 0x8460, PQI_HWIF_SRCV, "UN HBA H460-M1"},
+ {0x9005, 0x028f, 0x193d, 0x8461, PQI_HWIF_SRCV, "UN HBA H460-B1"},
+ {0x9005, 0x028f, 0x193d, 0x8462, PQI_HWIF_SRCV, "UN HBA H460-Mr1-8i"},
+ {0x9005, 0x028f, 0x193d, 0xc460, PQI_HWIF_SRCV, "UN RAID P460-M2"},
+ {0x9005, 0x028f, 0x193d, 0xc461, PQI_HWIF_SRCV, "UN RAID P460-B2"},
+ {0x9005, 0x028f, 0x1bd4, 0x004a, PQI_HWIF_SRCV, "PM8222-SHBA"},
+ {0x9005, 0x028f, 0x1ff9, 0x004a, PQI_HWIF_SRCV, "PM8222-SHBA"},
+ {0x9005, 0x028f, 0x13fe, 0x8312, PQI_HWIF_SRCV, "MIC-8312BridgeB"},
+ {0x9005, 0x028f, 0x1bd4, 0x004f, PQI_HWIF_SRCV, "PM8222-HBA"},
+ {0x9005, 0x028f, 0x1ff9, 0x004f, PQI_HWIF_SRCV, "PM8222-HBA"},
+ {0x9005, 0x028f, 0x1d8d, 0x908, PQI_HWIF_SRCV, "Fiberhome SmartHBA AIS-8222-8i"},
+ {0x9005, 0x028f, 0x1bd4, 0x006C, PQI_HWIF_SRCV, "RS0800M5E8i"},
+ {0x9005, 0x028f, 0x1bd4, 0x006D, PQI_HWIF_SRCV, "RS0800M5H8i"},
+ {0x9005, 0x028f, 0x1ff9, 0x006C, PQI_HWIF_SRCV, "RS0800M5E8i"},
+ {0x9005, 0x028f, 0x1ff9, 0x006D, PQI_HWIF_SRCV, "RS0800M5H8i"},
+ {0x9005, 0x028f, 0x1cc4, 0x0201, PQI_HWIF_SRCV, "Ramaxel FBGF-RAD PM8222"},
+ {0x9005, 0x028f, 0x1f51, 0x1044, PQI_HWIF_SRCV, "SmartHBA P6500-8i"},
+ {0x9005, 0x028f, 0x1f3f, 0x0610, PQI_HWIF_SRCV, "3SNIC SSSRAID 3S610"},
+ {0x9005, 0x028f, 0x207d, 0x4840, PQI_HWIF_SRCV, "HRDT TrustHBA H3100s-8i"},
+
+ /* (SRCx MSCC FVB 24x12G based) */
+ {0x9005, 0x028f, 0x103c, 0x1001, PQI_HWIF_SRCV, "MSCC FVB"},
+
+ /* (MSCC PM8241 24x12G based) */
+
+ /* (MSCC PM8242 24x12G based) */
+ {0x9005, 0x028f, 0x152d, 0x8a37, PQI_HWIF_SRCV, "QS-8242-24i"},
+ {0x9005, 0x028f, 0x9005, 0x1300, PQI_HWIF_SRCV, "HBA 1100-8i8e"},
+ {0x9005, 0x028f, 0x9005, 0x1301, PQI_HWIF_SRCV, "HBA 1100-24i"},
+ {0x9005, 0x028f, 0x9005, 0x1302, PQI_HWIF_SRCV, "SmartHBA 2100-8i8e"},
+ {0x9005, 0x028f, 0x9005, 0x1303, PQI_HWIF_SRCV, "SmartHBA 2100-24i"},
+ {0x9005, 0x028f, 0x105b, 0x1321, PQI_HWIF_SRCV, "8242-24i"},
+ {0x9005, 0x028f, 0x1bd4, 0x0045, PQI_HWIF_SRCV, "SMART-HBA 8242-24i"},
+ {0x9005, 0x028f, 0x1ff9, 0x0045, PQI_HWIF_SRCV, "SMART-HBA 8242-24i"},
+ {0x9005, 0x028f, 0x1bd4, 0x006B, PQI_HWIF_SRCV, "RS0800M5H24i"},
+ {0x9005, 0x028f, 0x1bd4, 0x0070, PQI_HWIF_SRCV, "RS0800M5E24i"},
+ {0x9005, 0x028f, 0x1ff9, 0x006B, PQI_HWIF_SRCV, "RS0800M5H24i"},
+ {0x9005, 0x028f, 0x1ff9, 0x0070, PQI_HWIF_SRCV, "RS0800M5E24i"},
+
+ /* (MSCC PM8236 16x12G based) */
+ {0x9005, 0x028f, 0x152d, 0x8a24, PQI_HWIF_SRCV, "QS-8236-16i"},
+ {0x9005, 0x028f, 0x9005, 0x1380, PQI_HWIF_SRCV, "SmartRAID 3154-16i"},
+ {0x9005, 0x028f, 0x1bd4, 0x0046, PQI_HWIF_SRCV, "RAID 8236-16i"},
+ {0x9005, 0x028f, 0x1ff9, 0x0046, PQI_HWIF_SRCV, "RAID 8236-16i"},
+ {0x9005, 0x028f, 0x1d8d, 0x806, PQI_HWIF_SRCV, "Fiberhome SmartRAID AIS-8236-16i"},
+ {0x9005, 0x028f, 0x1cf2, 0x0B27, PQI_HWIF_SRCV, "ZTE SmartROC3100 SDPSA/B-18i 4G"},
+ {0x9005, 0x028f, 0x1cf2, 0x0B45, PQI_HWIF_SRCV, "ZTE SmartROC3100 SDPSA/B_L-18i 2G"},
+ {0x9005, 0x028f, 0x1cf2, 0x5445, PQI_HWIF_SRCV, "ZTE SmartROC3100 RM241-18i 2G"},
+ {0x9005, 0x028f, 0x1cf2, 0x5446, PQI_HWIF_SRCV, "ZTE SmartROC3100 RM242-18i 4G"},
+ {0x9005, 0x028f, 0x1cf2, 0x5449, PQI_HWIF_SRCV, "ZTE SmartROC3100 RS241-18i 2G"},
+ {0x9005, 0x028f, 0x1cf2, 0x544A, PQI_HWIF_SRCV, "ZTE SmartROC3100 RS242-18i 4G"},
+ {0x9005, 0x028f, 0x1cf2, 0x544D, PQI_HWIF_SRCV, "ZTE SmartROC3100 RM241B-18i 2G"},
+ {0x9005, 0x028f, 0x1cf2, 0x544E, PQI_HWIF_SRCV, "ZTE SmartROC3100 RM242B-18i 4G"},
+ {0x9005, 0x028f, 0x1bd4, 0x006F, PQI_HWIF_SRCV, "RS0804M5R16i"},
+ {0x9005, 0x028f, 0x1ff9, 0x006F, PQI_HWIF_SRCV, "RS0804M5R16i"},
+ {0x9005, 0x028f, 0x1f51, 0x1010, PQI_HWIF_SRCV, "SmartRAID P7504N-16i"},
+
+
+
+ /* (MSCC PM8237 24x12G based) */
+ {0x9005, 0x028f, 0x103c, 0x1100, PQI_HWIF_SRCV, "P816i-a SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x1101, PQI_HWIF_SRCV, "P416ie-m SR G10"},
+
+ /* (MSCC PM8238 16x12G based) */
+ {0x9005, 0x028f, 0x152d, 0x8a23, PQI_HWIF_SRCV, "QS-8238-16i"},
+ {0x9005, 0x028f, 0x9005, 0x1280, PQI_HWIF_SRCV, "HBA 1100-16i"},
+ {0x9005, 0x028f, 0x9005, 0x1281, PQI_HWIF_SRCV, "HBA 1100-16e"},
+ {0x9005, 0x028f, 0x105b, 0x1211, PQI_HWIF_SRCV, "8238-16i"},
+ {0x9005, 0x028f, 0x1bd4, 0x0048, PQI_HWIF_SRCV, "SMART-HBA 8238-16i"},
+ {0x9005, 0x028f, 0x1ff9, 0x0048, PQI_HWIF_SRCV, "SMART-HBA 8238-16i"},
+ {0x9005, 0x028f, 0x9005, 0x1282, PQI_HWIF_SRCV, "SmartHBA 2100-16i"},
+ {0x9005, 0x028f, 0x1d8d, 0x916, PQI_HWIF_SRCV, "Fiberhome SmartHBA AIS-8238-16i"},
+ {0x9005, 0x028f, 0x1458, 0x1000, PQI_HWIF_SRCV, "GIGABYTE SmartHBA CLN1832"},
+ {0x9005, 0x028f, 0x1cf2, 0x0B29, PQI_HWIF_SRCV, "ZTE SmartIOC2100 SDPSA/B_I-18i"},
+ {0x9005, 0x028f, 0x1cf2, 0x5447, PQI_HWIF_SRCV, "ZTE SmartIOC2100 RM243-18i"},
+ {0x9005, 0x028f, 0x1cf2, 0x544B, PQI_HWIF_SRCV, "ZTE SmartIOC2100 RS243-18i"},
+ {0x9005, 0x028f, 0x1cf2, 0x544F, PQI_HWIF_SRCV, "ZTE SmartIOC2100 RM243B-18i"},
+ {0x9005, 0x028f, 0x1bd4, 0x0071, PQI_HWIF_SRCV, "RS0800M5H16i"},
+ {0x9005, 0x028f, 0x1bd4, 0x0072, PQI_HWIF_SRCV, "RS0800M5E16i"},
+ {0x9005, 0x028f, 0x1ff9, 0x0071, PQI_HWIF_SRCV, "RS0800M5H16i"},
+ {0x9005, 0x028f, 0x1ff9, 0x0072, PQI_HWIF_SRCV, "RS0800M5E16i"},
+ {0x9005, 0x028f, 0x1018, 0x8238, PQI_HWIF_SRCV, "Ramaxel SmartHBA RX8238-16i"},
+
+ /* (MSCC PM8240 24x12G based) */
+ {0x9005, 0x028f, 0x152d, 0x8a36, PQI_HWIF_SRCV, "QS-8240-24i"},
+ {0x9005, 0x028f, 0x9005, 0x1200, PQI_HWIF_SRCV, "SmartRAID 3154-24i"},
+ {0x9005, 0x028f, 0x9005, 0x1201, PQI_HWIF_SRCV, "SmartRAID 3154-8i16e"},
+ {0x9005, 0x028f, 0x9005, 0x1202, PQI_HWIF_SRCV, "SmartRAID 3154-8i8e"},
+ {0x9005, 0x028f, 0x1bd4, 0x0047, PQI_HWIF_SRCV, "RAID 8240-24i"},
+ {0x9005, 0x028f, 0x1ff9, 0x0047, PQI_HWIF_SRCV, "RAID 8240-24i"},
+ {0x9005, 0x028f, 0x1dfc, 0x3161, PQI_HWIF_SRCV, "NTCOM SAS3 RAID-24i"},
+ {0x9005, 0x028f, 0x1F0C, 0x3161, PQI_HWIF_SRCV, "NT RAID 3100-24i"},
+
+ /* Huawei ID's */
+ {0x9005, 0x028f, 0x19e5, 0xd227, PQI_HWIF_SRCV, "SR465C-M 4G"},
+ {0x9005, 0x028f, 0x19e5, 0xd22a, PQI_HWIF_SRCV, "SR765-M"},
+ {0x9005, 0x028f, 0x19e5, 0xd228, PQI_HWIF_SRCV, "SR455C-M 2G"},
+ {0x9005, 0x028f, 0x19e5, 0xd22c, PQI_HWIF_SRCV, "SR455C-M 4G"},
+ {0x9005, 0x028f, 0x19e5, 0xd229, PQI_HWIF_SRCV, "SR155-M"},
+ {0x9005, 0x028f, 0x19e5, 0xd22b, PQI_HWIF_SRCV, "SR455C-ME 4G"},
+
+ /* (MSCC PM8252 8x12G based) */
+ {0x9005, 0x028f, 0x193d, 0x110b, PQI_HWIF_SRCV, "UN HBA H4508-Mf-8i"},
+ {0x9005, 0x028f, 0x1bd4, 0x0052, PQI_HWIF_SRCV, "MT0801M6E"},
+ {0x9005, 0x028f, 0x1bd4, 0x0054, PQI_HWIF_SRCV, "MT0800M6H"},
+ {0x9005, 0x028f, 0x1bd4, 0x0086, PQI_HWIF_SRCV, "RT0800M7E"},
+ {0x9005, 0x028f, 0x1bd4, 0x0087, PQI_HWIF_SRCV, "RT0800M7H"},
+ {0x9005, 0x028f, 0x1ff9, 0x0052, PQI_HWIF_SRCV, "MT0801M6E"},
+ {0x9005, 0x028f, 0x1ff9, 0x0054, PQI_HWIF_SRCV, "MT0800M6H"},
+ {0x9005, 0x028f, 0x1BD4, 0x00a3, PQI_HWIF_SRCV, "RT0800M6E2i"},
+ {0x9005, 0x028f, 0x1ff9, 0x00a3, PQI_HWIF_SRCV, "RT0800M6E2i"},
+ {0x9005, 0x028f, 0x1ff9, 0x0086, PQI_HWIF_SRCV, "RT0800M7E"},
+ {0x9005, 0x028f, 0x1ff9, 0x0087, PQI_HWIF_SRCV, "RT0800M7H"},
+ {0x9005, 0x028f, 0x1f51, 0x1001, PQI_HWIF_SRCV, "SmartHBA P6600-8i"},
+ {0x9005, 0x028f, 0x1f51, 0x1003, PQI_HWIF_SRCV, "SmartHBA P6600-8e"},
+ {0x9005, 0x028f, 0x9005, 0x1460, PQI_HWIF_SRCV, "HBA 1200"},
+ {0x9005, 0x028f, 0x9005, 0x1461, PQI_HWIF_SRCV, "SmartHBA 2200"},
+ {0x9005, 0x028f, 0x9005, 0x1462, PQI_HWIF_SRCV, "HBA 1200-8i"},
+ {0x9005, 0x028f, 0x1d49, 0x0222, PQI_HWIF_SRCV, "4450-8i HBA"},
+ {0x9005, 0x028f, 0x207d, 0x4044, PQI_HWIF_SRCV, "HRDT TrustHBA H4100-8i"},
+ {0x9005, 0x028f, 0x207d, 0x4054, PQI_HWIF_SRCV, "HRDT TrustHBA H4100-8e"},
+
+ /* (MSCC PM8254 32x12G based) */
+ {0x9005, 0x028f, 0x1bd4, 0x0051, PQI_HWIF_SRCV, "MT0804M6R"},
+ {0x9005, 0x028f, 0x1bd4, 0x0053, PQI_HWIF_SRCV, "MT0808M6R"},
+ {0x9005, 0x028f, 0x1bd4, 0x0088, PQI_HWIF_SRCV, "RT0804M7R"},
+ {0x9005, 0x028f, 0x1bd4, 0x0089, PQI_HWIF_SRCV, "RT0808M7R"},
+ {0x9005, 0x028f, 0x1ff9, 0x0051, PQI_HWIF_SRCV, "MT0804M6R"},
+ {0x9005, 0x028f, 0x1ff9, 0x0053, PQI_HWIF_SRCV, "MT0808M6R"},
+ {0x9005, 0x028f, 0x1ff9, 0x0088, PQI_HWIF_SRCV, "RT0804M7R"},
+ {0x9005, 0x028f, 0x1ff9, 0x0089, PQI_HWIF_SRCV, "RT0808M7R"},
+ {0x9005, 0x028f, 0x1f51, 0x1002, PQI_HWIF_SRCV, "SmartRAID P7604-8i"},
+ {0x9005, 0x028f, 0x1f51, 0x1004, PQI_HWIF_SRCV, "SmartRAID P7604-8e"},
+ {0x9005, 0x028f, 0x1f51, 0x100f, PQI_HWIF_SRCV, "SmartRAID P7604N-8i"},
+ {0x9005, 0x028f, 0x9005, 0x14a0, PQI_HWIF_SRCV, "SmartRAID 3254-8i"},
+ {0x9005, 0x028f, 0x9005, 0x14a1, PQI_HWIF_SRCV, "SmartRAID 3204-8i"},
+ {0x9005, 0x028f, 0x9005, 0x14a2, PQI_HWIF_SRCV, "SmartRAID 3252-8i"},
+ {0x9005, 0x028f, 0x9005, 0x14a4, PQI_HWIF_SRCV, "SmartRAID 3254-8i /e"},
+ {0x9005, 0x028f, 0x9005, 0x14a5, PQI_HWIF_SRCV, "SmartRAID 3252-8i /e"},
+ {0x9005, 0x028f, 0x9005, 0x14a6, PQI_HWIF_SRCV, "SmartRAID 3204-8i /e"},
+ {0x9005, 0x028f, 0x1d49, 0x0624, PQI_HWIF_SRCV, "9450-8i 4GB Flash"},
+
+ /* (MSCC PM8262 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x14c0, PQI_HWIF_SRCV, "SmartHBA 2200-16i"},
+ {0x9005, 0x028f, 0x9005, 0x14c1, PQI_HWIF_SRCV, "HBA 1200-16i"},
+ {0x9005, 0x028f, 0x9005, 0x14c3, PQI_HWIF_SRCV, "HBA 1200-16e"},
+ {0x9005, 0x028f, 0x9005, 0x14c4, PQI_HWIF_SRCV, "HBA 1200-8e"},
+ {0x9005, 0x028f, 0x1f51, 0x1005, PQI_HWIF_SRCV, "SmartHBA P6600-16i"},
+ {0x9005, 0x028f, 0x1f51, 0x1007, PQI_HWIF_SRCV, "SmartHBA P6600-8i8e"},
+ {0x9005, 0x028f, 0x1f51, 0x1009, PQI_HWIF_SRCV, "SmartHBA P6600-16e"},
+ {0x9005, 0x028f, 0x1cf2, 0x54dc, PQI_HWIF_SRCV, "ZTE SmartIOC2200 RM346-16i"},
+ {0x9005, 0x028f, 0x1cf2, 0x0806, PQI_HWIF_SRCV, "ZTE SmartIOC2200 RS346-16i"},
+ {0x9005, 0x028f, 0x1d49, 0x0223, PQI_HWIF_SRCV, "4450-16i HBA"},
+ {0x9005, 0x028f, 0x1d49, 0x0224, PQI_HWIF_SRCV, "4450-8e HBA"},
+ {0x9005, 0x028f, 0x1d49, 0x0225, PQI_HWIF_SRCV, "4450-16e HBA"},
+ {0x9005, 0x028f, 0x1d49, 0x0521, PQI_HWIF_SRCV, "5450-16i"},
+ {0x9005, 0x028f, 0x207d, 0x4084, PQI_HWIF_SRCV, "HRDT TrustHBA H4100-16i"},
+ {0x9005, 0x028f, 0x207d, 0x4094, PQI_HWIF_SRCV, "HRDT TrustHBA H4100-16e"},
+
+ /* (MSCC PM8264 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x14b0, PQI_HWIF_SRCV, "SmartRAID 3254-16i"},
+ {0x9005, 0x028f, 0x9005, 0x14b1, PQI_HWIF_SRCV, "SmartRAID 3258-16i"},
+ {0x9005, 0x028f, 0x1f51, 0x1006, PQI_HWIF_SRCV, "SmartRAID P7608-16i"},
+ {0x9005, 0x028f, 0x1f51, 0x1008, PQI_HWIF_SRCV, "SmartRAID P7608-8i8e"},
+ {0x9005, 0x028f, 0x1f51, 0x100a, PQI_HWIF_SRCV, "SmartRAID P7608-16e"},
+ {0x9005, 0x028f, 0x1cf2, 0x54da, PQI_HWIF_SRCV, "ZTE SmartROC3200 RM344-16i 4G"},
+ {0x9005, 0x028f, 0x1cf2, 0x54db, PQI_HWIF_SRCV, "ZTE SmartROC3200 RM345-16i 8G"},
+ {0x9005, 0x028f, 0x1cf2, 0x0804, PQI_HWIF_SRCV, "ZTE SmartROC3200 RS344-16i 4G"},
+ {0x9005, 0x028f, 0x1cf2, 0x0805, PQI_HWIF_SRCV, "ZTE SmartROC3200 RS345-16i 8G"},
+ {0x9005, 0x028f, 0x1f51, 0x100e, PQI_HWIF_SRCV, "SmartRAID P7604N-16i"},
+ {0x9005, 0x028f, 0x1d49, 0x0625, PQI_HWIF_SRCV, "9450-16i 4GB Flash"},
+ {0x9005, 0x028f, 0x1d49, 0x0626, PQI_HWIF_SRCV, "9450-16i 8GB Flash"},
+
+ /* (MSCC PM8265 16x12G based) */
+ {0x9005, 0x028f, 0x1590, 0x02dc, PQI_HWIF_SRCV, "SR416i-a Gen10+"},
+ {0x9005, 0x028f, 0x9005, 0x1470, PQI_HWIF_SRCV, "SmartRAID 3200"},
+ {0x9005, 0x028f, 0x9005, 0x1471, PQI_HWIF_SRCV, "SmartRAID 3254-16i /e"},
+ {0x9005, 0x028f, 0x9005, 0x1472, PQI_HWIF_SRCV, "SmartRAID 3258-16i /e"},
+ {0x9005, 0x028f, 0x9005, 0x1473, PQI_HWIF_SRCV, "SmartRAID 3284-16io /e/uC"},
+ {0x9005, 0x028f, 0x9005, 0x1474, PQI_HWIF_SRCV, "SmartRAID 3254-16io /e"},
+ {0x9005, 0x028f, 0x9005, 0x1475, PQI_HWIF_SRCV, "SmartRAID 3254-16e /e"},
+
+ /* (MSCC PM8266 16x12G based) */
+ {0x9005, 0x028f, 0x1014, 0x0718, PQI_HWIF_SRCV, "IBM 4-Port 24G SAS"},
+ {0x9005, 0x028f, 0x9005, 0x1490, PQI_HWIF_SRCV, "HBA 1200p Ultra"},
+ {0x9005, 0x028f, 0x9005, 0x1491, PQI_HWIF_SRCV, "SmartHBA 2200p Ultra"},
+ {0x9005, 0x028f, 0x9005, 0x1402, PQI_HWIF_SRCV, "HBA Ultra 1200P-16i"},
+ {0x9005, 0x028f, 0x9005, 0x1441, PQI_HWIF_SRCV, "HBA Ultra 1200P-32i"},
+ {0x9005, 0x028f, 0x1137, 0x0300, PQI_HWIF_SRCV, "Cisco 24G TriMode M1 HBA LFF 32D UCSC-HBAMP1LL32"},
+
+ /* (MSCC PM8268 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x14d0, PQI_HWIF_SRCV, "SmartRAID Ultra 3258P-16i"},
+
+ /* (MSCC PM8269 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x1400, PQI_HWIF_SRCV, "SmartRAID Ultra 3258P-16i /e"},
+ {0x9005, 0x028f, 0x1ff9, 0x00a1, PQI_HWIF_SRCV, "RT1608M6R16i"},
+
+ /* (MSCC PM8270 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x1410, PQI_HWIF_SRCV, "HBA Ultra 1200P-16e"},
+ {0x9005, 0x028f, 0x9005, 0x1411, PQI_HWIF_SRCV, "HBA 1200 Ultra"},
+ {0x9005, 0x028f, 0x9005, 0x1412, PQI_HWIF_SRCV, "SmartHBA 2200 Ultra"},
+ {0x9005, 0x028f, 0x9005, 0x1463, PQI_HWIF_SRCV, "SmartHBA 2200-8io /e"},
+ {0x9005, 0x028f, 0x9005, 0x14c2, PQI_HWIF_SRCV, "SmartHBA 2200-16io /e"},
+ {0x9005, 0x028f, 0x1337, 0x02fa, PQI_HWIF_SRCV, "Cisco 24G TriMode M1 HBA 16D UCSC-HBA-M1L16"},
+
+ /* (MSCC PM8271 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x14e0, PQI_HWIF_SRCV, "SmartIOC PM8271"},
+
+ /* (MSCC PM8272 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x1420, PQI_HWIF_SRCV, "SmartRAID Ultra 3254-16e"},
+ {0x9005, 0x028f, 0x1d49, 0x0628, PQI_HWIF_SRCV, "9450-16e 4GB Flash"},
+
+ /* (MSCC PM8273 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x1430, PQI_HWIF_SRCV, "SmartRAID Ultra 3254-16e /e"},
+ {0x9005, 0x028f, 0x1137, 0x02f9, PQI_HWIF_SRCV, "Cisco 24G TriMode M1 RAID 4GB FBWC 16D UCSC-RAID-M1L16"},
+ {0x9005, 0x028f, 0x1137, 0x02ff, PQI_HWIF_SRCV, "Cisco 24G TriMode M1 RAID 4GB FBWC 6D UCSX-RAID-M1L6"},
+
+ /* (MSCC PM8274 16x12G based) */
+ {0x9005, 0x028f, 0x1e93, 0x1000, PQI_HWIF_SRCV, "ByteHBA JGH43024-8"},
+ {0x9005, 0x028f, 0x1e93, 0x1001, PQI_HWIF_SRCV, "ByteHBA JGH43034-8"},
+ {0x9005, 0x028f, 0x1e93, 0x1005, PQI_HWIF_SRCV, "ByteHBA JGH43014-8"},
+ {0x9005, 0x028f, 0x1f51, 0x100B, PQI_HWIF_SRCV, "SmartHBA P6600-24i"},
+
+ /* (MSCC PM8275 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x14f0, PQI_HWIF_SRCV, "SmartIOC PM8275"},
+
+ /* (MSCC PM8276 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x1480, PQI_HWIF_SRCV, "SmartRAID 3200 Ultra"},
+ {0x9005, 0x028f, 0x1e93, 0x1002, PQI_HWIF_SRCV, "ByteHBA JGH44014-8"},
+
+ /* (MSCC PM8277 16x12G based) */
+ {0x9005, 0x028f, 0x1137, 0x02f8, PQI_HWIF_SRCV, "Cisco 24G TriMode M1 RAID 4GB FBWC 32D UCSC-RAID-MP1L32"},
+
+ /* (MSCC PM8278 16x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x1440, PQI_HWIF_SRCV, "SmartRAID Ultra 3258P-32i"},
+ {0x9005, 0x028f, 0x1d49, 0x0627, PQI_HWIF_SRCV, "9450-32i 8GB Flash"},
+
+ /* (MSCC PM8279 32x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x1450, PQI_HWIF_SRCV, "SmartRAID Ultra 3258P-32i /e"},
+ {0x9005, 0x028f, 0x1590, 0x0294, PQI_HWIF_SRCV, "SR932i-p Gen10+"},
+ {0x9005, 0x028f, 0x1590, 0x0381, PQI_HWIF_SRCV, "SR932i-p Gen11"},
+ {0x9005, 0x028f, 0x1590, 0x0382, PQI_HWIF_SRCV, "SR308i-p Gen11"},
+ {0x9005, 0x028f, 0x1590, 0x0383, PQI_HWIF_SRCV, "SR308i-o Gen11"},
+ {0x9005, 0x028f, 0x1590, 0x02db, PQI_HWIF_SRCV, "SR416ie-m Gen11"},
+ {0x9005, 0x028f, 0x1590, 0x032e, PQI_HWIF_SRCV, "SR416i-o Gen11"},
+ {0x9005, 0x028f, 0x9005, 0x1452, PQI_HWIF_SRCV, "SmartRAID 3200p Ultra"},
+ {0x9005, 0x028f, 0x1137, 0x02fe, PQI_HWIF_SRCV, "Cisco 24G TriMode M1 RAID LFF 32D UCSC-RAIDMP1LL32"},
+
+ /* (MSCC HBA/SMARTHBA/CFF SmartRAID - Lenovo 8X12G 16X12G based) */
+ {0x9005, 0x028f, 0x1d49, 0x0220, PQI_HWIF_SRCV, "4350-8i SAS/SATA HBA"},
+ {0x9005, 0x028f, 0x1d49, 0x0221, PQI_HWIF_SRCV, "4350-16i SAS/SATA HBA"},
+ {0x9005, 0x028f, 0x1d49, 0x0520, PQI_HWIF_SRCV, "5350-8i"},
+ {0x9005, 0x028f, 0x1d49, 0x0522, PQI_HWIF_SRCV, "5350-8i INTR"},
+ {0x9005, 0x028f, 0x1d49, 0x0620, PQI_HWIF_SRCV, "9350-8i 2GB Flash"},
+ {0x9005, 0x028f, 0x1d49, 0x0621, PQI_HWIF_SRCV, "9350-8i 2GB Flash INTR"},
+ {0x9005, 0x028f, 0x1d49, 0x0622, PQI_HWIF_SRCV, "9350-16i 4GB Flash"},
+ {0x9005, 0x028f, 0x1d49, 0x0623, PQI_HWIF_SRCV, "9350-16i 4GB Flash INTR"},
+
+ {0, 0, 0, 0, 0, 0}
+};
+
+struct pqi_ident
+pqi_family_identifiers[] = {
+ {0x9005, 0x028f, 0, 0, PQI_HWIF_SRCV, "Smart Array Storage Controller"},
+ {0, 0, 0, 0, 0, 0}
+};
diff --git a/sys/dev/smartpqi/smartpqi_defines.h b/sys/dev/smartpqi/smartpqi_defines.h
index bb0bb2b709aa..fe2edf7a74a9 100644
--- a/sys/dev/smartpqi/smartpqi_defines.h
+++ b/sys/dev/smartpqi/smartpqi_defines.h
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2025 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -927,7 +927,7 @@ typedef struct pqi_pci_info
typedef struct _driver_info
{
unsigned char major_version;
- unsigned long minor_version;
+ unsigned char minor_version;
unsigned char release_version;
unsigned long build_revision;
unsigned long max_targets;
@@ -938,9 +938,13 @@ typedef struct _driver_info
typedef uint8_t *passthru_buf_type_t;
#define PQISRC_DRIVER_MAJOR __FreeBSD__
-#define PQISRC_DRIVER_MINOR 4410
+#if __FreeBSD__ <= 14
+#define PQISRC_DRIVER_MINOR 4660
+#else
+#define PQISRC_DRIVER_MINOR 0
+#endif
#define PQISRC_DRIVER_RELEASE 0
-#define PQISRC_DRIVER_REVISION 2005
+#define PQISRC_DRIVER_REVISION 2002
#define STR(s) # s
#define PQISRC_VERSION(a, b, c, d) STR(a.b.c-d)
@@ -1234,19 +1238,21 @@ typedef struct sema OS_SEMA_LOCK_T;
/* Debug facility */
-#define PQISRC_FLAGS_MASK 0x0000ffff
-#define PQISRC_FLAGS_INIT 0x00000001
-#define PQISRC_FLAGS_INFO 0x00000002
-#define PQISRC_FLAGS_FUNC 0x00000004
-#define PQISRC_FLAGS_TRACEIO 0x00000008
-#define PQISRC_FLAGS_DISC 0x00000010
-#define PQISRC_FLAGS_WARN 0x00000020
-#define PQISRC_FLAGS_ERROR 0x00000040
-#define PQISRC_FLAGS_NOTE 0x00000080
+#define PQISRC_FLAGS_MASK 0x0000000000ff
+#define PQISRC_FLAGS_INIT 0x0001
+#define PQISRC_FLAGS_INFO 0x0002
+#define PQISRC_FLAGS_FUNC 0x0004
+#define PQISRC_FLAGS_TRACEIO 0x0008
+#define PQISRC_FLAGS_DISC 0x0010
+#define PQISRC_FLAGS_WARN 0x0020
+#define PQISRC_FLAGS_ERROR 0x0040
+#define PQISRC_FLAGS_NOTE 0x0080
-#define PQISRC_LOG_LEVEL (PQISRC_FLAGS_WARN | PQISRC_FLAGS_ERROR | PQISRC_FLAGS_NOTE)
+#define PQISRC_LOG_LEVEL (PQISRC_FLAGS_WARN | PQISRC_FLAGS_ERROR)
-static int logging_level = PQISRC_LOG_LEVEL;
+extern unsigned long logging_level;
+
+#define DBG_SET_LOGGING_LEVEL(value) logging_level = value & PQISRC_FLAGS_MASK
#define DBG_INIT(fmt,args...) \
do { \
@@ -1276,13 +1282,6 @@ static int logging_level = PQISRC_LOG_LEVEL;
} \
}while(0);
-#define DBG_TRACEIO(fmt,args...) \
- do { \
- if (logging_level & PQISRC_FLAGS_TRACEIO) { \
- printf("[TRACEIO]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \
- } \
- }while(0);
-
#define DBG_WARN(fmt,args...) \
do { \
if (logging_level & PQISRC_FLAGS_WARN) { \
diff --git a/sys/dev/smartpqi/smartpqi_discovery.c b/sys/dev/smartpqi/smartpqi_discovery.c
index ac37c2233762..a7de5a149810 100644
--- a/sys/dev/smartpqi/smartpqi_discovery.c
+++ b/sys/dev/smartpqi/smartpqi_discovery.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2025 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -189,7 +189,7 @@ pqisrc_set_btl(pqi_scsi_dev_t *device, int bus, int target, int lun)
* devices and multi-lun devices */
boolean_t
pqisrc_add_softs_entry(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device,
- uint8_t *scsi3addr)
+ uint8_t const *scsi3addr)
{
/* Add physical devices with targets that need
* targets */
@@ -269,7 +269,7 @@ pqisrc_add_softs_entry(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device,
}
}
DBG_ERR("The device is not a physical, lun or ptraid device"
- "B %d: T %d: L %d\n", device->bus, device->target,
+ "B%d:T%d:L%d\n", device->bus, device->target,
device->lun );
return false;
@@ -283,7 +283,7 @@ add_device_to_dev_list:
softs->dev_list[j] = device;
break;
}
- DBG_NOTE("Added device [%d of %d]: B %d: T %d: L %d\n",
+ DBG_NOTE("Added device [%d of %d]: B%d:T%d:L%d\n",
j, softs->num_devs, device->bus, device->target,
device->lun);
return true;
@@ -307,7 +307,6 @@ pqisrc_find_btl_list_index(pqisrc_softstate_t *softs,
if(bus == softs->bus_id &&
target == temp_device->target &&
lun == temp_device->lun){
- DBG_DISC("Returning device list index %d\n", index);
return index;
}
@@ -322,7 +321,7 @@ pqisrc_find_btl_list_index(pqisrc_softstate_t *softs,
/* Return a given index for a specific device within the
* softs dev_list */
int
-pqisrc_find_device_list_index(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+pqisrc_find_device_list_index(pqisrc_softstate_t *softs, pqi_scsi_dev_t const *device)
{
int index;
@@ -346,7 +345,7 @@ pqisrc_find_device_list_index(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
/* Delete a given device from the softs dev_list*/
int
-pqisrc_delete_softs_entry(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+pqisrc_delete_softs_entry(pqisrc_softstate_t *softs, pqi_scsi_dev_t const *device)
{
int index;
@@ -354,8 +353,8 @@ pqisrc_delete_softs_entry(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
if (0 <= index && index < MAX_TARGET_BIT) {
softs->dev_list[index] = NULL;
softs->num_devs--;
- DBG_NOTE("Removing device : B %d: T %d: L %d positioned at %d\n",
- device->bus, device->target, device->lun, softs->num_devs);
+ DBG_NOTE("Removing device: B%d:T%d:L%d positioned at %d\n",
+ device->bus, device->target, device->lun, index);
return PQI_STATUS_SUCCESS;
}
if (index == INVALID_ELEM) {
@@ -414,7 +413,7 @@ pqisrc_build_send_raid_request(pqisrc_softstate_t *softs, struct dma_mem device_
int ret = PQI_STATUS_SUCCESS;
ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
- ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
+ ob_queue_t const *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
rcb_t *rcb = NULL;
@@ -740,13 +739,13 @@ pqisrc_get_phys_log_device_list(pqisrc_softstate_t *softs,
ret = pqisrc_get_physical_logical_luns(softs, SA_REPORT_PHYS, physical_dev_list, phys_data_length);
if (ret) {
- DBG_ERR("report physical LUNs failed");
+ DBG_ERR("report physical LUNs failed\n");
return ret;
}
ret = pqisrc_get_physical_logical_luns(softs, SA_REPORT_LOG, logical_dev_list, log_data_length);
if (ret) {
- DBG_ERR("report logical LUNs failed");
+ DBG_ERR("report logical LUNs failed\n");
return ret;
}
@@ -763,7 +762,7 @@ pqisrc_get_phys_log_device_list(pqisrc_softstate_t *softs,
ret = pqisrc_get_queue_lun_list(softs, PQI_LOG_EXT_QUEUE_ENABLE, queue_dev_list, queue_data_length);
if (ret) {
- DBG_ERR("report logical LUNs failed");
+ DBG_ERR("report logical LUNs failed\n");
return ret;
}
@@ -808,13 +807,13 @@ pqisrc_get_phys_log_device_list(pqisrc_softstate_t *softs,
}
inline boolean_t
-pqisrc_is_external_raid_device(pqi_scsi_dev_t *device)
+pqisrc_is_external_raid_device(pqi_scsi_dev_t const *device)
{
return device->is_external_raid_device;
}
static inline boolean_t
-pqisrc_is_external_raid_addr(uint8_t *scsi3addr)
+pqisrc_is_external_raid_addr(uint8_t const *scsi3addr)
{
return scsi3addr[2] != 0;
}
@@ -940,12 +939,12 @@ out:
/* Validate the RAID map parameters */
static int
pqisrc_raid_map_validation(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device, pqisrc_raid_map_t *raid_map)
+ pqi_scsi_dev_t const *device, pqisrc_raid_map_t *raid_map)
{
char *error_msg;
uint32_t raidmap_size;
uint32_t r5or6_blocks_per_row;
-/* unsigned phys_dev_num; */
+ /* unsigned phys_dev_num; */
DBG_FUNC("IN\n");
@@ -1033,7 +1032,7 @@ pqisrc_get_device_raidmap(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
if (raidmap_reported_size <= raidmap_alloc_size)
break;
- DBG_NOTE("Raid map is larger than 1024 entries, request once again");
+ DBG_NOTE("Raid map is larger than 1024 entries, request once again\n");
os_mem_free(softs, (char*)raid_map, raidmap_alloc_size);
raidmap_alloc_size = raidmap_reported_size;
@@ -1331,7 +1330,7 @@ pqisrc_get_physical_device_info(pqisrc_softstate_t *softs,
/* Function used to find the entry of the device in a list */
static device_status_t
pqisrc_scsi_find_entry(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device_to_find, pqi_scsi_dev_t **same_device)
+ pqi_scsi_dev_t const *device_to_find, pqi_scsi_dev_t **same_device)
{
pqi_scsi_dev_t *device;
int i;
@@ -1361,7 +1360,7 @@ pqisrc_scsi_find_entry(pqisrc_softstate_t *softs,
/* Update the newly added devices as existed device */
static void
-pqisrc_exist_device_update(pqisrc_softstate_t *softs,
+pqisrc_exist_device_update(pqisrc_softstate_t const *softs,
pqi_scsi_dev_t *device_exist, pqi_scsi_dev_t *new_device)
{
DBG_FUNC("IN\n");
@@ -1395,15 +1394,13 @@ pqisrc_exist_device_update(pqisrc_softstate_t *softs,
device_exist->offload_config = new_device->offload_config;
device_exist->offload_enabled_pending =
new_device->offload_enabled_pending;
- if (device_exist->offload_to_mirror)
- os_mem_free(softs,
- (int *) device_exist->offload_to_mirror,
- sizeof(*(device_exist->offload_to_mirror)));
+ if (device_exist->offload_to_mirror) {
+ device_exist->temp_offload_to_mirror = device_exist->offload_to_mirror;
+ }
device_exist->offload_to_mirror = new_device->offload_to_mirror;
- if (device_exist->raid_map)
- os_mem_free(softs,
- (char *)device_exist->raid_map,
- sizeof(*device_exist->raid_map));
+ if (device_exist->raid_map) {
+ device_exist->temp_raid_map = device_exist->raid_map;
+ }
device_exist->raid_map = new_device->raid_map;
/* To prevent these from being freed later. */
new_device->raid_map = NULL;
@@ -1416,7 +1413,7 @@ static int
pqisrc_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
DBG_FUNC("IN\n");
- DBG_NOTE("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
+ DBG_NOTE("vendor: %s model: %s B%d:T%d:L%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);
device->invalid = false;
@@ -1439,7 +1436,7 @@ void
pqisrc_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
DBG_FUNC("IN\n");
- DBG_NOTE("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
+ DBG_NOTE("vendor: %s model: %s B%d:T%d:L%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);
device->invalid = true;
if (device->expose_device == false) {
@@ -1460,63 +1457,9 @@ pqisrc_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
}
-/*
- * When exposing new device to OS fails then adjst list according to the
- * mid scsi list
- */
-static void
-pqisrc_adjust_list(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
-{
- int i;
- unsigned char addr1[8], addr2[8];
- pqi_scsi_dev_t *temp_device;
- DBG_FUNC("IN\n");
-
- if (!device) {
- DBG_ERR("softs = %p: device is NULL !!!\n", softs);
- return;
- }
-
- OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
- uint8_t *scsi3addr;
- /*For external raid device, there can be multiple luns
- *with same target. So while freeing external raid device,
- *free target only after removing all luns with same target.*/
- if (pqisrc_is_external_raid_device(device)) {
- memcpy(addr1, device->scsi3addr, 8);
- for(i = 0; i < PQI_MAX_DEVICES; i++) {
- if(softs->dev_list[i] == NULL)
- continue;
- temp_device = softs->dev_list[i];
- memcpy(addr2, temp_device->scsi3addr, 8);
- if(memcmp(addr1, addr2, 8) == 0) {
- continue;
- }
- if (addr1[2] == addr2[2]) {
- break;
- }
- }
- if(i == PQI_MAX_DEVICES) {
- pqisrc_remove_target_bit(softs, device->target);
- }
- }
-
- if(pqisrc_delete_softs_entry(softs, device) == PQI_STATUS_SUCCESS){
- scsi3addr = device->scsi3addr;
- if (!pqisrc_is_logical_device(device) && !MASKED_DEVICE(scsi3addr)){
- DBG_NOTE("About to remove target bit %d \n", device->target);
- pqisrc_remove_target_bit(softs, device->target);
- }
- }
- OS_RELEASE_SPINLOCK(&softs->devlist_lock);
- pqisrc_device_mem_free(softs, device);
-
- DBG_FUNC("OUT\n");
-}
-
/* Debug routine used to display the RAID volume status of the device */
static void
-pqisrc_display_volume_status(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+pqisrc_display_volume_status(pqisrc_softstate_t *softs, pqi_scsi_dev_t const *device)
{
char *status;
@@ -1590,7 +1533,7 @@ pqisrc_display_volume_status(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
break;
}
- DBG_NOTE("scsi BTL %d:%d:%d %s\n",
+ DBG_NOTE("scsi B%d:T%d:L%d %s\n",
device->bus, device->target, device->lun, status);
DBG_FUNC("OUT\n");
}
@@ -1635,7 +1578,7 @@ pqisrc_free_device(pqisrc_softstate_t * softs, pqi_scsi_dev_t *device)
/* Find the entry in device list for the freed device softs->dev_list[i]&
*make it NULL before freeing the device memory
*/
- index = pqisrc_find_device_list_index(softs, device);
+ index = pqisrc_find_btl_list_index(softs, device->bus, device->target, device->lun);
OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
scsi3addr = device->scsi3addr;
@@ -1669,13 +1612,14 @@ pqisrc_free_device(pqisrc_softstate_t * softs, pqi_scsi_dev_t *device)
softs->dev_list[index] = NULL;
if (device->expose_device == true){
pqisrc_delete_softs_entry(softs, device);
- DBG_NOTE("Removed memory for device : B %d: T %d: L %d\n",
+ DBG_NOTE("Removed memory for device: B%d:T%d:L%d\n",
device->bus, device->target, device->lun);
OS_RELEASE_SPINLOCK(&softs->devlist_lock);
pqisrc_device_mem_free(softs, device);
} else {
OS_RELEASE_SPINLOCK(&softs->devlist_lock);
}
+
}
@@ -1684,7 +1628,6 @@ static void
pqisrc_update_device_list(pqisrc_softstate_t *softs,
pqi_scsi_dev_t *new_device_list[], int num_new_devices)
{
- int ret;
int i;
device_status_t dev_status;
pqi_scsi_dev_t *device;
@@ -1755,8 +1698,7 @@ pqisrc_update_device_list(pqisrc_softstate_t *softs,
if(device == NULL)
continue;
if (device->device_gone) {
- if(device->in_remove == true)
- {
+ if(device->in_remove == true) {
continue;
}
device->in_remove = true;
@@ -1794,7 +1736,7 @@ pqisrc_update_device_list(pqisrc_softstate_t *softs,
continue;
if (device->offload_enabled != device->offload_enabled_pending)
{
- DBG_NOTE("[%d:%d:%d]Changing AIO to %d (was %d)\n",
+ DBG_NOTE("[B%d:T%d:L%d]Changing AIO to %d (was %d)\n",
device->bus, device->target, device->lun,
device->offload_enabled_pending,
device->offload_enabled);
@@ -1831,24 +1773,14 @@ pqisrc_update_device_list(pqisrc_softstate_t *softs,
device->firmware_queue_depth_set == false)
device->queue_depth = PQI_LOGICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
- if (device->scsi_rescan) {
- os_rescan_target(softs, device);
- }
}
- softs->ld_rescan = false;
OS_RELEASE_SPINLOCK(&softs->devlist_lock);
for(i = 0; i < nadded; i++) {
device = added[i];
if (device->expose_device) {
- ret = pqisrc_add_device(softs, device);
- if (ret) {
- DBG_WARN("scsi %d:%d:%d addition failed, device not added\n",
- device->bus, device->target, device->lun);
- pqisrc_adjust_list(softs, device);
- continue;
- }
+ pqisrc_add_device(softs, device);
}
pqisrc_display_device_info(softs, "added", device);
@@ -1874,7 +1806,22 @@ pqisrc_update_device_list(pqisrc_softstate_t *softs,
DBG_DISC("Current device %d : B%d:T%d:L%d\n",
i, device->bus, device->target,
device->lun);
+ if (device->scsi_rescan) {
+ os_rescan_target(softs, device);
+ }
+ if (device->temp_offload_to_mirror) {
+ os_mem_free(softs,
+ (int *) device->temp_offload_to_mirror,
+ sizeof(*(device->temp_offload_to_mirror)));
+ }
+ if (device->temp_raid_map) {
+ os_mem_free(softs,
+ (int *) device->temp_raid_map,
+ sizeof(*(device->temp_raid_map)));
+ }
+
}
+ softs->ld_rescan = false;
free_and_out:
if (added)
@@ -1928,6 +1875,8 @@ pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs)
}
host_wellness_driver_ver->driver_version[sizeof(host_wellness_driver_ver->driver_version) - 1] = '\0';
+ host_wellness_driver_ver->dont_write_tag[0] = 'D';
+ host_wellness_driver_ver->dont_write_tag[1] = 'W';
host_wellness_driver_ver->end_tag[0] = 'Z';
host_wellness_driver_ver->end_tag[1] = 'Z';
@@ -1941,7 +1890,7 @@ pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs)
os_mem_free(softs, (char *)host_wellness_driver_ver, data_length);
- DBG_FUNC("OUT");
+ DBG_FUNC("OUT\n");
return rval;
}
@@ -1974,8 +1923,8 @@ pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs)
host_wellness_time->start_tag[3] = '>';
host_wellness_time->time_tag[0] = 'T';
host_wellness_time->time_tag[1] = 'D';
- host_wellness_time->time_length = LE_16(offsetof(struct bmic_host_wellness_time, time_length) -
- offsetof(struct bmic_host_wellness_time, century));
+ host_wellness_time->time_length = LE_16(offsetof(struct bmic_host_wellness_time, dont_write_tag) -
+ offsetof(struct bmic_host_wellness_time, hour));
os_get_time(host_wellness_time);
@@ -1994,12 +1943,12 @@ pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs)
os_mem_free(softs, (char *)host_wellness_time, data_length);
- DBG_FUNC("OUT");
+ DBG_FUNC("OUT\n");
return rval;
}
static void
pqisrc_get_device_vpd_info(pqisrc_softstate_t *softs,
- bmic_ident_physdev_t *bmic_phy_info,pqi_scsi_dev_t *device)
+ bmic_ident_physdev_t const *bmic_phy_info,pqi_scsi_dev_t *device)
{
DBG_FUNC("IN\n");
memcpy(&device->wwid, &bmic_phy_info->padding[79], sizeof(device->wwid));
@@ -2102,7 +2051,7 @@ pqisrc_scan_devices(pqisrc_softstate_t *softs)
scsi3addr = lun_ext_entry->lunid;
- /* Save the target sas adderess for external raid device */
+ /* Save the target sas address for external raid device */
if(lun_ext_entry->device_type == CONTROLLER_DEVICE) {
#ifdef PQI_NEED_RESCAN_TIMER_FOR_RBOD_HOTPLUG
num_ext_raid_devices++;
diff --git a/sys/dev/smartpqi/smartpqi_event.c b/sys/dev/smartpqi/smartpqi_event.c
index 88dcf45dd08a..761bb5588ff9 100644
--- a/sys/dev/smartpqi/smartpqi_event.c
+++ b/sys/dev/smartpqi/smartpqi_event.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2025 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -59,7 +59,7 @@ pqisrc_wait_for_rescan_complete(pqisrc_softstate_t *softs)
*/
static void
pqisrc_acknowledge_event(pqisrc_softstate_t *softs,
- struct pqi_event *event)
+ struct pqi_event const *event)
{
int ret;
@@ -225,7 +225,7 @@ pqisrc_process_event_intr_src(pqisrc_softstate_t *softs,int obq_id)
os_eventtaskqueue_enqueue(softs);
}
- DBG_FUNC("OUT");
+ DBG_FUNC("OUT\n");
return PQI_STATUS_SUCCESS;
@@ -241,7 +241,7 @@ pqisrc_build_send_vendor_request(pqisrc_softstate_t *softs,
{
int ret = PQI_STATUS_SUCCESS;
ib_queue_t *op_ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
- ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
+ ob_queue_t const *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
rcb_t *rcb = NULL;
@@ -341,7 +341,7 @@ err_out:
static int
pqi_event_configure(pqisrc_softstate_t *softs ,
pqi_event_config_request_t *request,
- dma_mem_t *buff)
+ dma_mem_t const *buff)
{
int ret = PQI_STATUS_SUCCESS;
diff --git a/sys/dev/smartpqi/smartpqi_helper.c b/sys/dev/smartpqi/smartpqi_helper.c
index 741ac0a05a06..0a4bf6ec21cb 100644
--- a/sys/dev/smartpqi/smartpqi_helper.c
+++ b/sys/dev/smartpqi/smartpqi_helper.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2025 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,7 +30,7 @@
* Function used to validate the adapter health.
*/
boolean_t
-pqisrc_ctrl_offline(pqisrc_softstate_t *softs)
+pqisrc_ctrl_offline(pqisrc_softstate_t const *softs)
{
DBG_FUNC("IN\n");
@@ -83,12 +83,10 @@ pqisrc_take_ctrl_offline(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
- int lockupcode = 0;
-
softs->ctrl_online = false;
if (SIS_IS_KERNEL_PANIC(softs)) {
- lockupcode = PCI_MEM_GET32(softs, &softs->ioa_reg->mb[7], LEGACY_SIS_SRCV_OFFSET_MAILBOX_7);
+ int lockupcode = PCI_MEM_GET32(softs, &softs->ioa_reg->mb[7], LEGACY_SIS_SRCV_OFFSET_MAILBOX_7);
DBG_ERR("Controller FW is not running, Lockup code = %x\n", lockupcode);
}
else {
@@ -172,7 +170,7 @@ pqisrc_wait_on_condition(pqisrc_softstate_t *softs, rcb_t *rcb,
}
if (pqisrc_ctrl_offline(softs)) {
- DBG_ERR("Controller is Offline");
+ DBG_ERR("Controller is Offline\n");
ret = PQI_STATUS_FAILURE;
break;
}
@@ -187,29 +185,29 @@ pqisrc_wait_on_condition(pqisrc_softstate_t *softs, rcb_t *rcb,
/* Function used to validate the device wwid. */
boolean_t
-pqisrc_device_equal(pqi_scsi_dev_t *dev1,
- pqi_scsi_dev_t *dev2)
+pqisrc_device_equal(pqi_scsi_dev_t const *dev1,
+ pqi_scsi_dev_t const *dev2)
{
return dev1->wwid == dev2->wwid;
}
/* Function used to validate the device scsi3addr. */
boolean_t
-pqisrc_scsi3addr_equal(uint8_t *scsi3addr1, uint8_t *scsi3addr2)
+pqisrc_scsi3addr_equal(uint8_t const *scsi3addr1, uint8_t const *scsi3addr2)
{
return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}
/* Function used to validate hba_lunid */
boolean_t
-pqisrc_is_hba_lunid(uint8_t *scsi3addr)
+pqisrc_is_hba_lunid(uint8_t const *scsi3addr)
{
return pqisrc_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}
/* Function used to validate type of device */
boolean_t
-pqisrc_is_logical_device(pqi_scsi_dev_t *device)
+pqisrc_is_logical_device(pqi_scsi_dev_t const *device)
{
return !device->is_physical_device;
}
@@ -256,10 +254,10 @@ pqisrc_raidlevel_to_string(uint8_t raid_level)
/* Debug routine for displaying device info */
void pqisrc_display_device_info(pqisrc_softstate_t *softs,
- char *action, pqi_scsi_dev_t *device)
+ char const *action, pqi_scsi_dev_t *device)
{
if (device->is_physical_device) {
- DBG_NOTE("%s scsi BTL %d:%d:%d: %.8s %.16s %-12s "
+ DBG_NOTE("%s scsi B%d:T%d:L%d %.8s %.16s %-12s "
"SSDSmartPathCap%c En%c Exp%c qd=%d\n",
action,
device->bus,
@@ -273,7 +271,7 @@ void pqisrc_display_device_info(pqisrc_softstate_t *softs,
device->expose_device ? '+' : '-',
device->queue_depth);
} else if (device->devtype == RAID_DEVICE) {
- DBG_NOTE("%s scsi BTL %d:%d:%d: %.8s %.16s %-12s "
+ DBG_NOTE("%s scsi B%d:T%d:L%d %.8s %.16s %-12s "
"SSDSmartPathCap%c En%c Exp%c qd=%d\n",
action,
device->bus,
@@ -287,7 +285,7 @@ void pqisrc_display_device_info(pqisrc_softstate_t *softs,
device->expose_device ? '+' : '-',
device->queue_depth);
} else if (device->devtype == CONTROLLER_DEVICE) {
- DBG_NOTE("%s scsi BTL %d:%d:%d: %.8s %.16s %-12s "
+ DBG_NOTE("%s scsi B%d:T%d:L%d %.8s %.16s %-12s "
"SSDSmartPathCap%c En%c Exp%c qd=%d\n",
action,
device->bus,
@@ -301,7 +299,7 @@ void pqisrc_display_device_info(pqisrc_softstate_t *softs,
device->expose_device ? '+' : '-',
device->queue_depth);
} else {
- DBG_NOTE("%s scsi BTL %d:%d:%d: %.8s %.16s %-12s "
+ DBG_NOTE("%s scsi B%d:T%d:L%d %.8s %.16s %-12s "
"SSDSmartPathCap%c En%c Exp%c qd=%d devtype=%d\n",
action,
device->bus,
@@ -335,7 +333,7 @@ check_struct_sizes(void)
64 bit and 32 bit system*/
ASSERT(sizeof(IOCTL_Command_struct)== 86 ||
sizeof(IOCTL_Command_struct)== 82);
- ASSERT(sizeof(struct bmic_host_wellness_driver_version)== 42);
+ ASSERT(sizeof(struct bmic_host_wellness_driver_version)== 44);
ASSERT(sizeof(struct bmic_host_wellness_time)== 20);
ASSERT(sizeof(struct pqi_dev_adminq_cap)== 8);
ASSERT(sizeof(struct admin_q_param)== 4);
diff --git a/sys/dev/smartpqi/smartpqi_init.c b/sys/dev/smartpqi/smartpqi_init.c
index 41c990a15909..fde17a542421 100644
--- a/sys/dev/smartpqi/smartpqi_init.c
+++ b/sys/dev/smartpqi/smartpqi_init.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2025 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -234,7 +234,7 @@ pqisrc_decide_opq_config(pqisrc_softstate_t *softs)
DBG_FUNC("IN\n");
- DBG_INIT("softs->intr_count : %d softs->num_cpus_online : %d",
+ DBG_INIT("softs->intr_count : %d softs->num_cpus_online : %d\n",
softs->intr_count, softs->num_cpus_online);
/* TODO : Get the number of IB and OB queues from OS layer */
@@ -437,7 +437,6 @@ int
pqi_reset(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
- uint32_t val = 0;
pqi_reset_reg_t pqi_reset_reg;
DBG_FUNC("IN\n");
@@ -445,7 +444,7 @@ pqi_reset(pqisrc_softstate_t *softs)
if (true == softs->ctrl_in_pqi_mode) {
if (softs->pqi_reset_quiesce_allowed) {
- val = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db,
+ int val = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR);
val |= SIS_PQI_RESET_QUIESCE;
PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
@@ -629,7 +628,7 @@ pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *softs)
int count = 0;
int ret = PQI_STATUS_SUCCESS;
- DBG_NOTE("softs->taglist.num_elem : %u",softs->taglist.num_elem);
+ DBG_NOTE("softs->taglist.num_elem : %u\n",softs->taglist.num_elem);
if (softs->taglist.num_elem == softs->max_outstanding_io)
return ret;
@@ -650,7 +649,7 @@ pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *softs)
OS_SLEEP(1000);
count++;
if(count % 1000 == 0) {
- DBG_WARN("Waited for %d seconds", count/1000);
+ DBG_WARN("Waited for %d seconds\n", count/1000);
}
if (count >= PQI_QUIESCE_TIMEOUT) {
return PQI_STATUS_FAILURE;
@@ -849,7 +848,7 @@ pqisrc_init(pqisrc_softstate_t *softs)
/* update driver version in to FW */
ret = pqisrc_write_driver_version_to_host_wellness(softs);
if (ret) {
- DBG_ERR(" Failed to update driver version in to FW");
+ DBG_ERR(" Failed to update driver version in to FW\n");
goto err_host_wellness;
}
diff --git a/sys/dev/smartpqi/smartpqi_intr.c b/sys/dev/smartpqi/smartpqi_intr.c
index a62bdc9e8389..1971ac03a556 100644
--- a/sys/dev/smartpqi/smartpqi_intr.c
+++ b/sys/dev/smartpqi/smartpqi_intr.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2025 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,7 +35,6 @@ os_get_processor_config(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
softs->num_cpus_online = mp_ncpus;
- bsd_set_hint_adapter_cpu_config(softs);
DBG_FUNC("OUT\n");
return PQI_STATUS_SUCCESS;
diff --git a/sys/dev/smartpqi/smartpqi_ioctl.c b/sys/dev/smartpqi/smartpqi_ioctl.c
index 2bdc5c09e916..0517c0513cce 100644
--- a/sys/dev/smartpqi/smartpqi_ioctl.c
+++ b/sys/dev/smartpqi/smartpqi_ioctl.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2025 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -74,7 +74,7 @@ smartpqi_close(struct cdev *cdev, int flags, int devtype,
* ioctl for getting driver info
*/
static void
-smartpqi_get_driver_info_ioctl(caddr_t udata, struct cdev *cdev)
+smartpqi_get_driver_info_ioctl(caddr_t udata, struct cdev const *cdev)
{
struct pqisrc_softstate *softs = cdev->si_drv1;
pdriver_info driver_info = (pdriver_info)udata;
@@ -82,7 +82,11 @@ smartpqi_get_driver_info_ioctl(caddr_t udata, struct cdev *cdev)
DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
driver_info->major_version = PQISRC_DRIVER_MAJOR;
+#if __FreeBSD__ <= 14
+ driver_info->minor_version = (unsigned char) ((PQISRC_DRIVER_MINOR >> 4) & 0xFF);
+#else
driver_info->minor_version = PQISRC_DRIVER_MINOR;
+#endif
driver_info->release_version = PQISRC_DRIVER_RELEASE;
driver_info->build_revision = PQISRC_DRIVER_REVISION;
driver_info->max_targets = PQI_MAX_DEVICES - 1;
@@ -96,7 +100,7 @@ smartpqi_get_driver_info_ioctl(caddr_t udata, struct cdev *cdev)
* ioctl for getting controller info
*/
static void
-smartpqi_get_pci_info_ioctl(caddr_t udata, struct cdev *cdev)
+smartpqi_get_pci_info_ioctl(caddr_t udata, struct cdev const *cdev)
{
struct pqisrc_softstate *softs = cdev->si_drv1;
device_t dev = softs->os_specific.pqi_dev;
@@ -242,7 +246,7 @@ pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
pqisrc_raid_req_t request;
raid_path_error_info_elem_t error_info;
ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
- ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
+ ob_queue_t const *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
rcb_t *rcb = NULL;
memset(&request, 0, sizeof(request));
diff --git a/sys/dev/smartpqi/smartpqi_main.c b/sys/dev/smartpqi/smartpqi_main.c
index 402841bbf4d6..6274ecc957d4 100644
--- a/sys/dev/smartpqi/smartpqi_main.c
+++ b/sys/dev/smartpqi/smartpqi_main.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2025 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,290 +29,14 @@
*/
#include "smartpqi_includes.h"
+#include "smartpqi_controllers.h"
CTASSERT(BSD_SUCCESS == PQI_STATUS_SUCCESS);
/*
- * Supported devices
- */
-struct pqi_ident
-{
- u_int16_t vendor;
- u_int16_t device;
- u_int16_t subvendor;
- u_int16_t subdevice;
- int hwif;
- char *desc;
-} pqi_identifiers[] = {
- /* (MSCC PM8205 8x12G based) */
- {0x9005, 0x028f, 0x103c, 0x600, PQI_HWIF_SRCV, "P408i-p SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x601, PQI_HWIF_SRCV, "P408e-p SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x602, PQI_HWIF_SRCV, "P408i-a SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x603, PQI_HWIF_SRCV, "P408i-c SR Gen10"},
- {0x9005, 0x028f, 0x1028, 0x1FE0, PQI_HWIF_SRCV, "SmartRAID 3162-8i/eDell"},
- {0x9005, 0x028f, 0x9005, 0x608, PQI_HWIF_SRCV, "SmartRAID 3162-8i/e"},
- {0x9005, 0x028f, 0x103c, 0x609, PQI_HWIF_SRCV, "P408i-sb SR G10"},
-
- /* (MSCC PM8225 8x12G based) */
- {0x9005, 0x028f, 0x103c, 0x650, PQI_HWIF_SRCV, "E208i-p SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x651, PQI_HWIF_SRCV, "E208e-p SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x652, PQI_HWIF_SRCV, "E208i-c SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x654, PQI_HWIF_SRCV, "E208i-a SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x655, PQI_HWIF_SRCV, "P408e-m SR Gen10"},
- {0x9005, 0x028f, 0x9005, 0x659, PQI_HWIF_SRCV, "2100C8iOXS"},
-
- /* (MSCC PM8221 8x12G based) */
- {0x9005, 0x028f, 0x103c, 0x700, PQI_HWIF_SRCV, "P204i-c SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x701, PQI_HWIF_SRCV, "P204i-b SR Gen10"},
- {0x9005, 0x028f, 0x193d, 0x1104, PQI_HWIF_SRCV, "UN RAID P2404-Mf-4i-2GB"},
- {0x9005, 0x028f, 0x193d, 0x1106, PQI_HWIF_SRCV, "UN RAID P2404-Mf-4i-1GB"},
- {0x9005, 0x028f, 0x193d, 0x1108, PQI_HWIF_SRCV, "UN RAID P4408-Ma-8i-2GB"},
- {0x9005, 0x028f, 0x193d, 0x1109, PQI_HWIF_SRCV, "UN RAID P4408-Mr-8i-2GB"},
-
- /* (MSCC PM8204 8x12G based) */
- {0x9005, 0x028f, 0x9005, 0x800, PQI_HWIF_SRCV, "SmartRAID 3154-8i"},
- {0x9005, 0x028f, 0x9005, 0x801, PQI_HWIF_SRCV, "SmartRAID 3152-8i"},
- {0x9005, 0x028f, 0x9005, 0x802, PQI_HWIF_SRCV, "SmartRAID 3151-4i"},
- {0x9005, 0x028f, 0x9005, 0x803, PQI_HWIF_SRCV, "SmartRAID 3101-4i"},
- {0x9005, 0x028f, 0x9005, 0x804, PQI_HWIF_SRCV, "SmartRAID 3154-8e"},
- {0x9005, 0x028f, 0x9005, 0x805, PQI_HWIF_SRCV, "SmartRAID 3102-8i"},
- {0x9005, 0x028f, 0x9005, 0x806, PQI_HWIF_SRCV, "SmartRAID 3100"},
- {0x9005, 0x028f, 0x9005, 0x807, PQI_HWIF_SRCV, "SmartRAID 3162-8i"},
- {0x9005, 0x028f, 0x152d, 0x8a22, PQI_HWIF_SRCV, "QS-8204-8i"},
- {0x9005, 0x028f, 0x193d, 0xf460, PQI_HWIF_SRCV, "UN RAID P460-M4"},
- {0x9005, 0x028f, 0x193d, 0xf461, PQI_HWIF_SRCV, "UN RAID P460-B4"},
- {0x9005, 0x028f, 0x1bd4, 0x004b, PQI_HWIF_SRCV, "PM8204-2GB"},
- {0x9005, 0x028f, 0x1bd4, 0x004c, PQI_HWIF_SRCV, "PM8204-4GB"},
- {0x9005, 0x028f, 0x193d, 0x1105, PQI_HWIF_SRCV, "UN RAID P4408-Mf-8i-2GB"},
- {0x9005, 0x028f, 0x193d, 0x1107, PQI_HWIF_SRCV, "UN RAID P4408-Mf-8i-4GB"},
- {0x9005, 0x028f, 0x1d8d, 0x800, PQI_HWIF_SRCV, "Fiberhome SmartRAID AIS-8204-8i"},
- {0x9005, 0x028f, 0x9005, 0x0808, PQI_HWIF_SRCV, "SmartRAID 3101E-4i"},
- {0x9005, 0x028f, 0x9005, 0x0809, PQI_HWIF_SRCV, "SmartRAID 3102E-8i"},
- {0x9005, 0x028f, 0x9005, 0x080a, PQI_HWIF_SRCV, "SmartRAID 3152-8i/N"},
- {0x9005, 0x028f, 0x1cc4, 0x0101, PQI_HWIF_SRCV, "Ramaxel FBGF-RAD PM8204"},
-
- /* (MSCC PM8222 8x12G based) */
- {0x9005, 0x028f, 0x9005, 0x900, PQI_HWIF_SRCV, "SmartHBA 2100-8i"},
- {0x9005, 0x028f, 0x9005, 0x901, PQI_HWIF_SRCV, "SmartHBA 2100-4i"},
- {0x9005, 0x028f, 0x9005, 0x902, PQI_HWIF_SRCV, "HBA 1100-8i"},
- {0x9005, 0x028f, 0x9005, 0x903, PQI_HWIF_SRCV, "HBA 1100-4i"},
- {0x9005, 0x028f, 0x9005, 0x904, PQI_HWIF_SRCV, "SmartHBA 2100-8e"},
- {0x9005, 0x028f, 0x9005, 0x905, PQI_HWIF_SRCV, "HBA 1100-8e"},
- {0x9005, 0x028f, 0x9005, 0x906, PQI_HWIF_SRCV, "SmartHBA 2100-4i4e"},
- {0x9005, 0x028f, 0x9005, 0x907, PQI_HWIF_SRCV, "HBA 1100"},
- {0x9005, 0x028f, 0x9005, 0x908, PQI_HWIF_SRCV, "SmartHBA 2100"},
- {0x9005, 0x028f, 0x9005, 0x90a, PQI_HWIF_SRCV, "SmartHBA 2100A-8i"},
- {0x9005, 0x028f, 0x193d, 0x8460, PQI_HWIF_SRCV, "UN HBA H460-M1"},
- {0x9005, 0x028f, 0x193d, 0x8461, PQI_HWIF_SRCV, "UN HBA H460-B1"},
- {0x9005, 0x028f, 0x193d, 0xc460, PQI_HWIF_SRCV, "UN RAID P460-M2"},
- {0x9005, 0x028f, 0x193d, 0xc461, PQI_HWIF_SRCV, "UN RAID P460-B2"},
- {0x9005, 0x028f, 0x1bd4, 0x004a, PQI_HWIF_SRCV, "PM8222-SHBA"},
- {0x9005, 0x028f, 0x13fe, 0x8312, PQI_HWIF_SRCV, "MIC-8312BridgeB"},
- {0x9005, 0x028f, 0x1bd4, 0x004f, PQI_HWIF_SRCV, "PM8222-HBA"},
- {0x9005, 0x028f, 0x1d8d, 0x908, PQI_HWIF_SRCV, "Fiberhome SmartHBA AIS-8222-8i"},
- {0x9005, 0x028f, 0x1bd4, 0x006C, PQI_HWIF_SRCV, "RS0800M5E8i"},
- {0x9005, 0x028f, 0x1bd4, 0x006D, PQI_HWIF_SRCV, "RS0800M5H8i"},
- {0x9005, 0x028f, 0x1cc4, 0x0201, PQI_HWIF_SRCV, "Ramaxel FBGF-RAD PM8222"},
-
- /* (SRCx MSCC FVB 24x12G based) */
- {0x9005, 0x028f, 0x103c, 0x1001, PQI_HWIF_SRCV, "MSCC FVB"},
-
- /* (MSCC PM8241 24x12G based) */
-
- /* (MSCC PM8242 24x12G based) */
- {0x9005, 0x028f, 0x152d, 0x8a37, PQI_HWIF_SRCV, "QS-8242-24i"},
- {0x9005, 0x028f, 0x9005, 0x1300, PQI_HWIF_SRCV, "HBA 1100-8i8e"},
- {0x9005, 0x028f, 0x9005, 0x1301, PQI_HWIF_SRCV, "HBA 1100-24i"},
- {0x9005, 0x028f, 0x9005, 0x1302, PQI_HWIF_SRCV, "SmartHBA 2100-8i8e"},
- {0x9005, 0x028f, 0x9005, 0x1303, PQI_HWIF_SRCV, "SmartHBA 2100-24i"},
- {0x9005, 0x028f, 0x105b, 0x1321, PQI_HWIF_SRCV, "8242-24i"},
- {0x9005, 0x028f, 0x1bd4, 0x0045, PQI_HWIF_SRCV, "SMART-HBA 8242-24i"},
- {0x9005, 0x028f, 0x1bd4, 0x006B, PQI_HWIF_SRCV, "RS0800M5H24i"},
- {0x9005, 0x028f, 0x1bd4, 0x0070, PQI_HWIF_SRCV, "RS0800M5E24i"},
-
- /* (MSCC PM8236 16x12G based) */
- {0x9005, 0x028f, 0x152d, 0x8a24, PQI_HWIF_SRCV, "QS-8236-16i"},
- {0x9005, 0x028f, 0x9005, 0x1380, PQI_HWIF_SRCV, "SmartRAID 3154-16i"},
- {0x9005, 0x028f, 0x1bd4, 0x0046, PQI_HWIF_SRCV, "RAID 8236-16i"},
- {0x9005, 0x028f, 0x1d8d, 0x806, PQI_HWIF_SRCV, "Fiberhome SmartRAID AIS-8236-16i"},
- {0x9005, 0x028f, 0x1cf2, 0x0B27, PQI_HWIF_SRCV, "ZTE SmartROC3100 SDPSA/B-18i 4G"},
- {0x9005, 0x028f, 0x1cf2, 0x0B45, PQI_HWIF_SRCV, "ZTE SmartROC3100 SDPSA/B_L-18i 2G"},
- {0x9005, 0x028f, 0x1cf2, 0x5445, PQI_HWIF_SRCV, "ZTE SmartROC3100 RM241-18i 2G"},
- {0x9005, 0x028f, 0x1cf2, 0x5446, PQI_HWIF_SRCV, "ZTE SmartROC3100 RM242-18i 4G"},
- {0x9005, 0x028f, 0x1cf2, 0x5449, PQI_HWIF_SRCV, "ZTE SmartROC3100 RS241-18i 2G"},
- {0x9005, 0x028f, 0x1cf2, 0x544A, PQI_HWIF_SRCV, "ZTE SmartROC3100 RS242-18i 4G"},
- {0x9005, 0x028f, 0x1cf2, 0x544D, PQI_HWIF_SRCV, "ZTE SmartROC3100 RM241B-18i 2G"},
- {0x9005, 0x028f, 0x1cf2, 0x544E, PQI_HWIF_SRCV, "ZTE SmartROC3100 RM242B-18i 4G"},
- {0x9005, 0x028f, 0x1bd4, 0x006F, PQI_HWIF_SRCV, "RS0804M5R16i"},
-
-
-
- /* (MSCC PM8237 24x12G based) */
- {0x9005, 0x028f, 0x103c, 0x1100, PQI_HWIF_SRCV, "P816i-a SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x1101, PQI_HWIF_SRCV, "P416ie-m SR G10"},
-
- /* (MSCC PM8238 16x12G based) */
- {0x9005, 0x028f, 0x152d, 0x8a23, PQI_HWIF_SRCV, "QS-8238-16i"},
- {0x9005, 0x028f, 0x9005, 0x1280, PQI_HWIF_SRCV, "HBA 1100-16i"},
- {0x9005, 0x028f, 0x9005, 0x1281, PQI_HWIF_SRCV, "HBA 1100-16e"},
- {0x9005, 0x028f, 0x105b, 0x1211, PQI_HWIF_SRCV, "8238-16i"},
- {0x9005, 0x028f, 0x1bd4, 0x0048, PQI_HWIF_SRCV, "SMART-HBA 8238-16i"},
- {0x9005, 0x028f, 0x9005, 0x1282, PQI_HWIF_SRCV, "SmartHBA 2100-16i"},
- {0x9005, 0x028f, 0x1d8d, 0x916, PQI_HWIF_SRCV, "Fiberhome SmartHBA AIS-8238-16i"},
- {0x9005, 0x028f, 0x1458, 0x1000, PQI_HWIF_SRCV, "GIGABYTE SmartHBA CLN1832"},
- {0x9005, 0x028f, 0x1cf2, 0x0B29, PQI_HWIF_SRCV, "ZTE SmartIOC2100 SDPSA/B_I-18i"},
- {0x9005, 0x028f, 0x1cf2, 0x5447, PQI_HWIF_SRCV, "ZTE SmartIOC2100 RM243-18i"},
- {0x9005, 0x028f, 0x1cf2, 0x544B, PQI_HWIF_SRCV, "ZTE SmartIOC2100 RS243-18i"},
- {0x9005, 0x028f, 0x1cf2, 0x544F, PQI_HWIF_SRCV, "ZTE SmartIOC2100 RM243B-18i"},
- {0x9005, 0x028f, 0x1bd4, 0x0071, PQI_HWIF_SRCV, "RS0800M5H16i"},
- {0x9005, 0x028f, 0x1bd4, 0x0072, PQI_HWIF_SRCV, "RS0800M5E16i"},
-
- /* (MSCC PM8240 24x12G based) */
- {0x9005, 0x028f, 0x152d, 0x8a36, PQI_HWIF_SRCV, "QS-8240-24i"},
- {0x9005, 0x028f, 0x9005, 0x1200, PQI_HWIF_SRCV, "SmartRAID 3154-24i"},
- {0x9005, 0x028f, 0x9005, 0x1201, PQI_HWIF_SRCV, "SmartRAID 3154-8i16e"},
- {0x9005, 0x028f, 0x9005, 0x1202, PQI_HWIF_SRCV, "SmartRAID 3154-8i8e"},
- {0x9005, 0x028f, 0x1bd4, 0x0047, PQI_HWIF_SRCV, "RAID 8240-24i"},
- {0x9005, 0x028f, 0x1dfc, 0x3161, PQI_HWIF_SRCV, "NTCOM SAS3 RAID-24i"},
- {0x9005, 0x028f, 0x1F0C, 0x3161, PQI_HWIF_SRCV, "NT RAID 3100-24i"},
-
- /* Huawei ID's */
- {0x9005, 0x028f, 0x19e5, 0xd227, PQI_HWIF_SRCV, "SR465C-M 4G"},
- {0x9005, 0x028f, 0x19e5, 0xd22a, PQI_HWIF_SRCV, "SR765-M"},
- {0x9005, 0x028f, 0x19e5, 0xd228, PQI_HWIF_SRCV, "SR455C-M 2G"},
- {0x9005, 0x028f, 0x19e5, 0xd22c, PQI_HWIF_SRCV, "SR455C-M 4G"},
- {0x9005, 0x028f, 0x19e5, 0xd229, PQI_HWIF_SRCV, "SR155-M"},
- {0x9005, 0x028f, 0x19e5, 0xd22b, PQI_HWIF_SRCV, "SR455C-ME 4G"},
-
- /* (MSCC PM8252 8x12G based) */
- {0x9005, 0x028f, 0x193d, 0x110b, PQI_HWIF_SRCV, "UN HBA H4508-Mf-8i"},
- {0x9005, 0x028f, 0x1bd4, 0x0052, PQI_HWIF_SRCV, "MT0801M6E"},
- {0x9005, 0x028f, 0x1bd4, 0x0054, PQI_HWIF_SRCV, "MT0800M6H"},
- {0x9005, 0x028f, 0x1bd4, 0x0086, PQI_HWIF_SRCV, "RT0800M7E"},
- {0x9005, 0x028f, 0x1bd4, 0x0087, PQI_HWIF_SRCV, "RT0800M7H"},
- {0x9005, 0x028f, 0x1f51, 0x1001, PQI_HWIF_SRCV, "SmartHBA P6600-8i"},
- {0x9005, 0x028f, 0x1f51, 0x1003, PQI_HWIF_SRCV, "SmartHBA P6600-8e"},
- {0x9005, 0x028f, 0x9005, 0x1460, PQI_HWIF_SRCV, "HBA 1200"},
- {0x9005, 0x028f, 0x9005, 0x1461, PQI_HWIF_SRCV, "SmartHBA 2200"},
- {0x9005, 0x028f, 0x9005, 0x1462, PQI_HWIF_SRCV, "HBA 1200-8i"},
-
- /* (MSCC PM8254 32x12G based) */
- {0x9005, 0x028f, 0x1bd4, 0x0051, PQI_HWIF_SRCV, "MT0804M6R"},
- {0x9005, 0x028f, 0x1bd4, 0x0053, PQI_HWIF_SRCV, "MT0808M6R"},
- {0x9005, 0x028f, 0x1bd4, 0x0088, PQI_HWIF_SRCV, "RT0804M7R"},
- {0x9005, 0x028f, 0x1bd4, 0x0089, PQI_HWIF_SRCV, "RT0808M7R"},
- {0x9005, 0x028f, 0x1f51, 0x1002, PQI_HWIF_SRCV, "SmartRAID P7604-8i"},
- {0x9005, 0x028f, 0x1f51, 0x1004, PQI_HWIF_SRCV, "SmartRAID P7604-8e"},
- {0x9005, 0x028f, 0x9005, 0x14a0, PQI_HWIF_SRCV, "SmartRAID 3254-8i"},
- {0x9005, 0x028f, 0x9005, 0x14a1, PQI_HWIF_SRCV, "SmartRAID 3204-8i"},
- {0x9005, 0x028f, 0x9005, 0x14a2, PQI_HWIF_SRCV, "SmartRAID 3252-8i"},
- {0x9005, 0x028f, 0x9005, 0x14a4, PQI_HWIF_SRCV, "SmartRAID 3254-8i /e"},
- {0x9005, 0x028f, 0x9005, 0x14a5, PQI_HWIF_SRCV, "SmartRAID 3252-8i /e"},
- {0x9005, 0x028f, 0x9005, 0x14a6, PQI_HWIF_SRCV, "SmartRAID 3204-8i /e"},
-
- /* (MSCC PM8262 16x12G based) */
- {0x9005, 0x028f, 0x9005, 0x14c0, PQI_HWIF_SRCV, "SmartHBA 2200-16i"},
- {0x9005, 0x028f, 0x9005, 0x14c1, PQI_HWIF_SRCV, "HBA 1200-16i"},
- {0x9005, 0x028f, 0x9005, 0x14c3, PQI_HWIF_SRCV, "HBA 1200-16e"},
- {0x9005, 0x028f, 0x9005, 0x14c4, PQI_HWIF_SRCV, "HBA 1200-8e"},
- {0x9005, 0x028f, 0x1f51, 0x1005, PQI_HWIF_SRCV, "SmartHBA P6600-16i"},
- {0x9005, 0x028f, 0x1f51, 0x1007, PQI_HWIF_SRCV, "SmartHBA P6600-8i8e"},
- {0x9005, 0x028f, 0x1f51, 0x1009, PQI_HWIF_SRCV, "SmartHBA P6600-16e"},
- {0x9005, 0x028f, 0x1cf2, 0x54dc, PQI_HWIF_SRCV, "ZTE SmartIOC2200 RM346-16i"},
- {0x9005, 0x028f, 0x1cf2, 0x0806, PQI_HWIF_SRCV, "ZTE SmartIOC2200 RS346-16i"},
-
- /* (MSCC PM8264 16x12G based) */
- {0x9005, 0x028f, 0x9005, 0x14b0, PQI_HWIF_SRCV, "SmartRAID 3254-16i"},
- {0x9005, 0x028f, 0x9005, 0x14b1, PQI_HWIF_SRCV, "SmartRAID 3258-16i"},
- {0x9005, 0x028f, 0x1f51, 0x1006, PQI_HWIF_SRCV, "SmartRAID P7608-16i"},
- {0x9005, 0x028f, 0x1f51, 0x1008, PQI_HWIF_SRCV, "SmartRAID P7608-8i8e"},
- {0x9005, 0x028f, 0x1f51, 0x100a, PQI_HWIF_SRCV, "SmartRAID P7608-16e"},
- {0x9005, 0x028f, 0x1cf2, 0x54da, PQI_HWIF_SRCV, "ZTE SmartROC3200 RM344-16i 4G"},
- {0x9005, 0x028f, 0x1cf2, 0x54db, PQI_HWIF_SRCV, "ZTE SmartROC3200 RM345-16i 8G"},
- {0x9005, 0x028f, 0x1cf2, 0x0804, PQI_HWIF_SRCV, "ZTE SmartROC3200 RS344-16i 4G"},
- {0x9005, 0x028f, 0x1cf2, 0x0805, PQI_HWIF_SRCV, "ZTE SmartROC3200 RS345-16i 8G"},
-
- /* (MSCC PM8265 16x12G based) */
- {0x9005, 0x028f, 0x1590, 0x02dc, PQI_HWIF_SRCV, "SR416i-a Gen10+"},
- {0x9005, 0x028f, 0x9005, 0x1470, PQI_HWIF_SRCV, "SmartRAID 3200"},
- {0x9005, 0x028f, 0x9005, 0x1471, PQI_HWIF_SRCV, "SmartRAID 3254-16i /e"},
- {0x9005, 0x028f, 0x9005, 0x1472, PQI_HWIF_SRCV, "SmartRAID 3258-16i /e"},
- {0x9005, 0x028f, 0x9005, 0x1473, PQI_HWIF_SRCV, "SmartRAID 3284-16io /e/uC"},
- {0x9005, 0x028f, 0x9005, 0x1474, PQI_HWIF_SRCV, "SmartRAID 3254-16io /e"},
- {0x9005, 0x028f, 0x9005, 0x1475, PQI_HWIF_SRCV, "SmartRAID 3254-16e /e"},
-
- /* (MSCC PM8266 16x12G based) */
- {0x9005, 0x028f, 0x1014, 0x0718, PQI_HWIF_SRCV, "IBM 4-Port 24G SAS"},
- {0x9005, 0x028f, 0x9005, 0x1490, PQI_HWIF_SRCV, "HBA 1200p Ultra"},
- {0x9005, 0x028f, 0x9005, 0x1491, PQI_HWIF_SRCV, "SmartHBA 2200p Ultra"},
- {0x9005, 0x028f, 0x9005, 0x1402, PQI_HWIF_SRCV, "HBA Ultra 1200P-16i"},
- {0x9005, 0x028f, 0x9005, 0x1441, PQI_HWIF_SRCV, "HBA Ultra 1200P-32i"},
-
- /* (MSCC PM8268 16x12G based) */
- {0x9005, 0x028f, 0x9005, 0x14d0, PQI_HWIF_SRCV, "SmartRAID Ultra 3258P-16i"},
-
- /* (MSCC PM8269 16x12G based) */
- {0x9005, 0x028f, 0x9005, 0x1400, PQI_HWIF_SRCV, "SmartRAID Ultra 3258P-16i /e"},
-
- /* (MSCC PM8270 16x12G based) */
- {0x9005, 0x028f, 0x9005, 0x1410, PQI_HWIF_SRCV, "HBA Ultra 1200P-16e"},
- {0x9005, 0x028f, 0x9005, 0x1411, PQI_HWIF_SRCV, "HBA 1200 Ultra"},
- {0x9005, 0x028f, 0x9005, 0x1412, PQI_HWIF_SRCV, "SmartHBA 2200 Ultra"},
- {0x9005, 0x028f, 0x9005, 0x1463, PQI_HWIF_SRCV, "SmartHBA 2200-8io /e"},
- {0x9005, 0x028f, 0x9005, 0x14c2, PQI_HWIF_SRCV, "SmartHBA 2200-16io /e"},
-
- /* (MSCC PM8271 16x12G based) */
- {0x9005, 0x028f, 0x9005, 0x14e0, PQI_HWIF_SRCV, "SmartIOC PM8271"},
-
- /* (MSCC PM8272 16x12G based) */
- {0x9005, 0x028f, 0x9005, 0x1420, PQI_HWIF_SRCV, "SmartRAID Ultra 3254-16e"},
-
- /* (MSCC PM8273 16x12G based) */
- {0x9005, 0x028f, 0x9005, 0x1430, PQI_HWIF_SRCV, "SmartRAID Ultra 3254-16e /e"},
-
- /* (MSCC PM8274 16x12G based) */
- {0x9005, 0x028f, 0x1e93, 0x1000, PQI_HWIF_SRCV, "ByteHBA JGH43024-8"},
- {0x9005, 0x028f, 0x1e93, 0x1001, PQI_HWIF_SRCV, "ByteHBA JGH43034-8"},
- {0x9005, 0x028f, 0x1e93, 0x1005, PQI_HWIF_SRCV, "ByteHBA JGH43014-8"},
-
- /* (MSCC PM8275 16x12G based) */
- {0x9005, 0x028f, 0x9005, 0x14f0, PQI_HWIF_SRCV, "SmartIOC PM8275"},
-
- /* (MSCC PM8276 16x12G based) */
- {0x9005, 0x028f, 0x9005, 0x1480, PQI_HWIF_SRCV, "SmartRAID 3200 Ultra"},
- {0x9005, 0x028f, 0x1e93, 0x1002, PQI_HWIF_SRCV, "ByteHBA JGH44014-8"},
-
- /* (MSCC PM8278 16x12G based) */
- {0x9005, 0x028f, 0x9005, 0x1440, PQI_HWIF_SRCV, "SmartRAID Ultra 3258P-32i"},
-
- /* (MSCC PM8279 32x12G based) */
- {0x9005, 0x028f, 0x9005, 0x1450, PQI_HWIF_SRCV, "SmartRAID Ultra 3258P-32i /e"},
- {0x9005, 0x028f, 0x1590, 0x0294, PQI_HWIF_SRCV, "SR932i-p Gen10+"},
- {0x9005, 0x028f, 0x1590, 0x0381, PQI_HWIF_SRCV, "SR932i-p Gen11"},
- {0x9005, 0x028f, 0x1590, 0x0382, PQI_HWIF_SRCV, "SR308i-p Gen11"},
- {0x9005, 0x028f, 0x1590, 0x0383, PQI_HWIF_SRCV, "SR308i-o Gen11"},
- {0x9005, 0x028f, 0x1590, 0x02db, PQI_HWIF_SRCV, "SR416ie-m Gen11"},
- {0x9005, 0x028f, 0x1590, 0x032e, PQI_HWIF_SRCV, "SR416i-o Gen11"},
- {0x9005, 0x028f, 0x9005, 0x1452, PQI_HWIF_SRCV, "SmartRAID 3200p Ultra"},
-
- /* (MSCC HBA/SMARTHBA/CFF SmartRAID - Lenovo 8X12G 16X12G based) */
- {0x9005, 0x028f, 0x1d49, 0x0220, PQI_HWIF_SRCV, "4350-8i SAS/SATA HBA"},
- {0x9005, 0x028f, 0x1d49, 0x0221, PQI_HWIF_SRCV, "4350-16i SAS/SATA HBA"},
- {0x9005, 0x028f, 0x1d49, 0x0520, PQI_HWIF_SRCV, "5350-8i"},
- {0x9005, 0x028f, 0x1d49, 0x0522, PQI_HWIF_SRCV, "5350-8i INTR"},
- {0x9005, 0x028f, 0x1d49, 0x0620, PQI_HWIF_SRCV, "9350-8i 2GB Flash"},
- {0x9005, 0x028f, 0x1d49, 0x0621, PQI_HWIF_SRCV, "9350-8i 2GB Flash INTR"},
- {0x9005, 0x028f, 0x1d49, 0x0622, PQI_HWIF_SRCV, "9350-16i 4GB Flash"},
- {0x9005, 0x028f, 0x1d49, 0x0623, PQI_HWIF_SRCV, "9350-16i 4GB Flash INTR"},
-
- {0, 0, 0, 0, 0, 0}
-};
-
-struct pqi_ident
-pqi_family_identifiers[] = {
- {0x9005, 0x028f, 0, 0, PQI_HWIF_SRCV, "Smart Array Storage Controller"},
- {0, 0, 0, 0, 0, 0}
-};
+ * Logging levels global
+*/
+unsigned long logging_level = PQISRC_LOG_LEVEL;
/*
* Function to identify the installed adapter.
@@ -404,10 +128,12 @@ static void read_device_hint_resource(struct pqisrc_softstate *softs,
{
DBG_FUNC("IN\n");
+ long result = 0;
+
device_t dev = softs->os_specific.pqi_dev;
- if (resource_long_value("smartpqi", device_get_unit(dev), keyword, (long *)value) == DEVICE_HINT_SUCCESS) {
- if (*value) {
+ if (resource_long_value("smartpqi", device_get_unit(dev), keyword, &result) == DEVICE_HINT_SUCCESS) {
+ if (result) {
/* set resource to 1 for disabling the
* firmware feature in device hint file. */
*value = 0;
@@ -434,11 +160,14 @@ static void read_device_hint_decimal_value(struct pqisrc_softstate *softs,
{
DBG_FUNC("IN\n");
+ long result = 0;
+
device_t dev = softs->os_specific.pqi_dev;
- if (resource_long_value("smartpqi", device_get_unit(dev), keyword, (long *)value) == DEVICE_HINT_SUCCESS) {
+ if (resource_long_value("smartpqi", device_get_unit(dev), keyword, &result) == DEVICE_HINT_SUCCESS) {
/* Nothing to do here. Value reads
* directly from Device.Hint file */
+ *value = result;
}
else {
/* Set to max to determine the value */
@@ -482,13 +211,23 @@ static void smartpqi_read_all_device_hint_file_entries(struct pqisrc_softstate *
read_device_hint_decimal_value(softs, SCATTER_GATHER_COUNT, &value);
softs->hint.sg_segments = value;
- /* hint.smartpqi.0.queue_count = "0" */
- read_device_hint_decimal_value(softs, QUEUE_COUNT, &value);
- softs->hint.cpu_count = value;
-
DBG_FUNC("IN\n");
}
+/* Get the driver parameter tunables. */
+static void
+smartpqi_get_tunables(void)
+{
+ /*
+ * Temp variable used to get the value from loader.conf.
+ * Initializing it with the current logging level value.
+ */
+ unsigned long logging_level_temp = PQISRC_LOG_LEVEL;
+
+ TUNABLE_ULONG_FETCH("hw.smartpqi.debug_level", &logging_level_temp);
+
+ DBG_SET_LOGGING_LEVEL(logging_level_temp);
+}
/*
* Allocate resources for our device, set up the bus interface.
@@ -506,7 +245,7 @@ smartpqi_attach(device_t dev)
rcb_t *rcbp = NULL;
/*
- * Initialise softc.
+ * Initialize softc.
*/
softs = device_get_softc(dev);
@@ -518,6 +257,8 @@ smartpqi_attach(device_t dev)
memset(softs, 0, sizeof(*softs));
softs->os_specific.pqi_dev = dev;
+ smartpqi_get_tunables();
+
DBG_FUNC("IN\n");
/* assume failure is 'not configured' */
@@ -830,4 +571,5 @@ static driver_t smartpqi_pci_driver = {
};
DRIVER_MODULE(smartpqi, pci, smartpqi_pci_driver, 0, 0);
+
MODULE_DEPEND(smartpqi, pci, 1, 1, 1);
diff --git a/sys/dev/smartpqi/smartpqi_misc.c b/sys/dev/smartpqi/smartpqi_misc.c
index 20ba4fc11eb7..6db0d80ed993 100644
--- a/sys/dev/smartpqi/smartpqi_misc.c
+++ b/sys/dev/smartpqi/smartpqi_misc.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2025 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -58,7 +58,7 @@ void
os_wellness_periodic(void *data)
{
struct pqisrc_softstate *softs = (struct pqisrc_softstate *)data;
- int ret = 0;
+ int ret;
/* update time to FW */
if (!pqisrc_ctrl_offline(softs)){
@@ -286,27 +286,3 @@ bsd_set_hint_adapter_cap(struct pqisrc_softstate *softs)
DBG_FUNC("OUT\n");
}
-
-void
-bsd_set_hint_adapter_cpu_config(struct pqisrc_softstate *softs)
-{
- DBG_FUNC("IN\n");
-
- /* online cpu count decides the no.of queues the driver can create,
- * and msi interrupt count as well.
- * If the cpu count is "zero" set by hint file then the driver
- * can have "one" queue and "one" legacy interrupt. (It shares event queue for
- * operational IB queue).
- * Check for os_get_intr_config function for interrupt assignment.*/
-
- if (softs->hint.cpu_count > softs->num_cpus_online) {
- /* Nothing to do here. Supported cpu count
- * already fetched from hardware */
- }
- else {
- /* Set Device.Hint cpu count here */
- softs->num_cpus_online = softs->hint.cpu_count;
- }
-
- DBG_FUNC("OUT\n");
-}
diff --git a/sys/dev/smartpqi/smartpqi_prototypes.h b/sys/dev/smartpqi/smartpqi_prototypes.h
index a10e5031d85e..fdf69e38fa59 100644
--- a/sys/dev/smartpqi/smartpqi_prototypes.h
+++ b/sys/dev/smartpqi/smartpqi_prototypes.h
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2025 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -84,23 +84,23 @@ uint32_t pqisrc_get_tag(lockless_stack_t *);
/* smartpqi_discovery.c */
void pqisrc_remove_device(pqisrc_softstate_t *, pqi_scsi_dev_t *);
boolean_t pqisrc_add_softs_entry(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device,
- uint8_t *scsi3addr);
+ uint8_t const *scsi3addr);
int pqisrc_get_ctrl_fw_version(pqisrc_softstate_t *);
int pqisrc_rescan_devices(pqisrc_softstate_t *);
int pqisrc_scan_devices(pqisrc_softstate_t *);
void pqisrc_cleanup_devices(pqisrc_softstate_t *);
void pqisrc_device_mem_free(pqisrc_softstate_t *, pqi_scsi_dev_t *);
-boolean_t pqisrc_is_external_raid_device(pqi_scsi_dev_t *device);
+boolean_t pqisrc_is_external_raid_device(pqi_scsi_dev_t const *device);
void pqisrc_free_device(pqisrc_softstate_t * softs,pqi_scsi_dev_t *device);
void pqisrc_init_bitmap(pqisrc_softstate_t *softs);
void pqisrc_remove_target_bit(pqisrc_softstate_t *softs, int target);
int pqisrc_find_avail_target(pqisrc_softstate_t *softs);
int pqisrc_find_device_list_index(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device);
+ pqi_scsi_dev_t const *device);
int pqisrc_find_btl_list_index(pqisrc_softstate_t *softs,
int bus, int target, int lun);
int pqisrc_delete_softs_entry(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device);
+ pqi_scsi_dev_t const *device);
int pqisrc_get_physical_logical_luns(pqisrc_softstate_t *softs, uint8_t cmd,
reportlun_data_ext_t **buff, size_t *data_length);
int pqisrc_send_scsi_inquiry(pqisrc_softstate_t *softs,
@@ -112,16 +112,16 @@ int pqisrc_prepare_send_raid(pqisrc_softstate_t *, pqisrc_raid_req_t *,
/* smartpqi_helper.c */
-boolean_t pqisrc_ctrl_offline(pqisrc_softstate_t *);
+boolean_t pqisrc_ctrl_offline(pqisrc_softstate_t const *);
void pqisrc_heartbeat_timer_handler(pqisrc_softstate_t *);
int pqisrc_wait_on_condition(pqisrc_softstate_t *softs, rcb_t *rcb,
uint32_t timeout);
-boolean_t pqisrc_device_equal(pqi_scsi_dev_t *, pqi_scsi_dev_t *);
-boolean_t pqisrc_is_hba_lunid(uint8_t *);
-boolean_t pqisrc_is_logical_device(pqi_scsi_dev_t *);
+boolean_t pqisrc_device_equal(pqi_scsi_dev_t const *, pqi_scsi_dev_t const *);
+boolean_t pqisrc_is_hba_lunid(uint8_t const*);
+boolean_t pqisrc_is_logical_device(pqi_scsi_dev_t const *);
void pqisrc_sanitize_inquiry_string(unsigned char *, int );
-void pqisrc_display_device_info(pqisrc_softstate_t *, char *, pqi_scsi_dev_t *);
-boolean_t pqisrc_scsi3addr_equal(uint8_t *, uint8_t *);
+void pqisrc_display_device_info(pqisrc_softstate_t *, char const *, pqi_scsi_dev_t *);
+boolean_t pqisrc_scsi3addr_equal(uint8_t const *, uint8_t const *);
void check_struct_sizes(void);
char *pqisrc_raidlevel_to_string(uint8_t);
void pqisrc_configure_legacy_intx(pqisrc_softstate_t*, boolean_t);
@@ -151,7 +151,7 @@ void pqisrc_show_aio_error_info(pqisrc_softstate_t *softs, rcb_t *rcb,
aio_path_error_info_elem_t *aio_err);
void pqisrc_show_raid_error_info(pqisrc_softstate_t *softs, rcb_t *rcb,
raid_path_error_info_elem_t *aio_err);
-boolean_t suppress_innocuous_error_prints(pqisrc_softstate_t *softs,
+boolean_t suppress_innocuous_error_prints(pqisrc_softstate_t const *softs,
rcb_t *rcb);
uint8_t pqisrc_get_cmd_from_rcb(rcb_t *);
boolean_t pqisrc_is_innocuous_error(pqisrc_softstate_t *, rcb_t *, void *);
@@ -163,7 +163,7 @@ int pqisrc_build_send_vendor_request(pqisrc_softstate_t *softs,
int pqisrc_build_send_io(pqisrc_softstate_t *,rcb_t *);
int pqisrc_build_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
pqi_scsi_dev_t *device, rcb_t *rcb);
-int pqisrc_send_tmf(pqisrc_softstate_t *, pqi_scsi_dev_t *,
+int pqisrc_send_tmf(pqisrc_softstate_t *, pqi_scsi_dev_t const *,
rcb_t *, rcb_t *, int);
int pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs);
int pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs);
@@ -173,27 +173,26 @@ void pqisrc_build_aio_R1_write(pqisrc_softstate_t *,
pqi_aio_raid1_write_req_t *, rcb_t *, uint32_t);
void pqisrc_build_aio_R5or6_write(pqisrc_softstate_t *,
pqi_aio_raid5or6_write_req_t *, rcb_t *, uint32_t);
-void pqisrc_show_cdb(pqisrc_softstate_t *softs, char *msg, rcb_t *rcb, uint8_t *cdb);
-void pqisrc_print_buffer(pqisrc_softstate_t *softs, char *msg, void *user_buf, uint32_t total_len, uint32_t flags);
+void pqisrc_show_cdb(pqisrc_softstate_t *softs, char const *msg, rcb_t const *rcb, uint8_t *cdb);
+void pqisrc_print_buffer(pqisrc_softstate_t *softs, char const *msg, void *user_buf, uint32_t total_len, uint32_t flags);
void pqisrc_show_rcb_details(pqisrc_softstate_t *softs, rcb_t *rcb, char *msg, void *err_info);
void pqisrc_show_aio_io(pqisrc_softstate_t *, rcb_t *,
pqi_aio_req_t *, uint32_t);
-void pqisrc_show_aio_common(pqisrc_softstate_t *, rcb_t *, pqi_aio_req_t *);
-void pqisrc_show_aio_R1_write(pqisrc_softstate_t *, rcb_t *,
+void pqisrc_show_aio_common(pqisrc_softstate_t *, rcb_t const *, pqi_aio_req_t *);
+void pqisrc_show_aio_R1_write(pqisrc_softstate_t *, rcb_t const *,
pqi_aio_raid1_write_req_t *);
-void pqisrc_show_aio_R5or6_write(pqisrc_softstate_t *, rcb_t *,
+void pqisrc_show_aio_R5or6_write(pqisrc_softstate_t *, rcb_t const *,
pqi_aio_raid5or6_write_req_t *);
-boolean_t pqisrc_cdb_is_write(uint8_t *);
-void print_this_counter(pqisrc_softstate_t *softs, io_counters_t *pcounter, char *msg);
+boolean_t pqisrc_cdb_is_write(uint8_t const *);
+void print_this_counter(pqisrc_softstate_t const *softs, io_counters_t const *pcounter, char const *msg);
void print_all_counters(pqisrc_softstate_t *softs, uint32_t flags);
char *io_path_to_ascii(IO_PATH_T path);
void int_to_scsilun(uint64_t, uint8_t *);
-boolean_t pqisrc_cdb_is_read(uint8_t *);
+boolean_t pqisrc_cdb_is_read(uint8_t const *);
void pqisrc_build_aio_io(pqisrc_softstate_t *, rcb_t *, pqi_aio_req_t *, uint32_t);
uint8_t pqisrc_get_aio_data_direction(rcb_t *);
uint8_t pqisrc_get_raid_data_direction(rcb_t *);
-void dump_tmf_details(pqisrc_softstate_t *, rcb_t *, char *);
-io_type_t get_io_type_from_cdb(uint8_t *);
+io_type_t get_io_type_from_cdb(uint8_t const *);
OS_ATOMIC64_T increment_this_counter(io_counters_t *, IO_PATH_T , io_type_t );
boolean_t
is_buffer_zero(void *, uint32_t );
@@ -237,7 +236,7 @@ int pqisrc_delete_op_queue(pqisrc_softstate_t *, uint32_t, boolean_t);
void pqisrc_destroy_event_queue(pqisrc_softstate_t *);
void pqisrc_destroy_op_ib_queues(pqisrc_softstate_t *);
void pqisrc_destroy_op_ob_queues(pqisrc_softstate_t *);
-int pqisrc_change_op_ibq_queue_prop(pqisrc_softstate_t *, ib_queue_t *,
+int pqisrc_change_op_ibq_queue_prop(pqisrc_softstate_t *, ib_queue_t const *,
uint32_t);
int pqisrc_create_op_obq(pqisrc_softstate_t *, ob_queue_t *);
int pqisrc_create_op_ibq(pqisrc_softstate_t *, ib_queue_t *);
@@ -287,7 +286,6 @@ int os_destroy_semaphore(struct sema *);
void os_sema_lock(struct sema *);
void os_sema_unlock(struct sema *);
void bsd_set_hint_adapter_cap(struct pqisrc_softstate *);
-void bsd_set_hint_adapter_cpu_config(struct pqisrc_softstate *);
int os_strlcpy(char *dst, char *src, int len);
void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *);
diff --git a/sys/dev/smartpqi/smartpqi_queue.c b/sys/dev/smartpqi/smartpqi_queue.c
index f05c951cd4f9..22bc2db572f8 100644
--- a/sys/dev/smartpqi/smartpqi_queue.c
+++ b/sys/dev/smartpqi/smartpqi_queue.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2025 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -514,11 +514,7 @@ pqisrc_destroy_event_queue(pqisrc_softstate_t *softs)
DBG_FUNC("IN\n");
if (softs->event_q.created == true) {
- int ret = PQI_STATUS_SUCCESS;
- ret = pqisrc_delete_op_queue(softs, softs->event_q.q_id, false);
- if (ret) {
- DBG_ERR("Failed to Delete Event Q %u\n", softs->event_q.q_id);
- }
+ pqisrc_delete_op_queue(softs, softs->event_q.q_id, false);
softs->event_q.created = false;
}
@@ -535,7 +531,7 @@ pqisrc_destroy_event_queue(pqisrc_softstate_t *softs)
void
pqisrc_destroy_op_ib_queues(pqisrc_softstate_t *softs)
{
- int ret = PQI_STATUS_SUCCESS;
+
ib_queue_t *op_ib_q = NULL;
uint32_t total_op_ibq = softs->num_op_raid_ibq;
int i;
@@ -548,12 +544,7 @@ pqisrc_destroy_op_ib_queues(pqisrc_softstate_t *softs)
op_ib_q = &softs->op_raid_ib_q[i];
release_queue:
if (op_ib_q->created == true) {
- ret = pqisrc_delete_op_queue(softs, op_ib_q->q_id,
- true);
- if (ret) {
- DBG_ERR("Failed to Delete IB Q %u\n",
- op_ib_q->q_id);
- }
+ pqisrc_delete_op_queue(softs, op_ib_q->q_id, true);
op_ib_q->created = false;
}
@@ -582,7 +573,6 @@ release_queue:
void
pqisrc_destroy_op_ob_queues(pqisrc_softstate_t *softs)
{
- int ret = PQI_STATUS_SUCCESS;
int i;
ob_queue_t *op_ob_q = NULL;
@@ -592,10 +582,7 @@ pqisrc_destroy_op_ob_queues(pqisrc_softstate_t *softs)
op_ob_q = &softs->op_ob_q[i];
if (op_ob_q->created == true) {
- ret = pqisrc_delete_op_queue(softs, op_ob_q->q_id, false);
- if (ret) {
- DBG_ERR("Failed to Delete OB Q %u\n",op_ob_q->q_id);
- }
+ pqisrc_delete_op_queue(softs, op_ob_q->q_id, false);
op_ob_q->created = false;
}
@@ -643,7 +630,7 @@ pqisrc_destroy_admin_queue(pqisrc_softstate_t *softs)
*/
int
pqisrc_change_op_ibq_queue_prop(pqisrc_softstate_t *softs,
- ib_queue_t *op_ib_q, uint32_t prop)
+ ib_queue_t const *op_ib_q, uint32_t prop)
{
int ret = PQI_STATUS_SUCCESS;
gen_adm_req_iu_t admin_req;
diff --git a/sys/dev/smartpqi/smartpqi_request.c b/sys/dev/smartpqi/smartpqi_request.c
index d3c4fb989a99..e6ba41a814b2 100644
--- a/sys/dev/smartpqi/smartpqi_request.c
+++ b/sys/dev/smartpqi/smartpqi_request.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2025 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -85,8 +85,8 @@ pqisrc_contiguous_free_elem(uint32_t pi, uint32_t ci, uint32_t elem_in_q)
/* Subroutine to find out num of elements need for the request */
static uint32_t
-pqisrc_num_elem_needed(pqisrc_softstate_t *softs, uint32_t SG_Count,
- pqi_scsi_dev_t *devp, boolean_t is_write, IO_PATH_T io_path)
+pqisrc_num_elem_needed(pqisrc_softstate_t const *softs, uint32_t SG_Count,
+ pqi_scsi_dev_t const *devp, boolean_t is_write, IO_PATH_T io_path)
{
uint32_t num_sg;
uint32_t num_elem_required = 1;
@@ -129,7 +129,7 @@ pqisrc_build_sgl(sgt_t *sg_array, rcb_t *rcb, iu_header_t *iu_hdr,
DBG_FUNC("IN\n");
- /* DBG_IO("SGL_Count :%d",num_sg); */
+ /* DBG_IO("SGL_Count :%d\n",num_sg); */
if (0 == num_sg) {
goto out;
}
@@ -404,11 +404,11 @@ pqisrc_build_aio_common(pqisrc_softstate_t *softs, pqi_aio_req_t *aio_req,
}
/*Subroutine used to show standard AIO IU fields */
void
-pqisrc_show_aio_common(pqisrc_softstate_t *softs, rcb_t *rcb,
+pqisrc_show_aio_common(pqisrc_softstate_t *softs, rcb_t const *rcb,
pqi_aio_req_t *aio_req)
{
#ifdef DEBUG_AIO
- DBG_INFO("AIO IU Content, tag# 0x%08x", rcb->tag);
+ DBG_INFO("AIO IU Content, tag# 0x%08x\n", rcb->tag);
DBG_INFO("%15s: 0x%x\n", "iu_type", aio_req->header.iu_type);
DBG_INFO("%15s: 0x%x\n", "comp_feat", aio_req->header.comp_feature);
DBG_INFO("%15s: 0x%x\n", "length", aio_req->header.iu_length);
@@ -453,11 +453,11 @@ pqisrc_build_aio_R1_write(pqisrc_softstate_t *softs,
{
DBG_FUNC("IN\n");
if (!rcb->dvp) {
- DBG_WARN("%s: DEBUG: dev ptr is null", __func__);
+ DBG_WARN("%s: DEBUG: dev ptr is NULL\n", __func__);
return;
}
if (!rcb->dvp->raid_map) {
- DBG_WARN("%s: DEBUG: raid_map is null", __func__);
+ DBG_WARN("%s: DEBUG: raid_map is NULL\n", __func__);
return;
}
@@ -522,12 +522,12 @@ pqisrc_build_aio_R1_write(pqisrc_softstate_t *softs,
/*Subroutine used to show AIO RAID1 Write bypass IU fields */
void
-pqisrc_show_aio_R1_write(pqisrc_softstate_t *softs, rcb_t *rcb,
+pqisrc_show_aio_R1_write(pqisrc_softstate_t *softs, rcb_t const *rcb,
pqi_aio_raid1_write_req_t *aio_req)
{
#ifdef DEBUG_AIO
- DBG_INFO("AIO RAID1 Write IU Content, tag# 0x%08x", rcb->tag);
+ DBG_INFO("AIO RAID1 Write IU Content, tag# 0x%08x\n", rcb->tag);
DBG_INFO("%15s: 0x%x\n", "iu_type", aio_req->header.iu_type);
DBG_INFO("%15s: 0x%x\n", "comp_feat", aio_req->header.comp_feature);
DBG_INFO("%15s: 0x%x\n", "length", aio_req->header.iu_length);
@@ -674,7 +674,7 @@ pqisrc_build_aio_R5or6_write(pqisrc_softstate_t *softs,
/*Subroutine used to show AIO RAID5/6 Write bypass IU fields */
void
-pqisrc_show_aio_R5or6_write(pqisrc_softstate_t *softs, rcb_t *rcb,
+pqisrc_show_aio_R5or6_write(pqisrc_softstate_t *softs, rcb_t const *rcb,
pqi_aio_raid5or6_write_req_t *aio_req)
{
#ifdef DEBUG_AIO
@@ -724,7 +724,7 @@ pqisrc_show_aio_R5or6_write(pqisrc_softstate_t *softs, rcb_t *rcb,
/* Is the cdb a read command? */
boolean_t
-pqisrc_cdb_is_read(uint8_t *cdb)
+pqisrc_cdb_is_read(uint8_t const *cdb)
{
if (cdb[0] == SCMD_READ_6 || cdb[0] == SCMD_READ_10 ||
cdb[0] == SCMD_READ_12 || cdb[0] == SCMD_READ_16)
@@ -734,7 +734,7 @@ pqisrc_cdb_is_read(uint8_t *cdb)
/* Is the cdb a write command? */
boolean_t
-pqisrc_cdb_is_write(uint8_t *cdb)
+pqisrc_cdb_is_write(uint8_t const *cdb)
{
if (cdb == NULL)
return false;
@@ -1021,7 +1021,7 @@ pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb)
/* coverity[unchecked_value] */
num_elem_needed = pqisrc_num_elem_needed(softs,
OS_GET_IO_SG_COUNT(rcb), devp, is_write, io_path);
- DBG_IO("num_elem_needed :%u",num_elem_needed);
+ DBG_IO("num_elem_needed :%u\n",num_elem_needed);
do {
uint32_t num_elem_available;
@@ -1047,7 +1047,7 @@ pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb)
}
}while(TraverseCount < 2);
- DBG_IO("num_elem_alloted :%u",num_elem_alloted);
+ DBG_IO("num_elem_alloted :%u\n",num_elem_alloted);
if (num_elem_alloted == 0) {
DBG_WARN("OUT: IB Queues were full\n");
return PQI_STATUS_QFULL;
@@ -1131,7 +1131,7 @@ fill_lba_for_scsi_rw(pqisrc_softstate_t *softs, uint8_t *cdb, aio_req_locator_t
{
if (!l) {
- DBG_INFO("No locator ptr: AIO ineligible");
+ DBG_INFO("No locator ptr: AIO ineligible\n");
return PQI_STATUS_FAILURE;
}
@@ -1172,7 +1172,7 @@ fill_lba_for_scsi_rw(pqisrc_softstate_t *softs, uint8_t *cdb, aio_req_locator_t
break;
default:
/* Process via normal I/O path. */
- DBG_AIO("NOT read or write 6/10/12/16: AIO ineligible");
+ DBG_AIO("NOT read or write 6/10/12/16: AIO ineligible\n");
return PQI_STATUS_FAILURE;
}
return PQI_STATUS_SUCCESS;
@@ -1180,9 +1180,9 @@ fill_lba_for_scsi_rw(pqisrc_softstate_t *softs, uint8_t *cdb, aio_req_locator_t
/* determine whether writes to certain types of RAID are supported. */
-static boolean_t
-pqisrc_is_supported_write(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device)
+static inline boolean_t
+pqisrc_is_supported_write(pqisrc_softstate_t const *softs,
+ pqi_scsi_dev_t const *device)
{
DBG_FUNC("IN\n");
@@ -1381,7 +1381,7 @@ pqisrc_is_r5or6_single_group(pqisrc_softstate_t *softs, aio_req_locator_t *l)
l->r5or6.row.blks_per_row;
if (l->group.first != l->group.last) {
- DBG_AIO("AIO ineligible");
+ DBG_AIO("AIO ineligible\n");
ret = false;
}
@@ -1403,7 +1403,7 @@ pqisrc_is_r5or6_single_row(pqisrc_softstate_t *softs, aio_req_locator_t *l)
l->r5or6.row.last = l->block.last / l->stripesz;
if (l->r5or6.row.first != l->r5or6.row.last) {
- DBG_AIO("AIO ineligible");
+ DBG_AIO("AIO ineligible\n");
ret = false;
}
@@ -1431,7 +1431,7 @@ pqisrc_is_r5or6_single_column(pqisrc_softstate_t *softs, aio_req_locator_t *l)
l->r5or6.col.last = l->r5or6.row.offset_last / l->strip_sz;
if (l->r5or6.col.first != l->r5or6.col.last) {
- DBG_AIO("AIO ineligible");
+ DBG_AIO("AIO ineligible\n");
ret = false;
}
@@ -1494,7 +1494,7 @@ pqisrc_handle_blk_size_diffs(aio_req_locator_t *l)
*/
static boolean_t
pqisrc_aio_req_too_big(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device, rcb_t *rcb,
+ pqi_scsi_dev_t *device, rcb_t const *rcb,
aio_req_locator_t *l, uint32_t disk_blk_cnt)
{
boolean_t ret = false;
@@ -1537,7 +1537,7 @@ pqisrc_aio_req_too_big(pqisrc_softstate_t *softs,
if (size > dev_max) {
- DBG_AIO("AIO ineligible: size=%u, max=%u", size, dev_max);
+ DBG_AIO("AIO ineligible: size=%u, max=%u\n", size, dev_max);
ret = true;
}
@@ -1547,7 +1547,7 @@ pqisrc_aio_req_too_big(pqisrc_softstate_t *softs,
#ifdef DEBUG_RAID_MAP
static inline void
-pqisrc_aio_show_raid_map(pqisrc_softstate_t *softs, struct raid_map *m)
+pqisrc_aio_show_raid_map(pqisrc_softstate_t const *softs, struct raid_map const *m)
{
int i;
@@ -1583,7 +1583,7 @@ pqisrc_aio_show_raid_map(pqisrc_softstate_t *softs, struct raid_map *m)
static inline void
pqisrc_aio_show_locator_info(pqisrc_softstate_t *softs,
- aio_req_locator_t *l, uint32_t disk_blk_cnt, rcb_t *rcb)
+ aio_req_locator_t *l, uint32_t disk_blk_cnt, rcb_t const *rcb)
{
#ifdef DEBUG_AIO_LOCATOR
pqisrc_aio_show_raid_map(softs, l->raid_map);
@@ -1665,7 +1665,7 @@ pqisrc_aio_build_cdb(aio_req_locator_t *l,
/* print any arbitrary buffer of length total_len */
void
-pqisrc_print_buffer(pqisrc_softstate_t *softs, char *msg, void *user_buf,
+pqisrc_print_buffer(pqisrc_softstate_t *softs, char const *msg, void *user_buf,
uint32_t total_len, uint32_t flags)
{
#define LINE_BUF_LEN 60
@@ -1713,7 +1713,7 @@ pqisrc_print_buffer(pqisrc_softstate_t *softs, char *msg, void *user_buf,
/* print CDB with column header */
void
-pqisrc_show_cdb(pqisrc_softstate_t *softs, char *msg, rcb_t *rcb, uint8_t *cdb)
+pqisrc_show_cdb(pqisrc_softstate_t *softs, char const *msg, rcb_t const *rcb, uint8_t *cdb)
{
/* Print the CDB contents */
pqisrc_print_buffer(softs, msg, cdb, rcb->cmdlen, PRINT_FLAG_HDR_COLUMN);
@@ -1742,7 +1742,7 @@ pqisrc_show_rcb_details(pqisrc_softstate_t *softs, rcb_t *rcb, char *msg, void *
DBG_INFO("tag=0x%x dir=%u host_timeout=%ums\n", rcb->tag,
rcb->data_dir, (uint32_t)rcb->host_timeout_ms);
- DBG_INFO("BTL: %d:%d:%d addr=0x%x\n", devp->bus, devp->target,
+ DBG_INFO("B%d:T%d:L%d addr=0x%x\n", devp->bus, devp->target,
devp->lun, GET_LE32(devp->scsi3addr));
if (rcb->path == AIO_PATH)
@@ -1786,7 +1786,7 @@ pqisrc_build_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
return PQI_STATUS_FAILURE;
}
if (device->raid_map == NULL) {
- DBG_INFO("tag=0x%x BTL: %d:%d:%d Raid map is NULL\n",
+ DBG_INFO("tag=0x%x B%d:T%d:L%d Raid map is NULL\n",
rcb->tag, device->bus, device->target, device->lun);
return PQI_STATUS_FAILURE;
}
@@ -1846,15 +1846,18 @@ pqisrc_build_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
}
if (l->map.idx >= RAID_MAP_MAX_ENTRIES) {
- DBG_INFO("AIO ineligible: index exceeds max map entries");
+ DBG_INFO("AIO ineligible: index exceeds max map entries\n");
return PQI_STATUS_FAILURE;
}
rcb->ioaccel_handle =
l->raid_map->dev_data[l->map.idx].ioaccel_handle;
+ /*
if (!pqisrc_calc_aio_block(l))
return PQI_STATUS_FAILURE;
+ */
+ pqisrc_calc_aio_block(l);
disk_blk_cnt = pqisrc_handle_blk_size_diffs(l);
@@ -1889,8 +1892,8 @@ pqisrc_build_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
*/
static int
-pqisrc_send_aio_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
- rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
+pqisrc_send_aio_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t const *devp,
+ rcb_t *rcb, rcb_t const *rcb_to_manage, int tmf_type)
{
int rval = PQI_STATUS_SUCCESS;
pqi_aio_tmf_req_t tmf_req;
@@ -1966,8 +1969,8 @@ pqisrc_send_aio_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
/* Function used to submit a Raid TMF to the adapter */
static int
-pqisrc_send_raid_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
- rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
+pqisrc_send_raid_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t const *devp,
+ rcb_t *rcb, rcb_t const *rcb_to_manage, int tmf_type)
{
int rval = PQI_STATUS_SUCCESS;
pqi_raid_tmf_req_t tmf_req;
@@ -1997,9 +2000,6 @@ pqisrc_send_raid_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
tmf_type == SOP_TASK_MANAGEMENT_LUN_RESET) {
/* OS_TMF_TIMEOUT_SEC - 1 to accomodate driver processing */
tmf_req.timeout_in_sec = OS_TMF_TIMEOUT_SEC - 1;
- /* if OS tmf timeout is 0, set minimum value for timeout */
- if (!tmf_req.timeout_in_sec)
- tmf_req.timeout_in_sec = 1;
}
op_ib_q = &softs->op_raid_ib_q[0];
@@ -2034,8 +2034,8 @@ pqisrc_send_raid_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
return rval;
}
-void
-dump_tmf_details(pqisrc_softstate_t *softs, rcb_t *rcb, char *msg)
+static void
+dump_tmf_details(rcb_t *rcb, char const *msg)
{
uint32_t qid = rcb->req_q ? rcb->req_q->q_id : -1;
@@ -2045,7 +2045,7 @@ dump_tmf_details(pqisrc_softstate_t *softs, rcb_t *rcb, char *msg)
}
int
-pqisrc_send_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
+pqisrc_send_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t const *devp,
rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
{
int ret = PQI_STATUS_SUCCESS;
@@ -2062,11 +2062,11 @@ pqisrc_send_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK)
{
rcb_to_manage->host_wants_to_abort_this = true;
- dump_tmf_details(softs, rcb_to_manage, "rcb_to_manage");
+ dump_tmf_details(rcb_to_manage, "rcb_to_manage");
}
- dump_tmf_details(softs, rcb, "rcb");
+ dump_tmf_details(rcb, "rcb");
if(!devp->is_physical_device) {
if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
@@ -2167,7 +2167,7 @@ io_type_to_ascii(io_type_t io_type)
/* return the io type based on cdb */
io_type_t
-get_io_type_from_cdb(uint8_t *cdb)
+get_io_type_from_cdb(uint8_t const *cdb)
{
if (cdb == NULL)
return UNKNOWN_IO_TYPE;
@@ -2223,9 +2223,9 @@ pqisrc_increment_io_counters(pqisrc_softstate_t *softs, rcb_t *rcb)
#if 1 /* leave this enabled while we gain confidence for each io path */
if (ret_val == 1)
{
- char *raid_type = counter_type_to_raid_ascii(type_index);
- char *path = io_path_to_ascii(rcb->path);
- char *io_ascii = io_type_to_ascii(io_type);
+ char const *raid_type = counter_type_to_raid_ascii(type_index);
+ char const *path = io_path_to_ascii(rcb->path);
+ char const *io_ascii = io_type_to_ascii(io_type);
DBG_INFO("Got first path/type hit. "
"Path=%s RaidType=%s IoType=%s\n",
@@ -2238,7 +2238,7 @@ pqisrc_increment_io_counters(pqisrc_softstate_t *softs, rcb_t *rcb)
/* public routine to print a particular counter with header msg */
void
-print_this_counter(pqisrc_softstate_t *softs, io_counters_t *pcounter, char *msg)
+print_this_counter(pqisrc_softstate_t const *softs, io_counters_t const *pcounter, char const *msg)
{
io_counters_t counter;
uint32_t percent_reads;
@@ -2280,7 +2280,7 @@ print_this_counter(pqisrc_softstate_t *softs, io_counters_t *pcounter, char *msg
boolean_t
is_buffer_zero(void *buffer, uint32_t size)
{
- char *buf = buffer;
+ char const *buf = buffer;
DWORD ii;
if (buffer == NULL || size == 0)
diff --git a/sys/dev/smartpqi/smartpqi_response.c b/sys/dev/smartpqi/smartpqi_response.c
index 1b4f0d86095d..38695860e520 100644
--- a/sys/dev/smartpqi/smartpqi_response.c
+++ b/sys/dev/smartpqi/smartpqi_response.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2025 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -121,7 +121,7 @@ pqisrc_extract_sense_data(sense_data_u_t *sense_data, uint8_t *key, uint8_t *asc
if (sense_data->fixed_format.response_code == SCSI_SENSE_RESPONSE_70 ||
sense_data->fixed_format.response_code == SCSI_SENSE_RESPONSE_71)
{
- sense_data_fixed_t *fixed = &sense_data->fixed_format;
+ sense_data_fixed_t const *fixed = &sense_data->fixed_format;
*key = fixed->sense_key;
*asc = fixed->sense_code;
@@ -130,7 +130,7 @@ pqisrc_extract_sense_data(sense_data_u_t *sense_data, uint8_t *key, uint8_t *asc
else if (sense_data->descriptor_format.response_code == SCSI_SENSE_RESPONSE_72 ||
sense_data->descriptor_format.response_code == SCSI_SENSE_RESPONSE_73)
{
- sense_data_descriptor_t *desc = &sense_data->descriptor_format;
+ sense_data_descriptor_t const *desc = &sense_data->descriptor_format;
*key = desc->sense_key;
*asc = desc->sense_code;
@@ -146,7 +146,7 @@ pqisrc_extract_sense_data(sense_data_u_t *sense_data, uint8_t *key, uint8_t *asc
/* Suppress common errors unless verbose debug flag is on */
boolean_t
-suppress_innocuous_error_prints(pqisrc_softstate_t *softs, rcb_t *rcb)
+suppress_innocuous_error_prints(pqisrc_softstate_t const *softs, rcb_t *rcb)
{
uint8_t opcode = rcb->cdbp ? rcb->cdbp[0] : 0xFF;
@@ -165,7 +165,7 @@ static void
pqisrc_show_sense_data_simple(pqisrc_softstate_t *softs, rcb_t *rcb, sense_data_u_t *sense_data)
{
uint8_t opcode = rcb->cdbp ? rcb->cdbp[0] : 0xFF;
- char *path = io_path_to_ascii(rcb->path);
+ char const *path = io_path_to_ascii(rcb->path);
uint8_t key, asc, ascq;
pqisrc_extract_sense_data(sense_data, &key, &asc, &ascq);
@@ -393,7 +393,7 @@ pqisrc_process_task_management_response(pqisrc_softstate_t *softs,
}
static int
-pqisrc_process_vendor_general_response(pqi_vendor_general_response_t *response)
+pqisrc_process_vendor_general_response(pqi_vendor_general_response_t const *response)
{
int ret = PQI_STATUS_SUCCESS;
@@ -448,7 +448,7 @@ pqisrc_process_response_queue(pqisrc_softstate_t *softs, int oq_id)
rcb = &softs->rcb[tag];
/* Make sure we are processing a valid response. */
if ((rcb->tag != tag) || (rcb->req_pending == false)) {
- DBG_ERR("No such request pending with tag : %x rcb->tag : %x", tag, rcb->tag);
+ DBG_ERR("No such request pending with tag : %x rcb->tag : %x\n", tag, rcb->tag);
oq_ci = (oq_ci + 1) % ob_q->num_elem;
break;
}
diff --git a/sys/dev/smartpqi/smartpqi_sis.c b/sys/dev/smartpqi/smartpqi_sis.c
index 74dcb90e7d3d..82eb999ca4b8 100644
--- a/sys/dev/smartpqi/smartpqi_sis.c
+++ b/sys/dev/smartpqi/smartpqi_sis.c
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2025 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -84,7 +84,7 @@ sis_disable_intx(pqisrc_softstate_t *softs)
void
sis_disable_interrupt(pqisrc_softstate_t *softs)
{
- DBG_FUNC("IN");
+ DBG_FUNC("IN\n");
switch(softs->intr_type) {
case INTR_TYPE_FIXED:
@@ -100,7 +100,7 @@ sis_disable_interrupt(pqisrc_softstate_t *softs)
break;
}
- DBG_FUNC("OUT");
+ DBG_FUNC("OUT\n");
}
diff --git a/sys/dev/smartpqi/smartpqi_structures.h b/sys/dev/smartpqi/smartpqi_structures.h
index 4af824fa2592..0c9ad375823d 100644
--- a/sys/dev/smartpqi/smartpqi_structures.h
+++ b/sys/dev/smartpqi/smartpqi_structures.h
@@ -1,5 +1,5 @@
/*-
- * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
+ * Copyright 2016-2025 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,6 +35,7 @@ struct bmic_host_wellness_driver_version {
uint8_t driver_version_tag[2];
uint16_t driver_version_length;
char driver_version[32];
+ uint8_t dont_write_tag[2];
uint8_t end_tag[2];
}OS_ATTRIBUTE_PACKED;
@@ -901,6 +902,8 @@ typedef struct pqi_scsi_device {
int *offload_to_mirror; /* Send next I/O accelerator RAID
offload request to mirror drive. */
struct raid_map *raid_map; /* I/O accelerator RAID map */
+ int *temp_offload_to_mirror; /* Temporary stored offload_to_mirror which will be freed later */
+ struct raid_map *temp_raid_map; /* Temporary stored RAID map which will be freed later */
int reset_in_progress;
int logical_unit_number;
diff --git a/sys/dev/sound/midi/midi.c b/sys/dev/sound/midi/midi.c
index c86f5fb41b14..4fd0e3dcf134 100644
--- a/sys/dev/sound/midi/midi.c
+++ b/sys/dev/sound/midi/midi.c
@@ -236,7 +236,7 @@ midi_out(struct snd_midi *m, uint8_t *buf, int size)
return (0);
}
- used = MIN(size, MIDIQ_LEN(m->outq));
+ used = min(size, MIDIQ_LEN(m->outq));
if (used)
MIDIQ_DEQ(m->outq, buf, used);
if (MIDIQ_EMPTY(m->outq)) {
@@ -387,13 +387,15 @@ midi_read(struct cdev *i_dev, struct uio *uio, int ioflag)
* At this point, it is certain that m->inq has data
*/
- used = MIN(MIDIQ_LEN(m->inq), uio->uio_resid);
- used = MIN(used, MIDI_RSIZE);
+ used = min(MIDIQ_LEN(m->inq), uio->uio_resid);
+ used = min(used, MIDI_RSIZE);
MIDIQ_DEQ(m->inq, buf, used);
+ mtx_unlock(&m->lock);
retval = uiomove(buf, used, uio);
if (retval)
- goto err1;
+ goto err0;
+ mtx_lock(&m->lock);
}
/*
@@ -454,12 +456,14 @@ midi_write(struct cdev *i_dev, struct uio *uio, int ioflag)
* We are certain than data can be placed on the queue
*/
- used = MIN(MIDIQ_AVAIL(m->outq), uio->uio_resid);
- used = MIN(used, MIDI_WSIZE);
+ used = min(MIDIQ_AVAIL(m->outq), uio->uio_resid);
+ used = min(used, MIDI_WSIZE);
+ mtx_unlock(&m->lock);
retval = uiomove(buf, used, uio);
if (retval)
- goto err1;
+ goto err0;
+ mtx_lock(&m->lock);
MIDIQ_ENQ(m->outq, buf, used);
/*
* Inform the bottom half that data can be written
diff --git a/sys/dev/sound/midi/midiq.h b/sys/dev/sound/midi/midiq.h
index 80825974283e..8ffa4a40d23d 100644
--- a/sys/dev/sound/midi/midiq.h
+++ b/sys/dev/sound/midi/midiq.h
@@ -56,7 +56,7 @@ struct name { \
* No protection against overflow, underflow
*/
#define MIDIQ_ENQ(head, buf, size) do { \
- MIDIQ_MOVE(&(head).b[(head).h], (buf), sizeof(*(head).b) * MIN((size), (head).s - (head).h)); \
+ MIDIQ_MOVE(&(head).b[(head).h], (buf), sizeof(*(head).b) * min((size), (head).s - (head).h)); \
if( (head).s - (head).h < (size) ) { \
MIDIQ_MOVE((head).b, (buf) + (head).s - (head).h, sizeof(*(head).b) * ((size) - (head).s + (head).h) ); \
} \
@@ -67,7 +67,7 @@ struct name { \
#define MIDIQ_DEQ_I(head, buf, size, move, update) do { \
if(MIDIQ_FULL(head)) (head).h=(head).t; \
- if (move) MIDIQ_MOVE((buf), &(head).b[(head).t], sizeof(*(head).b) * MIN((size), (head).s - (head).t)); \
+ if (move) MIDIQ_MOVE((buf), &(head).b[(head).t], sizeof(*(head).b) * min((size), (head).s - (head).t)); \
if( (head).s - (head).t < (size) ) { \
if (move) MIDIQ_MOVE((buf) + (head).s - (head).t, (head).b, sizeof(*(head).b) * ((size) - (head).s + (head).t) ); \
} \
diff --git a/sys/dev/sound/pci/cs4281.c b/sys/dev/sound/pci/cs4281.c
index 5b0b229a021b..eecfc740bb3f 100644
--- a/sys/dev/sound/pci/cs4281.c
+++ b/sys/dev/sound/pci/cs4281.c
@@ -350,7 +350,7 @@ cs4281chan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize)
/* 2 interrupts are possible and used in buffer (half-empty,empty),
* hence factor of 2. */
- ch->blksz = MIN(blocksize, sc->bufsz / 2);
+ ch->blksz = min(blocksize, sc->bufsz / 2);
sndbuf_resize(ch->buffer, 2, ch->blksz);
ch->dma_setup = 0;
adcdac_prog(ch);
diff --git a/sys/dev/sound/pci/hdspe-pcm.c b/sys/dev/sound/pci/hdspe-pcm.c
index 678693960e5e..0ccdf69c32ee 100644
--- a/sys/dev/sound/pci/hdspe-pcm.c
+++ b/sys/dev/sound/pci/hdspe-pcm.c
@@ -474,7 +474,7 @@ buffer_mux_port(uint32_t *dma, uint32_t *pcm, uint32_t subset, uint32_t ports,
channels = hdspe_channel_count(ports, pcm_width);
/* Only copy as much as supported by both hardware and pcm channel. */
- slots = hdspe_port_slot_width(subset, MIN(adat_width, pcm_width));
+ slots = hdspe_port_slot_width(subset, min(adat_width, pcm_width));
/* Let the compiler inline and loop unroll common cases. */
if (slots == 2)
@@ -520,7 +520,7 @@ buffer_demux_port(uint32_t *dma, uint32_t *pcm, uint32_t subset, uint32_t ports,
channels = hdspe_channel_count(ports, pcm_width);
/* Only copy as much as supported by both hardware and pcm channel. */
- slots = hdspe_port_slot_width(subset, MIN(adat_width, pcm_width));
+ slots = hdspe_port_slot_width(subset, min(adat_width, pcm_width));
/* Let the compiler inline and loop unroll common cases. */
if (slots == 2)
diff --git a/sys/dev/sound/usb/uaudio.c b/sys/dev/sound/usb/uaudio.c
index 0987ca12d933..7f49bae9ce5e 100644
--- a/sys/dev/sound/usb/uaudio.c
+++ b/sys/dev/sound/usb/uaudio.c
@@ -150,7 +150,7 @@ SYSCTL_INT(_hw_usb_uaudio, OID_AUTO, debug, CTLFLAG_RWTUN,
#define UAUDIO_NCHANBUFS 2 /* number of outstanding request */
#define UAUDIO_RECURSE_LIMIT 255 /* rounds */
#define UAUDIO_BITS_MAX 32 /* maximum sample size in bits */
-#define UAUDIO_CHANNELS_MAX MIN(64, AFMT_CHANNEL_MAX)
+#define UAUDIO_CHANNELS_MAX min(64, AFMT_CHANNEL_MAX)
#define UAUDIO_MATRIX_MAX 8 /* channels */
#define MAKE_WORD(h,l) (((h) << 8) | (l))
@@ -1651,7 +1651,7 @@ uaudio20_check_rate(struct usb_device *udev, uint8_t iface_no,
* buffer. Try using a larger buffer and see if that
* helps:
*/
- rates = MIN(UAUDIO20_MAX_RATES, (255 - 2) / 12);
+ rates = min(UAUDIO20_MAX_RATES, (255 - 2) / 12);
error = USB_ERR_INVAL;
} else {
rates = UGETW(data);
diff --git a/sys/dev/sym/sym_hipd.c b/sys/dev/sym/sym_hipd.c
index b4e5c1075fb4..f78d595a73ce 100644
--- a/sys/dev/sym/sym_hipd.c
+++ b/sys/dev/sym/sym_hipd.c
@@ -58,7 +58,6 @@
*/
#include <sys/cdefs.h>
-#define SYM_DRIVER_NAME "sym-1.6.5-20000902"
/* #define SYM_DEBUG_GENERIC_SUPPORT */
@@ -114,27 +113,16 @@ typedef u_int32_t u32;
#include <dev/sym/sym_fw.h>
/*
- * IA32 architecture does not reorder STORES and prevents
- * LOADS from passing STORES. It is called `program order'
- * by Intel and allows device drivers to deal with memory
- * ordering by only ensuring that the code is not reordered
- * by the compiler when ordering is required.
- * Other architectures implement a weaker ordering that
- * requires memory barriers (and also IO barriers when they
- * make sense) to be used.
- */
-#if defined __i386__ || defined __amd64__
-#define MEMORY_BARRIER() do { ; } while(0)
-#elif defined __powerpc__
-#define MEMORY_BARRIER() __asm__ volatile("eieio; sync" : : : "memory")
-#elif defined __arm__
-#define MEMORY_BARRIER() dmb()
-#elif defined __aarch64__
-#define MEMORY_BARRIER() dmb(sy)
-#elif defined __riscv
-#define MEMORY_BARRIER() fence()
+ * Architectures may implement weak ordering that requires memory barriers
+ * to be used for LOADS and STORES to become globally visible (and also IO
+ * barriers when they make sense).
+ */
+#ifdef __powerpc__
+#define MEMORY_READ_BARRIER() __asm__ volatile("eieio; sync" : : : "memory")
+#define MEMORY_WRITE_BARRIER() MEMORY_READ_BARRIER()
#else
-#error "Not supported platform"
+#define MEMORY_READ_BARRIER() rmb()
+#define MEMORY_WRITE_BARRIER() wmb()
#endif
/*
@@ -892,13 +880,13 @@ struct sym_nvram {
*/
#define OUTL_DSP(v) \
do { \
- MEMORY_BARRIER(); \
+ MEMORY_WRITE_BARRIER(); \
OUTL (nc_dsp, (v)); \
} while (0)
#define OUTONB_STD() \
do { \
- MEMORY_BARRIER(); \
+ MEMORY_WRITE_BARRIER(); \
OUTONB (nc_dcntl, (STD|NOCOM)); \
} while (0)
@@ -2012,8 +2000,8 @@ static void sym_fw_bind_script (hcb_p np, u32 *start, int len)
* command.
*/
if (opcode == 0) {
- printf ("%s: ERROR0 IN SCRIPT at %d.\n",
- sym_name(np), (int) (cur-start));
+ device_printf(np->device, "ERROR0 IN SCRIPT at %d.\n",
+ (int)(cur-start));
MDELAY (10000);
++cur;
continue;
@@ -2056,8 +2044,9 @@ static void sym_fw_bind_script (hcb_p np, u32 *start, int len)
tmp1 = cur[1];
tmp2 = cur[2];
if ((tmp1 ^ tmp2) & 3) {
- printf ("%s: ERROR1 IN SCRIPT at %d.\n",
- sym_name(np), (int) (cur-start));
+ device_printf(np->device,
+ "ERROR1 IN SCRIPT at %d.\n",
+ (int)(cur-start));
MDELAY (10000);
}
/*
@@ -2248,10 +2237,11 @@ static void sym_update_dflags(hcb_p np, u_char *flags,
struct ccb_trans_settings *cts);
static const struct sym_pci_chip *sym_find_pci_chip (device_t dev);
-static int sym_pci_probe (device_t dev);
-static int sym_pci_attach (device_t dev);
-static void sym_pci_free (hcb_p np);
+static device_probe_t sym_pci_probe;
+static device_attach_t sym_pci_attach;
+static device_detach_t sym_pci_detach;
+
static int sym_cam_attach (hcb_p np);
static void sym_cam_free (hcb_p np);
@@ -2426,8 +2416,8 @@ static void sym_print_targets_flag(hcb_p np, int mask, char *msg)
continue;
if (np->target[i].usrflags & mask) {
if (!cnt++)
- printf("%s: %s disabled for targets",
- sym_name(np), msg);
+ device_printf(np->device,
+ "%s disabled for targets", msg);
printf(" %d", i);
}
}
@@ -2750,41 +2740,42 @@ static int sym_prepare_setting(hcb_p np, struct sym_nvram *nvram)
* Let user know about the settings.
*/
i = nvram->type;
- printf("%s: %s NVRAM, ID %d, Fast-%d, %s, %s\n", sym_name(np),
- i == SYM_SYMBIOS_NVRAM ? "Symbios" :
- (i == SYM_TEKRAM_NVRAM ? "Tekram" : "No"),
- np->myaddr,
- (np->features & FE_ULTRA3) ? 80 :
- (np->features & FE_ULTRA2) ? 40 :
- (np->features & FE_ULTRA) ? 20 : 10,
- sym_scsi_bus_mode(np->scsi_mode),
- (np->rv_scntl0 & 0xa) ? "parity checking" : "NO parity");
+ device_printf(np->device, "%s NVRAM, ID %d, Fast-%d, %s, %s\n",
+ i == SYM_SYMBIOS_NVRAM ? "Symbios" :
+ (i == SYM_TEKRAM_NVRAM ? "Tekram" : "No"),
+ np->myaddr,
+ (np->features & FE_ULTRA3) ? 80 :
+ (np->features & FE_ULTRA2) ? 40 :
+ (np->features & FE_ULTRA) ? 20 : 10,
+ sym_scsi_bus_mode(np->scsi_mode),
+ (np->rv_scntl0 & 0xa) ? "parity checking" : "NO parity");
/*
* Tell him more on demand.
*/
if (sym_verbose) {
- printf("%s: %s IRQ line driver%s\n",
- sym_name(np),
- np->rv_dcntl & IRQM ? "totem pole" : "open drain",
- np->ram_ba ? ", using on-chip SRAM" : "");
- printf("%s: using %s firmware.\n", sym_name(np), np->fw_name);
+ device_printf(np->device, "%s IRQ line driver%s\n",
+ np->rv_dcntl & IRQM ? "totem pole" : "open drain",
+ np->ram_ba ? ", using on-chip SRAM" : "");
+ device_printf(np->device, "using %s firmware.\n", np->fw_name);
if (np->features & FE_NOPM)
- printf("%s: handling phase mismatch from SCRIPTS.\n",
- sym_name(np));
+ device_printf(np->device,
+ "handling phase mismatch from SCRIPTS.\n");
}
/*
* And still more.
*/
if (sym_verbose > 1) {
- printf ("%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
- "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
- sym_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl,
- np->sv_ctest3, np->sv_ctest4, np->sv_ctest5);
+ device_printf(np->device,
+ "initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
+ "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
+ np->sv_scntl3, np->sv_dmode, np->sv_dcntl, np->sv_ctest3,
+ np->sv_ctest4, np->sv_ctest5);
- printf ("%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
- "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
- sym_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl,
- np->rv_ctest3, np->rv_ctest4, np->rv_ctest5);
+ device_printf(np->device,
+ "final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
+ "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
+ np->rv_scntl3, np->rv_dmode, np->rv_dcntl,
+ np->rv_ctest3, np->rv_ctest4, np->rv_ctest5);
}
/*
* Let user be aware of targets that have some disable flags set.
@@ -2905,19 +2896,19 @@ static void sym_put_start_queue(hcb_p np, ccb_p cp)
if (qidx >= MAX_QUEUE*2) qidx = 0;
np->squeue [qidx] = cpu_to_scr(np->idletask_ba);
- MEMORY_BARRIER();
+ MEMORY_WRITE_BARRIER();
np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba);
np->squeueput = qidx;
if (DEBUG_FLAGS & DEBUG_QUEUE)
- printf ("%s: queuepos=%d.\n", sym_name (np), np->squeueput);
+ device_printf(np->device, "queuepos=%d.\n", np->squeueput);
/*
* Script processor may be waiting for reselect.
* Wake it up.
*/
- MEMORY_BARRIER();
+ MEMORY_WRITE_BARRIER();
OUTB (nc_istat, SIGP|np->istat_sem);
}
@@ -2965,8 +2956,8 @@ static void sym_soft_reset (hcb_p np)
}
}
if (!i)
- printf("%s: unable to abort current chip operation.\n",
- sym_name(np));
+ device_printf(np->device,
+ "unable to abort current chip operation.\n");
sym_chip_reset (np);
}
@@ -3016,13 +3007,12 @@ static int sym_reset_scsi_bus(hcb_p np, int enab_int)
term &= 0x3ffff;
if (term != (2<<7)) {
- printf("%s: suspicious SCSI data while resetting the BUS.\n",
- sym_name(np));
- printf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = "
- "0x%lx, expecting 0x%lx\n",
- sym_name(np),
- (np->features & FE_WIDE) ? "dp1,d15-8," : "",
- (u_long)term, (u_long)(2<<7));
+ device_printf(np->device,
+ "suspicious SCSI data while resetting the BUS.\n");
+ device_printf(np->device,
+ "%sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = "
+ "0x%lx, expecting 0x%lx\n", (np->features & FE_WIDE) ?
+ "dp1,d15-8," : "", (u_long)term, (u_long)(2 << 7));
if (SYM_SETUP_SCSI_BUS_CHECK == 1)
retv = 1;
}
@@ -3059,13 +3049,12 @@ static int sym_wakeup_done (hcb_p np)
cp = sym_ccb_from_dsa(np, dsa);
if (cp) {
- MEMORY_BARRIER();
+ MEMORY_READ_BARRIER();
sym_complete_ok (np, cp);
++n;
- }
- else
- printf ("%s: bad DSA (%x) in done queue.\n",
- sym_name(np), (u_int) dsa);
+ } else
+ device_printf(np->device,
+ "bad DSA (%x) in done queue.\n", (u_int)dsa);
}
np->dqueueget = i;
@@ -3286,8 +3275,8 @@ static void sym_init (hcb_p np, int reason)
*/
if (np->ram_ba) {
if (sym_verbose > 1)
- printf ("%s: Downloading SCSI SCRIPTS.\n",
- sym_name(np));
+ device_printf(np->device,
+ "Downloading SCSI SCRIPTS.\n");
if (np->ram_ws == 8192) {
OUTRAM_OFF(4096, np->scriptb0, np->scriptb_sz);
OUTL (nc_mmws, np->scr_ram_seg);
@@ -3710,11 +3699,11 @@ static void sym_log_hard_error(hcb_p np, u_short sist, u_char dstat)
if (((script_ofs & 3) == 0) &&
(unsigned)script_ofs < script_size) {
- printf ("%s: script cmd = %08x\n", sym_name(np),
- scr_to_cpu((int) *(u32 *)(script_base + script_ofs)));
+ device_printf(np->device, "script cmd = %08x\n",
+ scr_to_cpu((int) *(u32 *)(script_base + script_ofs)));
}
- printf ("%s: regdump:", sym_name(np));
+ device_printf(np->device, "regdump:");
for (i = 0; i < 24; i++)
printf (" %02x", (unsigned)INB_OFF(i));
printf (".\n");
@@ -3727,8 +3716,8 @@ static void sym_log_hard_error(hcb_p np, u_short sist, u_char dstat)
pci_sts = pci_read_config(np->device, PCIR_STATUS, 2);
if (pci_sts & 0xf900) {
pci_write_config(np->device, PCIR_STATUS, pci_sts, 2);
- printf("%s: PCI STATUS = 0x%04x\n",
- sym_name(np), pci_sts & 0xf900);
+ device_printf(np->device, "PCI STATUS = 0x%04x\n",
+ pci_sts & 0xf900);
}
}
}
@@ -3858,7 +3847,7 @@ static void sym_intr1 (hcb_p np)
* On paper, a memory barrier may be needed here.
* And since we are paranoid ... :)
*/
- MEMORY_BARRIER();
+ MEMORY_READ_BARRIER();
/*
* First, interrupts we want to service cleanly.
@@ -3933,9 +3922,9 @@ unknown_int:
* We just miss the cause of the interrupt. :(
* Print a message. The timeout will do the real work.
*/
- printf( "%s: unknown interrupt(s) ignored, "
- "ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n",
- sym_name(np), istat, dstat, sist);
+ device_printf(np->device,
+ "unknown interrupt(s) ignored, ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n",
+ istat, dstat, sist);
}
static void sym_intr(void *arg)
@@ -4050,7 +4039,7 @@ static void sym_int_sto (hcb_p np)
*/
static void sym_int_udc (hcb_p np)
{
- printf ("%s: unexpected disconnect\n", sym_name(np));
+ device_printf(np->device, "unexpected disconnect\n");
sym_recover_scsi_int(np, HS_UNEXPECTED);
}
@@ -4117,8 +4106,9 @@ static void sym_int_par (hcb_p np, u_short sist)
int phase = cmd & 7;
ccb_p cp = sym_ccb_from_dsa(np, dsa);
- printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n",
- sym_name(np), hsts, dbc, sbcl);
+ device_printf(np->device,
+ "SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n", hsts, dbc,
+ sbcl);
/*
* Check that the chip is connected to the SCSI BUS.
@@ -4305,14 +4295,14 @@ static void sym_int_ma (hcb_p np)
}
if (!vdsp) {
- printf ("%s: interrupted SCRIPT address not found.\n",
- sym_name (np));
+ device_printf(np->device,
+ "interrupted SCRIPT address not found.\n");
goto reset_all;
}
if (!cp) {
- printf ("%s: SCSI phase error fixup: CCB already dequeued.\n",
- sym_name (np));
+ device_printf(np->device,
+ "SCSI phase error fixup: CCB already dequeued.\n");
goto reset_all;
}
@@ -6757,15 +6747,15 @@ restart_test:
dstat = INB (nc_dstat);
#if 1 /* Band aiding for broken hardwares that fail PCI parity */
if ((dstat & MDPE) && (np->rv_ctest4 & MPEE)) {
- printf ("%s: PCI DATA PARITY ERROR DETECTED - "
- "DISABLING MASTER DATA PARITY CHECKING.\n",
- sym_name(np));
+ device_printf(np->device, "PCI DATA PARITY ERROR DETECTED - "
+ "DISABLING MASTER DATA PARITY CHECKING.\n");
np->rv_ctest4 &= ~MPEE;
goto restart_test;
}
#endif
if (dstat & (MDPE|BF|IID)) {
- printf ("CACHE TEST FAILED: DMA error (dstat=0x%02x).", dstat);
+ device_printf(np->device,
+ "CACHE TEST FAILED: DMA error (dstat=0x%02x).\n", dstat);
return (0x80);
}
/*
@@ -6783,28 +6773,32 @@ restart_test:
* Check termination position.
*/
if (pc != SCRIPTB0_BA (np, snoopend)+8) {
- printf ("CACHE TEST FAILED: script execution failed.\n");
- printf ("start=%08lx, pc=%08lx, end=%08lx\n",
- (u_long) SCRIPTB0_BA (np, snooptest), (u_long) pc,
- (u_long) SCRIPTB0_BA (np, snoopend) +8);
+ device_printf(np->device,
+ "CACHE TEST FAILED: script execution failed.\n");
+ device_printf(np->device, "start=%08lx, pc=%08lx, end=%08lx\n",
+ (u_long)SCRIPTB0_BA(np, snooptest), (u_long)pc,
+ (u_long)SCRIPTB0_BA(np, snoopend) + 8);
return (0x40);
}
/*
* Show results.
*/
if (host_wr != sym_rd) {
- printf ("CACHE TEST FAILED: host wrote %d, chip read %d.\n",
- (int) host_wr, (int) sym_rd);
+ device_printf(np->device,
+ "CACHE TEST FAILED: host wrote %d, chip read %d.\n",
+ (int)host_wr, (int)sym_rd);
err |= 1;
}
if (host_rd != sym_wr) {
- printf ("CACHE TEST FAILED: chip wrote %d, host read %d.\n",
- (int) sym_wr, (int) host_rd);
+ device_printf(np->device,
+ "CACHE TEST FAILED: chip wrote %d, host read %d.\n",
+ (int)sym_wr, (int)host_rd);
err |= 2;
}
if (sym_bk != sym_wr) {
- printf ("CACHE TEST FAILED: chip wrote %d, read back %d.\n",
- (int) sym_wr, (int) sym_bk);
+ device_printf(np->device,
+ "CACHE TEST FAILED: chip wrote %d, read back %d.\n",
+ (int)sym_wr, (int)sym_bk);
err |= 4;
}
@@ -6843,7 +6837,7 @@ static void sym_selectclock(hcb_p np, u_char scntl3)
}
if (sym_verbose >= 2)
- printf ("%s: enabling clock multiplier\n", sym_name(np));
+ device_printf(np->device, "enabling clock multiplier\n");
OUTB(nc_stest1, DBLEN); /* Enable clock multiplier */
/*
@@ -6855,8 +6849,8 @@ static void sym_selectclock(hcb_p np, u_char scntl3)
while (!(INB(nc_stest4) & LCKFRQ) && --i > 0)
UDELAY (20);
if (!i)
- printf("%s: the chip cannot lock the frequency\n",
- sym_name(np));
+ device_printf(np->device,
+ "the chip cannot lock the frequency\n");
} else
UDELAY (20);
OUTB(nc_stest3, HSC); /* Halt the scsi clock */
@@ -6911,8 +6905,8 @@ static unsigned getfreq (hcb_p np, int gen)
f = ms ? ((1 << gen) * 4340) / ms : 0;
if (sym_verbose >= 2)
- printf ("%s: Delay (GEN=%d): %u msec, %u KHz\n",
- sym_name(np), gen, ms, f);
+ device_printf(np->device, "Delay (GEN=%d): %u msec, %u KHz\n",
+ gen, ms, f);
return f;
}
@@ -6954,7 +6948,7 @@ static void sym_getclock (hcb_p np, int mult)
*/
if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
if (sym_verbose >= 2)
- printf ("%s: clock multiplier found\n", sym_name(np));
+ device_printf(np->device, "clock multiplier found\n");
np->multiplier = mult;
}
@@ -6968,7 +6962,7 @@ static void sym_getclock (hcb_p np, int mult)
f1 = sym_getfreq (np);
if (sym_verbose)
- printf ("%s: chip clock is %uKHz\n", sym_name(np), f1);
+ device_printf(np->device, "chip clock is %uKHz\n", f1);
if (f1 < 45000) f1 = 40000;
else if (f1 < 55000) f1 = 50000;
@@ -6976,8 +6970,8 @@ static void sym_getclock (hcb_p np, int mult)
if (f1 < 80000 && mult > 1) {
if (sym_verbose >= 2)
- printf ("%s: clock multiplier assumed\n",
- sym_name(np));
+ device_printf(np->device,
+ "clock multiplier assumed\n");
np->multiplier = mult;
}
} else {
@@ -7146,7 +7140,7 @@ static void sym_complete_error (hcb_p np, ccb_p cp)
sense_returned;
else
csio->sense_resid = 0;
- bcopy(cp->sns_bbuf, &csio->sense_data,
+ memcpy(&csio->sense_data, cp->sns_bbuf,
MIN(csio->sense_len, sense_returned));
#if 0
/*
@@ -7631,7 +7625,7 @@ static int sym_setup_cdb(hcb_p np, struct ccb_scsiio *csio, ccb_p cp)
/* CDB is a pointer */
if (!(ccb_h->flags & CAM_CDB_PHYS)) {
/* CDB pointer is virtual */
- bcopy(csio->cdb_io.cdb_ptr, cp->cdb_buf, cmd_len);
+ memcpy(cp->cdb_buf, csio->cdb_io.cdb_ptr, cmd_len);
cmd_ba = CCB_BA (cp, cdb_buf[0]);
} else {
/* CDB pointer is physical */
@@ -7644,7 +7638,7 @@ static int sym_setup_cdb(hcb_p np, struct ccb_scsiio *csio, ccb_p cp)
}
} else {
/* CDB is in the CAM ccb (buffer) */
- bcopy(csio->cdb_io.cdb_bytes, cp->cdb_buf, cmd_len);
+ memcpy(cp->cdb_buf, csio->cdb_io.cdb_bytes, cmd_len);
cmd_ba = CCB_BA (cp, cdb_buf[0]);
}
@@ -7858,9 +7852,9 @@ sym_fast_scatter_sg_physical(hcb_p np, ccb_p cp,
data->addr = cpu_to_scr(psegs2->ds_addr);
data->size = cpu_to_scr(psegs2->ds_len);
if (DEBUG_FLAGS & DEBUG_SCATTER) {
- printf ("%s scatter: paddr=%lx len=%ld\n",
- sym_name(np), (long) psegs2->ds_addr,
- (long) psegs2->ds_len);
+ device_printf(np->device,
+ "scatter: paddr=%lx len=%ld\n",
+ (long)psegs2->ds_addr, (long)psegs2->ds_len);
}
if (psegs2 != psegs) {
--data;
@@ -7895,8 +7889,8 @@ sym_scatter_sg_physical(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs)
pn = ps;
k = pe - pn;
if (DEBUG_FLAGS & DEBUG_SCATTER) {
- printf ("%s scatter: paddr=%lx len=%ld\n",
- sym_name(np), pn, k);
+ device_printf(np->device,
+ "scatter: paddr=%lx len=%ld\n", pn, k);
}
cp->phys.data[s].addr = cpu_to_scr(pn);
cp->phys.data[s].size = cpu_to_scr(k);
@@ -8232,6 +8226,7 @@ sym_update_dflags(hcb_p np, u_char *flags, struct ccb_trans_settings *cts)
static device_method_t sym_pci_methods[] = {
DEVMETHOD(device_probe, sym_pci_probe),
DEVMETHOD(device_attach, sym_pci_attach),
+ DEVMETHOD(device_detach, sym_pci_detach),
DEVMETHOD_END
};
@@ -8631,8 +8626,8 @@ sym_pci_attach(device_t dev)
/*
* Copy scripts to controller instance.
*/
- bcopy(fw->a_base, np->scripta0, np->scripta_sz);
- bcopy(fw->b_base, np->scriptb0, np->scriptb_sz);
+ memcpy(np->scripta0, fw->a_base, np->scripta_sz);
+ memcpy(np->scriptb0, fw->b_base, np->scriptb_sz);
/*
* Setup variable parts in scripts and compute
@@ -8735,21 +8730,25 @@ sym_pci_attach(device_t dev)
*/
attach_failed:
if (np)
- sym_pci_free(np);
+ sym_pci_detach(dev);
return ENXIO;
}
/*
- * Free everything that have been allocated for this device.
+ * Detach a device by freeing everything that has been allocated for it.
*/
-static void sym_pci_free(hcb_p np)
+static int
+sym_pci_detach(device_t dev)
{
+ hcb_p np;
SYM_QUEHEAD *qp;
ccb_p cp;
tcb_p tp;
lcb_p lp;
int target, lun;
+ np = device_get_softc(dev);
+
/*
* First free CAM resources.
*/
@@ -8824,6 +8823,8 @@ static void sym_pci_free(hcb_p np)
SYM_LOCK_DESTROY();
device_set_softc(np->device, NULL);
sym_mfree_dma(np, sizeof(*np), "HCB");
+
+ return (0);
}
/*
@@ -8897,11 +8898,6 @@ static int sym_cam_attach(hcb_p np)
return 1;
fail:
- if (sim)
- cam_sim_free(sim, FALSE);
- if (devq)
- cam_simq_free(devq);
-
SYM_UNLOCK();
sym_cam_free(np);
@@ -8924,15 +8920,16 @@ static void sym_cam_free(hcb_p np)
SYM_LOCK();
+ if (np->path) {
+ xpt_async(AC_LOST_DEVICE, np->path, NULL);
+ xpt_free_path(np->path);
+ np->path = NULL;
+ }
if (np->sim) {
xpt_bus_deregister(cam_sim_path(np->sim));
cam_sim_free(np->sim, /*free_devq*/ TRUE);
np->sim = NULL;
}
- if (np->path) {
- xpt_free_path(np->path);
- np->path = NULL;
- }
SYM_UNLOCK();
}
@@ -9057,14 +9054,14 @@ static void sym_display_Symbios_nvram(hcb_p np, Symbios_nvram *nvram)
int i;
/* display Symbios nvram host data */
- printf("%s: HOST ID=%d%s%s%s%s%s%s\n",
- sym_name(np), nvram->host_id & 0x0f,
- (nvram->flags & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
- (nvram->flags & SYMBIOS_PARITY_ENABLE) ? " PARITY" :"",
- (nvram->flags & SYMBIOS_VERBOSE_MSGS) ? " VERBOSE" :"",
- (nvram->flags & SYMBIOS_CHS_MAPPING) ? " CHS_ALT" :"",
- (nvram->flags2 & SYMBIOS_AVOID_BUS_RESET)?" NO_RESET" :"",
- (nvram->flags1 & SYMBIOS_SCAN_HI_LO) ? " HI_LO" :"");
+ device_printf(np->device, "HOST ID=%d%s%s%s%s%s%s\n",
+ nvram->host_id & 0x0f,
+ (nvram->flags & SYMBIOS_SCAM_ENABLE) ? " SCAM" : "",
+ (nvram->flags & SYMBIOS_PARITY_ENABLE) ? " PARITY" : "",
+ (nvram->flags & SYMBIOS_VERBOSE_MSGS) ? " VERBOSE" : "",
+ (nvram->flags & SYMBIOS_CHS_MAPPING) ? " CHS_ALT" : "",
+ (nvram->flags2 & SYMBIOS_AVOID_BUS_RESET) ? " NO_RESET" : "",
+ (nvram->flags1 & SYMBIOS_SCAN_HI_LO) ? " HI_LO" : "");
/* display Symbios nvram drive data */
for (i = 0 ; i < 15 ; i++) {
@@ -9102,17 +9099,18 @@ static void sym_display_Tekram_nvram(hcb_p np, Tekram_nvram *nvram)
case 2: rem = " REMOVABLE=all"; break;
}
- printf("%s: HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n",
- sym_name(np), nvram->host_id & 0x0f,
- (nvram->flags1 & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
- (nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES" :"",
- (nvram->flags & TEKRAM_DRIVES_SUP_1GB) ? " >1GB" :"",
- (nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET" :"",
- (nvram->flags & TEKRAM_ACTIVE_NEGATION) ? " ACT_NEG" :"",
- (nvram->flags & TEKRAM_IMMEDIATE_SEEK) ? " IMM_SEEK" :"",
- (nvram->flags & TEKRAM_SCAN_LUNS) ? " SCAN_LUNS" :"",
- (nvram->flags1 & TEKRAM_F2_F6_ENABLED) ? " F2_F6" :"",
- rem, boot_delay, tags);
+ device_printf(np->device,
+ "HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n",
+ nvram->host_id & 0x0f,
+ (nvram->flags1 & SYMBIOS_SCAM_ENABLE) ? " SCAM" : "",
+ (nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES" : "",
+ (nvram->flags & TEKRAM_DRIVES_SUP_1GB) ? " >1GB" : "",
+ (nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET" : "",
+ (nvram->flags & TEKRAM_ACTIVE_NEGATION) ? " ACT_NEG" : "",
+ (nvram->flags & TEKRAM_IMMEDIATE_SEEK) ? " IMM_SEEK" : "",
+ (nvram->flags & TEKRAM_SCAN_LUNS) ? " SCAN_LUNS" : "",
+ (nvram->flags1 & TEKRAM_F2_F6_ENABLED) ? " F2_F6" : "",
+ rem, boot_delay, tags);
/* display Tekram nvram drive data */
for (i = 0; i <= 15; i++) {
diff --git a/sys/dev/ufshci/ufshci_sim.c b/sys/dev/ufshci/ufshci_sim.c
index bee1fff858ff..404f3e99d1e2 100644
--- a/sys/dev/ufshci/ufshci_sim.c
+++ b/sys/dev/ufshci/ufshci_sim.c
@@ -494,7 +494,7 @@ ufshci_sim_send_ssu(struct ufshci_controller *ctrlr, bool start,
return ENOMEM;
}
- scsi_start_stop(&ccb->csio,
+ scsi_start_stop_pc(&ccb->csio,
/*retries*/ 4,
/*cbfcnp*/ NULL,
/*tag_action*/ MSG_SIMPLE_Q_TAG,
diff --git a/sys/dev/usb/usb_device.c b/sys/dev/usb/usb_device.c
index f0989972f49f..4e0268110787 100644
--- a/sys/dev/usb/usb_device.c
+++ b/sys/dev/usb/usb_device.c
@@ -3127,7 +3127,7 @@ usbd_fill_deviceinfo(struct usb_device *udev, struct usb_device_info *di)
{
struct usb_device *hub;
- bzero(di, sizeof(di[0]));
+ memset(di, 0, sizeof(di[0]));
di->udi_bus = device_get_unit(udev->bus->bdev);
di->udi_addr = udev->address;
diff --git a/sys/dev/usb/wlan/if_mtw.c b/sys/dev/usb/wlan/if_mtw.c
index 8384c0a2d9fc..9d256056f6b2 100644
--- a/sys/dev/usb/wlan/if_mtw.c
+++ b/sys/dev/usb/wlan/if_mtw.c
@@ -64,6 +64,7 @@
#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
+#include <dev/usb/usb_request.h>
#include "usbdevs.h"
@@ -525,6 +526,15 @@ mtw_attach(device_t self)
sc->sc_dev = self;
sc->sc_sent = 0;
+ /*
+ * Reset the device to clear any stale state left over from
+ * a previous warm reboot. Some MT7601U devices fail otherwise.
+ */
+ error = usbd_req_re_enumerate(uaa->device, NULL);
+ if (error != 0)
+ device_printf(self, "USB re-enumerate failed, continuing\n");
+ DELAY(100000); /* 100ms settle time */
+
mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev),
MTX_NETWORK_LOCK, MTX_DEF);
@@ -585,7 +595,7 @@ mtw_attach(device_t self)
sc->mac_rev = tmp & 0xffff;
mtw_load_microcode(sc);
- ret = msleep(&sc->fwloading, &sc->sc_mtx, 0, "fwload", 3 * hz);
+ ret = msleep(&sc->fwloading, &sc->sc_mtx, 0, "fwload", 10 * hz);
if (ret == EWOULDBLOCK || sc->fwloading != 1) {
device_printf(sc->sc_dev,
"timeout waiting for MCU to initialize\n");
@@ -1105,11 +1115,22 @@ mtw_load_microcode(void *arg)
// int ntries;
int dlen, ilen;
device_printf(sc->sc_dev, "version:0x%hx\n", sc->asic_ver);
- /* is firmware already running? */
+ /*
+ * Firmware may still be running from a previous warm reboot.
+ * Force a reset of the MCU to ensure a clean state.
+ */
mtw_read_cfg(sc, MTW_MCU_DMA_ADDR, &tmp);
if (tmp == MTW_MCU_READY) {
- return;
+ device_printf(sc->sc_dev, "MCU already running, resetting\n");
+ mtw_write(sc, MTW_MCU_RESET_CTL, MTW_RESET);
+ DELAY(10000);
+ mtw_write(sc, MTW_MCU_RESET_CTL, 0);
+ DELAY(10000);
+ /* Clear ready flag */
+ mtw_write_cfg(sc, MTW_MCU_DMA_ADDR, 0);
+ DELAY(1000);
}
+
if (sc->asic_ver == 0x7612) {
fwname = "mtw-mt7662u_rom_patch";
@@ -2856,7 +2877,7 @@ mtw_fw_callback(struct usb_xfer *xfer, usb_error_t error)
}
mtw_delay(sc, 10);
- for (ntries = 0; ntries < 100; ntries++) {
+ for (ntries = 0; ntries < 300; ntries++) {
if ((error = mtw_read_cfg(sc, MTW_MCU_DMA_ADDR,
&tmp)) != 0) {
device_printf(sc->sc_dev,
@@ -2870,9 +2891,9 @@ mtw_fw_callback(struct usb_xfer *xfer, usb_error_t error)
break;
}
- mtw_delay(sc, 10);
+ mtw_delay(sc, 30);
}
- if (ntries == 100)
+ if (ntries == 300)
sc->fwloading = 0;
wakeup(&sc->fwloading);
return;
diff --git a/sys/dev/vmm/vmm_dev.c b/sys/dev/vmm/vmm_dev.c
index 840e810a39fb..fa51dc950459 100644
--- a/sys/dev/vmm/vmm_dev.c
+++ b/sys/dev/vmm/vmm_dev.c
@@ -33,6 +33,7 @@
#include <dev/vmm/vmm_dev.h>
#include <dev/vmm/vmm_mem.h>
#include <dev/vmm/vmm_stat.h>
+#include <dev/vmm/vmm_vm.h>
#ifdef __amd64__
#ifdef COMPAT_FREEBSD12
@@ -140,38 +141,6 @@ vcpu_unlock_one(struct vcpu *vcpu)
vcpu_set_state(vcpu, VCPU_IDLE, false);
}
-#ifndef __amd64__
-static int
-vcpu_set_state_all(struct vm *vm, enum vcpu_state newstate)
-{
- struct vcpu *vcpu;
- int error;
- uint16_t i, j, maxcpus;
-
- error = 0;
- maxcpus = vm_get_maxcpus(vm);
- for (i = 0; i < maxcpus; i++) {
- vcpu = vm_vcpu(vm, i);
- if (vcpu == NULL)
- continue;
- error = vcpu_lock_one(vcpu);
- if (error)
- break;
- }
-
- if (error) {
- for (j = 0; j < i; j++) {
- vcpu = vm_vcpu(vm, j);
- if (vcpu == NULL)
- continue;
- vcpu_unlock_one(vcpu);
- }
- }
-
- return (error);
-}
-#endif
-
static int
vcpu_lock_all(struct vmmdev_softc *sc)
{
@@ -1259,9 +1228,11 @@ vmm_handler(module_t mod, int what, void *arg)
if (error == 0)
vmm_initialized = true;
else {
- error = vmmdev_cleanup();
- KASSERT(error == 0,
- ("%s: vmmdev_cleanup failed: %d", __func__, error));
+ int error1 __diagused;
+
+ error1 = vmmdev_cleanup();
+ KASSERT(error1 == 0,
+ ("%s: vmmdev_cleanup failed: %d", __func__, error1));
}
break;
case MOD_UNLOAD:
diff --git a/sys/dev/vmm/vmm_ktr.h b/sys/dev/vmm/vmm_ktr.h
index 20370a229530..afd9831e4225 100644
--- a/sys/dev/vmm/vmm_ktr.h
+++ b/sys/dev/vmm/vmm_ktr.h
@@ -30,7 +30,9 @@
#define _VMM_KTR_H_
#include <sys/ktr.h>
-#include <sys/pcpu.h>
+#ifdef KTR
+#include <dev/vmm/vmm_vm.h>
+#endif
#ifndef KTR_VMM
#define KTR_VMM KTR_GEN
diff --git a/sys/dev/vmm/vmm_mem.c b/sys/dev/vmm/vmm_mem.c
index 5ae944713c81..5a73cbf7fc5b 100644
--- a/sys/dev/vmm/vmm_mem.c
+++ b/sys/dev/vmm/vmm_mem.c
@@ -23,6 +23,7 @@
#include <dev/vmm/vmm_dev.h>
#include <dev/vmm/vmm_mem.h>
+#include <dev/vmm/vmm_vm.h>
static void vm_free_memmap(struct vm *vm, int ident);
diff --git a/sys/dev/vmm/vmm_mem.h b/sys/dev/vmm/vmm_mem.h
index f3d22058c7b8..64bb29352a55 100644
--- a/sys/dev/vmm/vmm_mem.h
+++ b/sys/dev/vmm/vmm_mem.h
@@ -34,7 +34,10 @@ enum {
#include <sys/types.h>
#include <sys/_sx.h>
+struct domainset;
+struct vcpu;
struct vm;
+struct vm_guest_paging;
struct vm_object;
struct vmspace;
diff --git a/sys/dev/vmm/vmm_stat.h b/sys/dev/vmm/vmm_stat.h
index 471afd0dd827..469d8ef54829 100644
--- a/sys/dev/vmm/vmm_stat.h
+++ b/sys/dev/vmm/vmm_stat.h
@@ -32,6 +32,8 @@
#ifndef _DEV_VMM_STAT_H_
#define _DEV_VMM_STAT_H_
+#include <dev/vmm/vmm_vm.h>
+
struct vm;
#define MAX_VMM_STAT_ELEMS 64 /* arbitrary */
diff --git a/sys/dev/vmm/vmm_vm.c b/sys/dev/vmm/vmm_vm.c
new file mode 100644
index 000000000000..7941038ed671
--- /dev/null
+++ b/sys/dev/vmm/vmm_vm.c
@@ -0,0 +1,476 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/sx.h>
+#include <sys/sysctl.h>
+
+#include <machine/smp.h>
+
+#include <dev/vmm/vmm_vm.h>
+
+SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, NULL);
+
+int vmm_ipinum;
+SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
+ "IPI vector used for vcpu notifications");
+
+/*
+ * Invoke the rendezvous function on the specified vcpu if applicable. Return
+ * true if the rendezvous is finished, false otherwise.
+ */
+static bool
+vm_rendezvous(struct vcpu *vcpu)
+{
+ struct vm *vm = vcpu->vm;
+ int vcpuid;
+
+ mtx_assert(&vcpu->vm->rendezvous_mtx, MA_OWNED);
+ KASSERT(vcpu->vm->rendezvous_func != NULL,
+ ("vm_rendezvous: no rendezvous pending"));
+
+ /* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
+ CPU_AND(&vm->rendezvous_req_cpus, &vm->rendezvous_req_cpus,
+ &vm->active_cpus);
+
+ vcpuid = vcpu->vcpuid;
+ if (CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
+ !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
+ (*vm->rendezvous_func)(vcpu, vm->rendezvous_arg);
+ CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
+ }
+ if (CPU_CMP(&vm->rendezvous_req_cpus, &vm->rendezvous_done_cpus) == 0) {
+ CPU_ZERO(&vm->rendezvous_req_cpus);
+ vm->rendezvous_func = NULL;
+ wakeup(&vm->rendezvous_func);
+ return (true);
+ }
+ return (false);
+}
+
+int
+vm_handle_rendezvous(struct vcpu *vcpu)
+{
+ struct vm *vm;
+ struct thread *td;
+
+ td = curthread;
+ vm = vcpu->vm;
+
+ mtx_lock(&vm->rendezvous_mtx);
+ while (vm->rendezvous_func != NULL) {
+ if (vm_rendezvous(vcpu))
+ break;
+
+ mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
+ "vmrndv", hz);
+ if (td_ast_pending(td, TDA_SUSPEND)) {
+ int error;
+
+ mtx_unlock(&vm->rendezvous_mtx);
+ error = thread_check_susp(td, true);
+ if (error != 0)
+ return (error);
+ mtx_lock(&vm->rendezvous_mtx);
+ }
+ }
+ mtx_unlock(&vm->rendezvous_mtx);
+ return (0);
+}
+
+static void
+vcpu_wait_idle(struct vcpu *vcpu)
+{
+ KASSERT(vcpu->state != VCPU_IDLE, ("vcpu already idle"));
+
+ vcpu->reqidle = 1;
+ vcpu_notify_event_locked(vcpu);
+ msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
+}
+
+int
+vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
+ bool from_idle)
+{
+ int error;
+
+ vcpu_assert_locked(vcpu);
+
+ /*
+ * State transitions from the vmmdev_ioctl() must always begin from
+ * the VCPU_IDLE state. This guarantees that there is only a single
+ * ioctl() operating on a vcpu at any point.
+ */
+ if (from_idle) {
+ while (vcpu->state != VCPU_IDLE)
+ vcpu_wait_idle(vcpu);
+ } else {
+ KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
+ "vcpu idle state"));
+ }
+
+ if (vcpu->state == VCPU_RUNNING) {
+ KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
+ "mismatch for running vcpu", curcpu, vcpu->hostcpu));
+ } else {
+ KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
+ "vcpu that is not running", vcpu->hostcpu));
+ }
+
+ /*
+ * The following state transitions are allowed:
+ * IDLE -> FROZEN -> IDLE
+ * FROZEN -> RUNNING -> FROZEN
+ * FROZEN -> SLEEPING -> FROZEN
+ */
+ switch (vcpu->state) {
+ case VCPU_IDLE:
+ case VCPU_RUNNING:
+ case VCPU_SLEEPING:
+ error = (newstate != VCPU_FROZEN);
+ break;
+ case VCPU_FROZEN:
+ error = (newstate == VCPU_FROZEN);
+ break;
+ default:
+ error = 1;
+ break;
+ }
+
+ if (error)
+ return (EBUSY);
+
+ vcpu->state = newstate;
+ if (newstate == VCPU_RUNNING)
+ vcpu->hostcpu = curcpu;
+ else
+ vcpu->hostcpu = NOCPU;
+
+ if (newstate == VCPU_IDLE)
+ wakeup(&vcpu->state);
+
+ return (0);
+}
+
+/*
+ * Try to lock all of the vCPUs in the VM while taking care to avoid deadlocks
+ * with vm_smp_rendezvous().
+ *
+ * The complexity here suggests that the rendezvous mechanism needs a rethink.
+ */
+int
+vcpu_set_state_all(struct vm *vm, enum vcpu_state newstate)
+{
+	cpuset_t locked;
+	struct vcpu *vcpu;
+	int error, i, j;
+	uint16_t maxcpus;
+
+	KASSERT(newstate != VCPU_IDLE,
+	    ("vcpu_set_state_all: invalid target state %d", newstate));
+
+	error = 0;
+	CPU_ZERO(&locked);
+	maxcpus = vm->maxcpus;
+
+	mtx_lock(&vm->rendezvous_mtx);
+restart:
+	if (vm->rendezvous_func != NULL) {
+		/*
+		 * If we have a pending rendezvous, then the initiator may be
+		 * blocked waiting for other vCPUs to execute the callback. The
+		 * current thread may be a vCPU thread so we must not block
+		 * waiting for the initiator, otherwise we get a deadlock.
+		 * Thus, execute the callback on behalf of any idle vCPUs.
+		 */
+		for (i = 0; i < maxcpus; i++) {
+			vcpu = vm_vcpu(vm, i);
+			if (vcpu == NULL)
+				continue;
+			vcpu_lock(vcpu);
+			if (vcpu->state == VCPU_IDLE) {
+				(void)vcpu_set_state_locked(vcpu, VCPU_FROZEN,
+				    true);
+				CPU_SET(i, &locked);
+			}
+			if (CPU_ISSET(i, &locked)) {
+				/*
+				 * We can safely execute the callback on this
+				 * vCPU's behalf.
+				 */
+				vcpu_unlock(vcpu);
+				(void)vm_rendezvous(vcpu);
+				vcpu_lock(vcpu);
+			}
+			vcpu_unlock(vcpu);
+		}
+	}
+
+	/*
+	 * Now wait for remaining vCPUs to become idle. This may include the
+	 * initiator of a rendezvous that is currently blocked on the rendezvous
+	 * mutex.
+	 */
+	CPU_FOREACH_ISCLR(i, &locked) {
+		if (i >= maxcpus)
+			break;
+		vcpu = vm_vcpu(vm, i);
+		if (vcpu == NULL)
+			continue;
+		vcpu_lock(vcpu);
+		while (vcpu->state != VCPU_IDLE) {
+			mtx_unlock(&vm->rendezvous_mtx);
+			vcpu_wait_idle(vcpu);
+			vcpu_unlock(vcpu);
+			mtx_lock(&vm->rendezvous_mtx);
+			if (vm->rendezvous_func != NULL)
+				goto restart;
+			vcpu_lock(vcpu);
+		}
+		error = vcpu_set_state_locked(vcpu, newstate, true);
+		vcpu_unlock(vcpu);
+		if (error != 0) {
+			/* Roll back state changes on each locked vCPU. */
+			CPU_FOREACH_ISSET(j, &locked)
+				(void)vcpu_set_state(vm_vcpu(vm, j), VCPU_IDLE, false);
+			break;
+		}
+		CPU_SET(i, &locked);
+	}
+	mtx_unlock(&vm->rendezvous_mtx);
+	return (error);
+}
+
+
+int
+vcpu_set_state(struct vcpu *vcpu, enum vcpu_state newstate, bool from_idle)
+{
+ int error;
+
+ vcpu_lock(vcpu);
+ error = vcpu_set_state_locked(vcpu, newstate, from_idle);
+ vcpu_unlock(vcpu);
+
+ return (error);
+}
+
+enum vcpu_state
+vcpu_get_state(struct vcpu *vcpu, int *hostcpu)
+{
+ enum vcpu_state state;
+
+ vcpu_lock(vcpu);
+ state = vcpu->state;
+ if (hostcpu != NULL)
+ *hostcpu = vcpu->hostcpu;
+ vcpu_unlock(vcpu);
+
+ return (state);
+}
+
+/*
+ * This function is called to ensure that a vcpu "sees" a pending event
+ * as soon as possible:
+ * - If the vcpu thread is sleeping then it is woken up.
+ * - If the vcpu is running on a different host_cpu then an IPI will be directed
+ * to the host_cpu to cause the vcpu to trap into the hypervisor.
+ */
+void
+vcpu_notify_event_locked(struct vcpu *vcpu)
+{
+ int hostcpu;
+
+ hostcpu = vcpu->hostcpu;
+ if (vcpu->state == VCPU_RUNNING) {
+ KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
+ if (hostcpu != curcpu) {
+ ipi_cpu(hostcpu, vmm_ipinum);
+ } else {
+ /*
+ * If the 'vcpu' is running on 'curcpu' then it must
+ * be sending a notification to itself (e.g. SELF_IPI).
+ * The pending event will be picked up when the vcpu
+ * transitions back to guest context.
+ */
+ }
+ } else {
+ KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
+ "with hostcpu %d", vcpu->state, hostcpu));
+ if (vcpu->state == VCPU_SLEEPING)
+ wakeup_one(vcpu);
+ }
+}
+
+void
+vcpu_notify_event(struct vcpu *vcpu)
+{
+ vcpu_lock(vcpu);
+ vcpu_notify_event_locked(vcpu);
+ vcpu_unlock(vcpu);
+}
+
+int
+vcpu_debugged(struct vcpu *vcpu)
+{
+ return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus));
+}
+
+void
+vm_lock_vcpus(struct vm *vm)
+{
+ sx_xlock(&vm->vcpus_init_lock);
+}
+
+void
+vm_unlock_vcpus(struct vm *vm)
+{
+ sx_unlock(&vm->vcpus_init_lock);
+}
+
+void
+vm_disable_vcpu_creation(struct vm *vm)
+{
+ sx_xlock(&vm->vcpus_init_lock);
+ vm->dying = true;
+ sx_xunlock(&vm->vcpus_init_lock);
+}
+
+uint16_t
+vm_get_maxcpus(struct vm *vm)
+{
+ return (vm->maxcpus);
+}
+
+void
+vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
+ uint16_t *threads, uint16_t *maxcpus)
+{
+ *sockets = vm->sockets;
+ *cores = vm->cores;
+ *threads = vm->threads;
+ *maxcpus = vm->maxcpus;
+}
+
+int
+vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
+ uint16_t threads, uint16_t maxcpus __unused)
+{
+ /* Ignore maxcpus. */
+ if (sockets * cores * threads > vm->maxcpus)
+ return (EINVAL);
+ vm->sockets = sockets;
+ vm->cores = cores;
+ vm->threads = threads;
+ return (0);
+}
+
+int
+vm_suspend(struct vm *vm, enum vm_suspend_how how)
+{
+ int i;
+
+ if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
+ return (EINVAL);
+
+ if (atomic_cmpset_int(&vm->suspend, 0, how) == 0)
+ return (EALREADY);
+
+ /*
+ * Notify all active vcpus that they are now suspended.
+ */
+ for (i = 0; i < vm->maxcpus; i++) {
+ if (CPU_ISSET(i, &vm->active_cpus))
+ vcpu_notify_event(vm_vcpu(vm, i));
+ }
+
+ return (0);
+}
+
+int
+vm_reinit(struct vm *vm)
+{
+ int error;
+
+ /*
+ * A virtual machine can be reset only if all vcpus are suspended.
+ */
+ if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
+ vm_reset(vm);
+ error = 0;
+ } else {
+ error = EBUSY;
+ }
+
+ return (error);
+}
+
+int
+vm_activate_cpu(struct vcpu *vcpu)
+{
+ struct vm *vm = vcpu->vm;
+
+ if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
+ return (EBUSY);
+
+ CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus);
+ return (0);
+}
+
+int
+vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu)
+{
+ if (vcpu == NULL) {
+ vm->debug_cpus = vm->active_cpus;
+ for (int i = 0; i < vm->maxcpus; i++) {
+ if (CPU_ISSET(i, &vm->active_cpus))
+ vcpu_notify_event(vm_vcpu(vm, i));
+ }
+ } else {
+ if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
+ return (EINVAL);
+
+ CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
+ vcpu_notify_event(vcpu);
+ }
+ return (0);
+}
+
+int
+vm_resume_cpu(struct vm *vm, struct vcpu *vcpu)
+{
+ if (vcpu == NULL) {
+ CPU_ZERO(&vm->debug_cpus);
+ } else {
+ if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus))
+ return (EINVAL);
+
+ CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
+ }
+ return (0);
+}
+
+cpuset_t
+vm_active_cpus(struct vm *vm)
+{
+ return (vm->active_cpus);
+}
+
+cpuset_t
+vm_debug_cpus(struct vm *vm)
+{
+ return (vm->debug_cpus);
+}
+
+cpuset_t
+vm_suspended_cpus(struct vm *vm)
+{
+ return (vm->suspended_cpus);
+}
diff --git a/sys/dev/vmm/vmm_vm.h b/sys/dev/vmm/vmm_vm.h
new file mode 100644
index 000000000000..66d3545d1dd5
--- /dev/null
+++ b/sys/dev/vmm/vmm_vm.h
@@ -0,0 +1,233 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ */
+
+#ifndef _DEV_VMM_VM_H_
+#define _DEV_VMM_VM_H_
+
+#ifdef _KERNEL
+#include <sys/_cpuset.h>
+
+#include <machine/vmm.h>
+
+#include <dev/vmm/vmm_param.h>
+#include <dev/vmm/vmm_mem.h>
+
+struct vcpu;
+
+enum vcpu_state {
+ VCPU_IDLE,
+ VCPU_FROZEN,
+ VCPU_RUNNING,
+ VCPU_SLEEPING,
+};
+
+/*
+ * Initialization:
+ * (a) allocated when vcpu is created
+ * (i) initialized when vcpu is created and when it is reinitialized
+ * (o) initialized the first time the vcpu is created
+ * (x) initialized before use
+ */
+struct vcpu {
+ struct mtx mtx; /* (o) protects 'state' and 'hostcpu' */
+ enum vcpu_state state; /* (o) vcpu state */
+ int vcpuid; /* (o) */
+ int hostcpu; /* (o) vcpu's host cpu */
+ int reqidle; /* (i) request vcpu to idle */
+ struct vm *vm; /* (o) */
+ void *cookie; /* (i) cpu-specific data */
+ void *stats; /* (a,i) statistics */
+
+ VMM_VCPU_MD_FIELDS;
+};
+
+#define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
+#define vcpu_lock_destroy(v) mtx_destroy(&((v)->mtx))
+#define vcpu_lock(v) mtx_lock_spin(&((v)->mtx))
+#define vcpu_unlock(v) mtx_unlock_spin(&((v)->mtx))
+#define vcpu_assert_locked(v) mtx_assert(&((v)->mtx), MA_OWNED)
+
+extern int vmm_ipinum;
+
+int vcpu_set_state(struct vcpu *vcpu, enum vcpu_state state, bool from_idle);
+int vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
+ bool from_idle);
+int vcpu_set_state_all(struct vm *vm, enum vcpu_state state);
+enum vcpu_state vcpu_get_state(struct vcpu *vcpu, int *hostcpu);
+void vcpu_notify_event(struct vcpu *vcpu);
+void vcpu_notify_event_locked(struct vcpu *vcpu);
+int vcpu_debugged(struct vcpu *vcpu);
+
+static inline void *
+vcpu_stats(struct vcpu *vcpu)
+{
+ return (vcpu->stats);
+}
+
+static inline struct vm *
+vcpu_vm(struct vcpu *vcpu)
+{
+ return (vcpu->vm);
+}
+
+static inline int
+vcpu_vcpuid(struct vcpu *vcpu)
+{
+ return (vcpu->vcpuid);
+}
+
+static int __inline
+vcpu_is_running(struct vcpu *vcpu, int *hostcpu)
+{
+ return (vcpu_get_state(vcpu, hostcpu) == VCPU_RUNNING);
+}
+
+#ifdef _SYS_PROC_H_
+static int __inline
+vcpu_should_yield(struct vcpu *vcpu)
+{
+ struct thread *td;
+
+ td = curthread;
+ return (td->td_ast != 0 || td->td_owepreempt != 0);
+}
+#endif
+
+typedef void (*vm_rendezvous_func_t)(struct vcpu *vcpu, void *arg);
+int vm_handle_rendezvous(struct vcpu *vcpu);
+
+/*
+ * Rendezvous all vcpus specified in 'dest' and execute 'func(arg)'.
+ * The rendezvous 'func(arg)' is not allowed to do anything that will
+ * cause the thread to be put to sleep.
+ *
+ * The caller cannot hold any locks when initiating the rendezvous.
+ *
+ * The implementation of this API may cause vcpus other than those specified
+ * by 'dest' to be stalled. The caller should not rely on any vcpus making
+ * forward progress when the rendezvous is in progress.
+ */
+int vm_smp_rendezvous(struct vcpu *vcpu, cpuset_t dest,
+ vm_rendezvous_func_t func, void *arg);
+
+/*
+ * Initialization:
+ * (o) initialized the first time the VM is created
+ * (i) initialized when VM is created and when it is reinitialized
+ * (x) initialized before use
+ *
+ * Locking:
+ * [m] mem_segs_lock
+ * [r] rendezvous_mtx
+ * [v] reads require one frozen vcpu, writes require freezing all vcpus
+ */
+struct vm {
+ void *cookie; /* (i) cpu-specific data */
+ struct vcpu **vcpu; /* (o) guest vcpus */
+ struct vm_mem mem; /* (i) [m+v] guest memory */
+
+ char name[VM_MAX_NAMELEN + 1]; /* (o) virtual machine name */
+ struct sx vcpus_init_lock; /* (o) */
+
+ bool dying; /* (o) is dying */
+ int suspend; /* (i) stop VM execution */
+
+ volatile cpuset_t active_cpus; /* (i) active vcpus */
+ volatile cpuset_t debug_cpus; /* (i) vcpus stopped for debug */
+ volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */
+ volatile cpuset_t halted_cpus; /* (x) cpus in a hard halt */
+
+ cpuset_t rendezvous_req_cpus; /* (x) [r] rendezvous requested */
+ cpuset_t rendezvous_done_cpus; /* (x) [r] rendezvous finished */
+ void *rendezvous_arg; /* (x) [r] rendezvous func/arg */
+ vm_rendezvous_func_t rendezvous_func;
+ struct mtx rendezvous_mtx; /* (o) rendezvous lock */
+
+ uint16_t sockets; /* (o) num of sockets */
+ uint16_t cores; /* (o) num of cores/socket */
+ uint16_t threads; /* (o) num of threads/core */
+ uint16_t maxcpus; /* (o) max pluggable cpus */
+
+ VMM_VM_MD_FIELDS;
+};
+
+int vm_create(const char *name, struct vm **retvm);
+struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid);
+void vm_destroy(struct vm *vm);
+int vm_reinit(struct vm *vm);
+void vm_reset(struct vm *vm);
+
+void vm_lock_vcpus(struct vm *vm);
+void vm_unlock_vcpus(struct vm *vm);
+void vm_disable_vcpu_creation(struct vm *vm);
+
+int vm_suspend(struct vm *vm, enum vm_suspend_how how);
+int vm_activate_cpu(struct vcpu *vcpu);
+int vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu);
+int vm_resume_cpu(struct vm *vm, struct vcpu *vcpu);
+
+cpuset_t vm_active_cpus(struct vm *vm);
+cpuset_t vm_debug_cpus(struct vm *vm);
+cpuset_t vm_suspended_cpus(struct vm *vm);
+
+uint16_t vm_get_maxcpus(struct vm *vm);
+void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
+ uint16_t *threads, uint16_t *maxcpus);
+int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
+ uint16_t threads, uint16_t maxcpus);
+
+static inline const char *
+vm_name(struct vm *vm)
+{
+ return (vm->name);
+}
+
+static inline struct vm_mem *
+vm_mem(struct vm *vm)
+{
+ return (&vm->mem);
+}
+
+static inline struct vcpu *
+vm_vcpu(struct vm *vm, int vcpuid)
+{
+ return (vm->vcpu[vcpuid]);
+}
+
+struct vm_eventinfo {
+ cpuset_t *rptr; /* rendezvous cookie */
+ int *sptr; /* suspend cookie */
+ int *iptr; /* reqidle cookie */
+};
+
+static inline int
+vcpu_rendezvous_pending(struct vcpu *vcpu, struct vm_eventinfo *info)
+{
+ /*
+ * This check isn't done with atomic operations or under a lock because
+ * there's no need to. If the vcpuid bit is set, the vcpu is part of a
+ * rendezvous and the bit won't be cleared until the vcpu enters the
+ * rendezvous. On rendezvous exit, the cpuset is cleared and the vcpu
+ * will see an empty cpuset. So, the races are harmless.
+ */
+ return (CPU_ISSET(vcpu_vcpuid(vcpu), info->rptr));
+}
+
+static inline int
+vcpu_suspended(struct vm_eventinfo *info)
+{
+ return (*info->sptr);
+}
+
+static inline int
+vcpu_reqidle(struct vm_eventinfo *info)
+{
+ return (*info->iptr);
+}
+#endif /* _KERNEL */
+
+#endif /* !_DEV_VMM_VM_H_ */
diff --git a/sys/dev/vt/vt.h b/sys/dev/vt/vt.h
index 8e35a81bc101..4abe99e4ab13 100644
--- a/sys/dev/vt/vt.h
+++ b/sys/dev/vt/vt.h
@@ -81,7 +81,6 @@
#else
#define DPRINTF(_l, ...) do {} while (0)
#endif
-#define ISSIGVALID(sig) ((sig) > 0 && (sig) < NSIG)
#define VT_SYSCTL_INT(_name, _default, _descr) \
int vt_##_name = (_default); \
diff --git a/sys/dev/vt/vt_core.c b/sys/dev/vt/vt_core.c
index 5e8f7b1d0bb7..a6a5f0eeff9d 100644
--- a/sys/dev/vt/vt_core.c
+++ b/sys/dev/vt/vt_core.c
@@ -3046,9 +3046,9 @@ skip_thunk:
DPRINTF(5, "reset WAIT_ACQ, ");
return (0);
} else if (mode->mode == VT_PROCESS) {
- if (!(ISSIGVALID(mode->relsig) &&
- ISSIGVALID(mode->acqsig) &&
- (mode->frsig == 0 || ISSIGVALID(mode->frsig)))) {
+ if (!(_SIG_VALID(mode->relsig) &&
+ _SIG_VALID(mode->acqsig) &&
+ (mode->frsig == 0 || _SIG_VALID(mode->frsig)))) {
DPRINTF(5, "error EINVAL\n");
return (EINVAL);
}
diff --git a/sys/dev/wg/if_wg.c b/sys/dev/wg/if_wg.c
index 17aedee0e6b0..611314883643 100644
--- a/sys/dev/wg/if_wg.c
+++ b/sys/dev/wg/if_wg.c
@@ -3033,8 +3033,8 @@ wg_clone_create(struct if_clone *ifc, char *name, size_t len,
if_attach(ifp);
bpfattach(ifp, DLT_NULL, sizeof(uint32_t));
#ifdef INET6
- ND_IFINFO(ifp)->flags &= ~ND6_IFF_AUTO_LINKLOCAL;
- ND_IFINFO(ifp)->flags |= ND6_IFF_NO_DAD;
+ if_getinet6(ifp)->nd_flags &= ~ND6_IFF_AUTO_LINKLOCAL;
+ if_getinet6(ifp)->nd_flags |= ND6_IFF_NO_DAD;
#endif
sx_xlock(&wg_sx);
LIST_INSERT_HEAD(&wg_list, sc, sc_entry);
diff --git a/sys/fs/fuse/fuse_device.c b/sys/fs/fuse/fuse_device.c
index 41387de3ce71..2c1e19953dfd 100644
--- a/sys/fs/fuse/fuse_device.c
+++ b/sys/fs/fuse/fuse_device.c
@@ -65,7 +65,6 @@
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/errno.h>
-#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/uio.h>
@@ -177,6 +176,11 @@ fdata_dtor(void *arg)
fuse_lck_mtx_unlock(fdata->ms_mtx);
FUSE_UNLOCK();
+ if (fdata->mp && fdata->dataflags & FSESS_AUTO_UNMOUNT) {
+ vfs_ref(fdata->mp);
+ dounmount(fdata->mp, MNT_FORCE, curthread);
+ }
+
fdata_trydestroy(fdata);
}
diff --git a/sys/fs/fuse/fuse_internal.c b/sys/fs/fuse/fuse_internal.c
index eba0a8a79ff3..a3590060f44a 100644
--- a/sys/fs/fuse/fuse_internal.c
+++ b/sys/fs/fuse/fuse_internal.c
@@ -1103,7 +1103,6 @@ fuse_internal_send_init(struct fuse_data *data, struct thread *td)
* FUSE_SPLICE_WRITE, FUSE_SPLICE_MOVE, FUSE_SPLICE_READ: FreeBSD
* doesn't have splice(2).
* FUSE_FLOCK_LOCKS: not yet implemented
- * FUSE_HAS_IOCTL_DIR: not yet implemented
* FUSE_AUTO_INVAL_DATA: not yet implemented
* FUSE_DO_READDIRPLUS: not yet implemented
* FUSE_READDIRPLUS_AUTO: not yet implemented
@@ -1116,7 +1115,7 @@ fuse_internal_send_init(struct fuse_data *data, struct thread *td)
* FUSE_MAX_PAGES: not yet implemented
*/
fiii->flags = FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_EXPORT_SUPPORT
- | FUSE_BIG_WRITES | FUSE_WRITEBACK_CACHE
+ | FUSE_BIG_WRITES | FUSE_HAS_IOCTL_DIR | FUSE_WRITEBACK_CACHE
| FUSE_NO_OPEN_SUPPORT | FUSE_NO_OPENDIR_SUPPORT
| FUSE_SETXATTR_EXT;
diff --git a/sys/fs/fuse/fuse_ipc.c b/sys/fs/fuse/fuse_ipc.c
index bc36f0070d7d..f3d92d861352 100644
--- a/sys/fs/fuse/fuse_ipc.c
+++ b/sys/fs/fuse/fuse_ipc.c
@@ -835,6 +835,10 @@ fuse_body_audit(struct fuse_ticket *ftick, size_t blen)
err = (blen == 0) ? 0 : EINVAL;
break;
+ case FUSE_IOCTL:
+ err = (blen >= sizeof(struct fuse_ioctl_out)) ? 0 : EINVAL;
+ break;
+
case FUSE_FALLOCATE:
err = (blen == 0) ? 0 : EINVAL;
break;
diff --git a/sys/fs/fuse/fuse_ipc.h b/sys/fs/fuse/fuse_ipc.h
index d9d79f38c269..6fb04fd4ee25 100644
--- a/sys/fs/fuse/fuse_ipc.h
+++ b/sys/fs/fuse/fuse_ipc.h
@@ -240,10 +240,11 @@ struct fuse_data {
#define FSESS_WARN_READLINK_EMBEDDED_NUL 0x1000000 /* corrupt READLINK output */
#define FSESS_WARN_DOT_LOOKUP 0x2000000 /* Inconsistent . LOOKUP response */
#define FSESS_WARN_INODE_MISMATCH 0x4000000 /* ino != nodeid */
+#define FSESS_SETXATTR_EXT 0x8000000 /* extended fuse_setxattr_in */
+#define FSESS_AUTO_UNMOUNT 0x10000000 /* perform unmount when server dies */
#define FSESS_MNTOPTS_MASK ( \
FSESS_DAEMON_CAN_SPY | FSESS_PUSH_SYMLINKS_IN | \
- FSESS_DEFAULT_PERMISSIONS | FSESS_INTR)
-#define FSESS_SETXATTR_EXT 0x8000000 /* extended fuse_setxattr_in */
+ FSESS_DEFAULT_PERMISSIONS | FSESS_INTR | FSESS_AUTO_UNMOUNT)
extern int fuse_data_cache_mode;
diff --git a/sys/fs/fuse/fuse_vfsops.c b/sys/fs/fuse/fuse_vfsops.c
index 0ff79913128a..a5118aa7675f 100644
--- a/sys/fs/fuse/fuse_vfsops.c
+++ b/sys/fs/fuse/fuse_vfsops.c
@@ -337,6 +337,7 @@ fuse_vfsop_mount(struct mount *mp)
FUSE_FLAGOPT(push_symlinks_in, FSESS_PUSH_SYMLINKS_IN);
FUSE_FLAGOPT(default_permissions, FSESS_DEFAULT_PERMISSIONS);
FUSE_FLAGOPT(intr, FSESS_INTR);
+ FUSE_FLAGOPT(auto_unmount, FSESS_AUTO_UNMOUNT);
(void)vfs_scanopt(opts, "max_read=", "%u", &max_read);
(void)vfs_scanopt(opts, "linux_errnos", "%d", &linux_errnos);
diff --git a/sys/fs/fuse/fuse_vnops.c b/sys/fs/fuse/fuse_vnops.c
index 0e049b1f07a9..22d5893d4fbc 100644
--- a/sys/fs/fuse/fuse_vnops.c
+++ b/sys/fs/fuse/fuse_vnops.c
@@ -91,6 +91,7 @@
#include <sys/vmmeter.h>
#define EXTERR_CATEGORY EXTERR_CAT_FUSE_VNOPS
#include <sys/exterrvar.h>
+#include <sys/sysent.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
@@ -374,6 +375,84 @@ fuse_inval_buf_range(struct vnode *vp, off_t filesize, off_t start, off_t end)
return (0);
}
+/* Send FUSE_IOCTL for this node */
+static int
+fuse_vnop_do_ioctl(struct vnode *vp, u_long cmd, void *arg, int fflag,
+ struct ucred *cred, struct thread *td)
+{
+ struct fuse_dispatcher fdi;
+ struct fuse_ioctl_in *fii;
+ struct fuse_ioctl_out *fio;
+ struct fuse_filehandle *fufh;
+ uint32_t flags = 0;
+ uint32_t insize = 0;
+ uint32_t outsize = 0;
+ int err;
+
+ err = fuse_filehandle_getrw(vp, fflag, &fufh, cred, td->td_proc->p_pid);
+ if (err != 0)
+ return (err);
+
+ if (vnode_isdir(vp)) {
+ struct fuse_data *data = fuse_get_mpdata(vnode_mount(vp));
+
+ if (!fuse_libabi_geq(data, 7, 18))
+ return (ENOTTY);
+ flags |= FUSE_IOCTL_DIR;
+ }
+#ifdef __LP64__
+#ifdef COMPAT_FREEBSD32
+ if (SV_PROC_FLAG(td->td_proc, SV_ILP32))
+ flags |= FUSE_IOCTL_32BIT;
+#endif
+#else /* !defined(__LP64__) */
+ flags |= FUSE_IOCTL_32BIT;
+#endif
+
+ if ((cmd & IOC_OUT) != 0)
+ outsize = IOCPARM_LEN(cmd);
+ /* _IOWINT() sets IOC_VOID */
+ if ((cmd & (IOC_VOID | IOC_IN)) != 0)
+ insize = IOCPARM_LEN(cmd);
+
+ fdisp_init(&fdi, sizeof(*fii) + insize);
+ fdisp_make_vp(&fdi, FUSE_IOCTL, vp, td, cred);
+ fii = fdi.indata;
+ fii->fh = fufh->fh_id;
+ fii->flags = flags;
+ fii->cmd = cmd;
+ fii->arg = (uintptr_t)arg;
+ fii->in_size = insize;
+ fii->out_size = outsize;
+ if (insize > 0)
+ memcpy((char *)fii + sizeof(*fii), arg, insize);
+
+ err = fdisp_wait_answ(&fdi);
+ if (err != 0) {
+ if (err == ENOSYS)
+ err = ENOTTY;
+ goto out;
+ }
+
+ fio = fdi.answ;
+ if (fdi.iosize > sizeof(*fio)) {
+ size_t realoutsize = fdi.iosize - sizeof(*fio);
+
+ if (realoutsize > outsize) {
+ err = EIO;
+ goto out;
+ }
+ memcpy(arg, (char *)fio + sizeof(*fio), realoutsize);
+ }
+ if (fio->result > 0)
+ td->td_retval[0] = fio->result;
+ else
+ err = -fio->result;
+
+out:
+ fdisp_destroy(&fdi);
+ return (err);
+}
/* Send FUSE_LSEEK for this node */
static int
@@ -1294,25 +1373,29 @@ fuse_vnop_ioctl(struct vop_ioctl_args *ap)
struct vnode *vp = ap->a_vp;
struct mount *mp = vnode_mount(vp);
struct ucred *cred = ap->a_cred;
- off_t *offp;
- pid_t pid = ap->a_td->td_proc->p_pid;
+ struct thread *td = ap->a_td;
int err;
+ if (fuse_isdeadfs(vp)) {
+ return (ENXIO);
+ }
+
switch (ap->a_command) {
case FIOSEEKDATA:
case FIOSEEKHOLE:
/* Call FUSE_LSEEK, if we can, or fall back to vop_stdioctl */
if (fsess_maybe_impl(mp, FUSE_LSEEK)) {
+ off_t *offp = ap->a_data;
+ pid_t pid = td->td_proc->p_pid;
int whence;
- offp = ap->a_data;
if (ap->a_command == FIOSEEKDATA)
whence = SEEK_DATA;
else
whence = SEEK_HOLE;
vn_lock(vp, LK_SHARED | LK_RETRY);
- err = fuse_vnop_do_lseek(vp, ap->a_td, cred, pid, offp,
+ err = fuse_vnop_do_lseek(vp, td, cred, pid, offp,
whence);
VOP_UNLOCK(vp);
}
@@ -1320,8 +1403,8 @@ fuse_vnop_ioctl(struct vop_ioctl_args *ap)
err = vop_stdioctl(ap);
break;
default:
- /* TODO: implement FUSE_IOCTL */
- err = ENOTTY;
+ err = fuse_vnop_do_ioctl(vp, ap->a_command, ap->a_data,
+ ap->a_fflag, cred, td);
break;
}
return (err);
diff --git a/sys/fs/nfs/nfs.h b/sys/fs/nfs/nfs.h
index ecff9b8e6849..7903542be91d 100644
--- a/sys/fs/nfs/nfs.h
+++ b/sys/fs/nfs/nfs.h
@@ -872,6 +872,11 @@ typedef enum { UNKNOWN=0, DELETED=1, NLINK_ZERO=2, VALID=3 } nfsremove_status;
#define SUPPACL_NFSV4 1
#define SUPPACL_POSIX 2
+/* Values NFSv4 uses for exclusive_flag. */
+#define NFSV4_EXCLUSIVE_NONE 0
+#define NFSV4_EXCLUSIVE 1
+#define NFSV4_EXCLUSIVE_41 2
+
#endif /* _KERNEL */
#endif /* _NFS_NFS_H */
diff --git a/sys/fs/nfs/nfs_var.h b/sys/fs/nfs/nfs_var.h
index 0211acf7f00b..28088c12d7e7 100644
--- a/sys/fs/nfs/nfs_var.h
+++ b/sys/fs/nfs/nfs_var.h
@@ -410,7 +410,7 @@ int nfsv4_strtogid(struct nfsrv_descript *, u_char *, int, gid_t *);
int nfsrv_checkuidgid(struct nfsrv_descript *, struct nfsvattr *);
void nfsrv_fixattr(struct nfsrv_descript *, vnode_t,
struct nfsvattr *, NFSACL_T *, NFSACL_T *, NFSPROC_T *, nfsattrbit_t *,
- struct nfsexstuff *);
+ bool);
int nfsrv_errmoved(int);
int nfsrv_putreferralattr(struct nfsrv_descript *, nfsattrbit_t *,
struct nfsreferral *, int, int *);
diff --git a/sys/fs/nfs/nfsdport.h b/sys/fs/nfs/nfsdport.h
index c863741746c5..6439ef921d29 100644
--- a/sys/fs/nfs/nfsdport.h
+++ b/sys/fs/nfs/nfsdport.h
@@ -46,6 +46,8 @@
#define NFSVNO_ISSETATIME(n) ((n)->na_atime.tv_sec != VNOVAL)
#define NFSVNO_NOTSETMTIME(n) ((n)->na_mtime.tv_sec == VNOVAL)
#define NFSVNO_ISSETMTIME(n) ((n)->na_mtime.tv_sec != VNOVAL)
+#define NFSVNO_NOTSETFLAGS(n) ((n)->na_flags == VNOVAL)
+#define NFSVNO_ISSETFLAGS(n) ((n)->na_flags != VNOVAL)
/*
* This structure acts as a "catch-all" for information that
diff --git a/sys/fs/nfs/nfsport.h b/sys/fs/nfs/nfsport.h
index 4e9aae70da6f..f6b6cfb22908 100644
--- a/sys/fs/nfs/nfsport.h
+++ b/sys/fs/nfs/nfsport.h
@@ -1040,6 +1040,7 @@ void ncl_copy_vattr(struct vnode *vp, struct vattr *dst, struct vattr *src);
#define NFSSTA_HASWRITEVERF 0x00040000 /* Has write verifier */
#define NFSSTA_GOTFSINFO 0x00100000 /* Got the fsinfo */
#define NFSSTA_OPENMODE 0x00200000 /* Must use correct open mode */
+#define NFSSTA_CASEINSENSITIVE 0x00400000 /* Case insensitive fs */
#define NFSSTA_FLEXFILE 0x00800000 /* Use Flex File Layout */
#define NFSSTA_NOLAYOUTCOMMIT 0x04000000 /* Don't do LayoutCommit */
#define NFSSTA_SESSPERSIST 0x08000000 /* Has a persistent session */
@@ -1073,6 +1074,7 @@ void ncl_copy_vattr(struct vnode *vp, struct vattr *dst, struct vattr *src);
#define NFSHASPNFS(n) ((n)->nm_state & NFSSTA_PNFS)
#define NFSHASFLEXFILE(n) ((n)->nm_state & NFSSTA_FLEXFILE)
#define NFSHASOPENMODE(n) ((n)->nm_state & NFSSTA_OPENMODE)
+#define NFSHASCASEINSENSITIVE(n) ((n)->nm_state & NFSSTA_CASEINSENSITIVE)
#define NFSHASONEOPENOWN(n) (((n)->nm_flag & NFSMNT_ONEOPENOWN) != 0 && \
(n)->nm_minorvers > 0)
#define NFSHASTLS(n) (((n)->nm_newflag & NFSMNT_TLS) != 0)
diff --git a/sys/fs/nfsclient/nfs_clrpcops.c b/sys/fs/nfsclient/nfs_clrpcops.c
index 0cdcde6cca28..974d08611a00 100644
--- a/sys/fs/nfsclient/nfs_clrpcops.c
+++ b/sys/fs/nfsclient/nfs_clrpcops.c
@@ -4995,11 +4995,13 @@ nfsrpc_statfs(vnode_t vp, struct nfsstatfs *sbp, struct nfsfsinfo *fsp,
uint32_t *leasep, uint32_t *cloneblksizep, struct ucred *cred, NFSPROC_T *p,
struct nfsvattr *nap, int *attrflagp)
{
+ struct nfsvattr na;
+ struct nfsv3_pathconf pc;
u_int32_t *tl = NULL;
struct nfsrv_descript nfsd, *nd = &nfsd;
struct nfsmount *nmp;
nfsattrbit_t attrbits;
- int error;
+ int attrflag, error;
*attrflagp = 0;
if (cloneblksizep != NULL)
@@ -5066,6 +5068,16 @@ nfsrpc_statfs(vnode_t vp, struct nfsstatfs *sbp, struct nfsfsinfo *fsp,
sbp->sf_bfree = fxdr_unsigned(u_int32_t, *tl++);
sbp->sf_bavail = fxdr_unsigned(u_int32_t, *tl);
}
+
+ /* Try and find out if the server fs is case-insensitive. */
+ error = nfsrpc_pathconf(vp, &pc, NULL, NULL, cred, p, &na, &attrflag,
+ NULL);
+ if (error == 0 && pc.pc_caseinsensitive != 0) {
+ NFSLOCKMNT(nmp);
+ nmp->nm_state |= NFSSTA_CASEINSENSITIVE;
+ NFSUNLOCKMNT(nmp);
+ }
+ error = 0;
nfsmout:
m_freem(nd->nd_mrep);
return (error);
@@ -5086,9 +5098,11 @@ nfsrpc_pathconf(vnode_t vp, struct nfsv3_pathconf *pc, bool *has_namedattrp,
int error;
struct nfsnode *np;
- *has_namedattrp = false;
+ if (has_namedattrp != NULL)
+ *has_namedattrp = false;
*attrflagp = 0;
- *clone_blksizep = 0;
+ if (clone_blksizep != NULL)
+ *clone_blksizep = 0;
nmp = VFSTONFS(vp->v_mount);
if (NFSHASNFSV4(nmp)) {
np = VTONFS(vp);
diff --git a/sys/fs/nfsclient/nfs_clvnops.c b/sys/fs/nfsclient/nfs_clvnops.c
index 1bd77ab27ced..8e4d58c0e554 100644
--- a/sys/fs/nfsclient/nfs_clvnops.c
+++ b/sys/fs/nfsclient/nfs_clvnops.c
@@ -1492,7 +1492,8 @@ handle_error:
return (EJUSTRETURN);
}
- if ((cnp->cn_flags & MAKEENTRY) != 0 && dattrflag) {
+ if ((cnp->cn_flags & MAKEENTRY) != 0 && dattrflag &&
+ !NFSHASCASEINSENSITIVE(nmp)) {
/*
* Cache the modification time of the parent
* directory from the post-op attributes in
@@ -2206,6 +2207,14 @@ nfs_rename(struct vop_rename_args *ap)
goto out;
/*
+ * For case insensitive file systems, there may be multiple
+ * names cached for the one name being rename'd, so purge
+ * all names from the cache.
+ */
+ if (NFSHASCASEINSENSITIVE(nmp))
+ cache_purge(fvp);
+
+ /*
* We have to flush B_DELWRI data prior to renaming
* the file. If we don't, the delayed-write buffers
* can be flushed out later after the file has gone stale
@@ -2221,6 +2230,7 @@ nfs_rename(struct vop_rename_args *ap)
if ((nmp->nm_flag & NFSMNT_NOCTO) == 0 || !NFSHASNFSV4(nmp) ||
!NFSHASNFSV4N(nmp) || nfscl_mustflush(fvp) != 0)
error = VOP_FSYNC(fvp, MNT_WAIT, curthread);
+
NFSVOPUNLOCK(fvp);
if (error == 0 && tvp != NULL && ((nmp->nm_flag & NFSMNT_NOCTO) == 0 ||
!NFSHASNFSV4(nmp) || !NFSHASNFSV4N(nmp) ||
diff --git a/sys/fs/nfsserver/nfs_nfsdport.c b/sys/fs/nfsserver/nfs_nfsdport.c
index 7d64f211b058..833203cd86fc 100644
--- a/sys/fs/nfsserver/nfs_nfsdport.c
+++ b/sys/fs/nfsserver/nfs_nfsdport.c
@@ -1972,11 +1972,13 @@ nfsvno_open(struct nfsrv_descript *nd, struct nameidata *ndp,
NFSACL_T *aclp, NFSACL_T *daclp, nfsattrbit_t *attrbitp, struct ucred *cred,
bool done_namei, struct nfsexstuff *exp, struct vnode **vpp)
{
+ struct vattr va;
struct vnode *vp = NULL;
u_quad_t tempsize;
struct nfsexstuff nes;
struct thread *p = curthread;
uint32_t oldrepstat;
+ u_long savflags;
if (ndp->ni_vp == NULL) {
/*
@@ -1991,6 +1993,15 @@ nfsvno_open(struct nfsrv_descript *nd, struct nameidata *ndp,
}
if (!nd->nd_repstat) {
if (ndp->ni_vp == NULL) {
+ /*
+ * Most file systems ignore va_flags for
+ * VOP_CREATE(), however setting va_flags
+ * for VOP_CREATE() causes problems for ZFS.
+ * So disable them and let nfsrv_fixattr()
+ * do them, as required.
+ */
+ savflags = nvap->na_flags;
+ nvap->na_flags = VNOVAL;
nd->nd_repstat = VOP_CREATE(ndp->ni_dvp,
&ndp->ni_vp, &ndp->ni_cnd, &nvap->na_vattr);
/* For a pNFS server, create the data file on a DS. */
@@ -2003,27 +2014,57 @@ nfsvno_open(struct nfsrv_descript *nd, struct nameidata *ndp,
nfsrv_pnfscreate(ndp->ni_vp, &nvap->na_vattr,
cred, p);
}
+ nvap->na_flags = savflags;
VOP_VPUT_PAIR(ndp->ni_dvp, nd->nd_repstat == 0 ?
&ndp->ni_vp : NULL, false);
nfsvno_relpathbuf(ndp);
if (!nd->nd_repstat) {
- if (*exclusive_flagp) {
- *exclusive_flagp = 0;
- NFSVNO_ATTRINIT(nvap);
- nvap->na_atime.tv_sec = cverf[0];
- nvap->na_atime.tv_nsec = cverf[1];
+ if (*exclusive_flagp != NFSV4_EXCLUSIVE_NONE) {
+ VATTR_NULL(&va);
+ va.va_atime.tv_sec = cverf[0];
+ va.va_atime.tv_nsec = cverf[1];
nd->nd_repstat = VOP_SETATTR(ndp->ni_vp,
- &nvap->na_vattr, cred);
+ &va, cred);
if (nd->nd_repstat != 0) {
vput(ndp->ni_vp);
ndp->ni_vp = NULL;
nd->nd_repstat = NFSERR_NOTSUPP;
- } else
+ } else {
+ /*
+ * Few clients set these
+ * attributes in Open/Create
+ * Exclusive_41. If this
+ * changes, this should include
+ * setting atime, instead of
+ * the above.
+ */
+ if (*exclusive_flagp ==
+ NFSV4_EXCLUSIVE_41 &&
+ (NFSISSET_ATTRBIT(attrbitp,
+ NFSATTRBIT_OWNER) ||
+ NFSISSET_ATTRBIT(attrbitp,
+ NFSATTRBIT_OWNERGROUP) ||
+ NFSISSET_ATTRBIT(attrbitp,
+ NFSATTRBIT_TIMEMODIFYSET)||
+ NFSISSET_ATTRBIT(attrbitp,
+ NFSATTRBIT_ARCHIVE) ||
+ NFSISSET_ATTRBIT(attrbitp,
+ NFSATTRBIT_HIDDEN) ||
+ NFSISSET_ATTRBIT(attrbitp,
+ NFSATTRBIT_SYSTEM) ||
+ aclp != NULL ||
+ daclp != NULL))
+ nfsrv_fixattr(nd,
+ ndp->ni_vp, nvap,
+ aclp, daclp, p,
+ attrbitp, true);
NFSSETBIT_ATTRBIT(attrbitp,
NFSATTRBIT_TIMEACCESS);
+ }
+ *exclusive_flagp = NFSV4_EXCLUSIVE_NONE;
} else {
nfsrv_fixattr(nd, ndp->ni_vp, nvap,
- aclp, daclp, p, attrbitp, exp);
+ aclp, daclp, p, attrbitp, false);
}
}
vp = ndp->ni_vp;
diff --git a/sys/fs/nfsserver/nfs_nfsdserv.c b/sys/fs/nfsserver/nfs_nfsdserv.c
index 3eb3471d9ac9..b5f5b9bec9fc 100644
--- a/sys/fs/nfsserver/nfs_nfsdserv.c
+++ b/sys/fs/nfsserver/nfs_nfsdserv.c
@@ -1608,7 +1608,7 @@ nfsrvd_mknod(struct nfsrv_descript *nd, __unused int isdgram,
nd->nd_repstat = nfsvno_mknod(&named, &nva, nd->nd_cred, p);
if (!nd->nd_repstat) {
vp = named.ni_vp;
- nfsrv_fixattr(nd, vp, &nva, aclp, daclp, p, &attrbits, exp);
+ nfsrv_fixattr(nd, vp, &nva, aclp, daclp, p, &attrbits, false);
nd->nd_repstat = nfsvno_getfh(vp, fhp, p);
if ((nd->nd_flag & ND_NFSV3) && !nd->nd_repstat)
nd->nd_repstat = nfsvno_getattr(vp, &nva, nd, p, 1,
@@ -2120,7 +2120,7 @@ nfsrvd_symlinksub(struct nfsrv_descript *nd, struct nameidata *ndp,
!(nd->nd_flag & ND_NFSV2), nd->nd_saveduid, nd->nd_cred, p, exp);
if (!nd->nd_repstat && !(nd->nd_flag & ND_NFSV2)) {
nfsrv_fixattr(nd, ndp->ni_vp, nvap, aclp, NULL, p, attrbitp,
- exp);
+ false);
if (nd->nd_flag & ND_NFSV3) {
nd->nd_repstat = nfsvno_getfh(ndp->ni_vp, fhp, p);
if (!nd->nd_repstat)
@@ -2255,7 +2255,7 @@ nfsrvd_mkdirsub(struct nfsrv_descript *nd, struct nameidata *ndp,
nd->nd_cred, p, exp);
if (!nd->nd_repstat) {
vp = ndp->ni_vp;
- nfsrv_fixattr(nd, vp, nvap, aclp, daclp, p, attrbitp, exp);
+ nfsrv_fixattr(nd, vp, nvap, aclp, daclp, p, attrbitp, false);
nd->nd_repstat = nfsvno_getfh(vp, fhp, p);
if (!(nd->nd_flag & ND_NFSV4) && !nd->nd_repstat)
nd->nd_repstat = nfsvno_getattr(vp, nvap, nd, p, 1,
@@ -2964,7 +2964,8 @@ nfsrvd_open(struct nfsrv_descript *nd, __unused int isdgram,
u_int32_t *tl;
int i, retext;
struct nfsstate *stp = NULL;
- int error = 0, create, claim, exclusive_flag = 0, override;
+ int error = 0, create, claim, override;
+ int exclusive_flag = NFSV4_EXCLUSIVE_NONE;
u_int32_t rflags = NFSV4OPEN_LOCKTYPEPOSIX, acemask;
int how = NFSCREATE_UNCHECKED;
int32_t cverf[2], tverf[2] = { 0, 0 };
@@ -3229,6 +3230,7 @@ nfsrvd_open(struct nfsrv_descript *nd, __unused int isdgram,
case NFSCREATE_EXCLUSIVE:
if (nd->nd_repstat == 0 && named.ni_vp == NULL)
nva.na_mode = 0;
+ exclusive_flag = NFSV4_EXCLUSIVE;
/* FALLTHROUGH */
case NFSCREATE_EXCLUSIVE41:
if (nd->nd_repstat == 0 && named.ni_vp != NULL) {
@@ -3244,7 +3246,8 @@ nfsrvd_open(struct nfsrv_descript *nd, __unused int isdgram,
if (nd->nd_repstat != 0)
done_namei = true;
}
- exclusive_flag = 1;
+ if (how == NFSCREATE_EXCLUSIVE41)
+ exclusive_flag = NFSV4_EXCLUSIVE_41;
break;
}
}
diff --git a/sys/fs/nfsserver/nfs_nfsdsubs.c b/sys/fs/nfsserver/nfs_nfsdsubs.c
index ea8382e4282a..fdedf959f0e5 100644
--- a/sys/fs/nfsserver/nfs_nfsdsubs.c
+++ b/sys/fs/nfsserver/nfs_nfsdsubs.c
@@ -1645,7 +1645,7 @@ out:
void
nfsrv_fixattr(struct nfsrv_descript *nd, vnode_t vp,
struct nfsvattr *nvap, NFSACL_T *aclp, NFSACL_T *daclp, NFSPROC_T *p,
- nfsattrbit_t *attrbitp, struct nfsexstuff *exp)
+ nfsattrbit_t *attrbitp, bool atime_done)
{
int change = 0;
struct nfsvattr nva;
@@ -1675,7 +1675,7 @@ nfsrv_fixattr(struct nfsrv_descript *nd, vnode_t vp,
}
}
if (NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_TIMEACCESSSET) &&
- NFSVNO_ISSETATIME(nvap)) {
+ !atime_done && NFSVNO_ISSETATIME(nvap)) {
nva.na_atime = nvap->na_atime;
change++;
NFSSETBIT_ATTRBIT(&nattrbits, NFSATTRBIT_TIMEACCESSSET);
@@ -1697,8 +1697,46 @@ nfsrv_fixattr(struct nfsrv_descript *nd, vnode_t vp,
NFSCLRBIT_ATTRBIT(attrbitp, NFSATTRBIT_OWNERGROUP);
}
}
+
+ /*
+ * For archive, ZFS sets it by default for new files,
+ * so if specified, it must be set or cleared.
+ * For hidden and system, no file system sets them
+ * by default upon creation, so they only need to be
+ * set and not cleared.
+ */
+ if (NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_ARCHIVE)) {
+ if (nva.na_flags == VNOVAL)
+ nva.na_flags = 0;
+ if ((nvap->na_flags & UF_ARCHIVE) != 0)
+ nva.na_flags |= UF_ARCHIVE;
+ change++;
+ NFSSETBIT_ATTRBIT(&nattrbits, NFSATTRBIT_ARCHIVE);
+ }
+ if (NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_HIDDEN)) {
+ if ((nvap->na_flags & UF_HIDDEN) != 0) {
+ if (nva.na_flags == VNOVAL)
+ nva.na_flags = 0;
+ nva.na_flags |= UF_HIDDEN;
+ change++;
+ NFSSETBIT_ATTRBIT(&nattrbits, NFSATTRBIT_HIDDEN);
+ } else {
+ NFSCLRBIT_ATTRBIT(attrbitp, NFSATTRBIT_HIDDEN);
+ }
+ }
+ if (NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_SYSTEM)) {
+ if ((nvap->na_flags & UF_SYSTEM) != 0) {
+ if (nva.na_flags == VNOVAL)
+ nva.na_flags = 0;
+ nva.na_flags |= UF_SYSTEM;
+ change++;
+ NFSSETBIT_ATTRBIT(&nattrbits, NFSATTRBIT_SYSTEM);
+ } else {
+ NFSCLRBIT_ATTRBIT(attrbitp, NFSATTRBIT_SYSTEM);
+ }
+ }
if (change) {
- error = nfsvno_setattr(vp, &nva, nd->nd_cred, p, exp);
+ error = nfsvno_setattr(vp, &nva, nd->nd_cred, p, NULL);
if (error) {
NFSCLRALL_ATTRBIT(attrbitp, &nattrbits);
}
diff --git a/sys/fs/unionfs/union_subr.c b/sys/fs/unionfs/union_subr.c
index 90be4d6af812..6489eb77ce2b 100644
--- a/sys/fs/unionfs/union_subr.c
+++ b/sys/fs/unionfs/union_subr.c
@@ -1071,7 +1071,7 @@ unionfs_forward_vop_ref(struct vnode *basevp, int *lkflags)
* forwarded VOP reacquires the base vnode lock the unionfs vnode
* lock will no longer be held. This can lead to violation of the
* caller's sychronization requirements as well as various failed
- * locking assertions when DEBUG_VFS_LOCKS is enabled.
+ * locking assertions when INVARIANTS is enabled.
* 2) Loss of reference on the base vnode. The caller is expected to
* hold a v_usecount reference on the unionfs vnode, while the
* unionfs vnode holds a reference on the base-layer vnode(s). But
diff --git a/sys/i386/conf/MINIMAL b/sys/i386/conf/MINIMAL
index 8019617ca4d4..7bed6d560035 100644
--- a/sys/i386/conf/MINIMAL
+++ b/sys/i386/conf/MINIMAL
@@ -114,7 +114,6 @@ options SC_PIXEL_MODE # add support for the raster text mode
# vt is the default video console driver
device vt
device vt_vga
-device vt_efifb
device vt_vbefb
device agp # support several AGP chipsets
@@ -139,6 +138,7 @@ device virtio_pci # VirtIO PCI device
device vtnet # VirtIO Ethernet device
device virtio_blk # VirtIO Block device
device virtio_balloon # VirtIO Memory Balloon device
+device virtio_scsi # VirtIO SCSI device
# Linux KVM paravirtualization support
device kvm_clock # KVM paravirtual clock driver
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index 80b45070d896..698d3b26813c 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -91,7 +91,8 @@ get_pcb_user_save_td(struct thread *td)
p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN);
- KASSERT((p % XSAVE_AREA_ALIGN) == 0, ("Unaligned pcb_user_save area"));
+ KASSERT(__is_aligned(p, XSAVE_AREA_ALIGN),
+ ("Unaligned pcb_user_save area"));
return ((union savefpu *)p);
}
diff --git a/sys/i386/include/ifunc.h b/sys/i386/include/ifunc.h
new file mode 100644
index 000000000000..1af46757b836
--- /dev/null
+++ b/sys/i386/include/ifunc.h
@@ -0,0 +1,5 @@
+/*
+ * This file is in the public domain.
+ */
+
+#include <x86/ifunc.h>
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 4144297d674c..39357b8d4440 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -104,7 +104,17 @@ void mi_startup(void); /* Should be elsewhere */
static struct session session0;
static struct pgrp pgrp0;
struct proc proc0;
-struct thread0_storage thread0_st __aligned(32);
+struct thread0_storage thread0_st __aligned(32) = {
+ .t0st_thread = {
+ /*
+ * thread0.td_pflags is set with TDP_NOFAULTING to
+ * short-cut the vm page fault handler until it is
+ * ready. It is cleared in vm_init() after VM
+ * initialization.
+ */
+ .td_pflags = TDP_NOFAULTING,
+ },
+};
struct vmspace vmspace0;
struct proc *initproc;
diff --git a/sys/kern/init_sysent.c b/sys/kern/init_sysent.c
index cd305de1ed44..33586db22600 100644
--- a/sys/kern/init_sysent.c
+++ b/sys/kern/init_sysent.c
@@ -666,4 +666,6 @@ struct sysent sysent[] = {
{ .sy_narg = AS(jail_attach_jd_args), .sy_call = (sy_call_t *)sys_jail_attach_jd, .sy_auevent = AUE_JAIL_ATTACH, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 597 = jail_attach_jd */
{ .sy_narg = AS(jail_remove_jd_args), .sy_call = (sy_call_t *)sys_jail_remove_jd, .sy_auevent = AUE_JAIL_REMOVE, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 598 = jail_remove_jd */
{ .sy_narg = AS(kexec_load_args), .sy_call = (sy_call_t *)sys_kexec_load, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 599 = kexec_load */
+ { .sy_narg = AS(pdrfork_args), .sy_call = (sy_call_t *)sys_pdrfork, .sy_auevent = AUE_PDRFORK, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC }, /* 600 = pdrfork */
+ { .sy_narg = AS(pdwait_args), .sy_call = (sy_call_t *)sys_pdwait, .sy_auevent = AUE_PDWAIT, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC }, /* 601 = pdwait */
};
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index c4b1c8201ff2..18ea3a7bd29d 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -34,45 +34,45 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_ktrace.h"
-#include <sys/param.h>
+#define EXTERR_CATEGORY EXTERR_CAT_PROCEXIT
#include <sys/systm.h>
-#include <sys/sysproto.h>
+#include <sys/acct.h> /* for acct_process() function prototype */
#include <sys/capsicum.h>
#include <sys/eventhandler.h>
+#include <sys/exterrvar.h>
+#include <sys/filedesc.h>
+#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
-#include <sys/malloc.h>
#include <sys/lock.h>
+#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/procdesc.h>
-#include <sys/jail.h>
-#include <sys/tty.h>
-#include <sys/wait.h>
-#include <sys/vmmeter.h>
-#include <sys/vnode.h>
+#include <sys/ptrace.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sbuf.h>
-#include <sys/signalvar.h>
#include <sys/sched.h>
+#include <sys/sdt.h>
+#include <sys/sem.h>
+#include <sys/shm.h>
+#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
-#include <sys/syslog.h>
-#include <sys/ptrace.h>
-#include <sys/acct.h> /* for acct_process() function prototype */
-#include <sys/filedesc.h>
-#include <sys/sdt.h>
-#include <sys/shm.h>
-#include <sys/sem.h>
#include <sys/sysent.h>
+#include <sys/syslog.h>
+#include <sys/sysproto.h>
#include <sys/timers.h>
+#include <sys/tty.h>
#include <sys/umtxvar.h>
+#include <sys/vmmeter.h>
+#include <sys/vnode.h>
+#include <sys/wait.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
@@ -906,6 +906,33 @@ sys_wait6(struct thread *td, struct wait6_args *uap)
return (error);
}
+int
+sys_pdwait(struct thread *td, struct pdwait_args *uap)
+{
+ struct __wrusage wru, *wrup;
+ siginfo_t si, *sip;
+ int error, status;
+
+ wrup = uap->wrusage != NULL ? &wru : NULL;
+
+ if (uap->info != NULL) {
+ sip = &si;
+ bzero(sip, sizeof(*sip));
+ } else {
+ sip = NULL;
+ }
+
+ error = kern_pdwait(td, uap->fd, &status, uap->options, wrup, sip);
+
+ if (uap->status != NULL && error == 0)
+ error = copyout(&status, uap->status, sizeof(status));
+ if (uap->wrusage != NULL && error == 0)
+ error = copyout(&wru, uap->wrusage, sizeof(wru));
+ if (uap->info != NULL && error == 0)
+ error = copyout(&si, uap->info, sizeof(si));
+ return (error);
+}
+
/*
* Reap the remains of a zombie process and optionally return status and
* rusage. Asserts and will release both the proctree_lock and the process
@@ -924,9 +951,9 @@ proc_reap(struct thread *td, struct proc *p, int *status, int options)
q = td->td_proc;
- if (status)
+ if (status != NULL)
*status = KW_EXITCODE(p->p_xexit, p->p_xsig);
- if (options & WNOWAIT) {
+ if ((options & WNOWAIT) != 0) {
/*
* Only poll, returning the status. Caller does not wish to
* release the proc struct just yet.
@@ -979,10 +1006,10 @@ proc_reap(struct thread *td, struct proc *p, int *status, int options)
leavepgrp(p);
if (p->p_procdesc != NULL)
procdesc_reap(p);
+ else
+ proc_id_clear(PROC_ID_PID, p->p_pid);
sx_xunlock(&proctree_lock);
- proc_id_clear(PROC_ID_PID, p->p_pid);
-
PROC_LOCK(p);
knlist_detach(p->p_klist);
p->p_klist = NULL;
@@ -1042,13 +1069,75 @@ proc_reap(struct thread *td, struct proc *p, int *status, int options)
atomic_add_int(&nprocs, -1);
}
+static void
+wait_fill_siginfo(struct proc *p, siginfo_t *siginfo)
+{
+ PROC_LOCK_ASSERT(p, MA_OWNED);
+
+ if (siginfo == NULL)
+ return;
+
+ bzero(siginfo, sizeof(*siginfo));
+ siginfo->si_errno = 0;
+
+ /*
+ * SUSv4 requires that the si_signo value is always
+	 * SIGCHLD.  Obey it even though the rfork(2) interface
+	 * allows requesting a different signal for child exit
+	 * notification.
+ */
+ siginfo->si_signo = SIGCHLD;
+
+ /*
+ * This is still a rough estimate. We will fix the cases
+ * TRAPPED, STOPPED, and CONTINUED later.
+ */
+ if (WCOREDUMP(p->p_xsig)) {
+ siginfo->si_code = CLD_DUMPED;
+ siginfo->si_status = WTERMSIG(p->p_xsig);
+ } else if (WIFSIGNALED(p->p_xsig)) {
+ siginfo->si_code = CLD_KILLED;
+ siginfo->si_status = WTERMSIG(p->p_xsig);
+ } else {
+ siginfo->si_code = CLD_EXITED;
+ siginfo->si_status = p->p_xexit;
+ }
+
+ siginfo->si_pid = p->p_pid;
+ siginfo->si_uid = p->p_ucred->cr_uid;
+
+ /*
+ * The si_addr field would be useful additional detail, but
+ * apparently the PC value may be lost when we reach this
+ * point. bzero() above sets siginfo->si_addr to NULL.
+ */
+}
+
+static void
+wait_fill_wrusage(struct proc *p, struct __wrusage *wrusage)
+{
+ struct rusage *rup;
+
+ PROC_LOCK_ASSERT(p, MA_OWNED);
+
+ if (wrusage == NULL)
+ return;
+
+ rup = &wrusage->wru_self;
+ *rup = p->p_ru;
+ PROC_STATLOCK(p);
+ calcru(p, &rup->ru_utime, &rup->ru_stime);
+ PROC_STATUNLOCK(p);
+
+ rup = &wrusage->wru_children;
+ *rup = p->p_stats->p_cru;
+ calccru(p, &rup->ru_utime, &rup->ru_stime);
+}
+
static int
proc_to_reap(struct thread *td, struct proc *p, idtype_t idtype, id_t id,
int *status, int options, struct __wrusage *wrusage, siginfo_t *siginfo,
int check_only)
{
- struct rusage *rup;
-
sx_assert(&proctree_lock, SA_XLOCKED);
PROC_LOCK(p);
@@ -1114,7 +1203,7 @@ proc_to_reap(struct thread *td, struct proc *p, idtype_t idtype, id_t id,
return (0);
}
- if (((options & WEXITED) == 0) && (p->p_state == PRS_ZOMBIE)) {
+ if ((options & WEXITED) == 0 && p->p_state == PRS_ZOMBIE) {
PROC_UNLOCK(p);
return (0);
}
@@ -1133,60 +1222,14 @@ proc_to_reap(struct thread *td, struct proc *p, idtype_t idtype, id_t id,
return (0);
}
- if (siginfo != NULL) {
- bzero(siginfo, sizeof(*siginfo));
- siginfo->si_errno = 0;
-
- /*
- * SUSv4 requires that the si_signo value is always
- * SIGCHLD. Obey it despite the rfork(2) interface
- * allows to request other signal for child exit
- * notification.
- */
- siginfo->si_signo = SIGCHLD;
-
- /*
- * This is still a rough estimate. We will fix the
- * cases TRAPPED, STOPPED, and CONTINUED later.
- */
- if (WCOREDUMP(p->p_xsig)) {
- siginfo->si_code = CLD_DUMPED;
- siginfo->si_status = WTERMSIG(p->p_xsig);
- } else if (WIFSIGNALED(p->p_xsig)) {
- siginfo->si_code = CLD_KILLED;
- siginfo->si_status = WTERMSIG(p->p_xsig);
- } else {
- siginfo->si_code = CLD_EXITED;
- siginfo->si_status = p->p_xexit;
- }
-
- siginfo->si_pid = p->p_pid;
- siginfo->si_uid = p->p_ucred->cr_uid;
-
- /*
- * The si_addr field would be useful additional
- * detail, but apparently the PC value may be lost
- * when we reach this point. bzero() above sets
- * siginfo->si_addr to NULL.
- */
- }
+ wait_fill_siginfo(p, siginfo);
/*
* There should be no reason to limit resources usage info to
* exited processes only. A snapshot about any resources used
* by a stopped process may be exactly what is needed.
*/
- if (wrusage != NULL) {
- rup = &wrusage->wru_self;
- *rup = p->p_ru;
- PROC_STATLOCK(p);
- calcru(p, &rup->ru_utime, &rup->ru_stime);
- PROC_STATUNLOCK(p);
-
- rup = &wrusage->wru_children;
- *rup = p->p_stats->p_cru;
- calccru(p, &rup->ru_utime, &rup->ru_stime);
- }
+ wait_fill_wrusage(p, wrusage);
if (p->p_state == PRS_ZOMBIE && !check_only) {
proc_reap(td, p, status, options);
@@ -1267,10 +1310,85 @@ report_alive_proc(struct thread *td, struct proc *p, siginfo_t *siginfo,
}
if (status != NULL)
*status = cont ? SIGCONT : W_STOPCODE(p->p_xsig);
- td->td_retval[0] = p->p_pid;
PROC_UNLOCK(p);
}
+static int
+wait6_checkopt(int options)
+{
+ /* If we don't know the option, just return. */
+ if ((options & ~(WUNTRACED | WNOHANG | WCONTINUED | WNOWAIT |
+ WEXITED | WTRAPPED | WLINUXCLONE)) != 0)
+ return (EXTERROR(EINVAL, "Unknown options %#jx", options));
+ if ((options & (WEXITED | WUNTRACED | WCONTINUED | WTRAPPED)) == 0) {
+ /*
+ * We will be unable to find any matching processes,
+ * because there are no known events to look for.
+ * Prefer to return error instead of blocking
+ * indefinitely.
+ */
+ return (EXTERROR(EINVAL,
+ "Cannot match processes %#jx", options));
+ }
+ return (0);
+}
+
+/*
+ * Checks and reports status for alive process, according to the
+ * options. Returns true if the process fits one of the requested
+ * options and its status was updated in siginfo.
+ *
+ * If the process was reported (the function result is true), both the
+ * process and proctree locks are unlocked.
+ */
+static bool
+wait6_check_alive(struct thread *td, int options, struct proc *p, int *status,
+ siginfo_t *siginfo)
+{
+ bool report;
+
+ PROC_LOCK_ASSERT(p, MA_OWNED);
+ sx_assert(&proctree_lock, SA_XLOCKED);
+
+ if ((options & WTRAPPED) != 0 && (p->p_flag & P_TRACED) != 0) {
+ PROC_SLOCK(p);
+ report = (p->p_flag & (P_STOPPED_TRACE | P_STOPPED_SIG)) &&
+ p->p_suspcount == p->p_numthreads &&
+ (p->p_flag & P_WAITED) == 0;
+ PROC_SUNLOCK(p);
+ if (report) {
+ CTR4(KTR_PTRACE,
+ "wait: returning trapped pid %d status %#x (xstat %d) xthread %d",
+ p->p_pid, W_STOPCODE(p->p_xsig), p->p_xsig,
+ p->p_xthread != NULL ?
+ p->p_xthread->td_tid : -1);
+ report_alive_proc(td, p, siginfo, status,
+ options, CLD_TRAPPED);
+ return (true);
+ }
+ }
+
+ if ((options & WUNTRACED) != 0 && (p->p_flag & P_STOPPED_SIG) != 0) {
+ PROC_SLOCK(p);
+ report = p->p_suspcount == p->p_numthreads &&
+ (p->p_flag & P_WAITED) == 0;
+ PROC_SUNLOCK(p);
+ if (report) {
+ report_alive_proc(td, p, siginfo, status, options,
+ CLD_STOPPED);
+ return (true);
+ }
+ }
+
+ if ((options & WCONTINUED) != 0 && (p->p_flag & P_CONTINUED) != 0) {
+ report_alive_proc(td, p, siginfo, status, options,
+ CLD_CONTINUED);
+ return (true);
+ }
+
+ return (false);
+}
+
int
kern_wait6(struct thread *td, idtype_t idtype, id_t id, int *status,
int options, struct __wrusage *wrusage, siginfo_t *siginfo)
@@ -1278,7 +1396,6 @@ kern_wait6(struct thread *td, idtype_t idtype, id_t id, int *status,
struct proc *p, *q;
pid_t pid;
int error, nfound, ret;
- bool report;
AUDIT_ARG_VALUE((int)idtype); /* XXX - This is likely wrong! */
AUDIT_ARG_PID((pid_t)id); /* XXX - This may be wrong! */
@@ -1293,20 +1410,9 @@ kern_wait6(struct thread *td, idtype_t idtype, id_t id, int *status,
idtype = P_PGID;
}
- /* If we don't know the option, just return. */
- if ((options & ~(WUNTRACED | WNOHANG | WCONTINUED | WNOWAIT |
- WEXITED | WTRAPPED | WLINUXCLONE)) != 0)
- return (EINVAL);
- if ((options & (WEXITED | WUNTRACED | WCONTINUED | WTRAPPED)) == 0) {
- /*
- * We will be unable to find any matching processes,
- * because there are no known events to look for.
- * Prefer to return error instead of blocking
- * indefinitely.
- */
- return (EINVAL);
- }
-
+ error = wait6_checkopt(options);
+ if (error != 0)
+ return (error);
loop:
if (q->p_flag & P_STATCHILD) {
PROC_LOCK(q);
@@ -1342,44 +1448,11 @@ loop_locked:
nfound++;
PROC_LOCK_ASSERT(p, MA_OWNED);
- if ((options & WTRAPPED) != 0 &&
- (p->p_flag & P_TRACED) != 0) {
- PROC_SLOCK(p);
- report =
- ((p->p_flag & (P_STOPPED_TRACE | P_STOPPED_SIG)) &&
- p->p_suspcount == p->p_numthreads &&
- (p->p_flag & P_WAITED) == 0);
- PROC_SUNLOCK(p);
- if (report) {
- CTR4(KTR_PTRACE,
- "wait: returning trapped pid %d status %#x "
- "(xstat %d) xthread %d",
- p->p_pid, W_STOPCODE(p->p_xsig), p->p_xsig,
- p->p_xthread != NULL ?
- p->p_xthread->td_tid : -1);
- report_alive_proc(td, p, siginfo, status,
- options, CLD_TRAPPED);
- return (0);
- }
- }
- if ((options & WUNTRACED) != 0 &&
- (p->p_flag & P_STOPPED_SIG) != 0) {
- PROC_SLOCK(p);
- report = (p->p_suspcount == p->p_numthreads &&
- ((p->p_flag & P_WAITED) == 0));
- PROC_SUNLOCK(p);
- if (report) {
- report_alive_proc(td, p, siginfo, status,
- options, CLD_STOPPED);
- return (0);
- }
- }
- if ((options & WCONTINUED) != 0 &&
- (p->p_flag & P_CONTINUED) != 0) {
- report_alive_proc(td, p, siginfo, status, options,
- CLD_CONTINUED);
+ if (wait6_check_alive(td, options, p, status, siginfo)) {
+ td->td_retval[0] = pid;
return (0);
}
+
PROC_UNLOCK(p);
}
@@ -1412,24 +1485,102 @@ loop_locked:
sx_xunlock(&proctree_lock);
return (ECHILD);
}
- if (options & WNOHANG) {
+ if ((options & WNOHANG) != 0) {
sx_xunlock(&proctree_lock);
td->td_retval[0] = 0;
return (0);
}
PROC_LOCK(q);
- if (q->p_flag & P_STATCHILD) {
+ if ((q->p_flag & P_STATCHILD) != 0) {
q->p_flag &= ~P_STATCHILD;
PROC_UNLOCK(q);
goto loop_locked;
}
sx_xunlock(&proctree_lock);
error = msleep(q, &q->p_mtx, PWAIT | PCATCH | PDROP, "wait", 0);
- if (error)
+ if (error != 0)
return (error);
goto loop;
}
+int
+kern_pdwait(struct thread *td, int fd, int *status,
+ int options, struct __wrusage *wrusage, siginfo_t *siginfo)
+{
+ struct proc *p;
+ struct file *fp;
+ struct procdesc *pd;
+ int error;
+
+ AUDIT_ARG_FD(fd);
+ AUDIT_ARG_VALUE(options);
+
+ error = wait6_checkopt(options);
+ if (error != 0)
+ return (error);
+
+ error = fget(td, fd, &cap_pdwait_rights, &fp);
+ if (error != 0)
+ return (error);
+ if (fp->f_type != DTYPE_PROCDESC) {
+ error = EINVAL;
+ goto exit_unlocked;
+ }
+ pd = fp->f_data;
+
+ for (;;) {
+ /* We own a reference on the procdesc file. */
+ KASSERT((pd->pd_flags & PDF_CLOSED) == 0,
+ ("PDF_CLOSED proc %p procdesc %p pd flags %#x",
+ p, pd, pd->pd_flags));
+
+ sx_xlock(&proctree_lock);
+ p = pd->pd_proc;
+ if (p == NULL) {
+ error = ESRCH;
+ goto exit_tree_locked;
+ }
+ PROC_LOCK(p);
+
+ error = p_canwait(td, p);
+ if (error != 0)
+ break;
+ if ((options & WEXITED) == 0 && p->p_state == PRS_ZOMBIE) {
+ error = ESRCH;
+ break;
+ }
+
+ wait_fill_siginfo(p, siginfo);
+ wait_fill_wrusage(p, wrusage);
+
+ if (p->p_state == PRS_ZOMBIE) {
+ proc_reap(td, p, status, options);
+ goto exit_unlocked;
+ }
+
+ if (wait6_check_alive(td, options, p, status, siginfo))
+ goto exit_unlocked;
+
+ if ((options & WNOHANG) != 0) {
+ error = EWOULDBLOCK;
+ break;
+ }
+
+ PROC_UNLOCK(p);
+ error = sx_sleep(&p->p_procdesc, &proctree_lock,
+ PWAIT | PCATCH | PDROP, "pdwait", 0);
+ if (error != 0)
+ goto exit_unlocked;
+ }
+
+ PROC_UNLOCK(p);
+exit_tree_locked:
+ sx_xunlock(&proctree_lock);
+exit_unlocked:
+ fdrop(fp, td);
+ return (error);
+}
+
void
proc_add_orphan(struct proc *child, struct proc *parent)
{
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 961d72c46d2c..6313bf3bb651 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -34,20 +34,22 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
#include "opt_ktrace.h"
#include "opt_kstack_pages.h"
-#include <sys/param.h>
+#define EXTERR_CATEGORY EXTERR_CAT_FORK
#include <sys/systm.h>
+#include <sys/acct.h>
#include <sys/bitstring.h>
-#include <sys/sysproto.h>
#include <sys/eventhandler.h>
+#include <sys/exterrvar.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
+#include <sys/ktr.h>
+#include <sys/ktrace.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
@@ -60,17 +62,15 @@
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
+#include <sys/sdt.h>
+#include <sys/signalvar.h>
+#include <sys/sx.h>
#include <sys/syscall.h>
+#include <sys/sysent.h>
+#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
-#include <sys/acct.h>
-#include <sys/ktr.h>
-#include <sys/ktrace.h>
#include <sys/unistd.h>
-#include <sys/sdt.h>
-#include <sys/sx.h>
-#include <sys/sysent.h>
-#include <sys/signalvar.h>
#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>
@@ -166,10 +166,11 @@ sys_rfork(struct thread *td, struct rfork_args *uap)
/* Don't allow kernel-only flags. */
if ((uap->flags & RFKERNELONLY) != 0)
- return (EINVAL);
+ return (EXTERROR(EINVAL, "Kernel-only flags %#jx", uap->flags));
/* RFSPAWN must not appear with others */
if ((uap->flags & RFSPAWN) != 0 && uap->flags != RFSPAWN)
- return (EINVAL);
+ return (EXTERROR(EINVAL, "RFSPAWN must be the only flag %#jx",
+ uap->flags));
AUDIT_ARG_FFLAGS(uap->flags);
bzero(&fr, sizeof(fr));
@@ -188,6 +189,48 @@ sys_rfork(struct thread *td, struct rfork_args *uap)
return (error);
}
+int
+sys_pdrfork(struct thread *td, struct pdrfork_args *uap)
+{
+ struct fork_req fr;
+ int error, fd, pid;
+
+ bzero(&fr, sizeof(fr));
+ fd = -1;
+
+ AUDIT_ARG_FFLAGS(uap->pdflags);
+ AUDIT_ARG_CMD(uap->rfflags);
+
+ if ((uap->rfflags & (RFSTOPPED | RFHIGHPID)) != 0)
+ return (EXTERROR(EINVAL,
+ "Kernel-only flags %#jx", uap->rfflags));
+
+ /* RFSPAWN must not appear with others */
+ if ((uap->rfflags & RFSPAWN) != 0) {
+ if (uap->rfflags != RFSPAWN)
+ return (EXTERROR(EINVAL,
+ "RFSPAWN must be the only flag %#jx",
+ uap->rfflags));
+ fr.fr_flags = RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPROCDESC;
+ fr.fr_flags2 = FR2_DROPSIG_CAUGHT;
+ } else {
+ fr.fr_flags = uap->rfflags;
+ }
+
+ fr.fr_pidp = &pid;
+ fr.fr_pd_fd = &fd;
+ fr.fr_pd_flags = uap->pdflags;
+ error = fork1(td, &fr);
+ if (error == 0) {
+ td->td_retval[0] = pid;
+ td->td_retval[1] = 0;
+ if ((fr.fr_flags & (RFPROC | RFPROCDESC)) ==
+ (RFPROC | RFPROCDESC) || uap->rfflags == RFSPAWN)
+ error = copyout(&fd, uap->fdp, sizeof(fd));
+ }
+ return (error);
+}
+
int __exclusive_cache_line nprocs = 1; /* process 0 */
int lastpid = 0;
SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0,
@@ -871,34 +914,32 @@ fork1(struct thread *td, struct fork_req *fr)
else
MPASS(fr->fr_procp == NULL);
- /* Check for the undefined or unimplemented flags. */
if ((flags & ~(RFFLAGS | RFTSIGFLAGS(RFTSIGMASK))) != 0)
- return (EINVAL);
+ return (EXTERROR(EINVAL,
		    "Undefined or unimplemented flags %#jx", flags));
- /* Signal value requires RFTSIGZMB. */
if ((flags & RFTSIGFLAGS(RFTSIGMASK)) != 0 && (flags & RFTSIGZMB) == 0)
- return (EINVAL);
+ return (EXTERROR(EINVAL,
+ "Signal value requires RFTSIGZMB", flags));
- /* Can't copy and clear. */
- if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
- return (EINVAL);
+ if ((flags & (RFFDG | RFCFDG)) == (RFFDG | RFCFDG))
+ return (EXTERROR(EINVAL, "Can not copy and clear"));
- /* Check the validity of the signal number. */
if ((flags & RFTSIGZMB) != 0 && (u_int)RFTSIGNUM(flags) > _SIG_MAXSIG)
- return (EINVAL);
+ return (EXTERROR(EINVAL, "Invalid signal", RFTSIGNUM(flags)));
if ((flags & RFPROCDESC) != 0) {
- /* Can't not create a process yet get a process descriptor. */
if ((flags & RFPROC) == 0)
- return (EINVAL);
+ return (EXTERROR(EINVAL,
			    "Cannot get a process descriptor without creating a process"));
- /* Must provide a place to put a procdesc if creating one. */
if (fr->fr_pd_fd == NULL)
- return (EINVAL);
+ return (EXTERROR(EINVAL,
+ "Must provide a place to put a procdesc if creating one"));
- /* Check if we are using supported flags. */
if ((fr->fr_pd_flags & ~PD_ALLOWED_AT_FORK) != 0)
- return (EINVAL);
+ return (EXTERROR(EINVAL,
			    "Invalid pdflags at fork %#jx", fr->fr_pd_flags));
}
p1 = td->td_proc;
diff --git a/sys/kern/kern_jail.c b/sys/kern/kern_jail.c
index f803be76a70e..5111b98bf221 100644
--- a/sys/kern/kern_jail.c
+++ b/sys/kern/kern_jail.c
@@ -36,6 +36,7 @@
#include <sys/param.h>
#include <sys/types.h>
+#include <sys/ctype.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/errno.h>
@@ -79,6 +80,8 @@
#endif /* DDB */
#include <security/mac/mac_framework.h>
+#include <security/mac/mac_policy.h>
+#include <security/mac/mac_syscalls.h>
#define PRISON0_HOSTUUID_MODULE "hostuuid"
@@ -275,8 +278,17 @@ prison0_init(void)
uint8_t *file, *data;
size_t size;
char buf[sizeof(prison0.pr_hostuuid)];
+#ifdef MAC
+ int error __diagused;
+#endif
bool valid;
+#ifdef MAC
+ error = mac_prison_init(&prison0, M_WAITOK);
+ MPASS(error == 0);
+
+ mtx_unlock(&prison0.pr_mtx);
+#endif
prison0.pr_cpuset = cpuset_ref(thread0.td_cpuset);
prison0.pr_osreldate = osreldate;
strlcpy(prison0.pr_osrelease, osrelease, sizeof(prison0.pr_osrelease));
@@ -1017,6 +1029,10 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
#endif
unsigned long hid;
size_t namelen, onamelen, pnamelen;
+#ifdef MAC
+ void *mac_set_prison_data = NULL;
+ int gotmaclabel;
+#endif
int created, cuflags, descend, drflags, enforce;
int error, errmsg_len, errmsg_pos;
int gotchildmax, gotenforce, gothid, gotrsnum, gotslevel;
@@ -1339,6 +1355,17 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
pr_flags |= PR_HOST;
}
+#ifdef MAC
+ /* Process the mac.label vfsopt */
+ error = mac_set_prison_prepare(td, opts, &mac_set_prison_data);
+ if (error == ENOENT)
+ gotmaclabel = 0;
+ else if (error != 0)
+ goto done_errmsg;
+ else
+ gotmaclabel = 1;
+#endif
+
#ifdef INET
error = vfs_getopt(opts, "ip4.addr", &op, &ip4s);
if (error == ENOENT)
@@ -1692,6 +1719,11 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
/* If there's no prison to update, create a new one and link it in. */
created = pr == NULL;
if (created) {
+#ifdef MAC
+ error = mac_prison_check_create(td->td_ucred, opts, flags);
+ if (error != 0)
+ goto done_deref;
+#endif
for (tpr = mypr; tpr != NULL; tpr = tpr->pr_parent)
if (tpr->pr_childcount >= tpr->pr_childmax) {
error = EPERM;
@@ -1828,7 +1860,14 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
if (error)
goto done_deref;
+#ifdef MAC
+ error = mac_prison_init(pr, M_WAITOK);
+ MPASS(error == 0);
+
+ mtx_assert(&pr->pr_mtx, MA_OWNED);
+#else
mtx_lock(&pr->pr_mtx);
+#endif
drflags |= PD_LOCKED;
} else {
/*
@@ -1839,6 +1878,11 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
prison_hold(pr);
drflags |= PD_DEREF;
}
+#ifdef MAC
+ error = mac_prison_check_set(td->td_ucred, pr, opts, flags);
+ if (error != 0)
+ goto done_deref;
+#endif
#if defined(VIMAGE) && (defined(INET) || defined(INET6))
if ((pr->pr_flags & PR_VNET) &&
(ch_flags & (PR_IP4_USER | PR_IP6_USER))) {
@@ -2155,6 +2199,17 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
}
}
pr->pr_flags = (pr->pr_flags & ~ch_flags) | pr_flags;
+
+#ifdef MAC
+ /* Apply any requested MAC label before we let modules do their work. */
+ if (gotmaclabel) {
+ error = mac_set_prison_core(td, pr, mac_set_prison_data);
+ if (error) {
+ vfs_opterror(opts, "mac relabel denied");
+ goto done_deref;
+ }
+ }
+#endif
mtx_unlock(&pr->pr_mtx);
drflags &= ~PD_LOCKED;
/*
@@ -2230,6 +2285,13 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
if (created) {
sx_assert(&allprison_lock, SX_XLOCKED);
prison_knote(ppr, NOTE_JAIL_CHILD | pr->pr_id);
+#ifdef MAC
+ /*
+ * Note that mac_prison_created() assumes that it's called in a
+ * sleepable context.
+ */
+ mac_prison_created(td->td_ucred, pr);
+#endif
mtx_lock(&pr->pr_mtx);
drflags |= PD_LOCKED;
pr->pr_state = PRISON_STATE_ALIVE;
@@ -2237,6 +2299,14 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
/* Attach this process to the prison if requested. */
if (flags & JAIL_ATTACH) {
+#ifdef MAC
+ error = mac_prison_check_attach(td->td_ucred, pr);
+ if (error != 0) {
+ vfs_opterror(opts,
+ "attach operation denied by MAC policy");
+ goto done_deref;
+ }
+#endif
error = do_jail_attach(td, pr,
prison_lock_xlock(pr, drflags & PD_LOCK_FLAGS));
drflags &= ~(PD_LOCKED | PD_LIST_XLOCKED);
@@ -2329,6 +2399,10 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
#ifdef INET6
prison_ip_free(ip6);
#endif
+#ifdef MAC
+ if (mac_set_prison_data != NULL)
+ mac_set_prison_finish(td, error == 0, mac_set_prison_data);
+#endif
if (jfp_out != NULL)
fdrop(jfp_out, td);
if (error && jfd_out >= 0)
@@ -2540,12 +2614,6 @@ kern_jail_get(struct thread *td, struct uio *optuio, int flags)
drflags |= PD_DEREF;
mtx_lock(&pr->pr_mtx);
drflags |= PD_LOCKED;
- if (!(prison_isalive(pr) || (flags & JAIL_DYING))) {
- error = ENOENT;
- vfs_opterror(opts, "jail %d is dying",
- pr->pr_id);
- goto done;
- }
goto found_prison;
}
if (flags & JAIL_AT_DESC) {
@@ -2577,7 +2645,29 @@ kern_jail_get(struct thread *td, struct uio *optuio, int flags)
prison_ischild(mypr, pr)) {
mtx_lock(&pr->pr_mtx);
drflags |= PD_LOCKED;
+#ifdef MAC
+ /*
+ * We special-case this one check because we
+ * don't want MAC to break jail enumeration. We
+ * need to just move on to the next accessible
+ * and alive prison.
+ */
+ error = mac_prison_check_get(td->td_ucred, pr,
+ opts, flags);
+ if (error != 0) {
+ mtx_unlock(&pr->pr_mtx);
+ drflags &= ~PD_LOCKED;
+ continue;
+ }
+
+ /*
+ * Avoid potentially expensive trip back into
+ * the MAC framework.
+ */
+ goto found_prison_nomac_alive;
+#else
goto found_prison;
+#endif
}
}
error = ENOENT;
@@ -2592,13 +2682,6 @@ kern_jail_get(struct thread *td, struct uio *optuio, int flags)
pr = prison_find_child(mypr, jid);
if (pr != NULL) {
drflags |= PD_LOCKED;
- if (!(prison_isalive(pr) ||
- (flags & JAIL_DYING))) {
- error = ENOENT;
- vfs_opterror(opts, "jail %d is dying",
- jid);
- goto done;
- }
goto found_prison;
}
error = ENOENT;
@@ -2617,12 +2700,6 @@ kern_jail_get(struct thread *td, struct uio *optuio, int flags)
pr = prison_find_name(mypr, name);
if (pr != NULL) {
drflags |= PD_LOCKED;
- if (!(prison_isalive(pr) || (flags & JAIL_DYING))) {
- error = ENOENT;
- vfs_opterror(opts, "jail \"%s\" is dying",
- name);
- goto done;
- }
goto found_prison;
}
error = ENOENT;
@@ -2636,6 +2713,25 @@ kern_jail_get(struct thread *td, struct uio *optuio, int flags)
goto done;
found_prison:
+#ifdef MAC
+ error = mac_prison_check_get(td->td_ucred, pr, opts, flags);
+ if (error != 0)
+ goto done;
+#endif
+ if (!(prison_isalive(pr) || (flags & JAIL_DYING))) {
+ error = ENOENT;
+ if (pr->pr_name[0] != '0' && isdigit(pr->pr_name[0])) {
+ vfs_opterror(opts, "jail %d is dying",
+ pr->pr_id);
+ } else {
+ vfs_opterror(opts, "jail \"%s\" (%d) is dying",
+ pr->pr_name, pr->pr_id);
+ }
+ goto done;
+ }
+#ifdef MAC
+ found_prison_nomac_alive:
+#endif
/* Get the parameters of the prison. */
if (!(drflags & PD_DEREF)) {
prison_hold(pr);
@@ -2771,9 +2867,22 @@ kern_jail_get(struct thread *td, struct uio *optuio, int flags)
if (error != 0 && error != ENOENT)
goto done;
- /* Get the module parameters. */
+#ifdef MAC
+ /*
+ * We get the MAC label last because we'll let the MAC framework drop
+ * pr_mtx to externalize the label.
+ */
+ error = mac_get_prison(td, pr, opts);
+ mtx_assert(&pr->pr_mtx, MA_NOTOWNED);
+ drflags &= ~PD_LOCKED;
+ if (error != 0 && error != ENOENT)
+ goto done;
+#else
mtx_unlock(&pr->pr_mtx);
drflags &= ~PD_LOCKED;
+#endif
+
+ /* Get the module parameters. */
error = osd_jail_call(pr, PR_METHOD_GET, opts);
if (error)
goto done;
@@ -2875,6 +2984,14 @@ sys_jail_remove(struct thread *td, struct jail_remove_args *uap)
sx_xunlock(&allprison_lock);
return (EINVAL);
}
+#ifdef MAC
+ error = mac_prison_check_remove(td->td_ucred, pr);
+ if (error != 0) {
+ mtx_unlock(&pr->pr_mtx);
+ sx_xunlock(&allprison_lock);
+ return (error);
+ }
+#endif
prison_hold(pr);
prison_remove(pr);
return (0);
@@ -2897,6 +3014,10 @@ sys_jail_remove_jd(struct thread *td, struct jail_remove_jd_args *uap)
return (error);
error = priv_check_cred(jdcred, PRIV_JAIL_REMOVE);
crfree(jdcred);
+#ifdef MAC
+ if (error == 0)
+ error = mac_prison_check_remove(td->td_ucred, pr);
+#endif
if (error) {
prison_free(pr);
return (error);
@@ -2941,14 +3062,25 @@ sys_jail_attach(struct thread *td, struct jail_attach_args *uap)
return (EINVAL);
}
+#ifdef MAC
+ error = mac_prison_check_attach(td->td_ucred, pr);
+ if (error != 0)
+ goto unlock;
+#endif
+
/* Do not allow a process to attach to a prison that is not alive. */
if (!prison_isalive(pr)) {
- mtx_unlock(&pr->pr_mtx);
- sx_sunlock(&allprison_lock);
- return (EINVAL);
+ error = EINVAL;
+ goto unlock;
}
return (do_jail_attach(td, pr, PD_LOCKED | PD_LIST_SLOCKED));
+
+unlock:
+
+ mtx_unlock(&pr->pr_mtx);
+ sx_sunlock(&allprison_lock);
+ return (error);
}
/*
@@ -2970,6 +3102,10 @@ sys_jail_attach_jd(struct thread *td, struct jail_attach_jd_args *uap)
goto fail;
drflags |= PD_DEREF;
error = priv_check_cred(jdcred, PRIV_JAIL_ATTACH);
+#ifdef MAC
+ if (error == 0)
+ error = mac_prison_check_attach(td->td_ucred, pr);
+#endif
crfree(jdcred);
if (error)
goto fail;
@@ -3070,6 +3206,13 @@ do_jail_attach(struct thread *td, struct prison *pr, int drflags)
prison_deref(oldcred->cr_prison, drflags);
crfree(oldcred);
prison_knote(pr, NOTE_JAIL_ATTACH | td->td_proc->p_pid);
+#ifdef MAC
+ /*
+ * Note that mac_prison_attached() assumes that it's called in a
+ * sleepable context.
+ */
+ mac_prison_attached(td->td_ucred, pr, td->td_proc);
+#endif
/*
* If the prison was killed while changing credentials, die along
@@ -3540,6 +3683,16 @@ prison_deref(struct prison *pr, int flags)
KASSERT(
refcount_load(&prison0.pr_ref) != 0,
("prison0 pr_ref=0"));
+#ifdef MAC
+ /*
+ * The MAC framework will call into any
+ * policies that want to hook
+ * prison_destroy_label, so ideally we
+ * call this prior to any final state
+ * invalidation to be safe.
+ */
+ mac_prison_destroy(pr);
+#endif
pr->pr_state = PRISON_STATE_INVALID;
TAILQ_REMOVE(&allprison, pr, pr_list);
LIST_REMOVE(pr, pr_sibling);
@@ -4999,6 +5152,11 @@ SYSCTL_JAIL_PARAM(_host, hostid, CTLTYPE_ULONG | CTLFLAG_RW,
SYSCTL_JAIL_PARAM_NODE(cpuset, "Jail cpuset");
SYSCTL_JAIL_PARAM(_cpuset, id, CTLTYPE_INT | CTLFLAG_RD, "I", "Jail cpuset ID");
+#ifdef MAC
+SYSCTL_JAIL_PARAM_STRUCT(_mac, label, CTLFLAG_RW, sizeof(struct mac),
+ "S,mac", "Jail MAC label");
+#endif
+
#ifdef INET
SYSCTL_JAIL_PARAM_SYS_NODE(ip4, CTLFLAG_RDTUN,
"Jail IPv4 address virtualization");
diff --git a/sys/kern/kern_jaildesc.c b/sys/kern/kern_jaildesc.c
index f4e31801201f..80d0f3d07d7c 100644
--- a/sys/kern/kern_jaildesc.c
+++ b/sys/kern/kern_jaildesc.c
@@ -72,42 +72,66 @@ static const struct fileops jaildesc_ops = {
};
/*
- * Given a jail descriptor number, return its prison and/or its
- * credential. They are returned held, and will need to be released
- * by the caller.
+ * Retrieve a prison from a jail descriptor. If prp is not NULL, then the
+ * prison will be held and subsequently returned, and must be released by the
+ * caller. This differs from jaildesc_get_prison in that it doesn't actually
+ * require the caller to take the struct prison, which we use internally when
+ * the caller doesn't necessarily need it; it might just want to check validity.
*/
-int
-jaildesc_find(struct thread *td, int fd, struct prison **prp,
- struct ucred **ucredp)
+static int
+jaildesc_get_prison_impl(struct file *fp, struct prison **prp)
{
- struct file *fp;
- struct jaildesc *jd;
struct prison *pr;
- int error;
+ struct jaildesc *jd;
+
+ if (fp->f_type != DTYPE_JAILDESC)
+ return (EINVAL);
- error = fget(td, fd, &cap_no_rights, &fp);
- if (error != 0)
- return (error);
- if (fp->f_type != DTYPE_JAILDESC) {
- error = EINVAL;
- goto out;
- }
jd = fp->f_data;
JAILDESC_LOCK(jd);
pr = jd->jd_prison;
if (pr == NULL || !prison_isvalid(pr)) {
- error = ENOENT;
JAILDESC_UNLOCK(jd);
- goto out;
+ return (ENOENT);
}
+
if (prp != NULL) {
prison_hold(pr);
*prp = pr;
}
+
JAILDESC_UNLOCK(jd);
- if (ucredp != NULL)
- *ucredp = crhold(fp->f_cred);
- out:
+
+ return (0);
+}
+
+/*
+ * Given a jail descriptor number, return its prison and/or its
+ * credential. They are returned held, and will need to be released
+ * by the caller.
+ */
+int
+jaildesc_find(struct thread *td, int fd, struct prison **prp,
+ struct ucred **ucredp)
+{
+ struct file *fp;
+ int error;
+
+ error = fget(td, fd, &cap_no_rights, &fp);
+ if (error != 0)
+ return (error);
+
+ error = jaildesc_get_prison_impl(fp, prp);
+ if (error == 0) {
+ /*
+ * jaildesc_get_prison validated the file and held the prison
+ * for us if the caller wants it, so we just need to grab the
+ * ucred on the way out.
+ */
+ if (ucredp != NULL)
+ *ucredp = crhold(fp->f_cred);
+ }
+
fdrop(fp, td);
return (error);
}
@@ -146,6 +170,17 @@ jaildesc_alloc(struct thread *td, struct file **fpp, int *fdp, int owning)
}
/*
+ * Retrieve a prison from a jail descriptor. It will be returned held, and must
+ * be released by the caller.
+ */
+int
+jaildesc_get_prison(struct file *fp, struct prison **prp)
+{
+ MPASS(prp != NULL);
+ return (jaildesc_get_prison_impl(fp, prp));
+}
+
+/*
* Assocate a jail descriptor with its prison.
*/
void
diff --git a/sys/kern/kern_ktrace.c b/sys/kern/kern_ktrace.c
index 15a8a4ab4fa4..b58e69a3f38e 100644
--- a/sys/kern/kern_ktrace.c
+++ b/sys/kern/kern_ktrace.c
@@ -373,11 +373,17 @@ ktr_getrequest(int type)
static void
ktr_enqueuerequest(struct thread *td, struct ktr_request *req)
{
+ bool sched_ast;
mtx_lock(&ktrace_mtx);
- STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
+ sched_ast = td->td_proc->p_ktrioparms != NULL;
+ if (sched_ast)
+ STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
+ else
+ ktr_freerequest_locked(req);
mtx_unlock(&ktrace_mtx);
- ast_sched(td, TDA_KTRACE);
+ if (sched_ast)
+ ast_sched(td, TDA_KTRACE);
}
/*
diff --git a/sys/kern/kern_sendfile.c b/sys/kern/kern_sendfile.c
index 6e924f70b2ab..a4178f5b9b2d 100644
--- a/sys/kern/kern_sendfile.c
+++ b/sys/kern/kern_sendfile.c
@@ -518,8 +518,7 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off,
static int
sendfile_getobj(struct thread *td, struct file *fp, vm_object_t *obj_res,
- struct vnode **vp_res, struct shmfd **shmfd_res, off_t *obj_size,
- int *bsize)
+ struct vnode **vp_res, struct shmfd **shmfd_res, off_t *obj_size)
{
vm_object_t obj;
struct vnode *vp;
@@ -530,7 +529,6 @@ sendfile_getobj(struct thread *td, struct file *fp, vm_object_t *obj_res,
vp = *vp_res = NULL;
obj = NULL;
shmfd = *shmfd_res = NULL;
- *bsize = 0;
/*
* The file descriptor must be a regular file and have a
@@ -543,7 +541,6 @@ sendfile_getobj(struct thread *td, struct file *fp, vm_object_t *obj_res,
error = EINVAL;
goto out;
}
- *bsize = vp->v_mount->mnt_stat.f_iosize;
obj = vp->v_object;
if (obj == NULL) {
error = EINVAL;
@@ -717,7 +714,7 @@ vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
struct shmfd *shmfd;
struct vattr va;
off_t off, sbytes, rem, obj_size, nobj_size;
- int bsize, error, ext_pgs_idx, hdrlen, max_pgs, softerr;
+ int error, ext_pgs_idx, hdrlen, max_pgs, softerr;
#ifdef KERN_TLS
int tls_enq_cnt;
#endif
@@ -733,7 +730,7 @@ vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
softerr = 0;
use_ext_pgs = false;
- error = sendfile_getobj(td, fp, &obj, &vp, &shmfd, &obj_size, &bsize);
+ error = sendfile_getobj(td, fp, &obj, &vp, &shmfd, &obj_size);
if (error != 0)
goto out;
diff --git a/sys/kern/kern_time.c b/sys/kern/kern_time.c
index 5b7485c25cd7..0c16045ca610 100644
--- a/sys/kern/kern_time.c
+++ b/sys/kern/kern_time.c
@@ -1183,6 +1183,7 @@ itimer_start(void *dummy __unused)
NULL, NULL, itimer_init, itimer_fini, UMA_ALIGN_PTR, 0);
register_posix_clock(CLOCK_REALTIME, &rt_clock);
register_posix_clock(CLOCK_MONOTONIC, &rt_clock);
+ register_posix_clock(CLOCK_UPTIME, &rt_clock);
register_posix_clock(CLOCK_TAI, &rt_clock);
p31b_setcfg(CTL_P1003_1B_TIMERS, 200112L);
p31b_setcfg(CTL_P1003_1B_DELAYTIMER_MAX, INT_MAX);
diff --git a/sys/kern/subr_bus.c b/sys/kern/subr_bus.c
index d2363d6c625e..c9d64e3674c6 100644
--- a/sys/kern/subr_bus.c
+++ b/sys/kern/subr_bus.c
@@ -1557,7 +1557,7 @@ device_delete_child(device_t dev, device_t child)
return (error);
}
- device_destroy_props(dev);
+ device_destroy_props(child);
if (child->devclass)
devclass_delete_device(child->devclass, child);
if (child->parent)
diff --git a/sys/kern/subr_capability.c b/sys/kern/subr_capability.c
index 5ad5b0af1681..6e23525186ea 100644
--- a/sys/kern/subr_capability.c
+++ b/sys/kern/subr_capability.c
@@ -90,6 +90,7 @@ const cap_rights_t cap_mkfifoat_rights = CAP_RIGHTS_INITIALIZER(CAP_MKFIFOAT);
const cap_rights_t cap_mknodat_rights = CAP_RIGHTS_INITIALIZER(CAP_MKNODAT);
const cap_rights_t cap_pdgetpid_rights = CAP_RIGHTS_INITIALIZER(CAP_PDGETPID);
const cap_rights_t cap_pdkill_rights = CAP_RIGHTS_INITIALIZER(CAP_PDKILL);
+const cap_rights_t cap_pdwait_rights = CAP_RIGHTS_INITIALIZER(CAP_PDWAIT);
const cap_rights_t cap_pread_rights = CAP_RIGHTS_INITIALIZER(CAP_PREAD);
const cap_rights_t cap_pwrite_rights = CAP_RIGHTS_INITIALIZER(CAP_PWRITE);
const cap_rights_t cap_read_rights = CAP_RIGHTS_INITIALIZER(CAP_READ);
diff --git a/sys/kern/subr_devstat.c b/sys/kern/subr_devstat.c
index c4d0223d484f..c62df0e210e1 100644
--- a/sys/kern/subr_devstat.c
+++ b/sys/kern/subr_devstat.c
@@ -43,6 +43,10 @@
#include <vm/vm.h>
#include <vm/pmap.h>
+#ifdef COMPAT_FREEBSD32
+#include <compat/freebsd32/freebsd32.h>
+#endif
+
#include <machine/atomic.h>
SDT_PROVIDER_DEFINE(io);
@@ -398,25 +402,63 @@ sysctl_devstat(SYSCTL_HANDLER_ARGS)
*/
mygen = devstat_generation;
- error = SYSCTL_OUT(req, &mygen, sizeof(mygen));
-
- if (devstat_num_devs == 0)
- return(0);
+#ifdef COMPAT_FREEBSD32
+ if ((req->flags & SCTL_MASK32) != 0) {
+ int32_t mygen32 = (int32_t)mygen;
+ error = SYSCTL_OUT(req, &mygen32, sizeof(mygen32));
+ } else
+#endif /* COMPAT_FREEBSD32 */
+ error = SYSCTL_OUT(req, &mygen, sizeof(mygen));
if (error != 0)
return (error);
+ if (devstat_num_devs == 0)
+ return(0);
+
mtx_lock(&devstat_mutex);
nds = STAILQ_FIRST(&device_statq);
if (mygen != devstat_generation)
error = EBUSY;
mtx_unlock(&devstat_mutex);
-
if (error != 0)
return (error);
while (nds != NULL) {
- error = SYSCTL_OUT(req, nds, sizeof(struct devstat));
+#ifdef COMPAT_FREEBSD32
+ if ((req->flags & SCTL_MASK32) != 0) {
+ struct devstat32 ds32;
+ unsigned int i;
+
+ CP(*nds, ds32, sequence0);
+ CP(*nds, ds32, allocated);
+ CP(*nds, ds32, start_count);
+ CP(*nds, ds32, end_count);
+ BT_CP(*nds, ds32, busy_from);
+ PTROUT_CP(*nds, ds32, dev_links.stqe_next);
+ CP(*nds, ds32, device_number);
+ strcpy(ds32.device_name, nds->device_name);
+ CP(*nds, ds32, unit_number);
+ for (i = 0; i < DEVSTAT_N_TRANS_FLAGS; i++) {
+ FU64_CP(*nds, ds32, bytes[i]);
+ FU64_CP(*nds, ds32, operations[i]);
+ BT_CP(*nds, ds32, duration[i]);
+ }
+ BT_CP(*nds, ds32, busy_time);
+ BT_CP(*nds, ds32, creation_time);
+ CP(*nds, ds32, block_size);
+ for (i = 0; i < nitems(ds32.tag_types); i++) {
+ FU64_CP(*nds, ds32, tag_types[i]);
+ }
+ CP(*nds, ds32, flags);
+ CP(*nds, ds32, device_type);
+ CP(*nds, ds32, priority);
+ PTROUT_CP(*nds, ds32, id);
+ CP(*nds, ds32, sequence1);
+ error = SYSCTL_OUT(req, &ds32, sizeof(ds32));
+ } else
+#endif /* COMPAT_FREEBSD32 */
+ error = SYSCTL_OUT(req, nds, sizeof(*nds));
if (error != 0)
return (error);
mtx_lock(&devstat_mutex);
@@ -428,7 +470,7 @@ sysctl_devstat(SYSCTL_HANDLER_ARGS)
if (error != 0)
return (error);
}
- return(error);
+ return (error);
}
/*
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index 9f5106316018..353a69435971 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -625,6 +625,19 @@ smp_rendezvous_cpus(cpuset_t map,
}
void
+smp_rendezvous_cpu(u_int cpuid,
+ void (* setup_func)(void *),
+ void (* action_func)(void *),
+ void (* teardown_func)(void *),
+ void *arg)
+{
+ cpuset_t set;
+
+ CPU_SETOF(cpuid, &set);
+ smp_rendezvous_cpus(set, setup_func, action_func, teardown_func, arg);
+}
+
+void
smp_rendezvous(void (* setup_func)(void *),
void (* action_func)(void *),
void (* teardown_func)(void *),
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index abed76315c34..7437a7e238f0 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -168,7 +168,7 @@
#define WITNESS_RELATED_MASK (WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
#define WITNESS_REVERSAL 0x10 /* A lock order reversal has been observed. */
#define WITNESS_RESERVED1 0x20 /* Unused flag, reserved. */
-#define WITNESS_RESERVED2 0x40 /* Unused flag, reserved. */
+#define WITNESS_ORDER_LISTS 0x40 /* Relationship set in order_lists[]. */
#define WITNESS_LOCK_ORDER_KNOWN 0x80 /* This lock order is known. */
/* Descendant to ancestor flags */
@@ -390,12 +390,16 @@ SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RWTUN, &witness_kdb, 0, "");
#if defined(DDB) || defined(KDB)
/*
- * When DDB or KDB is enabled and witness_trace is 1, it will cause the system
- * to print a stack trace:
+ * When DDB or KDB is enabled and witness_trace is > 0, it will cause the system
+ * to print a stack trace when:
* - a lock hierarchy violation occurs
* - locks are held when going to sleep.
+ *
+ * Additionally, if witness_trace is 2, it will cause the system to search
+ * for all locks which established the known lock ordering and print
+ * stack traces of where the lock ordering was first established.
*/
-int witness_trace = 1;
+int witness_trace = 2;
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RWTUN, &witness_trace, 0, "");
#endif /* DDB || KDB */
@@ -854,6 +858,10 @@ witness_startup(void *mem)
continue;
w1->w_file = "order list";
itismychild(w, w1);
+ w_rmatrix[w->w_index][w1->w_index] |=
+ WITNESS_ORDER_LISTS;
+ w_rmatrix[w1->w_index][w->w_index] |=
+ WITNESS_ORDER_LISTS;
w = w1;
}
}
@@ -1075,6 +1083,240 @@ witness_ddb_display(int(*prnt)(const char *fmt, ...))
}
#endif /* DDB */
+#define NUM_VERBOSE_STACKS 256
+#define MAX_LOCKCHAIN_RECURSION 32
+
+/*
+ * Struct used by the verbose witness functionality. Only sb, generation,
+ * pairs, pair_count, check_generation, and alloc_flags communicate data
+ * between multiple functions. The rest are used to pre-allocate space for
+ * data which would otherwise end up on the stack.
+ */
+struct verbose_tracker {
+ struct witness t_w1, t_w2;
+ struct stack t_stack;
+ struct sbuf *sb;
+ int generation;
+ int alloc_flags;
+ int pairs[2 * NUM_VERBOSE_STACKS];
+ int pair_count;
+ int recursion_list[MAX_LOCKCHAIN_RECURSION];
+ int found[MAX_LOCKCHAIN_RECURSION + 1];
+ int iter[MAX_LOCKCHAIN_RECURSION];
+ bool check_generation;
+};
+
+static void
+init_verbose_tracker(struct verbose_tracker *t, struct sbuf *sb,
+ int alloc_flags, bool check_generation)
+{
+
+ KASSERT(t != NULL,
+ ("%s: NULL t argument", __func__));
+ KASSERT(alloc_flags == M_WAITOK || alloc_flags == M_NOWAIT,
+ ("%s: Unexpected alloc_flags %d", __func__, alloc_flags));
+ t->sb = sb;
+ t->check_generation = check_generation;
+ t->alloc_flags = alloc_flags;
+}
+
+static void
+reset_verbose_tracker(struct verbose_tracker *t, int generation)
+{
+
+ KASSERT(t != NULL,
+ ("%s: NULL t argument", __func__));
+ t->pair_count = 0;
+ t->generation = generation;
+}
+
+static bool
+has_verbose_lockpair(const struct verbose_tracker *t, int from, int to)
+{
+ int i;
+
+ /* Look for value. */
+ for (i = 0; i < (2 * t->pair_count); i += 2)
+ if (t->pairs[i] == from && t->pairs[i + 1] == to)
+ return (true);
+ return (false);
+}
+
+static void
+add_verbose_lockpair(struct verbose_tracker *t, int from, int to)
+{
+
+ /* Check for duplicates. */
+ if (has_verbose_lockpair(t, from, to))
+ return;
+
+ /* Add a new value. */
+ if (t->pair_count < NUM_VERBOSE_STACKS) {
+ t->pairs[t->pair_count * 2] = from;
+ t->pairs[(t->pair_count * 2) + 1] = to;
+ t->pair_count++;
+ }
+}
+
+static void
+sbuf_print_verbose_witness_chains(struct verbose_tracker *t, int from, int to)
+{
+ struct witness *w1, *w2;
+ int i, recursion_count;
+
+ recursion_count = 0;
+
+ mtx_lock_spin(&w_mtx);
+ if (t->check_generation && t->generation != w_generation) {
+ mtx_unlock_spin(&w_mtx);
+
+ /*
+ * The graph has changed. Break the recursion loop.
+ * The calling function should figure out what happened and
+ * restart.
+ */
+ return;
+ }
+
+top:
+ t->found[recursion_count] = 0;
+
+ /*
+ * Check for a direct dependence. If so, print that here.
+ * However, we keep scanning just in case there are other
+ * locking paths between these two locks.
+ */
+ w1 = &w_data[from];
+ w2 = &w_data[to];
+ if (isitmychild(w1, w2)) {
+ t->t_w1 = *w1;
+ t->t_w2 = *w2;
+ mtx_unlock_spin(&w_mtx);
+
+ sbuf_printf(t->sb, "\"%s\" -> \"%s\"",
+ t->t_w1.w_name, t->t_w2.w_name);
+
+ /* Add the lockchain which got us here. */
+ KASSERT(recursion_count >= 0 &&
+ recursion_count <= MAX_LOCKCHAIN_RECURSION,
+ ("Invalid recursion_count: %d", recursion_count));
+ for (i = recursion_count - 1; i >= 0; i--) {
+ mtx_lock_spin(&w_mtx);
+ if (t->check_generation &&
+ t->generation != w_generation) {
+ mtx_unlock_spin(&w_mtx);
+ /* The graph has changed. */
+ return;
+ }
+ /*
+ * Make a local copy, drop the lock, and add the lock
+ * to the sbuf.
+ */
+ t->t_w1 = w_data[t->recursion_list[i]];
+ mtx_unlock_spin(&w_mtx);
+ sbuf_printf(t->sb, " -> \"%s\"", t->t_w1.w_name);
+ }
+
+ sbuf_putc(t->sb, '\n');
+ add_verbose_lockpair(t, from, to);
+ t->found[recursion_count]++;
+
+ mtx_lock_spin(&w_mtx);
+ if (t->check_generation && t->generation != w_generation) {
+ mtx_unlock_spin(&w_mtx);
+ return;
+ }
+ }
+
+ /*
+ * Ensure we aren't recursing too many times. We do this check
+ * after looking for direct dependencies so we don't fail to
+ * catch at least those at the limits of our recursion.
+ */
+ if (recursion_count >= MAX_LOCKCHAIN_RECURSION)
+ goto end;
+
+ /*
+ * Record our 'to' lock on the recursion list. We will use this
+ * to build successful lock chains later.
+ */
+ t->recursion_list[recursion_count] = to;
+ t->iter[recursion_count] = 1;
+
+loop:
+ /* Walk all parents of 'to' to see if any have a path to 'from'. */
+ for (; t->iter[recursion_count] < w_max_used_index;
+ t->iter[recursion_count]++) {
+ if (t->iter[recursion_count] == to ||
+ t->iter[recursion_count] == from)
+ continue;
+ if (isitmychild(&w_data[t->iter[recursion_count]],
+ &w_data[to])) {
+ /* Recurse to the parent. */
+ to = t->iter[recursion_count];
+ recursion_count++;
+ goto top;
+ }
+ }
+end:
+ if (recursion_count != 0) {
+ recursion_count--;
+ to = t->recursion_list[recursion_count];
+ if (t->found[recursion_count + 1] > 0) {
+ add_verbose_lockpair(t, t->iter[recursion_count], to);
+ t->found[recursion_count]++;
+ }
+ t->iter[recursion_count]++;
+ goto loop;
+ }
+ mtx_unlock_spin(&w_mtx);
+}
+
+static void
+sbuf_print_verbose_witness_stacks(struct verbose_tracker *t)
+{
+ struct witness_lock_order_data *data;
+ int i;
+ bool hardcoded;
+
+ for (i = 0; i < (2 * t->pair_count); i += 2) {
+ mtx_lock_spin(&w_mtx);
+ if (t->check_generation && t->generation != w_generation) {
+ /*
+ * The graph has changed. Return to the calling
+ * function so it can restart.
+ */
+ mtx_unlock_spin(&w_mtx);
+ break;
+ }
+
+ /*
+ * Make a local copy of the data we need so we can drop
+ * the lock.
+ */
+ t->t_w1 = w_data[t->pairs[i]];
+ t->t_w2 = w_data[t->pairs[i + 1]];
+ data = witness_lock_order_get(&t->t_w1, &t->t_w2);
+ if (data != NULL)
+ stack_copy(&data->wlod_stack, &t->t_stack);
+ hardcoded = (w_rmatrix[t->pairs[i]][t->pairs[i + 1]] &
+ WITNESS_ORDER_LISTS) == WITNESS_ORDER_LISTS;
+ mtx_unlock_spin(&w_mtx);
+
+ sbuf_printf(t->sb,
+ "%slock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
+ hardcoded ? "hardcoded " : "",
+ t->t_w1.w_name, t->t_w1.w_class->lc_name,
+ t->t_w2.w_name, t->t_w2.w_class->lc_name);
+ if (data != NULL)
+ stack_sbuf_print_flags(t->sb, &t->t_stack,
+ t->alloc_flags, STACK_SBUF_FMT_LONG);
+ else
+ sbuf_printf(t->sb, "(No stack trace)\n");
+ sbuf_putc(t->sb, '\n');
+ }
+}
+
int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{
@@ -1117,6 +1359,7 @@ witness_checkorder(struct lock_object *lock, int flags, const char *file,
struct witness *w, *w1;
struct thread *td;
int i, j;
+ bool print_lock_order;
if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
KERNEL_PANICKED())
@@ -1279,7 +1522,8 @@ witness_checkorder(struct lock_object *lock, int flags, const char *file,
for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
for (i = lle->ll_count - 1; i >= 0; i--, j++) {
struct stack pstack;
- bool pstackv, trace;
+ int trace;
+ bool pstackv;
MPASS(j < LOCK_CHILDCOUNT * LOCK_NCHILDREN);
lock1 = &lle->ll_children[i];
@@ -1396,6 +1640,7 @@ witness_checkorder(struct lock_object *lock, int flags, const char *file,
/*
* Ok, yell about it.
*/
+ print_lock_order = false;
if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
(flags & LOP_NOSLEEP) == 0 &&
(lock1->li_flags & LI_SLEEPABLE) == 0)
@@ -1405,8 +1650,10 @@ witness_checkorder(struct lock_object *lock, int flags, const char *file,
&& lock == &Giant.lock_object)
witness_output(
"lock order reversal: (Giant after non-sleepable)\n");
- else
+ else {
witness_output("lock order reversal:\n");
+ print_lock_order = true;
+ }
/*
* Try to locate an earlier lock with
@@ -1455,6 +1702,7 @@ witness_checkorder(struct lock_object *lock, int flags, const char *file,
if (trace) {
char buf[64];
struct sbuf sb;
+ struct verbose_tracker *t;
sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
sbuf_set_drain(&sb, witness_output_drain,
@@ -1466,6 +1714,37 @@ witness_checkorder(struct lock_object *lock, int flags, const char *file,
w->w_name, w1->w_name);
stack_sbuf_print_flags(&sb, &pstack,
M_NOWAIT, STACK_SBUF_FMT_LONG);
+ } else if (trace > 1 && print_lock_order &&
+ (t = malloc(sizeof(struct verbose_tracker),
+ M_TEMP, M_NOWAIT | M_ZERO)) != NULL) {
+ /*
+ * We make a purposeful decision to
+ * ignore generation changes while
+ * printing. The two locks in
+ * question are in use, so won't be
+ * going away. There is a small
+ * chance that intermediate locks
+ * in a lock chain get destroyed
+ * while we are traversing the
+ * chain or printing them, but even
+ * then nothing "bad" should happen
+ * with the current code since the
+ * WITNESS objects are not actually
+ * freed and re-used. If that changes,
+ * we might need to reassess the
+ * decision to ignore generation.
+ */
+ init_verbose_tracker(t, &sb, M_NOWAIT,
+ false);
+ reset_verbose_tracker(t, 0);
+ sbuf_printf(&sb,
+ "All lock orders from %s -> %s:\n",
+ w->w_name, w1->w_name);
+ sbuf_print_verbose_witness_chains(t,
+ w->w_index, w1->w_index);
+ sbuf_putc(&sb, '\n');
+ sbuf_print_verbose_witness_stacks(t);
+ free(t, M_TEMP);
}
sbuf_printf(&sb,
@@ -2645,16 +2924,14 @@ DB_SHOW_COMMAND_FLAGS(witness, db_witness_display, DB_CMD_MEMSAFE)
#endif
static void
-sbuf_print_witness_badstacks(struct sbuf *sb, size_t *oldidx)
+sbuf_print_witness_badstacks(struct sbuf *sb, size_t *oldidx,
+ bool check_generation)
{
struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
struct witness *tmp_w1, *tmp_w2, *w1, *w2;
+ struct verbose_tracker *t;
int generation, i, j;
-
- tmp_data1 = NULL;
- tmp_data2 = NULL;
- tmp_w1 = NULL;
- tmp_w2 = NULL;
+ bool w1_is_parent, w2_is_parent;
/* Allocate and init temporary storage space. */
tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
@@ -2665,16 +2942,19 @@ sbuf_print_witness_badstacks(struct sbuf *sb, size_t *oldidx)
M_WAITOK | M_ZERO);
stack_zero(&tmp_data1->wlod_stack);
stack_zero(&tmp_data2->wlod_stack);
+ t = malloc(sizeof(struct verbose_tracker), M_TEMP, M_WAITOK | M_ZERO);
+ init_verbose_tracker(t, sb, M_WAITOK, check_generation);
restart:
mtx_lock_spin(&w_mtx);
generation = w_generation;
mtx_unlock_spin(&w_mtx);
+ reset_verbose_tracker(t, generation);
sbuf_printf(sb, "Number of known direct relationships is %d\n",
w_lohash.wloh_count);
for (i = 1; i < w_max_used_index; i++) {
mtx_lock_spin(&w_mtx);
- if (generation != w_generation) {
+ if (check_generation && generation != w_generation) {
mtx_unlock_spin(&w_mtx);
/* The graph has changed, try again. */
@@ -2700,7 +2980,7 @@ restart:
continue;
mtx_lock_spin(&w_mtx);
- if (generation != w_generation) {
+ if (check_generation && generation != w_generation) {
mtx_unlock_spin(&w_mtx);
/* The graph has changed, try again. */
@@ -2729,6 +3009,8 @@ restart:
stack_copy(&data2->wlod_stack,
&tmp_data2->wlod_stack);
}
+ w1_is_parent = isitmydescendant(w1, w2);
+ w2_is_parent = isitmydescendant(w2, w1);
mtx_unlock_spin(&w_mtx);
if (blessed(tmp_w1, tmp_w2))
@@ -2738,26 +3020,49 @@ restart:
"\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
tmp_w1->w_name, tmp_w1->w_class->lc_name,
tmp_w2->w_name, tmp_w2->w_class->lc_name);
- if (data1) {
+ if (w1_is_parent || data1 != NULL) {
sbuf_printf(sb,
- "Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
+ "All lock orders from \"%s\"(%s) -> \"%s\"(%s):\n",
tmp_w1->w_name, tmp_w1->w_class->lc_name,
tmp_w2->w_name, tmp_w2->w_class->lc_name);
- stack_sbuf_print(sb, &tmp_data1->wlod_stack);
+ if (w1_is_parent)
+ sbuf_print_verbose_witness_chains(t, i,
+ j);
+ if (data1 && !has_verbose_lockpair(t, i, j)) {
+ sbuf_printf(t->sb,
+ "** \"%s\" -> \"%s\"\n",
+ tmp_w1->w_name, tmp_w2->w_name);
+ add_verbose_lockpair(t, i, j);
+ }
+ sbuf_putc(sb, '\n');
+ sbuf_print_verbose_witness_stacks(t);
sbuf_putc(sb, '\n');
+ reset_verbose_tracker(t, generation);
}
- if (data2 && data2 != data1) {
+ if (w2_is_parent || (data2 != NULL && data2 != data1)) {
sbuf_printf(sb,
- "Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
+ "All lock orders from \"%s\"(%s) -> \"%s\"(%s):\n",
tmp_w2->w_name, tmp_w2->w_class->lc_name,
tmp_w1->w_name, tmp_w1->w_class->lc_name);
- stack_sbuf_print(sb, &tmp_data2->wlod_stack);
+ if (w2_is_parent)
+ sbuf_print_verbose_witness_chains(t, j,
+ i);
+ if (data2 && data2 != data1 &&
+ !has_verbose_lockpair(t, j, i)) {
+ sbuf_printf(t->sb,
+ "** \"%s\" -> \"%s\"\n",
+ tmp_w2->w_name, tmp_w1->w_name);
+ add_verbose_lockpair(t, j, i);
+ }
+ sbuf_putc(sb, '\n');
+ sbuf_print_verbose_witness_stacks(t);
sbuf_putc(sb, '\n');
+ reset_verbose_tracker(t, generation);
}
}
}
mtx_lock_spin(&w_mtx);
- if (generation != w_generation) {
+ if (check_generation && generation != w_generation) {
mtx_unlock_spin(&w_mtx);
/*
@@ -2775,6 +3080,7 @@ restart:
free(tmp_data2, M_TEMP);
free(tmp_w1, M_TEMP);
free(tmp_w2, M_TEMP);
+ free(t, M_TEMP);
}
static int
@@ -2796,7 +3102,7 @@ sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
if (sb == NULL)
return (ENOMEM);
- sbuf_print_witness_badstacks(sb, &req->oldidx);
+ sbuf_print_witness_badstacks(sb, &req->oldidx, true);
sbuf_finish(sb);
error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
@@ -2820,7 +3126,7 @@ DB_SHOW_COMMAND_FLAGS(badstacks, db_witness_badstacks, DB_CMD_MEMSAFE)
sbuf_new(&sb, buffer, sizeof(buffer), SBUF_FIXEDLEN);
sbuf_set_drain(&sb, sbuf_db_printf_drain, NULL);
- sbuf_print_witness_badstacks(&sb, &dummy);
+ sbuf_print_witness_badstacks(&sb, &dummy, false);
sbuf_finish(&sb);
}
#endif
diff --git a/sys/kern/sys_eventfd.c b/sys/kern/sys_eventfd.c
index 04ed107c933d..47f1fcc316ec 100644
--- a/sys/kern/sys_eventfd.c
+++ b/sys/kern/sys_eventfd.c
@@ -40,6 +40,7 @@
#include <sys/mutex.h>
#include <sys/poll.h>
#include <sys/proc.h>
+#include <sys/refcount.h>
#include <sys/selinfo.h>
#include <sys/stat.h>
#include <sys/uio.h>
@@ -102,6 +103,7 @@ struct eventfd {
uint32_t efd_flags;
struct selinfo efd_sel;
struct mtx efd_lock;
+ unsigned int efd_refcount;
};
int
@@ -119,6 +121,7 @@ eventfd_create_file(struct thread *td, struct file *fp, uint32_t initval,
efd->efd_count = initval;
mtx_init(&efd->efd_lock, "eventfd", NULL, MTX_DEF);
knlist_init_mtx(&efd->efd_sel.si_note, &efd->efd_lock);
+ refcount_init(&efd->efd_refcount, 1);
fflags = FREAD | FWRITE;
if ((flags & EFD_NONBLOCK) != 0)
@@ -128,16 +131,60 @@ eventfd_create_file(struct thread *td, struct file *fp, uint32_t initval,
return (0);
}
-static int
-eventfd_close(struct file *fp, struct thread *td)
+struct eventfd *
+eventfd_get(struct file *fp)
{
struct eventfd *efd;
+ if (fp->f_data == NULL || fp->f_ops != &eventfdops)
+ return (NULL);
+
efd = fp->f_data;
+ refcount_acquire(&efd->efd_refcount);
+
+ return (efd);
+}
+
+void
+eventfd_put(struct eventfd *efd)
+{
+ if (!refcount_release(&efd->efd_refcount))
+ return;
+
seldrain(&efd->efd_sel);
knlist_destroy(&efd->efd_sel.si_note);
mtx_destroy(&efd->efd_lock);
free(efd, M_EVENTFD);
+}
+
+static void
+eventfd_wakeup(struct eventfd *efd)
+{
+ KNOTE_LOCKED(&efd->efd_sel.si_note, 0);
+ selwakeup(&efd->efd_sel);
+ wakeup(&efd->efd_count);
+}
+
+void
+eventfd_signal(struct eventfd *efd)
+{
+ mtx_lock(&efd->efd_lock);
+
+ if (efd->efd_count < UINT64_MAX)
+ efd->efd_count++;
+
+ eventfd_wakeup(efd);
+
+ mtx_unlock(&efd->efd_lock);
+}
+
+static int
+eventfd_close(struct file *fp, struct thread *td)
+{
+ struct eventfd *efd;
+
+ efd = fp->f_data;
+ eventfd_put(efd);
return (0);
}
@@ -218,9 +265,7 @@ retry:
if (error == 0) {
MPASS(UINT64_MAX - efd->efd_count > count);
efd->efd_count += count;
- KNOTE_LOCKED(&efd->efd_sel.si_note, 0);
- selwakeup(&efd->efd_sel);
- wakeup(&efd->efd_count);
+ eventfd_wakeup(efd);
}
mtx_unlock(&efd->efd_lock);
diff --git a/sys/kern/sys_procdesc.c b/sys/kern/sys_procdesc.c
index c5db21544b0f..ec3b37f96148 100644
--- a/sys/kern/sys_procdesc.c
+++ b/sys/kern/sys_procdesc.c
@@ -75,6 +75,7 @@
#include <sys/procdesc.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
+#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
@@ -270,6 +271,9 @@ procdesc_free(struct procdesc *pd)
KASSERT((pd->pd_flags & PDF_CLOSED),
("procdesc_free: !PDF_CLOSED"));
+ if (pd->pd_pid != -1)
+ proc_id_clear(PROC_ID_PID, pd->pd_pid);
+
knlist_destroy(&pd->pd_selinfo.si_note);
PROCDESC_LOCK_DESTROY(pd);
free(pd, M_PROCDESC);
@@ -318,6 +322,9 @@ procdesc_exit(struct proc *p)
}
KNOTE_LOCKED(&pd->pd_selinfo.si_note, NOTE_EXIT);
PROCDESC_UNLOCK(pd);
+
+	/* Wake up all waiters for this procdesc's process exit. */
+ wakeup(&p->p_procdesc);
return (0);
}
@@ -389,6 +396,7 @@ procdesc_close(struct file *fp, struct thread *td)
*/
pd->pd_proc = NULL;
p->p_procdesc = NULL;
+ pd->pd_pid = -1;
procdesc_free(pd);
/*
diff --git a/sys/kern/syscalls.c b/sys/kern/syscalls.c
index 06a4adc3d8cb..e2467c39fe6d 100644
--- a/sys/kern/syscalls.c
+++ b/sys/kern/syscalls.c
@@ -605,4 +605,6 @@ const char *syscallnames[] = {
"jail_attach_jd", /* 597 = jail_attach_jd */
"jail_remove_jd", /* 598 = jail_remove_jd */
"kexec_load", /* 599 = kexec_load */
+ "pdrfork", /* 600 = pdrfork */
+ "pdwait", /* 601 = pdwait */
};
diff --git a/sys/kern/syscalls.master b/sys/kern/syscalls.master
index ea6d2b5aa1ef..8a30e5931a0e 100644
--- a/sys/kern/syscalls.master
+++ b/sys/kern/syscalls.master
@@ -3402,4 +3402,23 @@
u_long flags
);
}
+
+600 AUE_PDRFORK STD|CAPENABLED {
+ int pdrfork(
+ _Out_ int *fdp,
+ int pdflags,
+ int rfflags
+ );
+ }
+
+601 AUE_PDWAIT STD|CAPENABLED {
+ int pdwait(
+ int fd,
+ _Out_opt_ int *status,
+ int options,
+ _Out_opt_ _Contains_long_ struct __wrusage *wrusage,
+ _Out_opt_ _Contains_long_ptr_ struct __siginfo *info
+ );
+ }
+
; vim: syntax=off
diff --git a/sys/kern/systrace_args.c b/sys/kern/systrace_args.c
index 5951cebbe74a..8f5a5cd5153d 100644
--- a/sys/kern/systrace_args.c
+++ b/sys/kern/systrace_args.c
@@ -3524,6 +3524,26 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
*n_args = 4;
break;
}
+ /* pdrfork */
+ case 600: {
+ struct pdrfork_args *p = params;
+ uarg[a++] = (intptr_t)p->fdp; /* int * */
+ iarg[a++] = p->pdflags; /* int */
+ iarg[a++] = p->rfflags; /* int */
+ *n_args = 3;
+ break;
+ }
+ /* pdwait */
+ case 601: {
+ struct pdwait_args *p = params;
+ iarg[a++] = p->fd; /* int */
+ uarg[a++] = (intptr_t)p->status; /* int * */
+ iarg[a++] = p->options; /* int */
+ uarg[a++] = (intptr_t)p->wrusage; /* struct __wrusage * */
+ uarg[a++] = (intptr_t)p->info; /* struct __siginfo * */
+ *n_args = 5;
+ break;
+ }
default:
*n_args = 0;
break;
@@ -9430,6 +9450,44 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
break;
};
break;
+ /* pdrfork */
+ case 600:
+ switch (ndx) {
+ case 0:
+ p = "userland int *";
+ break;
+ case 1:
+ p = "int";
+ break;
+ case 2:
+ p = "int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* pdwait */
+ case 601:
+ switch (ndx) {
+ case 0:
+ p = "int";
+ break;
+ case 1:
+ p = "userland int *";
+ break;
+ case 2:
+ p = "int";
+ break;
+ case 3:
+ p = "userland struct __wrusage *";
+ break;
+ case 4:
+ p = "userland struct __siginfo *";
+ break;
+ default:
+ break;
+ };
+ break;
default:
break;
};
@@ -11443,6 +11501,16 @@ systrace_return_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
if (ndx == 0 || ndx == 1)
p = "int";
break;
+ /* pdrfork */
+ case 600:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* pdwait */
+ case 601:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
default:
break;
};
diff --git a/sys/kern/uipc_domain.c b/sys/kern/uipc_domain.c
index ebcf041790b2..2b922ab51550 100644
--- a/sys/kern/uipc_domain.c
+++ b/sys/kern/uipc_domain.c
@@ -168,20 +168,6 @@ pr_sockaddr_notsupp(struct socket *so, struct sockaddr *nam)
return (EOPNOTSUPP);
}
-static int
-pr_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
- struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
-{
- return (EOPNOTSUPP);
-}
-
-static int
-pr_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
- struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
-{
- return (EOPNOTSUPP);
-}
-
static void
pr_init(struct domain *dom, struct protosw *pr)
{
@@ -217,8 +203,6 @@ pr_init(struct domain *dom, struct protosw *pr)
NOTSUPP(pr_sendfile_wait);
NOTSUPP(pr_shutdown);
NOTSUPP(pr_sockaddr);
- NOTSUPP(pr_sosend);
- NOTSUPP(pr_soreceive);
NOTSUPP(pr_ready);
}
diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c
index eb1327f7f2de..fe3feab4149f 100644
--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c
@@ -759,7 +759,6 @@ shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
/* Free the swap accounted for shm */
swap_release_by_cred(delta, object->cred);
- object->charge -= delta;
} else {
if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
return (EPERM);
@@ -768,7 +767,6 @@ shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
delta = IDX_TO_OFF(nobjsize - object->size);
if (!swap_reserve_by_cred(delta, object->cred))
return (ENOMEM);
- object->charge += delta;
}
shmfd->shm_size = length;
mtx_lock(&shm_timestamp_lock);
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index 557e451f9a45..3f8591bd0ba7 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -384,14 +384,10 @@ struct namecache {
};
/*
- * struct namecache_ts repeats struct namecache layout up to the
- * nc_nlen member.
* struct namecache_ts is used in place of struct namecache when time(s) need
* to be stored. The nc_dotdottime field is used when a cache entry is mapping
* both a non-dotdot directory name plus dotdot for the directory's
* parent.
- *
- * See below for alignment requirement.
*/
struct namecache_ts {
struct timespec nc_time; /* timespec provided by fs */
@@ -404,43 +400,52 @@ struct namecache_ts {
TAILQ_HEAD(cache_freebatch, namecache);
/*
- * At least mips n32 performs 64-bit accesses to timespec as found
- * in namecache_ts and requires them to be aligned. Since others
- * may be in the same spot suffer a little bit and enforce the
- * alignment for everyone. Note this is a nop for 64-bit platforms.
+ * Ensure all zones are sufficiently aligned to hold both
+ * struct namecache and struct namecache_ts.
*/
-#define CACHE_ZONE_ALIGNMENT UMA_ALIGNOF(time_t)
+#define CACHE_ZONE_ALIGN_MASK UMA_ALIGNOF(struct namecache_ts)
/*
- * TODO: the initial value of CACHE_PATH_CUTOFF was inherited from the
- * 4.4 BSD codebase. Later on struct namecache was tweaked to become
- * smaller and the value was bumped to retain the total size, but it
- * was never re-evaluated for suitability. A simple test counting
- * lengths during package building shows that the value of 45 covers
- * about 86% of all added entries, reaching 99% at 65.
+ * TODO: CACHE_PATH_CUTOFF was initially introduced with an arbitrary
+ * value of 32 in FreeBSD 5.2.0. It was bumped to 35 and the path was
+ * NUL terminated with the introduction of DTrace probes. Later, it was
+ * expanded to match the alignment allowing an increase to 39, but it
+ * was not re-evaluated for suitability. It was again bumped to 45 on
+ * 64-bit systems and 41 on 32-bit systems (the current values, now
+ * computed at compile time rather than hardcoded). A simple test
+ * counting lengths during package building in 2020 showed that the
+ * value of 45 covers about 86% of all added entries, reaching 99%
+ * at 65.
*
* Regardless of the above, use of dedicated zones instead of malloc may be
* inducing additional waste. This may be hard to address as said zones are
* tied to VFS SMR. Even if retaining them, the current split should be
* re-evaluated.
*/
-#ifdef __LP64__
-#define CACHE_PATH_CUTOFF 45
-#define CACHE_LARGE_PAD 6
-#else
-#define CACHE_PATH_CUTOFF 41
-#define CACHE_LARGE_PAD 2
-#endif
-
-#define CACHE_ZONE_SMALL_SIZE (offsetof(struct namecache, nc_name) + CACHE_PATH_CUTOFF + 1)
-#define CACHE_ZONE_SMALL_TS_SIZE (offsetof(struct namecache_ts, nc_nc) + CACHE_ZONE_SMALL_SIZE)
-#define CACHE_ZONE_LARGE_SIZE (offsetof(struct namecache, nc_name) + NAME_MAX + 1 + CACHE_LARGE_PAD)
-#define CACHE_ZONE_LARGE_TS_SIZE (offsetof(struct namecache_ts, nc_nc) + CACHE_ZONE_LARGE_SIZE)
-
-_Static_assert((CACHE_ZONE_SMALL_SIZE % (CACHE_ZONE_ALIGNMENT + 1)) == 0, "bad zone size");
-_Static_assert((CACHE_ZONE_SMALL_TS_SIZE % (CACHE_ZONE_ALIGNMENT + 1)) == 0, "bad zone size");
-_Static_assert((CACHE_ZONE_LARGE_SIZE % (CACHE_ZONE_ALIGNMENT + 1)) == 0, "bad zone size");
-_Static_assert((CACHE_ZONE_LARGE_TS_SIZE % (CACHE_ZONE_ALIGNMENT + 1)) == 0, "bad zone size");
+#define CACHE_PATH_CUTOFF_MIN 40
+#define CACHE_STRUCT_LEN(pathlen) \
+ (offsetof(struct namecache, nc_name) + (pathlen) + 1)
+#define CACHE_PATH_CUTOFF \
+ (roundup2(CACHE_STRUCT_LEN(CACHE_PATH_CUTOFF_MIN), \
+ _Alignof(struct namecache_ts)) - CACHE_STRUCT_LEN(0))
+
+#define CACHE_ZONE_SMALL_SIZE \
+ CACHE_STRUCT_LEN(CACHE_PATH_CUTOFF)
+#define CACHE_ZONE_SMALL_TS_SIZE \
+ (offsetof(struct namecache_ts, nc_nc) + CACHE_ZONE_SMALL_SIZE)
+#define CACHE_ZONE_LARGE_SIZE \
+ roundup2(CACHE_STRUCT_LEN(NAME_MAX), _Alignof(struct namecache_ts))
+#define CACHE_ZONE_LARGE_TS_SIZE \
+ (offsetof(struct namecache_ts, nc_nc) + CACHE_ZONE_LARGE_SIZE)
+
+_Static_assert((CACHE_ZONE_SMALL_SIZE % (CACHE_ZONE_ALIGN_MASK + 1)) == 0,
+ "bad zone size");
+_Static_assert((CACHE_ZONE_SMALL_TS_SIZE % (CACHE_ZONE_ALIGN_MASK + 1)) == 0,
+ "bad zone size");
+_Static_assert((CACHE_ZONE_LARGE_SIZE % (CACHE_ZONE_ALIGN_MASK + 1)) == 0,
+ "bad zone size");
+_Static_assert((CACHE_ZONE_LARGE_TS_SIZE % (CACHE_ZONE_ALIGN_MASK + 1)) == 0,
+ "bad zone size");
#define nc_vp n_un.nu_vp
#define nc_neg n_un.nu_neg
@@ -2785,13 +2790,13 @@ nchinit(void *dummy __unused)
u_int i;
cache_zone_small = uma_zcreate("S VFS Cache", CACHE_ZONE_SMALL_SIZE,
- NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT, UMA_ZONE_ZINIT);
+ NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGN_MASK, UMA_ZONE_ZINIT);
cache_zone_small_ts = uma_zcreate("STS VFS Cache", CACHE_ZONE_SMALL_TS_SIZE,
- NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT, UMA_ZONE_ZINIT);
+ NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGN_MASK, UMA_ZONE_ZINIT);
cache_zone_large = uma_zcreate("L VFS Cache", CACHE_ZONE_LARGE_SIZE,
- NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT, UMA_ZONE_ZINIT);
+ NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGN_MASK, UMA_ZONE_ZINIT);
cache_zone_large_ts = uma_zcreate("LTS VFS Cache", CACHE_ZONE_LARGE_TS_SIZE,
- NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT, UMA_ZONE_ZINIT);
+ NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGN_MASK, UMA_ZONE_ZINIT);
VFS_SMR_ZONE_SET(cache_zone_small);
VFS_SMR_ZONE_SET(cache_zone_small_ts);
diff --git a/sys/modules/Makefile b/sys/modules/Makefile
index 138adc30c213..caf703bb318d 100644
--- a/sys/modules/Makefile
+++ b/sys/modules/Makefile
@@ -551,6 +551,17 @@ SUBDIR+= linux64
SUBDIR+= linux_common
.endif
+# LinuxKPI based wireless drivers.
+.if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" || \
+ ${MACHINE_CPUARCH} == "i386" || ${MACHINE_CPUARCH} == "riscv"
+_iwlwifi= iwlwifi
+_rtw88= rtw88
+# rtw89 currently cannot be compiled without ACPI (seems also broken in Linux).
+.if ${KERN_OPTS:MDEV_ACPI}
+_rtw89= rtw89
+.endif
+.endif
+
.if ${MACHINE_CPUARCH} != "arm"
.if ${MK_OFED} != "no" || defined(ALL_MODULES)
_ibcore= ibcore
@@ -583,9 +594,6 @@ _acpi= acpi
_ena= ena
_gve= gve
_igc= igc
-_iwlwifi= iwlwifi
-_rtw88= rtw88
-_rtw89= rtw89
_vmware= vmware
.endif
@@ -651,8 +659,7 @@ _rtwnfw= rtwnfw
.endif
.if ${MK_SOURCELESS_UCODE} != "no" && ${MACHINE_CPUARCH} != "arm" && \
- ${MACHINE_ARCH} != "powerpc" && ${MACHINE_ARCH} != "powerpcspe" && \
- ${MACHINE_CPUARCH} != "riscv"
+ ${MACHINE_ARCH} != "powerpc" && ${MACHINE_CPUARCH} != "riscv"
_cxgbe= cxgbe
.endif
@@ -667,7 +674,7 @@ _genet= genet
.endif
.if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "aarch64" || \
- ${MACHINE_ARCH:Mpowerpc64*}
+ ${MACHINE_ARCH:Mpowerpc64*} != ""
_ice= ice
.if ${MK_SOURCELESS_UCODE} != "no"
_ice_ddp= ice_ddp
@@ -677,6 +684,7 @@ _ice_ddp= ice_ddp
_irdma= irdma
.endif
.endif
+_ixl= ixl
.endif
.if ${MACHINE_CPUARCH} == "aarch64"
@@ -693,7 +701,7 @@ _sdhci_fdt= sdhci_fdt
.endif
# These rely on 64bit atomics
-.if ${MACHINE_ARCH} != "powerpc" && ${MACHINE_ARCH} != "powerpcspe"
+.if ${MACHINE_ARCH} != "powerpc"
_mps= mps
_mpr= mpr
.endif
@@ -786,7 +794,6 @@ _amdsbwd= amdsbwd
_amdsmn= amdsmn
_amdtemp= amdtemp
_arcmsr= arcmsr
-_asmc= asmc
.if ${MK_CRYPT} != "no" || defined(ALL_MODULES)
_blake2= blake2
.endif
@@ -845,6 +852,7 @@ _x86bios= x86bios
.if ${MACHINE_CPUARCH} == "amd64"
_amdgpio= amdgpio
+_asmc= asmc
_ccp= ccp
_enic= enic
_iavf= iavf
@@ -918,12 +926,10 @@ _ffec= ffec
.endif
.if ${MACHINE_ARCH:Mpowerpc64*} != ""
-_ixl= ixl
_nvram= opal_nvram
.endif
-.if ${MACHINE_CPUARCH} == "powerpc" && ${MACHINE_ARCH} != "powerpcspe"
-# Don't build powermac_nvram for powerpcspe, it's never supported.
+.if ${MACHINE_CPUARCH} == "powerpc"
_nvram+= powermac_nvram
.endif
@@ -961,7 +967,7 @@ SUBDIR:= ${SUBDIR:N${reject}}
afterinstall: .PHONY
${KLDXREF_CMD} ${DESTDIR}${KMODDIR}
.if defined(NO_ROOT) && defined(METALOG)
- echo ".${DISTBASE}${KMODDIR}/linker.hints type=file mode=0644 uname=root gname=wheel" | \
+ echo ".${DISTBASE}${KMODDIR}/linker.hints type=file uname=root gname=wheel mode=0644" | \
cat -l >> ${METALOG}
.endif
.endif
diff --git a/sys/modules/ath10k/Makefile b/sys/modules/ath10k/Makefile
index 98df270b6791..d8196854b681 100644
--- a/sys/modules/ath10k/Makefile
+++ b/sys/modules/ath10k/Makefile
@@ -29,6 +29,7 @@ SRCS+= leds.c
.endif
CFLAGS+= -DKBUILD_MODNAME='"ath10k"'
+CFLAGS+= -DLINUXKPI_VERSION=61900
CFLAGS+= -I${DEVATH10KDIR}
CFLAGS+= -I${DEVATH10KDIR}/..
diff --git a/sys/modules/iwlwifi/Makefile b/sys/modules/iwlwifi/Makefile
index 6fe64a611900..b5441744d77a 100644
--- a/sys/modules/iwlwifi/Makefile
+++ b/sys/modules/iwlwifi/Makefile
@@ -1,10 +1,14 @@
+.include <kmod.opts.mk>
+
DEVIWLWIFIDIR= ${SRCTOP}/sys/contrib/dev/iwlwifi
.PATH: ${DEVIWLWIFIDIR}
WITH_CONFIG_PM= 0
WITH_DEBUGFS= 0
+.if ${KERN_OPTS:MDEV_ACPI}
WITH_CONFIG_ACPI= 1
+.endif
KMOD= if_iwlwifi
@@ -60,10 +64,11 @@ CFLAGS+= -DCONFIG_PM_SLEEP
.endif
.if defined(WITH_CONFIG_ACPI) && ${WITH_CONFIG_ACPI} > 0
-SRCS+= fw/acpi.c
+SRCS.DEV_ACPI+= fw/acpi.c
CFLAGS+= -DCONFIG_ACPI
-CFLAGS+= -DLINUXKPI_WANT_LINUX_ACPI
.endif
+# This needs to always stay on for the LinuxKPI header file.
+CFLAGS+= -DLINUXKPI_WANT_LINUX_ACPI
# Other
SRCS+= ${LINUXKPI_GENSRCS}
diff --git a/sys/modules/linuxkpi/Makefile b/sys/modules/linuxkpi/Makefile
index c465c76a7626..2090fe76d857 100644
--- a/sys/modules/linuxkpi/Makefile
+++ b/sys/modules/linuxkpi/Makefile
@@ -6,6 +6,7 @@ SRCS= linux_compat.c \
linux_devres.c \
linux_dmi.c \
linux_domain.c \
+ linux_eventfd.c \
linux_firmware.c \
linux_folio.c \
linux_fpu.c \
@@ -24,6 +25,7 @@ SRCS= linux_compat.c \
linux_radix.c \
linux_rcu.c \
linux_schedule.c \
+ linux_seq_buf.c \
linux_seq_file.c \
linux_shmemfs.c \
linux_shrinker.c \
diff --git a/sys/modules/mt76/Makefile.inc b/sys/modules/mt76/Makefile.inc
index 44aa94c954a8..e4369564237e 100644
--- a/sys/modules/mt76/Makefile.inc
+++ b/sys/modules/mt76/Makefile.inc
@@ -32,7 +32,6 @@ CFLAGS+= CONFIG_NET_MEDIATEK_SOC_WED
CFLAGS+= -I${COMMONDIR}
CFLAGS+= ${LINUXKPI_INCLUDES}
-CFLAGS+= -DLINUXKPI_VERSION=61700
-
+CFLAGS+= -DLINUXKPI_VERSION=61900
# end
diff --git a/sys/modules/mt76/mt7925/Makefile b/sys/modules/mt76/mt7925/Makefile
index 58e23d06a9ad..dc6de5085d77 100644
--- a/sys/modules/mt76/mt7925/Makefile
+++ b/sys/modules/mt76/mt7925/Makefile
@@ -5,7 +5,7 @@ DEVDIR= ${SRCTOP}/sys/contrib/dev/mediatek/mt76/mt7925
KMOD= if_mt7925
# Common stuff.
-SRCS= init.c main.c mac.c mcu.c
+SRCS= init.c main.c mac.c mcu.c regd.c
# PCI stuff.
SRCS+= pci.c pci_mac.c pci_mcu.c
diff --git a/sys/modules/rtw88/Makefile b/sys/modules/rtw88/Makefile
index ee47df54bcf9..0ce6ad3f99bb 100644
--- a/sys/modules/rtw88/Makefile
+++ b/sys/modules/rtw88/Makefile
@@ -46,7 +46,7 @@ SRCS+= ${LINUXKPI_GENSRCS}
SRCS+= opt_wlan.h opt_inet6.h opt_inet.h
CFLAGS+= -DKBUILD_MODNAME='"rtw88"'
-CFLAGS+= -DLINUXKPI_VERSION=61700
+CFLAGS+= -DLINUXKPI_VERSION=61900
CFLAGS+= -I${DEVRTW88DIR}
CFLAGS+= ${LINUXKPI_INCLUDES}
diff --git a/sys/modules/rtw89/Makefile b/sys/modules/rtw89/Makefile
index 1307abf3d9b4..9aa6cfda1d3d 100644
--- a/sys/modules/rtw89/Makefile
+++ b/sys/modules/rtw89/Makefile
@@ -1,16 +1,21 @@
+.include <kmod.opts.mk>
+
DEVRTW89DIR= ${SRCTOP}/sys/contrib/dev/rtw89
.PATH: ${DEVRTW89DIR}
WITH_CONFIG_PM= 0
WITH_DEBUGFS= 0
+.if ${KERN_OPTS:MDEV_ACPI}
+WITH_CONFIG_ACPI= 1
+.endif
KMOD= if_rtw89
SRCS= core.c
SRCS+= pci.c pci_be.c
SRCS+= chan.c mac80211.c mac.c mac_be.c phy.c phy_be.c fw.c
-SRCS+= acpi.c cam.c efuse.c efuse_be.c regd.c sar.c coex.c ps.c ser.c
+SRCS+= cam.c efuse.c efuse_be.c regd.c sar.c coex.c ps.c ser.c
SRCS+= util.c
SRCS+= rtw8852a.c rtw8852a_rfk.c rtw8852a_rfk_table.c rtw8852a_table.c
SRCS+= rtw8852ae.c
@@ -26,6 +31,13 @@ SRCS+= rtw8852bte.c
SRCS+= rtw8922a.c rtw8922a_rfk.c
SRCS+= rtw8922ae.c
+.if defined(WITH_CONFIG_ACPI) && ${WITH_CONFIG_ACPI} > 0
+SRCS.DEV_ACPI+= acpi.c
+CFLAGS+= -DCONFIG_ACPI
+.endif
+# This needs to always stay on for the LinuxKPI header file.
+CFLAGS+= -DLINUXKPI_WANT_LINUX_ACPI
+
# USB parts
#SRCS+= rtw8851bu.c rtw8852bu.c
#SRCS+= usb.c
@@ -44,7 +56,6 @@ SRCS+= opt_wlan.h opt_inet6.h opt_inet.h opt_acpi.h
CFLAGS+= -DKBUILD_MODNAME='"rtw89"'
CFLAGS+= -DLINUXKPI_VERSION=61700
-CFLAGS+= -DLINUXKPI_WANT_LINUX_ACPI
CFLAGS+= -I${DEVRTW89DIR}
CFLAGS+= ${LINUXKPI_INCLUDES}
diff --git a/sys/modules/sctp/Makefile b/sys/modules/sctp/Makefile
index 626f195dbf1f..4b47e1e63478 100644
--- a/sys/modules/sctp/Makefile
+++ b/sys/modules/sctp/Makefile
@@ -6,7 +6,6 @@ SRCS= sctp_asconf.c \
sctp_auth.c \
sctp_bsd_addr.c \
sctp_cc_functions.c \
- sctp_crc32.c \
sctp_indata.c \
sctp_input.c \
sctp_kdtrace.c \
diff --git a/sys/modules/vmm/Makefile b/sys/modules/vmm/Makefile
index 066b4d814348..dcb401d2026d 100644
--- a/sys/modules/vmm/Makefile
+++ b/sys/modules/vmm/Makefile
@@ -18,7 +18,8 @@ SRCS+= vmm.c \
vmm_dev_machdep.c \
vmm_instruction_emul.c \
vmm_mem.c \
- vmm_stat.c
+ vmm_stat.c \
+ vmm_vm.c
.if ${MACHINE_CPUARCH} == "aarch64"
CFLAGS+= -I${SRCTOP}/sys/${MACHINE}/vmm/io
diff --git a/sys/modules/zfs/Makefile b/sys/modules/zfs/Makefile
index 2751bb465258..fed29336e5de 100644
--- a/sys/modules/zfs/Makefile
+++ b/sys/modules/zfs/Makefile
@@ -45,7 +45,7 @@ CFLAGS+= -D__x86_64 -DHAVE_SSE2 -DHAVE_SSSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 \
.endif
.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "powerpc" || \
- ${MACHINE_ARCH} == "powerpcspe" || ${MACHINE_ARCH} == "arm"
+ ${MACHINE_ARCH} == "arm"
CFLAGS+= -DBITS_PER_LONG=32
.else
CFLAGS+= -DBITS_PER_LONG=64
@@ -175,7 +175,7 @@ SRCS+= acl_common.c \
.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "powerpc" || \
- ${MACHINE_ARCH} == "powerpcspe" || ${MACHINE_ARCH} == "arm"
+ ${MACHINE_ARCH} == "arm"
SRCS+= spl_atomic.c
.endif
diff --git a/sys/net/if.c b/sys/net/if.c
index 4ddf8a69b3f0..047ccd2ecda7 100644
--- a/sys/net/if.c
+++ b/sys/net/if.c
@@ -1115,6 +1115,8 @@ if_detach_internal(struct ifnet *ifp, bool vmove)
altq_detach(&ifp->if_snd);
#endif
+ rt_flushifroutes(ifp);
+
if_purgeaddrs(ifp);
EVENTHANDLER_INVOKE(ifnet_departure_event, ifp);
if_purgemaddrs(ifp);
@@ -1139,8 +1141,6 @@ if_detach_internal(struct ifnet *ifp, bool vmove)
} else
IF_ADDR_WUNLOCK(ifp);
}
-
- rt_flushifroutes(ifp);
}
#ifdef VIMAGE
diff --git a/sys/net/if_enc.c b/sys/net/if_enc.c
index 3c3f19661063..159fb3ca34c9 100644
--- a/sys/net/if_enc.c
+++ b/sys/net/if_enc.c
@@ -387,7 +387,7 @@ vnet_enc_init(const void *unused __unused)
ifc_create_ifp(encname, &ifd, &ifp);
V_enc_sc = ifp->if_softc;
}
-VNET_SYSINIT(vnet_enc_init, SI_SUB_PSEUDO, SI_ORDER_ANY,
+VNET_SYSINIT(vnet_enc_init, SI_SUB_PROTO_IF, SI_ORDER_ANY,
vnet_enc_init, NULL);
static void
diff --git a/sys/net/if_ethersubr.c b/sys/net/if_ethersubr.c
index da9264aa4a23..812a31595df9 100644
--- a/sys/net/if_ethersubr.c
+++ b/sys/net/if_ethersubr.c
@@ -479,7 +479,7 @@ ether_output_frame(struct ifnet *ifp, struct mbuf *m)
#if defined(INET6) && defined(INET)
/* draft-ietf-6man-ipv6only-flag */
/* Catch ETHERTYPE_IP, and ETHERTYPE_[REV]ARP if we are v6-only. */
- if ((ND_IFINFO(ifp)->flags & ND6_IFF_IPV6_ONLY_MASK) != 0) {
+ if ((ifp->if_inet6->nd_flags & ND6_IFF_IPV6_ONLY_MASK) != 0) {
struct ether_header *eh;
eh = mtod(m, struct ether_header *);
@@ -545,7 +545,7 @@ ether_input_internal(struct ifnet *ifp, struct mbuf *m)
#if defined(INET6) && defined(INET)
/* draft-ietf-6man-ipv6only-flag */
/* Catch ETHERTYPE_IP, and ETHERTYPE_[REV]ARP if we are v6-only. */
- if ((ND_IFINFO(ifp)->flags & ND6_IFF_IPV6_ONLY_MASK) != 0) {
+ if ((ifp->if_inet6->nd_flags & ND6_IFF_IPV6_ONLY_MASK) != 0) {
switch (etype) {
case ETHERTYPE_IP:
case ETHERTYPE_ARP:
diff --git a/sys/net/if_ovpn.c b/sys/net/if_ovpn.c
index ae09a1ce9db8..7c416055e939 100644
--- a/sys/net/if_ovpn.c
+++ b/sys/net/if_ovpn.c
@@ -1669,6 +1669,7 @@ ovpn_encrypt_tx_cb(struct cryptop *crp)
NET_EPOCH_EXIT(et);
CURVNET_RESTORE();
OVPN_COUNTER_ADD(sc, lost_data_pkts_out, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
m_freem(m);
return (0);
}
@@ -1680,6 +1681,8 @@ ovpn_encrypt_tx_cb(struct cryptop *crp)
if (ret == 0) {
OVPN_COUNTER_ADD(sc, sent_data_pkts, 1);
OVPN_COUNTER_ADD(sc, tunnel_bytes_sent, tunnel_len);
+ if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, tunnel_len);
}
crypto_freereq(crp);
@@ -1706,6 +1709,7 @@ ovpn_finish_rx(struct ovpn_softc *sc, struct mbuf *m,
if (V_replay_protection && ! ovpn_check_replay(key->decrypt, seq)) {
OVPN_RUNLOCK(sc);
OVPN_COUNTER_ADD(sc, lost_data_pkts_in, 1);
+		if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
m_freem(m);
return;
}
@@ -1767,6 +1771,7 @@ skip_float:
m = m_pullup(m, 1);
if (m == NULL) {
OVPN_COUNTER_ADD(sc, nomem_data_pkts_in, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
return;
}
@@ -1783,6 +1788,7 @@ skip_float:
netisr_dispatch(af == AF_INET ? NETISR_IP : NETISR_IPV6, m);
} else {
OVPN_COUNTER_ADD(sc, lost_data_pkts_in, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
m_freem(m);
}
}
@@ -1827,6 +1833,7 @@ ovpn_decrypt_rx_cb(struct cryptop *crp)
crypto_freereq(crp);
atomic_add_int(&sc->refcount, -1);
OVPN_COUNTER_ADD(sc, lost_data_pkts_in, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
OVPN_RUNLOCK(sc);
m_freem(m);
return (0);
@@ -1844,6 +1851,7 @@ ovpn_decrypt_rx_cb(struct cryptop *crp)
atomic_add_int(&sc->refcount, -1);
OVPN_RUNLOCK(sc);
OVPN_COUNTER_ADD(sc, lost_data_pkts_in, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
m_freem(m);
CURVNET_RESTORE();
return (0);
@@ -1859,6 +1867,7 @@ ovpn_decrypt_rx_cb(struct cryptop *crp)
*/
OVPN_RUNLOCK(sc);
OVPN_COUNTER_ADD(sc, lost_data_pkts_in, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
m_freem(m);
CURVNET_RESTORE();
return (0);
@@ -2073,6 +2082,7 @@ ovpn_transmit_to_peer(struct ifnet *ifp, struct mbuf *m,
if (_ovpn_lock_trackerp != NULL)
OVPN_RUNLOCK(sc);
OVPN_COUNTER_ADD(sc, lost_data_pkts_out, 1);
+		if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
m_freem(m);
return (ELOOP);
}
@@ -2089,6 +2099,7 @@ ovpn_transmit_to_peer(struct ifnet *ifp, struct mbuf *m,
if (_ovpn_lock_trackerp != NULL)
OVPN_RUNLOCK(sc);
OVPN_COUNTER_ADD(sc, nomem_data_pkts_out, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
return (ENOBUFS);
}
ohdr = mtod(m, struct ovpn_wire_header *);
@@ -2105,6 +2116,7 @@ ovpn_transmit_to_peer(struct ifnet *ifp, struct mbuf *m,
if (_ovpn_lock_trackerp != NULL)
OVPN_RUNLOCK(sc);
OVPN_COUNTER_ADD(sc, nomem_data_pkts_out, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
/* Let's avoid (very unlikely, but still) wraparounds of the
* 64-bit counter taking us back to 0. */
@@ -2127,6 +2139,8 @@ ovpn_transmit_to_peer(struct ifnet *ifp, struct mbuf *m,
if (ret == 0) {
OVPN_COUNTER_ADD(sc, sent_data_pkts, 1);
OVPN_COUNTER_ADD(sc, tunnel_bytes_sent, tunnel_len);
+ if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, tunnel_len);
}
return (ret);
}
@@ -2136,6 +2150,7 @@ ovpn_transmit_to_peer(struct ifnet *ifp, struct mbuf *m,
if (_ovpn_lock_trackerp != NULL)
OVPN_RUNLOCK(sc);
OVPN_COUNTER_ADD(sc, nomem_data_pkts_out, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
m_freem(m);
return (ENOBUFS);
}
@@ -2174,6 +2189,7 @@ ovpn_transmit_to_peer(struct ifnet *ifp, struct mbuf *m,
ret = crypto_dispatch(crp);
if (ret) {
OVPN_COUNTER_ADD(sc, lost_data_pkts_out, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
}
return (ret);
@@ -2198,6 +2214,7 @@ ovpn_encap(struct ovpn_softc *sc, uint32_t peerid, struct mbuf *m)
if (peer == NULL || sc->ifp->if_link_state != LINK_STATE_UP) {
OVPN_RUNLOCK(sc);
OVPN_COUNTER_ADD(sc, lost_data_pkts_out, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
m_freem(m);
return (ENETDOWN);
}
@@ -2208,6 +2225,7 @@ ovpn_encap(struct ovpn_softc *sc, uint32_t peerid, struct mbuf *m)
if (m == NULL) {
OVPN_RUNLOCK(sc);
OVPN_COUNTER_ADD(sc, nomem_data_pkts_out, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
m_freem(m);
return (ENOBUFS);
}
@@ -2241,6 +2259,7 @@ ovpn_encap(struct ovpn_softc *sc, uint32_t peerid, struct mbuf *m)
if (m == NULL) {
OVPN_RUNLOCK(sc);
OVPN_COUNTER_ADD(sc, nomem_data_pkts_out, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
return (ENOBUFS);
}
ip = mtod(m, struct ip *);
@@ -2274,12 +2293,14 @@ ovpn_encap(struct ovpn_softc *sc, uint32_t peerid, struct mbuf *m)
if (m == NULL) {
OVPN_RUNLOCK(sc);
OVPN_COUNTER_ADD(sc, nomem_data_pkts_out, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
return (ENOBUFS);
}
m = m_pullup(m, sizeof(*ip6) + sizeof(*udp));
if (m == NULL) {
OVPN_RUNLOCK(sc);
OVPN_COUNTER_ADD(sc, nomem_data_pkts_out, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
return (ENOBUFS);
}
@@ -2341,6 +2362,7 @@ ovpn_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
m = m_unshare(m, M_NOWAIT);
if (m == NULL) {
OVPN_COUNTER_ADD(sc, lost_data_pkts_out, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
return (ENOBUFS);
}
@@ -2350,6 +2372,7 @@ ovpn_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
if (__predict_false(ifp->if_link_state != LINK_STATE_UP)) {
OVPN_COUNTER_ADD(sc, lost_data_pkts_out, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
OVPN_RUNLOCK(sc);
m_freem(m);
return (ENETDOWN);
@@ -2368,6 +2391,7 @@ ovpn_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
if (peer == NULL) {
/* No destination. */
OVPN_COUNTER_ADD(sc, lost_data_pkts_out, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
OVPN_RUNLOCK(sc);
m_freem(m);
return (ENETDOWN);
@@ -2463,6 +2487,8 @@ ovpn_udp_input(struct mbuf *m, int off, struct inpcb *inp,
M_ASSERTPKTHDR(m);
OVPN_COUNTER_ADD(sc, transport_bytes_received, m->m_pkthdr.len - off);
+ if_inc_counter(sc->ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len - off);
+ if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
ohdrlen = sizeof(*ohdr) - sizeof(ohdr->auth_tag);
@@ -2492,6 +2518,7 @@ ovpn_udp_input(struct mbuf *m, int off, struct inpcb *inp,
m = m_unshare(m, M_NOWAIT);
if (m == NULL) {
OVPN_COUNTER_ADD(sc, nomem_data_pkts_in, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
return (true);
}
@@ -2499,6 +2526,7 @@ ovpn_udp_input(struct mbuf *m, int off, struct inpcb *inp,
if (m == NULL) {
OVPN_RUNLOCK(sc);
OVPN_COUNTER_ADD(sc, nomem_data_pkts_in, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
return (true);
}
@@ -2515,6 +2543,7 @@ ovpn_udp_input(struct mbuf *m, int off, struct inpcb *inp,
if (key == NULL || key->decrypt == NULL) {
OVPN_RUNLOCK(sc);
OVPN_COUNTER_ADD(sc, lost_data_pkts_in, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
m_freem(m);
return (true);
}
@@ -2560,6 +2589,7 @@ ovpn_udp_input(struct mbuf *m, int off, struct inpcb *inp,
if (m == NULL) {
OVPN_RUNLOCK(sc);
OVPN_COUNTER_ADD(sc, nomem_data_pkts_in, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
return (true);
}
uhdr = mtodo(m, 0);
@@ -2569,6 +2599,7 @@ ovpn_udp_input(struct mbuf *m, int off, struct inpcb *inp,
crp = crypto_getreq(key->decrypt->cryptoid, M_NOWAIT);
if (crp == NULL) {
OVPN_COUNTER_ADD(sc, nomem_data_pkts_in, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
OVPN_RUNLOCK(sc);
m_freem(m);
return (true);
@@ -2605,6 +2636,7 @@ ovpn_udp_input(struct mbuf *m, int off, struct inpcb *inp,
ret = crypto_dispatch(crp);
if (ret != 0) {
OVPN_COUNTER_ADD(sc, lost_data_pkts_in, 1);
+ if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
}
return (true);
diff --git a/sys/net/iflib.c b/sys/net/iflib.c
index fcd847ab6f7a..b0e4bb9470c9 100644
--- a/sys/net/iflib.c
+++ b/sys/net/iflib.c
@@ -504,61 +504,6 @@ typedef struct if_rxsd {
iflib_fl_t ifsd_fl;
} *if_rxsd_t;
-/* multiple of word size */
-#ifdef __LP64__
-#define PKT_INFO_SIZE 7
-#define RXD_INFO_SIZE 5
-#define PKT_TYPE uint64_t
-#else
-#define PKT_INFO_SIZE 12
-#define RXD_INFO_SIZE 8
-#define PKT_TYPE uint32_t
-#endif
-#define PKT_LOOP_BOUND ((PKT_INFO_SIZE / 3) * 3)
-#define RXD_LOOP_BOUND ((RXD_INFO_SIZE / 4) * 4)
-
-typedef struct if_pkt_info_pad {
- PKT_TYPE pkt_val[PKT_INFO_SIZE];
-} *if_pkt_info_pad_t;
-typedef struct if_rxd_info_pad {
- PKT_TYPE rxd_val[RXD_INFO_SIZE];
-} *if_rxd_info_pad_t;
-
-CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
-CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));
-
-static inline void
-pkt_info_zero(if_pkt_info_t pi)
-{
- if_pkt_info_pad_t pi_pad;
-
- pi_pad = (if_pkt_info_pad_t)pi;
- pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
- pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
-#ifndef __LP64__
- pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
- pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
-#endif
-}
-
-static inline void
-rxd_info_zero(if_rxd_info_t ri)
-{
- if_rxd_info_pad_t ri_pad;
- int i;
-
- ri_pad = (if_rxd_info_pad_t)ri;
- for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
- ri_pad->rxd_val[i] = 0;
- ri_pad->rxd_val[i + 1] = 0;
- ri_pad->rxd_val[i + 2] = 0;
- ri_pad->rxd_val[i + 3] = 0;
- }
-#ifdef __LP64__
- ri_pad->rxd_val[RXD_INFO_SIZE - 1] = 0;
-#endif
-}
-
/*
* Only allow a single packet to take up most 1/nth of the tx ring
*/
@@ -1068,7 +1013,7 @@ iflib_netmap_txsync(struct netmap_kring *kring, int flags)
if (nm_i != head) { /* we have new packets to send */
uint32_t pkt_len = 0, seg_idx = 0;
int nic_i_start = -1, flags = 0;
- pkt_info_zero(&pi);
+ memset(&pi, 0, sizeof(pi));
pi.ipi_segs = txq->ift_segs;
pi.ipi_qsidx = kring->ring_id;
nic_i = netmap_idx_k2n(kring, nm_i);
@@ -1261,7 +1206,7 @@ iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
nm_i = netmap_idx_n2k(kring, nic_i);
MPASS(nm_i == kring->nr_hwtail);
for (n = 0; avail > 0 && nm_i != hwtail_lim; n++, avail--) {
- rxd_info_zero(&ri);
+ memset(&ri, 0, sizeof(ri));
ri.iri_frags = rxq->ifr_frags;
ri.iri_qsidx = kring->ring_id;
ri.iri_ifp = ctx->ifc_ifp;
@@ -1945,6 +1890,7 @@ iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
bus_dmamap_unload(txq->ift_tso_buf_tag,
txq->ift_sds.ifsd_tso_map[i]);
}
+ txq->ift_sds.ifsd_m[i] = NULL;
m_freem(m);
DBG_COUNTER_INC(tx_frees);
}
@@ -2998,7 +2944,7 @@ iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
/*
* Reset client set fields to their default values
*/
- rxd_info_zero(&ri);
+ memset(&ri, 0, sizeof(ri));
ri.iri_qsidx = rxq->ifr_id;
ri.iri_cidx = *cidxp;
ri.iri_ifp = ifp;
@@ -3580,7 +3526,7 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
}
m_head = *m_headp;
- pkt_info_zero(&pi);
+ memset(&pi, 0, sizeof(pi));
pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG | M_BCAST | M_MCAST));
pi.ipi_pidx = pidx;
pi.ipi_qsidx = txq->ift_id;
@@ -4256,7 +4202,7 @@ iflib_if_transmit(if_t ifp, struct mbuf *m)
if (ctx->isc_txq_select_v2) {
struct if_pkt_info pi;
uint64_t early_pullups = 0;
- pkt_info_zero(&pi);
+ memset(&pi, 0, sizeof(pi));
err = iflib_parse_header_partial(&pi, &m, &early_pullups);
if (__predict_false(err != 0)) {
diff --git a/sys/net/pfvar.h b/sys/net/pfvar.h
index 6582250879ca..eb17c4ff5ef0 100644
--- a/sys/net/pfvar.h
+++ b/sys/net/pfvar.h
@@ -329,6 +329,9 @@ SYSCTL_DECL(_net_pf);
MALLOC_DECLARE(M_PF);
MALLOC_DECLARE(M_PFHASH);
MALLOC_DECLARE(M_PF_RULE_ITEM);
+MALLOC_DECLARE(M_PF_STATE_LINK);
+MALLOC_DECLARE(M_PF_SOURCE_LIM);
+MALLOC_DECLARE(M_PF_STATE_LIM);
SDT_PROVIDER_DECLARE(pf);
SDT_PROBE_DECLARE(pf, , test, reason_set);
@@ -893,6 +896,14 @@ struct pf_krule {
u_int8_t set_prio[2];
sa_family_t naf;
u_int8_t rcvifnot;
+ struct {
+ uint8_t id;
+ int limiter_action;
+ } statelim;
+ struct {
+ uint8_t id;
+ int limiter_action;
+ } sourcelim;
struct {
struct pf_addr addr;
@@ -1102,7 +1113,54 @@ struct pf_state_export {
};
_Static_assert(sizeof(struct pf_state_export) == 384, "size incorrect");
+#define PF_STATELIM_NAME_LEN 16 /* kstat istr */
+#define PF_STATELIM_DESCR_LEN 64
+
+#define PF_SOURCELIM_NAME_LEN 16 /* kstat istr */
+#define PF_SOURCELIM_DESCR_LEN 64
+
#ifdef _KERNEL
+struct kstat;
+
+/*
+ * PF state links
+ *
+ * This is used to augment a struct pf_state so it can be
+ * tracked/referenced by the state and source address limiter things.
+ * Each limiter maintains a list of the states they "own", and these
+ * state links are what the limiters use to wire a state into their
+ * lists.
+ *
+ * Without PF state links, the pf_state struct would have to grow
+ * a lot to support a feature that may not be used.
+ *
+ * pfl_entry is used by the pools to add states to their list.
+ * pfl_state allows the pools to get from their list of states to
+ * the states themselves.
+ *
+ * pfl_link allows operations on states (well, delete) to be able
+ * to quickly locate the pf_state_link struct so they can be unwired
+ * from the pools.
+ */
+
+#define PF_STATE_LINK_TYPE_STATELIM 1
+#define PF_STATE_LINK_TYPE_SOURCELIM 2
+
+struct pf_kstate;
+struct pf_state_link {
+ /* used by source/state pools to get to states */
+ TAILQ_ENTRY(pf_state_link) pfl_link;
+
+ /* used by pf_state to get to source/state pools */
+ SLIST_ENTRY(pf_state_link) pfl_linkage;
+
+ struct pf_kstate *pfl_state;
+ unsigned int pfl_type;
+};
+
+TAILQ_HEAD(pf_state_link_list, pf_state_link);
+SLIST_HEAD(pf_state_linkage, pf_state_link);
+
struct pf_kstate {
/*
* Area shared with pf_state_cmp
@@ -1144,13 +1202,226 @@ struct pf_kstate {
u_int16_t tag;
u_int16_t if_index_in;
u_int16_t if_index_out;
+ uint8_t statelim;
+ uint8_t sourcelim;
+ struct pf_state_linkage linkage;
+};
+
+/*
+ * State limiter
+ */
+
+struct pf_limiter_rate {
+ unsigned int limit;
+ unsigned int seconds;
+};
+
+struct pf_statelim {
+ RB_ENTRY(pf_statelim) pfstlim_id_tree;
+ RB_ENTRY(pf_statelim) pfstlim_nm_tree;
+ TAILQ_ENTRY(pf_statelim) pfstlim_list;
+ struct kstat *pfstlim_ks;
+
+ uint32_t pfstlim_id;
+ char pfstlim_nm[PF_STATELIM_NAME_LEN];
+
+ /* config */
+
+ unsigned int pfstlim_limit;
+ struct pf_limiter_rate pfstlim_rate;
+
+ /* run state */
+ struct mtx pfstlim_lock;
+
+ /* rate limiter */
+ uint64_t pfstlim_rate_ts;
+ uint64_t pfstlim_rate_token;
+ uint64_t pfstlim_rate_bucket;
+
+ unsigned int pfstlim_inuse;
+ struct pf_state_link_list pfstlim_states;
+
+ /* counters */
+
+ struct {
+ uint64_t admitted;
+ uint64_t hardlimited;
+ uint64_t ratelimited;
+ } pfstlim_counters;
+
+ struct {
+ time_t created;
+ time_t updated;
+ time_t cleared;
+ } pfstlim_timestamps;
};
+RB_HEAD(pf_statelim_id_tree, pf_statelim);
+RB_PROTOTYPE(pf_statelim_id_tree, pf_statelim, pfstlim_id_tree, cmp);
+
+RB_HEAD(pf_statelim_nm_tree, pf_statelim);
+RB_PROTOTYPE(pf_statelim_nm_tree, pf_statelim, pfstlim_nm_tree, cmp);
+
+TAILQ_HEAD(pf_statelim_list, pf_statelim);
+
+VNET_DECLARE(struct pf_statelim_id_tree, pf_statelim_id_tree_active);
+#define V_pf_statelim_id_tree_active VNET(pf_statelim_id_tree_active)
+VNET_DECLARE(struct pf_statelim_list, pf_statelim_list_active);
+#define V_pf_statelim_list_active VNET(pf_statelim_list_active)
+
+VNET_DECLARE(struct pf_statelim_id_tree, pf_statelim_id_tree_inactive);
+#define V_pf_statelim_id_tree_inactive VNET(pf_statelim_id_tree_inactive)
+VNET_DECLARE(struct pf_statelim_nm_tree, pf_statelim_nm_tree_inactive);
+#define V_pf_statelim_nm_tree_inactive VNET(pf_statelim_nm_tree_inactive)
+VNET_DECLARE(struct pf_statelim_list, pf_statelim_list_inactive);
+#define V_pf_statelim_list_inactive VNET(pf_statelim_list_inactive)
+
+static inline unsigned int
+pf_statelim_enter(struct pf_statelim *pfstlim)
+{
+ mtx_lock(&pfstlim->pfstlim_lock);
+
+ return (0);
+}
+
+static inline void
+pf_statelim_leave(struct pf_statelim *pfstlim, unsigned int gen)
+{
+	mtx_unlock(&pfstlim->pfstlim_lock);
+}
+
/*
- * 6 cache lines per struct, 10 structs per page.
- * Try to not grow the struct beyond that.
+ * Source address pools
*/
-_Static_assert(sizeof(struct pf_kstate) <= 384, "pf_kstate size crosses 384 bytes");
+
+struct pf_sourcelim;
+
+struct pf_source {
+ RB_ENTRY(pf_source) pfsr_tree;
+ RB_ENTRY(pf_source) pfsr_ioc_tree;
+ struct pf_sourcelim *pfsr_parent;
+
+ sa_family_t pfsr_af;
+ u_int16_t pfsr_rdomain;
+ struct pf_addr pfsr_addr;
+
+ /* run state */
+
+ unsigned int pfsr_inuse;
+ unsigned int pfsr_intable;
+ struct pf_state_link_list pfsr_states;
+ time_t pfsr_empty_ts;
+ TAILQ_ENTRY(pf_source) pfsr_empty_gc;
+
+ /* rate limiter */
+ uint64_t pfsr_rate_ts;
+
+ struct {
+ uint64_t admitted;
+ uint64_t hardlimited;
+ uint64_t ratelimited;
+ } pfsr_counters;
+};
+
+RB_HEAD(pf_source_tree, pf_source);
+RB_PROTOTYPE(pf_source_tree, pf_source, pfsr_tree, cmp);
+
+RB_HEAD(pf_source_ioc_tree, pf_source);
+RB_PROTOTYPE(pf_source_ioc_tree, pf_source, pfsr_ioc_tree, cmp);
+
+TAILQ_HEAD(pf_source_list, pf_source);
+
+struct pf_sourcelim {
+ RB_ENTRY(pf_sourcelim) pfsrlim_id_tree;
+ RB_ENTRY(pf_sourcelim) pfsrlim_nm_tree;
+ TAILQ_ENTRY(pf_sourcelim) pfsrlim_list;
+ struct kstat *pfsrlim_ks;
+
+ uint32_t pfsrlim_id;
+ char pfsrlim_nm[PF_SOURCELIM_NAME_LEN];
+ unsigned int pfsrlim_disabled;
+
+ /* config */
+
+ unsigned int pfsrlim_entries;
+ unsigned int pfsrlim_limit;
+ unsigned int pfsrlim_ipv4_prefix;
+ unsigned int pfsrlim_ipv6_prefix;
+
+ struct pf_limiter_rate pfsrlim_rate;
+
+ struct {
+ char name[PF_TABLE_NAME_SIZE];
+ unsigned int hwm;
+ unsigned int lwm;
+ struct pfr_ktable *table;
+ } pfsrlim_overload;
+
+ /* run state */
+ struct mtx pfsrlim_lock;
+
+ struct pf_addr pfsrlim_ipv4_mask;
+ struct pf_addr pfsrlim_ipv6_mask;
+
+ uint64_t pfsrlim_rate_token;
+ uint64_t pfsrlim_rate_bucket;
+
+ /* number of pf_sources */
+ unsigned int pfsrlim_nsources;
+ struct pf_source_tree pfsrlim_sources;
+ struct pf_source_ioc_tree pfsrlim_ioc_sources;
+
+ struct {
+ /* number of times pf_source was allocated */
+ uint64_t addrallocs;
+ /* state was rejected because the address limit was hit */
+ uint64_t addrlimited;
+ /* no memory to create address thing */
+ uint64_t addrnomem;
+
+ /* sum of pf_source inuse gauges */
+ uint64_t inuse;
+ /* sum of pf_source admitted counters */
+ uint64_t admitted;
+ /* sum of pf_source hardlimited counters */
+ uint64_t hardlimited;
+ /* sum of pf_source ratelimited counters */
+ uint64_t ratelimited;
+ } pfsrlim_counters;
+};
+
+RB_HEAD(pf_sourcelim_id_tree, pf_sourcelim);
+RB_PROTOTYPE(pf_sourcelim_id_tree, pf_sourcelim, pfsrlim_id_tree, cmp);
+
+RB_HEAD(pf_sourcelim_nm_tree, pf_sourcelim);
+RB_PROTOTYPE(pf_sourcelim_nm_tree, pf_sourcelim, pfsrlim_nm_tree, cmp);
+
+TAILQ_HEAD(pf_sourcelim_list, pf_sourcelim);
+
+VNET_DECLARE(struct pf_sourcelim_id_tree, pf_sourcelim_id_tree_active);
+#define V_pf_sourcelim_id_tree_active VNET(pf_sourcelim_id_tree_active)
+VNET_DECLARE(struct pf_sourcelim_list, pf_sourcelim_list_active);
+#define V_pf_sourcelim_list_active VNET(pf_sourcelim_list_active)
+
+VNET_DECLARE(struct pf_sourcelim_id_tree, pf_sourcelim_id_tree_inactive);
+#define V_pf_sourcelim_id_tree_inactive VNET(pf_sourcelim_id_tree_inactive)
+VNET_DECLARE(struct pf_sourcelim_nm_tree, pf_sourcelim_nm_tree_inactive);
+#define V_pf_sourcelim_nm_tree_inactive VNET(pf_sourcelim_nm_tree_inactive)
+VNET_DECLARE(struct pf_sourcelim_list, pf_sourcelim_list_inactive);
+#define V_pf_sourcelim_list_inactive VNET(pf_sourcelim_list_inactive)
+
+static inline unsigned int
+pf_sourcelim_enter(struct pf_sourcelim *pfsrlim)
+{
+ mtx_lock(&pfsrlim->pfsrlim_lock);
+ return (0);
+}
+
+static inline void
+pf_sourcelim_leave(struct pf_sourcelim *pfsrlim, unsigned int gen)
+{
+ mtx_unlock(&pfsrlim->pfsrlim_lock);
+}
enum pf_test_status {
PF_TEST_FAIL = -1,
@@ -1168,6 +1439,7 @@ struct pf_test_ctx {
int state_icmp;
int tag;
int rewrite;
+ int limiter_drop;
u_short reason;
struct pf_src_node *sns[PF_SN_MAX];
struct pf_krule *nr;
@@ -1186,6 +1458,9 @@ struct pf_test_ctx {
uint16_t virtual_type;
uint16_t virtual_id;
int depth;
+ struct pf_statelim *statelim;
+ struct pf_sourcelim *sourcelim;
+ struct pf_source *source;
};
#define PF_ANCHOR_STACK_MAX 32
@@ -1805,7 +2080,116 @@ enum pf_syncookies_mode {
#define PF_SYNCOOKIES_HIWATPCT 25
#define PF_SYNCOOKIES_LOWATPCT (PF_SYNCOOKIES_HIWATPCT / 2)
+#define PF_STATELIM_ID_NONE 0
+#define PF_STATELIM_ID_MIN 1
+#define PF_STATELIM_ID_MAX 255 /* fits in pf_state uint8_t */
+#define PF_STATELIM_LIMIT_MIN 1
+#define PF_STATELIM_LIMIT_MAX (1 << 24) /* pf is pretty scalable */
+
+#define PF_SOURCELIM_ID_NONE 0
+#define PF_SOURCELIM_ID_MIN 1
+#define PF_SOURCELIM_ID_MAX 255 /* fits in pf_state uint8_t */
+
#ifdef _KERNEL
+
+struct pfioc_statelim {
+ uint32_t ticket;
+
+ char name[PF_STATELIM_NAME_LEN];
+ uint32_t id;
+
+ /* limit on the total number of states */
+ unsigned int limit;
+
+ /* rate limit on the creation of states */
+ struct pf_limiter_rate rate;
+
+ char description[PF_STATELIM_DESCR_LEN];
+
+ /* kernel state for GET ioctls */
+ unsigned int inuse; /* gauge */
+ uint64_t admitted; /* counter */
+ uint64_t hardlimited; /* counter */
+ uint64_t ratelimited; /* counter */
+};
+
+struct pfioc_sourcelim {
+ uint32_t ticket;
+
+ char name[PF_SOURCELIM_NAME_LEN];
+ uint32_t id;
+
+ /* limit on the total number of address entries */
+ unsigned int entries;
+
+ /* limit on the number of states per address entry */
+ unsigned int limit;
+
+ /* rate limit on the creation of states by an address entry */
+ struct pf_limiter_rate rate;
+
+ /*
+ * when the number of states on an entry exceeds hwm, add
+ * the address to the specified table. when the number of
+ * states goes below lwm, remove it from the table.
+ */
+ char overload_tblname[PF_TABLE_NAME_SIZE];
+ unsigned int overload_hwm;
+ unsigned int overload_lwm;
+
+ /*
+ * mask addresses before they're used for entries. /64s
+ * everywhere for inet6 makes it easy to use too much memory.
+ */
+ unsigned int inet_prefix;
+ unsigned int inet6_prefix;
+
+ char description[PF_SOURCELIM_DESCR_LEN];
+
+ /* kernel state for GET ioctls */
+ unsigned int nentries; /* gauge */
+ unsigned int inuse; /* gauge */
+
+ uint64_t addrallocs; /* counter */
+ uint64_t addrnomem; /* counter */
+ uint64_t admitted; /* counter */
+ uint64_t addrlimited; /* counter */
+ uint64_t hardlimited; /* counter */
+ uint64_t ratelimited; /* counter */
+};
+
+struct pfioc_source_kill {
+ char name[PF_SOURCELIM_NAME_LEN];
+ uint32_t id;
+ unsigned int rdomain;
+ sa_family_t af;
+ struct pf_addr addr;
+
+ unsigned int rmstates; /* kill the states too? */
+};
+
+int pf_statelim_add(const struct pfioc_statelim *);
+struct pf_statelim *pf_statelim_rb_find(struct pf_statelim_id_tree *,
+ struct pf_statelim *);
+struct pf_statelim *pf_statelim_rb_nfind(struct pf_statelim_id_tree *,
+ struct pf_statelim *);
+int pf_statelim_get(struct pfioc_statelim *,
+ struct pf_statelim *(*rbt_op)(struct pf_statelim_id_tree *,
+ struct pf_statelim *));
+int pf_sourcelim_add(const struct pfioc_sourcelim *);
+struct pf_sourcelim *pf_sourcelim_rb_find(struct pf_sourcelim_id_tree *,
+ struct pf_sourcelim *);
+struct pf_sourcelim *pf_sourcelim_rb_nfind(struct pf_sourcelim_id_tree *,
+ struct pf_sourcelim *);
+int pf_sourcelim_get(struct pfioc_sourcelim *,
+ struct pf_sourcelim *(*rbt_op)(struct pf_sourcelim_id_tree *,
+ struct pf_sourcelim *));
+struct pf_source *pf_source_rb_find(struct pf_source_ioc_tree *,
+ struct pf_source *);
+struct pf_source *pf_source_rb_nfind(struct pf_source_ioc_tree *,
+ struct pf_source *);
+int pf_source_clr(struct pfioc_source_kill *);
+
struct pf_kstatus {
counter_u64_t counters[PFRES_MAX]; /* reason for passing/dropping */
counter_u64_t lcounters[KLCNT_MAX]; /* limit counters */
@@ -2420,6 +2804,14 @@ pf_get_time(void)
return ((t.tv_sec * 1000) + (t.tv_usec / 1000));
}
+static inline uint64_t
+SEC_TO_NSEC(uint64_t seconds)
+{
+ if (seconds > UINT64_MAX / 1000000000ULL)
+ return (UINT64_MAX);
+ return (seconds * 1000000000ULL);
+}
+
extern struct pf_kstate *pf_find_state_byid(uint64_t, uint32_t);
extern struct pf_kstate *pf_find_state_all(
const struct pf_state_key_cmp *,
@@ -2554,6 +2946,7 @@ int pfr_clr_tstats(struct pfr_table *, int, int *, int);
int pfr_set_tflags(struct pfr_table *, int, int, int, int *, int *, int);
int pfr_clr_addrs(struct pfr_table *, int *, int);
int pfr_insert_kentry(struct pfr_ktable *, struct pfr_addr *, time_t);
+int pfr_remove_kentry(struct pfr_ktable *, struct pfr_addr *);
int pfr_add_addrs(struct pfr_table *, struct pfr_addr *, int, int *,
int);
int pfr_del_addrs(struct pfr_table *, struct pfr_addr *, int, int *,
diff --git a/sys/net80211/ieee80211_ht.c b/sys/net80211/ieee80211_ht.c
index a8a767785fce..88e614e266a1 100644
--- a/sys/net80211/ieee80211_ht.c
+++ b/sys/net80211/ieee80211_ht.c
@@ -2766,10 +2766,15 @@ ieee80211_ampdu_enable(struct ieee80211_node *ni,
return 1;
}
-/*
- * Request A-MPDU tx aggregation. Setup local state and
- * issue an ADDBA request. BA use will only happen after
+/**
+ * @brief Request A-MPDU tx aggregation.
+ *
+ * Setup local state and issue an ADDBA request. BA use will only happen after
* the other end replies with ADDBA response.
+ *
+ * @param ni ieee80211_node update
+ * @param tap tx_ampdu state
+ * @returns 1 on success and 0 on error
*/
int
ieee80211_ampdu_request(struct ieee80211_node *ni,
@@ -2777,7 +2782,7 @@ ieee80211_ampdu_request(struct ieee80211_node *ni,
{
struct ieee80211com *ic = ni->ni_ic;
uint16_t args[5];
- int tid, dialogtoken;
+ int tid, dialogtoken, error;
static int tokens = 0; /* XXX */
/* XXX locking */
@@ -2828,8 +2833,11 @@ ieee80211_ampdu_request(struct ieee80211_node *ni,
args[4] = _IEEE80211_SHIFTMASK(tap->txa_start, IEEE80211_BASEQ_START)
| _IEEE80211_SHIFTMASK(0, IEEE80211_BASEQ_FRAG)
;
- return ic->ic_send_action(ni, IEEE80211_ACTION_CAT_BA,
+
+ error = ic->ic_send_action(ni, IEEE80211_ACTION_CAT_BA,
IEEE80211_ACTION_BA_ADDBA_REQUEST, args);
+ /* Silly return of 1 for success here. */
+ return (error == 0);
}
/*
diff --git a/sys/net80211/ieee80211_proto.c b/sys/net80211/ieee80211_proto.c
index 4918bf7d025f..ba09cd015a57 100644
--- a/sys/net80211/ieee80211_proto.c
+++ b/sys/net80211/ieee80211_proto.c
@@ -747,8 +747,8 @@ ieee80211_fix_rate(struct ieee80211_node *ni,
((flags & (IEEE80211_F_DOFRATE|IEEE80211_F_DOFMCS)) &&
fixedrate != ucastrate)) {
IEEE80211_NOTE(vap, IEEE80211_MSG_XRATE | IEEE80211_MSG_11N, ni,
- "%s: flags 0x%x okrate %d error %d fixedrate 0x%x "
- "ucastrate %x\n", __func__, fixedrate, ucastrate, flags);
+ "%s: flags 0x%x okrate %d error %d fixedrate 0x%x ucastrate 0x%x\n",
+ __func__, flags, okrate, error, fixedrate, ucastrate);
return badrate | IEEE80211_RATE_BASIC;
} else
return IEEE80211_RV(okrate);
diff --git a/sys/net80211/ieee80211_radiotap.c b/sys/net80211/ieee80211_radiotap.c
index 4d36be6df9f5..aa1812045953 100644
--- a/sys/net80211/ieee80211_radiotap.c
+++ b/sys/net80211/ieee80211_radiotap.c
@@ -110,12 +110,6 @@ ieee80211_radiotap_detach(struct ieee80211com *ic)
{
}
-void
-ieee80211_radiotap_vdetach(struct ieee80211vap *vap)
-{
- /* NB: bpfdetach is called by ether_ifdetach and claims all taps */
-}
-
static void
set_channel(void *p, const struct ieee80211_channel *c)
{
@@ -472,3 +466,12 @@ ieee80211_radiotap_vattach(struct ieee80211vap *vap)
if_ref(vap->iv_ifp);
}
}
+
+void
+ieee80211_radiotap_vdetach(struct ieee80211vap *vap)
+{
+ if (vap->iv_rawbpf != NULL) {
+ bpf_detach(vap->iv_rawbpf);
+ if_rele(vap->iv_ifp);
+ }
+}
diff --git a/sys/net80211/ieee80211_radiotap.h b/sys/net80211/ieee80211_radiotap.h
index d44f81b68b8c..d729323fce3a 100644
--- a/sys/net80211/ieee80211_radiotap.h
+++ b/sys/net80211/ieee80211_radiotap.h
@@ -512,8 +512,11 @@ struct ieee80211_radiotap_lsig {
uint16_t data1;
uint16_t data2;
} __packed;
+
+#define IEEE80211_RADIOTAP_LSIG_DATA1_RATE_KNOWN 0x0001
#define IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN 0x0002
+#define IEEE80211_RADIOTAP_LSIG_DATA2_RATE 0x000F
#define IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH 0xFFF0
/* https://www.radiotap.org/fields/MCS.html */
@@ -615,6 +618,24 @@ struct ieee80211_radiotap_eht {
#define IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_2_2_2 0x000001ff
#define IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_2_2_2_KNOWN 0x00000200
+#define IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3 0x0007fc00
+#define IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3_KNOWN 0x00080000
+#define IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_2_2_3 0x1ff00000
+#define IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_2_2_3_KNOWN 0x20000000
+
+#define IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4 0x000001ff
+#define IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4_KNOWN 0x00000200
+#define IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_2_2_4 0x0007fc00
+#define IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_2_2_4_KNOWN 0x00080000
+#define IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5 0x1ff00000
+#define IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5_KNOWN 0x20000000
+
+#define IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_2_2_5 0x000001ff
+#define IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_2_2_5_KNOWN 0x00000200
+#define IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6 0x0007fc00
+#define IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6_KNOWN 0x00080000
+#define IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_2_2_6 0x1ff00000
+#define IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_2_2_6_KNOWN 0x20000000
#define IEEE80211_RADIOTAP_EHT_DATA7_NSS_S 0x0000f000
#define IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S 0x00010000
diff --git a/sys/netgraph/bluetooth/include/ng_hci.h b/sys/netgraph/bluetooth/include/ng_hci.h
index 54980e128bba..bbe9541c25c9 100644
--- a/sys/netgraph/bluetooth/include/ng_hci.h
+++ b/sys/netgraph/bluetooth/include/ng_hci.h
@@ -447,8 +447,8 @@ typedef struct {
} __attribute__ ((packed)) bdaddr_t;
typedef bdaddr_t * bdaddr_p;
-/* Any BD_ADDR. Note: This is actually 7 bytes (count '\0' terminator) */
-#define NG_HCI_BDADDR_ANY (&(const bdaddr_t){"\000\000\000\000\000\000"})
+/* Any BD_ADDR. */
+#define NG_HCI_BDADDR_ANY (&(const bdaddr_t){ { 0, 0, 0, 0, 0, 0 } })
/* HCI status return parameter */
typedef struct {
diff --git a/sys/netgraph/netflow/netflow_v9.c b/sys/netgraph/netflow/netflow_v9.c
index b6e9fca98408..e6b63a8aa36b 100644
--- a/sys/netgraph/netflow/netflow_v9.c
+++ b/sys/netgraph/netflow/netflow_v9.c
@@ -227,6 +227,7 @@ export9_send(priv_p priv, fib_export_p fe, item_p item, struct netflow_v9_packet
else
NG_FREE_ITEM(item);
+ fe->sent_packets++;
free(t, M_NETFLOW_GENERAL);
return (error);
diff --git a/sys/netinet/icmp6.h b/sys/netinet/icmp6.h
index 082ef5d29ce9..9ed39d118c16 100644
--- a/sys/netinet/icmp6.h
+++ b/sys/netinet/icmp6.h
@@ -717,8 +717,7 @@ int icmp6_ratelimit(const struct in6_addr *, const int, const int);
#define icmp6_ifstat_inc(ifp, tag) \
do { \
if (ifp) \
- counter_u64_add(((struct in6_ifextra *) \
- ((ifp)->if_inet6))->icmp6_ifstat[ \
+ counter_u64_add((ifp)->if_inet6->icmp6_ifstat[ \
offsetof(struct icmp6_ifstat, tag) / sizeof(uint64_t)], 1);\
} while (/*CONSTCOND*/ 0)
diff --git a/sys/netinet/ip6.h b/sys/netinet/ip6.h
index 2f61d594e59d..580283b09745 100644
--- a/sys/netinet/ip6.h
+++ b/sys/netinet/ip6.h
@@ -257,7 +257,17 @@ struct ip6_frag {
#define IPV6_HLIMDEC 1 /* subtracted when forwarding */
#define IPV6_MMTU 1280 /* minimal MTU and reassembly. 1024 + 256 */
-#define IPV6_MAXPACKET 65535 /* ip6 max packet size without Jumbo payload*/
+/*
+ * XXX: IPV6_MAXPACKET is historically used as the maximum packet size.
+ * The maximum IPv6 packet size is:
+ *
+ * v6 header size (40) + payload size (65535)
+ *
+ * practically this isn't encountered as it requires a link with an mtu of
+ * 65575 octets. IPV6_MAXPACKET is preserved at this value for compatibility.
+ */
+#define IPV6_MAXPACKET 65535
+#define IPV6_MAXPAYLOAD 65535 /* max size that can be carried in a payload */
#define IPV6_MAXOPTHDR 2048 /* max option header size, 256 64-bit words */
#endif /* not _NETINET_IP6_H_ */
diff --git a/sys/netinet/ip_fastfwd.c b/sys/netinet/ip_fastfwd.c
index d3d42afb2d84..6001ce781bc8 100644
--- a/sys/netinet/ip_fastfwd.c
+++ b/sys/netinet/ip_fastfwd.c
@@ -359,15 +359,20 @@ passin:
}
/*
- * Decrement the TTL and incrementally change the IP header checksum.
- * Don't bother doing this with hw checksum offloading, it's faster
- * doing it right here.
+ * Decrement the TTL.
+ * If the IP header checksum field contains a valid value, incrementally
+ * change this value. Don't use hw checksum offloading, which would
+ * recompute the checksum. It's faster to just change it here
+ * according to the decremented TTL.
+ * If the checksum still needs to be computed, don't touch it.
*/
ip->ip_ttl -= IPTTLDEC;
- if (ip->ip_sum >= (u_int16_t) ~htons(IPTTLDEC << 8))
- ip->ip_sum -= ~htons(IPTTLDEC << 8);
- else
- ip->ip_sum += htons(IPTTLDEC << 8);
+ if (__predict_true((m->m_pkthdr.csum_flags & CSUM_IP) == 0)) {
+ if (ip->ip_sum >= (u_int16_t) ~htons(IPTTLDEC << 8))
+ ip->ip_sum -= ~htons(IPTTLDEC << 8);
+ else
+ ip->ip_sum += htons(IPTTLDEC << 8);
+ }
#ifdef IPSTEALTH
}
#endif
@@ -465,9 +470,18 @@ passout:
gw = (const struct sockaddr *)dst;
/*
- * If TCP/UDP header still needs a valid checksum and interface will not
- * calculate it for us, do it here.
+ * If the IP/SCTP/TCP/UDP header still needs a valid checksum and the
+ * interface will not calculate it for us, do it here.
+ * Note that if we defer checksum calculation, we might send an ICMP
+ * message later that reflects this packet, which still has an
+ * invalid checksum.
*/
+ if (__predict_false(m->m_pkthdr.csum_flags & CSUM_IP &
+ ~nh->nh_ifp->if_hwassist)) {
+ ip->ip_sum = 0;
+ ip->ip_sum = in_cksum(m, (ip->ip_hl << 2));
+ m->m_pkthdr.csum_flags &= ~CSUM_IP;
+ }
if (__predict_false(m->m_pkthdr.csum_flags & CSUM_DELAY_DATA &
~nh->nh_ifp->if_hwassist)) {
in_delayed_cksum(m);
diff --git a/sys/netinet/ip_input.c b/sys/netinet/ip_input.c
index 2e0635f8e482..1e1747d04c45 100644
--- a/sys/netinet/ip_input.c
+++ b/sys/netinet/ip_input.c
@@ -532,6 +532,12 @@ ip_input(struct mbuf *m)
if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
+ } else if (m->m_pkthdr.csum_flags & CSUM_IP) {
+ /*
+ * Packet from local host that offloaded checksum computation.
+ * Checksum not required since the packet wasn't on the wire.
+ */
+ sum = 0;
} else {
if (hlen == sizeof(struct ip)) {
sum = in_cksum_hdr(ip);
diff --git a/sys/netinet/ip_mroute.c b/sys/netinet/ip_mroute.c
index f9efdc419882..ca32f381ff51 100644
--- a/sys/netinet/ip_mroute.c
+++ b/sys/netinet/ip_mroute.c
@@ -606,12 +606,12 @@ get_vif_cnt(struct sioc_vif_req *req)
return EINVAL;
}
- mtx_lock_spin(&V_viftable[vifi].v_spin);
+ mtx_lock(&V_viftable[vifi].v_mtx);
req->icount = V_viftable[vifi].v_pkt_in;
req->ocount = V_viftable[vifi].v_pkt_out;
req->ibytes = V_viftable[vifi].v_bytes_in;
req->obytes = V_viftable[vifi].v_bytes_out;
- mtx_unlock_spin(&V_viftable[vifi].v_spin);
+ mtx_unlock(&V_viftable[vifi].v_mtx);
MRW_RUNLOCK();
return 0;
@@ -1004,8 +1004,8 @@ add_vif(struct vifctl *vifcp)
vifp->v_pkt_out = 0;
vifp->v_bytes_in = 0;
vifp->v_bytes_out = 0;
- sprintf(vifp->v_spin_name, "BM[%d] spin", vifcp->vifc_vifi);
- mtx_init(&vifp->v_spin, vifp->v_spin_name, NULL, MTX_SPIN);
+ sprintf(vifp->v_mtx_name, "BM[%d] mtx", vifcp->vifc_vifi);
+ mtx_init(&vifp->v_mtx, vifp->v_mtx_name, NULL, MTX_DEF);
/* Adjust numvifs up if the vifi is higher than numvifs */
if (V_numvifs <= vifcp->vifc_vifi)
@@ -1053,7 +1053,7 @@ del_vif_locked(vifi_t vifi, struct ifnet **ifp_multi_leave, struct ifnet **ifp_f
}
}
- mtx_destroy(&vifp->v_spin);
+ mtx_destroy(&vifp->v_mtx);
bzero((caddr_t)vifp, sizeof (*vifp));
@@ -1659,7 +1659,7 @@ ip_mdq(struct mbuf *m, struct ifnet *ifp, struct mfc *rt, vifi_t xmt_vif)
}
/* If I sourced this packet, it counts as output, else it was input. */
- mtx_lock_spin(&V_viftable[vifi].v_spin);
+ mtx_lock(&V_viftable[vifi].v_mtx);
if (in_hosteq(ip->ip_src, V_viftable[vifi].v_lcl_addr)) {
V_viftable[vifi].v_pkt_out++;
V_viftable[vifi].v_bytes_out += plen;
@@ -1667,7 +1667,7 @@ ip_mdq(struct mbuf *m, struct ifnet *ifp, struct mfc *rt, vifi_t xmt_vif)
V_viftable[vifi].v_pkt_in++;
V_viftable[vifi].v_bytes_in += plen;
}
- mtx_unlock_spin(&V_viftable[vifi].v_spin);
+ mtx_unlock(&V_viftable[vifi].v_mtx);
rt->mfc_pkt_cnt++;
rt->mfc_byte_cnt += plen;
@@ -1704,14 +1704,14 @@ ip_mdq(struct mbuf *m, struct ifnet *ifp, struct mfc *rt, vifi_t xmt_vif)
for (x = rt->mfc_bw_meter_leq; x != NULL; x = x->bm_mfc_next) {
/*
* Record that a packet is received.
- * Spin lock has to be taken as callout context
+ * A lock has to be taken as callout context
* (expire_bw_meter_leq) might modify these fields
* as well
*/
- mtx_lock_spin(&x->bm_spin);
+ mtx_lock(&x->bm_mtx);
x->bm_measured.b_packets++;
x->bm_measured.b_bytes += plen;
- mtx_unlock_spin(&x->bm_spin);
+ mtx_unlock(&x->bm_mtx);
}
}
@@ -1894,13 +1894,14 @@ expire_bw_meter_leq(void *arg)
/* Reset counters */
x->bm_start_time = now;
- /* Spin lock has to be taken as ip_forward context
+ /*
+ * The lock has to be taken as ip_forward context
* might modify these fields as well
*/
- mtx_lock_spin(&x->bm_spin);
+ mtx_lock(&x->bm_mtx);
x->bm_measured.b_bytes = 0;
x->bm_measured.b_packets = 0;
- mtx_unlock_spin(&x->bm_spin);
+ mtx_unlock(&x->bm_mtx);
callout_schedule(&x->bm_meter_callout, tvtohz(&x->bm_threshold.b_time));
@@ -1986,8 +1987,8 @@ add_bw_upcall(struct bw_upcall *req)
x->bm_time_next = NULL;
x->bm_mfc = mfc;
x->arg = curvnet;
- sprintf(x->bm_spin_name, "BM spin %p", x);
- mtx_init(&x->bm_spin, x->bm_spin_name, NULL, MTX_SPIN);
+ sprintf(x->bm_mtx_name, "BM mtx %p", x);
+ mtx_init(&x->bm_mtx, x->bm_mtx_name, NULL, MTX_DEF);
/* For LEQ case create periodic callout */
if (req->bu_flags & BW_UPCALL_LEQ) {
@@ -2014,7 +2015,7 @@ free_bw_list(struct bw_meter *list)
/* MRW_WLOCK must be held here */
if (x->bm_flags & BW_METER_LEQ) {
callout_drain(&x->bm_meter_callout);
- mtx_destroy(&x->bm_spin);
+ mtx_destroy(&x->bm_mtx);
}
list = list->bm_mfc_next;
@@ -2115,7 +2116,7 @@ bw_meter_geq_receive_packet(struct bw_meter *x, int plen, struct timeval *nowp)
/*
* Processing for ">=" type of bw_meter entry.
- * bm_spin does not have to be hold here as in GEQ
+ * bm_mtx does not have to be held here as in GEQ
* case this is the only context accessing bm_measured.
*/
if (BW_TIMEVALCMP(&delta, &x->bm_threshold.b_time, >)) {
@@ -2834,12 +2835,6 @@ ip_mroute_modevent(module_t mod, int type, void *unused)
if_detach_event_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
if_detached_event, NULL, EVENTHANDLER_PRI_ANY);
- if (if_detach_event_tag == NULL) {
- printf("ip_mroute: unable to register "
- "ifnet_departure_event handler\n");
- MRW_LOCK_DESTROY();
- return (EINVAL);
- }
if (!powerof2(mfchashsize)) {
printf("WARNING: %s not a power of 2; using default\n",
diff --git a/sys/netinet/ip_mroute.h b/sys/netinet/ip_mroute.h
index 2c71dc10dfd9..ed98e59a7c77 100644
--- a/sys/netinet/ip_mroute.h
+++ b/sys/netinet/ip_mroute.h
@@ -262,9 +262,9 @@ struct vif {
u_long v_bytes_in; /* # bytes in on interface */
u_long v_bytes_out; /* # bytes out on interface */
#ifdef _KERNEL
-#define MROUTE_VIF_SYSCTL_LEN __offsetof(struct vif, v_spin)
- struct mtx v_spin; /* Spin mutex for pkt stats */
- char v_spin_name[32];
+#define MROUTE_VIF_SYSCTL_LEN __offsetof(struct vif, v_mtx)
+ struct mtx v_mtx; /* mutex for pkt stats */
+ char v_mtx_name[32];
#endif
};
@@ -350,8 +350,8 @@ struct bw_meter {
#ifdef _KERNEL
struct callout bm_meter_callout; /* Periodic callout */
void* arg; /* custom argument */
- struct mtx bm_spin; /* meter spin lock */
- char bm_spin_name[32];
+ struct mtx bm_mtx; /* meter lock */
+ char bm_mtx_name[32];
#endif
};
diff --git a/sys/netinet/sctp_bsd_addr.c b/sys/netinet/sctp_bsd_addr.c
index ac715d8298ec..f1c364d7b57a 100644
--- a/sys/netinet/sctp_bsd_addr.c
+++ b/sys/netinet/sctp_bsd_addr.c
@@ -174,6 +174,7 @@ sctp_is_desired_interface_type(struct ifnet *ifn)
case IFT_IPOVERCLAW:
case IFT_PROPVIRTUAL: /* NetGraph Virtual too */
case IFT_VIRTUALIPADDRESS:
+ case IFT_BRIDGE:
result = 1;
break;
default:
diff --git a/sys/netinet/sctp_output.c b/sys/netinet/sctp_output.c
index e4bdb4291972..4f6fbc6be783 100644
--- a/sys/netinet/sctp_output.c
+++ b/sys/netinet/sctp_output.c
@@ -4596,9 +4596,9 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
}
}
} else if (ifp != NULL) {
- if ((ND_IFINFO(ifp)->linkmtu > 0) &&
- (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
- sctp_pathmtu_adjustment(stcb, ND_IFINFO(ifp)->linkmtu, false);
+ if ((ifp->if_inet6->nd_linkmtu > 0) &&
+ (stcb->asoc.smallest_mtu > ifp->if_inet6->nd_linkmtu)) {
+ sctp_pathmtu_adjustment(stcb, ifp->if_inet6->nd_linkmtu, false);
}
}
}
diff --git a/sys/netinet6/icmp6.c b/sys/netinet6/icmp6.c
index c05850f1477b..5b5f7b83623e 100644
--- a/sys/netinet6/icmp6.c
+++ b/sys/netinet6/icmp6.c
@@ -2080,7 +2080,8 @@ icmp6_reflect(struct mbuf *m, size_t off)
if (m->m_pkthdr.rcvif != NULL) {
/* XXX: This may not be the outgoing interface */
- hlim = ND_IFINFO(m->m_pkthdr.rcvif)->chlim;
+ hlim =
+ m->m_pkthdr.rcvif->if_inet6->nd_curhoplimit;
} else
hlim = V_ip6_defhlim;
}
diff --git a/sys/netinet6/in6.c b/sys/netinet6/in6.c
index a953bb5546d6..e1504400d55b 100644
--- a/sys/netinet6/in6.c
+++ b/sys/netinet6/in6.c
@@ -317,7 +317,6 @@ in6_control_ioctl(u_long cmd, void *data,
return (error);
}
/* FALLTHROUGH */
- case OSIOCGIFINFO_IN6:
case SIOCGIFINFO_IN6:
case SIOCGNBRINFO_IN6:
case SIOCGDEFIFACE_IN6:
@@ -1029,6 +1028,15 @@ in6_alloc_ifa(struct ifnet *ifp, struct in6_aliasreq *ifra, int flags)
return (ia);
}
+time_t
+in6_expire_time(uint32_t ltime)
+{
+ if (ltime == ND6_INFINITE_LIFETIME)
+ return (0);
+ else
+ return (time_uptime + ltime);
+}
+
/*
* Update/configure interface address parameters:
*
@@ -1051,16 +1059,10 @@ in6_update_ifa_internal(struct ifnet *ifp, struct in6_aliasreq *ifra,
* these members for applications.
*/
ia->ia6_lifetime = ifra->ifra_lifetime;
- if (ia->ia6_lifetime.ia6t_vltime != ND6_INFINITE_LIFETIME) {
- ia->ia6_lifetime.ia6t_expire =
- time_uptime + ia->ia6_lifetime.ia6t_vltime;
- } else
- ia->ia6_lifetime.ia6t_expire = 0;
- if (ia->ia6_lifetime.ia6t_pltime != ND6_INFINITE_LIFETIME) {
- ia->ia6_lifetime.ia6t_preferred =
- time_uptime + ia->ia6_lifetime.ia6t_pltime;
- } else
- ia->ia6_lifetime.ia6t_preferred = 0;
+ ia->ia6_lifetime.ia6t_expire =
+ in6_expire_time(ifra->ifra_lifetime.ia6t_vltime);
+ ia->ia6_lifetime.ia6t_preferred =
+ in6_expire_time(ifra->ifra_lifetime.ia6t_pltime);
/*
* backward compatibility - if IN6_IFF_DEPRECATED is set from the
@@ -1088,7 +1090,7 @@ in6_update_ifa_internal(struct ifnet *ifp, struct in6_aliasreq *ifra,
* an interface with ND6_IFF_IFDISABLED.
*/
if (in6if_do_dad(ifp) &&
- (hostIsNew || (ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED)))
+ (hostIsNew || (ifp->if_inet6->nd_flags & ND6_IFF_IFDISABLED)))
ia->ia6_flags |= IN6_IFF_TENTATIVE;
/* notify other subsystems */
@@ -1327,6 +1329,28 @@ in6_addifaddr(struct ifnet *ifp, struct in6_aliasreq *ifra, struct in6_ifaddr *i
(*carp_detach_p)(&ia->ia_ifa, false);
goto out;
}
+ } else if (pr->ndpr_raf_onlink) {
+ time_t expiry;
+
+ /*
+ * If the prefix already exists, update lifetimes, but avoid
+ * shortening them.
+ */
+ ND6_WLOCK();
+ expiry = in6_expire_time(pr0.ndpr_pltime);
+ if (pr->ndpr_preferred != 0 &&
+ (pr->ndpr_preferred < expiry || expiry == 0)) {
+ pr->ndpr_pltime = pr0.ndpr_pltime;
+ pr->ndpr_preferred = expiry;
+ }
+ expiry = in6_expire_time(pr0.ndpr_vltime);
+ if (pr->ndpr_expire != 0 &&
+ (pr->ndpr_expire < expiry || expiry == 0)) {
+ pr->ndpr_vltime = pr0.ndpr_vltime;
+ pr->ndpr_expire = expiry;
+ }
+ pr->ndpr_lastupdate = time_uptime;
+ ND6_WUNLOCK();
}
/* relate the address to the prefix */
@@ -1362,11 +1386,11 @@ aifaddr_out:
* Try to clear the flag when a new IPv6 address is added
* onto an IFDISABLED interface and it succeeds.
*/
- if (ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED) {
+ if (ifp->if_inet6->nd_flags & ND6_IFF_IFDISABLED) {
struct in6_ndireq nd;
memset(&nd, 0, sizeof(nd));
- nd.ndi.flags = ND_IFINFO(ifp)->flags;
+ nd.ndi.flags = ifp->if_inet6->nd_flags;
nd.ndi.flags &= ~ND6_IFF_IFDISABLED;
if (nd6_ioctl(SIOCSIFINFO_FLAGS, (caddr_t)&nd, ifp) < 0)
log(LOG_NOTICE, "SIOCAIFADDR_IN6: "
@@ -1688,7 +1712,7 @@ in6ifa_llaonifp(struct ifnet *ifp)
struct sockaddr_in6 *sin6;
struct ifaddr *ifa;
- if (ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED)
+ if (ifp->if_inet6->nd_flags & ND6_IFF_IFDISABLED)
return (NULL);
NET_EPOCH_ENTER(et);
CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
@@ -2112,7 +2136,7 @@ in6if_do_dad(struct ifnet *ifp)
return (0);
if ((ifp->if_flags & IFF_MULTICAST) == 0)
return (0);
- if ((ND_IFINFO(ifp)->flags &
+ if ((ifp->if_inet6->nd_flags &
(ND6_IFF_IFDISABLED | ND6_IFF_NO_DAD)) != 0)
return (0);
return (1);
@@ -2583,35 +2607,33 @@ in6_ifarrival(void *arg __unused, struct ifnet *ifp)
ifp->if_inet6 = NULL;
return;
}
- ext = (struct in6_ifextra *)malloc(sizeof(*ext), M_IFADDR, M_WAITOK);
- bzero(ext, sizeof(*ext));
-
- ext->in6_ifstat = malloc(sizeof(counter_u64_t) *
- sizeof(struct in6_ifstat) / sizeof(uint64_t), M_IFADDR, M_WAITOK);
+ ext = ifp->if_inet6 = malloc(sizeof(*ext), M_IFADDR, M_WAITOK | M_ZERO);
COUNTER_ARRAY_ALLOC(ext->in6_ifstat,
sizeof(struct in6_ifstat) / sizeof(uint64_t), M_WAITOK);
-
- ext->icmp6_ifstat = malloc(sizeof(counter_u64_t) *
- sizeof(struct icmp6_ifstat) / sizeof(uint64_t), M_IFADDR,
- M_WAITOK);
COUNTER_ARRAY_ALLOC(ext->icmp6_ifstat,
sizeof(struct icmp6_ifstat) / sizeof(uint64_t), M_WAITOK);
+ nd6_ifattach(ifp);
+ mld_domifattach(ifp);
+ scope6_ifattach(ifp);
- ext->nd_ifinfo = nd6_ifattach(ifp);
- ext->scope6_id = scope6_ifattach(ifp);
ext->lltable = in6_lltattach(ifp);
-
- ext->mld_ifinfo = mld_domifattach(ifp);
-
- ifp->if_inet6 = ext;
}
EVENTHANDLER_DEFINE(ifnet_arrival_event, in6_ifarrival, NULL,
EVENTHANDLER_PRI_ANY);
uint32_t
-in6_ifmtu(struct ifnet *ifp)
-{
- return (IN6_LINKMTU(ifp));
+in6_ifmtu(const struct ifnet *ifp)
+{
+ const uint32_t
+ linkmtu = ifp->if_inet6->nd_linkmtu,
+ maxmtu = ifp->if_inet6->nd_maxmtu,
+ ifmtu = ifp->if_mtu;
+
+ if (linkmtu > 0 && linkmtu < ifmtu)
+ return (linkmtu);
+ if (maxmtu > 0 && maxmtu < ifmtu)
+ return (maxmtu);
+ return (ifmtu);
}
/*
diff --git a/sys/netinet6/in6.h b/sys/netinet6/in6.h
index a7fe03b9c3d7..f250d7e49982 100644
--- a/sys/netinet6/in6.h
+++ b/sys/netinet6/in6.h
@@ -671,6 +671,8 @@ int in6_cksum_partial_l2(struct mbuf *m, uint8_t nxt, uint32_t off_l3,
uint32_t off_l4, uint32_t len, uint32_t cov);
int in6_cksum_pseudo(struct ip6_hdr *, uint32_t, uint8_t, uint16_t);
+time_t in6_expire_time(uint32_t);
+
int in6_localaddr(struct in6_addr *);
int in6_localip(struct in6_addr *);
bool in6_localip_fib(struct in6_addr *, uint16_t);
diff --git a/sys/netinet6/in6_ifattach.c b/sys/netinet6/in6_ifattach.c
index c3d256a8d51f..c38fe90632f8 100644
--- a/sys/netinet6/in6_ifattach.c
+++ b/sys/netinet6/in6_ifattach.c
@@ -465,7 +465,7 @@ in6_get_ifid(struct ifnet *ifp0, struct ifnet *altifp,
NET_EPOCH_ASSERT();
/* first, try to get it from the interface itself, with stable algorithm, if configured */
- if ((ND_IFINFO(ifp0)->flags & ND6_IFF_STABLEADDR) && in6_get_stableifid(ifp0, in6, 64) == 0) {
+ if ((ifp0->if_inet6->nd_flags & ND6_IFF_STABLEADDR) && in6_get_stableifid(ifp0, in6, 64) == 0) {
nd6log((LOG_DEBUG, "%s: got interface identifier from itself (stable private)\n",
if_name(ifp0)));
goto success;
@@ -799,8 +799,8 @@ in6_ifattach(struct ifnet *ifp, struct ifnet *altifp)
* linklocals for 6to4 interface, but there's no use and
* it is rather harmful to have one.
*/
- ND_IFINFO(ifp)->flags &= ~ND6_IFF_AUTO_LINKLOCAL;
- ND_IFINFO(ifp)->flags |= ND6_IFF_NO_DAD;
+ ifp->if_inet6->nd_flags &= ~ND6_IFF_AUTO_LINKLOCAL;
+ ifp->if_inet6->nd_flags |= ND6_IFF_NO_DAD;
break;
default:
break;
@@ -831,8 +831,8 @@ in6_ifattach(struct ifnet *ifp, struct ifnet *altifp)
/*
* assign a link-local address, if there's none.
*/
- if (!(ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED) &&
- ND_IFINFO(ifp)->flags & ND6_IFF_AUTO_LINKLOCAL) {
+ if (!(ifp->if_inet6->nd_flags & ND6_IFF_IFDISABLED) &&
+ ifp->if_inet6->nd_flags & ND6_IFF_AUTO_LINKLOCAL) {
struct epoch_tracker et;
NET_EPOCH_ENTER(et);
@@ -900,6 +900,19 @@ in6_ifdetach(struct ifnet *ifp)
}
static void
+in6_ifextra_free(epoch_context_t ctx)
+{
+ struct in6_ifextra *ext =
+ __containerof(ctx, struct in6_ifextra, epoch_ctx);
+
+ COUNTER_ARRAY_FREE(ext->in6_ifstat,
+ sizeof(struct in6_ifstat) / sizeof(uint64_t));
+ COUNTER_ARRAY_FREE(ext->icmp6_ifstat,
+ sizeof(struct icmp6_ifstat) / sizeof(uint64_t));
+ free(ext, M_IFADDR);
+}
+
+static void
in6_ifdeparture(void *arg __unused, struct ifnet *ifp)
{
struct in6_ifextra *ext = ifp->if_inet6;
@@ -916,17 +929,16 @@ in6_ifdeparture(void *arg __unused, struct ifnet *ifp)
if (!VNET_IS_SHUTTING_DOWN(ifp->if_vnet))
#endif
_in6_ifdetach(ifp, 1);
+ /*
+ * XXXGL: mld and nd bits are left in a consistent state after
+ * destructors, but I'm not sure if it is safe to call lltable_free() here.
+ * Individual lle entries are epoch(9) protected, but the table itself
+ * isn't.
+ */
mld_domifdetach(ifp);
- scope6_ifdetach(ext->scope6_id);
- nd6_ifdetach(ifp, ext->nd_ifinfo);
+ nd6_ifdetach(ifp);
lltable_free(ext->lltable);
- COUNTER_ARRAY_FREE(ext->in6_ifstat,
- sizeof(struct in6_ifstat) / sizeof(uint64_t));
- free(ext->in6_ifstat, M_IFADDR);
- COUNTER_ARRAY_FREE(ext->icmp6_ifstat,
- sizeof(struct icmp6_ifstat) / sizeof(uint64_t));
- free(ext->icmp6_ifstat, M_IFADDR);
- free(ext, M_IFADDR);
+ NET_EPOCH_CALL(in6_ifextra_free, &ext->epoch_ctx);
}
EVENTHANDLER_DEFINE(ifnet_departure_event, in6_ifdeparture, NULL,
EVENTHANDLER_PRI_ANY);
diff --git a/sys/netinet6/in6_rmx.c b/sys/netinet6/in6_rmx.c
index d1c121115b60..35c6cd5ba5f1 100644
--- a/sys/netinet6/in6_rmx.c
+++ b/sys/netinet6/in6_rmx.c
@@ -114,10 +114,8 @@ rib6_augment_nh(u_int fibnum, struct nhop_object *nh)
* inherit interface MTU if not set or
* check if MTU is too large.
*/
- if (nh->nh_mtu == 0) {
- nh->nh_mtu = IN6_LINKMTU(nh->nh_ifp);
- } else if (nh->nh_mtu > IN6_LINKMTU(nh->nh_ifp))
- nh->nh_mtu = IN6_LINKMTU(nh->nh_ifp);
+ if (nh->nh_mtu == 0 || nh->nh_mtu > in6_ifmtu(nh->nh_ifp))
+ nh->nh_mtu = in6_ifmtu(nh->nh_ifp);
/* Set nexthop type */
if (nhop_get_type(nh) == 0) {
diff --git a/sys/netinet6/in6_src.c b/sys/netinet6/in6_src.c
index 5171bc1d4ea6..d5e8e0f952c6 100644
--- a/sys/netinet6/in6_src.c
+++ b/sys/netinet6/in6_src.c
@@ -366,7 +366,7 @@ in6_selectsrc(uint32_t fibnum, struct sockaddr_in6 *dstsock,
*/
/* Rule 5: Prefer outgoing interface */
- if (!(ND_IFINFO(ifp)->flags & ND6_IFF_NO_PREFER_IFACE)) {
+ if (!(ifp->if_inet6->nd_flags & ND6_IFF_NO_PREFER_IFACE)) {
if (ia_best->ia_ifp == ifp && ia->ia_ifp != ifp)
NEXT(5);
if (ia_best->ia_ifp != ifp && ia->ia_ifp == ifp)
@@ -868,7 +868,7 @@ in6_selecthlim(struct inpcb *inp, struct ifnet *ifp)
if (inp && inp->in6p_hops >= 0)
return (inp->in6p_hops);
else if (ifp)
- return (ND_IFINFO(ifp)->chlim);
+ return (ifp->if_inet6->nd_curhoplimit);
else if (inp && !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
struct nhop_object *nh;
struct in6_addr dst;
@@ -879,7 +879,7 @@ in6_selecthlim(struct inpcb *inp, struct ifnet *ifp)
in6_splitscope(&inp->in6p_faddr, &dst, &scopeid);
nh = fib6_lookup(fibnum, &dst, scopeid, 0, 0);
if (nh != NULL) {
- hlim = ND_IFINFO(nh->nh_ifp)->chlim;
+ hlim = nh->nh_ifp->if_inet6->nd_curhoplimit;
return (hlim);
}
}
diff --git a/sys/netinet6/in6_var.h b/sys/netinet6/in6_var.h
index 0cfdde652c0a..057cd84b6ea7 100644
--- a/sys/netinet6/in6_var.h
+++ b/sys/netinet6/in6_var.h
@@ -93,25 +93,6 @@ struct in6_addrlifetime {
u_int32_t ia6t_pltime; /* prefix lifetime */
};
-struct nd_ifinfo;
-struct scope6_id;
-struct lltable;
-struct mld_ifsoftc;
-struct in6_multi;
-
-struct in6_ifextra {
- counter_u64_t *in6_ifstat;
- counter_u64_t *icmp6_ifstat;
- struct nd_ifinfo *nd_ifinfo;
- struct scope6_id *scope6_id;
- struct lltable *lltable;
- struct mld_ifsoftc *mld_ifinfo;
- u_int dad_failures; /* DAD failures when using RFC 7217 stable addresses */
-};
-
-#define LLTABLE6(ifp) ((ifp)->if_inet6->lltable)
-#define DAD_FAILURES(ifp) ((ifp)->if_inet6->dad_failures)
-
#ifdef _KERNEL
SLIST_HEAD(in6_multi_head, in6_multi);
@@ -449,9 +430,6 @@ struct in6_rrenumreq {
#define SIOCGIFAFLAG_IN6 _IOWR('i', 73, struct in6_ifreq)
-#ifdef _KERNEL
-#define OSIOCGIFINFO_IN6 _IOWR('i', 76, struct in6_ondireq)
-#endif
#define SIOCGIFINFO_IN6 _IOWR('i', 108, struct in6_ndireq)
#define SIOCSIFINFO_IN6 _IOWR('i', 109, struct in6_ndireq)
#define SIOCSNDFLUSH_IN6 _IOWR('i', 77, struct in6_ifreq)
@@ -510,6 +488,56 @@ struct in6_rrenumreq {
#endif
#ifdef _KERNEL
+/*
+ * Structure pointed at by ifp->if_inet6.
+ */
+struct in6_ifextra {
+ counter_u64_t in6_ifstat[sizeof(struct in6_ifstat) / sizeof(uint64_t)];
+ counter_u64_t icmp6_ifstat[sizeof(struct icmp6_ifstat) /
+ sizeof(uint64_t)];
+ /* ND6 */
+ uint32_t nd_linkmtu;
+ uint32_t nd_maxmtu;
+ uint32_t nd_basereachable;
+ uint32_t nd_reachable;
+ uint32_t nd_retrans;
+ uint32_t nd_flags;
+ int nd_recalc_timer;
+ u_int nd_dad_failures;
+ uint8_t nd_curhoplimit;
+
+ struct mld_ifsoftc {
+ /* Timers and intervals measured in seconds. */
+ LIST_ENTRY(mld_ifsoftc) mli_link;
+ struct ifnet *mli_ifp; /* interface this instance belongs to */
+ uint32_t mli_version; /* MLDv1 Host Compatibility Mode */
+ uint32_t mli_v1_timer; /* MLDv1 Querier Present timer */
+ uint32_t mli_v2_timer; /* MLDv2 General Query timer */
+ uint32_t mli_flags; /* MLD per-interface flags */
+ uint32_t mli_rv; /* MLDv2 Robustness Variable */
+ uint32_t mli_qi; /* MLDv2 Query Interval */
+ uint32_t mli_qri; /* MLDv2 Query Response Interval */
+ uint32_t mli_uri; /* MLDv2 Unsolicited Report Interval */
+ struct mbufq mli_gq; /* queue of general query responses */
+ } mld_ifsoftc;
+
+ struct scope6_id {
+ /*
+ * 16 corresponds to the 4-bit multicast scope field, i.e. from
+ * node-local to global with some reserved/unassigned types.
+ */
+#define IPV6_ADDR_SCOPES_COUNT 16
+ uint32_t s6id_list[IPV6_ADDR_SCOPES_COUNT];
+ } scope6_id;
+
+ struct lltable *lltable;
+
+ struct epoch_context epoch_ctx;
+};
+
+#define LLTABLE6(ifp) ((ifp)->if_inet6->lltable)
+#define DAD_FAILURES(ifp) ((ifp)->if_inet6->nd_dad_failures)
+
VNET_DECLARE(struct in6_ifaddrhead, in6_ifaddrhead);
VNET_DECLARE(struct in6_ifaddrlisthead *, in6_ifaddrhashtbl);
VNET_DECLARE(u_long, in6_ifaddrhmask);
@@ -866,7 +894,7 @@ void in6_purgeaddr(struct ifaddr *);
void in6_purgeifaddr(struct in6_ifaddr *);
int in6if_do_dad(struct ifnet *);
void in6_savemkludge(struct in6_ifaddr *);
-uint32_t in6_ifmtu(struct ifnet *);
+uint32_t in6_ifmtu(const struct ifnet *);
struct rib_head *in6_inithead(uint32_t fibnum);
void in6_detachhead(struct rib_head *rh);
int in6_if2idlen(struct ifnet *);
diff --git a/sys/netinet6/ip6_forward.c b/sys/netinet6/ip6_forward.c
index 9823366b0156..0ebb51fd80f0 100644
--- a/sys/netinet6/ip6_forward.c
+++ b/sys/netinet6/ip6_forward.c
@@ -384,11 +384,11 @@ again:
pass:
/* See if the size was changed by the packet filter. */
/* TODO: change to nh->nh_mtu */
- if (m->m_pkthdr.len > IN6_LINKMTU(nh->nh_ifp)) {
+ if (m->m_pkthdr.len > in6_ifmtu(nh->nh_ifp)) {
in6_ifstat_inc(nh->nh_ifp, ifs6_in_toobig);
if (mcopy)
icmp6_error(mcopy, ICMP6_PACKET_TOO_BIG, 0,
- IN6_LINKMTU(nh->nh_ifp));
+ in6_ifmtu(nh->nh_ifp));
goto bad;
}
diff --git a/sys/netinet6/ip6_input.c b/sys/netinet6/ip6_input.c
index 29fa4741a509..a23f5d46d6a3 100644
--- a/sys/netinet6/ip6_input.c
+++ b/sys/netinet6/ip6_input.c
@@ -219,7 +219,7 @@ VNET_PCPUSTAT_SYSUNINIT(ip6stat);
struct rmlock in6_ifaddr_lock;
RM_SYSINIT(in6_ifaddr_lock, &in6_ifaddr_lock, "in6_ifaddr_lock");
-static int ip6_hopopts_input(u_int32_t *, u_int32_t *, struct mbuf **, int *);
+static int ip6_hopopts_input(u_int32_t *, struct mbuf **, int *);
/*
* IP6 initialization: fill in IP6 protocol switch table.
@@ -390,7 +390,6 @@ ip6_destroy(void *unused __unused)
}
/* IF_ADDR_UNLOCK(ifp); */
in6_ifdetach_destroy(ifp);
- mld_domifdetach(ifp);
}
IFNET_RUNLOCK();
@@ -408,14 +407,14 @@ VNET_SYSUNINIT(inet6, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, ip6_destroy, NULL);
#endif
static int
-ip6_input_hbh(struct mbuf **mp, uint32_t *plen, uint32_t *rtalert, int *off,
+ip6_input_hbh(struct mbuf **mp, uint32_t *rtalert, int *off,
int *nxt, int *ours)
{
struct mbuf *m;
struct ip6_hdr *ip6;
struct ip6_hbh *hbh;
- if (ip6_hopopts_input(plen, rtalert, mp, off)) {
+ if (ip6_hopopts_input(rtalert, mp, off)) {
#if 0 /*touches NULL pointer*/
in6_ifstat_inc((*mp)->m_pkthdr.rcvif, ifs6_in_discard);
#endif
@@ -427,16 +426,11 @@ ip6_input_hbh(struct mbuf **mp, uint32_t *plen, uint32_t *rtalert, int *off,
ip6 = mtod(m, struct ip6_hdr *);
/*
- * if the payload length field is 0 and the next header field
- * indicates Hop-by-Hop Options header, then a Jumbo Payload
- * option MUST be included.
+ * If the payload length field is 0 and the next header field indicates
+ * Hop-by-Hop Options header, then a Jumbo Payload option MUST be
+ * included. We do not support Jumbo Payloads so report an error.
*/
- if (ip6->ip6_plen == 0 && *plen == 0) {
- /*
- * Note that if a valid jumbo payload option is
- * contained, ip6_hopopts_input() must set a valid
- * (non-zero) payload length to the variable plen.
- */
+ if (ip6->ip6_plen == 0) {
IP6STAT_INC(ip6s_badoptions);
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_discard);
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
@@ -551,7 +545,7 @@ ip6_input(struct mbuf *m)
* Drop the packet if IPv6 operation is disabled on the interface.
*/
rcvif = m->m_pkthdr.rcvif;
- if ((ND_IFINFO(rcvif)->flags & ND6_IFF_IFDISABLED))
+ if ((rcvif->if_inet6->nd_flags & ND6_IFF_IFDISABLED))
goto bad;
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
@@ -775,6 +769,15 @@ passin:
goto bad;
}
+ plen = (uint32_t)ntohs(ip6->ip6_plen);
+
+ /*
+ * We don't support Jumbograms, reject packets with plen == 0 as early
+ * as we can.
+ */
+ if (plen == 0)
+ goto bad;
+
/*
* Disambiguate address scope zones (if there is ambiguity).
* We first make sure that the original source or destination address
@@ -851,11 +854,9 @@ passin:
/*
* Process Hop-by-Hop options header if it's contained.
* m may be modified in ip6_hopopts_input().
- * If a JumboPayload option is included, plen will also be modified.
*/
- plen = (u_int32_t)ntohs(ip6->ip6_plen);
if (ip6->ip6_nxt == IPPROTO_HOPOPTS) {
- if (ip6_input_hbh(&m, &plen, &rtalert, &off, &nxt, &ours) != 0)
+ if (ip6_input_hbh(&m, &rtalert, &off, &nxt, &ours) != 0)
return;
} else
nxt = ip6->ip6_nxt;
@@ -964,13 +965,12 @@ bad:
/*
* Hop-by-Hop options header processing. If a valid jumbo payload option is
- * included, the real payload length will be stored in plenp.
+ * included report an error.
*
* rtalertp - XXX: should be stored more smart way
*/
static int
-ip6_hopopts_input(u_int32_t *plenp, u_int32_t *rtalertp,
- struct mbuf **mp, int *offp)
+ip6_hopopts_input(u_int32_t *rtalertp, struct mbuf **mp, int *offp)
{
struct mbuf *m = *mp;
int off = *offp, hbhlen;
@@ -1000,7 +1000,7 @@ ip6_hopopts_input(u_int32_t *plenp, u_int32_t *rtalertp,
off += hbhlen;
hbhlen -= sizeof(struct ip6_hbh);
if (ip6_process_hopopts(m, (u_int8_t *)hbh + sizeof(struct ip6_hbh),
- hbhlen, rtalertp, plenp) < 0) {
+ hbhlen, rtalertp) < 0) {
*mp = NULL;
return (-1);
}
@@ -1022,13 +1022,11 @@ ip6_hopopts_input(u_int32_t *plenp, u_int32_t *rtalertp,
*/
int
ip6_process_hopopts(struct mbuf *m, u_int8_t *opthead, int hbhlen,
- u_int32_t *rtalertp, u_int32_t *plenp)
+ u_int32_t *rtalertp)
{
- struct ip6_hdr *ip6;
int optlen = 0;
u_int8_t *opt = opthead;
u_int16_t rtalert_val;
- u_int32_t jumboplen;
const int erroff = sizeof(struct ip6_hdr) + sizeof(struct ip6_hbh);
for (; hbhlen > 0; hbhlen -= optlen, opt += optlen) {
@@ -1061,71 +1059,8 @@ ip6_process_hopopts(struct mbuf *m, u_int8_t *opthead, int hbhlen,
*rtalertp = ntohs(rtalert_val);
break;
case IP6OPT_JUMBO:
- /* XXX may need check for alignment */
- if (hbhlen < IP6OPT_JUMBO_LEN) {
- IP6STAT_INC(ip6s_toosmall);
- goto bad;
- }
- if (*(opt + 1) != IP6OPT_JUMBO_LEN - 2) {
- /* XXX stat */
- icmp6_error(m, ICMP6_PARAM_PROB,
- ICMP6_PARAMPROB_HEADER,
- erroff + opt + 1 - opthead);
- return (-1);
- }
- optlen = IP6OPT_JUMBO_LEN;
-
- /*
- * IPv6 packets that have non 0 payload length
- * must not contain a jumbo payload option.
- */
- ip6 = mtod(m, struct ip6_hdr *);
- if (ip6->ip6_plen) {
- IP6STAT_INC(ip6s_badoptions);
- icmp6_error(m, ICMP6_PARAM_PROB,
- ICMP6_PARAMPROB_HEADER,
- erroff + opt - opthead);
- return (-1);
- }
-
- /*
- * We may see jumbolen in unaligned location, so
- * we'd need to perform bcopy().
- */
- bcopy(opt + 2, &jumboplen, sizeof(jumboplen));
- jumboplen = (u_int32_t)htonl(jumboplen);
-
-#if 1
- /*
- * if there are multiple jumbo payload options,
- * *plenp will be non-zero and the packet will be
- * rejected.
- * the behavior may need some debate in ipngwg -
- * multiple options does not make sense, however,
- * there's no explicit mention in specification.
- */
- if (*plenp != 0) {
- IP6STAT_INC(ip6s_badoptions);
- icmp6_error(m, ICMP6_PARAM_PROB,
- ICMP6_PARAMPROB_HEADER,
- erroff + opt + 2 - opthead);
- return (-1);
- }
-#endif
-
- /*
- * jumbo payload length must be larger than 65535.
- */
- if (jumboplen <= IPV6_MAXPACKET) {
- IP6STAT_INC(ip6s_badoptions);
- icmp6_error(m, ICMP6_PARAM_PROB,
- ICMP6_PARAMPROB_HEADER,
- erroff + opt + 2 - opthead);
- return (-1);
- }
- *plenp = jumboplen;
-
- break;
+ /* We do not support the Jumbo Payload option. */
+ goto bad;
default: /* unknown option */
if (hbhlen < IP6OPT_MINLEN) {
IP6STAT_INC(ip6s_toosmall);
diff --git a/sys/netinet6/ip6_mroute.c b/sys/netinet6/ip6_mroute.c
index 4473d3931af8..46981aff1025 100644
--- a/sys/netinet6/ip6_mroute.c
+++ b/sys/netinet6/ip6_mroute.c
@@ -1582,7 +1582,7 @@ phyint_send(struct ip6_hdr *ip6, struct mif6 *mifp, struct mbuf *m)
* Put the packet into the sending queue of the outgoing interface
* if it would fit in the MTU of the interface.
*/
- linkmtu = IN6_LINKMTU(ifp);
+ linkmtu = in6_ifmtu(ifp);
if (mb_copy->m_pkthdr.len <= linkmtu || linkmtu < IPV6_MMTU) {
struct sockaddr_in6 dst6;
diff --git a/sys/netinet6/ip6_output.c b/sys/netinet6/ip6_output.c
index eb5a3a971ea0..dca1bcf04371 100644
--- a/sys/netinet6/ip6_output.c
+++ b/sys/netinet6/ip6_output.c
@@ -142,7 +142,6 @@ static int ip6_setpktopt(int, u_char *, int, struct ip6_pktopts *,
static int ip6_copyexthdr(struct mbuf **, caddr_t, int);
static int ip6_insertfraghdr(struct mbuf *, struct mbuf *, int,
struct ip6_frag **);
-static int ip6_insert_jumboopt(struct ip6_exthdrs *, u_int32_t);
static int ip6_splithdr(struct mbuf *, struct ip6_exthdrs *);
static void ip6_getpmtu(struct route_in6 *, int,
struct ifnet *, const struct in6_addr *, u_long *, u_int, u_int);
@@ -542,20 +541,9 @@ ip6_output(struct mbuf *m0, struct ip6_pktopts *opt,
m->m_pkthdr.len += optlen;
plen = m->m_pkthdr.len - sizeof(*ip6);
- /* If this is a jumbo payload, insert a jumbo payload option. */
if (plen > IPV6_MAXPACKET) {
- if (!hdrsplit) {
- if ((error = ip6_splithdr(m, &exthdrs)) != 0) {
- m = NULL;
- goto freehdrs;
- }
- m = exthdrs.ip6e_ip6;
- ip6 = mtod(m, struct ip6_hdr *);
- hdrsplit = true;
- }
- if ((error = ip6_insert_jumboopt(&exthdrs, plen)) != 0)
- goto freehdrs;
- ip6->ip6_plen = 0;
+ error = EMSGSIZE;
+ goto freehdrs;
} else
ip6->ip6_plen = htons(plen);
nexthdrp = &ip6->ip6_nxt;
@@ -981,7 +969,6 @@ nonh6lookup:
if (exthdrs.ip6e_hbh) {
struct ip6_hbh *hbh = mtod(exthdrs.ip6e_hbh, struct ip6_hbh *);
u_int32_t dummy; /* XXX unused */
- u_int32_t plen = 0; /* XXX: ip6_process will check the value */
#ifdef DIAGNOSTIC
if ((hbh->ip6h_len + 1) << 3 > exthdrs.ip6e_hbh->m_len)
@@ -997,7 +984,7 @@ nonh6lookup:
m->m_pkthdr.rcvif = ifp;
if (ip6_process_hopopts(m, (u_int8_t *)(hbh + 1),
((hbh->ip6h_len + 1) << 3) - sizeof(struct ip6_hbh),
- &dummy, &plen) < 0) {
+ &dummy) < 0) {
/* m was already freed at this point. */
error = EINVAL;/* better error? */
goto done;
@@ -1146,7 +1133,7 @@ passout:
dontfrag = 1;
else
dontfrag = 0;
- if (dontfrag && tlen > IN6_LINKMTU(ifp) && !tso) { /* Case 2-b. */
+ if (dontfrag && tlen > in6_ifmtu(ifp) && !tso) { /* Case 2-b. */
/*
* If the DONTFRAG option is specified, we cannot send the
* packet when the data length is larger than the MTU of the
@@ -1185,7 +1172,7 @@ passout:
in6_ifstat_inc(ifp, ifs6_out_fragfail);
goto bad;
} else if (ip6->ip6_plen == 0) {
- /* Jumbo payload cannot be fragmented. */
+ /* We do not support jumbo payload. */
error = EMSGSIZE;
in6_ifstat_inc(ifp, ifs6_out_fragfail);
goto bad;
@@ -1312,94 +1299,6 @@ ip6_copyexthdr(struct mbuf **mp, caddr_t hdr, int hlen)
}
/*
- * Insert jumbo payload option.
- */
-static int
-ip6_insert_jumboopt(struct ip6_exthdrs *exthdrs, u_int32_t plen)
-{
- struct mbuf *mopt;
- u_char *optbuf;
- u_int32_t v;
-
-#define JUMBOOPTLEN 8 /* length of jumbo payload option and padding */
-
- /*
- * If there is no hop-by-hop options header, allocate new one.
- * If there is one but it doesn't have enough space to store the
- * jumbo payload option, allocate a cluster to store the whole options.
- * Otherwise, use it to store the options.
- */
- if (exthdrs->ip6e_hbh == NULL) {
- mopt = m_get(M_NOWAIT, MT_DATA);
- if (mopt == NULL)
- return (ENOBUFS);
- mopt->m_len = JUMBOOPTLEN;
- optbuf = mtod(mopt, u_char *);
- optbuf[1] = 0; /* = ((JUMBOOPTLEN) >> 3) - 1 */
- exthdrs->ip6e_hbh = mopt;
- } else {
- struct ip6_hbh *hbh;
-
- mopt = exthdrs->ip6e_hbh;
- if (M_TRAILINGSPACE(mopt) < JUMBOOPTLEN) {
- /*
- * XXX assumption:
- * - exthdrs->ip6e_hbh is not referenced from places
- * other than exthdrs.
- * - exthdrs->ip6e_hbh is not an mbuf chain.
- */
- int oldoptlen = mopt->m_len;
- struct mbuf *n;
-
- /*
- * XXX: give up if the whole (new) hbh header does
- * not fit even in an mbuf cluster.
- */
- if (oldoptlen + JUMBOOPTLEN > MCLBYTES)
- return (ENOBUFS);
-
- /*
- * As a consequence, we must always prepare a cluster
- * at this point.
- */
- n = m_getcl(M_NOWAIT, MT_DATA, 0);
- if (n == NULL)
- return (ENOBUFS);
- n->m_len = oldoptlen + JUMBOOPTLEN;
- bcopy(mtod(mopt, caddr_t), mtod(n, caddr_t),
- oldoptlen);
- optbuf = mtod(n, caddr_t) + oldoptlen;
- m_freem(mopt);
- mopt = exthdrs->ip6e_hbh = n;
- } else {
- optbuf = mtod(mopt, u_char *) + mopt->m_len;
- mopt->m_len += JUMBOOPTLEN;
- }
- optbuf[0] = IP6OPT_PADN;
- optbuf[1] = 1;
-
- /*
- * Adjust the header length according to the pad and
- * the jumbo payload option.
- */
- hbh = mtod(mopt, struct ip6_hbh *);
- hbh->ip6h_len += (JUMBOOPTLEN >> 3);
- }
-
- /* fill in the option. */
- optbuf[2] = IP6OPT_JUMBO;
- optbuf[3] = 4;
- v = (u_int32_t)htonl(plen + JUMBOOPTLEN);
- bcopy(&v, &optbuf[4], sizeof(u_int32_t));
-
- /* finally, adjust the packet header length */
- exthdrs->ip6e_ip6->m_pkthdr.len += JUMBOOPTLEN;
-
- return (0);
-#undef JUMBOOPTLEN
-}
-
-/*
* Insert fragment header and copy unfragmentable header portions.
*/
static int
@@ -1560,7 +1459,7 @@ ip6_calcmtu(struct ifnet *ifp, const struct in6_addr *dst, u_long rt_mtu,
}
if (mtu == 0)
- mtu = IN6_LINKMTU(ifp);
+ mtu = in6_ifmtu(ifp);
*mtup = mtu;
}
@@ -2921,7 +2820,7 @@ ip6_setpktopt(int optname, u_char *buf, int len, struct ip6_pktopts *opt,
return (ENXIO);
}
if (ifp != NULL && (ifp->if_inet6 == NULL ||
- (ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED) != 0))
+ (ifp->if_inet6->nd_flags & ND6_IFF_IFDISABLED) != 0))
return (ENETDOWN);
if (ifp != NULL &&
diff --git a/sys/netinet6/ip6_var.h b/sys/netinet6/ip6_var.h
index db1631736c4a..c1645f587483 100644
--- a/sys/netinet6/ip6_var.h
+++ b/sys/netinet6/ip6_var.h
@@ -393,8 +393,7 @@ int ip6_lasthdr(const struct mbuf *, int, int, int *);
extern int (*ip6_mforward)(struct ip6_hdr *, struct ifnet *,
struct mbuf *);
-int ip6_process_hopopts(struct mbuf *, u_int8_t *, int, u_int32_t *,
- u_int32_t *);
+int ip6_process_hopopts(struct mbuf *, u_int8_t *, int, u_int32_t *);
struct mbuf **ip6_savecontrol_v4(struct inpcb *, struct mbuf *,
struct mbuf **, int *);
void ip6_savecontrol(struct inpcb *, struct mbuf *, struct mbuf **);
diff --git a/sys/netinet6/mld6.c b/sys/netinet6/mld6.c
index 8e2bbf8adc01..f14d2c76ffda 100644
--- a/sys/netinet6/mld6.c
+++ b/sys/netinet6/mld6.c
@@ -99,7 +99,6 @@
#define KTR_MLD KTR_INET6
#endif
-static void mli_delete_locked(struct ifnet *);
static void mld_dispatch_packet(struct mbuf *);
static void mld_dispatch_queue(struct mbufq *, int);
static void mld_final_leave(struct in6_multi *, struct mld_ifsoftc *);
@@ -465,21 +464,21 @@ mld_is_addr_reported(const struct in6_addr *addr)
* Attach MLD when PF_INET6 is attached to an interface. Assumes that the
* current VNET is set by the caller.
*/
-struct mld_ifsoftc *
+void
mld_domifattach(struct ifnet *ifp)
{
- struct mld_ifsoftc *mli;
+ struct mld_ifsoftc *mli = MLD_IFINFO(ifp);
CTR3(KTR_MLD, "%s: called for ifp %p(%s)", __func__, ifp, if_name(ifp));
- mli = malloc(sizeof(struct mld_ifsoftc), M_MLD, M_WAITOK | M_ZERO);
- mli->mli_ifp = ifp;
- mli->mli_version = MLD_VERSION_2;
- mli->mli_flags = 0;
- mli->mli_rv = MLD_RV_INIT;
- mli->mli_qi = MLD_QI_INIT;
- mli->mli_qri = MLD_QRI_INIT;
- mli->mli_uri = MLD_URI_INIT;
+ *mli = (struct mld_ifsoftc){
+ .mli_ifp = ifp,
+ .mli_version = MLD_VERSION_2,
+ .mli_rv = MLD_RV_INIT,
+ .mli_qi = MLD_QI_INIT,
+ .mli_qri = MLD_QRI_INIT,
+ .mli_uri = MLD_URI_INIT,
+ };
mbufq_init(&mli->mli_gq, MLD_MAX_RESPONSE_PACKETS);
if ((ifp->if_flags & IFF_MULTICAST) == 0)
mli->mli_flags |= MLIF_SILENT;
@@ -489,8 +488,6 @@ mld_domifattach(struct ifnet *ifp)
MLD_LOCK();
LIST_INSERT_HEAD(&V_mli_head, mli, mli_link);
MLD_UNLOCK();
-
- return (mli);
}
/*
@@ -552,44 +549,19 @@ mld_ifdetach(struct ifnet *ifp, struct in6_multi_head *inmh)
/*
* Hook for domifdetach.
* Runs after link-layer cleanup; free MLD state.
- *
- * SMPng: Normally called with LLTABLE_LOCK held.
*/
void
mld_domifdetach(struct ifnet *ifp)
{
+ struct mld_ifsoftc *mli = MLD_IFINFO(ifp);
CTR3(KTR_MLD, "%s: called for ifp %p(%s)",
__func__, ifp, if_name(ifp));
MLD_LOCK();
- mli_delete_locked(ifp);
+ LIST_REMOVE(mli, mli_link);
MLD_UNLOCK();
-}
-
-static void
-mli_delete_locked(struct ifnet *ifp)
-{
- struct mld_ifsoftc *mli, *tmli;
-
- CTR3(KTR_MLD, "%s: freeing mld_ifsoftc for ifp %p(%s)",
- __func__, ifp, if_name(ifp));
-
- MLD_LOCK_ASSERT();
-
- LIST_FOREACH_SAFE(mli, &V_mli_head, mli_link, tmli) {
- if (mli->mli_ifp == ifp) {
- /*
- * Free deferred General Query responses.
- */
- mbufq_drain(&mli->mli_gq);
-
- LIST_REMOVE(mli, mli_link);
-
- free(mli, M_MLD);
- return;
- }
- }
+ mbufq_drain(&mli->mli_gq);
}
/*
diff --git a/sys/netinet6/mld6_var.h b/sys/netinet6/mld6_var.h
index d75ac2450c10..a063771f4dc8 100644
--- a/sys/netinet6/mld6_var.h
+++ b/sys/netinet6/mld6_var.h
@@ -120,23 +120,6 @@ struct mld_ifinfo {
};
#ifdef _KERNEL
-/*
- * Per-link MLD state.
- */
-struct mld_ifsoftc {
- LIST_ENTRY(mld_ifsoftc) mli_link;
- struct ifnet *mli_ifp; /* interface this instance belongs to */
- uint32_t mli_version; /* MLDv1 Host Compatibility Mode */
- uint32_t mli_v1_timer; /* MLDv1 Querier Present timer (s) */
- uint32_t mli_v2_timer; /* MLDv2 General Query (interface) timer (s)*/
- uint32_t mli_flags; /* MLD per-interface flags */
- uint32_t mli_rv; /* MLDv2 Robustness Variable */
- uint32_t mli_qi; /* MLDv2 Query Interval (s) */
- uint32_t mli_qri; /* MLDv2 Query Response Interval (s) */
- uint32_t mli_uri; /* MLDv2 Unsolicited Report Interval (s) */
- struct mbufq mli_gq; /* queue of general query responses */
-};
-
#define MLD_RANDOM_DELAY(X) (arc4random() % (X) + 1)
#define MLD_MAX_STATE_CHANGES 24 /* Max pending changes per group */
@@ -155,12 +138,11 @@ struct mld_ifsoftc {
/*
* Per-link MLD context.
*/
-#define MLD_IFINFO(ifp) ((ifp)->if_inet6->mld_ifinfo)
+#define MLD_IFINFO(ifp) (&(ifp)->if_inet6->mld_ifsoftc)
struct in6_multi_head;
int mld_change_state(struct in6_multi *, const int);
-struct mld_ifsoftc *
- mld_domifattach(struct ifnet *);
+void mld_domifattach(struct ifnet *);
void mld_domifdetach(struct ifnet *);
void mld_ifdetach(struct ifnet *, struct in6_multi_head *);
int mld_input(struct mbuf **, int, int);
diff --git a/sys/netinet6/nd6.c b/sys/netinet6/nd6.c
index 595e0b4ac54f..969b32032a60 100644
--- a/sys/netinet6/nd6.c
+++ b/sys/netinet6/nd6.c
@@ -88,8 +88,6 @@
#define ND6_SLOWTIMER_INTERVAL (60 * 60) /* 1 hour */
#define ND6_RECALC_REACHTM_INTERVAL (60 * 120) /* 2 hours */
-MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
-
VNET_DEFINE_STATIC(int, nd6_prune) = 1;
#define V_nd6_prune VNET(nd6_prune)
SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_PRUNE, nd6_prune,
@@ -150,7 +148,6 @@ int (*send_sendso_input_hook)(struct mbuf *, struct ifnet *, int, int);
static bool nd6_is_new_addr_neighbor(const struct sockaddr_in6 *,
struct ifnet *);
-static void nd6_setmtu0(struct ifnet *, struct nd_ifinfo *);
static void nd6_slowtimo(void *);
static int regen_tmpaddr(struct in6_ifaddr *);
static void nd6_free(struct llentry **, int);
@@ -277,24 +274,30 @@ nd6_destroy(void)
}
#endif
-struct nd_ifinfo *
+void
nd6_ifattach(struct ifnet *ifp)
{
- struct nd_ifinfo *nd;
+ struct in6_ifextra *nd = ifp->if_inet6;
- nd = malloc(sizeof(*nd), M_IP6NDP, M_WAITOK | M_ZERO);
- nd->initialized = 1;
+ nd->nd_linkmtu = 0;
+ nd->nd_maxmtu = ifp->if_mtu;
+ nd->nd_basereachable = REACHABLE_TIME;
+ nd->nd_reachable = ND_COMPUTE_RTIME(nd->nd_basereachable);
+ nd->nd_retrans = RETRANS_TIMER;
+ nd->nd_recalc_timer = 0;
+ nd->nd_dad_failures = 0;
+ nd->nd_curhoplimit = IPV6_DEFHLIM;
- nd->chlim = IPV6_DEFHLIM;
- nd->basereachable = REACHABLE_TIME;
- nd->reachable = ND_COMPUTE_RTIME(nd->basereachable);
- nd->retrans = RETRANS_TIMER;
-
- nd->flags = ND6_IFF_PERFORMNUD;
+ nd->nd_flags = ND6_IFF_PERFORMNUD;
/* Set IPv6 disabled on all interfaces but loopback by default. */
- if ((ifp->if_flags & IFF_LOOPBACK) == 0)
- nd->flags |= ND6_IFF_IFDISABLED;
+ if ((ifp->if_flags & IFF_LOOPBACK) == 0) {
+ nd->nd_flags |= ND6_IFF_IFDISABLED;
+ if (V_ip6_no_radr)
+ nd->nd_flags |= ND6_IFF_NO_RADR;
+ if (V_ip6_use_stableaddr)
+ nd->nd_flags |= ND6_IFF_STABLEADDR;
+ }
/* A loopback interface always has ND6_IFF_AUTO_LINKLOCAL.
* XXXHRS: Clear ND6_IFF_AUTO_LINKLOCAL on an IFT_BRIDGE interface by
@@ -303,7 +306,7 @@ nd6_ifattach(struct ifnet *ifp)
*/
if ((V_ip6_auto_linklocal && ifp->if_type != IFT_BRIDGE &&
ifp->if_type != IFT_WIREGUARD) || (ifp->if_flags & IFF_LOOPBACK))
- nd->flags |= ND6_IFF_AUTO_LINKLOCAL;
+ nd->nd_flags |= ND6_IFF_AUTO_LINKLOCAL;
/*
* A loopback interface does not need to accept RTADV.
* XXXHRS: Clear ND6_IFF_ACCEPT_RTADV on an IFT_BRIDGE interface by
@@ -314,26 +317,14 @@ nd6_ifattach(struct ifnet *ifp)
if (V_ip6_accept_rtadv &&
!(ifp->if_flags & IFF_LOOPBACK) &&
(ifp->if_type != IFT_BRIDGE)) {
- nd->flags |= ND6_IFF_ACCEPT_RTADV;
+ nd->nd_flags |= ND6_IFF_ACCEPT_RTADV;
/* If we globally accept rtadv, assume IPv6 on. */
- nd->flags &= ~ND6_IFF_IFDISABLED;
+ nd->nd_flags &= ~ND6_IFF_IFDISABLED;
}
- if (V_ip6_no_radr && !(ifp->if_flags & IFF_LOOPBACK))
- nd->flags |= ND6_IFF_NO_RADR;
-
- /* XXX: we cannot call nd6_setmtu since ifp is not fully initialized */
- nd6_setmtu0(ifp, nd);
-
- /* Configure default value for stable addresses algorithm, skip loopback interface */
- if (V_ip6_use_stableaddr && !(ifp->if_flags & IFF_LOOPBACK)) {
- nd->flags |= ND6_IFF_STABLEADDR;
- }
-
- return nd;
}
void
-nd6_ifdetach(struct ifnet *ifp, struct nd_ifinfo *nd)
+nd6_ifdetach(struct ifnet *ifp)
{
struct epoch_tracker et;
struct ifaddr *ifa, *next;
@@ -347,32 +338,25 @@ nd6_ifdetach(struct ifnet *ifp, struct nd_ifinfo *nd)
nd6_dad_stop(ifa);
}
NET_EPOCH_EXIT(et);
-
- free(nd, M_IP6NDP);
}
/*
* Reset ND level link MTU. This function is called when the physical MTU
* changes, which means we might have to adjust the ND level MTU.
+ * XXX todo: do not maintain copy of ifp->if_mtu in if_inet6->nd_maxmtu.
*/
void
nd6_setmtu(struct ifnet *ifp)
{
- /* XXXGL: ??? */
- if (ifp->if_inet6 == NULL)
- return;
+ struct in6_ifextra *ndi = ifp->if_inet6;
+ uint32_t omaxmtu;
- nd6_setmtu0(ifp, ND_IFINFO(ifp));
-}
-
-/* XXX todo: do not maintain copy of ifp->if_mtu in ndi->maxmtu */
-void
-nd6_setmtu0(struct ifnet *ifp, struct nd_ifinfo *ndi)
-{
- u_int32_t omaxmtu;
+ /* XXXGL: safety against IFT_PFSYNC & IFT_PFLOG */
+ if (ndi == NULL)
+ return;
- omaxmtu = ndi->maxmtu;
- ndi->maxmtu = ifp->if_mtu;
+ omaxmtu = ndi->nd_maxmtu;
+ ndi->nd_maxmtu = ifp->if_mtu;
/*
* Decreasing the interface MTU under IPV6 minimum MTU may cause
@@ -380,10 +364,10 @@ nd6_setmtu0(struct ifnet *ifp, struct nd_ifinfo *ndi)
* explicitly. The check for omaxmtu is necessary to restrict the
* log to the case of changing the MTU, not initializing it.
*/
- if (omaxmtu >= IPV6_MMTU && ndi->maxmtu < IPV6_MMTU) {
- log(LOG_NOTICE, "nd6_setmtu0: "
+ if (omaxmtu >= IPV6_MMTU && ndi->nd_maxmtu < IPV6_MMTU) {
+ log(LOG_NOTICE, "%s: "
"new link MTU on %s (%lu) is too small for IPv6\n",
- if_name(ifp), (unsigned long)ndi->maxmtu);
+ __func__, if_name(ifp), (unsigned long)ndi->nd_maxmtu);
}
}
@@ -714,12 +698,12 @@ nd6_llinfo_setstate(struct llentry *lle, int newstate)
switch (newstate) {
case ND6_LLINFO_INCOMPLETE:
ifp = lle->lle_tbl->llt_ifp;
- delay = (long)ND_IFINFO(ifp)->retrans * hz / 1000;
+ delay = (long)ifp->if_inet6->nd_retrans * hz / 1000;
break;
case ND6_LLINFO_REACHABLE:
if (!ND6_LLINFO_PERMANENT(lle)) {
ifp = lle->lle_tbl->llt_ifp;
- delay = (long)ND_IFINFO(ifp)->reachable * hz;
+ delay = (long)ifp->if_inet6->nd_reachable * hz;
}
break;
case ND6_LLINFO_STALE:
@@ -756,7 +740,7 @@ nd6_llinfo_timer(void *arg)
struct llentry *ln;
struct in6_addr *dst, *pdst, *psrc, src;
struct ifnet *ifp;
- struct nd_ifinfo *ndi;
+ struct in6_ifextra *ndi;
int do_switch, send_ns;
long delay;
@@ -790,7 +774,7 @@ nd6_llinfo_timer(void *arg)
return;
}
NET_EPOCH_ENTER(et);
- ndi = ND_IFINFO(ifp);
+ ndi = ifp->if_inet6;
send_ns = 0;
dst = &ln->r_l3addr.addr6;
pdst = dst;
@@ -892,7 +876,7 @@ nd6_llinfo_timer(void *arg)
/* FALLTHROUGH */
case ND6_LLINFO_DELAY:
- if (ndi && (ndi->flags & ND6_IFF_PERFORMNUD) != 0) {
+ if ((ndi->nd_flags & ND6_IFF_PERFORMNUD) != 0) {
/* We need NUD */
ln->la_asked = 1;
nd6_llinfo_setstate(ln, ND6_LLINFO_PROBE);
@@ -916,7 +900,8 @@ done:
if (ln != NULL)
ND6_RUNLOCK();
if (send_ns != 0) {
- nd6_llinfo_settimer_locked(ln, (long)ndi->retrans * hz / 1000);
+ nd6_llinfo_settimer_locked(ln,
+ (long)ndi->nd_retrans * hz / 1000);
psrc = nd6_llinfo_get_holdsrc(ln, &src);
LLE_FREE_LOCKED(ln);
ln = NULL;
@@ -1027,10 +1012,10 @@ nd6_timer(void *arg)
* mark the address as tentative for future DAD.
*/
ifp = ia6->ia_ifp;
- if ((ND_IFINFO(ifp)->flags & ND6_IFF_NO_DAD) == 0 &&
+ if ((ifp->if_inet6->nd_flags & ND6_IFF_NO_DAD) == 0 &&
((ifp->if_flags & IFF_UP) == 0 ||
(ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
- (ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED) != 0)){
+ (ifp->if_inet6->nd_flags & ND6_IFF_IFDISABLED))){
ia6->ia6_flags &= ~IN6_IFF_DUPLICATED;
ia6->ia6_flags |= IN6_IFF_TENTATIVE;
}
@@ -1198,7 +1183,7 @@ nd6_purge(struct ifnet *ifp)
if (V_nd6_defifindex == ifp->if_index)
nd6_setdefaultiface(0);
- if (ND_IFINFO(ifp)->flags & ND6_IFF_ACCEPT_RTADV) {
+ if (ifp->if_inet6->nd_flags & ND6_IFF_ACCEPT_RTADV) {
/* Refresh default router list. */
defrouter_select_fib(ifp->if_fib);
}
@@ -1324,7 +1309,7 @@ nd6_is_new_addr_neighbor(const struct sockaddr_in6 *addr, struct ifnet *ifp)
* If the default router list is empty, all addresses are regarded
* as on-link, and thus, as a neighbor.
*/
- if (ND_IFINFO(ifp)->flags & ND6_IFF_ACCEPT_RTADV &&
+ if (ifp->if_inet6->nd_flags & ND6_IFF_ACCEPT_RTADV &&
nd6_defrouter_list_empty() &&
V_nd6_defifindex == ifp->if_index) {
return (1);
@@ -1448,7 +1433,7 @@ nd6_free(struct llentry **lnp, int gc)
KASSERT((ln->la_flags & LLE_CHILD) == 0, ("child lle"));
ifp = lltable_get_ifp(ln->lle_tbl);
- if ((ND_IFINFO(ifp)->flags & ND6_IFF_ACCEPT_RTADV) != 0)
+ if ((ifp->if_inet6->nd_flags & ND6_IFF_ACCEPT_RTADV) != 0)
dr = defrouter_lookup_locked(&ln->r_l3addr.addr6, ifp);
else
dr = NULL;
@@ -1465,7 +1450,7 @@ nd6_free(struct llentry **lnp, int gc)
/* cancel timer */
nd6_llinfo_settimer_locked(ln, -1);
- if (ND_IFINFO(ifp)->flags & ND6_IFF_ACCEPT_RTADV) {
+ if (ifp->if_inet6->nd_flags & ND6_IFF_ACCEPT_RTADV) {
if (dr != NULL && dr->expire &&
ln->ln_state == ND6_LLINFO_STALE && gc) {
/*
@@ -1640,31 +1625,30 @@ nd6_subscription_cb(struct rib_head *rnh, struct rib_cmd_info *rc, void *arg)
int
nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp)
{
+ struct epoch_tracker et;
struct in6_ndireq *ndi = (struct in6_ndireq *)data;
struct in6_nbrinfo *nbi = (struct in6_nbrinfo *)data;
struct in6_ndifreq *ndif = (struct in6_ndifreq *)data;
- struct epoch_tracker et;
+ struct in6_ifextra *ext = ifp->if_inet6;
int error = 0;
- /* XXXGL: ??? */
- if (ifp->if_inet6 == NULL)
+ /* XXXGL: safety against IFT_PFSYNC & IFT_PFLOG */
+ if (ext == NULL)
return (EPFNOSUPPORT);
- switch (cmd) {
- case OSIOCGIFINFO_IN6:
#define ND ndi->ndi
- /* XXX: old ndp(8) assumes a positive value for linkmtu. */
- bzero(&ND, sizeof(ND));
- ND.linkmtu = IN6_LINKMTU(ifp);
- ND.maxmtu = ND_IFINFO(ifp)->maxmtu;
- ND.basereachable = ND_IFINFO(ifp)->basereachable;
- ND.reachable = ND_IFINFO(ifp)->reachable;
- ND.retrans = ND_IFINFO(ifp)->retrans;
- ND.flags = ND_IFINFO(ifp)->flags;
- ND.recalctm = ND_IFINFO(ifp)->recalctm;
- ND.chlim = ND_IFINFO(ifp)->chlim;
- break;
+ switch (cmd) {
case SIOCGIFINFO_IN6:
- ND = *ND_IFINFO(ifp);
+ ND = (struct nd_ifinfo){
+ .linkmtu = ext->nd_linkmtu,
+ .maxmtu = ext->nd_maxmtu,
+ .basereachable = ext->nd_basereachable,
+ .reachable = ext->nd_reachable,
+ .retrans = ext->nd_retrans,
+ .flags = ext->nd_flags,
+ .recalctm = ext->nd_recalc_timer,
+ .chlim = ext->nd_curhoplimit,
+ .initialized = 1,
+ };
break;
case SIOCSIFINFO_IN6:
/*
@@ -1674,32 +1658,32 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp)
/* 0 means 'unspecified' */
if (ND.linkmtu != 0) {
if (ND.linkmtu < IPV6_MMTU ||
- ND.linkmtu > IN6_LINKMTU(ifp)) {
+ ND.linkmtu > in6_ifmtu(ifp)) {
error = EINVAL;
break;
}
- ND_IFINFO(ifp)->linkmtu = ND.linkmtu;
+ ext->nd_linkmtu = ND.linkmtu;
}
if (ND.basereachable != 0) {
- int obasereachable = ND_IFINFO(ifp)->basereachable;
+ uint32_t obasereachable = ext->nd_basereachable;
- ND_IFINFO(ifp)->basereachable = ND.basereachable;
+ ext->nd_basereachable = ND.basereachable;
if (ND.basereachable != obasereachable)
- ND_IFINFO(ifp)->reachable =
+ ext->nd_reachable =
ND_COMPUTE_RTIME(ND.basereachable);
}
if (ND.retrans != 0)
- ND_IFINFO(ifp)->retrans = ND.retrans;
+ ext->nd_retrans = ND.retrans;
if (ND.chlim != 0)
- ND_IFINFO(ifp)->chlim = ND.chlim;
+ ext->nd_curhoplimit = ND.chlim;
/* FALLTHROUGH */
case SIOCSIFINFO_FLAGS:
{
struct ifaddr *ifa;
struct in6_ifaddr *ia;
- if ((ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED) &&
+ if ((ext->nd_flags & ND6_IFF_IFDISABLED) &&
!(ND.flags & ND6_IFF_IFDISABLED)) {
/* ifdisabled 1->0 transision */
@@ -1727,18 +1711,18 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp)
" with a link-local address marked"
" duplicate.\n");
} else {
- ND_IFINFO(ifp)->flags &= ~ND6_IFF_IFDISABLED;
+ ext->nd_flags &= ~ND6_IFF_IFDISABLED;
if (ifp->if_flags & IFF_UP)
in6_if_up(ifp);
}
- } else if (!(ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED) &&
+ } else if (!(ext->nd_flags & ND6_IFF_IFDISABLED) &&
(ND.flags & ND6_IFF_IFDISABLED)) {
/* ifdisabled 0->1 transision */
/* Mark all IPv6 address as tentative. */
- ND_IFINFO(ifp)->flags |= ND6_IFF_IFDISABLED;
+ ext->nd_flags |= ND6_IFF_IFDISABLED;
if (V_ip6_dad_count > 0 &&
- (ND_IFINFO(ifp)->flags & ND6_IFF_NO_DAD) == 0) {
+ (ext->nd_flags & ND6_IFF_NO_DAD) == 0) {
NET_EPOCH_ENTER(et);
CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead,
ifa_link) {
@@ -1753,11 +1737,11 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp)
}
if (ND.flags & ND6_IFF_AUTO_LINKLOCAL) {
- if (!(ND_IFINFO(ifp)->flags & ND6_IFF_AUTO_LINKLOCAL)) {
+ if (!(ext->nd_flags & ND6_IFF_AUTO_LINKLOCAL)) {
/* auto_linklocal 0->1 transision */
/* If no link-local address on ifp, configure */
- ND_IFINFO(ifp)->flags |= ND6_IFF_AUTO_LINKLOCAL;
+ ext->nd_flags |= ND6_IFF_AUTO_LINKLOCAL;
in6_ifattach(ifp, NULL);
} else if (!(ND.flags & ND6_IFF_IFDISABLED) &&
ifp->if_flags & IFF_UP) {
@@ -1783,7 +1767,7 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp)
in6_ifattach(ifp, NULL);
}
}
- ND_IFINFO(ifp)->flags = ND.flags;
+ ext->nd_flags = ND.flags;
break;
}
#undef ND
@@ -2120,7 +2104,7 @@ nd6_cache_lladdr(struct ifnet *ifp, struct in6_addr *from, char *lladdr,
* cases for safety.
*/
if ((do_update || is_newentry) && router &&
- ND_IFINFO(ifp)->flags & ND6_IFF_ACCEPT_RTADV) {
+ ifp->if_inet6->nd_flags & ND6_IFF_ACCEPT_RTADV) {
/*
* guaranteed recursion
*/
@@ -2133,26 +2117,26 @@ nd6_slowtimo(void *arg)
{
struct epoch_tracker et;
CURVNET_SET((struct vnet *) arg);
- struct nd_ifinfo *nd6if;
+ struct in6_ifextra *nd6if;
struct ifnet *ifp;
callout_reset(&V_nd6_slowtimo_ch, ND6_SLOWTIMER_INTERVAL * hz,
nd6_slowtimo, curvnet);
NET_EPOCH_ENTER(et);
CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) {
- if (ifp->if_inet6 == NULL)
+ if ((nd6if = ifp->if_inet6) == NULL)
continue;
- nd6if = ND_IFINFO(ifp);
- if (nd6if->basereachable && /* already initialized */
- (nd6if->recalctm -= ND6_SLOWTIMER_INTERVAL) <= 0) {
+ if (nd6if->nd_basereachable && /* already initialized */
+ (nd6if->nd_recalc_timer -= ND6_SLOWTIMER_INTERVAL) <= 0) {
/*
* Since reachable time rarely changes by router
* advertisements, we SHOULD insure that a new random
* value gets recomputed at least once every few hours.
* (RFC 2461, 6.3.4)
*/
- nd6if->recalctm = V_nd6_recalc_reachtm_interval;
- nd6if->reachable = ND_COMPUTE_RTIME(nd6if->basereachable);
+ nd6if->nd_recalc_timer = V_nd6_recalc_reachtm_interval;
+ nd6if->nd_reachable =
+ ND_COMPUTE_RTIME(nd6if->nd_basereachable);
}
}
NET_EPOCH_EXIT(et);
@@ -2260,7 +2244,7 @@ nd6_resolve(struct ifnet *ifp, int gw_flags, struct mbuf *m,
dst6 = (const struct sockaddr_in6 *)sa_dst;
/* discard the packet if IPv6 operation is disabled on the interface */
- if ((ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED)) {
+ if ((ifp->if_inet6->nd_flags & ND6_IFF_IFDISABLED)) {
m_freem(m);
return (ENETDOWN); /* better error? */
}
diff --git a/sys/netinet6/nd6.h b/sys/netinet6/nd6.h
index 3051397ec33f..a22a0e24735b 100644
--- a/sys/netinet6/nd6.h
+++ b/sys/netinet6/nd6.h
@@ -62,22 +62,6 @@ struct llentry;
#define ND6_IS_LLINFO_PROBREACH(n) ((n)->ln_state > ND6_LLINFO_INCOMPLETE)
#define ND6_LLINFO_PERMANENT(n) (((n)->la_expire == 0) && ((n)->ln_state > ND6_LLINFO_INCOMPLETE))
-struct nd_ifinfo {
- u_int32_t linkmtu; /* LinkMTU */
- u_int32_t maxmtu; /* Upper bound of LinkMTU */
- u_int32_t basereachable; /* BaseReachableTime */
- u_int32_t reachable; /* Reachable Time */
- u_int32_t retrans; /* Retrans Timer */
- u_int32_t flags; /* Flags */
- int recalctm; /* BaseReacable re-calculation timer */
- u_int8_t chlim; /* CurHopLimit */
- u_int8_t initialized; /* Flag to see the entry is initialized */
- /* the following 3 members are for privacy extension for addrconf */
- u_int8_t randomseed0[8]; /* upper 64 bits of MD5 digest */
- u_int8_t randomseed1[8]; /* lower 64 bits (usually the EUI64 IFID) */
- u_int8_t randomid[8]; /* current random ID */
-};
-
#define ND6_IFF_PERFORMNUD 0x1
#define ND6_IFF_ACCEPT_RTADV 0x2
#define ND6_IFF_PREFER_SOURCE 0x4 /* Not used in FreeBSD. */
@@ -97,15 +81,6 @@ struct nd_ifinfo {
#define ND6_IFF_IPV6_ONLY_MASK (ND6_IFF_IPV6_ONLY|ND6_IFF_IPV6_ONLY_MANUAL)
#endif
-#ifdef _KERNEL
-#define ND_IFINFO(ifp) ((if_getinet6(ifp))->nd_ifinfo)
-#define IN6_LINKMTU(ifp) \
- ((ND_IFINFO(ifp)->linkmtu && ND_IFINFO(ifp)->linkmtu < (ifp)->if_mtu) \
- ? ND_IFINFO(ifp)->linkmtu \
- : ((ND_IFINFO(ifp)->maxmtu && ND_IFINFO(ifp)->maxmtu < (ifp)->if_mtu) \
- ? ND_IFINFO(ifp)->maxmtu : (ifp)->if_mtu))
-#endif
-
struct in6_nbrinfo {
char ifname[IFNAMSIZ]; /* if name, e.g. "en0" */
struct in6_addr addr; /* IPv6 address of the neighbor */
@@ -139,27 +114,30 @@ struct in6_prefix {
/* struct sockaddr_in6 advrtr[] */
};
-#ifdef _KERNEL
-struct in6_ondireq {
+struct in6_ndireq {
char ifname[IFNAMSIZ];
- struct {
- u_int32_t linkmtu; /* LinkMTU */
- u_int32_t maxmtu; /* Upper bound of LinkMTU */
- u_int32_t basereachable; /* BaseReachableTime */
- u_int32_t reachable; /* Reachable Time */
- u_int32_t retrans; /* Retrans Timer */
- u_int32_t flags; /* Flags */
+ struct nd_ifinfo {
+ uint32_t linkmtu; /* LinkMTU */
+ uint32_t maxmtu; /* Upper bound of LinkMTU */
+ uint32_t basereachable; /* BaseReachableTime */
+ uint32_t reachable; /* Reachable Time */
+ uint32_t retrans; /* Retrans Timer */
+ uint32_t flags; /* Flags */
int recalctm; /* BaseReacable re-calculation timer */
- u_int8_t chlim; /* CurHopLimit */
- u_int8_t receivedra;
+ uint8_t chlim; /* CurHopLimit */
+ /*
+ * The below members are not used. They came from KAME and
+ * are hanging around to preserve ABI compatibility of the
+ * SIOCGIFINFO_IN6 ioctl.
+ * The original comment documented the random* members as a
+ * privacy extension for addrconf.
+ */
+ uint8_t initialized; /* compat: always 1 */
+ uint8_t randomseed0[8]; /* upper 64 bits of MD5 digest */
+ uint8_t randomseed1[8]; /* lower 64 bits (the EUI64 IFID?) */
+ uint8_t randomid[8]; /* current random ID */
} ndi;
};
-#endif
-
-struct in6_ndireq {
- char ifname[IFNAMSIZ];
- struct nd_ifinfo ndi;
-};
struct in6_ndifreq {
char ifname[IFNAMSIZ];
@@ -253,10 +231,6 @@ struct nd_pfxrouter {
struct nd_defrouter *router;
};
-#ifdef MALLOC_DECLARE
-MALLOC_DECLARE(M_IP6NDP);
-#endif
-
/* nd6.c */
VNET_DECLARE(int, nd6_mmaxtries);
VNET_DECLARE(struct nd_prhead, nd_prefix);
@@ -349,8 +323,8 @@ void nd6_init(void);
#ifdef VIMAGE
void nd6_destroy(void);
#endif
-struct nd_ifinfo *nd6_ifattach(struct ifnet *);
-void nd6_ifdetach(struct ifnet *, struct nd_ifinfo *);
+void nd6_ifattach(struct ifnet *);
+void nd6_ifdetach(struct ifnet *);
int nd6_is_addr_neighbor(const struct sockaddr_in6 *, struct ifnet *);
void nd6_option_init(void *, int, union nd_opts *);
struct nd_opt_hdr *nd6_option(union nd_opts *);
diff --git a/sys/netinet6/nd6_nbr.c b/sys/netinet6/nd6_nbr.c
index aa7cb3b41973..4da62575eaac 100644
--- a/sys/netinet6/nd6_nbr.c
+++ b/sys/netinet6/nd6_nbr.c
@@ -80,6 +80,8 @@
#define SDL(s) ((struct sockaddr_dl *)s)
+MALLOC_DECLARE(M_IP6NDP);
+
struct dadq;
static struct dadq *nd6_dad_find(struct ifaddr *, struct nd_opt_nonce *);
static void nd6_dad_add(struct dadq *dp);
@@ -173,7 +175,7 @@ nd6_ns_input(struct mbuf *m, int off, int icmp6len)
goto bad;
rflag = (V_ip6_forwarding) ? ND_NA_FLAG_ROUTER : 0;
- if (ND_IFINFO(ifp)->flags & ND6_IFF_ACCEPT_RTADV && V_ip6_norbit_raif)
+ if (ifp->if_inet6->nd_flags & ND6_IFF_ACCEPT_RTADV && V_ip6_norbit_raif)
rflag = 0;
if (IN6_IS_ADDR_UNSPECIFIED(&saddr6)) {
@@ -910,7 +912,7 @@ nd6_na_input(struct mbuf *m, int off, int icmp6len)
nd6_ifp = lltable_get_ifp(ln->lle_tbl);
if (!defrouter_remove(&ln->r_l3addr.addr6, nd6_ifp) &&
- (ND_IFINFO(nd6_ifp)->flags &
+ (nd6_ifp->if_inet6->nd_flags &
ND6_IFF_ACCEPT_RTADV) != 0)
/*
* Even if the neighbor is not in the default
@@ -1281,13 +1283,13 @@ nd6_dad_start(struct ifaddr *ifa, int delay)
*/
if ((ia->ia6_flags & IN6_IFF_ANYCAST) != 0 ||
V_ip6_dad_count == 0 ||
- (ND_IFINFO(ifa->ifa_ifp)->flags & ND6_IFF_NO_DAD) != 0) {
+ (ifa->ifa_ifp->if_inet6->nd_flags & ND6_IFF_NO_DAD) != 0) {
ia->ia6_flags &= ~IN6_IFF_TENTATIVE;
return;
}
if ((ifa->ifa_ifp->if_flags & IFF_UP) == 0 ||
(ifa->ifa_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
- (ND_IFINFO(ifa->ifa_ifp)->flags & ND6_IFF_IFDISABLED) != 0)
+ (ifa->ifa_ifp->if_inet6->nd_flags & ND6_IFF_IFDISABLED) != 0)
return;
DADQ_WLOCK();
@@ -1377,7 +1379,7 @@ nd6_dad_timer(void *arg)
KASSERT(ia != NULL, ("DAD entry %p with no address", dp));
NET_EPOCH_ENTER(et);
- if (ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED) {
+ if (ifp->if_inet6->nd_flags & ND6_IFF_IFDISABLED) {
/* Do not need DAD for ifdisabled interface. */
log(LOG_ERR, "nd6_dad_timer: cancel DAD on %s because of "
"ND6_IFF_IFDISABLED.\n", ifp->if_xname);
@@ -1414,7 +1416,7 @@ nd6_dad_timer(void *arg)
* We have more NS to go. Send NS packet for DAD.
*/
nd6_dad_starttimer(dp,
- (long)ND_IFINFO(ifa->ifa_ifp)->retrans * hz / 1000);
+ (long)ifa->ifa_ifp->if_inet6->nd_retrans * hz / 1000);
nd6_dad_ns_output(dp);
goto done;
} else {
@@ -1446,7 +1448,7 @@ nd6_dad_timer(void *arg)
dp->dad_count =
dp->dad_ns_ocount + V_nd6_mmaxtries - 1;
nd6_dad_starttimer(dp,
- (long)ND_IFINFO(ifa->ifa_ifp)->retrans * hz / 1000);
+ (long)ifa->ifa_ifp->if_inet6->nd_retrans * hz / 1000);
nd6_dad_ns_output(dp);
goto done;
} else {
@@ -1458,9 +1460,9 @@ nd6_dad_timer(void *arg)
*
* Reset DAD failures counter if using stable addresses.
*/
- if ((ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED) == 0) {
+ if ((ifp->if_inet6->nd_flags & ND6_IFF_IFDISABLED) == 0) {
ia->ia6_flags &= ~IN6_IFF_TENTATIVE;
- if ((ND_IFINFO(ifp)->flags & ND6_IFF_STABLEADDR) && !(ia->ia6_flags & IN6_IFF_TEMPORARY))
+ if ((ifp->if_inet6->nd_flags & ND6_IFF_STABLEADDR) && !(ia->ia6_flags & IN6_IFF_TEMPORARY))
atomic_store_int(&DAD_FAILURES(ifp), 0);
}
@@ -1509,7 +1511,7 @@ nd6_dad_duplicated(struct ifaddr *ifa, struct dadq *dp)
* For RFC 7217 stable addresses, increment failure counter here if we still have retries.
* More addresses will be generated as long as retries are not exhausted.
*/
- if ((ND_IFINFO(ifp)->flags & ND6_IFF_STABLEADDR) && !(ia->ia6_flags & IN6_IFF_TEMPORARY)) {
+ if ((ifp->if_inet6->nd_flags & ND6_IFF_STABLEADDR) && !(ia->ia6_flags & IN6_IFF_TEMPORARY)) {
u_int dad_failures = atomic_load_int(&DAD_FAILURES(ifp));
if (dad_failures <= V_ip6_stableaddr_maxretries) {
@@ -1547,7 +1549,7 @@ nd6_dad_duplicated(struct ifaddr *ifa, struct dadq *dp)
in6 = ia->ia_addr.sin6_addr;
if (in6_get_hw_ifid(ifp, &in6) == 0 &&
IN6_ARE_ADDR_EQUAL(&ia->ia_addr.sin6_addr, &in6)) {
- ND_IFINFO(ifp)->flags |= ND6_IFF_IFDISABLED;
+ ifp->if_inet6->nd_flags |= ND6_IFF_IFDISABLED;
log(LOG_ERR, "%s: possible hardware address "
"duplication detected, disable IPv6\n",
if_name(ifp));
diff --git a/sys/netinet6/nd6_rtr.c b/sys/netinet6/nd6_rtr.c
index 7bbba30a3a21..0ca97125110c 100644
--- a/sys/netinet6/nd6_rtr.c
+++ b/sys/netinet6/nd6_rtr.c
@@ -75,6 +75,8 @@
#include <machine/atomic.h>
+MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
+
static struct nd_defrouter *defrtrlist_update(struct nd_defrouter *);
static int prelist_update(struct nd_prefixctl *, struct nd_defrouter *,
struct mbuf *, int);
@@ -175,7 +177,7 @@ nd6_rs_input(struct mbuf *m, int off, int icmp6len)
* Accept RS only when V_ip6_forwarding=1 and the interface has
* no ND6_IFF_ACCEPT_RTADV.
*/
- if (!V_ip6_forwarding || ND_IFINFO(ifp)->flags & ND6_IFF_ACCEPT_RTADV)
+ if (!V_ip6_forwarding || ifp->if_inet6->nd_flags & ND6_IFF_ACCEPT_RTADV)
goto freeit;
/* RFC 6980: Nodes MUST silently ignore fragments */
@@ -280,7 +282,7 @@ defrtr_ipv6_only_ifp(struct ifnet *ifp)
ND6_RUNLOCK();
IF_ADDR_WLOCK(ifp);
- ipv6_only_old = ND_IFINFO(ifp)->flags & ND6_IFF_IPV6_ONLY;
+ ipv6_only_old = ifp->if_inet6->nd_flags & ND6_IFF_IPV6_ONLY;
IF_ADDR_WUNLOCK(ifp);
/* If nothing changed, we have an early exit. */
@@ -317,9 +319,9 @@ defrtr_ipv6_only_ifp(struct ifnet *ifp)
IF_ADDR_WLOCK(ifp);
if (ipv6_only)
- ND_IFINFO(ifp)->flags |= ND6_IFF_IPV6_ONLY;
+ ifp->if_inet6->nd_flags |= ND6_IFF_IPV6_ONLY;
else
- ND_IFINFO(ifp)->flags &= ~ND6_IFF_IPV6_ONLY;
+ ifp->if_inet6->nd_flags &= ~ND6_IFF_IPV6_ONLY;
IF_ADDR_WUNLOCK(ifp);
#ifdef notyet
@@ -332,7 +334,7 @@ defrtr_ipv6_only_ipf_down(struct ifnet *ifp)
{
IF_ADDR_WLOCK(ifp);
- ND_IFINFO(ifp)->flags &= ~ND6_IFF_IPV6_ONLY;
+ ifp->if_inet6->nd_flags &= ~ND6_IFF_IPV6_ONLY;
IF_ADDR_WUNLOCK(ifp);
}
#endif /* EXPERIMENTAL */
@@ -364,7 +366,7 @@ void
nd6_ra_input(struct mbuf *m, int off, int icmp6len)
{
struct ifnet *ifp;
- struct nd_ifinfo *ndi;
+ struct in6_ifextra *ndi;
struct ip6_hdr *ip6;
struct nd_router_advert *nd_ra;
struct in6_addr saddr6;
@@ -378,8 +380,8 @@ nd6_ra_input(struct mbuf *m, int off, int icmp6len)
* ND6_IFF_ACCEPT_RTADV is on the receiving interface.
*/
ifp = m->m_pkthdr.rcvif;
- ndi = ND_IFINFO(ifp);
- if (!(ndi->flags & ND6_IFF_ACCEPT_RTADV))
+ ndi = ifp->if_inet6;
+ if (!(ndi->nd_flags & ND6_IFF_ACCEPT_RTADV))
goto freeit;
/* RFC 6980: Nodes MUST silently ignore fragments */
@@ -441,7 +443,7 @@ nd6_ra_input(struct mbuf *m, int off, int icmp6len)
* ND6_IFF_NO_RADR enabled on the receiving interface or
* (ip6.forwarding == 1 && ip6.rfc6204w3 != 1).
*/
- if (ndi->flags & ND6_IFF_NO_RADR)
+ if (ndi->nd_flags & ND6_IFF_NO_RADR)
dr0.rtlifetime = 0;
else if (V_ip6_forwarding && !V_ip6_rfc6204w3)
dr0.rtlifetime = 0;
@@ -453,22 +455,24 @@ nd6_ra_input(struct mbuf *m, int off, int icmp6len)
if (advreachable) {
advreachable = ntohl(advreachable);
if (advreachable <= MAX_REACHABLE_TIME &&
- ndi->basereachable != advreachable) {
- ndi->basereachable = advreachable;
- ndi->reachable = ND_COMPUTE_RTIME(ndi->basereachable);
- ndi->recalctm = V_nd6_recalc_reachtm_interval; /* reset */
+ ndi->nd_basereachable != advreachable) {
+ ndi->nd_basereachable = advreachable;
+ ndi->nd_reachable =
+ ND_COMPUTE_RTIME(ndi->nd_basereachable);
+ ndi->nd_recalc_timer = V_nd6_recalc_reachtm_interval;
}
}
if (nd_ra->nd_ra_retransmit)
- ndi->retrans = ntohl(nd_ra->nd_ra_retransmit);
+ ndi->nd_retrans = ntohl(nd_ra->nd_ra_retransmit);
if (nd_ra->nd_ra_curhoplimit) {
- if (ndi->chlim < nd_ra->nd_ra_curhoplimit)
- ndi->chlim = nd_ra->nd_ra_curhoplimit;
- else if (ndi->chlim != nd_ra->nd_ra_curhoplimit) {
+ if (ndi->nd_curhoplimit < nd_ra->nd_ra_curhoplimit)
+ ndi->nd_curhoplimit = nd_ra->nd_ra_curhoplimit;
+ else if (ndi->nd_curhoplimit != nd_ra->nd_ra_curhoplimit) {
log(LOG_ERR, "RA with a lower CurHopLimit sent from "
"%s on %s (current = %d, received = %d). "
"Ignored.\n", ip6_sprintf(ip6bufs, &ip6->ip6_src),
- if_name(ifp), ndi->chlim, nd_ra->nd_ra_curhoplimit);
+ if_name(ifp), ndi->nd_curhoplimit,
+ nd_ra->nd_ra_curhoplimit);
}
}
dr = defrtrlist_update(&dr0);
@@ -557,11 +561,11 @@ nd6_ra_input(struct mbuf *m, int off, int icmp6len)
}
/* upper bound */
- maxmtu = (ndi->maxmtu && ndi->maxmtu < ifp->if_mtu)
- ? ndi->maxmtu : ifp->if_mtu;
+ maxmtu = (ndi->nd_maxmtu && ndi->nd_maxmtu < ifp->if_mtu)
+ ? ndi->nd_maxmtu : ifp->if_mtu;
if (mtu <= maxmtu) {
- if (ndi->linkmtu != mtu) {
- ndi->linkmtu = mtu;
+ if (ndi->nd_linkmtu != mtu) {
+ ndi->nd_linkmtu = mtu;
rt_updatemtu(ifp);
}
} else {
@@ -751,7 +755,7 @@ defrouter_del(struct nd_defrouter *dr)
* Flush all the routing table entries that use the router
* as a next hop.
*/
- if (ND_IFINFO(dr->ifp)->flags & ND6_IFF_ACCEPT_RTADV)
+ if (dr->ifp->if_inet6->nd_flags & ND6_IFF_ACCEPT_RTADV)
rt6_flush(&dr->rtaddr, dr->ifp);
#ifdef EXPERIMENTAL
@@ -1146,39 +1150,18 @@ restart:
return (n);
}
-static int
+static void
in6_init_prefix_ltimes(struct nd_prefix *ndpr)
{
- if (ndpr->ndpr_pltime == ND6_INFINITE_LIFETIME)
- ndpr->ndpr_preferred = 0;
- else
- ndpr->ndpr_preferred = time_uptime + ndpr->ndpr_pltime;
- if (ndpr->ndpr_vltime == ND6_INFINITE_LIFETIME)
- ndpr->ndpr_expire = 0;
- else
- ndpr->ndpr_expire = time_uptime + ndpr->ndpr_vltime;
-
- return 0;
+ ndpr->ndpr_preferred = in6_expire_time(ndpr->ndpr_pltime);
+ ndpr->ndpr_expire = in6_expire_time(ndpr->ndpr_vltime);
}
static void
in6_init_address_ltimes(struct nd_prefix *new, struct in6_addrlifetime *lt6)
{
- /* init ia6t_expire */
- if (lt6->ia6t_vltime == ND6_INFINITE_LIFETIME)
- lt6->ia6t_expire = 0;
- else {
- lt6->ia6t_expire = time_uptime;
- lt6->ia6t_expire += lt6->ia6t_vltime;
- }
-
- /* init ia6t_preferred */
- if (lt6->ia6t_pltime == ND6_INFINITE_LIFETIME)
- lt6->ia6t_preferred = 0;
- else {
- lt6->ia6t_preferred = time_uptime;
- lt6->ia6t_preferred += lt6->ia6t_pltime;
- }
+ lt6->ia6t_preferred = in6_expire_time(lt6->ia6t_pltime);
+ lt6->ia6t_expire = in6_expire_time(lt6->ia6t_vltime);
}
static struct in6_ifaddr *
@@ -1223,7 +1206,7 @@ in6_ifadd(struct nd_prefixctl *pr, int mcast)
/* make ifaddr */
in6_prepare_ifra(&ifra, &pr->ndpr_prefix.sin6_addr, &mask);
- if (ND_IFINFO(ifp)->flags & ND6_IFF_STABLEADDR) {
+ if (ifp->if_inet6->nd_flags & ND6_IFF_STABLEADDR) {
memcpy(&newaddr, &pr->ndpr_prefix.sin6_addr, sizeof(pr->ndpr_prefix.sin6_addr));
if(!in6_get_stableifid(ifp, &newaddr, prefixlen))
@@ -1394,11 +1377,8 @@ nd6_prelist_add(struct nd_prefixctl *pr, struct nd_defrouter *dr,
new->ndpr_vltime = pr->ndpr_vltime;
new->ndpr_pltime = pr->ndpr_pltime;
new->ndpr_flags = pr->ndpr_flags;
- if ((error = in6_init_prefix_ltimes(new)) != 0) {
- free(new, M_IP6NDP);
- return (error);
- }
new->ndpr_lastupdate = time_uptime;
+ in6_init_prefix_ltimes(new);
/* initialization */
LIST_INIT(&new->ndpr_advrtrs);
@@ -1542,7 +1522,7 @@ prelist_update(struct nd_prefixctl *new, struct nd_defrouter *dr,
if (new->ndpr_raf_onlink) {
pr->ndpr_vltime = new->ndpr_vltime;
pr->ndpr_pltime = new->ndpr_pltime;
- (void)in6_init_prefix_ltimes(pr); /* XXX error case? */
+ in6_init_prefix_ltimes(pr);
pr->ndpr_lastupdate = time_uptime;
}
@@ -1717,7 +1697,7 @@ prelist_update(struct nd_prefixctl *new, struct nd_defrouter *dr,
* if stable addresses (RFC 7217) are enabled, mark that a temporary address has been found
* to avoid generating uneeded extra ones.
*/
- if (ND_IFINFO(ifp)->flags & ND6_IFF_STABLEADDR)
+ if (ifp->if_inet6->nd_flags & ND6_IFF_STABLEADDR)
has_temporary = true;
if (V_ip6_temp_valid_lifetime >
@@ -1759,7 +1739,7 @@ prelist_update(struct nd_prefixctl *new, struct nd_defrouter *dr,
* between here and when a new address is generated, but this will cause that generation
* to fail and no further retries should happen.
*/
- if (ND_IFINFO(ifp)->flags & ND6_IFF_STABLEADDR &&
+ if (ifp->if_inet6->nd_flags & ND6_IFF_STABLEADDR &&
atomic_load_int(&DAD_FAILURES(ifp)) <= V_ip6_stableaddr_maxretries &&
ifa6->ia6_flags & (IN6_IFF_DUPLICATED | IN6_IFF_TEMPORARY))
continue;
diff --git a/sys/netinet6/scope6.c b/sys/netinet6/scope6.c
index 6862c75fb5e7..44a9d976d5fe 100644
--- a/sys/netinet6/scope6.c
+++ b/sys/netinet6/scope6.c
@@ -73,10 +73,11 @@ static struct mtx scope6_lock;
VNET_DEFINE_STATIC(struct scope6_id, sid_default);
#define V_sid_default VNET(sid_default)
-#define SID(ifp) ((ifp)->if_inet6->scope6_id)
+#define SID(ifp) (&(ifp)->if_inet6->scope6_id)
static int scope6_get(struct ifnet *, struct scope6_id *);
static int scope6_set(struct ifnet *, struct scope6_id *);
+static int scope6_get_default(struct scope6_id *);
void
scope6_init(void)
@@ -90,26 +91,18 @@ scope6_init(void)
SCOPE6_LOCK_INIT();
}
-struct scope6_id *
+void
scope6_ifattach(struct ifnet *ifp)
{
- struct scope6_id *sid;
+ struct scope6_id *sid = &ifp->if_inet6->scope6_id;
- sid = malloc(sizeof(*sid), M_IFADDR, M_WAITOK | M_ZERO);
/*
* XXX: IPV6_ADDR_SCOPE_xxx macros are not standard.
* Should we rather hardcode here?
*/
+ bzero(sid, sizeof(*sid));
sid->s6id_list[IPV6_ADDR_SCOPE_INTFACELOCAL] = ifp->if_index;
sid->s6id_list[IPV6_ADDR_SCOPE_LINKLOCAL] = ifp->if_index;
- return (sid);
-}
-
-void
-scope6_ifdetach(struct scope6_id *sid)
-{
-
- free(sid, M_IFADDR);
}
int
@@ -280,7 +273,7 @@ scope6_setdefault(struct ifnet *ifp)
SCOPE6_UNLOCK();
}
-int
+static int
scope6_get_default(struct scope6_id *idlist)
{
diff --git a/sys/netinet6/scope6_var.h b/sys/netinet6/scope6_var.h
index f914d5981bb0..7832444e0658 100644
--- a/sys/netinet6/scope6_var.h
+++ b/sys/netinet6/scope6_var.h
@@ -37,21 +37,10 @@
#ifdef _KERNEL
#include <net/vnet.h>
-#define IPV6_ADDR_SCOPES_COUNT 16
-struct scope6_id {
- /*
- * 16 is correspondent to 4bit multicast scope field.
- * i.e. from node-local to global with some reserved/unassigned types.
- */
- uint32_t s6id_list[IPV6_ADDR_SCOPES_COUNT];
-};
-
void scope6_init(void);
-struct scope6_id *scope6_ifattach(struct ifnet *);
-void scope6_ifdetach(struct scope6_id *);
+void scope6_ifattach(struct ifnet *);
int scope6_ioctl(u_long cmd, caddr_t data, struct ifnet *);
void scope6_setdefault(struct ifnet *);
-int scope6_get_default(struct scope6_id *);
u_int32_t scope6_addr2default(struct in6_addr *);
int sa6_embedscope(struct sockaddr_in6 *, int);
int sa6_recoverscope(struct sockaddr_in6 *);
diff --git a/sys/netinet6/udp6_usrreq.c b/sys/netinet6/udp6_usrreq.c
index ca7c95497510..1d1dcb75a1df 100644
--- a/sys/netinet6/udp6_usrreq.c
+++ b/sys/netinet6/udp6_usrreq.c
@@ -703,11 +703,6 @@ udp6_send(struct socket *so, int flags_arg, struct mbuf *m,
sin6 = (struct sockaddr_in6 *)addr6;
- /*
- * In contrast to IPv4 we do not validate the max. packet length
- * here due to IPv6 Jumbograms (RFC2675).
- */
-
scope_ambiguous = 0;
if (sin6) {
/* Protect *addr6 from overwrites. */
@@ -865,10 +860,21 @@ udp6_send(struct socket *so, int flags_arg, struct mbuf *m,
fport = inp->inp_fport;
}
+
+ /*
+ * We do not support IPv6 Jumbograms (RFC 2675), so validate that the
+ * payload length fits in a standard (non-jumbogram) IPv6 datagram.
+ */
ulen = m->m_pkthdr.len;
plen = sizeof(struct udphdr) + ulen;
hlen = sizeof(struct ip6_hdr);
+ if (plen > IPV6_MAXPAYLOAD) {
+ m_freem(control);
+ m_freem(m);
+ return (EMSGSIZE);
+ }
+
/*
* Calculate data length and get a mbuf for UDP, IP6, and possible
* link-layer headers. Immediate slide the data pointer back forward
@@ -903,10 +909,10 @@ udp6_send(struct socket *so, int flags_arg, struct mbuf *m,
* the entire UDPLite packet is covered by the checksum.
*/
cscov_partial = (cscov == 0) ? 0 : 1;
- } else if (plen <= 0xffff)
+ } else {
+ MPASS(plen <= IPV6_MAXPAYLOAD);
udp6->uh_ulen = htons((u_short)plen);
- else
- udp6->uh_ulen = 0;
+ }
udp6->uh_sum = 0;
ip6 = mtod(m, struct ip6_hdr *);
diff --git a/sys/netlink/ktest_netlink_message_writer.c b/sys/netlink/ktest_netlink_message_writer.c
index 805f52197f69..0e8b962bbd34 100644
--- a/sys/netlink/ktest_netlink_message_writer.c
+++ b/sys/netlink/ktest_netlink_message_writer.c
@@ -37,8 +37,6 @@
#define KTEST_CALLER
#include <netlink/ktest_netlink_message_writer.h>
-#ifdef INVARIANTS
-
struct test_nlbuf_attrs {
uint32_t size;
uint32_t expected_avail;
@@ -98,16 +96,13 @@ test_nlbuf_writer_allocation(struct ktest_test_context *ctx)
return (0);
}
-#endif
static const struct ktest_test_info tests[] = {
-#ifdef INVARIANTS
{
.name = "test_nlbuf_writer_allocation",
.desc = "test different buffer sizes in the netlink writer",
.func = &test_nlbuf_writer_allocation,
.parse = &test_nlbuf_parser,
},
-#endif
};
KTEST_MODULE_DECLARE(ktest_netlink_message_writer, tests);
diff --git a/sys/netlink/ktest_netlink_message_writer.h b/sys/netlink/ktest_netlink_message_writer.h
index 447593e0e700..1abf0d48de95 100644
--- a/sys/netlink/ktest_netlink_message_writer.h
+++ b/sys/netlink/ktest_netlink_message_writer.h
@@ -28,7 +28,7 @@
#ifndef _NETLINK_KTEST_NETLINK_MESSAGE_WRITER_H_
#define _NETLINK_KTEST_NETLINK_MESSAGE_WRITER_H_
-#if defined(_KERNEL) && defined(INVARIANTS)
+#if defined(_KERNEL)
bool nlmsg_get_buf_wrapper(struct nl_writer *nw, size_t size, bool waitok);
diff --git a/sys/netlink/route/iface.c b/sys/netlink/route/iface.c
index e9c053015fad..3e30d74a3793 100644
--- a/sys/netlink/route/iface.c
+++ b/sys/netlink/route/iface.c
@@ -36,6 +36,7 @@
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/syslog.h>
+#include <sys/proc.h>
#include <net/if.h>
#include <net/if_dl.h>
@@ -675,6 +676,8 @@ static int
rtnl_handle_newlink(struct nlmsghdr *hdr, struct nlpcb *nlp, struct nl_pstate *npt)
{
struct nlattr_bmask bm;
+ struct thread *td = curthread;
+ struct ucred *cred;
int error;
struct nl_parsed_link attrs = {};
@@ -683,10 +686,16 @@ rtnl_handle_newlink(struct nlmsghdr *hdr, struct nlpcb *nlp, struct nl_pstate *n
return (error);
nl_get_attrs_bmask_nlmsg(hdr, &ifmsg_parser, &bm);
+ /* XXX: temporary patch until the D39180 review lands */
+ cred = td->td_ucred;
+ td->td_ucred = nlp_get_cred(nlp);
if (hdr->nlmsg_flags & NLM_F_CREATE)
- return (create_link(hdr, &attrs, &bm, nlp, npt));
+ error = create_link(hdr, &attrs, &bm, nlp, npt);
else
- return (modify_link(hdr, &attrs, &bm, nlp, npt));
+ error = modify_link(hdr, &attrs, &bm, nlp, npt);
+ td->td_ucred = cred;
+
+ return (error);
}
static void
diff --git a/sys/netpfil/ipfw/ip_fw2.c b/sys/netpfil/ipfw/ip_fw2.c
index 4e13e6e55f1d..fe707abc7682 100644
--- a/sys/netpfil/ipfw/ip_fw2.c
+++ b/sys/netpfil/ipfw/ip_fw2.c
@@ -3667,6 +3667,7 @@ vnet_ipfw_init(const void *unused)
#ifdef IPFIREWALL_NAT
LIST_INIT(&chain->nat);
#endif
+ RB_INIT(&chain->taps);
/* Init shared services hash table */
ipfw_init_srv(chain);
@@ -3731,29 +3732,26 @@ vnet_ipfw_uninit(const void *unused)
V_ipfw_vnet_ready = 0; /* tell new callers to go away */
/*
- * disconnect from ipv4, ipv6, layer2 and sockopt.
- * Then grab, release and grab again the WLOCK so we make
- * sure the update is propagated and nobody will be in.
+ * Disconnect from ipv4, ipv6, layer2 and sockopt. pfil(9) hook
+ * removal is synchronized by the net epoch, but our destructors
+ * free the memory immediately, thus we need to wait for the epoch
+ * sections to complete.
*/
ipfw_detach_hooks();
V_ip_fw_ctl_ptr = NULL;
+ NET_EPOCH_WAIT();
last = IS_DEFAULT_VNET(curvnet) ? 1 : 0;
IPFW_UH_WLOCK(chain);
- IPFW_UH_WUNLOCK(chain);
ipfw_dyn_uninit(0); /* run the callout_drain */
- IPFW_UH_WLOCK(chain);
-
reap = NULL;
- IPFW_WLOCK(chain);
for (i = 0; i < chain->n_rules; i++)
ipfw_reap_add(chain, &reap, chain->map[i]);
free(chain->map, M_IPFW);
ipfw_destroy_skipto_cache(chain);
- IPFW_WUNLOCK(chain);
IPFW_UH_WUNLOCK(chain);
ipfw_destroy_tables(chain, last);
ipfw_eaction_uninit(chain, last);
diff --git a/sys/netpfil/ipfw/ip_fw_bpf.c b/sys/netpfil/ipfw/ip_fw_bpf.c
index d9897f700d57..9b92b136beb2 100644
--- a/sys/netpfil/ipfw/ip_fw_bpf.c
+++ b/sys/netpfil/ipfw/ip_fw_bpf.c
@@ -1,7 +1,7 @@
/*-
* Copyright (c) 2016 Yandex LLC
* Copyright (c) 2016 Andrey V. Elsukov <ae@FreeBSD.org>
- * Copyright (c) 2025 Gleb Smirnoff <glebius@FreeBSD.org>
+ * Copyright (c) 2025-2026 Gleb Smirnoff <glebius@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,7 +30,7 @@
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
-#include <sys/rwlock.h>
+#include <sys/sx.h>
#include <sys/socket.h>
#include <sys/tree.h>
#include <net/ethernet.h>
@@ -68,20 +68,20 @@ tap_compare(const struct ipfw_tap *a, const struct ipfw_tap *b)
{
return (a->rule != b->rule ? (a->rule < b->rule ? -1 : 1) : 0);
}
-RB_HEAD(tap_tree, ipfw_tap);
-VNET_DEFINE_STATIC(struct tap_tree, tap_tree);
-#define V_tap_tree VNET(tap_tree)
RB_GENERATE_STATIC(tap_tree, ipfw_tap, entry, tap_compare);
-VNET_DEFINE_STATIC(struct ipfw_tap *, default_tap);
+VNET_DEFINE_STATIC(struct ipfw_tap, default_tap) = { .name = "ipfw0" };
#define V_default_tap VNET(default_tap)
void
-ipfw_tap_alloc(uint32_t rule)
+ipfw_tap_alloc(struct ip_fw_chain *ch, uint32_t rule)
{
struct ipfw_tap *tap, key = { .rule = rule };
int n __diagused;
- tap = RB_FIND(tap_tree, &V_tap_tree, &key);
+ MPASS(rule > 0 && rule < IPFW_DEFAULT_RULE);
+ IPFW_UH_WLOCK_ASSERT(ch);
+
+ tap = RB_FIND(tap_tree, &ch->taps, &key);
if (tap != NULL) {
MPASS(tap->rule == rule);
tap->refs++;
@@ -90,43 +90,51 @@ ipfw_tap_alloc(uint32_t rule)
tap = malloc(sizeof(*tap), M_IPFW, M_WAITOK);
tap->rule = rule;
tap->refs = 1;
- /* Note: the default rule logs to "ipfw0". */
- if (__predict_false(rule == IPFW_DEFAULT_RULE)) {
- V_default_tap = tap;
- rule = 0;
- }
n = snprintf(tap->name, sizeof(tap->name), "ipfw%u", rule);
MPASS(n > 4 && n < sizeof("ipfw4294967295"));
tap->bpf = bpf_attach(tap->name, DLT_EN10MB, PFLOG_HDRLEN,
&bpf_ipfw_methods, NULL);
- tap = RB_INSERT(tap_tree, &V_tap_tree, tap);
+ tap = RB_INSERT(tap_tree, &ch->taps, tap);
MPASS(tap == NULL);
}
void
-ipfw_tap_free(uint32_t rule)
+ipfw_tap_free(struct ip_fw_chain *ch, uint32_t rule)
{
-
struct ipfw_tap *tap, key = { .rule = rule };
- tap = RB_FIND(tap_tree, &V_tap_tree, &key);
+ MPASS(rule > 0 && rule < IPFW_DEFAULT_RULE);
+ IPFW_UH_WLOCK_ASSERT(ch);
+
+ tap = RB_FIND(tap_tree, &ch->taps, &key);
MPASS(tap != NULL);
if (--tap->refs == 0) {
bpf_detach(tap->bpf);
- RB_REMOVE(tap_tree, &V_tap_tree, tap);
+ RB_REMOVE(tap_tree, &ch->taps, tap);
free(tap, M_IPFW);
}
}
void
-ipfw_bpf_tap(struct ip_fw_args *args, struct ip *ip, uint32_t rulenum)
+ipfw_bpf_tap(struct ip_fw_chain *ch, struct ip_fw_args *args,
+ struct ip *ip, uint32_t rulenum)
{
- struct ipfw_tap *tap, key = { .rule = rulenum };
+ struct ipfw_tap *tap;
- tap = RB_FIND(tap_tree, &V_tap_tree, &key);
- MPASS(tap != NULL);
- if (!bpf_peers_present(tap->bpf))
- tap = V_default_tap;
+ if (rulenum == IPFW_DEFAULT_RULE) {
+ tap = &V_default_tap;
+ } else {
+ struct ipfw_tap key = { .rule = rulenum };
+
+ tap = RB_FIND(tap_tree, &ch->taps, &key);
+ MPASS(tap != NULL);
+		/*
+		 * Compatibility: if the user is not using per-rule taps, fall
+		 * back to the default tap.
+		 */
+ if (!bpf_peers_present(tap->bpf))
+ tap = &V_default_tap;
+ }
if (args->flags & IPFW_ARGS_LENMASK) {
bpf_tap(tap->bpf, args->mem, IPFW_ARGS_LENGTH(args->flags));
} else if (args->flags & IPFW_ARGS_ETHER) {
@@ -161,7 +169,8 @@ ipfw_pflog_tap(void *data, struct mbuf *m)
void
ipfw_bpf_init(int first __unused)
{
- ipfw_tap_alloc(IPFW_DEFAULT_RULE);
+ V_default_tap.bpf = bpf_attach(V_default_tap.name, DLT_EN10MB,
+ PFLOG_HDRLEN, &bpf_ipfw_methods, NULL);
V_bpf_pflog = bpf_attach("ipfwlog0", DLT_PFLOG, PFLOG_HDRLEN,
&bpf_ipfw_methods, NULL);
}
@@ -169,7 +178,6 @@ ipfw_bpf_init(int first __unused)
void
ipfw_bpf_uninit(int last __unused)
{
-
- ipfw_tap_free(IPFW_DEFAULT_RULE);
+ bpf_detach(V_default_tap.bpf);
bpf_detach(V_bpf_pflog);
}
diff --git a/sys/netpfil/ipfw/ip_fw_dynamic.c b/sys/netpfil/ipfw/ip_fw_dynamic.c
index d454024ac5cb..d2bf4f4fc899 100644
--- a/sys/netpfil/ipfw/ip_fw_dynamic.c
+++ b/sys/netpfil/ipfw/ip_fw_dynamic.c
@@ -663,6 +663,8 @@ dyn_create(struct ip_fw_chain *ch, struct tid_info *ti,
ipfw_obj_ntlv *ntlv;
char *name;
+ IPFW_UH_WLOCK_ASSERT(ch);
+
DYN_DEBUG("uidx %u", ti->uidx);
if (ti->uidx != 0) {
if (ti->tlvs == NULL)
@@ -681,7 +683,6 @@ dyn_create(struct ip_fw_chain *ch, struct tid_info *ti,
obj->no.etlv = IPFW_TLV_STATE_NAME;
strlcpy(obj->name, name, sizeof(obj->name));
- IPFW_UH_WLOCK(ch);
no = ipfw_objhash_lookup_name_type(ni, 0,
IPFW_TLV_STATE_NAME, name);
if (no != NULL) {
@@ -691,14 +692,12 @@ dyn_create(struct ip_fw_chain *ch, struct tid_info *ti,
*/
*pkidx = no->kidx;
no->refcnt++;
- IPFW_UH_WUNLOCK(ch);
free(obj, M_IPFW);
DYN_DEBUG("\tfound kidx %u for name '%s'", *pkidx, no->name);
return (0);
}
if (ipfw_objhash_alloc_idx(ni, &obj->no.kidx) != 0) {
DYN_DEBUG("\talloc_idx failed for %s", name);
- IPFW_UH_WUNLOCK(ch);
free(obj, M_IPFW);
return (ENOSPC);
}
@@ -706,7 +705,6 @@ dyn_create(struct ip_fw_chain *ch, struct tid_info *ti,
SRV_OBJECT(ch, obj->no.kidx) = obj;
obj->no.refcnt++;
*pkidx = obj->no.kidx;
- IPFW_UH_WUNLOCK(ch);
DYN_DEBUG("\tcreated kidx %u for name '%s'", *pkidx, name);
return (0);
}
@@ -2145,9 +2143,6 @@ dyn_free_states(struct ip_fw_chain *chain)
* Userland can invoke ipfw_expire_dyn_states() to delete
* specific states, this will lead to modification of expired
* lists.
- *
- * XXXAE: do we need DYN_EXPIRED_LOCK? We can just use
- * IPFW_UH_WLOCK to protect access to these lists.
*/
DYN_EXPIRED_LOCK();
DYN_FREE_STATES(s4, s4n, ipv4);
@@ -2306,8 +2301,6 @@ dyn_expire_states(struct ip_fw_chain *ch, ipfw_range_tlv *rt)
void *rule;
int bucket, removed, length, max_length;
- IPFW_UH_WLOCK_ASSERT(ch);
-
/*
* Unlink expired states from each bucket.
* With acquired bucket lock iterate entries of each lists:
@@ -2505,13 +2498,8 @@ dyn_send_keepalive_ipv4(struct ip_fw_chain *chain)
uint32_t bucket;
mbufq_init(&q, INT_MAX);
- IPFW_UH_RLOCK(chain);
- /*
- * It is safe to not use hazard pointer and just do lockless
- * access to the lists, because states entries can not be deleted
- * while we hold IPFW_UH_RLOCK.
- */
for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
+ DYN_BUCKET_LOCK(bucket);
CK_SLIST_FOREACH(s, &V_dyn_ipv4[bucket], entry) {
/*
* Only established TCP connections that will
@@ -2524,8 +2512,8 @@ dyn_send_keepalive_ipv4(struct ip_fw_chain *chain)
continue;
dyn_enqueue_keepalive_ipv4(&q, s);
}
+ DYN_BUCKET_UNLOCK(bucket);
}
- IPFW_UH_RUNLOCK(chain);
while ((m = mbufq_dequeue(&q)) != NULL)
ip_output(m, NULL, NULL, 0, NULL, NULL);
}
@@ -2612,13 +2600,8 @@ dyn_send_keepalive_ipv6(struct ip_fw_chain *chain)
uint32_t bucket;
mbufq_init(&q, INT_MAX);
- IPFW_UH_RLOCK(chain);
- /*
- * It is safe to not use hazard pointer and just do lockless
- * access to the lists, because states entries can not be deleted
- * while we hold IPFW_UH_RLOCK.
- */
for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
+ DYN_BUCKET_LOCK(bucket);
CK_SLIST_FOREACH(s, &V_dyn_ipv6[bucket], entry) {
/*
* Only established TCP connections that will
@@ -2631,8 +2614,8 @@ dyn_send_keepalive_ipv6(struct ip_fw_chain *chain)
continue;
dyn_enqueue_keepalive_ipv6(&q, s);
}
+ DYN_BUCKET_UNLOCK(bucket);
}
- IPFW_UH_RUNLOCK(chain);
while ((m = mbufq_dequeue(&q)) != NULL)
ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
}
@@ -2734,10 +2717,6 @@ dyn_grow_hashtable(struct ip_fw_chain *chain, uint32_t new, int flags)
} \
} while (0)
/*
- * Prevent rules changing from userland.
- */
- IPFW_UH_WLOCK(chain);
- /*
* Hold traffic processing until we finish resize to
* prevent access to states lists.
*/
@@ -2780,7 +2759,6 @@ dyn_grow_hashtable(struct ip_fw_chain *chain, uint32_t new, int flags)
V_curr_dyn_buckets = new;
IPFW_WUNLOCK(chain);
- IPFW_UH_WUNLOCK(chain);
/* Release old resources */
while (bucket-- != 0)
@@ -2818,15 +2796,8 @@ dyn_tick(void *vnetx)
* First free states unlinked in previous passes.
*/
dyn_free_states(&V_layer3_chain);
- /*
- * Now unlink others expired states.
- * We use IPFW_UH_WLOCK to avoid concurrent call of
- * dyn_expire_states(). It is the only function that does
- * deletion of state entries from states lists.
- */
- IPFW_UH_WLOCK(&V_layer3_chain);
dyn_expire_states(&V_layer3_chain, NULL);
- IPFW_UH_WUNLOCK(&V_layer3_chain);
+
/*
* Send keepalives if they are enabled and the time has come.
*/
@@ -2863,14 +2834,24 @@ dyn_tick(void *vnetx)
void
ipfw_expire_dyn_states(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
{
+ IPFW_RLOCK_TRACKER;
+
/*
* Do not perform any checks if we currently have no dynamic states
*/
if (V_dyn_count == 0)
return;
- IPFW_UH_WLOCK_ASSERT(chain);
+ /*
+ * Acquire read lock to prevent race with dyn_grow_hashtable() called
+ * via dyn_tick(). Note that dyn_tick() also calls dyn_expire_states(),
+ * but doesn't acquire the chain lock. A race between dyn_tick() and
+ * this function should be safe, as dyn_expire_states() does all proper
+ * locking of buckets and expire lists.
+ */
+ IPFW_RLOCK(chain);
dyn_expire_states(chain, rt);
+ IPFW_RUNLOCK(chain);
}
/*
diff --git a/sys/netpfil/ipfw/ip_fw_iface.c b/sys/netpfil/ipfw/ip_fw_iface.c
index 71a25e84ec2b..332a90f1844a 100644
--- a/sys/netpfil/ipfw/ip_fw_iface.c
+++ b/sys/netpfil/ipfw/ip_fw_iface.c
@@ -95,12 +95,12 @@ enum ifevent { ARRIVAL, DEPARTURE, RENAME };
static void
ipfw_kifhandler(void *arg, struct ifnet *ifp, const char *old_name)
{
- enum ifevent *what = arg;
+ enum ifevent what = (uintptr_t)arg;
struct ip_fw_chain *ch;
struct ipfw_iface *iif;
struct namedobj_instance *ii;
- MPASS(*what != RENAME || old_name != NULL);
+ MPASS(what != RENAME || old_name != NULL);
if (V_ipfw_vnet_ready == 0)
return;
@@ -114,9 +114,9 @@ ipfw_kifhandler(void *arg, struct ifnet *ifp, const char *old_name)
return;
}
iif = (struct ipfw_iface*)ipfw_objhash_lookup_name(ii, 0,
- *what == RENAME ? old_name : if_name(ifp));
+ what == RENAME ? old_name : if_name(ifp));
if (iif != NULL) {
- switch (*what) {
+ switch (what) {
case ARRIVAL:
handle_ifattach(ch, iif, ifp->if_index);
break;
@@ -246,13 +246,13 @@ vnet_ipfw_iface_init(struct ip_fw_chain *ch)
{
struct namedobj_instance *ii;
+ IPFW_UH_WLOCK_ASSERT(ch);
+
ii = ipfw_objhash_create(DEFAULT_IFACES, DEFAULT_OBJHASH_SIZE);
- IPFW_UH_WLOCK(ch);
if (ch->ifcfg == NULL) {
ch->ifcfg = ii;
ii = NULL;
}
- IPFW_UH_WUNLOCK(ch);
if (ii != NULL) {
/* Already initialized. Free namehash. */
@@ -296,9 +296,7 @@ vnet_ipfw_iface_destroy(struct ip_fw_chain *ch)
/*
* Notify the subsystem that we are interested in tracking
- * interface @name. This function has to be called without
- * holding any locks to permit allocating the necessary states
- * for proper interface tracking.
+ * interface @name.
*
* Returns 0 on success.
*/
@@ -309,20 +307,18 @@ ipfw_iface_ref(struct ip_fw_chain *ch, char *name,
struct namedobj_instance *ii;
struct ipfw_iface *iif, *tmp;
+ IPFW_UH_WLOCK_ASSERT(ch);
+
if (strlen(name) >= sizeof(iif->ifname))
return (EINVAL);
- IPFW_UH_WLOCK(ch);
-
ii = CHAIN_TO_II(ch);
if (ii == NULL) {
/*
* First request to subsystem.
* Let's perform init.
*/
- IPFW_UH_WUNLOCK(ch);
vnet_ipfw_iface_init(ch);
- IPFW_UH_WLOCK(ch);
ii = CHAIN_TO_II(ch);
}
@@ -331,12 +327,9 @@ ipfw_iface_ref(struct ip_fw_chain *ch, char *name,
if (iif != NULL) {
iif->no.refcnt++;
ic->iface = iif;
- IPFW_UH_WUNLOCK(ch);
return (0);
}
- IPFW_UH_WUNLOCK(ch);
-
/* Not found. Let's create one */
iif = malloc(sizeof(struct ipfw_iface), M_IPFW, M_WAITOK | M_ZERO);
TAILQ_INIT(&iif->consumers);
@@ -350,14 +343,12 @@ ipfw_iface_ref(struct ip_fw_chain *ch, char *name,
* are not holding any locks.
*/
iif->no.refcnt = 1;
- IPFW_UH_WLOCK(ch);
tmp = (struct ipfw_iface *)ipfw_objhash_lookup_name(ii, 0, name);
if (tmp != NULL) {
/* Interface has been created since unlock. Ref and return */
tmp->no.refcnt++;
ic->iface = tmp;
- IPFW_UH_WUNLOCK(ch);
free(iif, M_IPFW);
return (0);
}
@@ -369,8 +360,6 @@ ipfw_iface_ref(struct ip_fw_chain *ch, char *name,
ipfw_objhash_add(ii, &iif->no);
ic->iface = iif;
- IPFW_UH_WUNLOCK(ch);
-
return (0);
}
diff --git a/sys/netpfil/ipfw/ip_fw_log.c b/sys/netpfil/ipfw/ip_fw_log.c
index b84e8cbf7e59..0f8a4df4e5d6 100644
--- a/sys/netpfil/ipfw/ip_fw_log.c
+++ b/sys/netpfil/ipfw/ip_fw_log.c
@@ -722,7 +722,7 @@ ipfw_log(struct ip_fw_chain *chain, struct ip_fw *f, u_int hlen,
/* O_LOG is the first action */
((cmd = ACTION_PTR(f)) && cmd->arg1 == IPFW_LOG_DEFAULT)) {
if (V_fw_verbose == 0) {
- ipfw_bpf_tap(args, ip,
+ ipfw_bpf_tap(chain, args, ip,
f != NULL ? f->rulenum : IPFW_DEFAULT_RULE);
return;
}
@@ -737,6 +737,6 @@ ipfw_log(struct ip_fw_chain *chain, struct ip_fw *f, u_int hlen,
ipfw_log_rtsock(chain, f, hlen, args, offset, tablearg, eh);
if (cmd->arg1 & IPFW_LOG_IPFW0)
- ipfw_bpf_tap(args, ip, f->rulenum);
+ ipfw_bpf_tap(chain, args, ip, f->rulenum);
}
/* end of file */
diff --git a/sys/netpfil/ipfw/ip_fw_nat.c b/sys/netpfil/ipfw/ip_fw_nat.c
index 8bd27f6885ab..75f12511a264 100644
--- a/sys/netpfil/ipfw/ip_fw_nat.c
+++ b/sys/netpfil/ipfw/ip_fw_nat.c
@@ -503,7 +503,6 @@ nat44_config(struct ip_fw_chain *chain, struct nat44_cfg_nat *ucfg)
gencnt = chain->gencnt;
ptr = lookup_nat_name(&chain->nat, ucfg->name);
if (ptr == NULL) {
- IPFW_UH_WUNLOCK(chain);
/* New rule: allocate and init new instance. */
ptr = malloc(sizeof(struct cfg_nat), M_IPFW, M_WAITOK | M_ZERO);
ptr->lib = LibAliasInit(NULL);
@@ -514,7 +513,6 @@ nat44_config(struct ip_fw_chain *chain, struct nat44_cfg_nat *ucfg)
LIST_REMOVE(ptr, _next);
flush_nat_ptrs(chain, ptr->id);
IPFW_WUNLOCK(chain);
- IPFW_UH_WUNLOCK(chain);
}
/*
@@ -543,7 +541,6 @@ nat44_config(struct ip_fw_chain *chain, struct nat44_cfg_nat *ucfg)
del_redir_spool_cfg(ptr, &ptr->redir_chain);
/* Add new entries. */
add_redir_spool_cfg((char *)(ucfg + 1), ptr);
- IPFW_UH_WLOCK(chain);
/* Extra check to avoid race with another ipfw_nat_cfg() */
tcfg = NULL;
@@ -1049,7 +1046,6 @@ retry:
len += sizeof(struct cfg_spool_legacy);
}
}
- IPFW_UH_RUNLOCK(chain);
data = malloc(len, M_TEMP, M_WAITOK | M_ZERO);
bcopy(&nat_cnt, data, sizeof(nat_cnt));
@@ -1057,7 +1053,6 @@ retry:
nat_cnt = 0;
len = sizeof(nat_cnt);
- IPFW_UH_RLOCK(chain);
if (gencnt != chain->gencnt) {
free(data, M_TEMP);
goto retry;
diff --git a/sys/netpfil/ipfw/ip_fw_private.h b/sys/netpfil/ipfw/ip_fw_private.h
index 582bdf8b1c2c..67bdde66e385 100644
--- a/sys/netpfil/ipfw/ip_fw_private.h
+++ b/sys/netpfil/ipfw/ip_fw_private.h
@@ -28,6 +28,9 @@
#ifndef _IPFW2_PRIVATE_H
#define _IPFW2_PRIVATE_H
+#include <sys/queue.h>
+#include <sys/tree.h>
+
/*
* Internal constants and data structures used by ipfw components
* and not meant to be exported outside the kernel.
@@ -161,9 +164,10 @@ struct ip_fw_chain;
void ipfw_bpf_init(int);
void ipfw_bpf_uninit(int);
-void ipfw_tap_alloc(uint32_t);
-void ipfw_tap_free(uint32_t);
-void ipfw_bpf_tap(struct ip_fw_args *, struct ip *, uint32_t);
+void ipfw_tap_alloc(struct ip_fw_chain *, uint32_t);
+void ipfw_tap_free(struct ip_fw_chain *, uint32_t);
+void ipfw_bpf_tap(struct ip_fw_chain *, struct ip_fw_args *, struct ip *,
+ uint32_t);
void ipfw_pflog_tap(void *, struct mbuf *);
void ipfw_log(struct ip_fw_chain *chain, struct ip_fw *f, u_int hlen,
struct ip_fw_args *args, u_short offset, uint32_t tablearg, struct ip *ip,
@@ -320,10 +324,11 @@ struct ip_fw_chain {
void *ifcfg; /* interface module data */
int *idxmap_back; /* standby skipto array of rules */
struct namedobj_instance *srvmap; /* cfg name->number mappings */
+ RB_HEAD(tap_tree, ipfw_tap) taps; /* see ip_fw_bpf.c */
#if defined( __linux__ ) || defined( _WIN32 )
spinlock_t uh_lock;
#else
- struct rwlock uh_lock; /* lock for upper half */
+ struct sx uh_lock; /* lock for upper half */
#endif
};
@@ -451,12 +456,12 @@ struct ipfw_ifc {
#else /* FreeBSD */
#define IPFW_LOCK_INIT(_chain) do { \
rm_init_flags(&(_chain)->rwmtx, "IPFW static rules", RM_RECURSE); \
- rw_init(&(_chain)->uh_lock, "IPFW UH lock"); \
+ sx_init(&(_chain)->uh_lock, "IPFW UH lock"); \
} while (0)
#define IPFW_LOCK_DESTROY(_chain) do { \
rm_destroy(&(_chain)->rwmtx); \
- rw_destroy(&(_chain)->uh_lock); \
+ sx_destroy(&(_chain)->uh_lock); \
} while (0)
#define IPFW_RLOCK_ASSERT(_chain) rm_assert(&(_chain)->rwmtx, RA_RLOCKED)
@@ -471,14 +476,14 @@ struct ipfw_ifc {
#define IPFW_PF_RUNLOCK(p) IPFW_RUNLOCK(p)
#endif
-#define IPFW_UH_RLOCK_ASSERT(_chain) rw_assert(&(_chain)->uh_lock, RA_RLOCKED)
-#define IPFW_UH_WLOCK_ASSERT(_chain) rw_assert(&(_chain)->uh_lock, RA_WLOCKED)
-#define IPFW_UH_UNLOCK_ASSERT(_chain) rw_assert(&(_chain)->uh_lock, RA_UNLOCKED)
+#define IPFW_UH_RLOCK_ASSERT(_chain) sx_assert(&(_chain)->uh_lock, SA_SLOCKED)
+#define IPFW_UH_WLOCK_ASSERT(_chain) sx_assert(&(_chain)->uh_lock, SA_XLOCKED)
+#define IPFW_UH_UNLOCK_ASSERT(_chain) sx_assert(&(_chain)->uh_lock, SA_UNLOCKED)
-#define IPFW_UH_RLOCK(p) rw_rlock(&(p)->uh_lock)
-#define IPFW_UH_RUNLOCK(p) rw_runlock(&(p)->uh_lock)
-#define IPFW_UH_WLOCK(p) rw_wlock(&(p)->uh_lock)
-#define IPFW_UH_WUNLOCK(p) rw_wunlock(&(p)->uh_lock)
+#define IPFW_UH_RLOCK(p) sx_slock(&(p)->uh_lock)
+#define IPFW_UH_RUNLOCK(p) sx_sunlock(&(p)->uh_lock)
+#define IPFW_UH_WLOCK(p) sx_xlock(&(p)->uh_lock)
+#define IPFW_UH_WUNLOCK(p) sx_xunlock(&(p)->uh_lock)
struct obj_idx {
uint32_t uidx; /* internal index supplied by userland */
diff --git a/sys/netpfil/ipfw/ip_fw_sockopt.c b/sys/netpfil/ipfw/ip_fw_sockopt.c
index 4e87865e966e..2941444a7bd3 100644
--- a/sys/netpfil/ipfw/ip_fw_sockopt.c
+++ b/sys/netpfil/ipfw/ip_fw_sockopt.c
@@ -210,8 +210,6 @@ ipfw_free_rule(struct ip_fw *rule)
*/
if (rule->refcnt > 1)
return;
- if (ACTION_PTR(rule)->opcode == O_LOG)
- ipfw_tap_free(rule->rulenum);
uma_zfree_pcpu(V_ipfw_cntr_zone, rule->cntr);
free(rule, M_IPFW);
}
@@ -337,44 +335,15 @@ ipfw_destroy_skipto_cache(struct ip_fw_chain *chain)
}
/*
- * allocate a new map, returns the chain locked. extra is the number
- * of entries to add or delete.
- */
-static struct ip_fw **
-get_map(struct ip_fw_chain *chain, int extra, int locked)
-{
-
- for (;;) {
- struct ip_fw **map;
- u_int i, mflags;
-
- mflags = M_ZERO | ((locked != 0) ? M_NOWAIT : M_WAITOK);
-
- i = chain->n_rules + extra;
- map = malloc(i * sizeof(struct ip_fw *), M_IPFW, mflags);
- if (map == NULL) {
- printf("%s: cannot allocate map\n", __FUNCTION__);
- return NULL;
- }
- if (!locked)
- IPFW_UH_WLOCK(chain);
- if (i >= chain->n_rules + extra) /* good */
- return map;
- /* otherwise we lost the race, free and retry */
- if (!locked)
- IPFW_UH_WUNLOCK(chain);
- free(map, M_IPFW);
- }
-}
-
-/*
- * swap the maps. It is supposed to be called with IPFW_UH_WLOCK
+ * swap the maps.
*/
static struct ip_fw **
swap_map(struct ip_fw_chain *chain, struct ip_fw **new_map, int new_len)
{
struct ip_fw **old_map;
+ IPFW_UH_WLOCK_ASSERT(chain);
+
IPFW_WLOCK(chain);
chain->id++;
chain->n_rules = new_len;
@@ -459,6 +428,7 @@ ipfw_commit_rules(struct ip_fw_chain *chain, struct rule_check_info *rci,
struct ip_fw *krule;
struct ip_fw **map; /* the new array of pointers */
+ IPFW_UH_WLOCK(chain);
/* Check if we need to do table/obj index remap */
tcount = 0;
for (ci = rci, i = 0; i < count; ci++, i++) {
@@ -484,8 +454,6 @@ ipfw_commit_rules(struct ip_fw_chain *chain, struct rule_check_info *rci,
* We have some more table rules
* we need to rollback.
*/
-
- IPFW_UH_WLOCK(chain);
while (ci != rci) {
ci--;
if (ci->object_opcodes == 0)
@@ -493,33 +461,16 @@ ipfw_commit_rules(struct ip_fw_chain *chain, struct rule_check_info *rci,
unref_rule_objects(chain,ci->krule);
}
- IPFW_UH_WUNLOCK(chain);
-
}
-
+ IPFW_UH_WUNLOCK(chain);
return (error);
}
tcount++;
}
- /* get_map returns with IPFW_UH_WLOCK if successful */
- map = get_map(chain, count, 0 /* not locked */);
- if (map == NULL) {
- if (tcount > 0) {
- /* Unbind tables */
- IPFW_UH_WLOCK(chain);
- for (ci = rci, i = 0; i < count; ci++, i++) {
- if (ci->object_opcodes == 0)
- continue;
-
- unref_rule_objects(chain, ci->krule);
- }
- IPFW_UH_WUNLOCK(chain);
- }
-
- return (ENOSPC);
- }
+ map = malloc((chain->n_rules + count) * sizeof(struct ip_fw *),
+ M_IPFW, M_ZERO | M_WAITOK);
if (V_autoinc_step < 1)
V_autoinc_step = 1;
@@ -552,6 +503,8 @@ ipfw_commit_rules(struct ip_fw_chain *chain, struct rule_check_info *rci,
memcpy((char *)ci->urule + ci->urule_numoff, &rulenum,
sizeof(rulenum));
}
+ if (ACTION_PTR(krule)->opcode == O_LOG)
+ ipfw_tap_alloc(chain, krule->rulenum);
}
/* duplicate the remaining part, we always have the default rule */
@@ -572,9 +525,9 @@ ipfw_add_protected_rule(struct ip_fw_chain *chain, struct ip_fw *rule)
{
struct ip_fw **map;
- map = get_map(chain, 1, 0);
- if (map == NULL)
- return (ENOMEM);
+ IPFW_UH_WLOCK(chain);
+ map = malloc((chain->n_rules + 1) * sizeof(struct ip_fw *),
+ M_IPFW, M_ZERO | M_WAITOK);
if (chain->n_rules > 0)
bcopy(chain->map, map,
chain->n_rules * sizeof(struct ip_fw *));
@@ -812,12 +765,8 @@ delete_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int *ndel)
}
/* Allocate new map of the same size */
- map = get_map(chain, 0, 1 /* locked */);
- if (map == NULL) {
- IPFW_UH_WUNLOCK(chain);
- return (ENOMEM);
- }
-
+ map = malloc(chain->n_rules * sizeof(struct ip_fw *),
+ M_IPFW, M_ZERO | M_WAITOK);
n = 0;
ndyn = 0;
ofs = start;
@@ -2153,6 +2102,8 @@ unref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule)
else
no->refcnt--;
}
+ if (ACTION_PTR(rule)->opcode == O_LOG)
+ ipfw_tap_free(ch, rule->rulenum);
}
/*
@@ -2227,7 +2178,7 @@ ref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule,
cmdlen = 0;
error = 0;
- IPFW_UH_WLOCK(ch);
+ IPFW_UH_WLOCK_ASSERT(ch);
/* Increase refcount on each existing referenced table. */
for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
@@ -2250,10 +2201,8 @@ ref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule,
if (error != 0) {
/* Unref everything we have already done */
unref_oib_objects(ch, rule->cmd, oib, pidx);
- IPFW_UH_WUNLOCK(ch);
return (error);
}
- IPFW_UH_WUNLOCK(ch);
/* Perform auto-creation for non-existing objects */
if (pidx != oib)
@@ -2512,9 +2461,6 @@ import_rule_v1(struct ip_fw_chain *chain, struct rule_check_info *ci)
/* Copy opcodes */
memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t));
-
- if (ACTION_PTR(krule)->opcode == O_LOG)
- ipfw_tap_alloc(krule->rulenum);
}
/*
diff --git a/sys/netpfil/ipfw/ip_fw_table.c b/sys/netpfil/ipfw/ip_fw_table.c
index da52166f8062..dde18d41ab15 100644
--- a/sys/netpfil/ipfw/ip_fw_table.c
+++ b/sys/netpfil/ipfw/ip_fw_table.c
@@ -61,34 +61,6 @@
#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/ipfw/ip_fw_table.h>
- /*
- * Table has the following `type` concepts:
- *
- * `no.type` represents lookup key type (addr, ifp, uid, etc..)
- * vmask represents bitmask of table values which are present at the moment.
- * Special IPFW_VTYPE_LEGACY ( (uint32_t)-1 ) represents old
- * single-value-for-all approach.
- */
-struct table_config {
- struct named_object no;
- uint8_t tflags; /* type flags */
- uint8_t locked; /* 1 if locked from changes */
- uint8_t linked; /* 1 if already linked */
- uint8_t ochanged; /* used by set swapping */
- uint8_t vshared; /* 1 if using shared value array */
- uint8_t spare[3];
- uint32_t count; /* Number of records */
- uint32_t limit; /* Max number of records */
- uint32_t vmask; /* bitmask with supported values */
- uint32_t ocount; /* used by set swapping */
- uint64_t gencnt; /* generation count */
- char tablename[64]; /* table name */
- struct table_algo *ta; /* Callbacks for given algo */
- void *astate; /* algorithm state */
- struct table_info ti_copy; /* data to put to table_info */
- struct namedobj_instance *vi;
-};
-
static int find_table_err(struct namedobj_instance *ni, struct tid_info *ti,
struct table_config **tc);
static struct table_config *find_table(struct namedobj_instance *ni,
@@ -115,8 +87,8 @@ static int swap_tables(struct ip_fw_chain *ch, struct tid_info *a,
struct tid_info *b);
static int check_table_name(const char *name);
-static int check_table_space(struct ip_fw_chain *ch, struct tableop_state *ts,
- struct table_config *tc, struct table_info *ti, uint32_t count);
+static int check_table_space(struct ip_fw_chain *ch, struct table_config *tc,
+ struct table_info *ti, uint32_t count);
static int destroy_table(struct ip_fw_chain *ch, struct tid_info *ti);
static struct table_algo *find_table_algo(struct tables_config *tableconf,
@@ -130,49 +102,6 @@ static void ntlv_to_ti(struct _ipfw_obj_ntlv *ntlv, struct tid_info *ti);
#define TA_BUF_SZ 128 /* On-stack buffer for add/delete state */
-void
-rollback_toperation_state(struct ip_fw_chain *ch, void *object)
-{
- struct tables_config *tcfg;
- struct op_state *os;
-
- tcfg = CHAIN_TO_TCFG(ch);
- TAILQ_FOREACH(os, &tcfg->state_list, next)
- os->func(object, os);
-}
-
-void
-add_toperation_state(struct ip_fw_chain *ch, struct tableop_state *ts)
-{
- struct tables_config *tcfg;
-
- tcfg = CHAIN_TO_TCFG(ch);
- TAILQ_INSERT_HEAD(&tcfg->state_list, &ts->opstate, next);
-}
-
-void
-del_toperation_state(struct ip_fw_chain *ch, struct tableop_state *ts)
-{
- struct tables_config *tcfg;
-
- tcfg = CHAIN_TO_TCFG(ch);
- TAILQ_REMOVE(&tcfg->state_list, &ts->opstate, next);
-}
-
-void
-tc_ref(struct table_config *tc)
-{
-
- tc->no.refcnt++;
-}
-
-void
-tc_unref(struct table_config *tc)
-{
-
- tc->no.refcnt--;
-}
-
static struct table_value *
get_table_value(struct ip_fw_chain *ch, struct table_config *tc, uint32_t kidx)
{
@@ -277,7 +206,6 @@ create_table_compat(struct ip_fw_chain *ch, struct tid_info *ti,
* creating new one.
*
* Saves found table config into @ptc.
- * Note function may drop/acquire UH_WLOCK.
* Returns 0 if table was found/created and referenced
* or non-zero return code.
*/
@@ -321,9 +249,7 @@ find_ref_table(struct ip_fw_chain *ch, struct tid_info *ti,
if ((tei->flags & TEI_FLAGS_COMPAT) == 0)
return (ESRCH);
- IPFW_UH_WUNLOCK(ch);
error = create_table_compat(ch, ti, &kidx);
- IPFW_UH_WLOCK(ch);
if (error != 0)
return (error);
@@ -476,57 +402,9 @@ flush_batch_buffer(struct ip_fw_chain *ch, struct table_algo *ta,
free(ta_buf_m, M_TEMP);
}
-static void
-rollback_add_entry(void *object, struct op_state *_state)
-{
- struct ip_fw_chain *ch __diagused;
- struct tableop_state *ts;
-
- ts = (struct tableop_state *)_state;
-
- if (ts->tc != object && ts->ch != object)
- return;
-
- ch = ts->ch;
-
- IPFW_UH_WLOCK_ASSERT(ch);
-
- /* Call specifid unlockers */
- rollback_table_values(ts);
-
- /* Indicate we've called */
- ts->modified = 1;
-}
-
/*
* Adds/updates one or more entries in table @ti.
*
- * Function may drop/reacquire UH wlock multiple times due to
- * items alloc, algorithm callbacks (check_space), value linkage
- * (new values, value storage realloc), etc..
- * Other processes like other adds (which may involve storage resize),
- * table swaps (which changes table data and may change algo type),
- * table modify (which may change value mask) may be executed
- * simultaneously so we need to deal with it.
- *
- * The following approach was implemented:
- * we have per-chain linked list, protected with UH lock.
- * add_table_entry prepares special on-stack structure wthich is passed
- * to its descendants. Users add this structure to this list before unlock.
- * After performing needed operations and acquiring UH lock back, each user
- * checks if structure has changed. If true, it rolls local state back and
- * returns without error to the caller.
- * add_table_entry() on its own checks if structure has changed and restarts
- * its operation from the beginning (goto restart).
- *
- * Functions which are modifying fields of interest (currently
- * resize_shared_value_storage() and swap_tables() )
- * traverses given list while holding UH lock immediately before
- * performing their operations calling function provided be list entry
- * ( currently rollback_add_entry ) which performs rollback for all necessary
- * state and sets appropriate values in structure indicating rollback
- * has happened.
- *
* Algo interaction:
* Function references @ti first to ensure table won't
* disappear or change its type.
@@ -545,92 +423,47 @@ add_table_entry(struct ip_fw_chain *ch, struct tid_info *ti,
struct table_config *tc;
struct table_algo *ta;
struct tentry_info *ptei;
- struct tableop_state ts;
char ta_buf[TA_BUF_SZ];
caddr_t ta_buf_m, v;
uint32_t kidx, num, numadd;
- int error, first_error, i, rollback;
+ int error, first_error, i, rollback = 0;
- memset(&ts, 0, sizeof(ts));
- ta = NULL;
IPFW_UH_WLOCK(ch);
/*
* Find and reference existing table.
*/
-restart:
- if (ts.modified != 0) {
- IPFW_UH_WUNLOCK(ch);
- flush_batch_buffer(ch, ta, tei, count, rollback,
- ta_buf_m, ta_buf);
- memset(&ts, 0, sizeof(ts));
- ta = NULL;
- IPFW_UH_WLOCK(ch);
- }
-
error = find_ref_table(ch, ti, tei, count, OP_ADD, &tc);
if (error != 0) {
IPFW_UH_WUNLOCK(ch);
return (error);
}
+ /* Drop reference we've used in first search */
+ tc->no.refcnt--;
ta = tc->ta;
- /* Fill in tablestate */
- ts.ch = ch;
- ts.opstate.func = rollback_add_entry;
- ts.tc = tc;
- ts.vshared = tc->vshared;
- ts.vmask = tc->vmask;
- ts.ta = ta;
- ts.tei = tei;
- ts.count = count;
- rollback = 0;
- add_toperation_state(ch, &ts);
- IPFW_UH_WUNLOCK(ch);
-
/* Allocate memory and prepare record(s) */
/* Pass stack buffer by default */
ta_buf_m = ta_buf;
error = prepare_batch_buffer(ch, ta, tei, count, OP_ADD, &ta_buf_m);
-
- IPFW_UH_WLOCK(ch);
- del_toperation_state(ch, &ts);
- /* Drop reference we've used in first search */
- tc->no.refcnt--;
-
- /* Check prepare_batch_buffer() error */
if (error != 0)
goto cleanup;
/*
- * Check if table swap has happened.
- * (so table algo might be changed).
- * Restart operation to achieve consistent behavior.
- */
- if (ts.modified != 0)
- goto restart;
-
- /*
* Link all values values to shared/per-table value array.
- *
- * May release/reacquire UH_WLOCK.
*/
- error = ipfw_link_table_values(ch, &ts, flags);
+ error = ipfw_link_table_values(ch, tc, tei, count, flags);
if (error != 0)
goto cleanup;
- if (ts.modified != 0)
- goto restart;
/*
* Ensure we are able to add all entries without additional
- * memory allocations. May release/reacquire UH_WLOCK.
+ * memory allocations.
*/
kidx = tc->no.kidx;
- error = check_table_space(ch, &ts, tc, KIDX_TO_TI(ch, kidx), count);
+ error = check_table_space(ch, tc, KIDX_TO_TI(ch, kidx), count);
if (error != 0)
goto cleanup;
- if (ts.modified != 0)
- goto restart;
/* We've got valid table in @tc. Let's try to add data */
kidx = tc->no.kidx;
@@ -690,7 +523,7 @@ restart:
/* Permit post-add algorithm grow/rehash. */
if (numadd != 0)
- check_table_space(ch, NULL, tc, KIDX_TO_TI(ch, kidx), 0);
+ check_table_space(ch, tc, KIDX_TO_TI(ch, kidx), 0);
/* Return first error to user, if any */
error = first_error;
@@ -730,7 +563,6 @@ del_table_entry(struct ip_fw_chain *ch, struct tid_info *ti,
return (error);
}
ta = tc->ta;
- IPFW_UH_WUNLOCK(ch);
/* Allocate memory and prepare record(s) */
/* Pass stack buffer by default */
@@ -739,8 +571,6 @@ del_table_entry(struct ip_fw_chain *ch, struct tid_info *ti,
if (error != 0)
goto cleanup;
- IPFW_UH_WLOCK(ch);
-
/* Drop reference we've used in first search */
tc->no.refcnt--;
@@ -779,7 +609,7 @@ del_table_entry(struct ip_fw_chain *ch, struct tid_info *ti,
if (numdel != 0) {
/* Run post-del hook to permit shrinking */
- check_table_space(ch, NULL, tc, KIDX_TO_TI(ch, kidx), 0);
+ check_table_space(ch, tc, KIDX_TO_TI(ch, kidx), 0);
}
IPFW_UH_WUNLOCK(ch);
@@ -808,8 +638,8 @@ cleanup:
* Returns 0 on success.
*/
static int
-check_table_space(struct ip_fw_chain *ch, struct tableop_state *ts,
- struct table_config *tc, struct table_info *ti, uint32_t count)
+check_table_space(struct ip_fw_chain *ch, struct table_config *tc,
+ struct table_info *ti, uint32_t count)
{
struct table_algo *ta;
uint64_t pflags;
@@ -838,36 +668,14 @@ check_table_space(struct ip_fw_chain *ch, struct tableop_state *ts,
break;
}
- /* We have to shrink/grow table */
- if (ts != NULL)
- add_toperation_state(ch, ts);
- IPFW_UH_WUNLOCK(ch);
-
memset(&ta_buf, 0, sizeof(ta_buf));
error = ta->prepare_mod(ta_buf, &pflags);
-
- IPFW_UH_WLOCK(ch);
- if (ts != NULL)
- del_toperation_state(ch, ts);
-
if (error != 0)
break;
- if (ts != NULL && ts->modified != 0) {
- /*
- * Swap operation has happened
- * so we're currently operating on other
- * table data. Stop doing this.
- */
- ta->flush_mod(ta_buf);
- break;
- }
-
/* Check if we still need to alter table */
ti = KIDX_TO_TI(ch, tc->no.kidx);
if (ta->need_modify(tc->astate, ti, count, &pflags) == 0) {
- IPFW_UH_WUNLOCK(ch);
-
/*
* Other thread has already performed resize.
* Flush our state and return.
@@ -1031,7 +839,6 @@ find_table_entry(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
ipfw_obj_header *oh;
struct tid_info ti;
struct table_config *tc;
- struct table_algo *ta;
struct table_info *kti;
struct table_value *pval;
struct namedobj_instance *ni;
@@ -1060,7 +867,6 @@ find_table_entry(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
/*
* Find existing table and check its type .
*/
- ta = NULL;
if ((tc = find_table(ni, &ti)) == NULL) {
IPFW_UH_RUNLOCK(ch);
return (ESRCH);
@@ -1073,12 +879,8 @@ find_table_entry(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
}
kti = KIDX_TO_TI(ch, tc->no.kidx);
- ta = tc->ta;
-
- if (ta->find_tentry == NULL)
- return (ENOTSUP);
- error = ta->find_tentry(tc->astate, kti, tent);
+ error = tc->ta->find_tentry(tc->astate, kti, tent);
if (error == 0) {
pval = get_table_value(ch, tc, tent->v.kidx);
ipfw_export_table_value_v1(pval, &tent->v.value);
@@ -1119,20 +921,6 @@ flush_table_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
return (error);
}
-static void
-restart_flush(void *object, struct op_state *_state)
-{
- struct tableop_state *ts;
-
- ts = (struct tableop_state *)_state;
-
- if (ts->tc != object)
- return;
-
- /* Indicate we've called */
- ts->modified = 1;
-}
-
/*
* Flushes given table.
*
@@ -1151,8 +939,7 @@ flush_table(struct ip_fw_chain *ch, struct tid_info *ti)
struct table_info ti_old, ti_new, *tablestate;
void *astate_old, *astate_new;
char algostate[64], *pstate;
- struct tableop_state ts;
- int error, need_gc;
+ int error;
uint32_t kidx;
uint8_t tflags;
@@ -1166,15 +953,8 @@ flush_table(struct ip_fw_chain *ch, struct tid_info *ti)
IPFW_UH_WUNLOCK(ch);
return (ESRCH);
}
- need_gc = 0;
astate_new = NULL;
memset(&ti_new, 0, sizeof(ti_new));
-restart:
- /* Set up swap handler */
- memset(&ts, 0, sizeof(ts));
- ts.opstate.func = restart_flush;
- ts.tc = tc;
-
ta = tc->ta;
/* Do not flush readonly tables */
if ((ta->flags & TA_FLAG_READONLY) != 0) {
@@ -1189,17 +969,6 @@ restart:
} else
pstate = NULL;
tflags = tc->tflags;
- tc->no.refcnt++;
- add_toperation_state(ch, &ts);
- IPFW_UH_WUNLOCK(ch);
-
- /*
- * Stage 1.5: if this is not the first attempt, destroy previous state
- */
- if (need_gc != 0) {
- ta->destroy(astate_new, &ti_new);
- need_gc = 0;
- }
/*
* Stage 2: allocate new table instance using same algo.
@@ -1211,27 +980,11 @@ restart:
* Stage 3: swap old state pointers with newly-allocated ones.
* Decrease refcount.
*/
- IPFW_UH_WLOCK(ch);
- tc->no.refcnt--;
- del_toperation_state(ch, &ts);
-
if (error != 0) {
IPFW_UH_WUNLOCK(ch);
return (error);
}
- /*
- * Restart operation if table swap has happened:
- * even if algo may be the same, algo init parameters
- * may change. Restart operation instead of doing
- * complex checks.
- */
- if (ts.modified != 0) {
- /* Delay destroying data since we're holding UH lock */
- need_gc = 1;
- goto restart;
- }
-
ni = CHAIN_TO_NI(ch);
kidx = tc->no.kidx;
tablestate = (struct table_info *)ch->tablestate;
@@ -1371,10 +1124,6 @@ swap_tables(struct ip_fw_chain *ch, struct tid_info *a,
return (EACCES);
}
- /* Notify we're going to swap */
- rollback_toperation_state(ch, tc_a);
- rollback_toperation_state(ch, tc_b);
-
/* Everything is fine, prepare to swap */
tablestate = (struct table_info *)ch->tablestate;
ti = tablestate[tc_a->no.kidx];
@@ -1565,7 +1314,7 @@ ipfw_ref_table(struct ip_fw_chain *ch, ipfw_obj_ntlv *ntlv, uint32_t *kidx)
if (tc == NULL)
return (ESRCH);
- tc_ref(tc);
+ tc->no.refcnt++;
*kidx = tc->no.kidx;
return (0);
@@ -1748,6 +1497,7 @@ create_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
char *tname, *aname;
struct tid_info ti;
struct namedobj_instance *ni;
+ int rv;
if (sd->valsize != sizeof(*oh) + sizeof(ipfw_xtable_info))
return (EINVAL);
@@ -1775,14 +1525,15 @@ create_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
ni = CHAIN_TO_NI(ch);
- IPFW_UH_RLOCK(ch);
+ IPFW_UH_WLOCK(ch);
if (find_table(ni, &ti) != NULL) {
- IPFW_UH_RUNLOCK(ch);
+ IPFW_UH_WUNLOCK(ch);
return (EEXIST);
}
- IPFW_UH_RUNLOCK(ch);
+ rv = create_table_internal(ch, &ti, aname, i, NULL, 0);
+ IPFW_UH_WUNLOCK(ch);
- return (create_table_internal(ch, &ti, aname, i, NULL, 0));
+ return (rv);
}
/*
@@ -1803,6 +1554,8 @@ create_table_internal(struct ip_fw_chain *ch, struct tid_info *ti,
struct table_algo *ta;
uint32_t kidx;
+ IPFW_UH_WLOCK_ASSERT(ch);
+
ni = CHAIN_TO_NI(ch);
ta = find_table_algo(CHAIN_TO_TCFG(ch), ti, aname);
@@ -1820,8 +1573,6 @@ create_table_internal(struct ip_fw_chain *ch, struct tid_info *ti,
else
tc->locked = (i->flags & IPFW_TGFLAGS_LOCKED) != 0;
- IPFW_UH_WLOCK(ch);
-
/* Check if table has been already created */
tc_new = find_table(ni, ti);
if (tc_new != NULL) {
@@ -1831,7 +1582,6 @@ create_table_internal(struct ip_fw_chain *ch, struct tid_info *ti,
* which has the same type
*/
if (compat == 0 || tc_new->no.subtype != tc->no.subtype) {
- IPFW_UH_WUNLOCK(ch);
free_table_config(ni, tc);
return (EEXIST);
}
@@ -1843,7 +1593,6 @@ create_table_internal(struct ip_fw_chain *ch, struct tid_info *ti,
} else {
/* New table */
if (ipfw_objhash_alloc_idx(ni, &kidx) != 0) {
- IPFW_UH_WUNLOCK(ch);
printf("Unable to allocate table index."
" Consider increasing net.inet.ip.fw.tables_max");
free_table_config(ni, tc);
@@ -1860,8 +1609,6 @@ create_table_internal(struct ip_fw_chain *ch, struct tid_info *ti,
if (pkidx != NULL)
*pkidx = tc->no.kidx;
- IPFW_UH_WUNLOCK(ch);
-
if (tc_new != NULL)
free_table_config(ni, tc_new);
diff --git a/sys/netpfil/ipfw/ip_fw_table.h b/sys/netpfil/ipfw/ip_fw_table.h
index 1dd7b198236d..5cc596bd821c 100644
--- a/sys/netpfil/ipfw/ip_fw_table.h
+++ b/sys/netpfil/ipfw/ip_fw_table.h
@@ -32,7 +32,39 @@
*/
#ifdef _KERNEL
-struct table_algo;
+/*
+ * Table has the following `type` concepts:
+ *
+ * `no.type` represents lookup key type (addr, ifp, uid, etc..)
+ * vmask represents bitmask of table values which are present at the moment.
+ * Special IPFW_VTYPE_LEGACY ( (uint32_t)-1 ) represents old
+ * single-value-for-all approach.
+ */
+struct table_config {
+ struct named_object no;
+ uint8_t tflags; /* type flags */
+ uint8_t locked; /* 1 if locked from changes */
+ uint8_t linked; /* 1 if already linked */
+ uint8_t ochanged; /* used by set swapping */
+ uint8_t vshared; /* 1 if using shared value array */
+ uint8_t spare[3];
+ uint32_t count; /* Number of records */
+ uint32_t limit; /* Max number of records */
+ uint32_t vmask; /* bitmask with supported values */
+ uint32_t ocount; /* used by set swapping */
+ uint64_t gencnt; /* generation count */
+ char tablename[64]; /* table name */
+ struct table_algo *ta; /* Callbacks for given algo */
+ void *astate; /* algorithm state */
+ struct table_info {
+ table_lookup_t *lookup;/* Lookup function */
+ void *state; /* Lookup radix/other structure */
+ void *xstate;/* eXtended state */
+ u_long data; /* Hints for given func */
+ } ti_copy; /* data to put to table_info */
+ struct namedobj_instance *vi;
+};
+
struct tables_config {
struct namedobj_instance *namehash;
struct namedobj_instance *valhash;
@@ -40,18 +72,9 @@ struct tables_config {
uint32_t algo_count;
struct table_algo *algo[256];
struct table_algo *def_algo[IPFW_TABLE_MAXTYPE + 1];
- TAILQ_HEAD(op_state_l,op_state) state_list;
};
#define CHAIN_TO_TCFG(chain) ((struct tables_config *)(chain)->tblcfg)
-struct table_info {
- table_lookup_t *lookup; /* Lookup function */
- void *state; /* Lookup radix/other structure */
- void *xstate; /* eXtended state */
- u_long data; /* Hints for given func */
-};
-
-struct table_value;
struct tentry_info {
void *paddr;
struct table_value *pvalue;
@@ -159,18 +182,16 @@ int flush_table(struct ip_fw_chain *ch, struct tid_info *ti);
/* ipfw_table_value.c functions */
struct table_config;
-struct tableop_state;
void ipfw_table_value_init(struct ip_fw_chain *ch, int first);
void ipfw_table_value_destroy(struct ip_fw_chain *ch, int last);
-int ipfw_link_table_values(struct ip_fw_chain *ch, struct tableop_state *ts,
- uint8_t flags);
+int ipfw_link_table_values(struct ip_fw_chain *ch, struct table_config *tc,
+ struct tentry_info *tei, uint32_t count, uint8_t flags);
void ipfw_garbage_table_values(struct ip_fw_chain *ch, struct table_config *tc,
struct tentry_info *tei, uint32_t count, int rollback);
void ipfw_import_table_value_v1(ipfw_table_value *iv);
void ipfw_export_table_value_v1(struct table_value *v, ipfw_table_value *iv);
void ipfw_unref_table_values(struct ip_fw_chain *ch, struct table_config *tc,
struct table_algo *ta, void *astate, struct table_info *ti);
-void rollback_table_values(struct tableop_state *ts);
int ipfw_rewrite_table_uidx(struct ip_fw_chain *chain,
struct rule_check_info *ci);
@@ -189,32 +210,5 @@ void ipfw_swap_tables_sets(struct ip_fw_chain *ch, uint32_t old_set,
int ipfw_foreach_table_tentry(struct ip_fw_chain *ch, uint32_t kidx,
ta_foreach_f f, void *arg);
-/* internal functions */
-void tc_ref(struct table_config *tc);
-void tc_unref(struct table_config *tc);
-
-struct op_state;
-typedef void (op_rollback_f)(void *object, struct op_state *state);
-struct op_state {
- TAILQ_ENTRY(op_state) next; /* chain link */
- op_rollback_f *func;
-};
-
-struct tableop_state {
- struct op_state opstate;
- struct ip_fw_chain *ch;
- struct table_config *tc;
- struct table_algo *ta;
- struct tentry_info *tei;
- uint32_t count;
- uint32_t vmask;
- int vshared;
- int modified;
-};
-
-void add_toperation_state(struct ip_fw_chain *ch, struct tableop_state *ts);
-void del_toperation_state(struct ip_fw_chain *ch, struct tableop_state *ts);
-void rollback_toperation_state(struct ip_fw_chain *ch, void *object);
-
#endif /* _KERNEL */
#endif /* _IPFW2_TABLE_H */
diff --git a/sys/netpfil/ipfw/ip_fw_table_value.c b/sys/netpfil/ipfw/ip_fw_table_value.c
index e09323cd02c3..09e2ead6ec2c 100644
--- a/sys/netpfil/ipfw/ip_fw_table_value.c
+++ b/sys/netpfil/ipfw/ip_fw_table_value.c
@@ -125,13 +125,13 @@ mask_table_value(struct table_value *src, struct table_value *dst,
}
static void
-get_value_ptrs(struct ip_fw_chain *ch, struct table_config *tc, int vshared,
+get_value_ptrs(struct ip_fw_chain *ch, struct table_config *tc,
struct table_value **ptv, struct namedobj_instance **pvi)
{
struct table_value *pval;
struct namedobj_instance *vi;
- if (vshared != 0) {
+ if (tc->vshared != 0) {
pval = (struct table_value *)ch->valuestate;
vi = CHAIN_TO_VI(ch);
} else {
@@ -147,7 +147,7 @@ get_value_ptrs(struct ip_fw_chain *ch, struct table_config *tc, int vshared,
}
/*
- * Update pointers to real vaues after @pval change.
+ * Update pointers to real values after @pval change.
*/
static int
update_tvalue(struct namedobj_instance *ni, struct named_object *no, void *arg)
@@ -167,7 +167,6 @@ update_tvalue(struct namedobj_instance *ni, struct named_object *no, void *arg)
/*
* Grows value storage shared among all tables.
- * Drops/reacquires UH locks.
* Notifies other running adds on @ch shared storage resize.
* Note function does not guarantee that free space
* will be available after invocation, so one caller needs
@@ -200,15 +199,11 @@ resize_shared_value_storage(struct ip_fw_chain *ch)
if (val_size == (1 << 30))
return (ENOSPC);
- IPFW_UH_WUNLOCK(ch);
-
valuestate = malloc(sizeof(struct table_value) * val_size, M_IPFW,
M_WAITOK | M_ZERO);
ipfw_objhash_bitmap_alloc(val_size, (void *)&new_idx,
&new_blocks);
- IPFW_UH_WLOCK(ch);
-
/*
* Check if we still need to resize
*/
@@ -217,7 +212,6 @@ resize_shared_value_storage(struct ip_fw_chain *ch)
/* Update pointers and notify everyone we're changing @ch */
pval = (struct table_value *)ch->valuestate;
- rollback_toperation_state(ch, ch);
/* Good. Let's merge */
memcpy(valuestate, pval, sizeof(struct table_value) * tcfg->val_size);
@@ -323,48 +317,12 @@ ipfw_unref_table_values(struct ip_fw_chain *ch, struct table_config *tc,
}
/*
- * Table operation state handler.
- * Called when we are going to change something in @tc which
- * may lead to inconsistencies in on-going table data addition.
- *
- * Here we rollback all already committed state (table values, currently)
- * and set "modified" field to non-zero value to indicate
- * that we need to restart original operation.
- */
-void
-rollback_table_values(struct tableop_state *ts)
-{
- struct ip_fw_chain *ch;
- struct table_value *pval;
- struct tentry_info *ptei;
- struct namedobj_instance *vi;
- int i;
-
- ch = ts->ch;
-
- IPFW_UH_WLOCK_ASSERT(ch);
-
- /* Get current table value pointer */
- get_value_ptrs(ch, ts->tc, ts->vshared, &pval, &vi);
-
- for (i = 0; i < ts->count; i++) {
- ptei = &ts->tei[i];
-
- if (ptei->value == 0)
- continue;
-
- unref_table_value(vi, pval, ptei->value);
- }
-}
-
-/*
* Allocate new value index in either shared or per-table array.
- * Function may drop/reacquire UH lock.
*
* Returns 0 on success.
*/
static int
-alloc_table_vidx(struct ip_fw_chain *ch, struct tableop_state *ts,
+alloc_table_vidx(struct ip_fw_chain *ch, struct table_config *tc,
struct namedobj_instance *vi, uint32_t *pvidx, uint8_t flags)
{
int error, vlimit;
@@ -372,19 +330,11 @@ alloc_table_vidx(struct ip_fw_chain *ch, struct tableop_state *ts,
IPFW_UH_WLOCK_ASSERT(ch);
- error = ipfw_objhash_alloc_idx(vi, &vidx);
- if (error != 0) {
- /*
- * We need to resize array. This involves
- * lock/unlock, so we need to check "modified"
- * state.
- */
- ts->opstate.func(ts->tc, &ts->opstate);
- error = resize_shared_value_storage(ch);
- return (error); /* ts->modified should be set, we will restart */
- }
+ if ((error = ipfw_objhash_alloc_idx(vi, &vidx)) != 0 &&
+ (error = resize_shared_value_storage(ch)) != 0)
+ return (error);
- vlimit = ts->ta->vlimit;
+ vlimit = tc->ta->vlimit;
if (vlimit != 0 && vidx >= vlimit && !(flags & IPFW_CTF_ATOMIC)) {
/*
* Algorithm is not able to store given index.
@@ -392,7 +342,7 @@ alloc_table_vidx(struct ip_fw_chain *ch, struct tableop_state *ts,
* per-table value array or return error
* if we're already using it.
*/
- if (ts->vshared != 0) {
+ if (tc->vshared != 0) {
/* shared -> per-table */
return (ENOSPC); /* TODO: proper error */
}
@@ -437,9 +387,8 @@ ipfw_garbage_table_values(struct ip_fw_chain *ch, struct table_config *tc,
/*
* Get current table value pointers.
- * XXX: Properly read vshared
*/
- get_value_ptrs(ch, tc, 1, &pval, &vi);
+ get_value_ptrs(ch, tc, &pval, &vi);
for (i = 0; i < count; i++) {
ptei = &tei[i];
@@ -470,14 +419,13 @@ ipfw_garbage_table_values(struct ip_fw_chain *ch, struct table_config *tc,
* Success: return 0.
*/
int
-ipfw_link_table_values(struct ip_fw_chain *ch, struct tableop_state *ts,
- uint8_t flags)
+ipfw_link_table_values(struct ip_fw_chain *ch, struct table_config *tc,
+ struct tentry_info *tei, uint32_t count, uint8_t flags)
{
int error, i, found;
struct namedobj_instance *vi;
- struct table_config *tc;
- struct tentry_info *tei, *ptei;
- uint32_t count, vidx, vlimit;
+ struct tentry_info *ptei;
+ uint32_t vidx, vlimit;
struct table_val_link *ptv;
struct table_value tval, *pval;
@@ -486,19 +434,16 @@ ipfw_link_table_values(struct ip_fw_chain *ch, struct tableop_state *ts,
* save their indices.
*/
IPFW_UH_WLOCK_ASSERT(ch);
- get_value_ptrs(ch, ts->tc, ts->vshared, &pval, &vi);
+ get_value_ptrs(ch, tc, &pval, &vi);
error = 0;
found = 0;
- vlimit = ts->ta->vlimit;
+ vlimit = tc->ta->vlimit;
vidx = 0;
- tc = ts->tc;
- tei = ts->tei;
- count = ts->count;
for (i = 0; i < count; i++) {
ptei = &tei[i];
ptei->value = 0; /* Ensure value is always 0 in the beginning */
- mask_table_value(ptei->pvalue, &tval, ts->vmask);
+ mask_table_value(ptei->pvalue, &tval, tc->vmask);
ptv = (struct table_val_link *)ipfw_objhash_lookup_name(vi, 0,
(char *)&tval);
if (ptv == NULL)
@@ -513,21 +458,12 @@ ipfw_link_table_values(struct ip_fw_chain *ch, struct tableop_state *ts,
found++;
}
- if (ts->count == found) {
- /* We've found all values , no need ts create new ones */
+ if (count == found) {
+ /* We've found all values, no need to create new ones. */
return (0);
}
/*
- * we have added some state here, let's attach operation
- * state ts the list ts be able ts rollback if necessary.
- */
- add_toperation_state(ch, ts);
- /* Ensure table won't disappear */
- tc_ref(tc);
- IPFW_UH_WUNLOCK(ch);
-
- /*
* Stage 2: allocate objects for non-existing values.
*/
for (i = 0; i < count; i++) {
@@ -544,18 +480,6 @@ ipfw_link_table_values(struct ip_fw_chain *ch, struct tableop_state *ts,
* Stage 3: allocate index numbers for new values
* and link them to index.
*/
- IPFW_UH_WLOCK(ch);
- tc_unref(tc);
- del_toperation_state(ch, ts);
- if (ts->modified != 0) {
- /*
- * In general, we should free all state/indexes here
- * and return. However, we keep allocated state instead
- * to ensure we achieve some progress on each restart.
- */
- return (0);
- }
-
KASSERT(pval == ch->valuestate, ("resize_storage() notify failure"));
/* Let's try to link values */
@@ -563,7 +487,7 @@ ipfw_link_table_values(struct ip_fw_chain *ch, struct tableop_state *ts,
ptei = &tei[i];
/* Check if record has appeared */
- mask_table_value(ptei->pvalue, &tval, ts->vmask);
+ mask_table_value(ptei->pvalue, &tval, tc->vmask);
ptv = (struct table_val_link *)ipfw_objhash_lookup_name(vi, 0,
(char *)&tval);
if (ptv != NULL) {
@@ -572,15 +496,8 @@ ipfw_link_table_values(struct ip_fw_chain *ch, struct tableop_state *ts,
continue;
}
- /* May perform UH unlock/lock */
- error = alloc_table_vidx(ch, ts, vi, &vidx, flags);
- if (error != 0) {
- ts->opstate.func(ts->tc, &ts->opstate);
+ if ((error = alloc_table_vidx(ch, tc, vi, &vidx, flags)) != 0)
return (error);
- }
- /* value storage resize has happened, return */
- if (ts->modified != 0)
- return (0);
/* Finally, we have allocated valid index, let's add entry */
ptei->value = vidx;
diff --git a/sys/netpfil/pf/pf.c b/sys/netpfil/pf/pf.c
index 60ca9039e9ce..3ccf1344fd7d 100644
--- a/sys/netpfil/pf/pf.c
+++ b/sys/netpfil/pf/pf.c
@@ -360,6 +360,8 @@ static int pf_tcp_track_sloppy(struct pf_kstate *,
struct pf_pdesc *, u_short *,
struct pf_state_peer *, struct pf_state_peer *,
u_int8_t, u_int8_t);
+static __inline int pf_synproxy_ack(struct pf_krule *, struct pf_pdesc *,
+ struct pf_kstate **, struct pf_rule_actions *);
static int pf_test_state(struct pf_kstate **, struct pf_pdesc *,
u_short *);
int pf_icmp_state_lookup(struct pf_state_key_cmp *,
@@ -426,6 +428,269 @@ static __inline void pf_set_protostate(struct pf_kstate *, int, u_int8_t);
int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);
+static inline int
+pf_statelim_id_cmp(const struct pf_statelim *a, const struct pf_statelim *b)
+{
+ if (a->pfstlim_id > b->pfstlim_id)
+ return (1);
+ if (a->pfstlim_id < b->pfstlim_id)
+ return (-1);
+
+ return (0);
+}
+
+RB_GENERATE(pf_statelim_id_tree, pf_statelim, pfstlim_id_tree,
+ pf_statelim_id_cmp);
+
+static inline int
+pf_statelim_nm_cmp(const struct pf_statelim *a, const struct pf_statelim *b)
+{
+ return (strncmp(a->pfstlim_nm, b->pfstlim_nm, sizeof(a->pfstlim_nm)));
+}
+
+RB_GENERATE(pf_statelim_nm_tree, pf_statelim, pfstlim_nm_tree,
+ pf_statelim_nm_cmp);
+
+VNET_DEFINE(struct pf_statelim_id_tree, pf_statelim_id_tree_active);
+VNET_DEFINE(struct pf_statelim_list, pf_statelim_list_active);
+VNET_DEFINE(struct pf_statelim_id_tree, pf_statelim_id_tree_inactive);
+VNET_DEFINE(struct pf_statelim_nm_tree, pf_statelim_nm_tree_inactive);
+VNET_DEFINE(struct pf_statelim_list, pf_statelim_list_inactive);
+
+static inline int
+pf_sourcelim_id_cmp(const struct pf_sourcelim *a, const struct pf_sourcelim *b)
+{
+ if (a->pfsrlim_id > b->pfsrlim_id)
+ return (1);
+ if (a->pfsrlim_id < b->pfsrlim_id)
+ return (-1);
+
+ return (0);
+}
+
+RB_GENERATE(pf_sourcelim_id_tree, pf_sourcelim, pfsrlim_id_tree,
+ pf_sourcelim_id_cmp);
+
+static inline int
+pf_sourcelim_nm_cmp(const struct pf_sourcelim *a, const struct pf_sourcelim *b)
+{
+ return (strncmp(a->pfsrlim_nm, b->pfsrlim_nm, sizeof(a->pfsrlim_nm)));
+}
+
+RB_GENERATE(pf_sourcelim_nm_tree, pf_sourcelim, pfsrlim_nm_tree,
+ pf_sourcelim_nm_cmp);
+
+static inline int
+pf_source_cmp(const struct pf_source *a, const struct pf_source *b)
+{
+ if (a->pfsr_af > b->pfsr_af)
+ return (1);
+ if (a->pfsr_af < b->pfsr_af)
+ return (-1);
+ if (a->pfsr_rdomain > b->pfsr_rdomain)
+ return (1);
+ if (a->pfsr_rdomain < b->pfsr_rdomain)
+ return (-1);
+
+ return (pf_addr_cmp(&a->pfsr_addr, &b->pfsr_addr, a->pfsr_af));
+}
+
+RB_GENERATE(pf_source_tree, pf_source, pfsr_tree, pf_source_cmp);
+
+static inline int
+pf_source_ioc_cmp(const struct pf_source *a, const struct pf_source *b)
+{
+ size_t i;
+
+ if (a->pfsr_af > b->pfsr_af)
+ return (1);
+ if (a->pfsr_af < b->pfsr_af)
+ return (-1);
+ if (a->pfsr_rdomain > b->pfsr_rdomain)
+ return (1);
+ if (a->pfsr_rdomain < b->pfsr_rdomain)
+ return (-1);
+
+ for (i = 0; i < nitems(a->pfsr_addr.addr32); i++) {
+ uint32_t wa = ntohl(a->pfsr_addr.addr32[i]);
+ uint32_t wb = ntohl(b->pfsr_addr.addr32[i]);
+
+ if (wa > wb)
+ return (1);
+ if (wa < wb)
+ return (-1);
+ }
+
+ return (0);
+}
+
+RB_GENERATE(pf_source_ioc_tree, pf_source, pfsr_ioc_tree, pf_source_ioc_cmp);
+
+VNET_DEFINE(struct pf_sourcelim_id_tree, pf_sourcelim_id_tree_active);
+VNET_DEFINE(struct pf_sourcelim_list, pf_sourcelim_list_active);
+
+VNET_DEFINE(struct pf_sourcelim_id_tree, pf_sourcelim_id_tree_inactive);
+VNET_DEFINE(struct pf_sourcelim_nm_tree, pf_sourcelim_nm_tree_inactive);
+VNET_DEFINE(struct pf_sourcelim_list, pf_sourcelim_list_inactive);
+
+static inline struct pf_statelim *
+pf_statelim_find(uint32_t id)
+{
+ struct pf_statelim key;
+
+ /* only the id is used in cmp, so don't have to zero all the things */
+ key.pfstlim_id = id;
+
+ return (RB_FIND(pf_statelim_id_tree,
+ &V_pf_statelim_id_tree_active, &key));
+}
+
+static inline struct pf_sourcelim *
+pf_sourcelim_find(uint32_t id)
+{
+ struct pf_sourcelim key;
+
+ /* only the id is used in cmp, so don't have to zero all the things */
+ key.pfsrlim_id = id;
+
+ return (RB_FIND(pf_sourcelim_id_tree,
+ &V_pf_sourcelim_id_tree_active, &key));
+}
+
+struct pf_source_list pf_source_gc = TAILQ_HEAD_INITIALIZER(pf_source_gc);
+
+static void
+pf_source_purge(void)
+{
+ struct pf_source *sr, *nsr;
+
+ TAILQ_FOREACH_SAFE(sr, &pf_source_gc, pfsr_empty_gc, nsr) {
+ struct pf_sourcelim *srlim = sr->pfsr_parent;
+
+ if (time_uptime <= sr->pfsr_empty_ts +
+ srlim->pfsrlim_rate.seconds + 1)
+ continue;
+
+ TAILQ_REMOVE(&pf_source_gc, sr, pfsr_empty_gc);
+
+ RB_REMOVE(pf_source_tree, &srlim->pfsrlim_sources, sr);
+ RB_REMOVE(pf_source_ioc_tree, &srlim->pfsrlim_ioc_sources, sr);
+ srlim->pfsrlim_nsources--;
+
+ free(sr, M_PF_SOURCE_LIM);
+ }
+}
+
+static void
+pf_source_pfr_addr(struct pfr_addr *p, const struct pf_source *sr)
+{
+ struct pf_sourcelim *srlim = sr->pfsr_parent;
+
+ memset(p, 0, sizeof(*p));
+
+ p->pfra_af = sr->pfsr_af;
+ switch (sr->pfsr_af) {
+ case AF_INET:
+ p->pfra_net = srlim->pfsrlim_ipv4_prefix;
+ p->pfra_ip4addr = sr->pfsr_addr.v4;
+ break;
+#ifdef INET6
+ case AF_INET6:
+ p->pfra_net = srlim->pfsrlim_ipv6_prefix;
+ p->pfra_ip6addr = sr->pfsr_addr.v6;
+ break;
+#endif /* INET6 */
+ }
+}
+
+static void
+pf_source_used(struct pf_source *sr)
+{
+ struct pf_sourcelim *srlim = sr->pfsr_parent;
+ struct pfr_ktable *t;
+ unsigned int used;
+
+ used = sr->pfsr_inuse++;
+ sr->pfsr_rate_ts += srlim->pfsrlim_rate_token;
+
+ if (used == 0)
+ TAILQ_REMOVE(&pf_source_gc, sr, pfsr_empty_gc);
+ else if ((t = srlim->pfsrlim_overload.table) != NULL &&
+ used >= srlim->pfsrlim_overload.hwm && !sr->pfsr_intable) {
+ struct pfr_addr p;
+
+ pf_source_pfr_addr(&p, sr);
+
+ pfr_insert_kentry(t, &p, time_second);
+ sr->pfsr_intable = 1;
+ }
+}
+
+static void
+pf_source_rele(struct pf_source *sr)
+{
+ struct pf_sourcelim *srlim = sr->pfsr_parent;
+ struct pfr_ktable *t;
+ unsigned int used;
+
+ used = --sr->pfsr_inuse;
+
+ t = srlim->pfsrlim_overload.table;
+ if (t != NULL && sr->pfsr_intable &&
+ used < srlim->pfsrlim_overload.lwm) {
+ struct pfr_addr p;
+
+ pf_source_pfr_addr(&p, sr);
+
+ pfr_remove_kentry(t, &p);
+ sr->pfsr_intable = 0;
+ }
+
+ if (used == 0) {
+ TAILQ_INSERT_TAIL(&pf_source_gc, sr, pfsr_empty_gc);
+ sr->pfsr_empty_ts = time_uptime + srlim->pfsrlim_rate.seconds;
+ }
+}
+
+static inline void
+pf_source_key(struct pf_sourcelim *srlim, struct pf_source *key,
+ sa_family_t af, const struct pf_addr *addr)
+{
+ size_t i;
+
+ /* only af+addr is used for lookup. */
+ key->pfsr_af = af;
+ key->pfsr_rdomain = 0;
+ switch (af) {
+ case AF_INET:
+ key->pfsr_addr.addr32[0] =
+ srlim->pfsrlim_ipv4_mask.v4.s_addr &
+ addr->v4.s_addr;
+
+ for (i = 1; i < nitems(key->pfsr_addr.addr32); i++)
+ key->pfsr_addr.addr32[i] = htonl(0);
+ break;
+#ifdef INET6
+ case AF_INET6:
+ for (i = 0; i < nitems(key->pfsr_addr.addr32); i++) {
+ key->pfsr_addr.addr32[i] =
+ srlim->pfsrlim_ipv6_mask.addr32[i] &
+ addr->addr32[i];
+ }
+ break;
+#endif
+ default:
+ unhandled_af(af);
+ /* NOTREACHED */
+ }
+}
+
+static inline struct pf_source *
+pf_source_find(struct pf_sourcelim *srlim, struct pf_source *key)
+{
+ return (RB_FIND(pf_source_tree, &srlim->pfsrlim_sources, key));
+}
+
extern int pf_end_threads;
extern struct proc *pf_purge_proc;
@@ -519,6 +784,8 @@ BOUND_IFACE(struct pf_kstate *st, struct pf_pdesc *pd)
MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
MALLOC_DEFINE(M_PF_RULE_ITEM, "pf_krule_item", "pf(4) rule items");
+MALLOC_DEFINE(M_PF_STATE_LINK, "pf_state_link", "pf(4) state links");
+MALLOC_DEFINE(M_PF_SOURCE_LIM, "pf_source_lim", "pf(4) source limiter");
VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
VNET_DEFINE(struct pf_idhash *, pf_idhash);
VNET_DEFINE(struct pf_srchash *, pf_srchash);
@@ -1295,6 +1562,22 @@ pf_initialize(void)
/* Unlinked, but may be referenced rules. */
TAILQ_INIT(&V_pf_unlinked_rules);
+
+ /* State limiters */
+ RB_INIT(&V_pf_statelim_id_tree_inactive);
+ RB_INIT(&V_pf_statelim_nm_tree_inactive);
+ TAILQ_INIT(&V_pf_statelim_list_inactive);
+
+ RB_INIT(&V_pf_statelim_id_tree_active);
+ TAILQ_INIT(&V_pf_statelim_list_active);
+
+ /* Source limiters */
+ RB_INIT(&V_pf_sourcelim_id_tree_active);
+ TAILQ_INIT(&V_pf_sourcelim_list_active);
+
+ RB_INIT(&V_pf_sourcelim_id_tree_inactive);
+ RB_INIT(&V_pf_sourcelim_nm_tree_inactive);
+ TAILQ_INIT(&V_pf_sourcelim_list_inactive);
}
void
@@ -2680,6 +2963,7 @@ pf_purge_thread(void *unused __unused)
pf_purge_expired_fragments();
pf_purge_expired_src_nodes();
pf_purge_unlinked_rules();
+ pf_source_purge();
pfi_kkif_purge();
}
CURVNET_RESTORE();
@@ -2712,6 +2996,7 @@ pf_unload_vnet_purge(void)
pf_purge_expired_states(0, V_pf_hashmask);
pf_purge_fragments(UINT_MAX);
pf_purge_expired_src_nodes();
+ pf_source_purge();
/*
* Now all kifs & rules should be unreferenced,
@@ -2817,6 +3102,7 @@ int
pf_remove_state(struct pf_kstate *s)
{
struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(s)];
+ struct pf_state_link *pfl;
NET_EPOCH_ASSERT();
PF_HASHROW_ASSERT(ih);
@@ -2858,6 +3144,63 @@ pf_remove_state(struct pf_kstate *s)
s->key[PF_SK_STACK]->proto == IPPROTO_TCP)
pf_set_protostate(s, PF_PEER_BOTH, TCPS_CLOSED);
+ while ((pfl = SLIST_FIRST(&s->linkage)) != NULL) {
+ struct pf_state_link_list *list;
+ unsigned int gen;
+
+ SLIST_REMOVE_HEAD(&s->linkage, pfl_linkage);
+
+ switch (pfl->pfl_type) {
+ case PF_STATE_LINK_TYPE_STATELIM: {
+ struct pf_statelim *stlim;
+
+ stlim = pf_statelim_find(s->statelim);
+ KASSERT(stlim != NULL,
+ ("pf_state %p pfl %p cannot find statelim %u", s,
+ pfl, s->statelim));
+
+ gen = pf_statelim_enter(stlim);
+ stlim->pfstlim_inuse--;
+ pf_statelim_leave(stlim, gen);
+
+ list = &stlim->pfstlim_states;
+ break;
+ }
+ case PF_STATE_LINK_TYPE_SOURCELIM: {
+ struct pf_sourcelim *srlim;
+ struct pf_source key, *sr;
+
+ srlim = pf_sourcelim_find(s->sourcelim);
+ KASSERT(srlim != NULL,
+ ("pf_state %p pfl %p cannot find sourcelim %u", s,
+ pfl, s->sourcelim));
+
+ pf_source_key(srlim, &key, s->key[PF_SK_WIRE]->af,
+ &s->key[PF_SK_WIRE]->addr[0 /* XXX or 1? */]);
+
+ sr = pf_source_find(srlim, &key);
+ KASSERT(sr != NULL,
+ ("pf_state %p pfl %p cannot find source in %u", s,
+ pfl, s->sourcelim));
+
+ gen = pf_sourcelim_enter(srlim);
+ srlim->pfsrlim_counters.inuse--;
+ pf_sourcelim_leave(srlim, gen);
+ pf_source_rele(sr);
+
+ list = &sr->pfsr_states;
+ break;
+ }
+ default:
+ panic("%s: unexpected link type on pfl %p", __func__,
+ pfl);
+ }
+
+ PF_STATE_LOCK_ASSERT(s);
+ TAILQ_REMOVE(list, pfl, pfl_link);
+ free(pfl, M_PF_STATE_LINK);
+ }
+
PF_HASHROW_UNLOCK(ih);
pf_detach_state(s);
@@ -5656,6 +5999,11 @@ pf_match_rule(struct pf_test_ctx *ctx, struct pf_kruleset *ruleset,
r = TAILQ_FIRST(ruleset->rules[PF_RULESET_FILTER].active.ptr);
while (r != NULL) {
+ struct pf_statelim *stlim = NULL;
+ struct pf_sourcelim *srlim = NULL;
+ struct pf_source *sr = NULL;
+ unsigned int gen;
+
if (ctx->pd->related_rule) {
*ctx->rm = ctx->pd->related_rule;
break;
@@ -5757,6 +6105,153 @@ pf_match_rule(struct pf_test_ctx *ctx, struct pf_kruleset *ruleset,
pf_osfp_fingerprint(pd, ctx->th),
r->os_fingerprint)),
TAILQ_NEXT(r, entries));
+ if (r->statelim.id != PF_STATELIM_ID_NONE) {
+ stlim = pf_statelim_find(r->statelim.id);
+
+ /*
+ * Treat a missing limiter like an exhausted limiter.
+ * There is no "backend" to get a resource out of
+ * so the rule can't create state.
+ */
+ PF_TEST_ATTRIB(stlim == NULL, TAILQ_NEXT(r, entries));
+
+ /*
+ * An overcommitted pool means this rule
+ * can't create state.
+ */
+ if (stlim->pfstlim_inuse >= stlim->pfstlim_limit) {
+ gen = pf_statelim_enter(stlim);
+ stlim->pfstlim_counters.hardlimited++;
+ pf_statelim_leave(stlim, gen);
+ if (r->statelim.limiter_action == PF_LIMITER_BLOCK) {
+ ctx->limiter_drop = 1;
+ REASON_SET(&ctx->reason, PFRES_MAXSTATES);
+ break; /* stop rule processing */
+ }
+ r = TAILQ_NEXT(r, entries);
+ continue;
+ }
+
+ /*
+ * Is access to the pool rate limited?
+ */
+ if (stlim->pfstlim_rate.limit != 0) {
+ struct timespec ts;
+ getnanouptime(&ts);
+ uint64_t diff = SEC_TO_NSEC(ts.tv_sec) +
+ ts.tv_nsec - stlim->pfstlim_rate_ts;
+
+ if (diff < stlim->pfstlim_rate_token) {
+ gen = pf_statelim_enter(stlim);
+ stlim->pfstlim_counters.ratelimited++;
+ pf_statelim_leave(stlim, gen);
+ if (r->statelim.limiter_action ==
+ PF_LIMITER_BLOCK) {
+ ctx->limiter_drop = 1;
+ REASON_SET(&ctx->reason,
+ PFRES_MAXSTATES);
+ /* stop rule processing */
+ break;
+ }
+ r = TAILQ_NEXT(r, entries);
+ continue;
+ }
+
+ if (diff > stlim->pfstlim_rate_bucket) {
+ stlim->pfstlim_rate_ts =
+ SEC_TO_NSEC(ts.tv_sec) + ts.tv_nsec -
+ stlim->pfstlim_rate_bucket;
+ }
+ }
+ }
+
+ if (r->sourcelim.id != PF_SOURCELIM_ID_NONE) {
+ struct pf_source key;
+
+ srlim = pf_sourcelim_find(r->sourcelim.id);
+
+ /*
+ * Treat a missing pool like an overcommitted pool.
+ * There is no "backend" to get a resource out of
+ * so the rule can't create state.
+ */
+ PF_TEST_ATTRIB(srlim == NULL, TAILQ_NEXT(r, entries));
+
+ pf_source_key(srlim, &key, ctx->pd->af,
+ ctx->pd->src);
+ sr = pf_source_find(srlim, &key);
+ if (sr != NULL) {
+ /*
+ * An overcommitted limiter means this rule
+ * can't create state.
+ */
+ if (sr->pfsr_inuse >= srlim->pfsrlim_limit) {
+ sr->pfsr_counters.hardlimited++;
+ gen = pf_sourcelim_enter(srlim);
+ srlim->pfsrlim_counters.hardlimited++;
+ pf_sourcelim_leave(srlim, gen);
+ if (r->sourcelim.limiter_action ==
+ PF_LIMITER_BLOCK) {
+ ctx->limiter_drop = 1;
+ REASON_SET(&ctx->reason,
+ PFRES_SRCLIMIT);
+ /* stop rule processing */
+ break;
+ }
+ r = TAILQ_NEXT(r, entries);
+ continue;
+ }
+
+ /*
+ * Is access to the pool rate limited?
+ */
+ if (srlim->pfsrlim_rate.limit != 0) {
+ struct timespec ts;
+ getnanouptime(&ts);
+ uint64_t diff = SEC_TO_NSEC(ts.tv_sec) +
+ ts.tv_nsec - sr->pfsr_rate_ts;
+
+ if (diff < srlim->pfsrlim_rate_token) {
+ sr->pfsr_counters.ratelimited++;
+ gen = pf_sourcelim_enter(srlim);
+ srlim->pfsrlim_counters
+ .ratelimited++;
+ pf_sourcelim_leave(srlim, gen);
+ if (r->sourcelim.limiter_action ==
+ PF_LIMITER_BLOCK) {
+ ctx->limiter_drop = 1;
+ REASON_SET(&ctx->reason,
+ PFRES_SRCLIMIT);
+ /* stop rules */
+ break;
+ }
+ r = TAILQ_NEXT(r, entries);
+ continue;
+ }
+
+ if (diff > srlim->pfsrlim_rate_bucket) {
+ sr->pfsr_rate_ts =
+ SEC_TO_NSEC(ts.tv_sec) + ts.tv_nsec -
+ srlim->pfsrlim_rate_bucket;
+ }
+ }
+ } else {
+ /*
+ * a new source entry will (should)
+ * admit a state.
+ */
+
+ if (srlim->pfsrlim_nsources >=
+ srlim->pfsrlim_entries) {
+ gen = pf_sourcelim_enter(srlim);
+ srlim->pfsrlim_counters.addrlimited++;
+ pf_sourcelim_leave(srlim, gen);
+ r = TAILQ_NEXT(r, entries);
+ continue;
+ }
+ }
+ }
+
/* must be last! */
if (r->pktrate.limit) {
PF_TEST_ATTRIB((pf_check_threshold(&r->pktrate)),
@@ -5833,6 +6328,13 @@ pf_match_rule(struct pf_test_ctx *ctx, struct pf_kruleset *ruleset,
* ruleset, where anchor belongs to.
*/
ctx->arsm = ctx->aruleset;
+ /*
+ * state/source pools
+ */
+
+ ctx->statelim = stlim;
+ ctx->sourcelim = srlim;
+ ctx->source = sr;
}
if (pd->act.log & PF_LOG_MATCHES)
pf_log_matches(pd, r, ctx->a, ruleset, match_rules);
@@ -5987,10 +6489,8 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm,
} else {
ruleset = &pf_main_ruleset;
rv = pf_match_rule(&ctx, ruleset, match_rules);
- if (rv == PF_TEST_FAIL) {
- /*
- * Reason has been set in pf_match_rule() already.
- */
+ if (rv == PF_TEST_FAIL || ctx.limiter_drop == 1) {
+ REASON_SET(reason, ctx.reason);
goto cleanup;
}
@@ -6085,6 +6585,13 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm,
return (action);
}
+ if (pd->proto == IPPROTO_TCP &&
+ r->keep_state == PF_STATE_SYNPROXY && pd->dir == PF_IN) {
+ action = pf_synproxy_ack(r, pd, sm, &ctx.act);
+ if (action != PF_PASS)
+ goto cleanup; /* PF_SYNPROXY_DROP */
+ }
+
nat64 = pd->af != pd->naf;
if (nat64) {
int ret;
@@ -6157,6 +6664,10 @@ pf_create_state(struct pf_krule *r, struct pf_test_ctx *ctx,
{
struct pf_pdesc *pd = ctx->pd;
struct pf_kstate *s = NULL;
+ struct pf_statelim *stlim = NULL;
+ struct pf_sourcelim *srlim = NULL;
+ struct pf_source *sr = NULL;
+ struct pf_state_link *pfl;
struct pf_ksrc_node *sns[PF_SN_MAX] = { NULL };
/*
* XXXKS: The hash for PF_SN_LIMIT and PF_SN_ROUTE should be the same
@@ -6219,6 +6730,7 @@ pf_create_state(struct pf_krule *r, struct pf_test_ctx *ctx,
s->nat_rule = ctx->nr;
s->anchor = ctx->a;
s->match_rules = *match_rules;
+ SLIST_INIT(&s->linkage);
memcpy(&s->act, &pd->act, sizeof(struct pf_rule_actions));
if (pd->act.allow_opts)
@@ -6334,6 +6846,98 @@ pf_create_state(struct pf_krule *r, struct pf_test_ctx *ctx,
KASSERT((ctx->sk != NULL && ctx->nk != NULL), ("%s: nr %p sk %p, nk %p",
__func__, ctx->nr, ctx->sk, ctx->nk));
+ stlim = ctx->statelim;
+ if (stlim != NULL) {
+ unsigned int gen;
+
+ pfl = malloc(sizeof(*pfl), M_PF_STATE_LINK, M_NOWAIT);
+ if (pfl == NULL) {
+ REASON_SET(&ctx->reason, PFRES_MEMORY);
+ goto csfailed;
+ }
+
+ gen = pf_statelim_enter(stlim);
+ stlim->pfstlim_counters.admitted++;
+ stlim->pfstlim_inuse++;
+ pf_statelim_leave(stlim, gen);
+
+ stlim->pfstlim_rate_ts += stlim->pfstlim_rate_token;
+
+ s->statelim = stlim->pfstlim_id;
+ pfl->pfl_state = s;
+ pfl->pfl_type = PF_STATE_LINK_TYPE_STATELIM;
+
+ TAILQ_INSERT_TAIL(&stlim->pfstlim_states, pfl, pfl_link);
+ SLIST_INSERT_HEAD(&s->linkage, pfl, pfl_linkage);
+ }
+
+ srlim = ctx->sourcelim;
+ if (srlim != NULL) {
+ unsigned int gen;
+
+ sr = ctx->source;
+ if (sr == NULL) {
+ sr = malloc(sizeof(*sr), M_PF_SOURCE_LIM, M_NOWAIT | M_ZERO);
+ if (sr == NULL) {
+ gen = pf_sourcelim_enter(srlim);
+ srlim->pfsrlim_counters.addrnomem++;
+ pf_sourcelim_leave(srlim, gen);
+ REASON_SET(&ctx->reason, PFRES_MEMORY);
+ goto csfailed;
+ }
+
+ sr->pfsr_parent = srlim;
+ pf_source_key(srlim, sr, ctx->pd->af, ctx->pd->src);
+ TAILQ_INIT(&sr->pfsr_states);
+
+ if (RB_INSERT(pf_source_tree, &srlim->pfsrlim_sources,
+ sr) != NULL) {
+ panic("%s: source pool %u (%p) "
+ "insert collision %p?!",
+ __func__, srlim->pfsrlim_id, srlim, sr);
+ }
+
+ if (RB_INSERT(pf_source_ioc_tree,
+ &srlim->pfsrlim_ioc_sources, sr) != NULL) {
+ panic("%s: source pool %u (%p) ioc "
+ "insert collision (%p)?!",
+ __func__, srlim->pfsrlim_id, srlim, sr);
+ }
+
+ sr->pfsr_empty_ts = time_uptime;
+ TAILQ_INSERT_TAIL(&pf_source_gc, sr, pfsr_empty_gc);
+
+ gen = pf_sourcelim_enter(srlim);
+ srlim->pfsrlim_nsources++;
+ srlim->pfsrlim_counters.addrallocs++;
+ pf_sourcelim_leave(srlim, gen);
+ } else {
+ MPASS(sr->pfsr_parent == srlim);
+ }
+
+ pfl = malloc(sizeof(*pfl), M_PF_STATE_LINK, M_NOWAIT);
+ if (pfl == NULL) {
+ REASON_SET(&ctx->reason, PFRES_MEMORY);
+ goto csfailed;
+ }
+
+ pf_source_used(sr);
+
+ sr->pfsr_counters.admitted++;
+
+ gen = pf_sourcelim_enter(srlim);
+ srlim->pfsrlim_counters.inuse++;
+ srlim->pfsrlim_counters.admitted++;
+ pf_sourcelim_leave(srlim, gen);
+
+ s->sourcelim = srlim->pfsrlim_id;
+ pfl->pfl_state = s;
+ pfl->pfl_type = PF_STATE_LINK_TYPE_SOURCELIM;
+
+ TAILQ_INSERT_TAIL(&sr->pfsr_states, pfl, pfl_link);
+ SLIST_INSERT_HEAD(&s->linkage, pfl, pfl_linkage);
+ }
+
/* Swap sk/nk for PF_OUT. */
if (pf_state_insert(BOUND_IFACE(s, pd), pd->kif,
(pd->dir == PF_IN) ? ctx->sk : ctx->nk,
@@ -6400,6 +7004,44 @@ csfailed:
drop:
if (s != NULL) {
+ struct pf_state_link *npfl;
+
+ SLIST_FOREACH_SAFE(pfl, &s->linkage, pfl_linkage, npfl) {
+ struct pf_state_link_list *list;
+ unsigned int gen;
+
+ /* stlim/srlim must be non-NULL here; a link of that type implies admission above */
+
+ switch (pfl->pfl_type) {
+ case PF_STATE_LINK_TYPE_STATELIM:
+ gen = pf_statelim_enter(stlim);
+ stlim->pfstlim_inuse--;
+ pf_statelim_leave(stlim, gen);
+
+ stlim->pfstlim_rate_ts -=
+ stlim->pfstlim_rate_token;
+ list = &stlim->pfstlim_states;
+ break;
+ case PF_STATE_LINK_TYPE_SOURCELIM:
+ gen = pf_sourcelim_enter(srlim);
+ srlim->pfsrlim_counters.inuse--;
+ pf_sourcelim_leave(srlim, gen);
+
+ sr->pfsr_rate_ts -= srlim->pfsrlim_rate_token;
+ pf_source_rele(sr);
+
+ list = &sr->pfsr_states;
+ break;
+ default:
+ panic("%s: unexpected link type on pfl %p",
+ __func__, pfl);
+ }
+
+ TAILQ_REMOVE(list, pfl, pfl_link);
+ PF_STATE_LOCK_ASSERT(s);
+ free(pfl, M_PF_STATE_LINK);
+ }
+
pf_src_tree_remove_state(s);
s->timeout = PFTM_UNLINKED;
pf_free_state(s);
@@ -7164,6 +7806,38 @@ pf_synproxy(struct pf_pdesc *pd, struct pf_kstate *state, u_short *reason)
return (PF_PASS);
}
+static __inline int
+pf_synproxy_ack(struct pf_krule *r, struct pf_pdesc *pd, struct pf_kstate **sm,
+ struct pf_rule_actions *act)
+{
+ struct tcphdr *th = &pd->hdr.tcp;
+ struct pf_kstate *s;
+ u_int16_t mss;
+ int rtid;
+ u_short reason;
+
+ if ((th->th_flags & (TH_SYN | TH_ACK)) != TH_SYN)
+ return (PF_PASS);
+
+ s = *sm;
+ rtid = act->rtableid;
+
+ pf_set_protostate(s, PF_PEER_SRC, PF_TCPS_PROXY_SRC);
+ s->src.seqhi = arc4random();
+ /* Find mss option */
+ mss = pf_get_mss(pd);
+ mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
+ mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
+ s->src.mss = mss;
+
+ pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport,
+ th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
+ TH_SYN | TH_ACK, 0, s->src.mss, 0, 1, 0, 0, r->rtableid, NULL);
+
+ REASON_SET(&reason, PFRES_SYNPROXY);
+ return (PF_SYNPROXY_DROP);
+}
+
static int
pf_test_state(struct pf_kstate **state, struct pf_pdesc *pd, u_short *reason)
{
@@ -11034,9 +11708,9 @@ pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0
* it here, before we do any NAT.
*/
if (af == AF_INET6 && dir == PF_OUT && pflags & PFIL_FWD &&
- IN6_LINKMTU(ifp) < pf_max_frag_size(*m0)) {
+ in6_ifmtu(ifp) < pf_max_frag_size(*m0)) {
PF_RULES_RUNLOCK();
- icmp6_error(*m0, ICMP6_PACKET_TOO_BIG, 0, IN6_LINKMTU(ifp));
+ icmp6_error(*m0, ICMP6_PACKET_TOO_BIG, 0, in6_ifmtu(ifp));
*m0 = NULL;
return (PF_DROP);
}
@@ -11287,11 +11961,10 @@ done:
pf_is_loopback(af, pd.dst))
pd.m->m_flags |= M_SKIP_FIREWALL;
- if (af == AF_INET && __predict_false(ip_divert_ptr != NULL) &&
- action == PF_PASS && r->divert.port && !PACKET_LOOPED(&pd)) {
+ if (action == PF_PASS && r->divert.port && !PACKET_LOOPED(&pd)) {
mtag = m_tag_alloc(MTAG_PF_DIVERT, 0,
sizeof(struct pf_divert_mtag), M_NOWAIT | M_ZERO);
- if (mtag != NULL) {
+ if (__predict_true(mtag != NULL && ip_divert_ptr != NULL)) {
((struct pf_divert_mtag *)(mtag+1))->port =
ntohs(r->divert.port);
((struct pf_divert_mtag *)(mtag+1))->idir =
@@ -11320,20 +11993,22 @@ done:
}
ip_divert_ptr(*m0, dir == PF_IN);
*m0 = NULL;
-
return (action);
- } else {
+ } else if (mtag == NULL) {
/* XXX: ipfw has the same behaviour! */
action = PF_DROP;
REASON_SET(&reason, PFRES_MEMORY);
pd.act.log = PF_LOG_FORCE;
DPFPRINTF(PF_DEBUG_MISC,
"pf: failed to allocate divert tag");
+ } else {
+ action = PF_DROP;
+ REASON_SET(&reason, PFRES_MATCH);
+ pd.act.log = PF_LOG_FORCE;
+ DPFPRINTF(PF_DEBUG_MISC,
+ "pf: divert(4) is not loaded");
}
}
- /* XXX: Anybody working on it?! */
- if (af == AF_INET6 && r->divert.port)
- printf("pf: divert(9) is not supported for IPv6\n");
/* this flag will need revising if the pkt is forwarded */
if (pd.pf_mtag)
diff --git a/sys/netpfil/pf/pf.h b/sys/netpfil/pf/pf.h
index 333e5b53b0a8..09bcd424db3e 100644
--- a/sys/netpfil/pf/pf.h
+++ b/sys/netpfil/pf/pf.h
@@ -501,6 +501,13 @@ struct pf_osfp_ioctl {
#define PF_ANCHOR_HIWAT 512
#define PF_OPTIMIZER_TABLE_PFX "__automatic_"
+enum {
+ PF_LIMITER_NOMATCH,
+ PF_LIMITER_BLOCK
+};
+
+#define PF_LIMITER_DEFAULT PF_LIMITER_BLOCK
+
struct pf_rule {
struct pf_rule_addr src;
struct pf_rule_addr dst;
diff --git a/sys/netpfil/pf/pf_ioctl.c b/sys/netpfil/pf/pf_ioctl.c
index ca1815984797..12a2189207f1 100644
--- a/sys/netpfil/pf/pf_ioctl.c
+++ b/sys/netpfil/pf/pf_ioctl.c
@@ -136,6 +136,12 @@ static int pf_import_kaltq(struct pfioc_altq_v1 *,
struct pf_altq *, size_t);
#endif /* ALTQ */
+static void pf_statelim_commit(void);
+static void pf_statelim_rollback(void);
+static int pf_sourcelim_check(void);
+static void pf_sourcelim_commit(void);
+static void pf_sourcelim_rollback(void);
+
VNET_DEFINE(struct pf_krule, pf_default_rule);
static __inline int pf_krule_compare(struct pf_krule *,
@@ -187,6 +193,7 @@ VNET_DEFINE(uma_zone_t, pf_tag_z);
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
MALLOC_DEFINE(M_PF, "pf", "pf(4)");
+MALLOC_DEFINE(M_PF_STATE_LIM, "pf_state_lim", "pf(4) state limiter");
#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
@@ -969,12 +976,6 @@ pf_qname2qid(const char *qname, bool add_new)
return (tagname2tag(&V_pf_qids, qname, add_new));
}
-static const char *
-pf_qid2qname(uint16_t qid)
-{
- return (tag2tagname(&V_pf_qids, qid));
-}
-
static void
pf_qid_unref(uint16_t qid)
{
@@ -1318,6 +1319,12 @@ pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
rs->rules[rs_num].inactive.rcount--;
}
rs->rules[rs_num].inactive.open = 0;
+
+ if (anchor[0])
+ return (0);
+
+ pf_statelim_rollback();
+ pf_sourcelim_rollback();
return (0);
}
@@ -1437,6 +1444,7 @@ pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
struct pf_krule_global *old_tree;
int error;
u_int32_t old_rcount;
+ bool is_main_ruleset = anchor[0] == '\0';
PF_RULES_WASSERT();
@@ -1449,6 +1457,9 @@ pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
/* Calculate checksum for the main ruleset */
if (rs == &pf_main_ruleset) {
+ error = pf_sourcelim_check();
+ if (error != 0)
+ return (error);
error = pf_setup_pfsync_matching(rs);
if (error != 0)
return (error);
@@ -1507,6 +1518,13 @@ pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
pf_remove_if_empty_kruleset(rs);
pf_rule_tree_free(old_tree);
+ /* statelim/sourcelim/queue defs only in the main ruleset */
+ if (! is_main_ruleset || rs_num != PF_RULESET_FILTER)
+ return (0);
+
+ pf_statelim_commit();
+ pf_sourcelim_commit();
+
return (0);
}
@@ -1589,6 +1607,748 @@ pf_addr_copyout(struct pf_addr_wrap *addr)
}
}
+int
+pf_statelim_add(const struct pfioc_statelim *ioc)
+{
+ struct pf_statelim *pfstlim;
+ int error;
+ size_t namelen;
+
+ if (ioc->id < PF_STATELIM_ID_MIN ||
+ ioc->id > PF_STATELIM_ID_MAX)
+ return (EINVAL);
+
+ if (ioc->limit < PF_STATELIM_LIMIT_MIN ||
+ ioc->limit > PF_STATELIM_LIMIT_MAX)
+ return (EINVAL);
+
+ if ((ioc->rate.limit == 0) != (ioc->rate.seconds == 0))
+ return (EINVAL);
+
+ namelen = strnlen(ioc->name, sizeof(ioc->name));
+ /* is the name from userland nul terminated? */
+ if (namelen == sizeof(ioc->name))
+ return (EINVAL);
+
+ pfstlim = malloc(sizeof(*pfstlim), M_PF_STATE_LIM, M_WAITOK | M_ZERO);
+ if (pfstlim == NULL)
+ return (ENOMEM);
+
+ pfstlim->pfstlim_id = ioc->id;
+ if (strlcpy(pfstlim->pfstlim_nm, ioc->name,
+ sizeof(pfstlim->pfstlim_nm)) >= sizeof(pfstlim->pfstlim_nm)) {
+ error = EINVAL;
+ goto free;
+ }
+ pfstlim->pfstlim_limit = ioc->limit;
+ pfstlim->pfstlim_rate.limit = ioc->rate.limit;
+ pfstlim->pfstlim_rate.seconds = ioc->rate.seconds;
+
+ if (pfstlim->pfstlim_rate.limit) {
+ uint64_t bucket = SEC_TO_NSEC(pfstlim->pfstlim_rate.seconds);
+ struct timespec ts;
+
+ getnanouptime(&ts);
+
+ pfstlim->pfstlim_rate_ts = SEC_TO_NSEC(ts.tv_sec) + ts.tv_nsec -
+ bucket;
+ pfstlim->pfstlim_rate_token = bucket /
+ pfstlim->pfstlim_rate.limit;
+ pfstlim->pfstlim_rate_bucket = bucket;
+ }
+
+ TAILQ_INIT(&pfstlim->pfstlim_states);
+ mtx_init(&pfstlim->pfstlim_lock, "pf state limit", NULL, MTX_DEF);
+
+ PF_RULES_WLOCK();
+ if (ioc->ticket != pf_main_ruleset.rules[PF_RULESET_FILTER].inactive.ticket) {
+ error = EBUSY;
+ goto unlock;
+ }
+
+ if (RB_INSERT(pf_statelim_id_tree, &V_pf_statelim_id_tree_inactive,
+ pfstlim) != NULL) {
+ error = EBUSY;
+ goto unlock;
+ }
+
+ if (RB_INSERT(pf_statelim_nm_tree, &V_pf_statelim_nm_tree_inactive,
+ pfstlim) != NULL) {
+ RB_REMOVE(pf_statelim_id_tree, &V_pf_statelim_id_tree_inactive,
+ pfstlim);
+ error = EBUSY;
+ goto unlock;
+ }
+
+ TAILQ_INSERT_HEAD(&V_pf_statelim_list_inactive, pfstlim, pfstlim_list);
+
+ PF_RULES_WUNLOCK();
+
+ return (0);
+
+unlock:
+ PF_RULES_WUNLOCK();
+
+free:
+ free(pfstlim, M_PF_STATE_LIM);
+
+ return (error);
+}
+
+static void
+pf_statelim_unlink(struct pf_statelim *pfstlim,
+ struct pf_state_link_list *garbage)
+{
+ struct pf_state_link *pfl;
+
+
+ /* unwire the links */
+ TAILQ_FOREACH(pfl, &pfstlim->pfstlim_states, pfl_link) {
+ struct pf_kstate *s = pfl->pfl_state;
+
+ /* if !rmst */
+ PF_STATE_LOCK(s);
+ s->statelim = 0;
+ SLIST_REMOVE(&s->linkage, pfl, pf_state_link, pfl_linkage);
+ PF_STATE_UNLOCK(s);
+ }
+
+ /* take the list away */
+ TAILQ_CONCAT(garbage, &pfstlim->pfstlim_states, pfl_link);
+ pfstlim->pfstlim_inuse = 0;
+}
+
+void
+pf_statelim_commit(void)
+{
+ struct pf_statelim *pfstlim, *npfstlim, *opfstlim;
+ struct pf_statelim_list l = TAILQ_HEAD_INITIALIZER(l);
+ struct pf_state_link_list garbage = TAILQ_HEAD_INITIALIZER(garbage);
+ struct pf_state_link *pfl, *npfl;
+
+ PF_RULES_WASSERT();
+
+ /* merge the new statelims into the current set */
+
+ /* start with an empty active list */
+ TAILQ_CONCAT(&l, &V_pf_statelim_list_active, pfstlim_list);
+
+ /* beware, the inactive bits get messed up here */
+
+ /* try putting pending statelims into the active tree */
+ TAILQ_FOREACH_SAFE(pfstlim, &V_pf_statelim_list_inactive, pfstlim_list,
+ npfstlim) {
+ opfstlim = RB_INSERT(pf_statelim_id_tree,
+ &V_pf_statelim_id_tree_active, pfstlim);
+ if (opfstlim != NULL) {
+ /* this statelim already exists, merge */
+ opfstlim->pfstlim_limit = pfstlim->pfstlim_limit;
+ opfstlim->pfstlim_rate.limit =
+ pfstlim->pfstlim_rate.limit;
+ opfstlim->pfstlim_rate.seconds =
+ pfstlim->pfstlim_rate.seconds;
+
+ opfstlim->pfstlim_rate_ts = pfstlim->pfstlim_rate_ts;
+ opfstlim->pfstlim_rate_token =
+ pfstlim->pfstlim_rate_token;
+ opfstlim->pfstlim_rate_bucket =
+ pfstlim->pfstlim_rate_bucket;
+
+ memcpy(opfstlim->pfstlim_nm, pfstlim->pfstlim_nm,
+ sizeof(opfstlim->pfstlim_nm));
+
+ /* use the existing statelim instead */
+ free(pfstlim, M_PF_STATE_LIM);
+ TAILQ_REMOVE(&l, opfstlim, pfstlim_list);
+ pfstlim = opfstlim;
+ }
+
+ TAILQ_INSERT_TAIL(&V_pf_statelim_list_active, pfstlim,
+ pfstlim_list);
+ }
+
+ /* clean up the now unused statelims from the old set */
+ TAILQ_FOREACH_SAFE(pfstlim, &l, pfstlim_list, npfstlim) {
+ pf_statelim_unlink(pfstlim, &garbage);
+
+ RB_REMOVE(pf_statelim_id_tree, &V_pf_statelim_id_tree_active,
+ pfstlim);
+
+ free(pfstlim, M_PF_STATE_LIM);
+ }
+
+ /* fix up the inactive tree */
+ RB_INIT(&V_pf_statelim_id_tree_inactive);
+ RB_INIT(&V_pf_statelim_nm_tree_inactive);
+ TAILQ_INIT(&V_pf_statelim_list_inactive);
+
+ TAILQ_FOREACH_SAFE(pfl, &garbage, pfl_link, npfl)
+ free(pfl, M_PF_STATE_LINK);
+}
+
+static void
+pf_sourcelim_unlink(struct pf_sourcelim *pfsrlim,
+ struct pf_state_link_list *garbage)
+{
+ extern struct pf_source_list pf_source_gc;
+ struct pf_source *pfsr;
+ struct pf_state_link *pfl;
+
+ PF_RULES_WASSERT();
+
+ while ((pfsr = RB_ROOT(&pfsrlim->pfsrlim_sources)) != NULL) {
+ RB_REMOVE(pf_source_tree, &pfsrlim->pfsrlim_sources, pfsr);
+ RB_REMOVE(pf_source_ioc_tree, &pfsrlim->pfsrlim_ioc_sources,
+ pfsr);
+ if (pfsr->pfsr_inuse == 0)
+ TAILQ_REMOVE(&pf_source_gc, pfsr, pfsr_empty_gc);
+
+ /* unwire the links */
+ TAILQ_FOREACH(pfl, &pfsr->pfsr_states, pfl_link) {
+ struct pf_kstate *s = pfl->pfl_state;
+
+ PF_STATE_LOCK(s);
+ /* if !rmst */
+ s->sourcelim = 0;
+ SLIST_REMOVE(&s->linkage, pfl, pf_state_link,
+ pfl_linkage);
+ PF_STATE_UNLOCK(s);
+ }
+
+ /* take the list away */
+ TAILQ_CONCAT(garbage, &pfsr->pfsr_states, pfl_link);
+
+ free(pfsr, M_PF_SOURCE_LIM);
+ }
+}
+
+int
+pf_sourcelim_check(void)
+{
+ struct pf_sourcelim *pfsrlim, *npfsrlim;
+
+ PF_RULES_WASSERT();
+
+ /* check if we can merge */
+
+ TAILQ_FOREACH(pfsrlim, &V_pf_sourcelim_list_inactive, pfsrlim_list) {
+ npfsrlim = RB_FIND(pf_sourcelim_id_tree,
+ &V_pf_sourcelim_id_tree_active, pfsrlim);
+
+ /* new config, no conflict */
+ if (npfsrlim == NULL)
+ continue;
+
+ /* nothing is tracked at the moment, no conflict */
+ if (RB_EMPTY(&npfsrlim->pfsrlim_sources))
+ continue;
+
+ if (strcmp(npfsrlim->pfsrlim_overload.name,
+ pfsrlim->pfsrlim_overload.name) != 0)
+ return (EBUSY);
+
+ /*
+ * we should allow the prefixlens to get shorter
+ * and merge pf_source entries.
+ */
+
+ if ((npfsrlim->pfsrlim_ipv4_prefix !=
+ pfsrlim->pfsrlim_ipv4_prefix) ||
+ (npfsrlim->pfsrlim_ipv6_prefix !=
+ pfsrlim->pfsrlim_ipv6_prefix))
+ return (EBUSY);
+ }
+
+ return (0);
+}
+
+void
+pf_sourcelim_commit(void)
+{
+ struct pf_sourcelim *pfsrlim, *npfsrlim, *opfsrlim;
+ struct pf_sourcelim_list l = TAILQ_HEAD_INITIALIZER(l);
+ struct pf_state_link_list garbage = TAILQ_HEAD_INITIALIZER(garbage);
+ struct pf_state_link *pfl, *npfl;
+
+ PF_RULES_WASSERT();
+
+ /* merge the new sourcelims into the current set */
+
+ /* start with an empty active list */
+ TAILQ_CONCAT(&l, &V_pf_sourcelim_list_active, pfsrlim_list);
+
+ /* beware, the inactive bits get messed up here */
+
+ /* try putting pending sourcelims into the active tree */
+ TAILQ_FOREACH_SAFE(pfsrlim, &V_pf_sourcelim_list_inactive, pfsrlim_list,
+ npfsrlim) {
+ opfsrlim = RB_INSERT(pf_sourcelim_id_tree,
+ &V_pf_sourcelim_id_tree_active, pfsrlim);
+ if (opfsrlim != NULL) {
+ /* this sourcelim already exists, merge */
+ opfsrlim->pfsrlim_entries = pfsrlim->pfsrlim_entries;
+ opfsrlim->pfsrlim_limit = pfsrlim->pfsrlim_limit;
+ opfsrlim->pfsrlim_ipv4_prefix =
+ pfsrlim->pfsrlim_ipv4_prefix;
+ opfsrlim->pfsrlim_ipv6_prefix =
+ pfsrlim->pfsrlim_ipv6_prefix;
+ opfsrlim->pfsrlim_rate.limit =
+ pfsrlim->pfsrlim_rate.limit;
+ opfsrlim->pfsrlim_rate.seconds =
+ pfsrlim->pfsrlim_rate.seconds;
+
+ opfsrlim->pfsrlim_ipv4_mask =
+ pfsrlim->pfsrlim_ipv4_mask;
+ opfsrlim->pfsrlim_ipv6_mask =
+ pfsrlim->pfsrlim_ipv6_mask;
+
+ /* keep the existing pfsrlim_rate_ts */
+
+ opfsrlim->pfsrlim_rate_token =
+ pfsrlim->pfsrlim_rate_token;
+ opfsrlim->pfsrlim_rate_bucket =
+ pfsrlim->pfsrlim_rate_bucket;
+
+ if (opfsrlim->pfsrlim_overload.table != NULL) {
+ pfr_detach_table(
+ opfsrlim->pfsrlim_overload.table);
+ }
+
+ strlcpy(opfsrlim->pfsrlim_overload.name,
+ pfsrlim->pfsrlim_overload.name,
+ sizeof(opfsrlim->pfsrlim_overload.name));
+ opfsrlim->pfsrlim_overload.hwm =
+ pfsrlim->pfsrlim_overload.hwm;
+ opfsrlim->pfsrlim_overload.lwm =
+ pfsrlim->pfsrlim_overload.lwm;
+ opfsrlim->pfsrlim_overload.table =
+ pfsrlim->pfsrlim_overload.table;
+
+ memcpy(opfsrlim->pfsrlim_nm, pfsrlim->pfsrlim_nm,
+ sizeof(opfsrlim->pfsrlim_nm));
+
+ /* use the existing sourcelim instead */
+ free(pfsrlim, M_PF_SOURCE_LIM);
+ TAILQ_REMOVE(&l, opfsrlim, pfsrlim_list);
+ pfsrlim = opfsrlim;
+ }
+
+ TAILQ_INSERT_TAIL(&V_pf_sourcelim_list_active, pfsrlim,
+ pfsrlim_list);
+ }
+
+ /* clean up the now unused sourcelims from the old set */
+ TAILQ_FOREACH_SAFE(pfsrlim, &l, pfsrlim_list, npfsrlim) {
+ pf_sourcelim_unlink(pfsrlim, &garbage);
+
+ RB_REMOVE(pf_sourcelim_id_tree, &V_pf_sourcelim_id_tree_active,
+ pfsrlim);
+
+ if (pfsrlim->pfsrlim_overload.table != NULL)
+ pfr_detach_table(pfsrlim->pfsrlim_overload.table);
+
+ free(pfsrlim, M_PF_SOURCE_LIM);
+ }
+
+ /* fix up the inactive tree */
+ RB_INIT(&V_pf_sourcelim_id_tree_inactive);
+ RB_INIT(&V_pf_sourcelim_nm_tree_inactive);
+ TAILQ_INIT(&V_pf_sourcelim_list_inactive);
+
+ TAILQ_FOREACH_SAFE(pfl, &garbage, pfl_link, npfl)
+ free(pfl, M_PF_STATE_LINK);
+}
+
+void
+pf_statelim_rollback(void)
+{
+ struct pf_statelim *pfstlim, *npfstlim;
+
+ PF_RULES_WASSERT();
+
+ TAILQ_FOREACH_SAFE(pfstlim, &V_pf_statelim_list_inactive, pfstlim_list,
+ npfstlim)
+ free(pfstlim, M_PF_STATE_LIM);
+
+ TAILQ_INIT(&V_pf_statelim_list_inactive);
+ RB_INIT(&V_pf_statelim_id_tree_inactive);
+ RB_INIT(&V_pf_statelim_nm_tree_inactive);
+}
+
+struct pf_statelim *
+pf_statelim_rb_find(struct pf_statelim_id_tree *tree, struct pf_statelim *key)
+{
+ PF_RULES_ASSERT();
+
+ return (RB_FIND(pf_statelim_id_tree, tree, key));
+}
+
+struct pf_statelim *
+pf_statelim_rb_nfind(struct pf_statelim_id_tree *tree, struct pf_statelim *key)
+{
+ PF_RULES_ASSERT();
+
+ return (RB_NFIND(pf_statelim_id_tree, tree, key));
+}
+
+int
+pf_statelim_get(struct pfioc_statelim *ioc,
+ struct pf_statelim *(*rbt_op)(struct pf_statelim_id_tree *,
+ struct pf_statelim *))
+{
+ struct pf_statelim key = { .pfstlim_id = ioc->id };
+ struct pf_statelim *pfstlim;
+ int error = 0;
+ PF_RULES_RLOCK_TRACKER;
+
+ PF_RULES_RLOCK();
+
+ pfstlim = (*rbt_op)(&V_pf_statelim_id_tree_active, &key);
+ if (pfstlim == NULL) {
+ error = ENOENT;
+ goto unlock;
+ }
+
+ ioc->id = pfstlim->pfstlim_id;
+ ioc->limit = pfstlim->pfstlim_limit;
+ ioc->rate.limit = pfstlim->pfstlim_rate.limit;
+ ioc->rate.seconds = pfstlim->pfstlim_rate.seconds;
+ CTASSERT(sizeof(ioc->name) == sizeof(pfstlim->pfstlim_nm));
+ memcpy(ioc->name, pfstlim->pfstlim_nm, sizeof(ioc->name));
+
+ ioc->inuse = pfstlim->pfstlim_inuse;
+ ioc->admitted = pfstlim->pfstlim_counters.admitted;
+ ioc->hardlimited = pfstlim->pfstlim_counters.hardlimited;
+ ioc->ratelimited = pfstlim->pfstlim_counters.ratelimited;
+
+unlock:
+ PF_RULES_RUNLOCK();
+
+ return (error);
+}
+
+int
+pf_sourcelim_add(const struct pfioc_sourcelim *ioc)
+{
+ struct pf_sourcelim *pfsrlim;
+ int error;
+ size_t namelen, tablelen;
+ unsigned int prefix;
+ size_t i;
+
+ if (ioc->id < PF_SOURCELIM_ID_MIN ||
+ ioc->id > PF_SOURCELIM_ID_MAX)
+ return (EINVAL);
+
+ if (ioc->entries < 1)
+ return (EINVAL);
+
+ if (ioc->limit < 1)
+ return (EINVAL);
+
+ if ((ioc->rate.limit == 0) != (ioc->rate.seconds == 0))
+ return (EINVAL);
+
+ if (ioc->inet_prefix > 32)
+ return (EINVAL);
+ if (ioc->inet6_prefix > 128)
+ return (EINVAL);
+
+ namelen = strnlen(ioc->name, sizeof(ioc->name));
+ /* is the name from userland nul terminated? */
+ if (namelen == sizeof(ioc->name))
+ return (EINVAL);
+
+ tablelen = strnlen(ioc->overload_tblname,
+ sizeof(ioc->overload_tblname));
+ /* is the name from userland nul terminated? */
+ if (tablelen == sizeof(ioc->overload_tblname))
+ return (EINVAL);
+ if (tablelen != 0) {
+ if (ioc->overload_hwm == 0)
+ return (EINVAL);
+
+ if (ioc->overload_hwm < ioc->overload_lwm)
+ return (EINVAL);
+ }
+
+ pfsrlim = malloc(sizeof(*pfsrlim), M_PF_SOURCE_LIM, M_WAITOK | M_ZERO);
+ if (pfsrlim == NULL)
+ return (ENOMEM);
+
+ pfsrlim->pfsrlim_id = ioc->id;
+ pfsrlim->pfsrlim_entries = ioc->entries;
+ pfsrlim->pfsrlim_limit = ioc->limit;
+ pfsrlim->pfsrlim_ipv4_prefix = ioc->inet_prefix;
+ pfsrlim->pfsrlim_ipv6_prefix = ioc->inet6_prefix;
+ pfsrlim->pfsrlim_rate.limit = ioc->rate.limit;
+ pfsrlim->pfsrlim_rate.seconds = ioc->rate.seconds;
+ if (strlcpy(pfsrlim->pfsrlim_overload.name, ioc->overload_tblname,
+ sizeof(pfsrlim->pfsrlim_overload.name)) >=
+ sizeof(pfsrlim->pfsrlim_overload.name)) {
+ error = EINVAL;
+ goto free;
+ }
+ pfsrlim->pfsrlim_overload.hwm = ioc->overload_hwm;
+ pfsrlim->pfsrlim_overload.lwm = ioc->overload_lwm;
+ if (strlcpy(pfsrlim->pfsrlim_nm, ioc->name,
+ sizeof(pfsrlim->pfsrlim_nm)) >= sizeof(pfsrlim->pfsrlim_nm)) {
+ error = EINVAL;
+ goto free;
+ }
+
+ if (pfsrlim->pfsrlim_rate.limit) {
+ uint64_t bucket = pfsrlim->pfsrlim_rate.seconds * 1000000000ULL;
+
+ pfsrlim->pfsrlim_rate_token = bucket /
+ pfsrlim->pfsrlim_rate.limit;
+ pfsrlim->pfsrlim_rate_bucket = bucket;
+ }
+
+ pfsrlim->pfsrlim_ipv4_mask.v4.s_addr = htonl(
+ 0xffffffff << (32 - pfsrlim->pfsrlim_ipv4_prefix));
+
+ prefix = pfsrlim->pfsrlim_ipv6_prefix;
+ for (i = 0; i < nitems(pfsrlim->pfsrlim_ipv6_mask.addr32); i++) {
+ if (prefix == 0) {
+ /* the memory is already zeroed */
+ break;
+ }
+ if (prefix < 32) {
+ pfsrlim->pfsrlim_ipv6_mask.addr32[i] = htonl(
+ 0xffffffff << (32 - prefix));
+ break;
+ }
+
+ pfsrlim->pfsrlim_ipv6_mask.addr32[i] = htonl(0xffffffff);
+ prefix -= 32;
+ }
+
+ RB_INIT(&pfsrlim->pfsrlim_sources);
+ mtx_init(&pfsrlim->pfsrlim_lock, "pf source limit", NULL, MTX_DEF);
+
+ PF_RULES_WLOCK();
+ if (ioc->ticket != pf_main_ruleset.rules[PF_RULESET_FILTER].inactive.ticket) {
+ error = EBUSY;
+ goto unlock;
+ }
+
+ if (pfsrlim->pfsrlim_overload.name[0] != '\0') {
+ pfsrlim->pfsrlim_overload.table = pfr_attach_table(
+ &pf_main_ruleset, pfsrlim->pfsrlim_overload.name);
+ if (pfsrlim->pfsrlim_overload.table == NULL) {
+ error = EINVAL;
+ goto unlock;
+ }
+ }
+
+ if (RB_INSERT(pf_sourcelim_id_tree, &V_pf_sourcelim_id_tree_inactive,
+ pfsrlim) != NULL) {
+ error = EBUSY;
+ goto unlock;
+ }
+
+ if (RB_INSERT(pf_sourcelim_nm_tree, &V_pf_sourcelim_nm_tree_inactive,
+ pfsrlim) != NULL) {
+ RB_REMOVE(pf_sourcelim_id_tree, &V_pf_sourcelim_id_tree_inactive,
+ pfsrlim);
+ error = EBUSY;
+ goto unlock;
+ }
+
+ TAILQ_INSERT_HEAD(&V_pf_sourcelim_list_inactive, pfsrlim, pfsrlim_list);
+
+ PF_RULES_WUNLOCK();
+
+ return (0);
+
+unlock:
+ PF_RULES_WUNLOCK();
+
+free:
+ free(pfsrlim, M_PF_SOURCE_LIM);
+
+ return (error);
+}
+
+void
+pf_sourcelim_rollback(void)
+{
+ struct pf_sourcelim *pfsrlim, *npfsrlim;
+
+ PF_RULES_WASSERT();
+
+ TAILQ_FOREACH_SAFE(pfsrlim, &V_pf_sourcelim_list_inactive, pfsrlim_list,
+ npfsrlim) {
+ if (pfsrlim->pfsrlim_overload.table != NULL)
+ pfr_detach_table(pfsrlim->pfsrlim_overload.table);
+
+ free(pfsrlim, M_PF_SOURCE_LIM);
+ }
+
+ TAILQ_INIT(&V_pf_sourcelim_list_inactive);
+ RB_INIT(&V_pf_sourcelim_id_tree_inactive);
+ RB_INIT(&V_pf_sourcelim_nm_tree_inactive);
+}
+
+struct pf_sourcelim *
+pf_sourcelim_rb_find(struct pf_sourcelim_id_tree *tree,
+ struct pf_sourcelim *key)
+{
+ PF_RULES_ASSERT();
+ return (RB_FIND(pf_sourcelim_id_tree, tree, key));
+}
+
+struct pf_sourcelim *
+pf_sourcelim_rb_nfind(struct pf_sourcelim_id_tree *tree,
+ struct pf_sourcelim *key)
+{
+ PF_RULES_ASSERT();
+ return (RB_NFIND(pf_sourcelim_id_tree, tree, key));
+}
+
+int
+pf_sourcelim_get(struct pfioc_sourcelim *ioc,
+ struct pf_sourcelim *(*rbt_op)(struct pf_sourcelim_id_tree *,
+ struct pf_sourcelim *))
+{
+ struct pf_sourcelim key = { .pfsrlim_id = ioc->id };
+ struct pf_sourcelim *pfsrlim;
+ int error = 0;
+ PF_RULES_RLOCK_TRACKER;
+
+ PF_RULES_RLOCK();
+
+ pfsrlim = (*rbt_op)(&V_pf_sourcelim_id_tree_active, &key);
+ if (pfsrlim == NULL) {
+ error = ESRCH;
+ goto unlock;
+ }
+
+ ioc->id = pfsrlim->pfsrlim_id;
+ ioc->entries = pfsrlim->pfsrlim_entries;
+ ioc->limit = pfsrlim->pfsrlim_limit;
+ ioc->inet_prefix = pfsrlim->pfsrlim_ipv4_prefix;
+ ioc->inet6_prefix = pfsrlim->pfsrlim_ipv6_prefix;
+ ioc->rate.limit = pfsrlim->pfsrlim_rate.limit;
+ ioc->rate.seconds = pfsrlim->pfsrlim_rate.seconds;
+
+ CTASSERT(sizeof(ioc->overload_tblname) ==
+ sizeof(pfsrlim->pfsrlim_overload.name));
+ memcpy(ioc->overload_tblname, pfsrlim->pfsrlim_overload.name,
+ sizeof(pfsrlim->pfsrlim_overload.name));
+ ioc->overload_hwm = pfsrlim->pfsrlim_overload.hwm;
+ ioc->overload_lwm = pfsrlim->pfsrlim_overload.lwm;
+
+ CTASSERT(sizeof(ioc->name) == sizeof(pfsrlim->pfsrlim_nm));
+ memcpy(ioc->name, pfsrlim->pfsrlim_nm, sizeof(ioc->name));
+ /* XXX overload table contents are not exported to userland yet */
+
+ ioc->nentries = pfsrlim->pfsrlim_nsources;
+
+ ioc->inuse = pfsrlim->pfsrlim_counters.inuse;
+ ioc->addrallocs = pfsrlim->pfsrlim_counters.addrallocs;
+ ioc->addrnomem = pfsrlim->pfsrlim_counters.addrnomem;
+ ioc->admitted = pfsrlim->pfsrlim_counters.admitted;
+ ioc->addrlimited = pfsrlim->pfsrlim_counters.addrlimited;
+ ioc->hardlimited = pfsrlim->pfsrlim_counters.hardlimited;
+ ioc->ratelimited = pfsrlim->pfsrlim_counters.ratelimited;
+
+unlock:
+ PF_RULES_RUNLOCK();
+
+ return (error);
+}
+
+struct pf_source *
+pf_source_rb_find(struct pf_source_ioc_tree *tree,
+ struct pf_source *key)
+{
+ PF_RULES_ASSERT();
+
+ return (RB_FIND(pf_source_ioc_tree, tree, key));
+}
+
+struct pf_source *
+pf_source_rb_nfind(struct pf_source_ioc_tree *tree,
+ struct pf_source *key)
+{
+ PF_RULES_ASSERT();
+
+ return (RB_NFIND(pf_source_ioc_tree, tree, key));
+}
+
+int
+pf_source_clr(struct pfioc_source_kill *ioc)
+{
+ extern struct pf_source_list pf_source_gc;
+ struct pf_sourcelim plkey = {
+ .pfsrlim_id = ioc->id,
+ };
+ struct pf_source skey = {
+ .pfsr_af = ioc->af,
+ .pfsr_rdomain = ioc->rdomain,
+ .pfsr_addr = ioc->addr,
+ };
+ struct pf_sourcelim *pfsrlim;
+ struct pf_source *pfsr;
+ struct pf_state_link *pfl, *npfl;
+ int error = 0;
+ unsigned int gen;
+
+ if (ioc->rmstates) {
+ /* XXX userland wants the states removed too */
+ return (EOPNOTSUPP);
+ }
+
+ PF_RULES_WLOCK();
+
+ pfsrlim = pf_sourcelim_rb_find(&V_pf_sourcelim_id_tree_active, &plkey);
+ if (pfsrlim == NULL) {
+ error = ESRCH;
+ goto unlock;
+ }
+
+ pfsr = pf_source_rb_find(&pfsrlim->pfsrlim_ioc_sources, &skey);
+ if (pfsr == NULL) {
+ error = ENOENT;
+ goto unlock;
+ }
+
+ RB_REMOVE(pf_source_tree, &pfsrlim->pfsrlim_sources, pfsr);
+ RB_REMOVE(pf_source_ioc_tree, &pfsrlim->pfsrlim_ioc_sources, pfsr);
+ if (pfsr->pfsr_inuse == 0)
+ TAILQ_REMOVE(&pf_source_gc, pfsr, pfsr_empty_gc);
+
+ gen = pf_sourcelim_enter(pfsrlim);
+ pfsrlim->pfsrlim_nsources--;
+ pfsrlim->pfsrlim_counters.inuse -= pfsr->pfsr_inuse;
+ pf_sourcelim_leave(pfsrlim, gen);
+
+ /* unwire the links */
+ TAILQ_FOREACH(pfl, &pfsr->pfsr_states, pfl_link) {
+ struct pf_kstate *st = pfl->pfl_state;
+
+ /* if !rmst */
+ st->sourcelim = 0;
+ SLIST_REMOVE(&st->linkage, pfl, pf_state_link, pfl_linkage);
+ }
+
+ PF_RULES_WUNLOCK();
+
+ TAILQ_FOREACH_SAFE(pfl, &pfsr->pfsr_states, pfl_link, npfl)
+ free(pfl, M_PF_STATE_LINK);
+
+ free(pfsr, M_PF_SOURCE_LIM);
+
+ return (0);
+
+unlock:
+ PF_RULES_WUNLOCK();
+
+ return (error);
+}
+
static void
pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
{
@@ -2156,6 +2916,23 @@ pf_validate_range(uint8_t op, uint16_t port[2])
return 0;
}
+/*
+ * Validate a rule's limiter action.  Only PF_LIMITER_NOMATCH and
+ * PF_LIMITER_BLOCK are accepted; returns 0 when valid, 1 otherwise.
+ */
+static int
+pf_chk_limiter_action(int limiter_action)
+{
+ switch (limiter_action) {
+ case PF_LIMITER_NOMATCH:
+ case PF_LIMITER_BLOCK:
+ return (0);
+ default:
+ return (1);
+ }
+}
+
int
pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
uint32_t pool_ticket, const char *anchor, const char *anchor_call,
@@ -2180,6 +2957,9 @@ pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
ERROUT_UNLOCKED(EINVAL);
if (pf_validate_range(rule->dst.port_op, rule->dst.port))
ERROUT_UNLOCKED(EINVAL);
+ if (pf_chk_limiter_action(rule->statelim.limiter_action) ||
+ pf_chk_limiter_action(rule->sourcelim.limiter_action))
+ ERROUT_UNLOCKED(EINVAL);
if (rule->ifname[0])
kif = pf_kkif_create(M_WAITOK);
diff --git a/sys/netpfil/pf/pf_nl.c b/sys/netpfil/pf/pf_nl.c
index 4845342563ce..7a7655d7d9c8 100644
--- a/sys/netpfil/pf/pf_nl.c
+++ b/sys/netpfil/pf/pf_nl.c
@@ -100,6 +100,7 @@ static bool
dump_state_peer(struct nl_writer *nw, int attr, const struct pf_state_peer *peer)
{
int off = nlattr_add_nested(nw, attr);
+
if (off == 0)
return (false);
@@ -129,6 +130,7 @@ static bool
dump_state_key(struct nl_writer *nw, int attr, const struct pf_state_key *key)
{
int off = nlattr_add_nested(nw, attr);
+
if (off == 0)
return (false);
@@ -430,6 +432,9 @@ nlattr_add_addr_wrap(struct nl_writer *nw, int attrtype, struct pf_addr_wrap *a)
{
int off = nlattr_add_nested(nw, attrtype);
+ if (off == 0)
+ return (false);
+
nlattr_add_in6_addr(nw, PF_AT_ADDR, &a->v.a.addr.v6);
nlattr_add_in6_addr(nw, PF_AT_MASK, &a->v.a.mask.v6);
nlattr_add_u8(nw, PF_AT_TYPE, a->type);
@@ -465,6 +470,9 @@ nlattr_add_rule_addr(struct nl_writer *nw, int attrtype, struct pf_rule_addr *r)
struct pf_addr_wrap aw = {0};
int off = nlattr_add_nested(nw, attrtype);
+ if (off == 0)
+ return (false);
+
bcopy(&(r->addr), &aw, sizeof(struct pf_addr_wrap));
pf_addr_copyout(&aw);
@@ -493,6 +501,9 @@ nlattr_add_mape_portset(struct nl_writer *nw, int attrtype, const struct pf_mape
{
int off = nlattr_add_nested(nw, attrtype);
+ if (off == 0)
+ return (false);
+
nlattr_add_u8(nw, PF_MET_OFFSET, m->offset);
nlattr_add_u8(nw, PF_MET_PSID_LEN, m->psidlen);
nlattr_add_u16(nw, PF_MET_PSID, m->psid);
@@ -555,6 +566,9 @@ nlattr_add_labels(struct nl_writer *nw, int attrtype, const struct pf_krule *r)
int off = nlattr_add_nested(nw, attrtype);
int i = 0;
+ if (off == 0)
+ return (false);
+
while (r->label[i][0] != 0
&& i < PF_RULE_MAX_LABEL_COUNT) {
nlattr_add_string(nw, PF_LT_LABEL, r->label[i]);
@@ -584,6 +598,9 @@ nlattr_add_pool(struct nl_writer *nw, int attrtype, const struct pf_kpool *pool)
{
int off = nlattr_add_nested(nw, attrtype);
+ if (off == 0)
+ return (false);
+
nlattr_add(nw, PF_PT_KEY, sizeof(struct pf_poolhashkey), &pool->key);
nlattr_add_in6_addr(nw, PF_PT_COUNTER, (const struct in6_addr *)&pool->counter);
nlattr_add_u32(nw, PF_PT_TBLIDX, pool->tblidx);
@@ -611,6 +628,9 @@ nlattr_add_rule_uid(struct nl_writer *nw, int attrtype, const struct pf_rule_uid
{
int off = nlattr_add_nested(nw, attrtype);
+ if (off == 0)
+ return (false);
+
nlattr_add_u32(nw, PF_RUT_UID_LOW, u->uid[0]);
nlattr_add_u32(nw, PF_RUT_UID_HIGH, u->uid[1]);
nlattr_add_u8(nw, PF_RUT_OP, u->op);
@@ -671,6 +691,9 @@ nlattr_add_timeout(struct nl_writer *nw, int attrtype, uint32_t *timeout)
{
int off = nlattr_add_nested(nw, attrtype);
+ if (off == 0)
+ return (false);
+
for (int i = 0; i < PFTM_MAX; i++)
nlattr_add_u32(nw, PF_RT_TIMEOUT, timeout[i]);
@@ -761,6 +784,10 @@ static const struct nlattr_parser nla_p_rule[] = {
{ .type = PF_RT_MAX_PKT_SIZE, .off = _OUT(max_pkt_size), .cb = nlattr_get_uint16 },
{ .type = PF_RT_TYPE_2, .off = _OUT(type), .cb = nlattr_get_uint16 },
{ .type = PF_RT_CODE_2, .off = _OUT(code), .cb = nlattr_get_uint16 },
+ { .type = PF_RT_STATE_LIMIT, .off = _OUT(statelim.id), .cb = nlattr_get_uint8 },
+ { .type = PF_RT_SOURCE_LIMIT, .off = _OUT(sourcelim.id), .cb = nlattr_get_uint8 },
+ { .type = PF_RT_STATE_LIMIT_ACTION, .off = _OUT(statelim.limiter_action), .cb = nlattr_get_uint32 },
+ { .type = PF_RT_SOURCE_LIMIT_ACTION, .off = _OUT(sourcelim.limiter_action), .cb = nlattr_get_uint32 },
};
NL_DECLARE_ATTR_PARSER(rule_parser, nla_p_rule);
#undef _OUT
@@ -1018,6 +1045,10 @@ pf_handle_getrule(struct nlmsghdr *hdr, struct nl_pstate *npt)
nlattr_add_u64(nw, PF_RT_SRC_NODES_ROUTE, counter_u64_fetch(rule->src_nodes[PF_SN_ROUTE]));
nlattr_add_pf_threshold(nw, PF_RT_PKTRATE, &rule->pktrate);
nlattr_add_time_t(nw, PF_RT_EXPTIME, time_second - (time_uptime - rule->exptime));
+ nlattr_add_u8(nw, PF_RT_STATE_LIMIT, rule->statelim.id);
+ nlattr_add_u32(nw, PF_RT_STATE_LIMIT_ACTION, rule->statelim.limiter_action);
+ nlattr_add_u8(nw, PF_RT_SOURCE_LIMIT, rule->sourcelim.id);
+ nlattr_add_u32(nw, PF_RT_SOURCE_LIMIT_ACTION, rule->sourcelim.limiter_action);
error = pf_kanchor_copyout(ruleset, rule, anchor_call, sizeof(anchor_call));
MPASS(error == 0);
@@ -1144,6 +1175,10 @@ nlattr_add_counters(struct nl_writer *nw, int attr, size_t number, char **names,
{
for (int i = 0; i < number; i++) {
int off = nlattr_add_nested(nw, attr);
+
+ if (off == 0)
+ return (false);
+
nlattr_add_u32(nw, PF_C_ID, i);
nlattr_add_string(nw, PF_C_NAME, names[i]);
nlattr_add_u64(nw, PF_C_COUNTER, counter_u64_fetch(counters[i]));
@@ -1159,6 +1194,10 @@ nlattr_add_fcounters(struct nl_writer *nw, int attr, size_t number, char **names
{
for (int i = 0; i < number; i++) {
int off = nlattr_add_nested(nw, attr);
+
+ if (off == 0)
+ return (false);
+
nlattr_add_u32(nw, PF_C_ID, i);
nlattr_add_string(nw, PF_C_NAME, names[i]);
nlattr_add_u64(nw, PF_C_COUNTER, pf_counter_u64_fetch(&counters[i]));
@@ -1173,6 +1212,9 @@ nlattr_add_u64_array(struct nl_writer *nw, int attr, size_t number, const uint64
{
int off = nlattr_add_nested(nw, attr);
+ if (off == 0)
+ return (false);
+
for (size_t i = 0; i < number; i++)
nlattr_add_u64(nw, 0, array[i]);
@@ -1482,6 +1524,9 @@ nlattr_add_pool_addr(struct nl_writer *nw, int attrtype, struct pf_pooladdr *a)
off = nlattr_add_nested(nw, attrtype);
+ if (off == 0)
+ return (false);
+
nlattr_add_addr_wrap(nw, PF_PA_ADDR, &a->addr);
nlattr_add_string(nw, PF_PA_IFNAME, a->ifname);
@@ -1689,6 +1734,9 @@ nlattr_add_pf_threshold(struct nl_writer *nw, int attrtype,
int off = nlattr_add_nested(nw, attrtype);
int conn_rate_count = 0;
+ if (off == 0)
+ return (false);
+
/* Adjust the connection rate estimate. */
if (t->cr != NULL)
conn_rate_count = counter_rate_get(t->cr);
@@ -1889,6 +1937,9 @@ nlattr_add_pfr_table(struct nl_writer *nw, int attrtype,
{
int off = nlattr_add_nested(nw, attrtype);
+ if (off == 0)
+ return (false);
+
nlattr_add_string(nw, PF_T_ANCHOR, t->pfrt_anchor);
nlattr_add_string(nw, PF_T_NAME, t->pfrt_name);
nlattr_add_u32(nw, PF_T_TABLE_FLAGS, t->pfrt_flags);
@@ -2212,6 +2263,7 @@ static int
nlattr_add_pfr_addr(struct nl_writer *nw, int attr, const struct pfr_addr *a)
{
int off = nlattr_add_nested(nw, attr);
+
if (off == 0)
return (false);
@@ -2291,6 +2343,7 @@ static int
nlattr_add_pfr_astats(struct nl_writer *nw, int attr, const struct pfr_astats *a)
{
int off = nlattr_add_nested(nw, attr);
+
if (off == 0)
return (false);
@@ -2414,6 +2467,346 @@ pf_handle_table_clear_astats(struct nlmsghdr *hdr, struct nl_pstate *npt)
return (error);
}
+static const struct nlattr_parser nla_p_rate[] = {
+ { .type = PF_LR_LIMIT, .off = 0, .cb = nlattr_get_uint32 },
+ { .type = PF_LR_SECONDS, .off = sizeof(unsigned int), .cb = nlattr_get_uint32 },
+};
+NL_DECLARE_ATTR_PARSER(rate_parser, nla_p_rate);
+
+#define _OUT(_field) offsetof(struct pfioc_statelim, _field)
+static const struct nlattr_parser nla_p_state_limiter[] = {
+ { .type = PF_SL_TICKET, .off = _OUT(ticket), .cb = nlattr_get_uint32 },
+ { .type = PF_SL_NAME, .off = _OUT(name), .arg = (void *)PF_STATELIM_NAME_LEN, .cb = nlattr_get_chara },
+ { .type = PF_SL_ID, .off = _OUT(id), .cb = nlattr_get_uint32 },
+ { .type = PF_SL_LIMIT, .off = _OUT(limit), .cb = nlattr_get_uint32 },
+ { .type = PF_SL_RATE, .off = _OUT(rate), .arg = &rate_parser, .cb = nlattr_get_nested },
+ { .type = PF_SL_DESCR, .off = _OUT(description), .arg = (void *)PF_STATELIM_DESCR_LEN, .cb = nlattr_get_chara },
+};
+NL_DECLARE_PARSER(state_limiter_parser, struct genlmsghdr, nlf_p_empty, nla_p_state_limiter);
+#undef _OUT
+
+/*
+ * Netlink handler: parse a pfioc_statelim from the request, create the
+ * state limiter via pf_statelim_add() and reply with the assigned id.
+ */
+static int
+pf_handle_state_limiter_add(struct nlmsghdr *hdr, struct nl_pstate *npt)
+{
+ struct pfioc_statelim attrs = { 0 };
+ struct nl_writer *nw = npt->nw;
+ struct genlmsghdr *ghdr_new;
+ int error;
+
+ error = nl_parse_nlmsg(hdr, &state_limiter_parser, npt, &attrs);
+ if (error != 0)
+ return (error);
+
+ error = pf_statelim_add(&attrs);
+ if (error != 0)
+ return (error);
+
+ if (!nlmsg_reply(nw, hdr, sizeof(struct genlmsghdr)))
+ return (ENOMEM);
+
+ /*
+ * NOTE(review): nlmsg_reserve_object() result is not NULL-checked
+ * here or in the peer handlers — confirm it cannot fail after a
+ * successful nlmsg_reply().
+ */
+ ghdr_new = nlmsg_reserve_object(nw, struct genlmsghdr);
+ ghdr_new->cmd = PFNL_CMD_STATE_LIMITER_ADD;
+
+ nlattr_add_u32(nw, PF_SL_ID, attrs.id);
+
+ if (!nlmsg_end(nw))
+ return (ENOMEM);
+
+ return (error);
+}
+
+/*
+ * Emit a pf_limiter_rate as a nested attribute (PF_LR_LIMIT,
+ * PF_LR_SECONDS).  Returns false if the nested attribute could not be
+ * opened in the writer.
+ */
+static bool
+nlattr_add_limiter_rate(struct nl_writer *nw, int attrtype,
+ const struct pf_limiter_rate *rate)
+{
+ int off = nlattr_add_nested(nw, attrtype);
+ if (off == 0)
+ return (false);
+
+ nlattr_add_u32(nw, PF_LR_LIMIT, rate->limit);
+ nlattr_add_u32(nw, PF_LR_SECONDS, rate->seconds);
+
+ nlattr_set_len(nw, off);
+
+ return (true);
+}
+
+/*
+ * Netlink handler shared by PFNL_CMD_STATE_LIMITER_GET (exact lookup)
+ * and PFNL_CMD_STATE_LIMITER_NGET (next greater-or-equal lookup).
+ * Replies with the limiter's configuration and counters.
+ */
+static int
+pf_handle_state_limiter_get(struct nlmsghdr *hdr, struct nl_pstate *npt)
+{
+ struct pfioc_statelim attrs = { 0 };
+ struct nl_writer *nw = npt->nw;
+ struct genlmsghdr *ghdr = (struct genlmsghdr *)(hdr + 1);
+ struct genlmsghdr *ghdr_new;
+ int error;
+
+ error = nl_parse_nlmsg(hdr, &state_limiter_parser, npt, &attrs);
+ if (error != 0)
+ return (error);
+
+ error = pf_statelim_get(&attrs,
+ ghdr->cmd == PFNL_CMD_STATE_LIMITER_GET ? pf_statelim_rb_find :
+ pf_statelim_rb_nfind);
+ if (error != 0)
+ return (error);
+
+ if (!nlmsg_reply(nw, hdr, sizeof(struct genlmsghdr)))
+ return (ENOMEM);
+
+ ghdr_new = nlmsg_reserve_object(nw, struct genlmsghdr);
+ /*
+ * Echo the request command so an NGET reply is not mislabelled as
+ * GET; matches pf_handle_source_limiter_get().
+ */
+ ghdr_new->cmd = ghdr->cmd;
+
+ nlattr_add_string(nw, PF_SL_NAME, attrs.name);
+ nlattr_add_u32(nw, PF_SL_ID, attrs.id);
+ nlattr_add_u32(nw, PF_SL_LIMIT, attrs.limit);
+ nlattr_add_limiter_rate(nw, PF_SL_RATE, &attrs.rate);
+ nlattr_add_string(nw, PF_SL_DESCR, attrs.description);
+ nlattr_add_u32(nw, PF_SL_INUSE, attrs.inuse);
+ nlattr_add_u64(nw, PF_SL_ADMITTED, attrs.admitted);
+ nlattr_add_u64(nw, PF_SL_HARDLIMITED, attrs.hardlimited);
+ nlattr_add_u64(nw, PF_SL_RATELIMITED, attrs.ratelimited);
+
+ if (!nlmsg_end(nw))
+ return (ENOMEM);
+
+ return (error);
+}
+
+#define _OUT(_field) offsetof(struct pfioc_sourcelim, _field)
+static const struct nlattr_parser nla_p_source_limiter[] = {
+ { .type = PF_SCL_TICKET, .off = _OUT(ticket), .cb = nlattr_get_uint32 },
+ { .type = PF_SCL_NAME, .off = _OUT(name), .arg = (void *)PF_STATELIM_NAME_LEN, .cb = nlattr_get_chara },
+ { .type = PF_SCL_ID, .off = _OUT(id), .cb = nlattr_get_uint32 },
+ { .type = PF_SCL_ENTRIES, .off = _OUT(entries), .cb = nlattr_get_uint32 },
+ { .type = PF_SCL_LIMIT, .off = _OUT(limit), .cb = nlattr_get_uint32 },
+ { .type = PF_SCL_RATE, .off = _OUT(rate), .arg = &rate_parser, .cb = nlattr_get_nested },
+ { .type = PF_SCL_OVERLOAD_TBL_NAME, .off = _OUT(overload_tblname), .arg = (void *)PF_TABLE_NAME_SIZE, .cb = nlattr_get_chara },
+ { .type = PF_SCL_OVERLOAD_HIGH_WM, .off = _OUT(overload_hwm), .cb = nlattr_get_uint32 },
+ { .type = PF_SCL_OVERLOAD_LOW_WM, .off = _OUT(overload_lwm), .cb = nlattr_get_uint32 },
+ { .type = PF_SCL_INET_PREFIX, .off = _OUT(inet_prefix), .cb = nlattr_get_uint32 },
+ { .type = PF_SCL_INET6_PREFIX, .off = _OUT(inet6_prefix), .cb = nlattr_get_uint32 },
+ { .type = PF_SCL_DESCR, .off = _OUT(description), .arg = (void *)PF_STATELIM_DESCR_LEN, .cb = nlattr_get_chara },
+};
+#undef _OUT
+NL_DECLARE_PARSER(source_limiter_parser, struct genlmsghdr, nlf_p_empty, nla_p_source_limiter);
+
+/*
+ * Netlink handler: parse a pfioc_sourcelim from the request, create the
+ * source limiter via pf_sourcelim_add() and reply with the assigned id.
+ */
+static int
+pf_handle_source_limiter_add(struct nlmsghdr *hdr, struct nl_pstate *npt)
+{
+ struct pfioc_sourcelim attrs = { 0 };
+ struct nl_writer *nw = npt->nw;
+ struct genlmsghdr *ghdr_new;
+ int error;
+
+ error = nl_parse_nlmsg(hdr, &source_limiter_parser, npt, &attrs);
+ if (error != 0)
+ return (error);
+
+ error = pf_sourcelim_add(&attrs);
+ if (error != 0)
+ return (error);
+
+ if (!nlmsg_reply(nw, hdr, sizeof(struct genlmsghdr)))
+ return (ENOMEM);
+
+ ghdr_new = nlmsg_reserve_object(nw, struct genlmsghdr);
+ ghdr_new->cmd = PFNL_CMD_SOURCE_LIMITER_ADD;
+
+ nlattr_add_u32(nw, PF_SCL_ID, attrs.id);
+
+ if (!nlmsg_end(nw))
+ return (ENOMEM);
+
+ return (error);
+}
+
+/*
+ * Netlink handler shared by PFNL_CMD_SOURCE_LIMITER_GET (exact lookup)
+ * and PFNL_CMD_SOURCE_LIMITER_NGET (next greater-or-equal lookup).
+ * Replies with the limiter's full configuration and counters; the reply
+ * echoes the request command so GET and NGET replies stay distinct.
+ */
+static int
+pf_handle_source_limiter_get(struct nlmsghdr *hdr, struct nl_pstate *npt)
+{
+ struct pfioc_sourcelim attrs = { 0 };
+ struct nl_writer *nw = npt->nw;
+ struct genlmsghdr *ghdr = (struct genlmsghdr *)(hdr + 1);
+ struct genlmsghdr *ghdr_new;
+ int error;
+
+ error = nl_parse_nlmsg(hdr, &source_limiter_parser, npt, &attrs);
+ if (error != 0)
+ return (error);
+
+ error = pf_sourcelim_get(&attrs,
+ ghdr->cmd == PFNL_CMD_SOURCE_LIMITER_GET ? pf_sourcelim_rb_find :
+ pf_sourcelim_rb_nfind);
+ if (error != 0)
+ return (error);
+
+ if (!nlmsg_reply(nw, hdr, sizeof(struct genlmsghdr)))
+ return (ENOMEM);
+
+ ghdr_new = nlmsg_reserve_object(nw, struct genlmsghdr);
+ ghdr_new->cmd = ghdr->cmd;
+
+ nlattr_add_string(nw, PF_SCL_NAME, attrs.name);
+ nlattr_add_u32(nw, PF_SCL_ID, attrs.id);
+ nlattr_add_u32(nw, PF_SCL_ENTRIES, attrs.entries);
+ nlattr_add_u32(nw, PF_SCL_LIMIT, attrs.limit);
+ nlattr_add_limiter_rate(nw, PF_SCL_RATE, &attrs.rate);
+ nlattr_add_string(nw, PF_SCL_OVERLOAD_TBL_NAME, attrs.overload_tblname);
+ nlattr_add_u32(nw, PF_SCL_OVERLOAD_HIGH_WM, attrs.overload_hwm);
+ nlattr_add_u32(nw, PF_SCL_OVERLOAD_LOW_WM, attrs.overload_lwm);
+ nlattr_add_u32(nw, PF_SCL_INET_PREFIX, attrs.inet_prefix);
+ nlattr_add_u32(nw, PF_SCL_INET6_PREFIX, attrs.inet6_prefix);
+ nlattr_add_string(nw, PF_SCL_DESCR, attrs.description);
+ nlattr_add_u32(nw, PF_SCL_NENTRIES, attrs.nentries);
+ nlattr_add_u32(nw, PF_SCL_INUSE, attrs.inuse);
+ nlattr_add_u64(nw, PF_SCL_ADDR_ALLOCS, attrs.addrallocs);
+ nlattr_add_u64(nw, PF_SCL_ADDR_NOMEM, attrs.addrnomem);
+ nlattr_add_u64(nw, PF_SCL_ADMITTED, attrs.admitted);
+ nlattr_add_u64(nw, PF_SCL_ADDRLIMITED, attrs.addrlimited);
+ nlattr_add_u64(nw, PF_SCL_HARDLIMITED, attrs.hardlimited);
+ nlattr_add_u64(nw, PF_SCL_RATELIMITED, attrs.ratelimited);
+
+ if (!nlmsg_end(nw))
+ return (ENOMEM);
+
+ return (error);
+}
+
+/*
+ * Parsed request attributes for the SOURCE_GET/SOURCE_NGET handlers:
+ * the limiter id plus the af/rdomain/address key of a source entry.
+ * PF_SRC_ADDR is parsed as an in6_addr into the pf_addr union.
+ */
+struct nlattr_source {
+ char name[PF_SOURCELIM_NAME_LEN];
+ uint32_t id;
+ sa_family_t af;
+ unsigned int rdomain;
+ struct pf_addr addr;
+};
+#define _OUT(_field) offsetof(struct nlattr_source, _field)
+static const struct nlattr_parser nla_p_source[] = {
+ { .type = PF_SRC_NAME, .off = _OUT(name), .arg = (void *)PF_SOURCELIM_NAME_LEN, .cb = nlattr_get_chara },
+ { .type = PF_SRC_ID, .off = _OUT(id), .cb = nlattr_get_uint32 },
+ { .type = PF_SRC_AF, .off = _OUT(af), .cb = nlattr_get_uint8 },
+ { .type = PF_SRC_RDOMAIN, .off = _OUT(rdomain), .cb = nlattr_get_uint32 },
+ { .type = PF_SRC_ADDR, .off = _OUT(addr), .cb = nlattr_get_in6_addr },
+};
+#undef _OUT
+NL_DECLARE_PARSER(source_parser, struct genlmsghdr, nlf_p_empty, nla_p_source);
+
+/*
+ * Netlink handler for PFNL_CMD_SOURCE_GET (exact lookup) and
+ * PFNL_CMD_SOURCE_NGET (greater-or-equal lookup).  Starting from the
+ * matched entry, dumps every subsequent source of the limiter under a
+ * single rules read lock, one netlink message per source.
+ */
+static int
+pf_handle_source_get(struct nlmsghdr *hdr, struct nl_pstate *npt)
+{
+ struct nlattr_source attrs = { 0 };
+ struct pf_source key = { 0 };
+ struct pf_sourcelim plkey = { 0 };
+ struct nl_writer *nw = npt->nw;
+ struct genlmsghdr *ghdr = (struct genlmsghdr *)(hdr + 1);
+ struct genlmsghdr *ghdr_new;
+ struct pf_sourcelim *pfsrlim;
+ struct pf_source *pfsr;
+ int error;
+ PF_RULES_RLOCK_TRACKER;
+
+ error = nl_parse_nlmsg(hdr, &source_parser, npt, &attrs);
+ if (error != 0)
+ return (error);
+
+ PF_RULES_RLOCK();
+ plkey.pfsrlim_id = attrs.id;
+ pfsrlim = pf_sourcelim_rb_find(&V_pf_sourcelim_id_tree_active, &plkey);
+ if (pfsrlim == NULL) {
+ error = ESRCH;
+ goto out;
+ }
+
+ key.pfsr_af = attrs.af;
+ key.pfsr_rdomain = attrs.rdomain;
+ key.pfsr_addr = attrs.addr;
+
+ pfsr = (ghdr->cmd == PFNL_CMD_SOURCE_GET ? pf_source_rb_find :
+ pf_source_rb_nfind)(&pfsrlim->pfsrlim_ioc_sources, &key);
+ if (pfsr == NULL) {
+ error = ENOENT;
+ goto out;
+ }
+
+ for (;;) {
+ if (!nlmsg_reply(nw, hdr, sizeof(struct genlmsghdr))) {
+ error = ENOMEM;
+ goto out;
+ }
+
+ ghdr_new = nlmsg_reserve_object(nw, struct genlmsghdr);
+ ghdr_new->cmd = ghdr->cmd;
+
+ nlattr_add_u8(nw, PF_SRC_AF, pfsr->pfsr_af);
+ nlattr_add_u32(nw, PF_SRC_RDOMAIN, pfsr->pfsr_rdomain);
+ nlattr_add_in6_addr(nw, PF_SRC_ADDR, &pfsr->pfsr_addr.v6);
+
+ nlattr_add_u32(nw, PF_SRC_INUSE, pfsr->pfsr_inuse);
+ nlattr_add_u64(nw, PF_SRC_ADMITTED, pfsr->pfsr_counters.admitted);
+ nlattr_add_u64(nw, PF_SRC_HARDLIMITED, pfsr->pfsr_counters.hardlimited);
+ nlattr_add_u64(nw, PF_SRC_RATELIMITED, pfsr->pfsr_counters.ratelimited);
+
+ nlattr_add_u32(nw, PF_SRC_LIMIT, pfsrlim->pfsrlim_limit);
+ nlattr_add_u32(nw, PF_SRC_INET_PREFIX, pfsrlim->pfsrlim_ipv4_prefix);
+ nlattr_add_u32(nw, PF_SRC_INET6_PREFIX, pfsrlim->pfsrlim_ipv6_prefix);
+
+ if (!nlmsg_end(nw)) {
+ nlmsg_abort(nw);
+ error = ENOMEM;
+ goto out;
+ }
+
+ /* was "srlim->..." (undeclared) and missing '&' */
+ pfsr = RB_NEXT(pf_source_ioc_tree,
+ &pfsrlim->pfsrlim_ioc_sources, pfsr);
+ if (pfsr == NULL)
+ break;
+ }
+
+out:
+ PF_RULES_RUNLOCK();
+ return (error);
+}
+
+
+#define _OUT(_field) offsetof(struct pfioc_source_kill, _field)
+static const struct nlattr_parser nla_p_source_clear[] = {
+ { .type = PF_SC_NAME, .off = _OUT(name), .arg = (void *)PF_SOURCELIM_NAME_LEN, .cb = nlattr_get_chara },
+ { .type = PF_SC_ID, .off = _OUT(id), .cb = nlattr_get_uint32 },
+ { .type = PF_SC_RDOMAIN, .off = _OUT(rdomain), .cb = nlattr_get_uint32 },
+ { .type = PF_SC_AF, .off = _OUT(af), .cb = nlattr_get_uint8 },
+ { .type = PF_SC_ADDR, .off = _OUT(addr), .cb = nlattr_get_in6_addr },
+};
+#undef _OUT
+NL_DECLARE_PARSER(source_clear_parser, struct genlmsghdr, nlf_p_empty, nla_p_source_clear);
+
+/*
+ * Netlink handler for PFNL_CMD_SOURCE_CLEAR: parse a pfioc_source_kill
+ * and remove the named source entry via pf_source_clr(), echoing the
+ * limiter name in the reply.
+ */
+static int
+pf_handle_source_clear(struct nlmsghdr *hdr, struct nl_pstate *npt)
+{
+ struct pfioc_source_kill attrs = { 0 };
+ struct nl_writer *nw = npt->nw;
+ struct genlmsghdr *ghdr = (struct genlmsghdr *)(hdr + 1);
+ struct genlmsghdr *ghdr_new;
+ int error;
+
+ error = nl_parse_nlmsg(hdr, &source_clear_parser, npt, &attrs);
+ if (error != 0)
+ return (error);
+
+ error = pf_source_clr(&attrs);
+ if (error != 0)
+ return (error);
+
+ if (!nlmsg_reply(nw, hdr, sizeof(struct genlmsghdr)))
+ return (ENOMEM);
+
+ ghdr_new = nlmsg_reserve_object(nw, struct genlmsghdr);
+ ghdr_new->cmd = ghdr->cmd;
+
+ /* Reply in this command's own namespace (was PF_SCL_NAME). */
+ nlattr_add_string(nw, PF_SC_NAME, attrs.name);
+
+ if (!nlmsg_end(nw))
+ return (ENOMEM);
+
+ return (error);
+}
+
static const struct nlhdr_parser *all_parsers[] = {
&state_parser,
&addrule_parser,
@@ -2430,6 +2823,10 @@ static const struct nlhdr_parser *all_parsers[] = {
&table_parser,
&table_addr_parser,
&table_astats_parser,
+ &state_limiter_parser,
+ &source_limiter_parser,
+ &source_parser,
+ &source_clear_parser,
};
static uint16_t family_id;
@@ -2694,6 +3091,69 @@ static const struct genl_cmd pf_cmds[] = {
.cmd_flags = GENL_CMD_CAP_DUMP | GENL_CMD_CAP_HASPOL,
.cmd_priv = PRIV_NETINET_PF,
},
+ {
+ .cmd_num = PFNL_CMD_STATE_LIMITER_ADD,
+ .cmd_name = "STATE_LIMITER_ADD",
+ .cmd_cb = pf_handle_state_limiter_add,
+ .cmd_flags = GENL_CMD_CAP_DO | GENL_CMD_CAP_HASPOL,
+ .cmd_priv = PRIV_NETINET_PF,
+ },
+ {
+ .cmd_num = PFNL_CMD_STATE_LIMITER_GET,
+ .cmd_name = "STATE_LIMITER_GET",
+ .cmd_cb = pf_handle_state_limiter_get,
+ .cmd_flags = GENL_CMD_CAP_DUMP | GENL_CMD_CAP_HASPOL,
+ .cmd_priv = PRIV_NETINET_PF,
+ },
+ {
+ .cmd_num = PFNL_CMD_STATE_LIMITER_NGET,
+ .cmd_name = "STATE_LIMITER_NGET",
+ .cmd_cb = pf_handle_state_limiter_get,
+ .cmd_flags = GENL_CMD_CAP_DUMP | GENL_CMD_CAP_HASPOL,
+ .cmd_priv = PRIV_NETINET_PF,
+ },
+ {
+ .cmd_num = PFNL_CMD_SOURCE_LIMITER_ADD,
+ .cmd_name = "SOURCE_LIMITER_ADD",
+ .cmd_cb = pf_handle_source_limiter_add,
+ .cmd_flags = GENL_CMD_CAP_DO | GENL_CMD_CAP_HASPOL,
+ .cmd_priv = PRIV_NETINET_PF,
+ },
+ {
+ .cmd_num = PFNL_CMD_SOURCE_LIMITER_GET,
+ .cmd_name = "SOURCE_LIMITER_GET",
+ .cmd_cb = pf_handle_source_limiter_get,
+ .cmd_flags = GENL_CMD_CAP_DUMP | GENL_CMD_CAP_HASPOL,
+ .cmd_priv = PRIV_NETINET_PF,
+ },
+ {
+ .cmd_num = PFNL_CMD_SOURCE_LIMITER_NGET,
+ .cmd_name = "SOURCE_LIMITER_NGET",
+ .cmd_cb = pf_handle_source_limiter_get,
+ .cmd_flags = GENL_CMD_CAP_DUMP | GENL_CMD_CAP_HASPOL,
+ .cmd_priv = PRIV_NETINET_PF,
+ },
+ {
+ .cmd_num = PFNL_CMD_SOURCE_GET,
+ .cmd_name = "SOURCE_GET",
+ .cmd_cb = pf_handle_source_get,
+ .cmd_flags = GENL_CMD_CAP_DUMP | GENL_CMD_CAP_HASPOL,
+ .cmd_priv = PRIV_NETINET_PF,
+ },
+ {
+ .cmd_num = PFNL_CMD_SOURCE_NGET,
+ .cmd_name = "SOURCE_NGET",
+ .cmd_cb = pf_handle_source_get,
+ .cmd_flags = GENL_CMD_CAP_DUMP | GENL_CMD_CAP_HASPOL,
+ .cmd_priv = PRIV_NETINET_PF,
+ },
+ {
+ .cmd_num = PFNL_CMD_SOURCE_CLEAR,
+ .cmd_name = "SOURCE_CLEAR",
+ .cmd_cb = pf_handle_source_clear,
+ .cmd_flags = GENL_CMD_CAP_DO | GENL_CMD_CAP_HASPOL,
+ .cmd_priv = PRIV_NETINET_PF,
+ },
};
void
diff --git a/sys/netpfil/pf/pf_nl.h b/sys/netpfil/pf/pf_nl.h
index 216f3d13db32..84e9d3a97303 100644
--- a/sys/netpfil/pf/pf_nl.h
+++ b/sys/netpfil/pf/pf_nl.h
@@ -73,6 +73,15 @@ enum {
PFNL_CMD_TABLE_GET_ADDR = 35,
PFNL_CMD_TABLE_GET_ASTATS = 36,
PFNL_CMD_TABLE_CLEAR_ASTATS = 37,
+ PFNL_CMD_STATE_LIMITER_ADD = 38,
+ PFNL_CMD_STATE_LIMITER_GET = 39,
+ PFNL_CMD_STATE_LIMITER_NGET = 40,
+ PFNL_CMD_SOURCE_LIMITER_ADD = 41,
+ PFNL_CMD_SOURCE_LIMITER_GET = 42,
+ PFNL_CMD_SOURCE_LIMITER_NGET = 43,
+ PFNL_CMD_SOURCE_GET = 44,
+ PFNL_CMD_SOURCE_NGET = 45,
+ PFNL_CMD_SOURCE_CLEAR = 46,
__PFNL_CMD_MAX,
};
#define PFNL_CMD_MAX (__PFNL_CMD_MAX -1)
@@ -290,6 +299,10 @@ enum pf_rule_type_t {
PF_RT_TYPE_2 = 84, /* u16 */
PF_RT_CODE_2 = 85, /* u16 */
PF_RT_EXPTIME = 86, /* time_t */
+ PF_RT_STATE_LIMIT = 87, /* u8 */
+ PF_RT_SOURCE_LIMIT = 88, /* u8 */
+ PF_RT_STATE_LIMIT_ACTION = 89, /* u32 */
+ PF_RT_SOURCE_LIMIT_ACTION = 90, /* u32 */
};
enum pf_addrule_type_t {
@@ -507,6 +520,76 @@ enum pf_table_astats_t {
PF_TAS_ASTATS_COUNT = 4, /* u32 */
PF_TAS_ASTATS_ZEROED = 5, /* u32 */
};
+
+enum pf_limit_rate_t {
+ PF_LR_UNSPEC,
+ PF_LR_LIMIT = 1, /* u32 */
+ PF_LR_SECONDS = 2, /* u32 */
+};
+
+enum pf_state_limit_t {
+ PF_SL_UNSPEC,
+ PF_SL_TICKET = 1, /* u32 */
+ PF_SL_NAME = 2, /* string */
+ PF_SL_ID = 3, /* u32 */
+ PF_SL_LIMIT = 4, /* u32 */
+ PF_SL_RATE = 5, /* nested, pf_limit_rate_t */
+ PF_SL_DESCR = 6, /* string */
+ PF_SL_INUSE = 7, /* u32 */
+ PF_SL_ADMITTED = 8, /* u64 */
+ PF_SL_HARDLIMITED = 9, /* u64 */
+ PF_SL_RATELIMITED = 10, /* u64 */
+};
+
+/* Attributes of the SOURCE_LIMITER_{ADD,GET,NGET} netlink commands. */
+enum pf_source_limit_t {
+ PF_SCL_UNSPEC,
+ PF_SCL_TICKET = 1, /* u32 */
+ PF_SCL_NAME = 2, /* string */
+ PF_SCL_ID = 3, /* u32 */
+ PF_SCL_ENTRIES = 4, /* u32 */
+ PF_SCL_LIMIT = 5, /* u32 */
+ PF_SCL_RATE = 6, /* nested, pf_limit_rate_t */
+ PF_SCL_OVERLOAD_TBL_NAME = 7, /* string */
+ PF_SCL_OVERLOAD_HIGH_WM = 8, /* u32 */
+ PF_SCL_OVERLOAD_LOW_WM = 9, /* u32 */
+ PF_SCL_INET_PREFIX = 10, /* u32 */
+ PF_SCL_INET6_PREFIX = 11, /* u32 */
+ PF_SCL_DESCR = 12, /* string */
+ PF_SCL_NENTRIES = 13, /* u32 */
+ PF_SCL_INUSE = 14, /* u32 */
+ PF_SCL_ADDR_ALLOCS = 15, /* u64 */
+ PF_SCL_ADDR_NOMEM = 16, /* u64 */
+ PF_SCL_ADMITTED = 17, /* u64 */
+ PF_SCL_ADDRLIMITED = 18, /* u64 */
+ PF_SCL_HARDLIMITED = 19, /* u64 */
+ PF_SCL_RATELIMITED = 20, /* u64 */
+};
+
+enum pf_source_t {
+ PF_SRC_UNSPEC,
+ PF_SRC_NAME = 1, /* string */
+ PF_SRC_ID = 2, /* u32 */
+ PF_SRC_AF = 3, /* u8 */
+ PF_SRC_RDOMAIN = 4, /* u32 */
+ PF_SRC_ADDR = 5, /* in6_addr */
+ PF_SRC_INUSE = 6, /* u32 */
+ PF_SRC_ADMITTED = 7, /* u64 */
+ PF_SRC_HARDLIMITED = 8, /* u64 */
+ PF_SRC_RATELIMITED = 9, /* u64 */
+ PF_SRC_LIMIT = 10, /* u32 */
+ PF_SRC_INET_PREFIX = 11, /* u32 */
+ PF_SRC_INET6_PREFIX = 12, /* u32 */
+};
+
+/* Attributes of the SOURCE_CLEAR netlink command. */
+enum pf_source_clear_t {
+ PF_SC_UNSPEC,
+ PF_SC_NAME = 1, /* string */
+ PF_SC_ID = 2, /* u32 */
+ PF_SC_RDOMAIN = 3, /* u32 */
+ PF_SC_AF = 4, /* u8 */
+ PF_SC_ADDR = 5, /* in6_addr */
+};
+
#ifdef _KERNEL
void pf_nl_register(void);
diff --git a/sys/netpfil/pf/pf_table.c b/sys/netpfil/pf/pf_table.c
index 0e2b9fe1cac8..650334c45db3 100644
--- a/sys/netpfil/pf/pf_table.c
+++ b/sys/netpfil/pf/pf_table.c
@@ -882,6 +882,26 @@ pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, time_t tzero)
return (0);
}
+/*
+ * Remove a single address entry from a table.  Returns ESRCH if the
+ * address is absent or negated; returns 0 without touching the entry
+ * if it is already marked (removal in progress elsewhere).
+ */
+int
+pfr_remove_kentry(struct pfr_ktable *kt, struct pfr_addr *ad)
+{
+ struct pfr_kentryworkq workq = SLIST_HEAD_INITIALIZER(workq);
+ struct pfr_kentry *p;
+
+ p = pfr_lookup_addr(kt, ad, 1);
+ if (p == NULL || p->pfrke_not)
+ return (ESRCH);
+
+ if (p->pfrke_mark)
+ return (0);
+
+ /* Mark and hand off on a one-entry workq to the bulk remover. */
+ p->pfrke_mark = 1;
+ SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
+ pfr_remove_kentries(kt, &workq);
+
+ return (0);
+}
+
static void
pfr_remove_kentries(struct pfr_ktable *kt,
struct pfr_kentryworkq *workq)
diff --git a/sys/powerpc/aim/moea64_native.c b/sys/powerpc/aim/moea64_native.c
index bf254e1f466c..20beccbdf935 100644
--- a/sys/powerpc/aim/moea64_native.c
+++ b/sys/powerpc/aim/moea64_native.c
@@ -565,6 +565,11 @@ moea64_bootstrap_native(vm_offset_t kernelstart, vm_offset_t kernelend)
moea64_early_bootstrap(kernelstart, kernelend);
switch (mfpvr() >> 16) {
+ case IBMPOWER8:
+ case IBMPOWER8E:
+ case IBMPOWER8NVL:
+ moea64_need_lock = false;
+ break;
case IBMPOWER9:
moea64_need_lock = false;
break;
diff --git a/sys/powerpc/booke/booke_machdep.c b/sys/powerpc/booke/booke_machdep.c
index 1a37959b439f..467eb2ab9638 100644
--- a/sys/powerpc/booke/booke_machdep.c
+++ b/sys/powerpc/booke/booke_machdep.c
@@ -187,10 +187,6 @@ extern void *int_debug;
extern void *int_debug_ed;
extern void *int_vec;
extern void *int_vecast;
-#ifdef __SPE__
-extern void *int_spe_fpdata;
-extern void *int_spe_fpround;
-#endif
#ifdef HWPMC_HOOKS
extern void *int_performance_counter;
#endif
@@ -278,10 +274,6 @@ ivor_setup(void)
case FSL_E500v1:
case FSL_E500v2:
SET_TRAP(SPR_IVOR32, int_vec);
-#ifdef __SPE__
- SET_TRAP(SPR_IVOR33, int_spe_fpdata);
- SET_TRAP(SPR_IVOR34, int_spe_fpround);
-#endif
break;
}
diff --git a/sys/powerpc/booke/spe.c b/sys/powerpc/booke/spe.c
deleted file mode 100644
index e10392508e4e..000000000000
--- a/sys/powerpc/booke/spe.c
+++ /dev/null
@@ -1,685 +0,0 @@
-/*-
- * Copyright (C) 1996 Wolfgang Solfrank.
- * Copyright (C) 1996 TooLs GmbH.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by TooLs GmbH.
- * 4. The name of TooLs GmbH may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * $NetBSD: fpu.c,v 1.5 2001/07/22 11:29:46 wiz Exp $
- */
-
-#include <sys/param.h>
-#include <sys/proc.h>
-#include <sys/systm.h>
-#include <sys/limits.h>
-
-#include <machine/altivec.h>
-#include <machine/fpu.h>
-#include <machine/ieeefp.h>
-#include <machine/pcb.h>
-#include <machine/psl.h>
-
-#include <powerpc/fpu/fpu_arith.h>
-#include <powerpc/fpu/fpu_emu.h>
-#include <powerpc/fpu/fpu_extern.h>
-
-void spe_handle_fpdata(struct trapframe *);
-void spe_handle_fpround(struct trapframe *);
-static int spe_emu_instr(uint32_t, struct fpemu *, struct fpn **, uint32_t *);
-
-static void
-save_vec_int(struct thread *td)
-{
- int msr;
- struct pcb *pcb;
-
- pcb = td->td_pcb;
-
- /*
- * Temporarily re-enable the vector unit during the save
- */
- msr = mfmsr();
- mtmsr(msr | PSL_VEC);
-
- /*
- * Save the vector registers and SPEFSCR to the PCB
- */
-#define EVSTDW(n) __asm ("evstdw %1,0(%0)" \
- :: "b"(pcb->pcb_vec.vr[n]), "n"(n));
- EVSTDW(0); EVSTDW(1); EVSTDW(2); EVSTDW(3);
- EVSTDW(4); EVSTDW(5); EVSTDW(6); EVSTDW(7);
- EVSTDW(8); EVSTDW(9); EVSTDW(10); EVSTDW(11);
- EVSTDW(12); EVSTDW(13); EVSTDW(14); EVSTDW(15);
- EVSTDW(16); EVSTDW(17); EVSTDW(18); EVSTDW(19);
- EVSTDW(20); EVSTDW(21); EVSTDW(22); EVSTDW(23);
- EVSTDW(24); EVSTDW(25); EVSTDW(26); EVSTDW(27);
- EVSTDW(28); EVSTDW(29); EVSTDW(30); EVSTDW(31);
-#undef EVSTDW
-
- __asm ( "evxor 0,0,0\n"
- "evmwumiaa 0,0,0\n"
- "evstdd 0,0(%0)" :: "b"(&pcb->pcb_vec.spare[0]));
- pcb->pcb_vec.vscr = mfspr(SPR_SPEFSCR);
-
- /*
- * Disable vector unit again
- */
- isync();
- mtmsr(msr);
-
-}
-
-void
-enable_vec(struct thread *td)
-{
- int msr;
- struct pcb *pcb;
- struct trapframe *tf;
-
- pcb = td->td_pcb;
- tf = trapframe(td);
-
- /*
- * Save the thread's SPE CPU number, and set the CPU's current
- * vector thread
- */
- td->td_pcb->pcb_veccpu = PCPU_GET(cpuid);
- PCPU_SET(vecthread, td);
-
- /*
- * Enable the vector unit for when the thread returns from the
- * exception. If this is the first time the unit has been used by
- * the thread, initialise the vector registers and VSCR to 0, and
- * set the flag to indicate that the vector unit is in use.
- */
- tf->srr1 |= PSL_VEC;
- if (!(pcb->pcb_flags & PCB_VEC)) {
- memset(&pcb->pcb_vec, 0, sizeof pcb->pcb_vec);
- pcb->pcb_flags |= PCB_VEC;
- pcb->pcb_vec.vscr = mfspr(SPR_SPEFSCR);
- }
-
- /*
- * Temporarily enable the vector unit so the registers
- * can be restored.
- */
- msr = mfmsr();
- mtmsr(msr | PSL_VEC);
-
- /* Restore SPEFSCR and ACC. Use %r0 as the scratch for ACC. */
- mtspr(SPR_SPEFSCR, pcb->pcb_vec.vscr);
- __asm __volatile("isync;evldd 0, 0(%0); evmra 0,0\n"
- :: "b"(&pcb->pcb_vec.spare[0]));
-
- /*
- * The lower half of each register will be restored on trap return. Use
- * %r0 as a scratch register, and restore it last.
- */
-#define EVLDW(n) __asm __volatile("evldw 0, 0(%0); evmergehilo "#n",0,"#n \
- :: "b"(&pcb->pcb_vec.vr[n]));
- EVLDW(1); EVLDW(2); EVLDW(3); EVLDW(4);
- EVLDW(5); EVLDW(6); EVLDW(7); EVLDW(8);
- EVLDW(9); EVLDW(10); EVLDW(11); EVLDW(12);
- EVLDW(13); EVLDW(14); EVLDW(15); EVLDW(16);
- EVLDW(17); EVLDW(18); EVLDW(19); EVLDW(20);
- EVLDW(21); EVLDW(22); EVLDW(23); EVLDW(24);
- EVLDW(25); EVLDW(26); EVLDW(27); EVLDW(28);
- EVLDW(29); EVLDW(30); EVLDW(31); EVLDW(0);
-#undef EVLDW
-
- isync();
- mtmsr(msr);
-}
-
-void
-save_vec(struct thread *td)
-{
- struct pcb *pcb;
-
- pcb = td->td_pcb;
-
- save_vec_int(td);
-
- /*
- * Clear the current vec thread and pcb's CPU id
- * XXX should this be left clear to allow lazy save/restore ?
- */
- pcb->pcb_veccpu = INT_MAX;
- PCPU_SET(vecthread, NULL);
-}
-
-/*
- * Save SPE state without dropping ownership. This will only save state if
- * the current vector-thread is `td'. This is used for taking core dumps, so
- * don't leak kernel information; overwrite the low words of each vector with
- * their real value, taken from the thread's trap frame, unconditionally.
- */
-void
-save_vec_nodrop(struct thread *td)
-{
- struct pcb *pcb;
- int i;
-
- if (td == PCPU_GET(vecthread))
- save_vec_int(td);
-
- pcb = td->td_pcb;
-
- for (i = 0; i < 32; i++) {
- pcb->pcb_vec.vr[i][1] =
- td->td_frame ? td->td_frame->fixreg[i] : 0;
- }
-}
-
-#define SPE_INST_MASK 0x31f
-#define EADD 0x200
-#define ESUB 0x201
-#define EABS 0x204
-#define ENABS 0x205
-#define ENEG 0x206
-#define EMUL 0x208
-#define EDIV 0x209
-#define ECMPGT 0x20c
-#define ECMPLT 0x20d
-#define ECMPEQ 0x20e
-#define ECFUI 0x210
-#define ECFSI 0x211
-#define ECTUI 0x214
-#define ECTSI 0x215
-#define ECTUF 0x216
-#define ECTSF 0x217
-#define ECTUIZ 0x218
-#define ECTSIZ 0x21a
-
-#define SPE 0x4
-#define SPFP 0x6
-#define DPFP 0x7
-
-#define SPE_OPC 4
-#define OPC_SHIFT 26
-
-#define EVFSADD 0x280
-#define EVFSSUB 0x281
-#define EVFSABS 0x284
-#define EVFSNABS 0x285
-#define EVFSNEG 0x286
-#define EVFSMUL 0x288
-#define EVFSDIV 0x289
-#define EVFSCMPGT 0x28c
-#define EVFSCMPLT 0x28d
-#define EVFSCMPEQ 0x28e
-#define EVFSCFUI 0x290
-#define EVFSCFSI 0x291
-#define EVFSCTUI 0x294
-#define EVFSCTSI 0x295
-#define EVFSCTUF 0x296
-#define EVFSCTSF 0x297
-#define EVFSCTUIZ 0x298
-#define EVFSCTSIZ 0x29a
-
-#define EFSADD 0x2c0
-#define EFSSUB 0x2c1
-#define EFSABS 0x2c4
-#define EFSNABS 0x2c5
-#define EFSNEG 0x2c6
-#define EFSMUL 0x2c8
-#define EFSDIV 0x2c9
-#define EFSCMPGT 0x2cc
-#define EFSCMPLT 0x2cd
-#define EFSCMPEQ 0x2ce
-#define EFSCFD 0x2cf
-#define EFSCFUI 0x2d0
-#define EFSCFSI 0x2d1
-#define EFSCTUI 0x2d4
-#define EFSCTSI 0x2d5
-#define EFSCTUF 0x2d6
-#define EFSCTSF 0x2d7
-#define EFSCTUIZ 0x2d8
-#define EFSCTSIZ 0x2da
-
-#define EFDADD 0x2e0
-#define EFDSUB 0x2e1
-#define EFDABS 0x2e4
-#define EFDNABS 0x2e5
-#define EFDNEG 0x2e6
-#define EFDMUL 0x2e8
-#define EFDDIV 0x2e9
-#define EFDCMPGT 0x2ec
-#define EFDCMPLT 0x2ed
-#define EFDCMPEQ 0x2ee
-#define EFDCFS 0x2ef
-#define EFDCFUI 0x2f0
-#define EFDCFSI 0x2f1
-#define EFDCTUI 0x2f4
-#define EFDCTSI 0x2f5
-#define EFDCTUF 0x2f6
-#define EFDCTSF 0x2f7
-#define EFDCTUIZ 0x2f8
-#define EFDCTSIZ 0x2fa
-
-enum {
- NONE,
- SINGLE,
- DOUBLE,
- VECTOR,
-};
-
-static uint32_t fpscr_to_spefscr(uint32_t fpscr)
-{
- uint32_t spefscr;
-
- spefscr = 0;
-
- if (fpscr & FPSCR_VX)
- spefscr |= SPEFSCR_FINV;
- if (fpscr & FPSCR_OX)
- spefscr |= SPEFSCR_FOVF;
- if (fpscr & FPSCR_UX)
- spefscr |= SPEFSCR_FUNF;
- if (fpscr & FPSCR_ZX)
- spefscr |= SPEFSCR_FDBZ;
- if (fpscr & FPSCR_XX)
- spefscr |= SPEFSCR_FX;
-
- return (spefscr);
-}
-
-/* Sign is 0 for unsigned, 1 for signed. */
-static int
-spe_to_int(struct fpemu *fpemu, struct fpn *fpn, uint32_t *val, int sign)
-{
- uint32_t res[2];
-
- res[0] = fpu_ftox(fpemu, fpn, res);
- if (res[0] != UINT_MAX && res[0] != 0)
- fpemu->fe_cx |= FPSCR_OX;
- else if (sign == 0 && res[0] != 0)
- fpemu->fe_cx |= FPSCR_UX;
- else
- *val = res[1];
-
- return (0);
-}
-
-/* Masked instruction */
-/*
- * For compare instructions, returns 1 if success, 0 if not. For all others,
- * returns -1, or -2 if no result needs recorded.
- */
-static int
-spe_emu_instr(uint32_t instr, struct fpemu *fpemu,
- struct fpn **result, uint32_t *iresult)
-{
- switch (instr & SPE_INST_MASK) {
- case EABS:
- case ENABS:
- case ENEG:
- /* Taken care of elsewhere. */
- break;
- case ECTUIZ:
- fpemu->fe_cx &= ~FPSCR_RN;
- fpemu->fe_cx |= FP_RZ;
- case ECTUI:
- spe_to_int(fpemu, &fpemu->fe_f2, iresult, 0);
- return (-1);
- case ECTSIZ:
- fpemu->fe_cx &= ~FPSCR_RN;
- fpemu->fe_cx |= FP_RZ;
- case ECTSI:
- spe_to_int(fpemu, &fpemu->fe_f2, iresult, 1);
- return (-1);
- case EADD:
- *result = fpu_add(fpemu);
- break;
- case ESUB:
- *result = fpu_sub(fpemu);
- break;
- case EMUL:
- *result = fpu_mul(fpemu);
- break;
- case EDIV:
- *result = fpu_div(fpemu);
- break;
- case ECMPGT:
- fpu_compare(fpemu, 0);
- if (fpemu->fe_cx & FPSCR_FG)
- return (1);
- return (0);
- case ECMPLT:
- fpu_compare(fpemu, 0);
- if (fpemu->fe_cx & FPSCR_FL)
- return (1);
- return (0);
- case ECMPEQ:
- fpu_compare(fpemu, 0);
- if (fpemu->fe_cx & FPSCR_FE)
- return (1);
- return (0);
- default:
- printf("Unknown instruction %x\n", instr);
- }
-
- return (-1);
-}
-
-static int
-spe_explode(struct fpemu *fe, struct fpn *fp, uint32_t type,
- uint32_t hi, uint32_t lo)
-{
- uint32_t s;
-
- fp->fp_sign = hi >> 31;
- fp->fp_sticky = 0;
- switch (type) {
- case SINGLE:
- s = fpu_stof(fp, hi);
- break;
-
- case DOUBLE:
- s = fpu_dtof(fp, hi, lo);
- break;
- }
-
- if (s == FPC_QNAN && (fp->fp_mant[0] & FP_QUIETBIT) == 0) {
- /*
- * Input is a signalling NaN. All operations that return
- * an input NaN operand put it through a ``NaN conversion'',
- * which basically just means ``turn on the quiet bit''.
- * We do this here so that all NaNs internally look quiet
- * (we can tell signalling ones by their class).
- */
- fp->fp_mant[0] |= FP_QUIETBIT;
- fe->fe_cx = FPSCR_VXSNAN; /* assert invalid operand */
- s = FPC_SNAN;
- }
- fp->fp_class = s;
-
- return (0);
-}
-
-/*
- * Save the high word of a 64-bit GPR for manipulation in the exception handler.
- */
-static uint32_t
-spe_save_reg_high(int reg)
-{
- uint32_t vec[2];
-#define EVSTDW(n) case n: __asm __volatile ("evstdw %1,0(%0)" \
- :: "b"(vec), "n"(n) : "memory"); break;
- switch (reg) {
- EVSTDW(0); EVSTDW(1); EVSTDW(2); EVSTDW(3);
- EVSTDW(4); EVSTDW(5); EVSTDW(6); EVSTDW(7);
- EVSTDW(8); EVSTDW(9); EVSTDW(10); EVSTDW(11);
- EVSTDW(12); EVSTDW(13); EVSTDW(14); EVSTDW(15);
- EVSTDW(16); EVSTDW(17); EVSTDW(18); EVSTDW(19);
- EVSTDW(20); EVSTDW(21); EVSTDW(22); EVSTDW(23);
- EVSTDW(24); EVSTDW(25); EVSTDW(26); EVSTDW(27);
- EVSTDW(28); EVSTDW(29); EVSTDW(30); EVSTDW(31);
- }
-#undef EVSTDW
-
- return (vec[0]);
-}
-
-/*
- * Load the given value into the high word of the requested register.
- */
-static void
-spe_load_reg_high(int reg, uint32_t val)
-{
-#define EVLDW(n) case n: __asm __volatile("evmergelo "#n",%0,"#n \
- :: "r"(val)); break;
- switch (reg) {
- EVLDW(1); EVLDW(2); EVLDW(3); EVLDW(4);
- EVLDW(5); EVLDW(6); EVLDW(7); EVLDW(8);
- EVLDW(9); EVLDW(10); EVLDW(11); EVLDW(12);
- EVLDW(13); EVLDW(14); EVLDW(15); EVLDW(16);
- EVLDW(17); EVLDW(18); EVLDW(19); EVLDW(20);
- EVLDW(21); EVLDW(22); EVLDW(23); EVLDW(24);
- EVLDW(25); EVLDW(26); EVLDW(27); EVLDW(28);
- EVLDW(29); EVLDW(30); EVLDW(31); EVLDW(0);
- }
-#undef EVLDW
-
-}
-
-void
-spe_handle_fpdata(struct trapframe *frame)
-{
- struct fpemu fpemu;
- struct fpn *result;
- uint32_t instr, instr_sec_op;
- uint32_t cr_shift, ra, rb, rd, src;
- uint32_t high, low, res, tmp; /* For vector operations. */
- uint32_t spefscr = 0;
- uint32_t ftod_res[2];
- int width; /* Single, Double, Vector, Integer */
- int err;
- uint32_t msr;
-
- err = fueword32((void *)frame->srr0, &instr);
-
- if (err != 0)
- return;
- /* Fault. */;
-
- if ((instr >> OPC_SHIFT) != SPE_OPC)
- return;
-
- msr = mfmsr();
- /*
- * 'cr' field is the upper 3 bits of rd. Magically, since a) rd is 5
- * bits, b) each 'cr' field is 4 bits, and c) Only the 'GT' bit is
- * modified for most compare operations, the full value of rd can be
- * used as a shift value.
- */
- rd = (instr >> 21) & 0x1f;
- ra = (instr >> 16) & 0x1f;
- rb = (instr >> 11) & 0x1f;
- src = (instr >> 5) & 0x7;
- cr_shift = 28 - (rd & 0x1f);
-
- instr_sec_op = (instr & 0x7ff);
-
- memset(&fpemu, 0, sizeof(fpemu));
-
- width = NONE;
- switch (src) {
- case SPE:
- mtmsr(msr | PSL_VEC);
- switch (instr_sec_op) {
- case EVFSABS:
- high = spe_save_reg_high(ra) & ~(1U << 31);
- frame->fixreg[rd] = frame->fixreg[ra] & ~(1U << 31);
- spe_load_reg_high(rd, high);
- break;
- case EVFSNABS:
- high = spe_save_reg_high(ra) | (1U << 31);
- frame->fixreg[rd] = frame->fixreg[ra] | (1U << 31);
- spe_load_reg_high(rd, high);
- break;
- case EVFSNEG:
- high = spe_save_reg_high(ra) ^ (1U << 31);
- frame->fixreg[rd] = frame->fixreg[ra] ^ (1U << 31);
- spe_load_reg_high(rd, high);
- break;
- default:
- /* High word */
- spe_explode(&fpemu, &fpemu.fe_f1, SINGLE,
- spe_save_reg_high(ra), 0);
- spe_explode(&fpemu, &fpemu.fe_f2, SINGLE,
- spe_save_reg_high(rb), 0);
- high = spe_emu_instr(instr_sec_op, &fpemu, &result,
- &tmp);
-
- if (high < 0)
- spe_load_reg_high(rd, tmp);
-
- spefscr = fpscr_to_spefscr(fpemu.fe_cx) << 16;
- /* Clear the fpemu to start over on the lower bits. */
- memset(&fpemu, 0, sizeof(fpemu));
-
- /* Now low word */
- spe_explode(&fpemu, &fpemu.fe_f1, SINGLE,
- frame->fixreg[ra], 0);
- spe_explode(&fpemu, &fpemu.fe_f2, SINGLE,
- frame->fixreg[rb], 0);
- spefscr |= fpscr_to_spefscr(fpemu.fe_cx);
- low = spe_emu_instr(instr_sec_op, &fpemu, &result,
- &frame->fixreg[rd]);
- if (instr_sec_op == EVFSCMPEQ ||
- instr_sec_op == EVFSCMPGT ||
- instr_sec_op == EVFSCMPLT) {
- res = (high << 3) | (low << 2) |
- ((high | low) << 1) | (high & low);
- width = NONE;
- } else
- width = VECTOR;
- break;
- }
- goto end;
-
- case SPFP:
- switch (instr_sec_op) {
- case EFSABS:
- frame->fixreg[rd] = frame->fixreg[ra] & ~(1U << 31);
- break;
- case EFSNABS:
- frame->fixreg[rd] = frame->fixreg[ra] | (1U << 31);
- break;
- case EFSNEG:
- frame->fixreg[rd] = frame->fixreg[ra] ^ (1U << 31);
- break;
- case EFSCFD:
- mtmsr(msr | PSL_VEC);
- spe_explode(&fpemu, &fpemu.fe_f3, DOUBLE,
- spe_save_reg_high(rb), frame->fixreg[rb]);
- result = &fpemu.fe_f3;
- width = SINGLE;
- break;
- default:
- spe_explode(&fpemu, &fpemu.fe_f1, SINGLE,
- frame->fixreg[ra], 0);
- spe_explode(&fpemu, &fpemu.fe_f2, SINGLE,
- frame->fixreg[rb], 0);
- width = SINGLE;
- }
- break;
- case DPFP:
- mtmsr(msr | PSL_VEC);
- switch (instr_sec_op) {
- case EFDABS:
- high = spe_save_reg_high(ra) & ~(1U << 31);
- frame->fixreg[rd] = frame->fixreg[ra];
- spe_load_reg_high(rd, high);
- break;
- case EFDNABS:
- high = spe_save_reg_high(ra) | (1U << 31);
- frame->fixreg[rd] = frame->fixreg[ra];
- spe_load_reg_high(rd, high);
- break;
- case EFDNEG:
- high = spe_save_reg_high(ra) ^ (1U << 31);
- frame->fixreg[rd] = frame->fixreg[ra];
- spe_load_reg_high(rd, high);
- break;
- case EFDCFS:
- spe_explode(&fpemu, &fpemu.fe_f3, SINGLE,
- frame->fixreg[rb], 0);
- result = &fpemu.fe_f3;
- width = DOUBLE;
- break;
- default:
- spe_explode(&fpemu, &fpemu.fe_f1, DOUBLE,
- spe_save_reg_high(ra), frame->fixreg[ra]);
- spe_explode(&fpemu, &fpemu.fe_f2, DOUBLE,
- spe_save_reg_high(rb), frame->fixreg[rb]);
- width = DOUBLE;
- }
- break;
- }
- switch (instr_sec_op) {
- case EFDCFS:
- case EFSCFD:
- /* Already handled. */
- break;
- default:
- res = spe_emu_instr(instr_sec_op, &fpemu, &result,
- &frame->fixreg[rd]);
- if (res != -1)
- res <<= 2;
- break;
- }
-
- switch (instr_sec_op & SPE_INST_MASK) {
- case ECMPEQ:
- case ECMPGT:
- case ECMPLT:
- frame->cr &= ~(0xf << cr_shift);
- frame->cr |= (res << cr_shift);
- break;
- case ECTUI:
- case ECTUIZ:
- case ECTSI:
- case ECTSIZ:
- break;
- default:
- switch (width) {
- case NONE:
- case VECTOR:
- break;
- case SINGLE:
- frame->fixreg[rd] = fpu_ftos(&fpemu, result);
- break;
- case DOUBLE:
- spe_load_reg_high(rd, fpu_ftod(&fpemu, result, ftod_res));
- frame->fixreg[rd] = ftod_res[1];
- break;
- default:
- panic("Unknown storage width %d", width);
- break;
- }
- }
-
-end:
- spefscr |= (mfspr(SPR_SPEFSCR) & ~SPEFSCR_FINVS);
- mtspr(SPR_SPEFSCR, spefscr);
- frame->srr0 += 4;
- mtmsr(msr);
-
- return;
-}
-
-void
-spe_handle_fpround(struct trapframe *frame)
-{
-
- /*
- * Punt fpround exceptions for now. This leaves the truncated result in
- * the register. We'll deal with overflow/underflow later.
- */
- return;
-}
diff --git a/sys/powerpc/booke/trap_subr.S b/sys/powerpc/booke/trap_subr.S
index 523d8b57d3bc..789e746dba3e 100644
--- a/sys/powerpc/booke/trap_subr.S
+++ b/sys/powerpc/booke/trap_subr.S
@@ -636,28 +636,6 @@ INTERRUPT(int_vecast)
b trap_common
-#ifdef __SPE__
-/*****************************************************************************
- * Floating point Assist interrupt
- ****************************************************************************/
-INTERRUPT(int_spe_fpdata)
- STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1)
- FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_SPFPD)
- addi %r3, %r1, CALLSIZE
- bl spe_handle_fpdata
- FRAME_LEAVE(SPR_SRR0, SPR_SRR1)
- rfi
-
-INTERRUPT(int_spe_fpround)
- STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1)
- FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_SPFPR)
- addi %r3, %r1, CALLSIZE
- bl spe_handle_fpround
- FRAME_LEAVE(SPR_SRR0, SPR_SRR1)
- rfi
-#endif
-
-
#ifdef HWPMC_HOOKS
/*****************************************************************************
* PMC Interrupt
diff --git a/sys/powerpc/conf/MPC85XXSPE b/sys/powerpc/conf/MPC85XXSPE
deleted file mode 100644
index ca828137ed72..000000000000
--- a/sys/powerpc/conf/MPC85XXSPE
+++ /dev/null
@@ -1,151 +0,0 @@
-#
-# Custom kernel for Freescale MPC85XX development boards like the CDS etc.
-#
-#
-
-cpu BOOKE
-cpu BOOKE_E500
-ident MPC85XXSPE
-
-machine powerpc powerpcspe
-
-include "dpaa/config.dpaa"
-makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols
-makeoptions WITH_CTF=1
-
-options FPU_EMU
-options MAXCPU=2
-
-options _KPOSIX_PRIORITY_SCHEDULING
-options ALT_BREAK_TO_DEBUGGER
-options BREAK_TO_DEBUGGER
-options BOOTP
-options BOOTP_NFSROOT
-#options BOOTP_NFSV3
-options CD9660
-options COMPAT_43
-options DDB
-#options DEADLKRES
-options DEVICE_POLLING
-#options DIAGNOSTIC
-options FDT
-#makeoptions FDT_DTS_FILE=mpc8555cds.dts
-options FFS
-options GDB
-options GEOM_PART_GPT
-options GEOM_LABEL # Provides labelization
-options VIMAGE
-options INET
-options INET6
-options TCP_HHOOK # hhook(9) framework for TCP
-options INVARIANTS
-options INVARIANT_SUPPORT
-options KDB
-options KTRACE
-options MD_ROOT
-options MPC85XX
-options MSDOSFS
-options NFS_ROOT
-options NFSCL
-options NFSLOCKD
-options PRINTF_BUFR_SIZE=128 # Prevent printf output being interspersed.
-options PROCFS
-options PSEUDOFS
-options SCHED_ULE
-options CAPABILITIES
-options CAPABILITY_MODE
-options SMP
-options SYSVMSG
-options SYSVSEM
-options SYSVSHM
-options WITNESS
-options WITNESS_SKIPSPIN
-
-# The powerpcspe target arch can run non Altivec/VMX powerpc binaries
-# Keep COMPAT options in sync with powerpc target arch.
-options COMPAT_FREEBSD6 # Compatible with FreeBSD6
-options COMPAT_FREEBSD7 # Compatible with FreeBSD7
-options COMPAT_FREEBSD9 # Compatible with FreeBSD9
-options COMPAT_FREEBSD10 # Compatible with FreeBSD10
-options COMPAT_FREEBSD11 # Compatible with FreeBSD11
-options COMPAT_FREEBSD12 # Compatible with FreeBSD12
-options COMPAT_FREEBSD13 # Compatible with FreeBSD13
-options COMPAT_FREEBSD14 # Compatible with FreeBSD14
-
-options HWPMC_HOOKS
-options KDTRACE_HOOKS # Kernel DTrace hooks
-options DDB_CTF # Kernel ELF linker loads CTF data
-
-device ata
-device bpf
-device cfi
-device cpufreq
-device crypto
-device cryptodev
-device da
-device ds1307
-device ds1553
-device iflib
-device em
-device alc
-device ether
-device fxp
-device gpio
-device gpiopower
-device iic
-device iicbus
-#device isa
-device loop
-device md
-device miibus
-device mmc
-device mmcsd
-device pass
-device pci
-device quicc
-#device rl
-device scbus
-device scc
-device sdhci
-device sec
-device spibus
-device spigen
-device tsec
-device dpaa
-device tuntap
-device uart
-options USB_DEBUG # enable debug msgs
-#device uhci
-device ehci
-device hkbd
-device ukbd
-device hms
-device ums
-device umass
-device usb
-device usbhid
-device vlan
-
-# VirtIO support
-device virtio # Generic VirtIO bus (required)
-device virtio_pci # VirtIO PCI device
-device vtnet # VirtIO Ethernet device
-device virtio_blk # VirtIO Block device
-device virtio_scsi # VirtIO SCSI device
-device virtio_balloon # VirtIO Memory Balloon device
-
-# P1022 DIU
-device diu
-device videomode
-device vt
-device fbd
-
-# evdev interface
-options EVDEV_SUPPORT # evdev support in legacy drivers
-device evdev # input event device support
-device uinput # install /dev/uinput cdev
-
-# HID support
-options HID_DEBUG # enable debug msgs
-device hid # Generic HID support
-device hidbus # Generic HID bus
diff --git a/sys/powerpc/include/atomic.h b/sys/powerpc/include/atomic.h
index b2d7549e5bd0..60fb678e6b41 100644
--- a/sys/powerpc/include/atomic.h
+++ b/sys/powerpc/include/atomic.h
@@ -165,6 +165,28 @@ _ATOMIC_ADD(long)
* { *p &= ~v; }
*/
+#ifdef ISA_206_ATOMICS
+#define __atomic_clear_char(p, v, t) \
+ __asm __volatile( \
+ "1: lbarx %0, 0, %2\n" \
+ " andc %0, %0, %3\n" \
+ " stbcx. %0, 0, %2\n" \
+ " bne- 1b\n" \
+ : "=&r" (t), "=m" (*p) \
+ : "r" (p), "r" (v), "m" (*p) \
+ : "cr0", "memory") \
+ /* __atomic_clear_short */
+#define __atomic_clear_short(p, v, t) \
+ __asm __volatile( \
+ "1: lharx %0, 0, %2\n" \
+ " andc %0, %0, %3\n" \
+ " sthcx. %0, 0, %2\n" \
+ " bne- 1b\n" \
+ : "=&r" (t), "=m" (*p) \
+ : "r" (p), "r" (v), "m" (*p) \
+ : "cr0", "memory") \
+ /* __atomic_clear_short */
+#endif
#define __atomic_clear_int(p, v, t) \
__asm __volatile( \
"1: lwarx %0, 0, %2\n" \
@@ -222,6 +244,11 @@ _ATOMIC_ADD(long)
} \
/* _ATOMIC_CLEAR */
+#ifdef ISA_206_ATOMICS
+_ATOMIC_CLEAR(char)
+_ATOMIC_CLEAR(short)
+#endif
+
_ATOMIC_CLEAR(int)
_ATOMIC_CLEAR(long)
@@ -265,6 +292,28 @@ _ATOMIC_CLEAR(long)
* atomic_set(p, v)
* { *p |= v; }
*/
+#ifdef ISA_206_ATOMICS
+#define __atomic_set_char(p, v, t) \
+ __asm __volatile( \
+ "1: lbarx %0, 0, %2\n" \
+ " or %0, %3, %0\n" \
+ " stbcx. %0, 0, %2\n" \
+ " bne- 1b\n" \
+ : "=&r" (t), "=m" (*p) \
+ : "r" (p), "r" (v), "m" (*p) \
+ : "cr0", "memory") \
+ /* __atomic_set_char */
+#define __atomic_set_short(p, v, t) \
+ __asm __volatile( \
+ "1: lharx %0, 0, %2\n" \
+ " or %0, %3, %0\n" \
+ " sthcx. %0, 0, %2\n" \
+ " bne- 1b\n" \
+ : "=&r" (t), "=m" (*p) \
+ : "r" (p), "r" (v), "m" (*p) \
+ : "cr0", "memory") \
+ /* __atomic_set_short */
+#endif
#define __atomic_set_int(p, v, t) \
__asm __volatile( \
@@ -323,6 +372,11 @@ _ATOMIC_CLEAR(long)
} \
/* _ATOMIC_SET */
+#ifdef ISA_206_ATOMICS
+_ATOMIC_SET(char)
+_ATOMIC_SET(short)
+#endif
+
_ATOMIC_SET(int)
_ATOMIC_SET(long)
@@ -1140,34 +1194,10 @@ atomic_thread_fence_seq_cst(void)
#define atomic_set_short atomic_set_16
#define atomic_clear_short atomic_clear_16
#else
-
-static __inline void
-atomic_set_short(volatile u_short *p, u_short bit)
-{
- u_short v;
-
- v = atomic_load_short(p);
- for (;;) {
- if (atomic_fcmpset_16(p, &v, v | bit))
- break;
- }
-}
-
-static __inline void
-atomic_clear_short(volatile u_short *p, u_short bit)
-{
- u_short v;
-
- v = atomic_load_short(p);
- for (;;) {
- if (atomic_fcmpset_16(p, &v, v & ~bit))
- break;
- }
-}
-
+#define atomic_set_8 atomic_set_char
+#define atomic_clear_8 atomic_clear_char
#define atomic_set_16 atomic_set_short
#define atomic_clear_16 atomic_clear_short
-
#endif /* ISA_206_ATOMICS */
/* These need sys/_atomic_subword.h on non-ISA-2.06-atomic platforms. */
diff --git a/sys/powerpc/include/ieeefp.h b/sys/powerpc/include/ieeefp.h
index 3aa92ff4522b..829e167aefae 100644
--- a/sys/powerpc/include/ieeefp.h
+++ b/sys/powerpc/include/ieeefp.h
@@ -10,19 +10,11 @@
/* Deprecated historical FPU control interface */
typedef int fp_except_t;
-#ifdef __SPE__
-#define FP_X_OFL 0x01 /* overflow exception */
-#define FP_X_UFL 0x02 /* underflow exception */
-#define FP_X_DZ 0x04 /* divide-by-zero exception */
-#define FP_X_INV 0x08 /* invalid operation exception */
-#define FP_X_IMP 0x10 /* imprecise (loss of precision) */
-#else
#define FP_X_IMP 0x01 /* imprecise (loss of precision) */
#define FP_X_DZ 0x02 /* divide-by-zero exception */
#define FP_X_UFL 0x04 /* underflow exception */
#define FP_X_OFL 0x08 /* overflow exception */
#define FP_X_INV 0x10 /* invalid operation exception */
-#endif
typedef enum {
FP_RN=0, /* round to nearest representable number */
diff --git a/sys/powerpc/include/param.h b/sys/powerpc/include/param.h
index a190f60c02cc..a154859c0459 100644
--- a/sys/powerpc/include/param.h
+++ b/sys/powerpc/include/param.h
@@ -57,13 +57,9 @@
#define MACHINE_ARCH "powerpc64"
#endif
#else
-#ifdef __SPE__
-#define MACHINE_ARCH "powerpcspe"
-#else
#define MACHINE_ARCH "powerpc"
#endif
#endif
-#endif
#define MID_MACHINE MID_POWERPC
#ifdef __powerpc64__
#ifndef MACHINE_ARCH32
diff --git a/sys/powerpc/mpc85xx/pci_mpc85xx.c b/sys/powerpc/mpc85xx/pci_mpc85xx.c
index 8e349df03a51..33dca7ab6c50 100644
--- a/sys/powerpc/mpc85xx/pci_mpc85xx.c
+++ b/sys/powerpc/mpc85xx/pci_mpc85xx.c
@@ -360,13 +360,13 @@ fsl_pcib_attach(device_t dev)
error = ofw_pcib_init(dev);
if (error)
- return (error);
+ goto err;
/*
* Configure decode windows for PCI(E) access.
*/
if (fsl_pcib_decode_win(node, sc) != 0)
- goto err;
+ goto err1;
cfgreg = fsl_pcib_cfgread(sc, 0, 0, 0, PCIR_COMMAND, 2);
cfgreg |= PCIM_CMD_SERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN |
@@ -392,6 +392,7 @@ fsl_pcib_attach(device_t dev)
if (sc->sc_pcie) {
ltssm = fsl_pcib_cfgread(sc, 0, 0, 0, PCIR_LTSSM, 1);
if (ltssm < LTSSM_STAT_L0) {
+ /* Stay attached, it may change later. */
if (bootverbose)
printf("PCI %d: no PCIE link, skipping\n",
device_get_unit(dev));
@@ -432,7 +433,15 @@ fsl_pcib_attach(device_t dev)
return (ofw_pcib_attach(dev));
+err1:
+ ofw_pcib_fini(dev);
err:
+ if (sc->sc_irq_res != NULL)
+ bus_release_resource(dev, sc->sc_irq_res);
+ if (sc->sc_res != NULL)
+ bus_release_resource(dev, sc->sc_res);
+ mtx_destroy(&sc->sc_cfg_mtx);
+
return (ENXIO);
}
@@ -680,9 +689,15 @@ fsl_pcib_detach(device_t dev)
return (error);
sc = device_get_softc(dev);
+ ofw_pcib_fini(dev);
mtx_destroy(&sc->sc_cfg_mtx);
+ if (sc->sc_irq_res != NULL)
+ bus_release_resource(dev, sc->sc_irq_res);
+ if (sc->sc_res != NULL)
+ bus_release_resource(dev, sc->sc_res);
+
return (0);
}
diff --git a/sys/powerpc/mpc85xx/platform_mpc85xx.c b/sys/powerpc/mpc85xx/platform_mpc85xx.c
index 6653c40b01a3..cc2ad829eb05 100644
--- a/sys/powerpc/mpc85xx/platform_mpc85xx.c
+++ b/sys/powerpc/mpc85xx/platform_mpc85xx.c
@@ -302,14 +302,26 @@ mpc85xx_smp_first_cpu(platform_t plat, struct cpuref *cpuref)
static int
mpc85xx_smp_next_cpu(platform_t plat, struct cpuref *cpuref)
{
+ phandle_t node;
+ pcell_t reg;
+ int i;
if (cpu >= maxcpu)
return (ENOENT);
cpuref->cr_cpuid = cpu++;
- cpuref->cr_hwref = cpuref->cr_cpuid;
+
+ node = OF_finddevice("/cpus");
+ for (i = 0, node = OF_child(node); i < cpuref->cr_cpuid;
+ i++, node = OF_peer(node))
+ ;
+ if (OF_getencprop(node, "reg", &reg, sizeof(reg)) > 0)
+ cpuref->cr_hwref = reg;
+ else
+ cpuref->cr_hwref = cpuref->cr_cpuid;
if (bootverbose)
- printf("powerpc_smp_next_cpu: cpuid %d\n", cpuref->cr_cpuid);
+ printf("powerpc_smp_next_cpu: cpuid %d, hwref %d\n",
+ cpuref->cr_cpuid, (int)cpuref->cr_hwref);
return (0);
}
diff --git a/sys/powerpc/powermac/platform_powermac.c b/sys/powerpc/powermac/platform_powermac.c
index c63ef521ca8f..cbb2e212a00d 100644
--- a/sys/powerpc/powermac/platform_powermac.c
+++ b/sys/powerpc/powermac/platform_powermac.c
@@ -405,21 +405,88 @@ powermac_register_timebase(device_t dev, powermac_tb_disable_t cb)
freeze_timebase = cb;
}
+/**
+ * @brief Implement a default platform AP/BSP SMP timebase synchronisation
+ *
+ * Some powermac platforms don't have a freeze/unfreeze method.
+ * Here just try our best to force synchronisation.
+ */
static void
-powermac_smp_timebase_sync(platform_t plat, u_long tb, int ap)
+powermac_smp_timebase_sync_fallback(platform_t plat, u_long tb, int ap)
+{
+ static volatile bool tb_ready = false;
+ static volatile int cpu_done;
+
+ if (bootverbose)
+ printf("[%d] %s: called, AP tb=0x%lx tb=0x%lx\n",
+ ap, __func__, tb, mftb());
+
+ /* Do initial timebase sync */
+ mttb(tb);
+
+ if (ap) {
+ /*
+ * APs - wait until the BSP signals its ready to sync,
+ * then wait for all CPUs to be ready.
+ */
+ critical_enter();
+ while (!tb_ready)
+ atomic_thread_fence_seq_cst();
+ atomic_add_int(&cpu_done, 1);
+ do {
+ atomic_thread_fence_seq_cst();
+ } while (cpu_done < mp_ncpus);
+ mttb(tb);
+ critical_exit();
+ } else {
+ /*
+ * BSP - signify that the timebase sync is about to start,
+ * then wait for other CPUs to be ready.
+ */
+ critical_enter();
+ /* Ensure cpu_done is zeroed so we can resync at runtime */
+ atomic_store_int(&cpu_done, 0);
+ tb_ready = true;
+ atomic_add_int(&cpu_done, 1);
+ do {
+ atomic_thread_fence_seq_cst();
+ } while (cpu_done < mp_ncpus);
+ mttb(tb);
+ /* Reset tb_ready so we can resync at runtime */
+ tb_ready = false;
+ critical_exit();
+ }
+ if (bootverbose)
+ printf("[%d] %s: finished; AP tb=0x%lx called tb=0x%lx\n",
+ ap, __func__, tb, mftb());
+}
+
+/**
+ * @brief Implement freeze/unfreeze AP/BSP SMP timebase synchronisation
+ *
+ * This implements SMP timebase synchronisation for hardware that
+ * implements freezing a shared timebase clock source.
+ *
+ * The BSP will freeze the timebase and signal the APs to program their
+ * local timebase with the shared timebase value. The BSP will then
+ * unfreeze the timebase clock, allowing all CPUs to march forward
+ * from the same base timebase value.
+ */
+static void
+powermac_smp_timebase_sync_freeze(platform_t plat, u_long tb, int ap)
{
- static volatile bool tb_ready;
+ static volatile bool tb_ready = false;
static volatile int cpu_done;
+ if (bootverbose)
+ printf("[%d] %s: called, AP tb=0x%lx tb=0x%lx\n",
+ ap, __func__, tb, mftb());
+
/*
- * XXX Temporary fallback for platforms we don't know how to freeze.
- *
* This needs to be replaced with a cpu-to-cpu software sync
* protocol, because this is not a consistent way to sync timebase.
*/
mttb(tb);
- if (freeze_timebase == dummy_timebase)
- return;
if (ap) {
/* APs. Hold off until we get a stable timebase. */
@@ -428,25 +495,39 @@ powermac_smp_timebase_sync(platform_t plat, u_long tb, int ap)
atomic_thread_fence_seq_cst();
mttb(tb);
atomic_add_int(&cpu_done, 1);
- while (cpu_done < mp_ncpus)
+ do {
atomic_thread_fence_seq_cst();
+ } while (cpu_done < mp_ncpus);
critical_exit();
} else {
/* BSP */
critical_enter();
/* Ensure cpu_done is zeroed so we can resync at runtime */
- atomic_set_int(&cpu_done, 0);
+ atomic_store_int(&cpu_done, 0);
freeze_timebase(powermac_tb_dev, true);
tb_ready = true;
mttb(tb);
atomic_add_int(&cpu_done, 1);
- while (cpu_done < mp_ncpus)
+ do {
atomic_thread_fence_seq_cst();
+ } while (cpu_done < mp_ncpus);
freeze_timebase(powermac_tb_dev, false);
/* Reset tb_ready so we can resync at runtime */
tb_ready = false;
critical_exit();
}
+ if (bootverbose)
+ printf("[%d] %s: finished; AP tb=0x%lx called tb=0x%lx\n",
+ ap, __func__, tb, mftb());
+}
+
+static void
+powermac_smp_timebase_sync(platform_t plat, u_long tb, int ap)
+{
+ if (freeze_timebase == dummy_timebase)
+ powermac_smp_timebase_sync_fallback(plat, tb, ap);
+ else
+ powermac_smp_timebase_sync_freeze(plat, tb, ap);
}
/* Fallback freeze. In case no real handler is found in the device tree. */
diff --git a/sys/powerpc/powerpc/exec_machdep.c b/sys/powerpc/powerpc/exec_machdep.c
index 8a33d0f589a7..00c04b4ddbaa 100644
--- a/sys/powerpc/powerpc/exec_machdep.c
+++ b/sys/powerpc/powerpc/exec_machdep.c
@@ -646,18 +646,6 @@ cpu_update_pcb(struct thread *td)
pcb->pcb_dscr = mfspr(SPR_DSCRP);
#endif
-#if defined(__SPE__)
- /*
- * On E500v2, single-precision scalar instructions and access to
- * SPEFSCR may be used without PSL_VEC turned on, as long as they
- * limit themselves to the low word of the registers.
- *
- * As such, we need to unconditionally save SPEFSCR, even though
- * it is also updated in save_vec_nodrop().
- */
- pcb->pcb_vec.vscr = mfspr(SPR_SPEFSCR);
-#endif
-
if (pcb_flags & PCB_FPU)
save_fpu_nodrop(td);
@@ -1094,8 +1082,8 @@ cpu_thread_alloc(struct thread *td)
{
struct pcb *pcb;
- pcb = (struct pcb *)((td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
- sizeof(struct pcb)) & ~0x2fUL);
+ pcb = (struct pcb *)__align_down(td->td_kstack + td->td_kstack_pages *
+ PAGE_SIZE - sizeof(struct pcb), 0x40);
td->td_pcb = pcb;
td->td_frame = (struct trapframe *)pcb - 1;
}
@@ -1155,9 +1143,6 @@ cpu_copy_thread(struct thread *td, struct thread *td0)
pcb2->pcb_context[0] = pcb2->pcb_lr;
#endif
pcb2->pcb_cpu.aim.usr_vsid = 0;
-#ifdef __SPE__
- pcb2->pcb_vec.vscr = SPEFSCR_DFLT;
-#endif
/* Setup to release spin count in fork_exit(). */
td->td_md.md_spinlock_count = 1;
@@ -1215,9 +1200,6 @@ cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
}
td->td_pcb->pcb_flags = 0;
-#ifdef __SPE__
- td->td_pcb->pcb_vec.vscr = SPEFSCR_DFLT;
-#endif
td->td_retval[0] = (register_t)entry;
td->td_retval[1] = 0;
diff --git a/sys/powerpc/powerpc/fpu.c b/sys/powerpc/powerpc/fpu.c
index cc8f22f7dda3..3e43656874a7 100644
--- a/sys/powerpc/powerpc/fpu.c
+++ b/sys/powerpc/powerpc/fpu.c
@@ -317,7 +317,6 @@ disable_fpu(struct thread *td)
pcb->pcb_flags &= ~(PCB_FPU | PCB_VSX);
}
-#ifndef __SPE__
/*
* XXX: Implement fpu_kern_alloc_ctx/fpu_kern_free_ctx once fpu_kern_enter and
* fpu_kern_leave can handle !FPU_KERN_NOCTX.
@@ -403,5 +402,3 @@ is_fpu_kern_thread(u_int flags __unused)
curpcb = curthread->td_pcb;
return ((curpcb->pcb_flags & PCB_KERN_FPU) != 0);
}
-
-#endif /* !__SPE__ */
diff --git a/sys/powerpc/powerpc/machdep.c b/sys/powerpc/powerpc/machdep.c
index e9979712aa9c..f4a065e1ce46 100644
--- a/sys/powerpc/powerpc/machdep.c
+++ b/sys/powerpc/powerpc/machdep.c
@@ -488,9 +488,8 @@ powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
/*
* Finish setting up thread0.
*/
- thread0.td_pcb = (struct pcb *)
- ((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE -
- sizeof(struct pcb)) & ~15UL);
+ thread0.td_pcb = (struct pcb *)__align_down(thread0.td_kstack +
+ thread0.td_kstack_pages * PAGE_SIZE - sizeof(struct pcb), 16);
bzero((void *)thread0.td_pcb, sizeof(struct pcb));
pc->pc_curpcb = thread0.td_pcb;
diff --git a/sys/powerpc/powerpc/ptrace_machdep.c b/sys/powerpc/powerpc/ptrace_machdep.c
index 619faabf3ce0..1b6afdf3c015 100644
--- a/sys/powerpc/powerpc/ptrace_machdep.c
+++ b/sys/powerpc/powerpc/ptrace_machdep.c
@@ -37,11 +37,7 @@
#include <machine/md_var.h>
#include <machine/pcb.h>
-#ifdef __SPE__
-#define PPC_FEATURE_VECTOR PPC_FEATURE_HAS_SPE
-#else
#define PPC_FEATURE_VECTOR PPC_FEATURE_HAS_ALTIVEC
-#endif
int
cpu_ptrace(struct thread *td, int req, void *addr, int data)
diff --git a/sys/powerpc/powerpc/swtch32.S b/sys/powerpc/powerpc/swtch32.S
index ef1a397bf74d..7fc0641722aa 100644
--- a/sys/powerpc/powerpc/swtch32.S
+++ b/sys/powerpc/powerpc/swtch32.S
@@ -119,10 +119,6 @@ ENTRY(cpu_switch)
bl save_vec
.L2:
-#if defined(__SPE__)
- mfspr %r3,SPR_SPEFSCR
- stw %r3,PCB_VSCR(%r17)
-#endif
mr %r3,%r14 /* restore old thread ptr */
bl pmap_deactivate /* Deactivate the current pmap */
@@ -176,10 +172,6 @@ blocked_loop:
bl enable_vec
.L4:
-#if defined(__SPE__)
- lwz %r3,PCB_VSCR(%r17)
- mtspr SPR_SPEFSCR,%r3
-#endif
/* thread to restore is in r3 */
mr %r3,%r17 /* Recover PCB ptr */
lmw %r12,PCB_CONTEXT(%r3) /* Load the non-volatile GP regs */
@@ -224,9 +216,5 @@ ENTRY(fork_trampoline)
trapframe to simulate FRAME_SETUP
does when allocating space for
a frame pointer/saved LR */
-#ifdef __SPE__
- li %r3,SPEFSCR_DFLT
- mtspr SPR_SPEFSCR, %r3
-#endif
b trapexit
END(fork_trampoline)
diff --git a/sys/powerpc/powerpc/trap.c b/sys/powerpc/powerpc/trap.c
index 203d270a504b..6843ce49b48f 100644
--- a/sys/powerpc/powerpc/trap.c
+++ b/sys/powerpc/powerpc/trap.c
@@ -861,41 +861,6 @@ fix_unaligned(struct thread *td, struct trapframe *frame)
int indicator, reg;
double *fpr;
-#ifdef __SPE__
- indicator = (frame->cpu.booke.esr & (ESR_ST|ESR_SPE));
- if (indicator & ESR_SPE) {
- if (copyin((void *)frame->srr0, &inst, sizeof(inst)) != 0)
- return (-1);
- reg = EXC_ALI_INST_RST(inst);
- fpr = (double *)td->td_pcb->pcb_vec.vr[reg];
- fputhread = PCPU_GET(vecthread);
-
- /* Juggle the SPE to ensure that we've initialized
- * the registers, and that their current state is in
- * the PCB.
- */
- if (fputhread != td) {
- if (fputhread)
- save_vec(fputhread);
- enable_vec(td);
- }
- save_vec(td);
-
- if (!(indicator & ESR_ST)) {
- if (copyin((void *)frame->dar, fpr,
- sizeof(double)) != 0)
- return (-1);
- frame->fixreg[reg] = td->td_pcb->pcb_vec.vr[reg][1];
- enable_vec(td);
- } else {
- td->td_pcb->pcb_vec.vr[reg][1] = frame->fixreg[reg];
- if (copyout(fpr, (void *)frame->dar,
- sizeof(double)) != 0)
- return (-1);
- }
- return (0);
- }
-#else
#ifdef BOOKE
indicator = (frame->cpu.booke.esr & ESR_ST) ? EXC_ALI_STFD : EXC_ALI_LFD;
#else
@@ -939,7 +904,6 @@ fix_unaligned(struct thread *td, struct trapframe *frame)
return (0);
break;
}
-#endif
return (-1);
}
diff --git a/sys/powerpc/powerpc/vm_machdep.c b/sys/powerpc/powerpc/vm_machdep.c
index d47beedb595e..00fdc301a7e7 100644
--- a/sys/powerpc/powerpc/vm_machdep.c
+++ b/sys/powerpc/powerpc/vm_machdep.c
@@ -123,8 +123,8 @@ cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
if (td1 == curthread)
cpu_update_pcb(td1);
- pcb = (struct pcb *)((td2->td_kstack +
- td2->td_kstack_pages * PAGE_SIZE - sizeof(struct pcb)) & ~0x2fUL);
+ pcb = (struct pcb *)__align_down(td2->td_kstack +
+ td2->td_kstack_pages * PAGE_SIZE - sizeof(struct pcb), 0x40);
td2->td_pcb = pcb;
/* Copy the pcb */
diff --git a/sys/riscv/include/acpica_machdep.h b/sys/riscv/include/acpica_machdep.h
new file mode 100644
index 000000000000..ad162f5c0907
--- /dev/null
+++ b/sys/riscv/include/acpica_machdep.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2026 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#ifndef __ACPICA_MACHDEP_H__
+#define __ACPICA_MACHDEP_H__
+
+/*
+ * This is a placeholder until full ACPI support for RISC-V emerges.
+ * With this we can include acpi.h from LinuxKPI and avoid (major) local
+ * changes to compile drivers that are otherwise fine on RISC-V.
+ */
+
+#endif /* __ACPICA_MACHDEP_H__ */
diff --git a/sys/riscv/include/cpufunc.h b/sys/riscv/include/cpufunc.h
index 75b22632c546..c39f17131eb7 100644
--- a/sys/riscv/include/cpufunc.h
+++ b/sys/riscv/include/cpufunc.h
@@ -119,6 +119,13 @@ sfence_vma_asid_page(uint64_t asid, uintptr_t addr)
: "memory");
}
+static __inline void
+hfence_gvma(void)
+{
+
+ __asm __volatile("hfence.gvma" ::: "memory");
+}
+
#define rdcycle() csr_read64(cycle)
#define rdtime() csr_read64(time)
#define rdinstret() csr_read64(instret)
diff --git a/sys/riscv/include/vmm.h b/sys/riscv/include/vmm.h
index 361140834805..c346f09cc28c 100644
--- a/sys/riscv/include/vmm.h
+++ b/sys/riscv/include/vmm.h
@@ -104,8 +104,18 @@ enum vm_reg_name {
#define VM_INTINFO_SWINTR (4 << 8)
#ifdef _KERNEL
+#include <machine/vmm_instruction_emul.h>
+
+#define VMM_VCPU_MD_FIELDS \
+ struct vm_exit exitinfo; \
+ uint64_t nextpc; /* (x) next instruction to execute */ \
+ struct fpreg *guestfpu /* (a,i) guest fpu state */
+
+#define VMM_VM_MD_FIELDS \
+ struct vmm_mmio_region mmio_region[VM_MAX_MMIO_REGIONS]
struct vm;
+struct vm_eventinfo;
struct vm_exception;
struct vm_exit;
struct vm_run;
@@ -114,11 +124,13 @@ struct vm_guest_paging;
struct vm_aplic_descr;
struct pmap;
-struct vm_eventinfo {
- void *rptr; /* rendezvous cookie */
- int *sptr; /* suspend cookie */
- int *iptr; /* reqidle cookie */
+struct vmm_mmio_region {
+ uint64_t start;
+ uint64_t end;
+ mem_region_read_t read;
+ mem_region_write_t write;
};
+#define VM_MAX_MMIO_REGIONS 4
#define DECLARE_VMMOPS_FUNC(ret_type, opname, args) \
ret_type vmmops_##opname args
@@ -143,34 +155,13 @@ DECLARE_VMMOPS_FUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min,
vm_offset_t max));
DECLARE_VMMOPS_FUNC(void, vmspace_free, (struct vmspace *vmspace));
-int vm_create(const char *name, struct vm **retvm);
-struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid);
-void vm_disable_vcpu_creation(struct vm *vm);
-void vm_lock_vcpus(struct vm *vm);
-void vm_unlock_vcpus(struct vm *vm);
-void vm_destroy(struct vm *vm);
-int vm_reinit(struct vm *vm);
-const char *vm_name(struct vm *vm);
-
-uint16_t vm_get_maxcpus(struct vm *vm);
-void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
- uint16_t *threads, uint16_t *maxcpus);
-int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
- uint16_t threads, uint16_t maxcpus);
int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val);
int vm_run(struct vcpu *vcpu);
-int vm_suspend(struct vm *vm, enum vm_suspend_how how);
-void* vm_get_cookie(struct vm *vm);
-int vcpu_vcpuid(struct vcpu *vcpu);
+void *vm_get_cookie(struct vm *vm);
void *vcpu_get_cookie(struct vcpu *vcpu);
-struct vm *vcpu_vm(struct vcpu *vcpu);
-struct vcpu *vm_vcpu(struct vm *vm, int cpu);
int vm_get_capability(struct vcpu *vcpu, int type, int *val);
int vm_set_capability(struct vcpu *vcpu, int type, int val);
-int vm_activate_cpu(struct vcpu *vcpu);
-int vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu);
-int vm_resume_cpu(struct vm *vm, struct vcpu *vcpu);
int vm_inject_exception(struct vcpu *vcpu, uint64_t scause);
int vm_attach_aplic(struct vm *vm, struct vm_aplic_descr *descr);
int vm_assert_irq(struct vm *vm, uint32_t irq);
@@ -180,62 +171,7 @@ int vm_raise_msi(struct vm *vm, uint64_t msg, uint64_t addr, int bus, int slot,
struct vm_exit *vm_exitinfo(struct vcpu *vcpu);
void vm_exit_suspended(struct vcpu *vcpu, uint64_t pc);
void vm_exit_debug(struct vcpu *vcpu, uint64_t pc);
-void vm_exit_rendezvous(struct vcpu *vcpu, uint64_t pc);
void vm_exit_astpending(struct vcpu *vcpu, uint64_t pc);
-
-cpuset_t vm_active_cpus(struct vm *vm);
-cpuset_t vm_debug_cpus(struct vm *vm);
-cpuset_t vm_suspended_cpus(struct vm *vm);
-
-static __inline int
-vcpu_rendezvous_pending(struct vm_eventinfo *info)
-{
-
- return (*((uintptr_t *)(info->rptr)) != 0);
-}
-
-static __inline int
-vcpu_suspended(struct vm_eventinfo *info)
-{
-
- return (*info->sptr);
-}
-
-int vcpu_debugged(struct vcpu *vcpu);
-
-enum vcpu_state {
- VCPU_IDLE,
- VCPU_FROZEN,
- VCPU_RUNNING,
- VCPU_SLEEPING,
-};
-
-int vcpu_set_state(struct vcpu *vcpu, enum vcpu_state state, bool from_idle);
-enum vcpu_state vcpu_get_state(struct vcpu *vcpu, int *hostcpu);
-
-static int __inline
-vcpu_is_running(struct vcpu *vcpu, int *hostcpu)
-{
- return (vcpu_get_state(vcpu, hostcpu) == VCPU_RUNNING);
-}
-
-#ifdef _SYS_PROC_H_
-static int __inline
-vcpu_should_yield(struct vcpu *vcpu)
-{
- struct thread *td;
-
- td = curthread;
- return (td->td_ast != 0 || td->td_owepreempt != 0);
-}
-#endif
-
-void *vcpu_stats(struct vcpu *vcpu);
-void vcpu_notify_event(struct vcpu *vcpu);
-struct vm_mem *vm_mem(struct vm *vm);
-
-enum vm_reg_name vm_segment_name(int seg_encoding);
-
#endif /* _KERNEL */
#define VM_DIR_READ 0
diff --git a/sys/riscv/include/vmm_instruction_emul.h b/sys/riscv/include/vmm_instruction_emul.h
index bee63d2f86ba..5041b10569f5 100644
--- a/sys/riscv/include/vmm_instruction_emul.h
+++ b/sys/riscv/include/vmm_instruction_emul.h
@@ -29,6 +29,12 @@
#ifndef _VMM_INSTRUCTION_EMUL_H_
#define _VMM_INSTRUCTION_EMUL_H_
+struct vcpu;
+struct vie;
+struct vre;
+struct vm;
+struct vm_guest_paging;
+
/*
* Callback functions to read and write memory regions.
*/
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index 0deb8b93a6dc..3fc261a15c06 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -578,16 +578,13 @@ pmap_early_alloc_tables(vm_paddr_t *freemempos, int npages)
}
/*
- * Construct the direct map -- a linear mapping of physical memory into
+ * Construct the Direct Map -- a linear mapping of physical memory into
* the kernel address space.
*
* We walk the list of physical memory segments (of arbitrary size and
- * address) mapping each appropriately using L2 and L1 superpages.
- * Consequently, the DMAP address space will have unmapped regions
- * corresponding to any holes between physical memory segments.
- *
- * The lowest usable physical address will always be mapped to
- * DMAP_MIN_ADDRESS.
+ * alignment) mapping each appropriately. Consequently, the DMAP address
+ * space will have unmapped regions corresponding to the holes between
+ * physical memory segments.
*/
static vm_paddr_t
pmap_bootstrap_dmap(pd_entry_t *l1, vm_paddr_t freemempos)
@@ -595,9 +592,9 @@ pmap_bootstrap_dmap(pd_entry_t *l1, vm_paddr_t freemempos)
vm_paddr_t physmap[PHYS_AVAIL_ENTRIES];
vm_offset_t va;
vm_paddr_t min_pa, max_pa, pa, endpa;
- pd_entry_t *l2;
+ pd_entry_t *l3, *l2;
pt_entry_t memattr;
- u_int l1slot, l2slot;
+ u_int l1slot, l2slot, l3slot;
int physmap_idx;
physmap_idx = physmem_avail(physmap, nitems(physmap));
@@ -614,17 +611,58 @@ pmap_bootstrap_dmap(pd_entry_t *l1, vm_paddr_t freemempos)
memattr = pmap_memattr_bits(VM_MEMATTR_DEFAULT);
- /* Walk the physmap table. */
- l2 = NULL;
- l1slot = Ln_ENTRIES; /* sentinel value */
+ /*
+ * Walk the physmap table, using the largest page sizes possible for each
+ * mapping. So, for each physmap entry, map as needed/able:
+ * - 4K/L3 page prefix
+ * - 2M/L2 superpage prefix
+ * - 1G/L1 superpages
+ * - 2M/L2 superpage suffix
+ * - 4K/L3 page suffix
+ */
+ l3 = l2 = NULL;
+ l2slot = l1slot = Ln_ENTRIES; /* sentinel value */
for (int idx = 0; idx < physmap_idx; idx += 2) {
- pa = rounddown(physmap[idx], L2_SIZE);
+ pa = rounddown(physmap[idx], L3_SIZE);
endpa = physmap[idx + 1];
/* Virtual address for this range. */
va = PHYS_TO_DMAP(pa);
- /* Any 1GB possible for this range? */
+ /* Any 2MB possible for this range? */
+ if (roundup(pa, L2_SIZE) + L2_SIZE > endpa)
+ goto l3end;
+
+ /* Loop until the next 2MB boundary. */
+ while ((pa & L2_OFFSET) != 0) {
+ if (l2 == NULL || pmap_l1_index(va) != l1slot) {
+ /* Need to alloc another page table. */
+ l2 = pmap_early_alloc_tables(&freemempos, 1);
+
+ /* Link it. */
+ l1slot = pmap_l1_index(va);
+ pmap_store(&l1[l1slot],
+ L1_PDE((vm_paddr_t)l2, PTE_V));
+ }
+
+ if (l3 == NULL || pmap_l2_index(va) != l2slot) {
+ l3 = pmap_early_alloc_tables(&freemempos, 1);
+
+ /* Link it to L2. */
+ l2slot = pmap_l2_index(va);
+ pmap_store(&l2[l2slot],
+ L2_PDE((vm_paddr_t)l3, PTE_V));
+ }
+
+ /* map l3 pages */
+ l3slot = pmap_l3_index(va);
+ pmap_store(&l3[l3slot], L3_PTE(pa, PTE_KERN | memattr));
+
+ pa += L3_SIZE;
+ va += L3_SIZE;
+ }
+
+ /* Any 1GB possible for remaining range? */
if (roundup(pa, L1_SIZE) + L1_SIZE > endpa)
goto l2end;
@@ -659,7 +697,8 @@ pmap_bootstrap_dmap(pd_entry_t *l1, vm_paddr_t freemempos)
}
l2end:
- while (pa < endpa) {
+ /* Map what we can with 2MB superpages. */
+ while (pa + L2_SIZE - 1 < endpa) {
if (l2 == NULL || pmap_l1_index(va) != l1slot) {
/* Need to alloc another page table. */
l2 = pmap_early_alloc_tables(&freemempos, 1);
@@ -677,6 +716,35 @@ l2end:
pa += L2_SIZE;
va += L2_SIZE;
}
+
+l3end:
+ while (pa < endpa) {
+ if (l2 == NULL || pmap_l1_index(va) != l1slot) {
+ /* Need to alloc another page table. */
+ l2 = pmap_early_alloc_tables(&freemempos, 1);
+
+ /* Link it. */
+ l1slot = pmap_l1_index(va);
+ pmap_store(&l1[l1slot],
+ L1_PDE((vm_paddr_t)l2, PTE_V));
+ }
+
+ if (l3 == NULL || pmap_l2_index(va) != l2slot) {
+ l3 = pmap_early_alloc_tables(&freemempos, 1);
+
+ /* Link it to L2. */
+ l2slot = pmap_l2_index(va);
+ pmap_store(&l2[l2slot],
+ L2_PDE((vm_paddr_t)l3, PTE_V));
+ }
+
+ /* map l3 pages */
+ l3slot = pmap_l3_index(va);
+ pmap_store(&l3[l3slot], L3_PTE(pa, PTE_KERN | memattr));
+
+ pa += L3_SIZE;
+ va += L3_SIZE;
+ }
}
/* And finally, the limit on DMAP VA. */
diff --git a/sys/riscv/vmm/vmm.c b/sys/riscv/vmm/vmm.c
index 23b57ad3b7aa..1546bde87b41 100644
--- a/sys/riscv/vmm/vmm.c
+++ b/sys/riscv/vmm/vmm.c
@@ -45,7 +45,6 @@
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
-#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
@@ -71,80 +70,18 @@
#include <dev/vmm/vmm_dev.h>
#include <dev/vmm/vmm_ktr.h>
#include <dev/vmm/vmm_mem.h>
+#include <dev/vmm/vmm_vm.h>
#include "vmm_stat.h"
#include "riscv.h"
#include "vmm_aplic.h"
-struct vcpu {
- int flags;
- enum vcpu_state state;
- struct mtx mtx;
- int hostcpu; /* host cpuid this vcpu last ran on */
- int vcpuid;
- void *stats;
- struct vm_exit exitinfo;
- uint64_t nextpc; /* (x) next instruction to execute */
- struct vm *vm; /* (o) */
- void *cookie; /* (i) cpu-specific data */
- struct fpreg *guestfpu; /* (a,i) guest fpu state */
-};
-
-#define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
-#define vcpu_lock_destroy(v) mtx_destroy(&((v)->mtx))
-#define vcpu_lock(v) mtx_lock_spin(&((v)->mtx))
-#define vcpu_unlock(v) mtx_unlock_spin(&((v)->mtx))
-#define vcpu_assert_locked(v) mtx_assert(&((v)->mtx), MA_OWNED)
-
-struct vmm_mmio_region {
- uint64_t start;
- uint64_t end;
- mem_region_read_t read;
- mem_region_write_t write;
-};
-#define VM_MAX_MMIO_REGIONS 4
-
-/*
- * Initialization:
- * (o) initialized the first time the VM is created
- * (i) initialized when VM is created and when it is reinitialized
- * (x) initialized before use
- */
-struct vm {
- void *cookie; /* (i) cpu-specific data */
- volatile cpuset_t active_cpus; /* (i) active vcpus */
- volatile cpuset_t debug_cpus; /* (i) vcpus stopped for debug*/
- int suspend; /* (i) stop VM execution */
- bool dying; /* (o) is dying */
- volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */
- volatile cpuset_t halted_cpus; /* (x) cpus in a hard halt */
- struct vm_mem mem; /* (i) [m+v] guest memory */
- char name[VM_MAX_NAMELEN + 1]; /* (o) virtual machine name */
- struct vcpu **vcpu; /* (i) guest vcpus */
- struct vmm_mmio_region mmio_region[VM_MAX_MMIO_REGIONS];
- /* (o) guest MMIO regions */
- /* The following describe the vm cpu topology */
- uint16_t sockets; /* (o) num of sockets */
- uint16_t cores; /* (o) num of cores/socket */
- uint16_t threads; /* (o) num of threads/core */
- uint16_t maxcpus; /* (o) max pluggable cpus */
- struct sx vcpus_init_lock; /* (o) */
-};
-
static MALLOC_DEFINE(M_VMM, "vmm", "vmm");
/* statistics */
static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
-SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);
-
-static int vmm_ipinum;
-SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
- "IPI vector used for vcpu notifications");
-
-static void vcpu_notify_event_locked(struct vcpu *vcpu);
-
/* global statistics */
VMM_STAT(VMEXIT_COUNT, "total number of vm exits");
VMM_STAT(VMEXIT_IRQ, "number of vmexits for an irq");
@@ -233,14 +170,6 @@ vm_init(struct vm *vm, bool create)
}
}
-void
-vm_disable_vcpu_creation(struct vm *vm)
-{
- sx_xlock(&vm->vcpus_init_lock);
- vm->dying = true;
- sx_xunlock(&vm->vcpus_init_lock);
-}
-
struct vcpu *
vm_alloc_vcpu(struct vm *vm, int vcpuid)
{
@@ -271,18 +200,6 @@ vm_alloc_vcpu(struct vm *vm, int vcpuid)
return (vcpu);
}
-void
-vm_lock_vcpus(struct vm *vm)
-{
- sx_xlock(&vm->vcpus_init_lock);
-}
-
-void
-vm_unlock_vcpus(struct vm *vm)
-{
- sx_unlock(&vm->vcpus_init_lock);
-}
-
int
vm_create(const char *name, struct vm **retvm)
{
@@ -296,6 +213,7 @@ vm_create(const char *name, struct vm **retvm)
return (error);
}
strcpy(vm->name, name);
+ mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);
sx_init(&vm->vcpus_init_lock, "vm vcpus");
vm->sockets = 1;
@@ -312,35 +230,6 @@ vm_create(const char *name, struct vm **retvm)
return (0);
}
-void
-vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
- uint16_t *threads, uint16_t *maxcpus)
-{
- *sockets = vm->sockets;
- *cores = vm->cores;
- *threads = vm->threads;
- *maxcpus = vm->maxcpus;
-}
-
-uint16_t
-vm_get_maxcpus(struct vm *vm)
-{
- return (vm->maxcpus);
-}
-
-int
-vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
- uint16_t threads, uint16_t maxcpus)
-{
- /* Ignore maxcpus. */
- if ((sockets * cores * threads) > vm->maxcpus)
- return (EINVAL);
- vm->sockets = sockets;
- vm->cores = cores;
- vm->threads = threads;
- return(0);
-}
-
static void
vm_cleanup(struct vm *vm, bool destroy)
{
@@ -372,35 +261,15 @@ vm_cleanup(struct vm *vm, bool destroy)
void
vm_destroy(struct vm *vm)
{
-
vm_cleanup(vm, true);
-
free(vm, M_VMM);
}
-int
-vm_reinit(struct vm *vm)
-{
- int error;
-
- /*
- * A virtual machine can be reset only if all vcpus are suspended.
- */
- if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
- vm_cleanup(vm, false);
- vm_init(vm, false);
- error = 0;
- } else {
- error = EBUSY;
- }
-
- return (error);
-}
-
-const char *
-vm_name(struct vm *vm)
+void
+vm_reset(struct vm *vm)
{
- return (vm->name);
+ vm_cleanup(vm, false);
+ vm_init(vm, false);
}
int
@@ -491,33 +360,6 @@ out_user:
return (0);
}
-int
-vm_suspend(struct vm *vm, enum vm_suspend_how how)
-{
- int i;
-
- if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
- return (EINVAL);
-
- if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) {
- VM_CTR2(vm, "virtual machine already suspended %d/%d",
- vm->suspend, how);
- return (EALREADY);
- }
-
- VM_CTR1(vm, "virtual machine successfully suspended %d", how);
-
- /*
- * Notify all active vcpus that they are now suspended.
- */
- for (i = 0; i < vm->maxcpus; i++) {
- if (CPU_ISSET(i, &vm->active_cpus))
- vcpu_notify_event(vm_vcpu(vm, i));
- }
-
- return (0);
-}
-
void
vm_exit_suspended(struct vcpu *vcpu, uint64_t pc)
{
@@ -545,136 +387,6 @@ vm_exit_debug(struct vcpu *vcpu, uint64_t pc)
vmexit->exitcode = VM_EXITCODE_DEBUG;
}
-int
-vm_activate_cpu(struct vcpu *vcpu)
-{
- struct vm *vm = vcpu->vm;
-
- if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
- return (EBUSY);
-
- CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus);
- return (0);
-
-}
-
-int
-vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu)
-{
- if (vcpu == NULL) {
- vm->debug_cpus = vm->active_cpus;
- for (int i = 0; i < vm->maxcpus; i++) {
- if (CPU_ISSET(i, &vm->active_cpus))
- vcpu_notify_event(vm_vcpu(vm, i));
- }
- } else {
- if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
- return (EINVAL);
-
- CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
- vcpu_notify_event(vcpu);
- }
- return (0);
-}
-
-int
-vm_resume_cpu(struct vm *vm, struct vcpu *vcpu)
-{
-
- if (vcpu == NULL) {
- CPU_ZERO(&vm->debug_cpus);
- } else {
- if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus))
- return (EINVAL);
-
- CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
- }
- return (0);
-}
-
-int
-vcpu_debugged(struct vcpu *vcpu)
-{
-
- return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus));
-}
-
-cpuset_t
-vm_active_cpus(struct vm *vm)
-{
-
- return (vm->active_cpus);
-}
-
-cpuset_t
-vm_debug_cpus(struct vm *vm)
-{
-
- return (vm->debug_cpus);
-}
-
-cpuset_t
-vm_suspended_cpus(struct vm *vm)
-{
-
- return (vm->suspended_cpus);
-}
-
-
-void *
-vcpu_stats(struct vcpu *vcpu)
-{
-
- return (vcpu->stats);
-}
-
-/*
- * This function is called to ensure that a vcpu "sees" a pending event
- * as soon as possible:
- * - If the vcpu thread is sleeping then it is woken up.
- * - If the vcpu is running on a different host_cpu then an IPI will be directed
- * to the host_cpu to cause the vcpu to trap into the hypervisor.
- */
-static void
-vcpu_notify_event_locked(struct vcpu *vcpu)
-{
- int hostcpu;
-
- hostcpu = vcpu->hostcpu;
- if (vcpu->state == VCPU_RUNNING) {
- KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
- if (hostcpu != curcpu) {
- ipi_cpu(hostcpu, vmm_ipinum);
- } else {
- /*
- * If the 'vcpu' is running on 'curcpu' then it must
- * be sending a notification to itself (e.g. SELF_IPI).
- * The pending event will be picked up when the vcpu
- * transitions back to guest context.
- */
- }
- } else {
- KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
- "with hostcpu %d", vcpu->state, hostcpu));
- if (vcpu->state == VCPU_SLEEPING)
- wakeup_one(vcpu);
- }
-}
-
-void
-vcpu_notify_event(struct vcpu *vcpu)
-{
- vcpu_lock(vcpu);
- vcpu_notify_event_locked(vcpu);
- vcpu_unlock(vcpu);
-}
-
-struct vm_mem *
-vm_mem(struct vm *vm)
-{
- return (&vm->mem);
-}
-
static void
restore_guest_fpustate(struct vcpu *vcpu)
{
@@ -709,72 +421,6 @@ save_guest_fpustate(struct vcpu *vcpu)
("%s: fpcurthread set with guest registers", __func__));
}
-static int
-vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
- bool from_idle)
-{
- int error;
-
- vcpu_assert_locked(vcpu);
-
- /*
- * State transitions from the vmmdev_ioctl() must always begin from
- * the VCPU_IDLE state. This guarantees that there is only a single
- * ioctl() operating on a vcpu at any point.
- */
- if (from_idle) {
- while (vcpu->state != VCPU_IDLE) {
- vcpu_notify_event_locked(vcpu);
- msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
- }
- } else {
- KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
- "vcpu idle state"));
- }
-
- if (vcpu->state == VCPU_RUNNING) {
- KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
- "mismatch for running vcpu", curcpu, vcpu->hostcpu));
- } else {
- KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
- "vcpu that is not running", vcpu->hostcpu));
- }
-
- /*
- * The following state transitions are allowed:
- * IDLE -> FROZEN -> IDLE
- * FROZEN -> RUNNING -> FROZEN
- * FROZEN -> SLEEPING -> FROZEN
- */
- switch (vcpu->state) {
- case VCPU_IDLE:
- case VCPU_RUNNING:
- case VCPU_SLEEPING:
- error = (newstate != VCPU_FROZEN);
- break;
- case VCPU_FROZEN:
- error = (newstate == VCPU_FROZEN);
- break;
- default:
- error = 1;
- break;
- }
-
- if (error)
- return (EBUSY);
-
- vcpu->state = newstate;
- if (newstate == VCPU_RUNNING)
- vcpu->hostcpu = curcpu;
- else
- vcpu->hostcpu = NOCPU;
-
- if (newstate == VCPU_IDLE)
- wakeup(&vcpu->state);
-
- return (0);
-}
-
static void
vcpu_require_state(struct vcpu *vcpu, enum vcpu_state newstate)
{
@@ -813,20 +459,6 @@ vm_set_capability(struct vcpu *vcpu, int type, int val)
return (vmmops_setcap(vcpu->cookie, type, val));
}
-struct vm *
-vcpu_vm(struct vcpu *vcpu)
-{
-
- return (vcpu->vm);
-}
-
-int
-vcpu_vcpuid(struct vcpu *vcpu)
-{
-
- return (vcpu->vcpuid);
-}
-
void *
vcpu_get_cookie(struct vcpu *vcpu)
{
@@ -834,39 +466,6 @@ vcpu_get_cookie(struct vcpu *vcpu)
return (vcpu->cookie);
}
-struct vcpu *
-vm_vcpu(struct vm *vm, int vcpuid)
-{
-
- return (vm->vcpu[vcpuid]);
-}
-
-int
-vcpu_set_state(struct vcpu *vcpu, enum vcpu_state newstate, bool from_idle)
-{
- int error;
-
- vcpu_lock(vcpu);
- error = vcpu_set_state_locked(vcpu, newstate, from_idle);
- vcpu_unlock(vcpu);
-
- return (error);
-}
-
-enum vcpu_state
-vcpu_get_state(struct vcpu *vcpu, int *hostcpu)
-{
- enum vcpu_state state;
-
- vcpu_lock(vcpu);
- state = vcpu->state;
- if (hostcpu != NULL)
- *hostcpu = vcpu->hostcpu;
- vcpu_unlock(vcpu);
-
- return (state);
-}
-
int
vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval)
{
diff --git a/sys/riscv/vmm/vmm_aplic.c b/sys/riscv/vmm/vmm_aplic.c
index 74cb4fef4273..7c1cd260d352 100644
--- a/sys/riscv/vmm/vmm_aplic.c
+++ b/sys/riscv/vmm/vmm_aplic.c
@@ -46,7 +46,9 @@
#include <riscv/vmm/vmm_aplic.h>
#include <machine/vmm_instruction_emul.h>
-#include <machine/vmm_dev.h>
+
+#include <dev/vmm/vmm_dev.h>
+#include <dev/vmm/vmm_vm.h>
MALLOC_DEFINE(M_APLIC, "RISC-V VMM APLIC", "RISC-V AIA APLIC");
diff --git a/sys/riscv/vmm/vmm_fence.c b/sys/riscv/vmm/vmm_fence.c
index f8b69aac77a9..ff7eabdb3d50 100644
--- a/sys/riscv/vmm/vmm_fence.c
+++ b/sys/riscv/vmm/vmm_fence.c
@@ -39,6 +39,10 @@
#include <sys/mutex.h>
#include <sys/bus.h>
+#include <machine/vmm.h>
+
+#include <dev/vmm/vmm_vm.h>
+
#include "riscv.h"
#include "vmm_fence.h"
@@ -145,7 +149,6 @@ vmm_fence_add(struct vm *vm, cpuset_t *cpus, struct vmm_fence *fence)
struct vcpu *vcpu;
uint16_t maxcpus;
int hostcpu;
- int state;
bool enq;
int i;
@@ -193,8 +196,7 @@ vmm_fence_add(struct vm *vm, cpuset_t *cpus, struct vmm_fence *fence)
mb();
- state = vcpu_get_state(vcpu, &hostcpu);
- if (state == VCPU_RUNNING)
+ if (vcpu_is_running(vcpu, &hostcpu))
CPU_SET(hostcpu, &running_cpus);
}
diff --git a/sys/riscv/vmm/vmm_riscv.c b/sys/riscv/vmm/vmm_riscv.c
index 0e46aca60fdf..fe2ca5c07789 100644
--- a/sys/riscv/vmm/vmm_riscv.c
+++ b/sys/riscv/vmm/vmm_riscv.c
@@ -59,7 +59,6 @@
#include <machine/cpu.h>
#include <machine/machdep.h>
#include <machine/vmm.h>
-#include <machine/vmm_dev.h>
#include <machine/atomic.h>
#include <machine/pmap.h>
#include <machine/intr.h>
@@ -67,6 +66,7 @@
#include <machine/db_machdep.h>
#include <dev/vmm/vmm_mem.h>
+#include <dev/vmm/vmm_vm.h>
#include "riscv.h"
#include "vmm_aplic.h"
@@ -625,7 +625,7 @@ vmmops_run(void *vcpui, register_t pc, pmap_t pmap, struct vm_eventinfo *evinfo)
* have been modified, it may be necessary to execute an HFENCE.GVMA
* instruction (see Section 5.3.2) before or after writing hgatp.
*/
- __asm __volatile("hfence.gvma" ::: "memory");
+ hfence_gvma();
csr_write(hgatp, pmap->pm_satp);
if (has_sstc)
diff --git a/sys/riscv/vmm/vmm_sbi.c b/sys/riscv/vmm/vmm_sbi.c
index 426276444357..c1e6022097e3 100644
--- a/sys/riscv/vmm/vmm_sbi.c
+++ b/sys/riscv/vmm/vmm_sbi.c
@@ -36,6 +36,8 @@
#include <machine/sbi.h>
+#include <dev/vmm/vmm_vm.h>
+
#include "riscv.h"
#include "vmm_fence.h"
diff --git a/sys/riscv/vmm/vmm_vtimer.c b/sys/riscv/vmm/vmm_vtimer.c
index 0dadc962114f..cb2ca878116c 100644
--- a/sys/riscv/vmm/vmm_vtimer.c
+++ b/sys/riscv/vmm/vmm_vtimer.c
@@ -39,6 +39,8 @@
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/openfirm.h>
+#include <dev/vmm/vmm_vm.h>
+
#include "riscv.h"
#define VTIMER_DEFAULT_FREQ 1000000
diff --git a/sys/rpc/rpcsec_tls/rpctls_impl.c b/sys/rpc/rpcsec_tls/rpctls_impl.c
index 51fe270b13d9..22ba699a6fab 100644
--- a/sys/rpc/rpcsec_tls/rpctls_impl.c
+++ b/sys/rpc/rpcsec_tls/rpctls_impl.c
@@ -163,7 +163,7 @@ sys_rpctls_syscall(struct thread *td, struct rpctls_syscall_args *uap)
mtx_lock(&rpctls_lock);
upsp = RB_FIND(upsock_t, &upcall_sockets,
&(struct upsock){
- .so = __DECONST(struct socket *, uap->socookie) });
+ .so = (struct socket *)(uintptr_t)uap->socookie });
if (__predict_true(upsp != NULL)) {
RB_REMOVE(upsock_t, &upcall_sockets, upsp);
/*
diff --git a/sys/rpc/xdr.h b/sys/rpc/xdr.h
index 4307b5101477..b3eafcb864b2 100644
--- a/sys/rpc/xdr.h
+++ b/sys/rpc/xdr.h
@@ -133,14 +133,7 @@ typedef struct XDR {
* to be decoded. If this pointer is 0, then the type routines should
* allocate dynamic storage of the appropriate size and return it.
*/
-#ifdef _KERNEL
-typedef bool_t (*xdrproc_t)(XDR *, void *, ...);
-#else
-/*
- * XXX can't actually prototype it, because some take three args!!!
- */
-typedef bool_t (*xdrproc_t)(XDR *, ...);
-#endif
+typedef bool_t (*xdrproc_t)(XDR *, void *);
/*
* Operations defined on a XDR handle
diff --git a/sys/security/audit/audit_bsm.c b/sys/security/audit/audit_bsm.c
index 2cd1511b2296..6cd96ebe092f 100644
--- a/sys/security/audit/audit_bsm.c
+++ b/sys/security/audit/audit_bsm.c
@@ -1115,6 +1115,16 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau)
}
break;
+ case AUE_PDWAIT:
+ if (ARG_IS_VALID(kar, ARG_FFLAGS)) {
+ tok = au_to_arg32(1, "flags", ar->ar_arg_fflags);
+ kau_write(rec, tok);
+ }
+ if (ARG_IS_VALID(kar, ARG_FD)) {
+ tok = au_to_arg32(1, "fd", ar->ar_arg_fd);
+ kau_write(rec, tok);
+ }
+
case AUE_IOCTL:
if (ARG_IS_VALID(kar, ARG_CMD)) {
tok = au_to_arg32(2, "cmd", ar->ar_arg_cmd);
@@ -1365,6 +1375,24 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau)
kau_write(rec, tok);
}
break;
+ case AUE_PDRFORK:
+ if (ARG_IS_VALID(kar, ARG_PID)) {
+ tok = au_to_arg32(0, "child PID", ar->ar_arg_pid);
+ kau_write(rec, tok);
+ }
+ if (ARG_IS_VALID(kar, ARG_CMD)) {
+ tok = au_to_arg32(2, "fflags", ar->ar_arg_cmd);
+ kau_write(rec, tok);
+ }
+ if (ARG_IS_VALID(kar, ARG_FFLAGS)) {
+ tok = au_to_arg32(2, "flags", ar->ar_arg_fflags);
+ kau_write(rec, tok);
+ }
+ if (ARG_IS_VALID(kar, ARG_FD)) {
+ tok = au_to_arg32(1, "fd", ar->ar_arg_fd);
+ kau_write(rec, tok);
+ }
+ break;
case AUE_PDGETPID:
if (ARG_IS_VALID(kar, ARG_FD)) {
tok = au_to_arg32(1, "fd", ar->ar_arg_fd);
diff --git a/sys/security/mac/mac_framework.c b/sys/security/mac/mac_framework.c
index b0776160cc74..fec63b99c0e0 100644
--- a/sys/security/mac/mac_framework.c
+++ b/sys/security/mac/mac_framework.c
@@ -374,6 +374,7 @@ mac_policy_getlabeled(struct mac_policy_conf *mpc)
MPC_FLAG(mount_init_label, MPC_OBJECT_MOUNT);
MPC_FLAG(posixsem_init_label, MPC_OBJECT_POSIXSEM);
MPC_FLAG(posixshm_init_label, MPC_OBJECT_POSIXSHM);
+ MPC_FLAG(prison_init_label, MPC_OBJECT_PRISON);
MPC_FLAG(sysvmsg_init_label, MPC_OBJECT_SYSVMSG);
MPC_FLAG(sysvmsq_init_label, MPC_OBJECT_SYSVMSQ);
MPC_FLAG(sysvsem_init_label, MPC_OBJECT_SYSVSEM);
diff --git a/sys/security/mac/mac_framework.h b/sys/security/mac/mac_framework.h
index 1233cd30f211..5e13434e5ecc 100644
--- a/sys/security/mac/mac_framework.h
+++ b/sys/security/mac/mac_framework.h
@@ -73,6 +73,7 @@ struct mount;
struct msg;
struct msqid_kernel;
struct pipepair;
+struct prison;
struct proc;
struct semid_kernel;
struct shmfd;
@@ -85,6 +86,7 @@ struct thread;
struct timespec;
struct ucred;
struct vattr;
+struct vfsoptlist;
struct vnode;
struct vop_setlabel_args;
@@ -346,6 +348,22 @@ void mac_posixshm_create(struct ucred *cred, struct shmfd *shmfd);
void mac_posixshm_destroy(struct shmfd *);
void mac_posixshm_init(struct shmfd *);
+int mac_prison_init(struct prison *pr, int flag);
+void mac_prison_relabel(struct ucred *cred, struct prison *pr,
+ struct label *newlabel);
+void mac_prison_destroy(struct prison *pr);
+int mac_prison_check_attach(struct ucred *cred, struct prison *pr);
+int mac_prison_check_create(struct ucred *cred, struct vfsoptlist *opts,
+ int flags);
+int mac_prison_check_get(struct ucred *cred, struct prison *pr,
+ struct vfsoptlist *opts, int flags);
+int mac_prison_check_set(struct ucred *cred, struct prison *pr,
+ struct vfsoptlist *opts, int flags);
+int mac_prison_check_remove(struct ucred *cred, struct prison *pr);
+void mac_prison_created(struct ucred *cred, struct prison *pr);
+void mac_prison_attached(struct ucred *cred, struct prison *pr,
+ struct proc *p);
+
int mac_priv_check_impl(struct ucred *cred, int priv);
#ifdef MAC
extern bool mac_priv_check_fp_flag;
diff --git a/sys/security/mac/mac_internal.h b/sys/security/mac/mac_internal.h
index aeef59017d18..3f032ed3934a 100644
--- a/sys/security/mac/mac_internal.h
+++ b/sys/security/mac/mac_internal.h
@@ -64,6 +64,12 @@
SDT_PROVIDER_DECLARE(mac); /* MAC Framework-level events. */
SDT_PROVIDER_DECLARE(mac_framework); /* Entry points to MAC. */
+#define MAC_CHECK_PROBE_DEFINE5(name, arg0, arg1, arg2, arg3, arg4) \
+ SDT_PROBE_DEFINE6(mac_framework, , name, mac__check__err, \
+ "int", arg0, arg1, arg2, arg3, arg4); \
+ SDT_PROBE_DEFINE6(mac_framework, , name, mac__check__ok, \
+ "int", arg0, arg1, arg2, arg3, arg4);
+
#define MAC_CHECK_PROBE_DEFINE4(name, arg0, arg1, arg2, arg3) \
SDT_PROBE_DEFINE5(mac_framework, , name, mac__check__err, \
"int", arg0, arg1, arg2, arg3); \
@@ -88,18 +94,20 @@ SDT_PROVIDER_DECLARE(mac_framework); /* Entry points to MAC. */
SDT_PROBE_DEFINE2(mac_framework, , name, mac__check__ok, \
"int", arg0);
-#define MAC_CHECK_PROBE4(name, error, arg0, arg1, arg2, arg3) do { \
+#define MAC_CHECK_PROBE5(name, error, arg0, arg1, arg2, arg3, arg4) do { \
if (SDT_PROBES_ENABLED()) { \
if (error) { \
- SDT_PROBE5(mac_framework, , name, mac__check__err,\
- error, arg0, arg1, arg2, arg3); \
+ SDT_PROBE6(mac_framework, , name, mac__check__err,\
+ error, arg0, arg1, arg2, arg3, arg4); \
} else { \
- SDT_PROBE5(mac_framework, , name, mac__check__ok,\
- 0, arg0, arg1, arg2, arg3); \
+ SDT_PROBE6(mac_framework, , name, mac__check__ok,\
+ 0, arg0, arg1, arg2, arg3, arg4); \
} \
} \
} while (0)
+#define MAC_CHECK_PROBE4(name, error, arg0, arg1, arg2, arg3) \
+ MAC_CHECK_PROBE5(name, error, arg0, arg1, arg2, arg3, 0)
#define MAC_CHECK_PROBE3(name, error, arg0, arg1, arg2) \
MAC_CHECK_PROBE4(name, error, arg0, arg1, arg2, 0)
#define MAC_CHECK_PROBE2(name, error, arg0, arg1) \
@@ -177,6 +185,7 @@ struct label {
#define MPC_OBJECT_SYSVSHM 0x0000000000020000
#define MPC_OBJECT_SYNCACHE 0x0000000000040000
#define MPC_OBJECT_IP6Q 0x0000000000080000
+#define MPC_OBJECT_PRISON 0x0000000000100000
/*
* MAC Framework global variables.
@@ -233,6 +242,8 @@ struct label *mac_cred_label_alloc(void);
void mac_cred_label_free(struct label *label);
struct label *mac_pipe_label_alloc(void);
void mac_pipe_label_free(struct label *label);
+struct label *mac_prison_label_alloc(int flags);
+void mac_prison_label_free(struct label *label);
struct label *mac_socket_label_alloc(int flag);
void mac_socket_label_free(struct label *label);
void mac_socketpeer_label_free(struct label *label);
@@ -252,6 +263,17 @@ int mac_pipe_externalize_label(struct label *label, char *elements,
char *outbuf, size_t outbuflen);
int mac_pipe_internalize_label(struct label *label, char *string);
+int mac_prison_label_set(struct ucred *cred, struct prison *pr,
+ struct label *label);
+int mac_prison_check_relabel(struct ucred *cred, struct prison *pr,
+ struct label *newlabel);
+void mac_prison_copy_label(struct label *src, struct label *dest);
+int mac_prison_externalize_label(struct label *label, char *elements,
+ char *outbuf, size_t outbuflen);
+int mac_prison_internalize_label(struct label *label, char *string);
+void mac_prison_relabel(struct ucred *cred, struct prison *pr,
+ struct label *newlabel);
+
int mac_socket_label_set(struct ucred *cred, struct socket *so,
struct label *label);
void mac_socket_copy_label(struct label *src, struct label *dest);
diff --git a/sys/security/mac/mac_policy.h b/sys/security/mac/mac_policy.h
index f0a1f0863c96..0078138d472f 100644
--- a/sys/security/mac/mac_policy.h
+++ b/sys/security/mac/mac_policy.h
@@ -88,6 +88,7 @@ struct mount;
struct msg;
struct msqid_kernel;
struct pipepair;
+struct prison;
struct proc;
struct sbuf;
struct semid_kernel;
@@ -100,6 +101,7 @@ struct sysctl_req;
struct thread;
struct ucred;
struct vattr;
+struct vfsoptlist;
struct vnode;
struct in_addr;
@@ -407,6 +409,37 @@ typedef void (*mpo_posixshm_create_t)(struct ucred *cred,
typedef void (*mpo_posixshm_destroy_label_t)(struct label *label);
typedef void (*mpo_posixshm_init_label_t)(struct label *label);
+typedef int (*mpo_prison_init_label_t)(struct label *label, int flag);
+typedef int (*mpo_prison_check_relabel_t)(struct ucred *cred,
+ struct prison *pr, struct label *prlabel,
+ struct label *newlabel);
+typedef void (*mpo_prison_destroy_label_t)(struct label *label);
+typedef void (*mpo_prison_copy_label_t)(struct label *src,
+ struct label *dest);
+typedef int (*mpo_prison_externalize_label_t)(struct label *label,
+ char *element_name, struct sbuf *sb, int *claimed);
+typedef int (*mpo_prison_internalize_label_t)(struct label *label,
+ char *element_name, char *element_data, int *claimed);
+typedef void (*mpo_prison_relabel_t)(struct ucred *cred, struct prison *pr,
+ struct label *prlabel, struct label *newlabel);
+typedef int (*mpo_prison_check_attach_t)(struct ucred *cred,
+ struct prison *pr, struct label *prlabel);
+typedef int (*mpo_prison_check_create_t)(struct ucred *cred,
+ struct vfsoptlist *opts, int flags);
+typedef int (*mpo_prison_check_get_t)(struct ucred *cred,
+ struct prison *pr, struct label *prlabel,
+ struct vfsoptlist *opts, int flags);
+typedef int (*mpo_prison_check_set_t)(struct ucred *cred,
+ struct prison *pr, struct label *prlabel,
+ struct vfsoptlist *opts, int flags);
+typedef int (*mpo_prison_check_remove_t)(struct ucred *cred,
+ struct prison *pr, struct label *prlabel);
+typedef void (*mpo_prison_created_t)(struct ucred *cred,
+ struct prison *pr, struct label *prlabel);
+typedef void (*mpo_prison_attached_t)(struct ucred *cred,
+ struct prison *pr, struct label *prlabel, struct proc *p,
+ struct label *proclabel);
+
typedef int (*mpo_priv_check_t)(struct ucred *cred, int priv);
typedef int (*mpo_priv_grant_t)(struct ucred *cred, int priv);
@@ -863,6 +896,21 @@ struct mac_policy_ops {
mpo_posixshm_destroy_label_t mpo_posixshm_destroy_label;
mpo_posixshm_init_label_t mpo_posixshm_init_label;
+ mpo_prison_init_label_t mpo_prison_init_label;
+ mpo_prison_check_relabel_t mpo_prison_check_relabel;
+ mpo_prison_destroy_label_t mpo_prison_destroy_label;
+ mpo_prison_copy_label_t mpo_prison_copy_label;
+ mpo_prison_externalize_label_t mpo_prison_externalize_label;
+ mpo_prison_internalize_label_t mpo_prison_internalize_label;
+ mpo_prison_relabel_t mpo_prison_relabel;
+ mpo_prison_check_attach_t mpo_prison_check_attach;
+ mpo_prison_check_create_t mpo_prison_check_create;
+ mpo_prison_check_get_t mpo_prison_check_get;
+ mpo_prison_check_set_t mpo_prison_check_set;
+ mpo_prison_check_remove_t mpo_prison_check_remove;
+ mpo_prison_created_t mpo_prison_created;
+ mpo_prison_attached_t mpo_prison_attached;
+
mpo_priv_check_t mpo_priv_check;
mpo_priv_grant_t mpo_priv_grant;
diff --git a/sys/security/mac/mac_prison.c b/sys/security/mac/mac_prison.c
new file mode 100644
index 000000000000..68ffd7a3cda3
--- /dev/null
+++ b/sys/security/mac/mac_prison.c
@@ -0,0 +1,255 @@
+/*-
+ * Copyright (c) 2025 Kyle Evans <kevans@FreeBSD.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <sys/cdefs.h>
+#include "opt_mac.h"
+
+#include <sys/param.h>
+#include <sys/condvar.h>
+#include <sys/imgact.h>
+#include <sys/jail.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/mac.h>
+#include <sys/proc.h>
+#include <sys/sbuf.h>
+#include <sys/sdt.h>
+#include <sys/systm.h>
+#include <sys/vnode.h>
+#include <sys/mount.h>
+#include <sys/file.h>
+#include <sys/namei.h>
+#include <sys/sysctl.h>
+
+#include <security/mac/mac_framework.h>
+#include <security/mac/mac_internal.h>
+#include <security/mac/mac_policy.h>
+
+void
+mac_prison_label_free(struct label *label)
+{
+ if (label == NULL)
+ return;
+
+ MAC_POLICY_PERFORM_NOSLEEP(prison_destroy_label, label);
+ mac_labelzone_free(label);
+}
+
+struct label *
+mac_prison_label_alloc(int flag)
+{
+ struct label *label;
+ int error;
+
+ label = mac_labelzone_alloc(flag);
+ if (label == NULL)
+ return (NULL);
+
+ if (flag & M_WAITOK)
+ MAC_POLICY_CHECK(prison_init_label, label, flag);
+ else
+ MAC_POLICY_CHECK_NOSLEEP(prison_init_label, label, flag);
+ if (error) {
+ mac_prison_label_free(label);
+ return (NULL);
+ }
+ return (label);
+}
+
+/*
+ * The caller's expecting us to return with the prison locked if we were
+ * successful, since we're also setting pr->pr_label. On error, it remains
+ * unlocked.
+ */
+int
+mac_prison_init(struct prison *pr, int flag)
+{
+ struct label *prlabel;
+
+ mtx_assert(&pr->pr_mtx, MA_NOTOWNED);
+ if ((mac_labeled & MPC_OBJECT_PRISON) == 0) {
+ mtx_lock(&pr->pr_mtx);
+ pr->pr_label = NULL;
+ return (0);
+ }
+
+ prlabel = mac_prison_label_alloc(flag);
+ if (prlabel == NULL) {
+ KASSERT((flag & M_WAITOK) == 0,
+ ("MAC policy prison_init_label failed under M_WAITOK"));
+ return (ENOMEM);
+ }
+
+ mtx_lock(&pr->pr_mtx);
+ pr->pr_label = prlabel;
+ return (0);
+}
+
+void
+mac_prison_destroy(struct prison *pr)
+{
+ mtx_assert(&pr->pr_mtx, MA_OWNED);
+ mac_prison_label_free(pr->pr_label);
+ pr->pr_label = NULL;
+}
+
+void
+mac_prison_copy_label(struct label *src, struct label *dest)
+{
+
+ MAC_POLICY_PERFORM_NOSLEEP(prison_copy_label, src, dest);
+}
+
+int
+mac_prison_externalize_label(struct label *label, char *elements,
+ char *outbuf, size_t outbuflen)
+{
+ int error;
+
+ MAC_POLICY_EXTERNALIZE(prison, label, elements, outbuf, outbuflen);
+ return (error);
+}
+
+int
+mac_prison_internalize_label(struct label *label, char *string)
+{
+ int error;
+
+ MAC_POLICY_INTERNALIZE(prison, label, string);
+ return (error);
+}
+
+void
+mac_prison_relabel(struct ucred *cred, struct prison *pr,
+ struct label *newlabel)
+{
+ mtx_assert(&pr->pr_mtx, MA_OWNED);
+ MAC_POLICY_PERFORM_NOSLEEP(prison_relabel, cred, pr, pr->pr_label,
+ newlabel);
+}
+
+int
+mac_prison_label_set(struct ucred *cred, struct prison *pr,
+ struct label *label)
+{
+ int error;
+
+ mtx_assert(&pr->pr_mtx, MA_OWNED);
+
+ error = mac_prison_check_relabel(cred, pr, label);
+ if (error)
+ return (error);
+
+ mac_prison_relabel(cred, pr, label);
+
+ return (0);
+}
+
+MAC_CHECK_PROBE_DEFINE4(prison_check_relabel, "struct ucred *",
+ "struct prison *", "struct label *", "struct label *");
+int
+mac_prison_check_relabel(struct ucred *cred, struct prison *pr,
+ struct label *newlabel)
+{
+ int error;
+
+ mtx_assert(&pr->pr_mtx, MA_OWNED);
+ MAC_POLICY_CHECK_NOSLEEP(prison_check_relabel, cred, pr,
+ pr->pr_label, newlabel);
+ MAC_CHECK_PROBE4(prison_check_relabel, error, cred, pr,
+ pr->pr_label, newlabel);
+
+ return (error);
+}
+
+MAC_CHECK_PROBE_DEFINE3(prison_check_attach, "struct ucred *",
+ "struct prison *", "struct label *");
+int
+mac_prison_check_attach(struct ucred *cred, struct prison *pr)
+{
+ int error;
+
+ MAC_POLICY_CHECK_NOSLEEP(prison_check_attach, cred, pr, pr->pr_label);
+ MAC_CHECK_PROBE3(prison_check_attach, error, cred, pr, pr->pr_label);
+
+ return (error);
+}
+
+MAC_CHECK_PROBE_DEFINE3(prison_check_create, "struct ucred *",
+ "struct vfsoptlist *", "int");
+int
+mac_prison_check_create(struct ucred *cred, struct vfsoptlist *opts,
+ int flags)
+{
+ int error;
+
+ MAC_POLICY_CHECK_NOSLEEP(prison_check_create, cred, opts, flags);
+ MAC_CHECK_PROBE3(prison_check_create, error, cred, opts, flags);
+
+ return (error);
+}
+
+MAC_CHECK_PROBE_DEFINE5(prison_check_get, "struct ucred *",
+ "struct prison *", "struct label *", "struct vfsoptlist *", "int");
+int
+mac_prison_check_get(struct ucred *cred, struct prison *pr,
+ struct vfsoptlist *opts, int flags)
+{
+ int error;
+
+ MAC_POLICY_CHECK_NOSLEEP(prison_check_get, cred, pr, pr->pr_label,
+ opts, flags);
+ MAC_CHECK_PROBE5(prison_check_get, error, cred, pr, pr->pr_label, opts,
+ flags);
+
+ return (error);
+}
+
+MAC_CHECK_PROBE_DEFINE5(prison_check_set, "struct ucred *",
+ "struct prison *", "struct label *", "struct vfsoptlist *", "int");
+int
+mac_prison_check_set(struct ucred *cred, struct prison *pr,
+ struct vfsoptlist *opts, int flags)
+{
+ int error;
+
+ MAC_POLICY_CHECK_NOSLEEP(prison_check_set, cred, pr, pr->pr_label,
+ opts, flags);
+ MAC_CHECK_PROBE5(prison_check_set, error, cred, pr, pr->pr_label, opts,
+ flags);
+
+ return (error);
+}
+
+MAC_CHECK_PROBE_DEFINE3(prison_check_remove, "struct ucred *",
+ "struct prison *", "struct label *");
+int
+mac_prison_check_remove(struct ucred *cred, struct prison *pr)
+{
+ int error;
+
+ MAC_POLICY_CHECK_NOSLEEP(prison_check_remove, cred, pr, pr->pr_label);
+ MAC_CHECK_PROBE3(prison_check_remove, error, cred, pr, pr->pr_label);
+
+ return (error);
+}
+
+void
+mac_prison_created(struct ucred *cred, struct prison *pr)
+{
+
+ MAC_POLICY_PERFORM(prison_created, cred, pr, pr->pr_label);
+}
+
+void
+mac_prison_attached(struct ucred *cred, struct prison *pr, struct proc *p)
+{
+
+ MAC_POLICY_PERFORM(prison_attached, cred, pr, pr->pr_label, p,
+ p->p_label);
+}
diff --git a/sys/security/mac/mac_syscalls.c b/sys/security/mac/mac_syscalls.c
index 13c7998041f9..9bafa6d30c36 100644
--- a/sys/security/mac/mac_syscalls.c
+++ b/sys/security/mac/mac_syscalls.c
@@ -49,6 +49,8 @@
#include <sys/abi_compat.h>
#include <sys/capsicum.h>
#include <sys/fcntl.h>
+#include <sys/jail.h>
+#include <sys/jaildesc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
@@ -88,6 +90,35 @@ struct mac32 {
};
#endif
+static int
+mac_label_copyin_string(struct mac *const mac, char **const u_string,
+ int flag)
+{
+ char *buffer;
+ int error;
+
+ error = mac_check_structmac_consistent(mac);
+ if (error != 0)
+ return (error);
+
+ /* 'm_buflen' not too big checked by function call above. */
+ buffer = malloc(mac->m_buflen, M_MACTEMP, flag);
+ if (buffer == NULL)
+ return (ENOMEM);
+
+ error = copyinstr(mac->m_string, buffer, mac->m_buflen, NULL);
+ if (error != 0) {
+ free(buffer, M_MACTEMP);
+ return (error);
+ }
+
+ MPASS(error == 0);
+ if (u_string != NULL)
+ *u_string = mac->m_string;
+ mac->m_string = buffer;
+ return (0);
+}
+
/*
* Copyin a 'struct mac', including the string pointed to by 'm_string'.
*
@@ -99,7 +130,6 @@ int
mac_label_copyin(const void *const u_mac, struct mac *const mac,
char **const u_string)
{
- char *buffer;
int error;
#ifdef COMPAT_FREEBSD32
@@ -120,23 +150,7 @@ mac_label_copyin(const void *const u_mac, struct mac *const mac,
return (error);
}
- error = mac_check_structmac_consistent(mac);
- if (error != 0)
- return (error);
-
- /* 'm_buflen' not too big checked by function call above. */
- buffer = malloc(mac->m_buflen, M_MACTEMP, M_WAITOK);
- error = copyinstr(mac->m_string, buffer, mac->m_buflen, NULL);
- if (error != 0) {
- free(buffer, M_MACTEMP);
- return (error);
- }
-
- MPASS(error == 0);
- if (u_string != NULL)
- *u_string = mac->m_string;
- mac->m_string = buffer;
- return (0);
+ return (mac_label_copyin_string(mac, u_string, M_WAITOK));
}
void
@@ -289,6 +303,156 @@ mac_set_proc_finish(struct thread *const td, bool proc_label_set,
}
int
+mac_get_prison(struct thread *const td, struct prison *pr,
+ struct vfsoptlist *opts)
+{
+ char *buffer = NULL, *u_buffer;
+ struct label *intlabel = NULL;
+ struct mac mac;
+ int error;
+ bool locked = true;
+
+ mtx_assert(&pr->pr_mtx, MA_OWNED);
+#ifdef COMPAT_FREEBSD32
+ if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
+ struct mac32 mac32;
+
+ error = vfs_copyopt(opts, "mac.label", &mac32, sizeof(mac32));
+ if (error == 0) {
+ CP(mac32, mac, m_buflen);
+ PTRIN_CP(mac32, mac, m_string);
+ }
+ } else
+#endif
+ error = vfs_copyopt(opts, "mac.label", &mac, sizeof(mac));
+ if (error) {
+ if (error != ENOENT)
+ vfs_opterror(opts, "bad mac.label");
+ goto out_nomac;
+ }
+
+ intlabel = mac_prison_label_alloc(M_NOWAIT);
+ if (intlabel == NULL) {
+ error = ENOMEM;
+ goto out;
+ }
+
+ if ((mac_labeled & MPC_OBJECT_PRISON) != 0)
+ mac_prison_copy_label(pr->pr_label, intlabel);
+
+ /*
+ * Externalization may want to acquire an rmlock. We already tapped out
+ * a copy of the label from when the jail_get(2) operation started and
+ * we're expected to be called near the end of jail_get(2) when the lock
+ * is about to be dropped anyways, so this is safe.
+ */
+ mtx_unlock(&pr->pr_mtx);
+ locked = false;
+
+ error = mac_label_copyin_string(&mac, &u_buffer, M_WAITOK);
+ if (error) {
+ vfs_opterror(opts, "mac.label: string copy failure");
+ goto out;
+ }
+
+ buffer = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK | M_ZERO);
+ if (buffer == NULL) {
+ error = ENOMEM;
+ goto out;
+ }
+
+ error = mac_prison_externalize_label(intlabel, mac.m_string,
+ buffer, mac.m_buflen);
+
+ if (error == 0)
+ error = copyout(buffer, u_buffer, strlen(buffer)+1);
+
+out:
+ mac_prison_label_free(intlabel);
+ free_copied_label(&mac);
+ free(buffer, M_MACTEMP);
+
+out_nomac:
+ if (locked) {
+ MPASS(error != 0);
+ mtx_unlock(&pr->pr_mtx);
+ }
+
+ return (error);
+}
+
+int
+mac_set_prison_prepare(struct thread *const td, struct vfsoptlist *opts,
+ void **const mac_set_prison_data)
+{
+ struct mac mac;
+ struct label *intlabel;
+ int error;
+
+#ifdef COMPAT_FREEBSD32
+ if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
+ struct mac32 mac32;
+
+ error = vfs_copyopt(opts, "mac.label", &mac32, sizeof(mac32));
+ if (error == 0) {
+ CP(mac32, mac, m_buflen);
+ PTRIN_CP(mac32, mac, m_string);
+ }
+ } else
+#endif
+ error = vfs_copyopt(opts, "mac.label", &mac, sizeof(mac));
+ if (error) {
+ if (error != ENOENT)
+ vfs_opterror(opts, "bad mac.label");
+ return (error);
+ }
+
+ error = mac_label_copyin_string(&mac, NULL, M_WAITOK);
+ if (error) {
+ vfs_opterror(opts, "mac.label: string copy failure");
+ return (error);
+ }
+
+ /*
+ * If the option wasn't set, then we return ENOENT above. If we don't
+ * have any policies applicable to prisons, we can return EINVAL early.
+ */
+ if (!(mac_labeled & MPC_OBJECT_PRISON)) {
+ vfs_opterror(opts, "no labelled jail policies");
+ return (EINVAL);
+ }
+
+ intlabel = mac_prison_label_alloc(M_WAITOK);
+ error = mac_prison_internalize_label(intlabel, mac.m_string);
+ if (error) {
+ mac_prison_label_free(intlabel);
+ vfs_opterror(opts, "internalize_label error");
+ return (error);
+ }
+
+ *mac_set_prison_data = intlabel;
+ return (0);
+}
+
+int
+mac_set_prison_core(struct thread *const td, struct prison *pr,
+ void *const mac_set_prison_data)
+{
+ struct label *const intlabel = mac_set_prison_data;
+
+ return (mac_prison_label_set(td->td_ucred, pr, intlabel));
+}
+
+void
+mac_set_prison_finish(struct thread *const td, bool prison_label_set __unused,
+ void *const mac_set_prison_data)
+{
+ struct label *const intlabel = mac_set_prison_data;
+
+ mac_prison_label_free(intlabel);
+}
+
+int
sys___mac_set_proc(struct thread *td, struct __mac_set_proc_args *uap)
{
struct ucred *newcred, *oldcred;
@@ -339,6 +503,7 @@ sys___mac_get_fd(struct thread *td, struct __mac_get_fd_args *uap)
struct mac mac;
struct vnode *vp;
struct pipe *pipe;
+ struct prison *pr;
struct socket *so;
cap_rights_t rights;
int error;
@@ -400,6 +565,25 @@ sys___mac_get_fd(struct thread *td, struct __mac_get_fd_args *uap)
mac_socket_label_free(intlabel);
break;
+ case DTYPE_JAILDESC:
+ if (!(mac_labeled & MPC_OBJECT_PRISON)) {
+ error = EINVAL;
+ goto out_fdrop;
+ }
+
+ error = jaildesc_get_prison(fp, &pr);
+ if (error != 0)
+ goto out_fdrop;
+
+ intlabel = mac_prison_label_alloc(M_WAITOK);
+ mac_prison_copy_label(pr->pr_label, intlabel);
+ prison_free(pr);
+
+ error = mac_prison_externalize_label(intlabel, mac.m_string,
+ buffer, mac.m_buflen);
+ mac_prison_label_free(intlabel);
+ break;
+
default:
error = EINVAL;
}
@@ -473,6 +657,7 @@ sys___mac_set_fd(struct thread *td, struct __mac_set_fd_args *uap)
{
struct label *intlabel;
struct pipe *pipe;
+ struct prison *pr;
struct socket *so;
struct file *fp;
struct mount *mp;
@@ -548,6 +733,27 @@ sys___mac_set_fd(struct thread *td, struct __mac_set_fd_args *uap)
mac_socket_label_free(intlabel);
break;
+ case DTYPE_JAILDESC:
+ if (!(mac_labeled & MPC_OBJECT_PRISON)) {
+ error = EINVAL;
+ goto out_fdrop;
+ }
+
+ pr = NULL;
+ intlabel = mac_prison_label_alloc(M_WAITOK);
+ error = mac_prison_internalize_label(intlabel, mac.m_string);
+ if (error == 0)
+ error = jaildesc_get_prison(fp, &pr);
+ if (error == 0) {
+ prison_lock(pr);
+ error = mac_prison_label_set(td->td_ucred, pr,
+ intlabel);
+ prison_free_locked(pr);
+ }
+
+ mac_prison_label_free(intlabel);
+ break;
+
default:
error = EINVAL;
}
diff --git a/sys/security/mac/mac_syscalls.h b/sys/security/mac/mac_syscalls.h
index f95ff3ef1264..76c8e6d188bb 100644
--- a/sys/security/mac/mac_syscalls.h
+++ b/sys/security/mac/mac_syscalls.h
@@ -30,4 +30,14 @@ int mac_set_proc_core(struct thread *const td, struct ucred *const newcred,
void mac_set_proc_finish(struct thread *const td, bool proc_label_set,
void *const mac_set_proc_data);
+struct vfsoptlist;
+int mac_get_prison(struct thread *const td, struct prison *pr,
+ struct vfsoptlist *opts);
+int mac_set_prison_prepare(struct thread *const td, struct vfsoptlist *opts,
+ void **const mac_set_prison_data);
+int mac_set_prison_core(struct thread *const td, struct prison *pr,
+ void *const mac_set_prison_data);
+void mac_set_prison_finish(struct thread *const td, bool prison_label_set,
+ void *const mac_set_prison_data);
+
#endif /* !_SECURITY_MAC_MAC_SYSCALLS_H_ */
diff --git a/sys/security/mac_stub/mac_stub.c b/sys/security/mac_stub/mac_stub.c
index ac5d5b58e5db..4a567c68b2be 100644
--- a/sys/security/mac_stub/mac_stub.c
+++ b/sys/security/mac_stub/mac_stub.c
@@ -52,6 +52,7 @@
#include <sys/acl.h>
#include <sys/conf.h>
#include <sys/extattr.h>
+#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ksem.h>
@@ -852,6 +853,74 @@ stub_posixshm_create(struct ucred *cred, struct shmfd *shmfd,
}
+static void
+stub_prison_relabel(struct ucred *cred, struct prison *pr,
+ struct label *prlabel, struct label *newlabel)
+{
+
+}
+
+static int
+stub_prison_check_relabel(struct ucred *cred, struct prison *pr,
+ struct label *prlabel, struct label *newlabel)
+{
+
+ return (0);
+}
+
+static int
+stub_prison_check_attach(struct ucred *cred, struct prison *pr,
+ struct label *prlabel)
+{
+
+ return (0);
+}
+
+static int
+stub_prison_check_create(struct ucred *cred, struct vfsoptlist *opts, int flags)
+{
+
+ return (0);
+}
+
+static int
+stub_prison_check_get(struct ucred *cred, struct prison *pr,
+ struct label *prlabel, struct vfsoptlist *opts, int flags)
+{
+
+ return (0);
+}
+
+static int
+stub_prison_check_set(struct ucred *cred, struct prison *pr,
+ struct label *prlabel, struct vfsoptlist *opts, int flags)
+{
+
+ return (0);
+}
+
+static int
+stub_prison_check_remove(struct ucred *cred, struct prison *pr,
+ struct label *prlabel)
+{
+
+ return (0);
+}
+
+static void
+stub_prison_created(struct ucred *cred, struct prison *pr,
+ struct label *prlabel)
+{
+
+}
+
+static void
+stub_prison_attached(struct ucred *cred, struct prison *pr,
+ struct label *prlabel, struct proc *p, struct label *proclabel)
+{
+
+}
+
static int
stub_priv_check(struct ucred *cred, int priv)
{
@@ -1841,6 +1910,21 @@ static struct mac_policy_ops stub_ops =
.mpo_posixshm_destroy_label = stub_destroy_label,
.mpo_posixshm_init_label = stub_init_label,
+ .mpo_prison_init_label = stub_init_label_waitcheck,
+ .mpo_prison_destroy_label = stub_destroy_label,
+ .mpo_prison_copy_label = stub_copy_label,
+ .mpo_prison_externalize_label = stub_externalize_label,
+ .mpo_prison_internalize_label = stub_internalize_label,
+ .mpo_prison_relabel = stub_prison_relabel,
+ .mpo_prison_check_relabel = stub_prison_check_relabel,
+ .mpo_prison_check_attach = stub_prison_check_attach,
+ .mpo_prison_check_create = stub_prison_check_create,
+ .mpo_prison_check_get = stub_prison_check_get,
+ .mpo_prison_check_set = stub_prison_check_set,
+ .mpo_prison_check_remove = stub_prison_check_remove,
+ .mpo_prison_created = stub_prison_created,
+ .mpo_prison_attached = stub_prison_attached,
+
.mpo_priv_check = stub_priv_check,
.mpo_priv_grant = stub_priv_grant,
diff --git a/sys/security/mac_test/mac_test.c b/sys/security/mac_test/mac_test.c
index c447eeef010d..47dd7d1326a3 100644
--- a/sys/security/mac_test/mac_test.c
+++ b/sys/security/mac_test/mac_test.c
@@ -51,6 +51,7 @@
#include <sys/param.h>
#include <sys/acl.h>
+#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ksem.h>
@@ -99,6 +100,7 @@ static SYSCTL_NODE(_security_mac, OID_AUTO, test,
#define MAGIC_PIPE 0xdc6c9919
#define MAGIC_POSIX_SEM 0x78ae980c
#define MAGIC_POSIX_SHM 0x4e853fc9
+#define MAGIC_PRISON 0x9639acdb
#define MAGIC_PROC 0x3b4be98f
#define MAGIC_CRED 0x9a5a4987
#define MAGIC_VNODE 0x1a67a45c
@@ -1591,6 +1593,161 @@ test_posixshm_init_label(struct label *label)
COUNTER_INC(posixshm_init_label);
}
+COUNTER_DECL(prison_init_label);
+static int
+test_prison_init_label(struct label *label, int flag)
+{
+
+ if (flag & M_WAITOK)
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+ "test_prison_init_label() at %s:%d", __FILE__,
+ __LINE__);
+
+ LABEL_INIT(label, MAGIC_PRISON);
+ COUNTER_INC(prison_init_label);
+ return (0);
+}
+
+COUNTER_DECL(prison_destroy_label);
+static void
+test_prison_destroy_label(struct label *label)
+{
+
+ LABEL_DESTROY(label, MAGIC_PRISON);
+ COUNTER_INC(prison_destroy_label);
+}
+
+COUNTER_DECL(prison_copy_label);
+static void
+test_prison_copy_label(struct label *src, struct label *dest)
+{
+
+ LABEL_CHECK(src, MAGIC_PRISON);
+ LABEL_CHECK(dest, MAGIC_PRISON);
+ COUNTER_INC(prison_copy_label);
+}
+
+COUNTER_DECL(prison_externalize_label);
+static int
+test_prison_externalize_label(struct label *label, char *element_name,
+ struct sbuf *sb, int *claimed)
+{
+
+ LABEL_CHECK(label, MAGIC_PRISON);
+ COUNTER_INC(prison_externalize_label);
+
+ return (0);
+}
+
+COUNTER_DECL(prison_internalize_label);
+static int
+test_prison_internalize_label(struct label *label, char *element_name,
+ char *element_data, int *claimed)
+{
+
+ LABEL_CHECK(label, MAGIC_PRISON);
+ COUNTER_INC(prison_internalize_label);
+
+ return (0);
+}
+
+COUNTER_DECL(prison_relabel);
+static void
+test_prison_relabel(struct ucred *cred, struct prison *pr,
+ struct label *prlabel, struct label *newlabel)
+{
+
+ LABEL_CHECK(prlabel, MAGIC_PRISON);
+ LABEL_CHECK(newlabel, MAGIC_PRISON);
+ COUNTER_INC(prison_relabel);
+}
+
+COUNTER_DECL(prison_check_relabel);
+static int
+test_prison_check_relabel(struct ucred *cred, struct prison *pr,
+ struct label *prlabel, struct label *newlabel)
+{
+
+ LABEL_CHECK(prlabel, MAGIC_PRISON);
+ LABEL_CHECK(newlabel, MAGIC_PRISON);
+ COUNTER_INC(prison_check_relabel);
+ return (0);
+}
+
+COUNTER_DECL(prison_check_attach);
+static int
+test_prison_check_attach(struct ucred *cred, struct prison *pr,
+ struct label *prlabel)
+{
+
+ LABEL_CHECK(prlabel, MAGIC_PRISON);
+ COUNTER_INC(prison_check_attach);
+ return (0);
+}
+
+COUNTER_DECL(prison_check_create);
+static int
+test_prison_check_create(struct ucred *cred, struct vfsoptlist *opts, int flags)
+{
+
+ COUNTER_INC(prison_check_create);
+ return (0);
+}
+
+COUNTER_DECL(prison_check_get);
+static int
+test_prison_check_get(struct ucred *cred, struct prison *pr,
+ struct label *prlabel, struct vfsoptlist *opts, int flags)
+{
+
+ LABEL_CHECK(prlabel, MAGIC_PRISON);
+ COUNTER_INC(prison_check_get);
+ return (0);
+}
+
+COUNTER_DECL(prison_check_set);
+static int
+test_prison_check_set(struct ucred *cred, struct prison *pr,
+ struct label *prlabel, struct vfsoptlist *opts, int flags)
+{
+
+ LABEL_CHECK(prlabel, MAGIC_PRISON);
+ COUNTER_INC(prison_check_set);
+ return (0);
+}
+
+COUNTER_DECL(prison_check_remove);
+static int
+test_prison_check_remove(struct ucred *cred, struct prison *pr,
+ struct label *prlabel)
+{
+
+ LABEL_CHECK(prlabel, MAGIC_PRISON);
+ COUNTER_INC(prison_check_remove);
+ return (0);
+}
+
+COUNTER_DECL(prison_created);
+static void
+test_prison_created(struct ucred *cred, struct prison *pr,
+ struct label *prlabel)
+{
+
+ LABEL_CHECK(prlabel, MAGIC_PRISON);
+ COUNTER_INC(prison_created);
+}
+
+COUNTER_DECL(prison_attached);
+static void
+test_prison_attached(struct ucred *cred, struct prison *pr,
+ struct label *prlabel, struct proc *p, struct label *proclabel)
+{
+
+ LABEL_CHECK(prlabel, MAGIC_PRISON);
+ LABEL_CHECK(proclabel, MAGIC_PROC);
+ COUNTER_INC(prison_attached);
+}
+
COUNTER_DECL(proc_check_debug);
static int
test_proc_check_debug(struct ucred *cred, struct proc *p)
@@ -3208,6 +3365,21 @@ static struct mac_policy_ops test_ops =
.mpo_posixshm_destroy_label = test_posixshm_destroy_label,
.mpo_posixshm_init_label = test_posixshm_init_label,
+ .mpo_prison_init_label = test_prison_init_label,
+ .mpo_prison_destroy_label = test_prison_destroy_label,
+ .mpo_prison_copy_label = test_prison_copy_label,
+ .mpo_prison_externalize_label = test_prison_externalize_label,
+ .mpo_prison_internalize_label = test_prison_internalize_label,
+ .mpo_prison_relabel = test_prison_relabel,
+ .mpo_prison_check_relabel = test_prison_check_relabel,
+ .mpo_prison_check_attach = test_prison_check_attach,
+ .mpo_prison_check_create = test_prison_check_create,
+ .mpo_prison_check_get = test_prison_check_get,
+ .mpo_prison_check_set = test_prison_check_set,
+ .mpo_prison_check_remove = test_prison_check_remove,
+ .mpo_prison_created = test_prison_created,
+ .mpo_prison_attached = test_prison_attached,
+
.mpo_proc_check_debug = test_proc_check_debug,
.mpo_proc_check_sched = test_proc_check_sched,
.mpo_proc_check_signal = test_proc_check_signal,
diff --git a/sys/sys/_types.h b/sys/sys/_types.h
index 2e622090e81d..a2ee83a862da 100644
--- a/sys/sys/_types.h
+++ b/sys/sys/_types.h
@@ -161,10 +161,7 @@ typedef int __cpulevel_t; /* level parameter for cpuset. */
typedef int __cpusetid_t; /* cpuset identifier. */
typedef __int64_t __daddr_t; /* bwrite(3), FIOBMAP2, etc */
-#ifndef __has_feature
-#define __has_feature(x) 0
-#endif
-#if !__has_feature(capabilities)
+#ifndef __SIZEOF_INTCAP__
/*
* On non-CHERI systems, define __(u)intcap_t to __(u)intptr_t so that
* hybrid-C code which needs to be explicitly aware of capabilities can
diff --git a/sys/sys/abi_compat.h b/sys/sys/abi_compat.h
index c2233f2eac2c..bd99a21d8e23 100644
--- a/sys/sys/abi_compat.h
+++ b/sys/sys/abi_compat.h
@@ -67,9 +67,17 @@
TS_CP((src), (dst), it_value); \
} while (0)
+#define FU64_CP(src, dst, fld) do { \
+ _Static_assert(sizeof((src).fld) == sizeof(uint64_t), \
+ "FU64_CP src: " #src "." #fld " is not 8 bytes"); \
+ _Static_assert(sizeof((dst).fld) == sizeof(uint64_t), \
+ "FU64_CP dst: " #dst "." #fld " is not 8 bytes"); \
+ memcpy(&(dst).fld, &(src).fld, sizeof(uint64_t)); \
+} while (0)
+
#define BT_CP(src, dst, fld) do { \
CP((src).fld, (dst).fld, sec); \
- *(uint64_t *)&(dst).fld.frac[0] = (src).fld.frac; \
+ FU64_CP((src).fld, (dst).fld, frac); \
} while (0)
#endif /* !_COMPAT_H_ */
diff --git a/sys/sys/buf_ring.h b/sys/sys/buf_ring.h
index 00870cbf3531..07a4fa52891e 100644
--- a/sys/sys/buf_ring.h
+++ b/sys/sys/buf_ring.h
@@ -266,7 +266,7 @@ buf_ring_advance_sc(struct buf_ring *br)
* the compare and an atomic.
*/
static __inline void
-buf_ring_putback_sc(struct buf_ring *br, void *new)
+buf_ring_putback_sc(struct buf_ring *br, void *buf)
{
uint32_t cons_idx, mask;
@@ -274,7 +274,7 @@ buf_ring_putback_sc(struct buf_ring *br, void *new)
cons_idx = atomic_load_32(&br->br_cons_head) & mask;
KASSERT(cons_idx != (atomic_load_32(&br->br_prod_tail) & mask),
("Buf-Ring has none in putback")) ;
- br->br_ring[cons_idx] = new;
+ br->br_ring[cons_idx] = buf;
}
/*
@@ -305,7 +305,7 @@ static __inline void *
buf_ring_peek_clear_sc(struct buf_ring *br)
{
uint32_t cons_head, prod_tail, mask;
- void *ret;
+ void *buf;
#if defined(DEBUG_BUFRING) && defined(_KERNEL)
if (!mtx_owned(br->br_lock))
@@ -319,7 +319,7 @@ buf_ring_peek_clear_sc(struct buf_ring *br)
if (cons_head == prod_tail)
return (NULL);
- ret = br->br_ring[cons_head & mask];
+ buf = br->br_ring[cons_head & mask];
#ifdef DEBUG_BUFRING
/*
* Single consumer, i.e. cons_head will not move while we are
@@ -327,13 +327,12 @@ buf_ring_peek_clear_sc(struct buf_ring *br)
*/
br->br_ring[cons_head & mask] = NULL;
#endif
- return (ret);
+ return (buf);
}
static __inline int
buf_ring_full(struct buf_ring *br)
{
-
return (atomic_load_32(&br->br_prod_head) ==
atomic_load_32(&br->br_cons_tail) + br->br_cons_size - 1);
}
@@ -341,7 +340,6 @@ buf_ring_full(struct buf_ring *br)
static __inline int
buf_ring_empty(struct buf_ring *br)
{
-
return (atomic_load_32(&br->br_cons_head) ==
atomic_load_32(&br->br_prod_tail));
}
diff --git a/sys/sys/caprights.h b/sys/sys/caprights.h
index 6a5a17eda5ee..904d9b4e843a 100644
--- a/sys/sys/caprights.h
+++ b/sys/sys/caprights.h
@@ -92,6 +92,7 @@ extern const cap_rights_t cap_mmap_rights;
extern const cap_rights_t cap_no_rights;
extern const cap_rights_t cap_pdgetpid_rights;
extern const cap_rights_t cap_pdkill_rights;
+extern const cap_rights_t cap_pdwait_rights;
extern const cap_rights_t cap_pread_rights;
extern const cap_rights_t cap_pwrite_rights;
extern const cap_rights_t cap_read_rights;
diff --git a/sys/sys/eventfd.h b/sys/sys/eventfd.h
index 0f64483753e5..1b390feed48e 100644
--- a/sys/sys/eventfd.h
+++ b/sys/sys/eventfd.h
@@ -38,8 +38,13 @@ typedef uint64_t eventfd_t;
#ifdef _KERNEL
+struct eventfd;
+
int eventfd_create_file(struct thread *td, struct file *fp, uint32_t initval,
int flags);
+struct eventfd *eventfd_get(struct file *fp);
+void eventfd_put(struct eventfd *efd);
+void eventfd_signal(struct eventfd *efd);
#else
diff --git a/sys/sys/exterr_cat.h b/sys/sys/exterr_cat.h
index 24f07539fe35..015eb6a1ae76 100644
--- a/sys/sys/exterr_cat.h
+++ b/sys/sys/exterr_cat.h
@@ -37,6 +37,8 @@
#define EXTERR_CAT_GEOM 12
#define EXTERR_CAT_FUSE_VFS 13
#define EXTERR_CAT_FUSE_DEVICE 14
+#define EXTERR_CAT_FORK 15
+#define EXTERR_CAT_PROCEXIT 16
#endif
diff --git a/sys/sys/jail.h b/sys/sys/jail.h
index e6a13e6719dd..5ac4c5f9008d 100644
--- a/sys/sys/jail.h
+++ b/sys/sys/jail.h
@@ -198,6 +198,7 @@ struct prison {
struct prison_ip *pr_addrs[PR_FAMILY_MAX]; /* (p,n) IPs of jail */
struct prison_racct *pr_prison_racct; /* (c) racct jail proxy */
struct knlist *pr_klist; /* (m) attached knotes */
+ struct label *pr_label; /* (m) MAC label */
LIST_HEAD(, jaildesc) pr_descs; /* (a) attached descriptors */
void *pr_sparep;
int pr_childcount; /* (a) number of child jails */
diff --git a/sys/sys/jaildesc.h b/sys/sys/jaildesc.h
index fda270d62e70..b0a1a6238cc9 100644
--- a/sys/sys/jaildesc.h
+++ b/sys/sys/jaildesc.h
@@ -78,6 +78,7 @@ struct jaildesc {
int jaildesc_find(struct thread *td, int fd, struct prison **prp,
struct ucred **ucredp);
int jaildesc_alloc(struct thread *td, struct file **fpp, int *fdp, int owning);
+int jaildesc_get_prison(struct file *jd, struct prison **prp);
void jaildesc_set_prison(struct file *jd, struct prison *pr);
void jaildesc_prison_cleanup(struct prison *pr);
void jaildesc_knote(struct prison *pr, long hint);
diff --git a/sys/sys/module.h b/sys/sys/module.h
index f749455bdf63..b4a08e2fc26f 100644
--- a/sys/sys/module.h
+++ b/sys/sys/module.h
@@ -116,15 +116,17 @@ struct mod_pnp_match_info
}; \
DATA_SET(modmetadata_set, MODULE_METADATA_CONCAT(uniquifier))
+#define MODULE_DEPEND_CONCAT(module, mdepend) _##module##_depend_on_##mdepend
#define MODULE_DEPEND(module, mdepend, vmin, vpref, vmax) \
- static struct mod_depend _##module##_depend_on_##mdepend \
+ static struct mod_depend MODULE_DEPEND_CONCAT(module, mdepend) \
__section(".data") = { \
vmin, \
vpref, \
vmax \
}; \
- MODULE_METADATA(_md_##module##_on_##mdepend, MDT_DEPEND, \
- &_##module##_depend_on_##mdepend, #mdepend)
+ MODULE_METADATA(MODULE_DEPEND_CONCAT(module, mdepend), \
+ MDT_DEPEND, &MODULE_DEPEND_CONCAT(module, mdepend), \
+ __XSTRING(mdepend))
/*
* Every kernel has a 'kernel' module with the version set to
diff --git a/sys/sys/param.h b/sys/sys/param.h
index 3b56c582be0e..65a244311fee 100644
--- a/sys/sys/param.h
+++ b/sys/sys/param.h
@@ -74,7 +74,7 @@
* cannot include sys/param.h and should only be updated here.
*/
#undef __FreeBSD_version
-#define __FreeBSD_version 1600007
+#define __FreeBSD_version 1600011
/*
* __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,
diff --git a/sys/sys/procdesc.h b/sys/sys/procdesc.h
index 81102dffa6ff..b477903f8053 100644
--- a/sys/sys/procdesc.h
+++ b/sys/sys/procdesc.h
@@ -122,9 +122,15 @@ struct rusage;
* Process descriptor system calls.
*/
__BEGIN_DECLS
+struct __wrusage;
+struct __siginfo;
+
pid_t pdfork(int *, int);
+pid_t pdrfork(int *, int, int);
int pdkill(int, int);
int pdgetpid(int, pid_t *);
+int pdwait(int, int *, int, struct __wrusage *, struct __siginfo *);
+pid_t pdrfork_thread(int *, int, int, void *, int (*)(void *), void *);
__END_DECLS
#endif /* _KERNEL */
diff --git a/sys/sys/sdt.h b/sys/sys/sdt.h
index cd45bc1a1ffd..f705be915684 100644
--- a/sys/sys/sdt.h
+++ b/sys/sys/sdt.h
@@ -447,7 +447,7 @@ struct sdt_probe {
const char *mod;
const char *func;
const char *name;
- id_t id; /* DTrace probe ID. */
+ uint32_t id; /* DTrace probe ID. */
int n_args; /* Number of arguments. */
struct linker_file *sdtp_lf; /* Module in which we're defined. */
};
diff --git a/sys/sys/smp.h b/sys/sys/smp.h
index b642a6014f33..fdb69b13c0d4 100644
--- a/sys/sys/smp.h
+++ b/sys/sys/smp.h
@@ -16,6 +16,7 @@
#ifndef LOCORE
+#include <sys/types.h>
#include <sys/cpuset.h>
#include <sys/queue.h>
@@ -278,7 +279,12 @@ void smp_rendezvous(void (*)(void *),
void (*)(void *),
void *arg);
void smp_rendezvous_cpus(cpuset_t,
- void (*)(void *),
+ void (*)(void *),
+ void (*)(void *),
+ void (*)(void *),
+ void *arg);
+void smp_rendezvous_cpu(u_int,
+ void (*)(void *),
void (*)(void *),
void (*)(void *),
void *arg);
diff --git a/sys/sys/syscall.h b/sys/sys/syscall.h
index 43f46f063e3e..814437732df3 100644
--- a/sys/sys/syscall.h
+++ b/sys/sys/syscall.h
@@ -538,4 +538,6 @@
#define SYS_jail_attach_jd 597
#define SYS_jail_remove_jd 598
#define SYS_kexec_load 599
-#define SYS_MAXSYSCALL 600
+#define SYS_pdrfork 600
+#define SYS_pdwait 601
+#define SYS_MAXSYSCALL 602
diff --git a/sys/sys/syscall.mk b/sys/sys/syscall.mk
index ce29c050885e..e9d54983b5c4 100644
--- a/sys/sys/syscall.mk
+++ b/sys/sys/syscall.mk
@@ -441,4 +441,6 @@ MIASM = \
setgroups.o \
jail_attach_jd.o \
jail_remove_jd.o \
- kexec_load.o
+ kexec_load.o \
+ pdrfork.o \
+ pdwait.o
diff --git a/sys/sys/syscallsubr.h b/sys/sys/syscallsubr.h
index 4ddd2eba25c8..e2bbbc188553 100644
--- a/sys/sys/syscallsubr.h
+++ b/sys/sys/syscallsubr.h
@@ -286,6 +286,8 @@ int kern_posix_fallocate(struct thread *td, int fd, off_t offset,
off_t len);
int kern_fspacectl(struct thread *td, int fd, int cmd,
const struct spacectl_range *, int flags, struct spacectl_range *);
+int kern_pdwait(struct thread *td, int fd, int *status,
+ int options, struct __wrusage *wrusage, siginfo_t *sip);
int kern_procctl(struct thread *td, enum idtype idtype, id_t id, int com,
void *data);
int kern_pread(struct thread *td, int fd, void *buf, size_t nbyte,
diff --git a/sys/sys/sysproto.h b/sys/sys/sysproto.h
index 5f5524a4519b..0496077bb555 100644
--- a/sys/sys/sysproto.h
+++ b/sys/sys/sysproto.h
@@ -1913,6 +1913,18 @@ struct kexec_load_args {
char segments_l_[PADL_(struct kexec_segment *)]; struct kexec_segment * segments; char segments_r_[PADR_(struct kexec_segment *)];
char flags_l_[PADL_(u_long)]; u_long flags; char flags_r_[PADR_(u_long)];
};
+struct pdrfork_args {
+ char fdp_l_[PADL_(int *)]; int * fdp; char fdp_r_[PADR_(int *)];
+ char pdflags_l_[PADL_(int)]; int pdflags; char pdflags_r_[PADR_(int)];
+ char rfflags_l_[PADL_(int)]; int rfflags; char rfflags_r_[PADR_(int)];
+};
+struct pdwait_args {
+ char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)];
+ char status_l_[PADL_(int *)]; int * status; char status_r_[PADR_(int *)];
+ char options_l_[PADL_(int)]; int options; char options_r_[PADR_(int)];
+ char wrusage_l_[PADL_(struct __wrusage *)]; struct __wrusage * wrusage; char wrusage_r_[PADR_(struct __wrusage *)];
+ char info_l_[PADL_(struct __siginfo *)]; struct __siginfo * info; char info_r_[PADR_(struct __siginfo *)];
+};
int sys__exit(struct thread *, struct _exit_args *);
int sys_fork(struct thread *, struct fork_args *);
int sys_read(struct thread *, struct read_args *);
@@ -2320,6 +2332,8 @@ int sys_setgroups(struct thread *, struct setgroups_args *);
int sys_jail_attach_jd(struct thread *, struct jail_attach_jd_args *);
int sys_jail_remove_jd(struct thread *, struct jail_remove_jd_args *);
int sys_kexec_load(struct thread *, struct kexec_load_args *);
+int sys_pdrfork(struct thread *, struct pdrfork_args *);
+int sys_pdwait(struct thread *, struct pdwait_args *);
#ifdef COMPAT_43
@@ -3319,6 +3333,8 @@ int freebsd14_setgroups(struct thread *, struct freebsd14_setgroups_args *);
#define SYS_AUE_jail_attach_jd AUE_JAIL_ATTACH
#define SYS_AUE_jail_remove_jd AUE_JAIL_REMOVE
#define SYS_AUE_kexec_load AUE_NULL
+#define SYS_AUE_pdrfork AUE_PDRFORK
+#define SYS_AUE_pdwait AUE_PDWAIT
#undef PAD_
#undef PADL_
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index ab95bde34ceb..834c757aa385 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -239,7 +239,8 @@ swap_release_by_cred_rlimit(u_long pdecr, struct ucred *cred)
#ifdef INVARIANTS
prev = atomic_fetchadd_long(&uip->ui_vmsize, -pdecr);
KASSERT(prev >= pdecr,
- ("negative vmsize for uid %d\n", uip->ui_uid));
+ ("negative vmsize for uid %d, prev %#jx decr %#jx\n",
+ uip->ui_uid, (uintmax_t)prev, (uintmax_t)pdecr));
#else
atomic_subtract_long(&uip->ui_vmsize, pdecr);
#endif
@@ -329,7 +330,7 @@ out_error:
}
void
-swap_reserve_force(vm_ooffset_t incr)
+swap_reserve_force_by_cred(vm_ooffset_t incr, struct ucred *cred)
{
u_long pincr;
@@ -345,7 +346,13 @@ swap_reserve_force(vm_ooffset_t incr)
#endif
pincr = atop(incr);
atomic_add_long(&swap_reserved, pincr);
- swap_reserve_force_rlimit(pincr, curthread->td_ucred);
+ swap_reserve_force_rlimit(pincr, cred);
+}
+
+void
+swap_reserve_force(vm_ooffset_t incr)
+{
+ swap_reserve_force_by_cred(incr, curthread->td_ucred);
}
void
@@ -373,7 +380,8 @@ swap_release_by_cred(vm_ooffset_t decr, struct ucred *cred)
pdecr = atop(decr);
#ifdef INVARIANTS
prev = atomic_fetchadd_long(&swap_reserved, -pdecr);
- KASSERT(prev >= pdecr, ("swap_reserved < decr"));
+ KASSERT(prev >= pdecr, ("swap_reserved %#jx < decr %#jx",
+ (uintmax_t)prev, (uintmax_t)pdecr));
#else
atomic_subtract_long(&swap_reserved, pdecr);
#endif
@@ -776,10 +784,7 @@ swap_pager_init_object(vm_object_t object, void *handle, struct ucred *cred,
object->un_pager.swp.writemappings = 0;
object->handle = handle;
- if (cred != NULL) {
- object->cred = cred;
- object->charge = size;
- }
+ object->cred = cred;
return (true);
}
@@ -892,8 +897,7 @@ swap_pager_dealloc(vm_object_t object)
* Release the allocation charge.
*/
if (object->cred != NULL) {
- swap_release_by_cred(object->charge, object->cred);
- object->charge = 0;
+ swap_release_by_cred(ptoa(object->size), object->cred);
crfree(object->cred);
object->cred = NULL;
}
@@ -1358,14 +1362,22 @@ static int
swap_pager_getpages_locked(struct pctrie_iter *blks, vm_object_t object,
vm_page_t *ma, int count, int *a_rbehind, int *a_rahead, struct buf *bp)
{
+ vm_page_t m;
vm_pindex_t pindex;
- int rahead, rbehind;
+ int i, rahead, rbehind;
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((object->flags & OBJ_SWAP) != 0,
("%s: object not swappable", __func__));
- pindex = ma[0]->pindex;
+ for (pindex = 0, i = 0; i < count; i++) {
+ m = ma[i];
+ if (m != bogus_page) {
+ pindex = m->pindex - i;
+ break;
+ }
+ }
+ MPASS(i != count);
if (!swp_pager_haspage_iter(pindex, &rbehind, &rahead, blks)) {
VM_OBJECT_WUNLOCK(object);
uma_zfree(swrbuf_zone, bp);
@@ -1392,8 +1404,14 @@ swap_pager_getpages_locked(struct pctrie_iter *blks, vm_object_t object,
vm_object_prepare_buf_pages(object, bp->b_pages, count, &rbehind,
&rahead, ma);
bp->b_npages = rbehind + count + rahead;
- for (int i = 0; i < bp->b_npages; i++)
- bp->b_pages[i]->oflags |= VPO_SWAPINPROG;
+ KASSERT(bp->b_npages <= PBUF_PAGES,
+ ("bp_npages %d (rb %d c %d ra %d) greater than PBUF_PAGES %ju",
+ bp->b_npages, rbehind, count, rahead, (uintmax_t)PBUF_PAGES));
+ for (i = 0; i < bp->b_npages; i++) {
+ m = bp->b_pages[i];
+ if (m != bogus_page)
+ m->oflags |= VPO_SWAPINPROG;
+ }
bp->b_blkno = swp_pager_meta_lookup(blks, pindex - rbehind);
KASSERT(bp->b_blkno != SWAPBLK_NONE,
("no swap blocking containing %p(%jx)", object, (uintmax_t)pindex));
@@ -1441,8 +1459,14 @@ swap_pager_getpages_locked(struct pctrie_iter *blks, vm_object_t object,
*/
VM_OBJECT_WLOCK(object);
/* This could be implemented more efficiently with aflags */
- while ((ma[0]->oflags & VPO_SWAPINPROG) != 0) {
- ma[0]->oflags |= VPO_SWAPSLEEP;
+ for (i = 0; i < count; i++) {
+ m = ma[i];
+ if (m != bogus_page)
+ break;
+ }
+ MPASS(i != count);
+ while ((m->oflags & VPO_SWAPINPROG) != 0) {
+ m->oflags |= VPO_SWAPSLEEP;
VM_CNT_INC(v_intrans);
if (VM_OBJECT_SLEEP(object, &object->handle, PSWP,
"swread", hz * 20)) {
@@ -1456,9 +1480,10 @@ swap_pager_getpages_locked(struct pctrie_iter *blks, vm_object_t object,
/*
* If we had an unrecoverable read error pages will not be valid.
*/
- for (int i = 0; i < count; i++)
- if (ma[i]->valid != VM_PAGE_BITS_ALL)
+ for (i = 0; i < count; i++) {
+ if (ma[i] != bogus_page && ma[i]->valid != VM_PAGE_BITS_ALL)
return (VM_PAGER_ERROR);
+ }
return (VM_PAGER_OK);
@@ -1723,6 +1748,9 @@ swp_pager_async_iodone(struct buf *bp)
for (i = 0; i < bp->b_npages; ++i) {
vm_page_t m = bp->b_pages[i];
+ if (m == bogus_page)
+ continue;
+
m->oflags &= ~VPO_SWAPINPROG;
if (m->oflags & VPO_SWAPSLEEP) {
m->oflags &= ~VPO_SWAPSLEEP;
diff --git a/sys/vm/vm.h b/sys/vm/vm.h
index d28c84dd1c95..0da1891dfcc7 100644
--- a/sys/vm/vm.h
+++ b/sys/vm/vm.h
@@ -168,6 +168,7 @@ void vm_ksubmap_init(struct kva_md_info *);
bool swap_reserve(vm_ooffset_t incr);
bool swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred);
void swap_reserve_force(vm_ooffset_t incr);
+void swap_reserve_force_by_cred(vm_ooffset_t incr, struct ucred *cred);
void swap_release(vm_ooffset_t decr);
void swap_release_by_cred(vm_ooffset_t decr, struct ucred *cred);
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 1f13869aebf1..addda72e2b56 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -2310,13 +2310,12 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map __unused,
* directly.
*/
dst_object = vm_object_allocate_anon(atop(dst_entry->end -
- dst_entry->start), NULL, NULL, 0);
+ dst_entry->start), NULL, NULL);
#if VM_NRESERVLEVEL > 0
dst_object->flags |= OBJ_COLORED;
dst_object->pg_color = atop(dst_entry->start);
#endif
dst_object->domain = src_object->domain;
- dst_object->charge = dst_entry->end - dst_entry->start;
dst_entry->object.vm_object = dst_object;
dst_entry->offset = 0;
@@ -2329,7 +2328,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map __unused,
("vm_fault_copy_entry: leaked swp charge"));
dst_object->cred = curthread->td_ucred;
crhold(dst_object->cred);
- *fork_charge += dst_object->charge;
+ *fork_charge += ptoa(dst_object->size);
} else if ((dst_object->flags & OBJ_SWAP) != 0 &&
dst_object->cred == NULL) {
KASSERT(dst_entry->cred != NULL, ("no cred for entry %p",
diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c
index a0d3651ba266..2764b438d27b 100644
--- a/sys/vm/vm_init.c
+++ b/sys/vm/vm_init.c
@@ -159,6 +159,14 @@ vm_mem_init(void *dummy)
pmap_init();
vm_pager_init();
+ /*
+ * Now we can properly handle calls into vm_fault() from
+ * kernel page faults during initialization, typically to
+ * panic. Clear the nofaulting flag set for thread0 in the
+ * image, see kern/init_main.c
+ */
+ curthread->td_pflags &= ~TDP_NOFAULTING;
+
#ifdef INVARIANTS
vm_check_pagesizes();
#endif
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 68dcadd2b2f1..b8295bb2108d 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -2428,7 +2428,7 @@ vm_map_entry_back(vm_map_entry_t entry)
KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
("map entry %p is a submap", entry));
object = vm_object_allocate_anon(atop(entry->end - entry->start), NULL,
- entry->cred, entry->end - entry->start);
+ entry->cred);
entry->object.vm_object = object;
entry->offset = 0;
entry->cred = NULL;
@@ -2443,21 +2443,26 @@ vm_map_entry_back(vm_map_entry_t entry)
static inline void
vm_map_entry_charge_object(vm_map_t map, vm_map_entry_t entry)
{
+ vm_object_t object;
VM_MAP_ASSERT_LOCKED(map);
KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
("map entry %p is a submap", entry));
- if (entry->object.vm_object == NULL && !vm_map_is_system(map) &&
+ object = entry->object.vm_object;
+ if (object == NULL && !vm_map_is_system(map) &&
(entry->eflags & MAP_ENTRY_GUARD) == 0)
vm_map_entry_back(entry);
- else if (entry->object.vm_object != NULL &&
+ else if (object != NULL &&
((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
entry->cred != NULL) {
- VM_OBJECT_WLOCK(entry->object.vm_object);
- KASSERT(entry->object.vm_object->cred == NULL,
+ VM_OBJECT_WLOCK(object);
+ KASSERT(object->cred == NULL,
("OVERCOMMIT: %s: both cred e %p", __func__, entry));
- entry->object.vm_object->cred = entry->cred;
- entry->object.vm_object->charge = entry->end - entry->start;
+ object->cred = entry->cred;
+ if (entry->end - entry->start < ptoa(object->size)) {
+ swap_reserve_force_by_cred(ptoa(object->size) -
+ entry->end + entry->start, object->cred);
+ }
VM_OBJECT_WUNLOCK(entry->object.vm_object);
entry->cred = NULL;
}
@@ -2956,7 +2961,7 @@ again:
* we cannot distinguish between non-charged and
* charged clipped mapping of the same object later.
*/
- KASSERT(obj->charge == 0,
+ KASSERT(obj->cred == NULL,
("vm_map_protect: object %p overcharged (entry %p)",
obj, entry));
if (!swap_reserve(ptoa(obj->size))) {
@@ -2968,7 +2973,6 @@ again:
crhold(cred);
obj->cred = cred;
- obj->charge = ptoa(obj->size);
VM_OBJECT_WUNLOCK(obj);
}
@@ -3942,7 +3946,7 @@ static void
vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
{
vm_object_t object;
- vm_pindex_t offidxstart, offidxend, size1;
+ vm_pindex_t offidxstart, offidxend, oldsize;
vm_size_t size;
vm_map_entry_unlink(map, entry, UNLINK_MERGE_NONE);
@@ -3989,15 +3993,11 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
OBJPR_NOTMAPPED);
if (offidxend >= object->size &&
offidxstart < object->size) {
- size1 = object->size;
+ oldsize = object->size;
object->size = offidxstart;
if (object->cred != NULL) {
- size1 -= object->size;
- KASSERT(object->charge >= ptoa(size1),
- ("object %p charge < 0", object));
- swap_release_by_cred(ptoa(size1),
- object->cred);
- object->charge -= ptoa(size1);
+ swap_release_by_cred(ptoa(oldsize -
+ object->size), object->cred);
}
}
}
@@ -4198,7 +4198,7 @@ vm_map_copy_swap_object(vm_map_entry_t src_entry, vm_map_entry_t dst_entry,
("OVERCOMMIT: vm_map_copy_anon_entry: cred %p",
src_object));
src_object->cred = src_entry->cred;
- src_object->charge = size;
+ *fork_charge += ptoa(src_object->size) - size;
}
dst_entry->object.vm_object = src_object;
if (charged) {
@@ -4455,7 +4455,7 @@ vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
KASSERT(object->cred == NULL,
("vmspace_fork both cred"));
object->cred = old_entry->cred;
- object->charge = old_entry->end -
+ *fork_charge += old_entry->end -
old_entry->start;
old_entry->cred = NULL;
}
@@ -4957,6 +4957,13 @@ vmspace_unshare(struct proc *p)
if (newvmspace == NULL)
return (ENOMEM);
if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
+ /*
+ * The swap reservation failed. The accounting from
+ * the entries of the copied newvmspace will be
+ * subtracted in vmspace_free(), so force the
+ * reservation there.
+ */
+ swap_reserve_force_by_cred(fork_charge, p->p_ucred);
vmspace_free(newvmspace);
return (ENOMEM);
}
@@ -5138,7 +5145,7 @@ RetryLookupLocked:
if (vm_map_lock_upgrade(map))
goto RetryLookup;
entry->object.vm_object = vm_object_allocate_anon(atop(size),
- NULL, entry->cred, size);
+ NULL, entry->cred);
entry->offset = 0;
entry->cred = NULL;
vm_map_lock_downgrade(map);
@@ -5396,9 +5403,8 @@ vm_map_print(vm_map_t map)
(void *)entry->object.vm_object,
(uintmax_t)entry->offset);
if (entry->object.vm_object && entry->object.vm_object->cred)
- db_printf(", obj ruid %d charge %jx",
- entry->object.vm_object->cred->cr_ruid,
- (uintmax_t)entry->object.vm_object->charge);
+ db_printf(", obj ruid %d ",
+ entry->object.vm_object->cred->cr_ruid);
if (entry->eflags & MAP_ENTRY_COW)
db_printf(", copy (%s)",
(entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index f4c54ba91742..aa2d7676e6a8 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -196,9 +196,9 @@ vm_object_zdtor(void *mem, int size, void *arg)
KASSERT(object->type == OBJT_DEAD,
("object %p has non-dead type %d",
object, object->type));
- KASSERT(object->charge == 0 && object->cred == NULL,
- ("object %p has non-zero charge %ju (%p)",
- object, (uintmax_t)object->charge, object->cred));
+ KASSERT(object->cred == NULL,
+ ("object %p has non-zero charge cred %p",
+ object, object->cred));
}
#endif
@@ -254,7 +254,6 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size, u_short flags,
refcount_init(&object->ref_count, 1);
object->memattr = VM_MEMATTR_DEFAULT;
object->cred = NULL;
- object->charge = 0;
object->handle = handle;
object->backing_object = NULL;
object->backing_object_offset = (vm_ooffset_t) 0;
@@ -452,7 +451,7 @@ vm_object_allocate_dyn(objtype_t dyntype, vm_pindex_t size, u_short flags)
*/
vm_object_t
vm_object_allocate_anon(vm_pindex_t size, vm_object_t backing_object,
- struct ucred *cred, vm_size_t charge)
+ struct ucred *cred)
{
vm_object_t handle, object;
@@ -466,7 +465,6 @@ vm_object_allocate_anon(vm_pindex_t size, vm_object_t backing_object,
_vm_object_allocate(OBJT_SWAP, size,
OBJ_ANON | OBJ_ONEMAPPING | OBJ_SWAP, object, handle);
object->cred = cred;
- object->charge = cred != NULL ? charge : 0;
return (object);
}
@@ -1448,7 +1446,7 @@ vm_object_shadow(vm_object_t *object, vm_ooffset_t *offset, vm_size_t length,
/*
* Allocate a new object with the given length.
*/
- result = vm_object_allocate_anon(atop(length), source, cred, length);
+ result = vm_object_allocate_anon(atop(length), source, cred);
/*
* Store the offset into the source object, and fix up the offset into
@@ -1511,6 +1509,7 @@ vm_object_split(vm_map_entry_t entry)
struct pctrie_iter pages;
vm_page_t m;
vm_object_t orig_object, new_object, backing_object;
+ struct ucred *cred;
vm_pindex_t offidxstart;
vm_size_t size;
@@ -1525,9 +1524,26 @@ vm_object_split(vm_map_entry_t entry)
offidxstart = OFF_TO_IDX(entry->offset);
size = atop(entry->end - entry->start);
+ if (orig_object->cred != NULL) {
+ /*
+ * vm_object_split() is currently called from
+ * vmspace_fork(), and it might be tempting to add the
+ * charge for the split object to fork_charge. But
+ * fork_charge is discharged on error when the copied
+ * vmspace is destroyed. Since the split object is
+ * inserted into the shadow hierarchy serving the
+ * source vm_map, it is kept even after the
+ * unsuccessful fork, meaning that we have to force
+ * its swap usage.
+ */
+ cred = curthread->td_ucred;
+ crhold(cred);
+ swap_reserve_force_by_cred(ptoa(size), cred);
+ } else {
+ cred = NULL;
+ }
- new_object = vm_object_allocate_anon(size, orig_object,
- orig_object->cred, ptoa(size));
+ new_object = vm_object_allocate_anon(size, orig_object, cred);
/*
* We must wait for the orig_object to complete any in-progress
@@ -1550,12 +1566,6 @@ vm_object_split(vm_map_entry_t entry)
new_object->backing_object_offset =
orig_object->backing_object_offset + entry->offset;
}
- if (orig_object->cred != NULL) {
- crhold(orig_object->cred);
- KASSERT(orig_object->charge >= ptoa(size),
- ("orig_object->charge < 0"));
- orig_object->charge -= ptoa(size);
- }
/*
* Mark the split operation so that swap_pager_getpages() knows
@@ -2233,7 +2243,6 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
swap_release_by_cred(ptoa(prev_object->size -
next_pindex), prev_object->cred);
}
- prev_object->charge += charge;
} else if ((cflags & OBJCO_CHARGED) != 0) {
/*
* The caller charged, but the object has
@@ -2786,9 +2795,8 @@ DB_SHOW_COMMAND(object, vm_object_print_static)
db_iprintf("Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x",
object, (int)object->type, (uintmax_t)object->size,
object->resident_page_count, object->ref_count, object->flags);
- db_iprintf(" ruid %d charge %jx\n",
- object->cred ? object->cred->cr_ruid : -1,
- (uintmax_t)object->charge);
+ db_iprintf(" ruid %d\n",
+ object->cred ? object->cred->cr_ruid : -1);
db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
atomic_load_int(&object->shadow_count),
object->backing_object ? object->backing_object->ref_count : 0,
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index ca88adc12c24..e01d8ad79995 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -175,7 +175,6 @@ struct vm_object {
} phys;
} un_pager;
struct ucred *cred;
- vm_ooffset_t charge;
void *umtx_data;
};
@@ -356,8 +355,7 @@ void umtx_shm_object_terminated(vm_object_t object);
extern int umtx_shm_vnobj_persistent;
vm_object_t vm_object_allocate (objtype_t, vm_pindex_t);
-vm_object_t vm_object_allocate_anon(vm_pindex_t, vm_object_t, struct ucred *,
- vm_size_t);
+vm_object_t vm_object_allocate_anon(vm_pindex_t, vm_object_t, struct ucred *);
vm_object_t vm_object_allocate_dyn(objtype_t, vm_pindex_t, u_short);
boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t, vm_size_t,
int);
diff --git a/sys/x86/cpufreq/hwpstate_amd.c b/sys/x86/cpufreq/hwpstate_amd.c
index ce0e0f6dd47a..34838753e221 100644
--- a/sys/x86/cpufreq/hwpstate_amd.c
+++ b/sys/x86/cpufreq/hwpstate_amd.c
@@ -179,6 +179,11 @@ SYSCTL_BOOL(_machdep, OID_AUTO, hwpstate_pkg_ctrl, CTLFLAG_RDTUN,
&hwpstate_pkg_ctrl_enable, 0,
"Set 1 (default) to enable package-level control, 0 to disable");
+static bool hwpstate_amd_cppc_enable = true;
+SYSCTL_BOOL(_machdep, OID_AUTO, hwpstate_amd_cppc_enable, CTLFLAG_RDTUN,
+ &hwpstate_amd_cppc_enable, 0,
+ "Set 1 (default) to enable AMD CPPC, 0 to disable");
+
static device_method_t hwpstate_methods[] = {
/* Device interface */
DEVMETHOD(device_identify, hwpstate_identify),
@@ -198,6 +203,26 @@ static device_method_t hwpstate_methods[] = {
{0, 0}
};
+struct amdhwp_dump_sysctl_handler_request {
+ uint64_t enable;
+ uint64_t caps;
+ uint64_t req;
+ int res;
+};
+
+static void
+amdhwp_dump_sysctl_handler_cb(void *args)
+{
+ struct amdhwp_dump_sysctl_handler_request *req =
+ (struct amdhwp_dump_sysctl_handler_request *)args;
+
+ req->res = rdmsr_safe(MSR_AMD_CPPC_ENABLE, &req->enable);
+ if (req->res == 0)
+ req->res = rdmsr_safe(MSR_AMD_CPPC_CAPS_1, &req->caps);
+ if (req->res == 0)
+ req->res = rdmsr_safe(MSR_AMD_CPPC_REQUEST, &req->req);
+}
+
static int
amdhwp_dump_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
@@ -205,6 +230,7 @@ amdhwp_dump_sysctl_handler(SYSCTL_HANDLER_ARGS)
struct pcpu *pc;
struct sbuf *sb;
struct hwpstate_softc *sc;
+ struct amdhwp_dump_sysctl_handler_request request;
uint64_t data;
int ret;
@@ -217,20 +243,19 @@ amdhwp_dump_sysctl_handler(SYSCTL_HANDLER_ARGS)
sb = sbuf_new(NULL, NULL, 1024, SBUF_FIXEDLEN | SBUF_INCLUDENUL);
sbuf_putc(sb, '\n');
- thread_lock(curthread);
- sched_bind(curthread, pc->pc_cpuid);
- thread_unlock(curthread);
+ smp_rendezvous_cpu(pc->pc_cpuid, smp_no_rendezvous_barrier,
+ amdhwp_dump_sysctl_handler_cb, smp_no_rendezvous_barrier, &request);
+ ret = request.res;
+ if (ret)
+ goto out;
- rdmsr_safe(MSR_AMD_CPPC_ENABLE, &data);
+ data = request.enable;
sbuf_printf(sb, "CPU%d: HWP %sabled\n", pc->pc_cpuid,
((data & 1) ? "En" : "Dis"));
-
- if (data == 0) {
- ret = 0;
+ if (data == 0)
goto out;
- }
- rdmsr_safe(MSR_AMD_CPPC_CAPS_1, &data);
+ data = request.caps;
sbuf_printf(sb, "\tHighest Performance: %03ju\n",
BITS_VALUE(AMD_CPPC_CAPS_1_HIGH_PERF_BITS, data));
sbuf_printf(sb, "\tGuaranteed Performance: %03ju\n",
@@ -241,8 +266,7 @@ amdhwp_dump_sysctl_handler(SYSCTL_HANDLER_ARGS)
BITS_VALUE(AMD_CPPC_CAPS_1_LOW_PERF_BITS, data));
sbuf_putc(sb, '\n');
- rdmsr_safe(MSR_AMD_CPPC_REQUEST, &data);
-
+ data = request.req;
#define pkg_print(name, offset) \
do { \
sbuf_printf(sb, "\t%s: %03u\n", name, \
@@ -258,11 +282,8 @@ amdhwp_dump_sysctl_handler(SYSCTL_HANDLER_ARGS)
sbuf_putc(sb, '\n');
out:
- thread_lock(curthread);
- sched_unbind(curthread);
- thread_unlock(curthread);
-
- ret = sbuf_finish(sb);
+ if (ret == 0)
+ ret = sbuf_finish(sb);
if (ret == 0)
ret = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb));
sbuf_delete(sb);
@@ -270,44 +291,29 @@ out:
return (ret);
}
-static bool
-sysctl_epp_select_per_core(const device_t hwp_device, uint32_t val)
+static void
+sysctl_epp_select_per_core(device_t hwp_device, uint32_t val)
{
struct hwpstate_softc *sc;
- bool success = true;
- int ret, cpuid;
- cpuid = cpu_get_pcpu(hwp_device)->pc_cpuid;
- thread_lock(curthread);
- sched_bind(curthread, cpuid);
- thread_unlock(curthread);
sc = device_get_softc(hwp_device);
if (BITS_VALUE(AMD_CPPC_REQUEST_ENERGY_PERF_BITS, sc->req) == val)
- goto end;
+ return;
SET_BITS_VALUE(sc->req, AMD_CPPC_REQUEST_ENERGY_PERF_BITS, val);
- ret = wrmsr_safe(MSR_AMD_CPPC_REQUEST, sc->req);
- if (ret != 0) {
- success = false;
- device_printf(hwp_device, "Failed to set EPP to %u", val);
- goto end;
- }
-
-end:
- thread_lock(curthread);
- sched_unbind(curthread);
- thread_unlock(curthread);
-
- return (success);
+ x86_msr_op(MSR_AMD_CPPC_REQUEST,
+ MSR_OP_RENDEZVOUS_ONE | MSR_OP_WRITE |
+ MSR_OP_CPUID(cpu_get_pcpu(hwp_device)->pc_cpuid),
+ sc->req, NULL);
}
static int
sysctl_epp_select(SYSCTL_HANDLER_ARGS)
{
device_t dev, hwp_dev;
+ devclass_t dc;
struct hwpstate_softc *sc;
const uint32_t max_energy_perf =
BITS_VALUE(AMD_CPPC_REQUEST_ENERGY_PERF_BITS, (uint64_t)-1);
- devclass_t dc;
uint32_t val;
int ret = 0;
int cpu;
@@ -577,44 +583,45 @@ hwpstate_identify(driver_t *driver, device_t parent)
device_printf(parent, "hwpstate: add child failed\n");
}
-static int
-amd_set_autonomous_hwp(struct hwpstate_softc *sc)
+struct amd_set_autonomous_hwp_request {
+ device_t dev;
+ int res;
+};
+
+static void
+amd_set_autonomous_hwp_cb(void *args)
{
- struct pcpu *pc;
+ struct hwpstate_softc *sc;
+ struct amd_set_autonomous_hwp_request *req =
+ (struct amd_set_autonomous_hwp_request *)args;
device_t dev;
uint64_t caps;
int ret;
- dev = sc->dev;
- pc = cpu_get_pcpu(dev);
- if (pc == NULL)
- return (ENXIO);
-
- thread_lock(curthread);
- sched_bind(curthread, pc->pc_cpuid);
- thread_unlock(curthread);
-
+ dev = req->dev;
+ sc = device_get_softc(dev);
ret = wrmsr_safe(MSR_AMD_CPPC_ENABLE, 1);
if (ret != 0) {
device_printf(dev, "Failed to enable cppc for cpu%d (%d)\n",
- pc->pc_cpuid, ret);
- goto out;
+ curcpu, ret);
+ req->res = ret;
}
ret = rdmsr_safe(MSR_AMD_CPPC_REQUEST, &sc->req);
if (ret != 0) {
device_printf(dev,
- "Failed to read CPPC request MSR for cpu%d (%d)\n",
- pc->pc_cpuid, ret);
- goto out;
+ "Failed to read CPPC request MSR for cpu%d (%d)\n", curcpu,
+ ret);
+ req->res = ret;
}
ret = rdmsr_safe(MSR_AMD_CPPC_CAPS_1, &caps);
if (ret != 0) {
device_printf(dev,
"Failed to read HWP capabilities MSR for cpu%d (%d)\n",
- pc->pc_cpuid, ret);
- goto out;
+ curcpu, ret);
+ req->res = ret;
+ return;
}
/*
@@ -632,17 +639,27 @@ amd_set_autonomous_hwp(struct hwpstate_softc *sc)
ret = wrmsr_safe(MSR_AMD_CPPC_REQUEST, sc->req);
if (ret) {
- device_printf(dev,
- "Failed to setup autonomous HWP for cpu%d\n",
- pc->pc_cpuid);
- goto out;
+ device_printf(dev, "Failed to setup autonomous HWP for cpu%d\n",
+ curcpu);
+ req->res = ret;
+ return;
}
-out:
- thread_lock(curthread);
- sched_unbind(curthread);
- thread_unlock(curthread);
+ req->res = 0;
+}
- return (ret);
+static int
+amd_set_autonomous_hwp(struct hwpstate_softc *sc)
+{
+ struct amd_set_autonomous_hwp_request req;
+ device_t dev;
+
+ dev = sc->dev;
+ req.dev = dev;
+ smp_rendezvous_cpu(cpu_get_pcpu(dev)->pc_cpuid,
+ smp_no_rendezvous_barrier, amd_set_autonomous_hwp_cb,
+ smp_no_rendezvous_barrier, &req);
+
+ return (req.res);
}
static int
@@ -655,7 +672,8 @@ hwpstate_probe(device_t dev)
sc = device_get_softc(dev);
- if (amd_extended_feature_extensions & AMDFEID_CPPC) {
+ if (hwpstate_amd_cppc_enable &&
+ (amd_extended_feature_extensions & AMDFEID_CPPC)) {
sc->flags |= PSTATE_CPPC;
device_set_desc(dev,
"AMD Collaborative Processor Performance Control (CPPC)");
diff --git a/sys/x86/include/apicreg.h b/sys/x86/include/apicreg.h
index 1252647fbab3..4cc9cabdad9e 100644
--- a/sys/x86/include/apicreg.h
+++ b/sys/x86/include/apicreg.h
@@ -439,7 +439,12 @@ typedef struct IOAPIC ioapic_t;
#define APIC_EXTF_SEIO_CAP 0x00000002
#define APIC_EXTF_IER_CAP 0x00000001
-/* LVT table indices */
+/*
+ * LVT table indices.
+ * Must be ordered to match the order in which the LVT entries
+ * appear in successive LAPIC versions, as reported by the
+ * LAPIC_VERSION register's MAXLVT field.
+ */
#define APIC_LVT_LINT0 0
#define APIC_LVT_LINT1 1
#define APIC_LVT_TIMER 2
diff --git a/sys/x86/include/intr_machdep.h b/sys/x86/include/intr_machdep.h
index 497c89b0a7eb..6d6b1962b6bd 100644
--- a/sys/x86/include/intr_machdep.h
+++ b/sys/x86/include/intr_machdep.h
@@ -151,6 +151,7 @@ int intr_register_source(struct intsrc *isrc);
int intr_remove_handler(void *cookie);
void intr_resume(bool suspend_cancelled);
void intr_suspend(void);
+void intr_enable_src(u_int irq);
void intr_reprogram(void);
void intrcnt_add(const char *name, u_long **countp);
void nexus_add_irq(u_long irq);
diff --git a/sys/x86/x86/intr_machdep.c b/sys/x86/x86/intr_machdep.c
index a16d2ced8dba..7e57c97a7291 100644
--- a/sys/x86/x86/intr_machdep.c
+++ b/sys/x86/x86/intr_machdep.c
@@ -405,6 +405,15 @@ intr_suspend(void)
mtx_unlock(&intrpic_lock);
}
+void
+intr_enable_src(u_int irq)
+{
+ struct intsrc *is;
+
+ is = interrupt_sources[irq];
+ is->is_pic->pic_enable_source(is);
+}
+
static int
intr_assign_cpu(void *arg, int cpu)
{
diff --git a/sys/x86/x86/local_apic.c b/sys/x86/x86/local_apic.c
index c1c9029531f5..c5399984c896 100644
--- a/sys/x86/x86/local_apic.c
+++ b/sys/x86/x86/local_apic.c
@@ -128,6 +128,8 @@ struct lvt {
u_int lvt_active:1;
u_int lvt_mode:16;
u_int lvt_vector:8;
+ u_int lvt_reg;
+ const char *lvt_desc;
};
struct lapic {
@@ -147,22 +149,123 @@ struct lapic {
} static *lapics;
/* Global defaults for local APIC LVT entries. */
-static struct lvt lvts[APIC_LVT_MAX + 1] = {
- { 1, 1, 1, 1, APIC_LVT_DM_EXTINT, 0 }, /* LINT0: masked ExtINT */
- { 1, 1, 0, 1, APIC_LVT_DM_NMI, 0 }, /* LINT1: NMI */
- { 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_TIMER_INT }, /* Timer */
- { 1, 1, 0, 1, APIC_LVT_DM_FIXED, APIC_ERROR_INT }, /* Error */
- { 1, 1, 1, 1, APIC_LVT_DM_NMI, 0 }, /* PMC */
- { 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_THERMAL_INT }, /* Thermal */
- { 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_CMC_INT }, /* CMCI */
+static struct lvt lvts[] = {
+ /* LINT0: masked ExtINT */
+ [APIC_LVT_LINT0] = {
+ .lvt_edgetrigger = 1,
+ .lvt_activehi = 1,
+ .lvt_masked = 1,
+ .lvt_active = 1,
+ .lvt_mode = APIC_LVT_DM_EXTINT,
+ .lvt_vector = 0,
+ .lvt_reg = LAPIC_LVT_LINT0,
+ .lvt_desc = "LINT0",
+ },
+ /* LINT1: NMI */
+ [APIC_LVT_LINT1] = {
+ .lvt_edgetrigger = 1,
+ .lvt_activehi = 1,
+ .lvt_masked = 0,
+ .lvt_active = 1,
+ .lvt_mode = APIC_LVT_DM_NMI,
+ .lvt_vector = 0,
+ .lvt_reg = LAPIC_LVT_LINT1,
+ .lvt_desc = "LINT1",
+ },
+ [APIC_LVT_TIMER] = {
+ .lvt_edgetrigger = 1,
+ .lvt_activehi = 1,
+ .lvt_masked = 1,
+ .lvt_active = 1,
+ .lvt_mode = APIC_LVT_DM_FIXED,
+ .lvt_vector = APIC_TIMER_INT,
+ .lvt_reg = LAPIC_LVT_TIMER,
+ .lvt_desc = "TIMER",
+ },
+ [APIC_LVT_ERROR] = {
+ .lvt_edgetrigger = 1,
+ .lvt_activehi = 1,
+ .lvt_masked = 0,
+ .lvt_active = 1,
+ .lvt_mode = APIC_LVT_DM_FIXED,
+ .lvt_vector = APIC_ERROR_INT,
+ .lvt_reg = LAPIC_LVT_ERROR,
+ .lvt_desc = "ERROR",
+ },
+ [APIC_LVT_PMC] = {
+ .lvt_edgetrigger = 1,
+ .lvt_activehi = 1,
+ .lvt_masked = 1,
+ .lvt_active = 1,
+ .lvt_mode = APIC_LVT_DM_NMI,
+ .lvt_vector = 0,
+ .lvt_reg = LAPIC_LVT_PCINT,
+ .lvt_desc = "PMC",
+ },
+ [APIC_LVT_THERMAL] = {
+ .lvt_edgetrigger = 1,
+ .lvt_activehi = 1,
+ .lvt_masked = 1,
+ .lvt_active = 1,
+ .lvt_mode = APIC_LVT_DM_FIXED,
+ .lvt_vector = APIC_THERMAL_INT,
+ .lvt_reg = LAPIC_LVT_THERMAL,
+ .lvt_desc = "THERM",
+ },
+ [APIC_LVT_CMCI] = {
+ .lvt_edgetrigger = 1,
+ .lvt_activehi = 1,
+ .lvt_masked = 1,
+ .lvt_active = 1,
+ .lvt_mode = APIC_LVT_DM_FIXED,
+ .lvt_vector = APIC_CMC_INT,
+ .lvt_reg = LAPIC_LVT_CMCI,
+ .lvt_desc = "CMCI",
+ },
};
/* Global defaults for AMD local APIC ELVT entries. */
-static struct lvt elvts[APIC_ELVT_MAX + 1] = {
- { 1, 1, 1, 0, APIC_LVT_DM_FIXED, 0 },
- { 1, 1, 1, 0, APIC_LVT_DM_FIXED, APIC_CMC_INT },
- { 1, 1, 1, 0, APIC_LVT_DM_FIXED, 0 },
- { 1, 1, 1, 0, APIC_LVT_DM_FIXED, 0 },
+static struct lvt elvts[] = {
+ [APIC_ELVT_IBS] = {
+ .lvt_edgetrigger = 1,
+ .lvt_activehi = 1,
+ .lvt_masked = 1,
+ .lvt_active = 0,
+ .lvt_mode = APIC_LVT_DM_FIXED,
+ .lvt_vector = 0,
+ .lvt_reg = LAPIC_EXT_LVT0,
+ .lvt_desc = "ELVT0",
+ },
+ [APIC_ELVT_MCA] = {
+ .lvt_edgetrigger = 1,
+ .lvt_activehi = 1,
+ .lvt_masked = 1,
+ .lvt_active = 0,
+ .lvt_mode = APIC_LVT_DM_FIXED,
+ .lvt_vector = APIC_CMC_INT,
+ .lvt_reg = LAPIC_EXT_LVT1,
+ .lvt_desc = "MCA",
+ },
+ [APIC_ELVT_DEI] = {
+ .lvt_edgetrigger = 1,
+ .lvt_activehi = 1,
+ .lvt_masked = 1,
+ .lvt_active = 0,
+ .lvt_mode = APIC_LVT_DM_FIXED,
+ .lvt_vector = 0,
+ .lvt_reg = LAPIC_EXT_LVT2,
+ .lvt_desc = "ELVT2",
+ },
+ [APIC_ELVT_SBI] = {
+ .lvt_edgetrigger = 1,
+ .lvt_activehi = 1,
+ .lvt_masked = 1,
+ .lvt_active = 0,
+ .lvt_mode = APIC_LVT_DM_FIXED,
+ .lvt_vector = 0,
+ .lvt_reg = LAPIC_EXT_LVT3,
+ .lvt_desc = "ELVT3",
+ },
};
static inthand_t *ioint_handlers[] = {
@@ -224,6 +327,16 @@ SYSCTL_INT(_hw_apic, OID_AUTO, ds_idle_timeout, CTLFLAG_RWTUN,
static void lapic_calibrate_initcount(struct lapic *la);
/*
+ * Calculate the max index of the present LVT entry from the value of
+ * the LAPIC version register.
+ */
+static int
+lapic_maxlvt(uint32_t version)
+{
+ return ((version & APIC_VER_MAXLVT) >> MAXLVTSHIFT);
+}
+
+/*
* Use __nosanitizethread to exempt the LAPIC I/O accessors from KCSan
* instrumentation. Otherwise, if x2APIC is not available, use of the global
* lapic_map will generate a KCSan false positive. While the mapping is
@@ -337,6 +450,7 @@ lapic_is_x2apic(void)
(APICBASE_X2APIC | APICBASE_ENABLED));
}
+static void lapic_early_mask_vecs(void);
static void lapic_enable(void);
static void lapic_resume(struct pic *pic, bool suspend_cancelled);
static void lapic_timer_oneshot(struct lapic *);
@@ -462,6 +576,7 @@ lapic_init(vm_paddr_t addr)
/* Perform basic initialization of the BSP's local APIC. */
lapic_enable();
+ lapic_early_mask_vecs();
/* Set BSP's per-CPU local APIC ID. */
PCPU_SET(apic_id, lapic_id());
@@ -660,7 +775,7 @@ lapic_dump(const char* str)
int i;
version = lapic_read32(LAPIC_VERSION);
- maxlvt = (version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
+ maxlvt = lapic_maxlvt(version);
printf("cpu%d %s:\n", PCPU_GET(cpuid), str);
printf(" ID: 0x%08x VER: 0x%08x LDR: 0x%08x DFR: 0x%08x",
lapic_read32(LAPIC_ID), version,
@@ -700,6 +815,35 @@ lapic_xapic_mode(void)
intr_restore(saveintr);
}
+static void
+lapic_early_mask_vec(const struct lvt *l)
+{
+ uint32_t v;
+
+ if (l->lvt_masked != 0) {
+ v = lapic_read32(l->lvt_reg);
+ v |= APIC_LVT_M;
+ lapic_write32(l->lvt_reg, v);
+ }
+}
+
+/* Done on BSP only */
+static void
+lapic_early_mask_vecs(void)
+{
+ int elvt_count, lvts_count, i;
+ uint32_t version;
+
+ version = lapic_read32(LAPIC_VERSION);
+ lvts_count = min(nitems(lvts), lapic_maxlvt(version) + 1);
+ for (i = 0; i < lvts_count; i++)
+ lapic_early_mask_vec(&lvts[i]);
+
+ elvt_count = amd_read_elvt_count();
+ for (i = 0; i < elvt_count; i++)
+ lapic_early_mask_vec(&elvts[i]);
+}
+
void
lapic_setup(int boot)
{
@@ -715,7 +859,7 @@ lapic_setup(int boot)
la = &lapics[lapic_id()];
KASSERT(la->la_present, ("missing APIC structure"));
version = lapic_read32(LAPIC_VERSION);
- maxlvt = (version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
+ maxlvt = lapic_maxlvt(version);
/* Initialize the TPR to allow all interrupts. */
lapic_set_tpr(0);
@@ -871,7 +1015,7 @@ lapic_enable_pcint(void)
#endif
/* Fail if the PMC LVT is not present. */
- maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
+ maxlvt = lapic_maxlvt(lapic_read32(LAPIC_VERSION));
if (maxlvt < APIC_LVT_PMC)
return (0);
if (refcount_acquire(&pcint_refcnt) > 0)
@@ -895,7 +1039,7 @@ lapic_disable_pcint(void)
#endif
/* Fail if the PMC LVT is not present. */
- maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
+ maxlvt = lapic_maxlvt(lapic_read32(LAPIC_VERSION));
if (maxlvt < APIC_LVT_PMC)
return;
if (!refcount_release(&pcint_refcnt))
@@ -1758,18 +1902,34 @@ dump_mask(const char *prefix, uint32_t v, int base)
/* Show info from the lapic regs for this CPU. */
DB_SHOW_COMMAND_FLAGS(lapic, db_show_lapic, DB_CMD_MEMSAFE)
{
- uint32_t v;
+ const struct lvt *l;
+ int elvt_count, lvts_count, i;
+ uint32_t v, vr;
db_printf("lapic ID = %d\n", lapic_id());
v = lapic_read32(LAPIC_VERSION);
- db_printf("version = %d.%d\n", (v & APIC_VER_VERSION) >> 4,
- v & 0xf);
- db_printf("max LVT = %d\n", (v & APIC_VER_MAXLVT) >> MAXLVTSHIFT);
- v = lapic_read32(LAPIC_SVR);
- db_printf("SVR = %02x (%s)\n", v & APIC_SVR_VECTOR,
- v & APIC_SVR_ENABLE ? "enabled" : "disabled");
+ db_printf("version = %d.%d (%#x) \n", (v & APIC_VER_VERSION) >> 4,
+ v & 0xf, v);
+ db_printf("max LVT = %d\n", lapic_maxlvt(v));
+ vr = lapic_read32(LAPIC_SVR);
+ db_printf("SVR = %02x (%s)\n", vr & APIC_SVR_VECTOR,
+ vr & APIC_SVR_ENABLE ? "enabled" : "disabled");
db_printf("TPR = %02x\n", lapic_read32(LAPIC_TPR));
+ lvts_count = min(nitems(lvts), lapic_maxlvt(v) + 1);
+ for (i = 0; i < lvts_count; i++) {
+ l = &lvts[i];
+ db_printf("LVT%d (reg %#x %-5s) = %#010x\n", i, l->lvt_reg,
+ l->lvt_desc, lapic_read32(l->lvt_reg));
+ }
+
+ elvt_count = amd_read_elvt_count();
+ for (i = 0; i < elvt_count; i++) {
+ l = &elvts[i];
+ db_printf("ELVT%d (reg %#x %-5s) = %#010x\n", i, l->lvt_reg,
+ l->lvt_desc, lapic_read32(l->lvt_reg));
+ }
+
#define dump_field(prefix, regn, index) \
dump_mask(__XSTRING(prefix ## index), \
lapic_read32(LAPIC_ ## regn ## index), \
diff --git a/sys/xdr/xdr.c b/sys/xdr/xdr.c
index 81d238ebf19f..f983a474abdd 100644
--- a/sys/xdr/xdr.c
+++ b/sys/xdr/xdr.c
@@ -620,6 +620,13 @@ xdr_string(XDR *xdrs, char **cpp, u_int maxsize)
if (sp == NULL) {
return(TRUE); /* already free */
}
+ /*
+ * XXX: buggy software may call this without a third
+ * argument via xdr_free(). Ignore maxsize since it may
+ * be invalid. Otherwise, if it's very small, we might
+ * fail to free the string.
+ */
+ maxsize = RPC_MAXDATASIZE;
/* FALLTHROUGH */
case XDR_ENCODE:
size = strlen(sp);