aboutsummaryrefslogtreecommitdiff
path: root/sys/dev
diff options
context:
space:
mode:
Diffstat (limited to 'sys/dev')
-rw-r--r--sys/dev/acpica/acpi.c58
-rw-r--r--sys/dev/acpica/acpivar.h1
-rw-r--r--sys/dev/ahci/ahci_pci.c1
-rw-r--r--sys/dev/amdgpio/amdgpio.c136
-rw-r--r--sys/dev/amdgpio/amdgpio.h9
-rw-r--r--sys/dev/ath/ath_rate/sample/sample.c8
-rw-r--r--sys/dev/ath/if_ath.c3
-rw-r--r--sys/dev/ath/if_ath_tx.c10
-rw-r--r--sys/dev/ath/if_ath_tx_ht.c6
-rw-r--r--sys/dev/axgbe/if_axgbe_pci.c3
-rw-r--r--sys/dev/bce/if_bce.c2
-rw-r--r--sys/dev/bnxt/bnxt_en/bnxt_hwrm.c2
-rw-r--r--sys/dev/bwi/if_bwi.c4
-rw-r--r--sys/dev/bwn/if_bwn.c2
-rw-r--r--sys/dev/cpuctl/cpuctl.c22
-rw-r--r--sys/dev/cxgbe/cxgbei/icl_cxgbei.c2
-rw-r--r--sys/dev/cxgbe/iw_cxgbe/qp.c2
-rw-r--r--sys/dev/cxgbe/t4_main.c2
-rw-r--r--sys/dev/cxgbe/tom/t4_cpl_io.c133
-rw-r--r--sys/dev/cxgbe/tom/t4_ddp.c2
-rw-r--r--sys/dev/cxgbe/tom/t4_tls.c69
-rw-r--r--sys/dev/cxgbe/tom/t4_tom.c2
-rw-r--r--sys/dev/cxgbe/tom/t4_tom.h12
-rw-r--r--sys/dev/cyapa/cyapa.c95
-rw-r--r--sys/dev/e1000/if_em.c354
-rw-r--r--sys/dev/e1000/if_em.h19
-rw-r--r--sys/dev/enetc/if_enetc.c3
-rw-r--r--sys/dev/gpio/gpio_if.m26
-rw-r--r--sys/dev/gpio/gpiobus.c101
-rw-r--r--sys/dev/gpio/gpiobus_if.m30
-rw-r--r--sys/dev/gpio/gpiobus_internal.h1
-rw-r--r--sys/dev/gpio/gpioc.c157
-rw-r--r--sys/dev/gpio/gpioled.c106
-rw-r--r--sys/dev/gpio/ofw_gpiobus.c3
-rw-r--r--sys/dev/hpt27xx/hptintf.h6
-rw-r--r--sys/dev/hwpmc/hwpmc_mod.c21
-rw-r--r--sys/dev/hwt/hwt_ioctl.c9
-rw-r--r--sys/dev/ice/ice_fw_logging.c2
-rw-r--r--sys/dev/ice/ice_lib.c6
-rw-r--r--sys/dev/ichsmb/ichsmb_pci.c3
-rw-r--r--sys/dev/ichwd/i6300esbwd.c245
-rw-r--r--sys/dev/ichwd/i6300esbwd.h46
-rw-r--r--sys/dev/ichwd/ichwd.c2
-rw-r--r--sys/dev/ichwd/ichwd.h3
-rw-r--r--sys/dev/igc/if_igc.c4
-rw-r--r--sys/dev/iicbus/iicbb.c7
-rw-r--r--sys/dev/iommu/busdma_iommu.c54
-rw-r--r--sys/dev/iommu/iommu.h2
-rw-r--r--sys/dev/ipw/if_ipw.c3
-rw-r--r--sys/dev/irdma/irdma_cm.c2
-rw-r--r--sys/dev/irdma/irdma_utils.c4
-rw-r--r--sys/dev/iwi/if_iwi.c4
-rw-r--r--sys/dev/iwm/if_iwm.c3
-rw-r--r--sys/dev/iwn/if_iwn.c12
-rw-r--r--sys/dev/iwx/if_iwx.c2
-rw-r--r--sys/dev/ixgbe/if_ix.c36
-rw-r--r--sys/dev/ixgbe/ixgbe_e610.c34
-rw-r--r--sys/dev/ixl/if_ixl.c4
-rw-r--r--sys/dev/malo/if_malo.c4
-rw-r--r--sys/dev/mpi3mr/mpi3mr.c9
-rw-r--r--sys/dev/mpi3mr/mpi3mr_cam.c5
-rw-r--r--sys/dev/mpr/mpr.c10
-rw-r--r--sys/dev/mpr/mpr_mapping.c18
-rw-r--r--sys/dev/mpr/mprvar.h1
-rw-r--r--sys/dev/mwl/if_mwl.c8
-rw-r--r--sys/dev/nvme/nvme.c1
-rw-r--r--sys/dev/nvme/nvme_ahci.c1
-rw-r--r--sys/dev/nvme/nvme_ctrlr.c117
-rw-r--r--sys/dev/nvme/nvme_ctrlr_cmd.c3
-rw-r--r--sys/dev/nvme/nvme_ns.c3
-rw-r--r--sys/dev/nvme/nvme_pci.c1
-rw-r--r--sys/dev/nvme/nvme_private.h3
-rw-r--r--sys/dev/nvme/nvme_qpair.c3
-rw-r--r--sys/dev/nvme/nvme_sim.c1
-rw-r--r--sys/dev/nvme/nvme_sysctl.c1
-rw-r--r--sys/dev/nvme/nvme_util.c23
-rw-r--r--sys/dev/nvmf/nvmf_tcp.c2
-rw-r--r--sys/dev/otus/if_otus.c11
-rw-r--r--sys/dev/pci/pci_user.c121
-rw-r--r--sys/dev/puc/pucdata.c43
-rw-r--r--sys/dev/qat/include/common/adf_accel_devices.h4
-rw-r--r--sys/dev/qat/qat_api/include/icp_sal_versions.h2
-rw-r--r--sys/dev/qat/qat_common/adf_gen4_timer.c2
-rw-r--r--sys/dev/qat/qat_common/qat_uclo.c1
-rw-r--r--sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c58
-rw-r--r--sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.h6
-rw-r--r--sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c15
-rw-r--r--sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c15
-rw-r--r--sys/dev/qlnx/qlnxe/ecore_dev.c6
-rw-r--r--sys/dev/qlnx/qlnxe/ecore_mcp.c2
-rw-r--r--sys/dev/qlnx/qlnxe/qlnx_def.h16
-rw-r--r--sys/dev/qlnx/qlnxe/qlnx_os.c25
-rw-r--r--sys/dev/ral/rt2560.c4
-rw-r--r--sys/dev/ral/rt2661.c4
-rw-r--r--sys/dev/ral/rt2860.c3
-rw-r--r--sys/dev/random/fenestrasX/fx_pool.c3
-rw-r--r--sys/dev/random/random_harvestq.c148
-rw-r--r--sys/dev/random/randomdev.h3
-rw-r--r--sys/dev/re/if_re.c5
-rw-r--r--sys/dev/rtwn/if_rtwn.c8
-rw-r--r--sys/dev/rtwn/if_rtwn_tx.c8
-rw-r--r--sys/dev/rtwn/rtl8192c/r92c_tx.c11
-rw-r--r--sys/dev/rtwn/rtl8812a/r12a_tx.c16
-rw-r--r--sys/dev/sound/pci/hda/hdaa_patches.c6
-rw-r--r--sys/dev/sound/pci/hda/hdac.c11
-rw-r--r--sys/dev/sound/pci/hda/hdac.h2
-rw-r--r--sys/dev/sound/pcm/channel.h2
-rw-r--r--sys/dev/sound/pcm/dsp.c4
-rw-r--r--sys/dev/tpm/tpm20.c13
-rw-r--r--sys/dev/tpm/tpm_tis_core.c7
-rw-r--r--sys/dev/uart/uart_bus_pci.c2
-rw-r--r--sys/dev/ufshci/ufshci.h69
-rw-r--r--sys/dev/ufshci/ufshci_ctrlr.c408
-rw-r--r--sys/dev/ufshci/ufshci_ctrlr_cmd.c2
-rw-r--r--sys/dev/ufshci/ufshci_dev.c354
-rw-r--r--sys/dev/ufshci/ufshci_pci.c6
-rw-r--r--sys/dev/ufshci/ufshci_private.h54
-rw-r--r--sys/dev/ufshci/ufshci_reg.h2
-rw-r--r--sys/dev/ufshci/ufshci_req_queue.c292
-rw-r--r--sys/dev/ufshci/ufshci_req_sdb.c77
-rw-r--r--sys/dev/ufshci/ufshci_sim.c1
-rw-r--r--sys/dev/ufshci/ufshci_sysctl.c20
-rw-r--r--sys/dev/ufshci/ufshci_uic_cmd.c19
-rw-r--r--sys/dev/usb/controller/xhci.c85
-rw-r--r--sys/dev/usb/controller/xhci_pci.c2
-rw-r--r--sys/dev/usb/controller/xhcireg.h5
-rw-r--r--sys/dev/usb/net/if_umb.c6
-rw-r--r--sys/dev/usb/serial/udbc.c404
-rw-r--r--sys/dev/usb/usb_hub.c3
-rw-r--r--sys/dev/usb/wlan/if_mtw.c5
-rw-r--r--sys/dev/usb/wlan/if_rsu.c66
-rw-r--r--sys/dev/usb/wlan/if_rsureg.h9
-rw-r--r--sys/dev/usb/wlan/if_run.c14
-rw-r--r--sys/dev/usb/wlan/if_uath.c4
-rw-r--r--sys/dev/usb/wlan/if_upgt.c5
-rw-r--r--sys/dev/usb/wlan/if_ural.c6
-rw-r--r--sys/dev/usb/wlan/if_urtw.c6
-rw-r--r--sys/dev/usb/wlan/if_zyd.c4
-rw-r--r--sys/dev/virtio/network/if_vtnet.c443
-rw-r--r--sys/dev/virtio/network/if_vtnetvar.h2
-rw-r--r--sys/dev/vmgenc/vmgenc_acpi.c8
-rw-r--r--sys/dev/vmm/vmm_dev.c1
142 files changed, 4011 insertions, 1072 deletions
diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c
index a2159b12876f..574d3aacbcde 100644
--- a/sys/dev/acpica/acpi.c
+++ b/sys/dev/acpica/acpi.c
@@ -4,6 +4,10 @@
* Copyright (c) 2000, 2001 Michael Smith
* Copyright (c) 2000 BSDi
* All rights reserved.
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Aymeric Wibo
+ * <obiwac@freebsd.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -181,7 +185,8 @@ static const char *acpi_sstate2sname(int sstate);
static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS);
-static int acpi_pm_func(u_long cmd, void *arg, ...);
+static int acpi_stype_to_sstate(struct acpi_softc *sc, enum power_stype stype);
+static int acpi_pm_func(u_long cmd, void *arg, enum power_stype stype);
static void acpi_enable_pcie(void);
static void acpi_reset_interfaces(device_t dev);
@@ -741,6 +746,28 @@ acpi_attach(device_t dev)
return_VALUE (error);
}
+static int
+acpi_stype_to_sstate(struct acpi_softc *sc, enum power_stype stype)
+{
+ switch (stype) {
+ case POWER_STYPE_AWAKE:
+ return (ACPI_STATE_S0);
+ case POWER_STYPE_STANDBY:
+ return (sc->acpi_standby_sx);
+ case POWER_STYPE_SUSPEND_TO_MEM:
+ return (ACPI_STATE_S3);
+ case POWER_STYPE_HIBERNATE:
+ return (ACPI_STATE_S4);
+ case POWER_STYPE_POWEROFF:
+ return (ACPI_STATE_S5);
+ case POWER_STYPE_SUSPEND_TO_IDLE:
+ case POWER_STYPE_COUNT:
+ case POWER_STYPE_UNKNOWN:
+ return (ACPI_STATE_UNKNOWN);
+ }
+ return (ACPI_STATE_UNKNOWN);
+}
+
static void
acpi_set_power_children(device_t dev, int state)
{
@@ -4621,12 +4648,10 @@ acpi_reset_interfaces(device_t dev)
}
static int
-acpi_pm_func(u_long cmd, void *arg, ...)
+acpi_pm_func(u_long cmd, void *arg, enum power_stype stype)
{
- int state, acpi_state;
- int error;
+ int error, sstate;
struct acpi_softc *sc;
- va_list ap;
error = 0;
switch (cmd) {
@@ -4636,27 +4661,8 @@ acpi_pm_func(u_long cmd, void *arg, ...)
error = EINVAL;
goto out;
}
-
- va_start(ap, arg);
- state = va_arg(ap, int);
- va_end(ap);
-
- switch (state) {
- case POWER_SLEEP_STATE_STANDBY:
- acpi_state = sc->acpi_standby_sx;
- break;
- case POWER_SLEEP_STATE_SUSPEND:
- acpi_state = sc->acpi_suspend_sx;
- break;
- case POWER_SLEEP_STATE_HIBERNATE:
- acpi_state = ACPI_STATE_S4;
- break;
- default:
- error = EINVAL;
- goto out;
- }
-
- if (ACPI_FAILURE(acpi_EnterSleepState(sc, acpi_state)))
+ sstate = acpi_stype_to_sstate(sc, stype);
+ if (ACPI_FAILURE(acpi_EnterSleepState(sc, sstate)))
error = ENXIO;
break;
default:
diff --git a/sys/dev/acpica/acpivar.h b/sys/dev/acpica/acpivar.h
index 7495a010432b..fac32d832598 100644
--- a/sys/dev/acpica/acpivar.h
+++ b/sys/dev/acpica/acpivar.h
@@ -40,6 +40,7 @@
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/power.h>
#include <sys/selinfo.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
diff --git a/sys/dev/ahci/ahci_pci.c b/sys/dev/ahci/ahci_pci.c
index f29d803e99a8..82f56fc0d19e 100644
--- a/sys/dev/ahci/ahci_pci.c
+++ b/sys/dev/ahci/ahci_pci.c
@@ -195,6 +195,7 @@ static const struct {
{0x1f3f8086, 0x00, "Intel Avoton (RAID)", 0},
{0x23a38086, 0x00, "Intel Coleto Creek", 0},
{0x31e38086, 0x00, "Intel Gemini Lake", 0},
+ {0x4b638086, 0x00, "Intel Elkhart Lake", 0},
{0x5ae38086, 0x00, "Intel Apollo Lake", 0},
{0x7ae28086, 0x00, "Intel Alder Lake", 0},
{0x8c028086, 0x00, "Intel Lynx Point", 0},
diff --git a/sys/dev/amdgpio/amdgpio.c b/sys/dev/amdgpio/amdgpio.c
index 2bd455c612b8..20589ff71b0b 100644
--- a/sys/dev/amdgpio/amdgpio.c
+++ b/sys/dev/amdgpio/amdgpio.c
@@ -3,6 +3,10 @@
*
* Copyright (c) 2018 Advanced Micro Devices
* All rights reserved.
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Aymeric Wibo
+ * <obiwac@freebsd.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -51,11 +55,11 @@
#include <dev/acpica/acpivar.h>
#include <dev/gpio/gpiobusvar.h>
-#include "gpio_if.h"
#include "amdgpio.h"
static struct resource_spec amdgpio_spec[] = {
- { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
{ -1, 0, 0 }
};
@@ -196,7 +200,7 @@ static int
amdgpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
{
struct amdgpio_softc *sc;
- uint32_t reg, val, allowed;
+ uint32_t reg, val;
sc = device_get_softc(dev);
@@ -204,18 +208,19 @@ amdgpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
if (!amdgpio_valid_pin(sc, pin))
return (EINVAL);
- allowed = GPIO_PIN_INPUT | GPIO_PIN_OUTPUT;
+ if ((flags & ~AMDGPIO_DEFAULT_CAPS) != 0) {
+ device_printf(dev, "disallowed flags (0x%x) trying to be set "
+ "(allowed is 0x%x)\n", flags, AMDGPIO_DEFAULT_CAPS);
+ return (EINVAL);
+ }
- /*
- * Only directtion flag allowed
- */
- if (flags & ~allowed)
+ /* Either input or output must be selected. */
+ if ((flags & (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) == 0)
return (EINVAL);
- /*
- * Not both directions simultaneously
- */
- if ((flags & allowed) == allowed)
+ /* Not both directions simultaneously. */
+ if ((flags & (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) ==
+ (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT))
return (EINVAL);
/* Set the GPIO mode and state */
@@ -224,16 +229,21 @@ amdgpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
reg = AMDGPIO_PIN_REGISTER(pin);
val = amdgpio_read_4(sc, reg);
- if (flags & GPIO_PIN_INPUT) {
+ if ((flags & GPIO_PIN_INPUT) != 0)
val &= ~BIT(OUTPUT_ENABLE_OFF);
- sc->sc_gpio_pins[pin].gp_flags = GPIO_PIN_INPUT;
- } else {
+ else
val |= BIT(OUTPUT_ENABLE_OFF);
- sc->sc_gpio_pins[pin].gp_flags = GPIO_PIN_OUTPUT;
- }
+
+ val &= ~(BIT(PULL_DOWN_ENABLE_OFF) | BIT(PULL_UP_ENABLE_OFF));
+
+ if ((flags & GPIO_PIN_PULLDOWN) != 0)
+ val |= BIT(PULL_DOWN_ENABLE_OFF);
+ if ((flags & GPIO_PIN_PULLUP) != 0)
+ val |= BIT(PULL_UP_ENABLE_OFF);
amdgpio_write_4(sc, reg, val);
+ sc->sc_gpio_pins[pin].gp_flags = flags;
dprintf("pin %d flags 0x%x val 0x%x gp_flags 0x%x\n",
pin, flags, val, sc->sc_gpio_pins[pin].gp_flags);
@@ -359,11 +369,73 @@ amdgpio_probe(device_t dev)
return (rv);
}
+static void
+amdgpio_eoi_locked(struct amdgpio_softc *sc)
+{
+ uint32_t master_reg = amdgpio_read_4(sc, WAKE_INT_MASTER_REG);
+
+ AMDGPIO_ASSERT_LOCKED(sc);
+ master_reg |= EOI_MASK;
+ amdgpio_write_4(sc, WAKE_INT_MASTER_REG, master_reg);
+}
+
+static void
+amdgpio_eoi(struct amdgpio_softc *sc)
+{
+ AMDGPIO_LOCK(sc);
+ amdgpio_eoi_locked(sc);
+ AMDGPIO_UNLOCK(sc);
+}
+
+static int
+amdgpio_intr_filter(void *arg)
+{
+ struct amdgpio_softc *sc = arg;
+ int off, rv = FILTER_STRAY;
+ uint32_t reg;
+
+ /* We can lock in the filter routine as it is MTX_SPIN. */
+ AMDGPIO_LOCK(sc);
+
+ /*
+ * TODO Instead of just reading the registers of all pins, we should
+ * read WAKE_INT_STATUS_REG0/1. A bit set in here denotes a group of
+ * 4 pins where at least one has an interrupt for us. Then we can just
+ * iterate over those 4 pins.
+ *
+ * See GPIO_Interrupt_Status_Index_0 in BKDG.
+ */
+ for (size_t pin = 0; pin < AMD_GPIO_PINS_EXPOSED; pin++) {
+ off = AMDGPIO_PIN_REGISTER(pin);
+ reg = amdgpio_read_4(sc, off);
+ if ((reg & UNSERVICED_INTERRUPT_MASK) == 0)
+ continue;
+ /*
+ * Must write 1's to wake/interrupt status bits to clear them.
+ * We can do this simply by writing back to the register.
+ */
+ amdgpio_write_4(sc, off, reg);
+ }
+
+ amdgpio_eoi_locked(sc);
+ AMDGPIO_UNLOCK(sc);
+
+ rv = FILTER_HANDLED;
+ return (rv);
+}
+
+static void
+amdgpio_intr_handler(void *arg)
+{
+ /* TODO */
+}
+
static int
amdgpio_attach(device_t dev)
{
struct amdgpio_softc *sc;
- int i, pin, bank;
+ int i, pin, bank, reg;
+ uint32_t flags;
sc = device_get_softc(dev);
sc->sc_dev = dev;
@@ -386,6 +458,14 @@ amdgpio_attach(device_t dev)
sc->sc_bst = rman_get_bustag(sc->sc_res[0]);
sc->sc_bsh = rman_get_bushandle(sc->sc_res[0]);
+ /* Set up interrupt handler. */
+ if (bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_MISC | INTR_MPSAFE,
+ amdgpio_intr_filter, amdgpio_intr_handler, sc, &sc->sc_intr_handle)
+ != 0) {
+ device_printf(dev, "couldn't set up interrupt\n");
+ goto err_intr;
+ }
+
/* Initialize all possible pins to be Invalid */
for (i = 0; i < AMD_GPIO_PINS_MAX ; i++) {
snprintf(sc->sc_gpio_pins[i].gp_name, GPIOMAXNAME,
@@ -395,7 +475,12 @@ amdgpio_attach(device_t dev)
sc->sc_gpio_pins[i].gp_flags = 0;
}
- /* Initialize only driver exposed pins with appropriate capabilities */
+ /*
+ * Initialize only driver exposed pins with appropriate capabilities.
+ *
+ * XXX Also mask and disable interrupts on all pins, since we don't
+ * support them at the moment.
+ */
for (i = 0; i < AMD_GPIO_PINS_EXPOSED ; i++) {
pin = kernzp_pins[i].pin_num;
bank = pin/AMD_GPIO_PINS_PER_BANK;
@@ -406,7 +491,14 @@ amdgpio_attach(device_t dev)
sc->sc_gpio_pins[pin].gp_flags =
amdgpio_is_pin_output(sc, pin) ?
GPIO_PIN_OUTPUT : GPIO_PIN_INPUT;
+
+ reg = AMDGPIO_PIN_REGISTER(pin);
+ flags = amdgpio_read_4(sc, reg);
+ flags &= ~(1 << INTERRUPT_ENABLE_OFF);
+ flags &= ~(1 << INTERRUPT_MASK_OFF);
+ amdgpio_write_4(sc, reg, flags);
}
+ amdgpio_eoi(sc);
sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
@@ -418,8 +510,9 @@ amdgpio_attach(device_t dev)
return (0);
err_bus:
+ bus_teardown_intr(dev, sc->sc_res[1], sc->sc_intr_handle);
+err_intr:
bus_release_resources(dev, amdgpio_spec, sc->sc_res);
-
err_rsrc:
AMDGPIO_LOCK_DESTROY(sc);
@@ -434,7 +527,8 @@ amdgpio_detach(device_t dev)
if (sc->sc_busdev)
gpiobus_detach_bus(dev);
-
+ if (sc->sc_intr_handle)
+ bus_teardown_intr(dev, sc->sc_res[1], sc->sc_intr_handle);
bus_release_resources(dev, amdgpio_spec, sc->sc_res);
AMDGPIO_LOCK_DESTROY(sc);
diff --git a/sys/dev/amdgpio/amdgpio.h b/sys/dev/amdgpio/amdgpio.h
index aca3039bfc98..3743eba23e17 100644
--- a/sys/dev/amdgpio/amdgpio.h
+++ b/sys/dev/amdgpio/amdgpio.h
@@ -50,7 +50,8 @@
AMD_GPIO_PINS_BANK1 + \
AMD_GPIO_PINS_BANK2 + \
AMD_GPIO_PINS_BANK3)
-#define AMDGPIO_DEFAULT_CAPS (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)
+#define AMDGPIO_DEFAULT_CAPS (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT | \
+ GPIO_PIN_PULLDOWN | GPIO_PIN_PULLUP)
/* Register related macros */
#define AMDGPIO_PIN_REGISTER(pin) (pin * 4)
@@ -84,6 +85,9 @@
#define INTERRUPT_STS_OFF 28
#define WAKE_STS_OFF 29
+#define UNSERVICED_INTERRUPT_MASK \
+ ((1 << INTERRUPT_STS_OFF) | (1 << WAKE_STS_OFF))
+
#define DB_TMR_OUT_MASK 0xFUL
#define DB_CNTRL_MASK 0x3UL
#define ACTIVE_LEVEL_MASK 0x3UL
@@ -316,12 +320,13 @@ struct amdgpio_softc {
int sc_npins;
int sc_ngroups;
struct mtx sc_mtx;
- struct resource *sc_res[AMD_GPIO_NUM_PIN_BANK + 1];
+ struct resource *sc_res[2];
bus_space_tag_t sc_bst;
bus_space_handle_t sc_bsh;
struct gpio_pin sc_gpio_pins[AMD_GPIO_PINS_MAX];
const struct pin_info *sc_pin_info;
const struct amd_pingroup *sc_groups;
+ void *sc_intr_handle;
};
struct amdgpio_sysctl {
diff --git a/sys/dev/ath/ath_rate/sample/sample.c b/sys/dev/ath/ath_rate/sample/sample.c
index 291d1ec64ed7..79bf08678249 100644
--- a/sys/dev/ath/ath_rate/sample/sample.c
+++ b/sys/dev/ath/ath_rate/sample/sample.c
@@ -179,7 +179,7 @@ ath_rate_sample_find_min_pktlength(struct ath_softc *sc,
const struct txschedule *sched = &sn->sched[rix0];
int max_pkt_length = 65530; // ATH_AGGR_MAXSIZE
// Note: this may not be true in all cases; need to check?
- int is_ht40 = (an->an_node.ni_chw == IEEE80211_STA_RX_BW_40);
+ int is_ht40 = (an->an_node.ni_chw == NET80211_STA_RX_BW_40);
// Note: not great, but good enough..
int idx = is_ht40 ? MCS_HT40 : MCS_HT20;
@@ -979,7 +979,7 @@ update_stats(struct ath_softc *sc, struct ath_node *an,
const int size_bin = size_to_bin(frame_size);
const int size = bin_to_size(size_bin);
int tt;
- int is_ht40 = (an->an_node.ni_chw == IEEE80211_STA_RX_BW_40);
+ int is_ht40 = (an->an_node.ni_chw == NET80211_STA_RX_BW_40);
int pct;
if (!IS_RATE_DEFINED(sn, rix0))
@@ -1365,7 +1365,7 @@ ath_rate_ctl_reset(struct ath_softc *sc, struct ieee80211_node *ni)
continue;
printf(" %d %s/%d", dot11rate(rt, rix), dot11rate_label(rt, rix),
calc_usecs_unicast_packet(sc, 1600, rix, 0,0,
- (ni->ni_chw == IEEE80211_STA_RX_BW_40)));
+ (ni->ni_chw == NET80211_STA_RX_BW_40)));
}
printf("\n");
}
@@ -1396,7 +1396,7 @@ ath_rate_ctl_reset(struct ath_softc *sc, struct ieee80211_node *ni)
sn->stats[y][rix].perfect_tx_time =
calc_usecs_unicast_packet(sc, size, rix, 0, 0,
- (ni->ni_chw == IEEE80211_STA_RX_BW_40));
+ (ni->ni_chw == NET80211_STA_RX_BW_40));
sn->stats[y][rix].average_tx_time =
sn->stats[y][rix].perfect_tx_time;
}
diff --git a/sys/dev/ath/if_ath.c b/sys/dev/ath/if_ath.c
index 934024ddfbcf..1304b597c545 100644
--- a/sys/dev/ath/if_ath.c
+++ b/sys/dev/ath/if_ath.c
@@ -924,6 +924,9 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
| IEEE80211_C_PMGT /* Station side power mgmt */
| IEEE80211_C_SWSLEEP
;
+
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
/*
* Query the hal to figure out h/w crypto support.
*/
diff --git a/sys/dev/ath/if_ath_tx.c b/sys/dev/ath/if_ath_tx.c
index 1559b66a7c7d..deadd63c3d18 100644
--- a/sys/dev/ath/if_ath_tx.c
+++ b/sys/dev/ath/if_ath_tx.c
@@ -1588,6 +1588,10 @@ ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
*/
pktlen = m0->m_pkthdr.len - (hdrlen & 3);
+ /* seqno allocate, only if AMPDU isn't running */
+ if ((m0->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
/* Handle encryption twiddling if needed */
if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
&pktlen, &keyix)) {
@@ -2201,6 +2205,10 @@ ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
* for QoS frames.
*/
+ /* seqno allocate, only if AMPDU isn't running */
+ if ((m0->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
/* Handle encryption twiddling if needed */
if (! ath_tx_tag_crypto(sc, ni,
m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0,
@@ -2981,6 +2989,8 @@ ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,
ATH_TX_LOCK_ASSERT(sc);
+ /* TODO: can this use ieee80211_output_seqno_assign() now? */
+
/*
* Is it a QOS NULL Data frame? Give it a sequence number from
* the default TID (IEEE80211_NONQOS_TID.)
diff --git a/sys/dev/ath/if_ath_tx_ht.c b/sys/dev/ath/if_ath_tx_ht.c
index e7ee029fecf0..f42058bacb0d 100644
--- a/sys/dev/ath/if_ath_tx_ht.c
+++ b/sys/dev/ath/if_ath_tx_ht.c
@@ -283,7 +283,7 @@ ath_tx_rate_fill_rcflags(struct ath_softc *sc, struct ath_buf *bf)
if (IS_HT_RATE(rate)) {
rc[i].flags |= ATH_RC_HT_FLAG;
- if (ni->ni_chw == IEEE80211_STA_RX_BW_40)
+ if (ni->ni_chw == NET80211_STA_RX_BW_40)
rc[i].flags |= ATH_RC_CW40_FLAG;
/*
@@ -295,13 +295,13 @@ ath_tx_rate_fill_rcflags(struct ath_softc *sc, struct ath_buf *bf)
* and doesn't return the fractional part, so
* we are always "out" by some amount.
*/
- if (ni->ni_chw == IEEE80211_STA_RX_BW_40 &&
+ if (ni->ni_chw == NET80211_STA_RX_BW_40 &&
ieee80211_ht_check_tx_shortgi_40(ni) &&
(bf->bf_flags & ATH_BUF_TOA_PROBE) == 0) {
rc[i].flags |= ATH_RC_SGI_FLAG;
}
- if (ni->ni_chw == IEEE80211_STA_RX_BW_20 &&
+ if (ni->ni_chw == NET80211_STA_RX_BW_20 &&
ieee80211_ht_check_tx_shortgi_20(ni) &&
(bf->bf_flags & ATH_BUF_TOA_PROBE) == 0) {
rc[i].flags |= ATH_RC_SGI_FLAG;
diff --git a/sys/dev/axgbe/if_axgbe_pci.c b/sys/dev/axgbe/if_axgbe_pci.c
index 290156ff11ca..6bc4bd33e162 100644
--- a/sys/dev/axgbe/if_axgbe_pci.c
+++ b/sys/dev/axgbe/if_axgbe_pci.c
@@ -2415,7 +2415,8 @@ axgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
case IFCOUNTER_OPACKETS:
return (pstats->txframecount_gb);
case IFCOUNTER_OERRORS:
- return (pstats->txframecount_gb - pstats->txframecount_g);
+ return (if_get_counter_default(ifp, cnt) +
+ pstats->txframecount_gb - pstats->txframecount_g);
case IFCOUNTER_IBYTES:
return (pstats->rxoctetcount_gb);
case IFCOUNTER_OBYTES:
diff --git a/sys/dev/bce/if_bce.c b/sys/dev/bce/if_bce.c
index 16bfce5338a7..6cf39e035ea6 100644
--- a/sys/dev/bce/if_bce.c
+++ b/sys/dev/bce/if_bce.c
@@ -1221,7 +1221,7 @@ bce_attach(device_t dev)
sc->bce_bc_ver[j++] = '.';
}
- /* Check if any management firwmare is enabled. */
+ /* Check if any management firmware is enabled. */
val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
diff --git a/sys/dev/bnxt/bnxt_en/bnxt_hwrm.c b/sys/dev/bnxt/bnxt_en/bnxt_hwrm.c
index 7dd555cfaadb..9e7f4614d9f9 100644
--- a/sys/dev/bnxt/bnxt_en/bnxt_hwrm.c
+++ b/sys/dev/bnxt/bnxt_en/bnxt_hwrm.c
@@ -714,7 +714,7 @@ int bnxt_hwrm_func_backing_store_cfg(struct bnxt_softc *softc, uint32_t enables)
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
}
req.flags = cpu_to_le32(flags);
- return hwrm_send_message(softc, &req, sizeof(req));
+ return hwrm_send_message(softc, &req, req_len);
}
int bnxt_hwrm_func_resc_qcaps(struct bnxt_softc *softc, bool all)
diff --git a/sys/dev/bwi/if_bwi.c b/sys/dev/bwi/if_bwi.c
index 1087ca813d65..85146d4c4010 100644
--- a/sys/dev/bwi/if_bwi.c
+++ b/sys/dev/bwi/if_bwi.c
@@ -498,6 +498,9 @@ bwi_attach(struct bwi_softc *sc)
IEEE80211_C_BGSCAN |
IEEE80211_C_MONITOR;
ic->ic_opmode = IEEE80211_M_STA;
+
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
ieee80211_ifattach(ic);
ic->ic_headroom = sizeof(struct bwi_txbuf_hdr);
@@ -1361,6 +1364,7 @@ bwi_start_locked(struct bwi_softc *sc)
(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
wh = mtod(m, struct ieee80211_frame *);
+ ieee80211_output_seqno_assign(ni, -1, m);
if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) != 0 &&
ieee80211_crypto_encap(ni, m) == NULL) {
if_inc_counter(ni->ni_vap->iv_ifp,
diff --git a/sys/dev/bwn/if_bwn.c b/sys/dev/bwn/if_bwn.c
index 38bf6f5d31a3..ec9d56661034 100644
--- a/sys/dev/bwn/if_bwn.c
+++ b/sys/dev/bwn/if_bwn.c
@@ -774,6 +774,7 @@ bwn_attach_post(struct bwn_softc *sc)
;
ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS; /* s/w bmiss */
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
/* Determine the NVRAM variable containing our MAC address */
core_unit = bhnd_get_core_unit(sc->sc_dev);
@@ -999,6 +1000,7 @@ bwn_start(struct bwn_softc *sc)
continue;
}
wh = mtod(m, struct ieee80211_frame *);
+ ieee80211_output_seqno_assign(ni, -1, m);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m);
if (k == NULL) {
diff --git a/sys/dev/cpuctl/cpuctl.c b/sys/dev/cpuctl/cpuctl.c
index 9253b17a259d..b0ab3467df69 100644
--- a/sys/dev/cpuctl/cpuctl.c
+++ b/sys/dev/cpuctl/cpuctl.c
@@ -402,19 +402,20 @@ out:
* its workings.
*/
static void
-amd_ucode_wrmsr(void *ucode_ptr)
+amd_ucode_wrmsr(void *arg)
{
+ struct ucode_update_data *d = arg;
uint32_t tmp[4];
- wrmsr_safe(MSR_K8_UCODE_UPDATE, (uintptr_t)ucode_ptr);
+ if (PCPU_GET(cpuid) == d->cpu)
+ d->ret = wrmsr_safe(MSR_K8_UCODE_UPDATE, (uintptr_t)d->ptr);
do_cpuid(0, tmp);
}
static int
update_amd(int cpu, cpuctl_update_args_t *args, struct thread *td)
{
- void *ptr;
- int ret;
+ struct ucode_update_data d = { .cpu = cpu };
if (args->size == 0 || args->data == NULL) {
DPRINTF("[cpuctl,%d]: zero-sized firmware image", __LINE__);
@@ -430,18 +431,17 @@ update_amd(int cpu, cpuctl_update_args_t *args, struct thread *td)
* malloc(9) always returns the pointer aligned at least on
* the size of the allocation.
*/
- ptr = malloc(args->size + 16, M_CPUCTL, M_ZERO | M_WAITOK);
- if (copyin(args->data, ptr, args->size) != 0) {
+ d.ptr = malloc(args->size + 16, M_CPUCTL, M_ZERO | M_WAITOK);
+ if (copyin(args->data, d.ptr, args->size) != 0) {
DPRINTF("[cpuctl,%d]: copyin %p->%p of %zd bytes failed",
__LINE__, args->data, ptr, args->size);
- ret = EFAULT;
+ d.ret = EFAULT;
goto fail;
}
- smp_rendezvous(NULL, amd_ucode_wrmsr, NULL, ptr);
- ret = 0;
+ smp_rendezvous(NULL, amd_ucode_wrmsr, NULL, &d);
fail:
- free(ptr, M_CPUCTL);
- return (ret);
+ free(d.ptr, M_CPUCTL);
+ return (d.ret);
}
static int
diff --git a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
index c8592807f843..d805642541d3 100644
--- a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
+++ b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
@@ -1053,6 +1053,8 @@ send_iscsi_flowc_wr(struct adapter *sc, struct toepcb *toep, int maxlen)
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
flowc->mnemval[0].val = htobe32(maxlen);
+ KASSERT(howmany(flowclen, 16) <= MAX_OFLD_TX_SDESC_CREDITS,
+ ("%s: tx_credits %u too large", __func__, howmany(flowclen, 16)));
txsd->tx_credits = howmany(flowclen, 16);
txsd->plen = 0;
KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
diff --git a/sys/dev/cxgbe/iw_cxgbe/qp.c b/sys/dev/cxgbe/iw_cxgbe/qp.c
index 0e374bc961c4..cbf4bae00a60 100644
--- a/sys/dev/cxgbe/iw_cxgbe/qp.c
+++ b/sys/dev/cxgbe/iw_cxgbe/qp.c
@@ -1326,6 +1326,8 @@ creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize)
return (EINVAL);
}
txsd = &toep->txsd[toep->txsd_pidx];
+ KASSERT(howmany(wrsize, 16) <= MAX_OFLD_TX_SDESC_CREDITS,
+ ("%s: tx_credits %zu too large", __func__, howmany(wrsize, 16)));
txsd->tx_credits = howmany(wrsize, 16);
txsd->plen = 0;
KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index 9e91250cb61c..9756a6945384 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -9016,7 +9016,7 @@ sysctl_loadavg(SYSCTL_HANDLER_ARGS)
rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg");
if (rc)
return (rc);
- if (hw_all_ok(sc))
+ if (!hw_all_ok(sc))
rc = ENXIO;
else {
param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c
index 7a6b1cbdd736..be20ea42474e 100644
--- a/sys/dev/cxgbe/tom/t4_cpl_io.c
+++ b/sys/dev/cxgbe/tom/t4_cpl_io.c
@@ -148,6 +148,8 @@ send_flowc_wr(struct toepcb *toep, struct tcpcb *tp)
KASSERT(paramidx == nparams, ("nparams mismatch"));
+ KASSERT(howmany(flowclen, 16) <= MAX_OFLD_TX_SDESC_CREDITS,
+ ("%s: tx_credits %u too large", __func__, howmany(flowclen, 16)));
txsd->tx_credits = howmany(flowclen, 16);
txsd->plen = 0;
KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
@@ -215,6 +217,8 @@ update_tx_rate_limit(struct adapter *sc, struct toepcb *toep, u_int Bps)
else
flowc->mnemval[0].val = htobe32(tc_idx);
+ KASSERT(flowclen16 <= MAX_OFLD_TX_SDESC_CREDITS,
+ ("%s: tx_credits %u too large", __func__, flowclen16));
txsd->tx_credits = flowclen16;
txsd->plen = 0;
toep->tx_credits -= txsd->tx_credits;
@@ -491,6 +495,9 @@ t4_close_conn(struct adapter *sc, struct toepcb *toep)
#define MIN_TX_CREDITS(iso) \
(MIN_OFLD_TX_CREDITS + ((iso) ? MIN_ISO_TX_CREDITS : 0))
+_Static_assert(MAX_OFLD_TX_CREDITS <= MAX_OFLD_TX_SDESC_CREDITS,
+ "MAX_OFLD_TX_SDESC_CREDITS too small");
+
/* Maximum amount of immediate data we could stuff in a WR */
static inline int
max_imm_payload(int tx_credits, int iso)
@@ -612,6 +619,48 @@ write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n)
__func__, nsegs, start, stop));
}
+bool
+t4_push_raw_wr(struct adapter *sc, struct toepcb *toep, struct mbuf *m)
+{
+#ifdef INVARIANTS
+ struct inpcb *inp = toep->inp;
+#endif
+ struct wrqe *wr;
+ struct ofld_tx_sdesc *txsd;
+ u_int credits, plen;
+
+ INP_WLOCK_ASSERT(inp);
+ MPASS(mbuf_raw_wr(m));
+ plen = m->m_pkthdr.len;
+ credits = howmany(plen, 16);
+ if (credits > toep->tx_credits)
+ return (false);
+
+ wr = alloc_wrqe(roundup2(plen, 16), &toep->ofld_txq->wrq);
+ if (wr == NULL)
+ return (false);
+
+ m_copydata(m, 0, plen, wrtod(wr));
+ m_freem(m);
+
+ toep->tx_credits -= credits;
+ if (toep->tx_credits < MIN_OFLD_TX_CREDITS)
+ toep->flags |= TPF_TX_SUSPENDED;
+
+ KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
+ KASSERT(credits <= MAX_OFLD_TX_SDESC_CREDITS,
+ ("%s: tx_credits %u too large", __func__, credits));
+ txsd = &toep->txsd[toep->txsd_pidx];
+ txsd->plen = 0;
+ txsd->tx_credits = credits;
+ if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
+ toep->txsd_pidx = 0;
+ toep->txsd_avail--;
+
+ t4_wrq_tx(sc, wr);
+ return (true);
+}
+
/*
* Max number of SGL entries an offload tx work request can have. This is 41
* (1 + 40) for a full 512B work request.
@@ -644,6 +693,7 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
struct tcpcb *tp = intotcpcb(inp);
struct socket *so = inp->inp_socket;
struct sockbuf *sb = &so->so_snd;
+ struct mbufq *pduq = &toep->ulp_pduq;
int tx_credits, shove, compl, sowwakeup;
struct ofld_tx_sdesc *txsd;
bool nomap_mbuf_seen;
@@ -688,6 +738,19 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
max_imm = max_imm_payload(tx_credits, 0);
max_nsegs = max_dsgl_nsegs(tx_credits, 0);
+ if (__predict_false((sndptr = mbufq_first(pduq)) != NULL)) {
+ if (!t4_push_raw_wr(sc, toep, sndptr)) {
+ toep->flags |= TPF_TX_SUSPENDED;
+ return;
+ }
+
+ m = mbufq_dequeue(pduq);
+ MPASS(m == sndptr);
+
+ txsd = &toep->txsd[toep->txsd_pidx];
+ continue;
+ }
+
SOCKBUF_LOCK(sb);
sowwakeup = drop;
if (drop) {
@@ -705,6 +768,8 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
if ((m->m_flags & M_NOTREADY) != 0)
break;
+ if (plen + m->m_len > MAX_OFLD_TX_SDESC_PLEN)
+ break;
if (m->m_flags & M_EXTPG) {
#ifdef KERN_TLS
if (m->m_epg_tls != NULL) {
@@ -870,6 +935,8 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
toep->flags |= TPF_TX_SUSPENDED;
KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
+ KASSERT(plen <= MAX_OFLD_TX_SDESC_PLEN,
+ ("%s: plen %u too large", __func__, plen));
txsd->plen = plen;
txsd->tx_credits = credits;
txsd++;
@@ -1211,6 +1278,8 @@ t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop)
toep->flags |= TPF_TX_SUSPENDED;
KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
+ KASSERT(plen <= MAX_OFLD_TX_SDESC_PLEN,
+ ("%s: plen %u too large", __func__, plen));
txsd->plen = plen;
txsd->tx_credits = credits;
txsd++;
@@ -1240,6 +1309,35 @@ t4_push_data(struct adapter *sc, struct toepcb *toep, int drop)
t4_push_frames(sc, toep, drop);
}
+void
+t4_raw_wr_tx(struct adapter *sc, struct toepcb *toep, struct mbuf *m)
+{
+#ifdef INVARIANTS
+ struct inpcb *inp = toep->inp;
+#endif
+
+ INP_WLOCK_ASSERT(inp);
+
+ /*
+ * If there are other raw WRs enqueued, enqueue to preserve
+ * FIFO ordering.
+ */
+ if (!mbufq_empty(&toep->ulp_pduq)) {
+ mbufq_enqueue(&toep->ulp_pduq, m);
+ return;
+ }
+
+ /*
+ * Cannot call t4_push_data here as that will lock so_snd and
+ * some callers of this run in rx handlers with so_rcv locked.
+ * Instead, just try to transmit this WR.
+ */
+ if (!t4_push_raw_wr(sc, toep, m)) {
+ mbufq_enqueue(&toep->ulp_pduq, m);
+ toep->flags |= TPF_TX_SUSPENDED;
+ }
+}
+
int
t4_tod_output(struct toedev *tod, struct tcpcb *tp)
{
@@ -1941,25 +2039,16 @@ do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
}
void
-t4_set_tcb_field(struct adapter *sc, struct sge_wrq *wrq, struct toepcb *toep,
+write_set_tcb_field(struct adapter *sc, void *dst, struct toepcb *toep,
uint16_t word, uint64_t mask, uint64_t val, int reply, int cookie)
{
- struct wrqe *wr;
- struct cpl_set_tcb_field *req;
- struct ofld_tx_sdesc *txsd;
+ struct cpl_set_tcb_field *req = dst;
MPASS((cookie & ~M_COOKIE) == 0);
if (reply) {
MPASS(cookie != CPL_COOKIE_RESERVED);
}
- wr = alloc_wrqe(sizeof(*req), wrq);
- if (wr == NULL) {
- /* XXX */
- panic("%s: allocation failure.", __func__);
- }
- req = wrtod(wr);
-
INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
req->reply_ctrl = htobe16(V_QUEUENO(toep->ofld_rxq->iq.abs_id));
if (reply == 0)
@@ -1967,9 +2056,29 @@ t4_set_tcb_field(struct adapter *sc, struct sge_wrq *wrq, struct toepcb *toep,
req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie));
req->mask = htobe64(mask);
req->val = htobe64(val);
+}
+
+void
+t4_set_tcb_field(struct adapter *sc, struct sge_wrq *wrq, struct toepcb *toep,
+ uint16_t word, uint64_t mask, uint64_t val, int reply, int cookie)
+{
+ struct wrqe *wr;
+ struct ofld_tx_sdesc *txsd;
+ const u_int len = sizeof(struct cpl_set_tcb_field);
+
+ wr = alloc_wrqe(len, wrq);
+ if (wr == NULL) {
+ /* XXX */
+ panic("%s: allocation failure.", __func__);
+ }
+ write_set_tcb_field(sc, wrtod(wr), toep, word, mask, val, reply,
+ cookie);
+
if (wrq->eq.type == EQ_OFLD) {
txsd = &toep->txsd[toep->txsd_pidx];
- txsd->tx_credits = howmany(sizeof(*req), 16);
+ _Static_assert(howmany(len, 16) <= MAX_OFLD_TX_SDESC_CREDITS,
+ "MAX_OFLD_TX_SDESC_CREDITS too small");
+ txsd->tx_credits = howmany(len, 16);
txsd->plen = 0;
KASSERT(toep->tx_credits >= txsd->tx_credits &&
toep->txsd_avail > 0,
diff --git a/sys/dev/cxgbe/tom/t4_ddp.c b/sys/dev/cxgbe/tom/t4_ddp.c
index 2fee8fa91dac..da0753296532 100644
--- a/sys/dev/cxgbe/tom/t4_ddp.c
+++ b/sys/dev/cxgbe/tom/t4_ddp.c
@@ -1785,7 +1785,7 @@ t4_write_page_pods_for_rcvbuf(struct adapter *sc, struct sge_wrq *wrq, int tid,
return (0);
}
-static struct mbuf *
+struct mbuf *
alloc_raw_wr_mbuf(int len)
{
struct mbuf *m;
diff --git a/sys/dev/cxgbe/tom/t4_tls.c b/sys/dev/cxgbe/tom/t4_tls.c
index 27c16b9988ae..ad72c6a6b025 100644
--- a/sys/dev/cxgbe/tom/t4_tls.c
+++ b/sys/dev/cxgbe/tom/t4_tls.c
@@ -61,11 +61,21 @@
static void
t4_set_tls_tcb_field(struct toepcb *toep, uint16_t word, uint64_t mask,
- uint64_t val)
+ uint64_t val, int reply, int cookie)
{
struct adapter *sc = td_adapter(toep->td);
+ struct mbuf *m;
+
+ m = alloc_raw_wr_mbuf(sizeof(struct cpl_set_tcb_field));
+ if (m == NULL) {
+ /* XXX */
+ panic("%s: out of memory", __func__);
+ }
- t4_set_tcb_field(sc, &toep->ofld_txq->wrq, toep, word, mask, val, 0, 0);
+ write_set_tcb_field(sc, mtod(m, void *), toep, word, mask, val, reply,
+ cookie);
+
+ t4_raw_wr_tx(sc, toep, m);
}
/* TLS and DTLS common routines */
@@ -88,10 +98,9 @@ tls_tx_key(struct toepcb *toep)
static void
t4_set_rx_quiesce(struct toepcb *toep)
{
- struct adapter *sc = td_adapter(toep->td);
- t4_set_tcb_field(sc, &toep->ofld_txq->wrq, toep, W_TCB_T_FLAGS,
- V_TF_RX_QUIESCE(1), V_TF_RX_QUIESCE(1), 1, CPL_COOKIE_TOM);
+ t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1),
+ V_TF_RX_QUIESCE(1), 1, CPL_COOKIE_TOM);
}
/* Clear TF_RX_QUIESCE to re-enable receive. */
@@ -99,7 +108,7 @@ static void
t4_clear_rx_quiesce(struct toepcb *toep)
{
- t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1), 0);
+ t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1), 0, 0, 0);
}
/* TLS/DTLS content type for CPL SFO */
@@ -145,16 +154,15 @@ get_tp_plen_max(struct ktls_session *tls)
return (tls->params.max_frame_len <= 8192 ? plen : FC_TP_PLEN_MAX);
}
-/* Send request to get the key-id */
+/* Send request to save the key in on-card memory. */
static int
tls_program_key_id(struct toepcb *toep, struct ktls_session *tls,
int direction)
{
struct tls_ofld_info *tls_ofld = &toep->tls;
struct adapter *sc = td_adapter(toep->td);
- struct ofld_tx_sdesc *txsd;
int keyid;
- struct wrqe *wr;
+ struct mbuf *m;
struct tls_key_req *kwr;
struct tls_keyctx *kctx;
@@ -173,12 +181,12 @@ tls_program_key_id(struct toepcb *toep, struct ktls_session *tls,
return (ENOSPC);
}
- wr = alloc_wrqe(TLS_KEY_WR_SZ, &toep->ofld_txq->wrq);
- if (wr == NULL) {
+ m = alloc_raw_wr_mbuf(TLS_KEY_WR_SZ);
+ if (m == NULL) {
t4_free_tls_keyid(sc, keyid);
return (ENOMEM);
}
- kwr = wrtod(wr);
+ kwr = mtod(m, struct tls_key_req *);
memset(kwr, 0, TLS_KEY_WR_SZ);
t4_write_tlskey_wr(tls, direction, toep->tid, F_FW_WR_COMPL, keyid,
@@ -190,15 +198,7 @@ tls_program_key_id(struct toepcb *toep, struct ktls_session *tls,
tls_ofld->rx_key_addr = keyid;
t4_tls_key_ctx(tls, direction, kctx);
- txsd = &toep->txsd[toep->txsd_pidx];
- txsd->tx_credits = DIV_ROUND_UP(TLS_KEY_WR_SZ, 16);
- txsd->plen = 0;
- toep->tx_credits -= txsd->tx_credits;
- if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
- toep->txsd_pidx = 0;
- toep->txsd_avail--;
-
- t4_wrq_tx(sc, wr);
+ t4_raw_wr_tx(sc, toep, m);
return (0);
}
@@ -494,6 +494,7 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
struct tcpcb *tp = intotcpcb(inp);
struct socket *so = inp->inp_socket;
struct sockbuf *sb = &so->so_snd;
+ struct mbufq *pduq = &toep->ulp_pduq;
int tls_size, tx_credits, shove, sowwakeup;
struct ofld_tx_sdesc *txsd;
char *buf;
@@ -536,6 +537,18 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
for (;;) {
tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
+ if (__predict_false((m = mbufq_first(pduq)) != NULL)) {
+ if (!t4_push_raw_wr(sc, toep, m)) {
+ toep->flags |= TPF_TX_SUSPENDED;
+ return;
+ }
+
+ (void)mbufq_dequeue(pduq);
+
+ txsd = &toep->txsd[toep->txsd_pidx];
+ continue;
+ }
+
SOCKBUF_LOCK(sb);
sowwakeup = drop;
if (drop) {
@@ -694,6 +707,8 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
toep->flags |= TPF_TX_SUSPENDED;
KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
+ KASSERT(m->m_len <= MAX_OFLD_TX_SDESC_PLEN,
+ ("%s: plen %u too large", __func__, m->m_len));
txsd->plen = m->m_len;
txsd->tx_credits = credits;
txsd++;
@@ -1082,7 +1097,7 @@ out:
static void
tls_update_tcb(struct adapter *sc, struct toepcb *toep, uint64_t seqno)
{
- struct wrqe *wr;
+ struct mbuf *m;
struct work_request_hdr *wrh;
struct ulp_txpkt *ulpmc;
int fields, key_offset, len;
@@ -1111,14 +1126,14 @@ tls_update_tcb(struct adapter *sc, struct toepcb *toep, uint64_t seqno)
KASSERT(len <= SGE_MAX_WR_LEN,
("%s: WR with %d TCB field updates too large", __func__, fields));
- wr = alloc_wrqe(len, toep->ctrlq);
- if (wr == NULL) {
+ m = alloc_raw_wr_mbuf(len);
+ if (m == NULL) {
/* XXX */
panic("%s: out of memory", __func__);
}
- wrh = wrtod(wr);
- INIT_ULPTX_WRH(wrh, len, 1, 0); /* atomic */
+ wrh = mtod(m, struct work_request_hdr *);
+ INIT_ULPTX_WRH(wrh, len, 1, toep->tid); /* atomic */
ulpmc = (struct ulp_txpkt *)(wrh + 1);
/*
@@ -1163,7 +1178,7 @@ tls_update_tcb(struct adapter *sc, struct toepcb *toep, uint64_t seqno)
ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_T_FLAGS,
V_TF_RX_QUIESCE(1), 0);
- t4_wrq_tx(sc, wr);
+ t4_raw_wr_tx(sc, toep, m);
}
/*
diff --git a/sys/dev/cxgbe/tom/t4_tom.c b/sys/dev/cxgbe/tom/t4_tom.c
index 9b09facd05a7..0b54fdaa5c80 100644
--- a/sys/dev/cxgbe/tom/t4_tom.c
+++ b/sys/dev/cxgbe/tom/t4_tom.c
@@ -882,6 +882,8 @@ send_mss_flowc_wr(struct adapter *sc, struct toepcb *toep)
flowc->mnemval[0].val = htobe32(toep->params.emss);
txsd = &toep->txsd[toep->txsd_pidx];
+ _Static_assert(flowclen16 <= MAX_OFLD_TX_SDESC_CREDITS,
+ "MAX_OFLD_TX_SDESC_CREDITS too small");
txsd->tx_credits = flowclen16;
txsd->plen = 0;
toep->tx_credits -= txsd->tx_credits;
diff --git a/sys/dev/cxgbe/tom/t4_tom.h b/sys/dev/cxgbe/tom/t4_tom.h
index 6295a3484b9f..4fb87d92d91e 100644
--- a/sys/dev/cxgbe/tom/t4_tom.h
+++ b/sys/dev/cxgbe/tom/t4_tom.h
@@ -122,10 +122,13 @@ struct conn_params {
};
struct ofld_tx_sdesc {
- uint32_t plen; /* payload length */
- uint8_t tx_credits; /* firmware tx credits (unit is 16B) */
+ uint32_t plen : 26; /* payload length */
+ uint32_t tx_credits : 6; /* firmware tx credits (unit is 16B) */
};
+#define MAX_OFLD_TX_SDESC_PLEN ((1u << 26) - 1)
+#define MAX_OFLD_TX_SDESC_CREDITS ((1u << 6) - 1)
+
struct ppod_region {
u_int pr_start;
u_int pr_len;
@@ -526,6 +529,10 @@ int t4_send_rst(struct toedev *, struct tcpcb *);
void t4_set_tcb_field(struct adapter *, struct sge_wrq *, struct toepcb *,
uint16_t, uint64_t, uint64_t, int, int);
void t4_push_pdus(struct adapter *, struct toepcb *, int);
+bool t4_push_raw_wr(struct adapter *, struct toepcb *, struct mbuf *);
+void t4_raw_wr_tx(struct adapter *, struct toepcb *, struct mbuf *);
+void write_set_tcb_field(struct adapter *, void *, struct toepcb *, uint16_t,
+ uint64_t, uint64_t, int, int);
/* t4_ddp.c */
int t4_init_ppod_region(struct ppod_region *, struct t4_range *, u_int,
@@ -551,6 +558,7 @@ int t4_aio_queue_ddp(struct socket *, struct kaiocb *);
int t4_enable_ddp_rcv(struct socket *, struct toepcb *);
void t4_ddp_mod_load(void);
void t4_ddp_mod_unload(void);
+struct mbuf *alloc_raw_wr_mbuf(int);
void ddp_assert_empty(struct toepcb *);
void ddp_uninit_toep(struct toepcb *);
void ddp_queue_toep(struct toepcb *);
diff --git a/sys/dev/cyapa/cyapa.c b/sys/dev/cyapa/cyapa.c
index 50fa4faa560a..ed755f992949 100644
--- a/sys/dev/cyapa/cyapa.c
+++ b/sys/dev/cyapa/cyapa.c
@@ -761,42 +761,60 @@ again:
/*
* Generate report
*/
- c0 = 0;
- if (delta_x < 0)
- c0 |= 0x10;
- if (delta_y < 0)
- c0 |= 0x20;
- c0 |= 0x08;
- if (but & CYAPA_FNGR_LEFT)
- c0 |= 0x01;
- if (but & CYAPA_FNGR_MIDDLE)
- c0 |= 0x04;
- if (but & CYAPA_FNGR_RIGHT)
- c0 |= 0x02;
-
- fifo_write_char(sc, &sc->rfifo, c0);
- fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_x);
- fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_y);
- switch(sc->zenabled) {
- case 1:
- /* Z axis all 8 bits */
- fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_z);
- break;
- case 2:
- /*
- * Z axis low 4 bits + 4th button and 5th button
- * (high 2 bits must be left 0). Auto-scale
- * delta_z to fit to avoid a wrong-direction
- * overflow (don't try to retain the remainder).
- */
- while (delta_z > 7 || delta_z < -8)
- delta_z >>= 1;
- c0 = (uint8_t)delta_z & 0x0F;
+ if (sc->mode.level == 1) {
+ c0 = MOUSE_SYS_SYNC;
+ if (but & CYAPA_FNGR_LEFT)
+ c0 |= MOUSE_SYS_BUTTON1UP;
+ if (but & CYAPA_FNGR_MIDDLE)
+ c0 |= MOUSE_SYS_BUTTON2UP;
+ if (but & CYAPA_FNGR_RIGHT)
+ c0 |= MOUSE_SYS_BUTTON3UP;
fifo_write_char(sc, &sc->rfifo, c0);
- break;
- default:
- /* basic PS/2 */
- break;
+ fifo_write_char(sc, &sc->rfifo, delta_x >> 1);
+ fifo_write_char(sc, &sc->rfifo, delta_y >> 1);
+ fifo_write_char(sc, &sc->rfifo, delta_x - (delta_x >> 1));
+ fifo_write_char(sc, &sc->rfifo, delta_y - (delta_y >> 1));
+ fifo_write_char(sc, &sc->rfifo, delta_z >> 1);
+ fifo_write_char(sc, &sc->rfifo, delta_z - (delta_z >> 1));
+ fifo_write_char(sc, &sc->rfifo, MOUSE_SYS_EXTBUTTONS);
+ } else {
+ c0 = 0;
+ if (delta_x < 0)
+ c0 |= 0x10;
+ if (delta_y < 0)
+ c0 |= 0x20;
+ c0 |= 0x08;
+ if (but & CYAPA_FNGR_LEFT)
+ c0 |= 0x01;
+ if (but & CYAPA_FNGR_MIDDLE)
+ c0 |= 0x04;
+ if (but & CYAPA_FNGR_RIGHT)
+ c0 |= 0x02;
+
+ fifo_write_char(sc, &sc->rfifo, c0);
+ fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_x);
+ fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_y);
+ switch(sc->zenabled) {
+ case 1:
+ /* Z axis all 8 bits */
+ fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_z);
+ break;
+ case 2:
+ /*
+ * Z axis low 4 bits + 4th button and 5th button
+ * (high 2 bits must be left 0). Auto-scale
+ * delta_z to fit to avoid a wrong-direction
+ * overflow (don't try to retain the remainder).
+ */
+ while (delta_z > 7 || delta_z < -8)
+ delta_z >>= 1;
+ c0 = (uint8_t)delta_z & 0x0F;
+ fifo_write_char(sc, &sc->rfifo, c0);
+ break;
+ default:
+ /* basic PS/2 */
+ break;
+ }
}
cyapa_notify(sc);
}
@@ -1205,6 +1223,11 @@ cyapaioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread
((mousemode_t *)data)->packetsize =
MOUSE_PS2_PACKETSIZE;
break;
+ case 1:
+ ((mousemode_t *)data)->protocol = MOUSE_PROTO_SYSMOUSE;
+ ((mousemode_t *)data)->packetsize =
+ MOUSE_SYS_PACKETSIZE;
+ break;
case 2:
((mousemode_t *)data)->protocol = MOUSE_PROTO_PS2;
((mousemode_t *)data)->packetsize =
@@ -1223,7 +1246,7 @@ cyapaioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread
error = EINVAL;
break;
}
- sc->mode.level = *(int *)data ? 2 : 0;
+ sc->mode.level = *(int *)data;
sc->zenabled = sc->mode.level ? 1 : 0;
break;
diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c
index 9c5ae2806f75..247cf9d7fed3 100644
--- a/sys/dev/e1000/if_em.c
+++ b/sys/dev/e1000/if_em.c
@@ -407,6 +407,7 @@ static int em_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
int);
static void em_if_queues_free(if_ctx_t);
+static uint64_t em_if_get_vf_counter(if_ctx_t, ift_counter);
static uint64_t em_if_get_counter(if_ctx_t, ift_counter);
static void em_if_init(if_ctx_t);
static void em_if_stop(if_ctx_t);
@@ -440,6 +441,7 @@ static int igb_if_tx_queue_intr_enable(if_ctx_t, uint16_t);
static void em_if_multi_set(if_ctx_t);
static void em_if_update_admin_status(if_ctx_t);
static void em_if_debug(if_ctx_t);
+static void em_update_vf_stats_counters(struct e1000_softc *);
static void em_update_stats_counters(struct e1000_softc *);
static void em_add_hw_stats(struct e1000_softc *);
static int em_if_set_promisc(if_ctx_t, int);
@@ -1377,6 +1379,11 @@ em_if_attach_post(if_ctx_t ctx)
em_reset(ctx);
/* Initialize statistics */
+ if (sc->vf_ifp)
+ sc->ustats.vf_stats = (struct e1000_vf_stats){};
+ else
+ sc->ustats.stats = (struct e1000_hw_stats){};
+
em_update_stats_counters(sc);
hw->mac.get_link_status = 1;
em_if_update_admin_status(ctx);
@@ -4668,122 +4675,176 @@ em_disable_aspm(struct e1000_softc *sc)
static void
em_update_stats_counters(struct e1000_softc *sc)
{
- u64 prev_xoffrxc = sc->stats.xoffrxc;
+ struct e1000_hw_stats *stats;
+ u64 prev_xoffrxc;
+
+ if (sc->vf_ifp) {
+ em_update_vf_stats_counters(sc);
+ return;
+ }
+
+ stats = &sc->ustats.stats;
+ prev_xoffrxc = stats->xoffrxc;
if(sc->hw.phy.media_type == e1000_media_type_copper ||
(E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) {
- sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS);
- sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC);
- }
- sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS);
- sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC);
- sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC);
- sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL);
-
- sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC);
- sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL);
- sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC);
- sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC);
- sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC);
- sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC);
- sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC);
- sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC);
+ stats->symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS);
+ stats->sec += E1000_READ_REG(&sc->hw, E1000_SEC);
+ }
+ stats->crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS);
+ stats->mpc += E1000_READ_REG(&sc->hw, E1000_MPC);
+ stats->scc += E1000_READ_REG(&sc->hw, E1000_SCC);
+ stats->ecol += E1000_READ_REG(&sc->hw, E1000_ECOL);
+
+ stats->mcc += E1000_READ_REG(&sc->hw, E1000_MCC);
+ stats->latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL);
+ stats->colc += E1000_READ_REG(&sc->hw, E1000_COLC);
+ stats->dc += E1000_READ_REG(&sc->hw, E1000_DC);
+ stats->rlec += E1000_READ_REG(&sc->hw, E1000_RLEC);
+ stats->xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC);
+ stats->xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC);
+ stats->xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC);
/*
** For watchdog management we need to know if we have been
** paused during the last interval, so capture that here.
*/
- if (sc->stats.xoffrxc != prev_xoffrxc)
+ if (stats->xoffrxc != prev_xoffrxc)
sc->shared->isc_pause_frames = 1;
- sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC);
- sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC);
- sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64);
- sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127);
- sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255);
- sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511);
- sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023);
- sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522);
- sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC);
- sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC);
- sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC);
- sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC);
+ stats->xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC);
+ stats->fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC);
+ stats->prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64);
+ stats->prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127);
+ stats->prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255);
+ stats->prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511);
+ stats->prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023);
+ stats->prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522);
+ stats->gprc += E1000_READ_REG(&sc->hw, E1000_GPRC);
+ stats->bprc += E1000_READ_REG(&sc->hw, E1000_BPRC);
+ stats->mprc += E1000_READ_REG(&sc->hw, E1000_MPRC);
+ stats->gptc += E1000_READ_REG(&sc->hw, E1000_GPTC);
/* For the 64-bit byte counters the low dword must be read first. */
/* Both registers clear on the read of the high dword */
- sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCL) +
+ stats->gorc += E1000_READ_REG(&sc->hw, E1000_GORCL) +
((u64)E1000_READ_REG(&sc->hw, E1000_GORCH) << 32);
- sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCL) +
+ stats->gotc += E1000_READ_REG(&sc->hw, E1000_GOTCL) +
((u64)E1000_READ_REG(&sc->hw, E1000_GOTCH) << 32);
- sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC);
- sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC);
- sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC);
- sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC);
- sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC);
-
- sc->stats.mgprc += E1000_READ_REG(&sc->hw, E1000_MGTPRC);
- sc->stats.mgpdc += E1000_READ_REG(&sc->hw, E1000_MGTPDC);
- sc->stats.mgptc += E1000_READ_REG(&sc->hw, E1000_MGTPTC);
-
- sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH);
- sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH);
-
- sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR);
- sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT);
- sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64);
- sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127);
- sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255);
- sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511);
- sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023);
- sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522);
- sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC);
- sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC);
+ stats->rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC);
+ stats->ruc += E1000_READ_REG(&sc->hw, E1000_RUC);
+ stats->rfc += E1000_READ_REG(&sc->hw, E1000_RFC);
+ stats->roc += E1000_READ_REG(&sc->hw, E1000_ROC);
+ stats->rjc += E1000_READ_REG(&sc->hw, E1000_RJC);
+
+ stats->mgprc += E1000_READ_REG(&sc->hw, E1000_MGTPRC);
+ stats->mgpdc += E1000_READ_REG(&sc->hw, E1000_MGTPDC);
+ stats->mgptc += E1000_READ_REG(&sc->hw, E1000_MGTPTC);
+
+ stats->tor += E1000_READ_REG(&sc->hw, E1000_TORH);
+ stats->tot += E1000_READ_REG(&sc->hw, E1000_TOTH);
+
+ stats->tpr += E1000_READ_REG(&sc->hw, E1000_TPR);
+ stats->tpt += E1000_READ_REG(&sc->hw, E1000_TPT);
+ stats->ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64);
+ stats->ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127);
+ stats->ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255);
+ stats->ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511);
+ stats->ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023);
+ stats->ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522);
+ stats->mptc += E1000_READ_REG(&sc->hw, E1000_MPTC);
+ stats->bptc += E1000_READ_REG(&sc->hw, E1000_BPTC);
/* Interrupt Counts */
- sc->stats.iac += E1000_READ_REG(&sc->hw, E1000_IAC);
- sc->stats.icrxptc += E1000_READ_REG(&sc->hw, E1000_ICRXPTC);
- sc->stats.icrxatc += E1000_READ_REG(&sc->hw, E1000_ICRXATC);
- sc->stats.ictxptc += E1000_READ_REG(&sc->hw, E1000_ICTXPTC);
- sc->stats.ictxatc += E1000_READ_REG(&sc->hw, E1000_ICTXATC);
- sc->stats.ictxqec += E1000_READ_REG(&sc->hw, E1000_ICTXQEC);
- sc->stats.ictxqmtc += E1000_READ_REG(&sc->hw, E1000_ICTXQMTC);
- sc->stats.icrxdmtc += E1000_READ_REG(&sc->hw, E1000_ICRXDMTC);
- sc->stats.icrxoc += E1000_READ_REG(&sc->hw, E1000_ICRXOC);
+ stats->iac += E1000_READ_REG(&sc->hw, E1000_IAC);
+ stats->icrxptc += E1000_READ_REG(&sc->hw, E1000_ICRXPTC);
+ stats->icrxatc += E1000_READ_REG(&sc->hw, E1000_ICRXATC);
+ stats->ictxptc += E1000_READ_REG(&sc->hw, E1000_ICTXPTC);
+ stats->ictxatc += E1000_READ_REG(&sc->hw, E1000_ICTXATC);
+ stats->ictxqec += E1000_READ_REG(&sc->hw, E1000_ICTXQEC);
+ stats->ictxqmtc += E1000_READ_REG(&sc->hw, E1000_ICTXQMTC);
+ stats->icrxdmtc += E1000_READ_REG(&sc->hw, E1000_ICRXDMTC);
+ stats->icrxoc += E1000_READ_REG(&sc->hw, E1000_ICRXOC);
if (sc->hw.mac.type >= e1000_82543) {
- sc->stats.algnerrc +=
+ stats->algnerrc +=
E1000_READ_REG(&sc->hw, E1000_ALGNERRC);
- sc->stats.rxerrc +=
+ stats->rxerrc +=
E1000_READ_REG(&sc->hw, E1000_RXERRC);
- sc->stats.tncrs +=
+ stats->tncrs +=
E1000_READ_REG(&sc->hw, E1000_TNCRS);
- sc->stats.cexterr +=
+ stats->cexterr +=
E1000_READ_REG(&sc->hw, E1000_CEXTERR);
- sc->stats.tsctc +=
+ stats->tsctc +=
E1000_READ_REG(&sc->hw, E1000_TSCTC);
- sc->stats.tsctfc +=
+ stats->tsctfc +=
E1000_READ_REG(&sc->hw, E1000_TSCTFC);
}
}
+static void
+em_update_vf_stats_counters(struct e1000_softc *sc)
+{
+ struct e1000_vf_stats *stats;
+
+ if (sc->link_speed == 0)
+ return;
+
+ stats = &sc->ustats.vf_stats;
+
+ UPDATE_VF_REG(E1000_VFGPRC,
+ stats->last_gprc, stats->gprc);
+ UPDATE_VF_REG(E1000_VFGORC,
+ stats->last_gorc, stats->gorc);
+ UPDATE_VF_REG(E1000_VFGPTC,
+ stats->last_gptc, stats->gptc);
+ UPDATE_VF_REG(E1000_VFGOTC,
+ stats->last_gotc, stats->gotc);
+ UPDATE_VF_REG(E1000_VFMPRC,
+ stats->last_mprc, stats->mprc);
+}
+
+static uint64_t
+em_if_get_vf_counter(if_ctx_t ctx, ift_counter cnt)
+{
+ struct e1000_softc *sc = iflib_get_softc(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
+
+ switch (cnt) {
+ case IFCOUNTER_IERRORS:
+ return sc->dropped_pkts;
+ case IFCOUNTER_OERRORS:
+ return (if_get_counter_default(ifp, cnt) +
+ sc->watchdog_events);
+ default:
+ return (if_get_counter_default(ifp, cnt));
+ }
+}
+
static uint64_t
em_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
struct e1000_softc *sc = iflib_get_softc(ctx);
+ struct e1000_hw_stats *stats;
if_t ifp = iflib_get_ifp(ctx);
+ if (sc->vf_ifp)
+ return (em_if_get_vf_counter(ctx, cnt));
+
+ stats = &sc->ustats.stats;
+
switch (cnt) {
case IFCOUNTER_COLLISIONS:
- return (sc->stats.colc);
+ return (stats->colc);
case IFCOUNTER_IERRORS:
- return (sc->dropped_pkts + sc->stats.rxerrc +
- sc->stats.crcerrs + sc->stats.algnerrc +
- sc->stats.ruc + sc->stats.roc +
- sc->stats.mpc + sc->stats.cexterr);
+ return (sc->dropped_pkts + stats->rxerrc +
+ stats->crcerrs + stats->algnerrc +
+ stats->ruc + stats->roc +
+ stats->mpc + stats->cexterr);
case IFCOUNTER_OERRORS:
- return (sc->stats.ecol + sc->stats.latecol +
- sc->watchdog_events);
+ return (if_get_counter_default(ifp, cnt) +
+ stats->ecol + stats->latecol + sc->watchdog_events);
default:
return (if_get_counter_default(ifp, cnt));
}
@@ -4884,7 +4945,7 @@ em_add_hw_stats(struct e1000_softc *sc)
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid *tree = device_get_sysctl_tree(dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
- struct e1000_hw_stats *stats = &sc->stats;
+ struct e1000_hw_stats *stats;
struct sysctl_oid *stat_node, *queue_node, *int_node;
struct sysctl_oid_list *stat_list, *queue_list, *int_list;
@@ -4975,6 +5036,33 @@ em_add_hw_stats(struct e1000_softc *sc)
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Statistics");
stat_list = SYSCTL_CHILDREN(stat_node);
+ /*
+ ** VF adapter has a very limited set of stats
+ ** since its not managing the metal, so to speak.
+ */
+ if (sc->vf_ifp) {
+ struct e1000_vf_stats *vfstats = &sc->ustats.vf_stats;
+
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
+ CTLFLAG_RD, &vfstats->gprc,
+ "Good Packets Received");
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
+ CTLFLAG_RD, &vfstats->gptc,
+ "Good Packets Transmitted");
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
+ CTLFLAG_RD, &vfstats->gorc,
+ "Good Octets Received");
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
+ CTLFLAG_RD, &vfstats->gotc,
+ "Good Octets Transmitted");
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
+ CTLFLAG_RD, &vfstats->mprc,
+ "Multicast Packets Received");
+ return;
+ }
+
+ stats = &sc->ustats.stats;
+
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
CTLFLAG_RD, &stats->ecol,
"Excessive collisions");
@@ -4991,147 +5079,147 @@ em_add_hw_stats(struct e1000_softc *sc)
CTLFLAG_RD, &stats->colc,
"Collision Count");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
- CTLFLAG_RD, &sc->stats.symerrs,
+ CTLFLAG_RD, &stats->symerrs,
"Symbol Errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
- CTLFLAG_RD, &sc->stats.sec,
+ CTLFLAG_RD, &stats->sec,
"Sequence Errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
- CTLFLAG_RD, &sc->stats.dc,
+ CTLFLAG_RD, &stats->dc,
"Defer Count");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
- CTLFLAG_RD, &sc->stats.mpc,
+ CTLFLAG_RD, &stats->mpc,
"Missed Packets");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_length_errors",
- CTLFLAG_RD, &sc->stats.rlec,
+ CTLFLAG_RD, &stats->rlec,
"Receive Length Errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
- CTLFLAG_RD, &sc->stats.rnbc,
+ CTLFLAG_RD, &stats->rnbc,
"Receive No Buffers");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
- CTLFLAG_RD, &sc->stats.ruc,
+ CTLFLAG_RD, &stats->ruc,
"Receive Undersize");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
- CTLFLAG_RD, &sc->stats.rfc,
+ CTLFLAG_RD, &stats->rfc,
"Fragmented Packets Received ");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
- CTLFLAG_RD, &sc->stats.roc,
+ CTLFLAG_RD, &stats->roc,
"Oversized Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
- CTLFLAG_RD, &sc->stats.rjc,
+ CTLFLAG_RD, &stats->rjc,
"Recevied Jabber");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
- CTLFLAG_RD, &sc->stats.rxerrc,
+ CTLFLAG_RD, &stats->rxerrc,
"Receive Errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
- CTLFLAG_RD, &sc->stats.crcerrs,
+ CTLFLAG_RD, &stats->crcerrs,
"CRC errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
- CTLFLAG_RD, &sc->stats.algnerrc,
+ CTLFLAG_RD, &stats->algnerrc,
"Alignment Errors");
/* On 82575 these are collision counts */
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
- CTLFLAG_RD, &sc->stats.cexterr,
+ CTLFLAG_RD, &stats->cexterr,
"Collision/Carrier extension errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
- CTLFLAG_RD, &sc->stats.xonrxc,
+ CTLFLAG_RD, &stats->xonrxc,
"XON Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
- CTLFLAG_RD, &sc->stats.xontxc,
+ CTLFLAG_RD, &stats->xontxc,
"XON Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
- CTLFLAG_RD, &sc->stats.xoffrxc,
+ CTLFLAG_RD, &stats->xoffrxc,
"XOFF Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
- CTLFLAG_RD, &sc->stats.xofftxc,
+ CTLFLAG_RD, &stats->xofftxc,
"XOFF Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "unsupported_fc_recvd",
- CTLFLAG_RD, &sc->stats.fcruc,
+ CTLFLAG_RD, &stats->fcruc,
"Unsupported Flow Control Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_recvd",
- CTLFLAG_RD, &sc->stats.mgprc,
+ CTLFLAG_RD, &stats->mgprc,
"Management Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_drop",
- CTLFLAG_RD, &sc->stats.mgpdc,
+ CTLFLAG_RD, &stats->mgpdc,
"Management Packets Dropped");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_txd",
- CTLFLAG_RD, &sc->stats.mgptc,
+ CTLFLAG_RD, &stats->mgptc,
"Management Packets Transmitted");
/* Packet Reception Stats */
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
- CTLFLAG_RD, &sc->stats.tpr,
+ CTLFLAG_RD, &stats->tpr,
"Total Packets Received ");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
- CTLFLAG_RD, &sc->stats.gprc,
+ CTLFLAG_RD, &stats->gprc,
"Good Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
- CTLFLAG_RD, &sc->stats.bprc,
+ CTLFLAG_RD, &stats->bprc,
"Broadcast Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
- CTLFLAG_RD, &sc->stats.mprc,
+ CTLFLAG_RD, &stats->mprc,
"Multicast Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
- CTLFLAG_RD, &sc->stats.prc64,
+ CTLFLAG_RD, &stats->prc64,
"64 byte frames received ");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
- CTLFLAG_RD, &sc->stats.prc127,
+ CTLFLAG_RD, &stats->prc127,
"65-127 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
- CTLFLAG_RD, &sc->stats.prc255,
+ CTLFLAG_RD, &stats->prc255,
"128-255 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
- CTLFLAG_RD, &sc->stats.prc511,
+ CTLFLAG_RD, &stats->prc511,
"256-511 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
- CTLFLAG_RD, &sc->stats.prc1023,
+ CTLFLAG_RD, &stats->prc1023,
"512-1023 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
- CTLFLAG_RD, &sc->stats.prc1522,
+ CTLFLAG_RD, &stats->prc1522,
"1023-1522 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
- CTLFLAG_RD, &sc->stats.gorc,
+ CTLFLAG_RD, &stats->gorc,
"Good Octets Received");
/* Packet Transmission Stats */
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
- CTLFLAG_RD, &sc->stats.gotc,
+ CTLFLAG_RD, &stats->gotc,
"Good Octets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
- CTLFLAG_RD, &sc->stats.tpt,
+ CTLFLAG_RD, &stats->tpt,
"Total Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
- CTLFLAG_RD, &sc->stats.gptc,
+ CTLFLAG_RD, &stats->gptc,
"Good Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
- CTLFLAG_RD, &sc->stats.bptc,
+ CTLFLAG_RD, &stats->bptc,
"Broadcast Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
- CTLFLAG_RD, &sc->stats.mptc,
+ CTLFLAG_RD, &stats->mptc,
"Multicast Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
- CTLFLAG_RD, &sc->stats.ptc64,
+ CTLFLAG_RD, &stats->ptc64,
"64 byte frames transmitted ");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
- CTLFLAG_RD, &sc->stats.ptc127,
+ CTLFLAG_RD, &stats->ptc127,
"65-127 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
- CTLFLAG_RD, &sc->stats.ptc255,
+ CTLFLAG_RD, &stats->ptc255,
"128-255 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
- CTLFLAG_RD, &sc->stats.ptc511,
+ CTLFLAG_RD, &stats->ptc511,
"256-511 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
- CTLFLAG_RD, &sc->stats.ptc1023,
+ CTLFLAG_RD, &stats->ptc1023,
"512-1023 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
- CTLFLAG_RD, &sc->stats.ptc1522,
+ CTLFLAG_RD, &stats->ptc1522,
"1024-1522 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
- CTLFLAG_RD, &sc->stats.tsctc,
+ CTLFLAG_RD, &stats->tsctc,
"TSO Contexts Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
- CTLFLAG_RD, &sc->stats.tsctfc,
+ CTLFLAG_RD, &stats->tsctfc,
"TSO Contexts Failed");
/* Interrupt Stats */
@@ -5140,39 +5228,39 @@ em_add_hw_stats(struct e1000_softc *sc)
int_list = SYSCTL_CHILDREN(int_node);
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "asserts",
- CTLFLAG_RD, &sc->stats.iac,
+ CTLFLAG_RD, &stats->iac,
"Interrupt Assertion Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer",
- CTLFLAG_RD, &sc->stats.icrxptc,
+ CTLFLAG_RD, &stats->icrxptc,
"Interrupt Cause Rx Pkt Timer Expire Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_abs_timer",
- CTLFLAG_RD, &sc->stats.icrxatc,
+ CTLFLAG_RD, &stats->icrxatc,
"Interrupt Cause Rx Abs Timer Expire Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer",
- CTLFLAG_RD, &sc->stats.ictxptc,
+ CTLFLAG_RD, &stats->ictxptc,
"Interrupt Cause Tx Pkt Timer Expire Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_abs_timer",
- CTLFLAG_RD, &sc->stats.ictxatc,
+ CTLFLAG_RD, &stats->ictxatc,
"Interrupt Cause Tx Abs Timer Expire Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_empty",
- CTLFLAG_RD, &sc->stats.ictxqec,
+ CTLFLAG_RD, &stats->ictxqec,
"Interrupt Cause Tx Queue Empty Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh",
- CTLFLAG_RD, &sc->stats.ictxqmtc,
+ CTLFLAG_RD, &stats->ictxqmtc,
"Interrupt Cause Tx Queue Min Thresh Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh",
- CTLFLAG_RD, &sc->stats.icrxdmtc,
+ CTLFLAG_RD, &stats->icrxdmtc,
"Interrupt Cause Rx Desc Min Thresh Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_overrun",
- CTLFLAG_RD, &sc->stats.icrxoc,
+ CTLFLAG_RD, &stats->icrxoc,
"Interrupt Cause Receiver Overrun Count");
}
diff --git a/sys/dev/e1000/if_em.h b/sys/dev/e1000/if_em.h
index 52bfed0f9a42..582e8d9c6327 100644
--- a/sys/dev/e1000/if_em.h
+++ b/sys/dev/e1000/if_em.h
@@ -370,6 +370,19 @@
#define EM_NVM_MSIX_N_MASK (0x7 << EM_NVM_MSIX_N_SHIFT)
#define EM_NVM_MSIX_N_SHIFT 7
+/*
+ * VFs use 32-bit counters that roll over.
+ */
+#define UPDATE_VF_REG(reg, last, cur) \
+do { \
+ u32 new = E1000_READ_REG(&sc->hw, reg); \
+ if (new < last) \
+ cur += 0x100000000LL; \
+ last = new; \
+ cur &= 0xFFFFFFFF00000000LL; \
+ cur |= new; \
+} while (0)
+
struct e1000_softc;
struct em_int_delay_info {
@@ -546,7 +559,11 @@ struct e1000_softc {
unsigned long rx_overruns;
unsigned long watchdog_events;
- struct e1000_hw_stats stats;
+ union {
+ struct e1000_hw_stats stats; /* !sc->vf_ifp */
+ struct e1000_vf_stats vf_stats; /* sc->vf_ifp */
+ } ustats;
+
u16 vf_ifp;
};
diff --git a/sys/dev/enetc/if_enetc.c b/sys/dev/enetc/if_enetc.c
index 3a5d6ec23282..808397b229a7 100644
--- a/sys/dev/enetc/if_enetc.c
+++ b/sys/dev/enetc/if_enetc.c
@@ -1343,7 +1343,8 @@ enetc_get_counter(if_ctx_t ctx, ift_counter cnt)
case IFCOUNTER_IERRORS:
return (ENETC_PORT_RD8(sc, ENETC_PM0_RERR));
case IFCOUNTER_OERRORS:
- return (ENETC_PORT_RD8(sc, ENETC_PM0_TERR));
+ return (if_get_counter_default(ifp, cnt) +
+ ENETC_PORT_RD8(sc, ENETC_PM0_TERR));
default:
return (if_get_counter_default(ifp, cnt));
}
diff --git a/sys/dev/gpio/gpio_if.m b/sys/dev/gpio/gpio_if.m
index 5501b2b5c0e7..0b6988ceba79 100644
--- a/sys/dev/gpio/gpio_if.m
+++ b/sys/dev/gpio/gpio_if.m
@@ -62,6 +62,22 @@ CODE {
return (0);
}
+
+ static int
+ gpio_default_get_pin_list(device_t dev, uint32_t *pin_list)
+ {
+ uint32_t maxpin;
+ int err;
+
+ err = GPIO_PIN_MAX(dev, &maxpin);
+ if (err != 0)
+ return (ENXIO);
+
+ for (int i = 0; i <= maxpin; i++)
+ pin_list[i] = i;
+
+ return (0);
+ }
};
HEADER {
@@ -185,3 +201,13 @@ METHOD int pin_config_32 {
uint32_t num_pins;
uint32_t *pin_flags;
} DEFAULT gpio_default_nosupport;
+
+#
+# Get the controller's pin numbers. pin_list is expected to be an array with at
+# least GPIO_PIN_MAX() elements. Populates pin_list from 0 to GPIO_PIN_MAX() by
+# default.
+#
+METHOD int get_pin_list {
+ device_t dev;
+ uint32_t *pin_list;
+} DEFAULT gpio_default_get_pin_list;
diff --git a/sys/dev/gpio/gpiobus.c b/sys/dev/gpio/gpiobus.c
index 5f1f6532a79b..698b5e5fdd01 100644
--- a/sys/dev/gpio/gpiobus.c
+++ b/sys/dev/gpio/gpiobus.c
@@ -319,10 +319,6 @@ gpiobus_add_bus(device_t dev)
busdev = device_add_child(dev, "gpiobus", DEVICE_UNIT_ANY);
if (busdev == NULL)
return (NULL);
- if (device_add_child(dev, "gpioc", DEVICE_UNIT_ANY) == NULL) {
- device_delete_child(dev, busdev);
- return (NULL);
- }
#ifdef FDT
ofw_gpiobus_register_provider(dev);
#endif
@@ -372,6 +368,37 @@ gpiobus_init_softc(device_t dev)
}
int
+gpiobus_add_gpioc(device_t dev)
+{
+ struct gpiobus_ivar *devi;
+ struct gpiobus_softc *sc;
+ device_t gpioc;
+ int err;
+
+ gpioc = BUS_ADD_CHILD(dev, 0, "gpioc", DEVICE_UNIT_ANY);
+ if (gpioc == NULL)
+ return (ENXIO);
+
+ sc = device_get_softc(dev);
+ devi = device_get_ivars(gpioc);
+
+ devi->npins = sc->sc_npins;
+ err = gpiobus_alloc_ivars(devi);
+ if (err != 0) {
+ device_delete_child(dev, gpioc);
+ return (err);
+ }
+
+ err = GPIO_GET_PIN_LIST(sc->sc_dev, devi->pins);
+ if (err != 0) {
+ device_delete_child(dev, gpioc);
+ gpiobus_free_ivars(devi);
+ }
+
+ return (err);
+}
+
+int
gpiobus_alloc_ivars(struct gpiobus_ivar *devi)
{
@@ -562,6 +589,10 @@ gpiobus_attach(device_t dev)
if (err != 0)
return (err);
+ err = gpiobus_add_gpioc(dev);
+ if (err != 0)
+ return (err);
+
/*
* Get parent's pins and mark them as unmapped
*/
@@ -961,7 +992,7 @@ gpiobus_pin_getflags(device_t dev, device_t child, uint32_t pin,
if (pin >= devi->npins)
return (EINVAL);
- return GPIO_PIN_GETFLAGS(sc->sc_dev, devi->pins[pin], flags);
+ return (GPIO_PIN_GETFLAGS(sc->sc_dev, devi->pins[pin], flags));
}
static int
@@ -974,7 +1005,7 @@ gpiobus_pin_getcaps(device_t dev, device_t child, uint32_t pin,
if (pin >= devi->npins)
return (EINVAL);
- return GPIO_PIN_GETCAPS(sc->sc_dev, devi->pins[pin], caps);
+ return (GPIO_PIN_GETCAPS(sc->sc_dev, devi->pins[pin], caps));
}
static int
@@ -987,7 +1018,7 @@ gpiobus_pin_set(device_t dev, device_t child, uint32_t pin,
if (pin >= devi->npins)
return (EINVAL);
- return GPIO_PIN_SET(sc->sc_dev, devi->pins[pin], value);
+ return (GPIO_PIN_SET(sc->sc_dev, devi->pins[pin], value));
}
static int
@@ -1000,7 +1031,7 @@ gpiobus_pin_get(device_t dev, device_t child, uint32_t pin,
if (pin >= devi->npins)
return (EINVAL);
- return GPIO_PIN_GET(sc->sc_dev, devi->pins[pin], value);
+ return (GPIO_PIN_GET(sc->sc_dev, devi->pins[pin], value));
}
static int
@@ -1012,7 +1043,57 @@ gpiobus_pin_toggle(device_t dev, device_t child, uint32_t pin)
if (pin >= devi->npins)
return (EINVAL);
- return GPIO_PIN_TOGGLE(sc->sc_dev, devi->pins[pin]);
+ return (GPIO_PIN_TOGGLE(sc->sc_dev, devi->pins[pin]));
+}
+
+/*
+ * Verify that a child has all the pins they are requesting
+ * to access in their ivars.
+ */
+static bool
+gpiobus_pin_verify_32(struct gpiobus_ivar *devi, uint32_t first_pin,
+ uint32_t num_pins)
+{
+ if (first_pin + num_pins > devi->npins)
+ return (false);
+
+ /* Make sure the pins are consecutive. */
+ for (uint32_t pin = first_pin; pin < first_pin + num_pins - 1; pin++) {
+ if (devi->pins[pin] + 1 != devi->pins[pin + 1])
+ return (false);
+ }
+
+ return (true);
+}
+
+static int
+gpiobus_pin_access_32(device_t dev, device_t child, uint32_t first_pin,
+ uint32_t clear_pins, uint32_t change_pins, uint32_t *orig_pins)
+{
+ struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev);
+ struct gpiobus_ivar *devi = GPIOBUS_IVAR(child);
+
+ if (!gpiobus_pin_verify_32(devi, first_pin, 32))
+ return (EINVAL);
+
+ return (GPIO_PIN_ACCESS_32(sc->sc_dev, devi->pins[first_pin],
+ clear_pins, change_pins, orig_pins));
+}
+
+static int
+gpiobus_pin_config_32(device_t dev, device_t child, uint32_t first_pin,
+ uint32_t num_pins, uint32_t *pin_flags)
+{
+ struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev);
+ struct gpiobus_ivar *devi = GPIOBUS_IVAR(child);
+
+ if (num_pins > 32)
+ return (EINVAL);
+ if (!gpiobus_pin_verify_32(devi, first_pin, num_pins))
+ return (EINVAL);
+
+ return (GPIO_PIN_CONFIG_32(sc->sc_dev,
+ devi->pins[first_pin], num_pins, pin_flags));
}
static int
@@ -1093,6 +1174,8 @@ static device_method_t gpiobus_methods[] = {
DEVMETHOD(gpiobus_pin_get, gpiobus_pin_get),
DEVMETHOD(gpiobus_pin_set, gpiobus_pin_set),
DEVMETHOD(gpiobus_pin_toggle, gpiobus_pin_toggle),
+ DEVMETHOD(gpiobus_pin_access_32,gpiobus_pin_access_32),
+ DEVMETHOD(gpiobus_pin_config_32,gpiobus_pin_config_32),
DEVMETHOD(gpiobus_pin_getname, gpiobus_pin_getname),
DEVMETHOD(gpiobus_pin_setname, gpiobus_pin_setname),
diff --git a/sys/dev/gpio/gpiobus_if.m b/sys/dev/gpio/gpiobus_if.m
index 8bf29839ef4e..890738c4e809 100644
--- a/sys/dev/gpio/gpiobus_if.m
+++ b/sys/dev/gpio/gpiobus_if.m
@@ -107,6 +107,36 @@ METHOD int pin_setflags {
};
#
+# Simultaneously read and/or change up to 32 adjacent pins.
+# If the device cannot change the pins simultaneously, returns EOPNOTSUPP.
+#
+# More details about using this interface can be found in sys/gpio.h
+#
+METHOD int pin_access_32 {
+ device_t dev;
+ device_t child;
+ uint32_t first_pin;
+ uint32_t clear_pins;
+ uint32_t change_pins;
+ uint32_t *orig_pins;
+};
+
+#
+# Simultaneously configure up to 32 adjacent pins.
+# This is intended to change the configuration of all the pins simultaneously,
+# but unlike pin_access_32, this will not fail if the hardware can't do so.
+#
+# More details about using this interface can be found in sys/gpio.h
+#
+METHOD int pin_config_32 {
+ device_t dev;
+ device_t child;
+ uint32_t first_pin;
+ uint32_t num_pins;
+ uint32_t *pin_flags;
+};
+
+#
# Get the pin name
#
METHOD int pin_getname {
diff --git a/sys/dev/gpio/gpiobus_internal.h b/sys/dev/gpio/gpiobus_internal.h
index c198e5f79989..58f862343403 100644
--- a/sys/dev/gpio/gpiobus_internal.h
+++ b/sys/dev/gpio/gpiobus_internal.h
@@ -44,6 +44,7 @@ int gpiobus_acquire_pin(device_t, uint32_t);
void gpiobus_release_pin(device_t, uint32_t);
int gpiobus_child_location(device_t, device_t, struct sbuf *);
device_t gpiobus_add_child_common(device_t, u_int, const char *, int, size_t);
+int gpiobus_add_gpioc(device_t);
extern driver_t gpiobus_driver;
#endif
diff --git a/sys/dev/gpio/gpioc.c b/sys/dev/gpio/gpioc.c
index 87fed38ebe3e..5a60f939dc78 100644
--- a/sys/dev/gpio/gpioc.c
+++ b/sys/dev/gpio/gpioc.c
@@ -45,7 +45,6 @@
#include <dev/gpio/gpiobusvar.h>
-#include "gpio_if.h"
#include "gpiobus_if.h"
#undef GPIOC_DEBUG
@@ -59,7 +58,7 @@
struct gpioc_softc {
device_t sc_dev; /* gpiocX dev */
- device_t sc_pdev; /* gpioX dev */
+ device_t sc_pdev; /* gpiobusX dev */
struct cdev *sc_ctl_dev; /* controller device */
int sc_unit;
int sc_npins;
@@ -69,6 +68,7 @@ struct gpioc_softc {
struct gpioc_pin_intr {
struct gpioc_softc *sc;
gpio_pin_t pin;
+ uint32_t intr_mode;
bool config_locked;
int intr_rid;
struct resource *intr_res;
@@ -112,8 +112,10 @@ struct gpioc_pin_event {
static MALLOC_DEFINE(M_GPIOC, "gpioc", "gpioc device data");
-static int gpioc_allocate_pin_intr(struct gpioc_pin_intr*, uint32_t);
-static int gpioc_release_pin_intr(struct gpioc_pin_intr*);
+static int gpioc_allocate_pin_intr(struct gpioc_softc*,
+ struct gpioc_pin_intr*, uint32_t, uint32_t);
+static int gpioc_release_pin_intr(struct gpioc_softc*,
+ struct gpioc_pin_intr*);
static int gpioc_attach_priv_pin(struct gpioc_cdevpriv*,
struct gpioc_pin_intr*);
static int gpioc_detach_priv_pin(struct gpioc_cdevpriv*,
@@ -191,27 +193,36 @@ number_of_events(struct gpioc_cdevpriv *priv)
}
static int
-gpioc_allocate_pin_intr(struct gpioc_pin_intr *intr_conf, uint32_t flags)
+gpioc_allocate_pin_intr(struct gpioc_softc *sc,
+ struct gpioc_pin_intr *intr_conf, uint32_t pin, uint32_t flags)
{
int err;
intr_conf->config_locked = true;
mtx_unlock(&intr_conf->mtx);
- intr_conf->intr_res = gpio_alloc_intr_resource(intr_conf->pin->dev,
+ MPASS(intr_conf->pin == NULL);
+ err = gpio_pin_get_by_bus_pinnum(sc->sc_pdev, pin, &intr_conf->pin);
+ if (err != 0)
+ goto error_exit;
+
+ intr_conf->intr_res = gpio_alloc_intr_resource(sc->sc_dev,
&intr_conf->intr_rid, RF_ACTIVE, intr_conf->pin, flags);
if (intr_conf->intr_res == NULL) {
err = ENXIO;
- goto error_exit;
+ goto error_pin;
}
- err = bus_setup_intr(intr_conf->pin->dev, intr_conf->intr_res,
+ err = bus_setup_intr(sc->sc_dev, intr_conf->intr_res,
INTR_TYPE_MISC | INTR_MPSAFE, NULL, gpioc_interrupt_handler,
intr_conf, &intr_conf->intr_cookie);
- if (err != 0)
- goto error_exit;
+ if (err != 0) {
+ bus_release_resource(sc->sc_dev, intr_conf->intr_res);
+ intr_conf->intr_res = NULL;
+ goto error_pin;
+ }
- intr_conf->pin->flags = flags;
+ intr_conf->intr_mode = flags;
error_exit:
mtx_lock(&intr_conf->mtx);
@@ -219,10 +230,15 @@ error_exit:
wakeup(&intr_conf->config_locked);
return (err);
+
+error_pin:
+ gpio_pin_release(intr_conf->pin);
+ intr_conf->pin = NULL;
+ goto error_exit;
}
static int
-gpioc_release_pin_intr(struct gpioc_pin_intr *intr_conf)
+gpioc_release_pin_intr(struct gpioc_softc *sc, struct gpioc_pin_intr *intr_conf)
{
int err;
@@ -230,8 +246,8 @@ gpioc_release_pin_intr(struct gpioc_pin_intr *intr_conf)
mtx_unlock(&intr_conf->mtx);
if (intr_conf->intr_cookie != NULL) {
- err = bus_teardown_intr(intr_conf->pin->dev,
- intr_conf->intr_res, intr_conf->intr_cookie);
+ err = bus_teardown_intr(sc->sc_dev, intr_conf->intr_res,
+ intr_conf->intr_cookie);
if (err != 0)
goto error_exit;
else
@@ -239,7 +255,7 @@ gpioc_release_pin_intr(struct gpioc_pin_intr *intr_conf)
}
if (intr_conf->intr_res != NULL) {
- err = bus_release_resource(intr_conf->pin->dev, SYS_RES_IRQ,
+ err = bus_release_resource(sc->sc_dev, SYS_RES_IRQ,
intr_conf->intr_rid, intr_conf->intr_res);
if (err != 0)
goto error_exit;
@@ -249,7 +265,10 @@ gpioc_release_pin_intr(struct gpioc_pin_intr *intr_conf)
}
}
- intr_conf->pin->flags = 0;
+ gpio_pin_release(intr_conf->pin);
+ intr_conf->pin = NULL;
+
+ intr_conf->intr_mode = 0;
err = 0;
error_exit:
@@ -386,7 +405,7 @@ gpioc_get_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv,
struct gpioc_privs *priv_link;
uint32_t flags;
- flags = intr_conf->pin->flags;
+ flags = intr_conf->intr_mode;
if (flags == 0)
return (0);
@@ -411,7 +430,7 @@ gpioc_set_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv,
int res;
res = 0;
- if (intr_conf->pin->flags == 0 && flags == 0) {
+ if (intr_conf->intr_mode == 0 && flags == 0) {
/* No interrupt configured and none requested: Do nothing. */
return (0);
}
@@ -419,17 +438,17 @@ gpioc_set_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv,
while (intr_conf->config_locked == true)
mtx_sleep(&intr_conf->config_locked, &intr_conf->mtx, 0,
"gpicfg", 0);
- if (intr_conf->pin->flags == 0 && flags != 0) {
+ if (intr_conf->intr_mode == 0 && flags != 0) {
/*
* No interrupt is configured, but one is requested: Allocate
* and setup interrupt on the according pin.
*/
- res = gpioc_allocate_pin_intr(intr_conf, flags);
+ res = gpioc_allocate_pin_intr(sc, intr_conf, pin, flags);
if (res == 0)
res = gpioc_attach_priv_pin(priv, intr_conf);
if (res == EEXIST)
res = 0;
- } else if (intr_conf->pin->flags == flags) {
+ } else if (intr_conf->intr_mode == flags) {
/*
* Same interrupt requested as already configured: Attach the
* cdevpriv to the corresponding pin.
@@ -437,14 +456,14 @@ gpioc_set_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv,
res = gpioc_attach_priv_pin(priv, intr_conf);
if (res == EEXIST)
res = 0;
- } else if (intr_conf->pin->flags != 0 && flags == 0) {
+ } else if (intr_conf->intr_mode != 0 && flags == 0) {
/*
* Interrupt configured, but none requested: Teardown and
* release the pin when no other cdevpriv is attached. Otherwise
* just detach pin and cdevpriv from each other.
*/
if (gpioc_intr_reconfig_allowed(priv, intr_conf)) {
- res = gpioc_release_pin_intr(intr_conf);
+ res = gpioc_release_pin_intr(sc, intr_conf);
}
if (res == 0)
res = gpioc_detach_priv_pin(priv, intr_conf);
@@ -456,9 +475,10 @@ gpioc_set_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv,
if (!gpioc_intr_reconfig_allowed(priv, intr_conf))
res = EBUSY;
else {
- res = gpioc_release_pin_intr(intr_conf);
+ res = gpioc_release_pin_intr(sc, intr_conf);
if (res == 0)
- res = gpioc_allocate_pin_intr(intr_conf, flags);
+ res = gpioc_allocate_pin_intr(sc, intr_conf,
+ pin, flags);
if (res == 0)
res = gpioc_attach_priv_pin(priv, intr_conf);
if (res == EEXIST)
@@ -475,18 +495,16 @@ gpioc_interrupt_handler(void *arg)
{
struct gpioc_pin_intr *intr_conf;
struct gpioc_privs *privs;
- struct gpioc_softc *sc;
sbintime_t evtime;
- uint32_t pin_state;
+ bool pin_state;
intr_conf = arg;
- sc = intr_conf->sc;
/* Capture time and pin state first. */
evtime = sbinuptime();
- if (intr_conf->pin->flags & GPIO_INTR_EDGE_BOTH)
- GPIO_PIN_GET(sc->sc_pdev, intr_conf->pin->pin, &pin_state);
- else if (intr_conf->pin->flags & GPIO_INTR_EDGE_RISING)
+ if (intr_conf->intr_mode & GPIO_INTR_EDGE_BOTH)
+ gpio_pin_is_active(intr_conf->pin, &pin_state);
+ else if (intr_conf->intr_mode & GPIO_INTR_EDGE_RISING)
pin_state = true;
else
pin_state = false;
@@ -575,18 +593,11 @@ gpioc_attach(device_t dev)
sc->sc_pdev = device_get_parent(dev);
sc->sc_unit = device_get_unit(dev);
- err = GPIO_PIN_MAX(sc->sc_pdev, &sc->sc_npins);
- sc->sc_npins++; /* Number of pins is one more than max pin number. */
- if (err != 0)
- return (err);
+ sc->sc_npins = gpiobus_get_npins(dev);
sc->sc_pin_intr = malloc(sizeof(struct gpioc_pin_intr) * sc->sc_npins,
M_GPIOC, M_WAITOK | M_ZERO);
for (int i = 0; i < sc->sc_npins; i++) {
- sc->sc_pin_intr[i].pin = malloc(sizeof(struct gpiobus_pin),
- M_GPIOC, M_WAITOK | M_ZERO);
sc->sc_pin_intr[i].sc = sc;
- sc->sc_pin_intr[i].pin->pin = i;
- sc->sc_pin_intr[i].pin->dev = sc->sc_pdev;
mtx_init(&sc->sc_pin_intr[i].mtx, "gpioc pin", NULL, MTX_DEF);
SLIST_INIT(&sc->sc_pin_intr[i].privs);
}
@@ -610,20 +621,16 @@ static int
gpioc_detach(device_t dev)
{
struct gpioc_softc *sc = device_get_softc(dev);
- int err;
if (sc->sc_ctl_dev)
destroy_dev(sc->sc_ctl_dev);
for (int i = 0; i < sc->sc_npins; i++) {
mtx_destroy(&sc->sc_pin_intr[i].mtx);
- free(sc->sc_pin_intr[i].pin, M_GPIOC);
+ MPASS(sc->sc_pin_intr[i].pin == NULL);
}
free(sc->sc_pin_intr, M_GPIOC);
- if ((err = bus_generic_detach(dev)) != 0)
- return (err);
-
return (0);
}
@@ -655,7 +662,7 @@ gpioc_cdevpriv_dtor(void *data)
KASSERT(consistency == 1,
("inconsistent links between pin config and cdevpriv"));
if (gpioc_intr_reconfig_allowed(priv, pin_link->pin)) {
- gpioc_release_pin_intr(pin_link->pin);
+ gpioc_release_pin_intr(priv->sc, pin_link->pin);
}
mtx_unlock(&pin_link->pin->mtx);
SLIST_REMOVE(&priv->pins, pin_link, gpioc_pins, next);
@@ -778,7 +785,6 @@ static int
gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
struct thread *td)
{
- device_t bus;
int max_pin, res;
struct gpioc_softc *sc = cdev->si_drv1;
struct gpioc_cdevpriv *priv;
@@ -789,30 +795,32 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
struct gpio_event_config *evcfg;
uint32_t caps, intrflags;
- bus = GPIO_GET_BUS(sc->sc_pdev);
- if (bus == NULL)
- return (EINVAL);
switch (cmd) {
case GPIOMAXPIN:
- max_pin = -1;
- res = GPIO_PIN_MAX(sc->sc_pdev, &max_pin);
+ res = 0;
+ max_pin = sc->sc_npins - 1;
bcopy(&max_pin, arg, sizeof(max_pin));
break;
case GPIOGETCONFIG:
bcopy(arg, &pin, sizeof(pin));
dprintf("get config pin %d\n", pin.gp_pin);
- res = GPIO_PIN_GETFLAGS(sc->sc_pdev, pin.gp_pin,
+ res = GPIOBUS_PIN_GETFLAGS(sc->sc_pdev, sc->sc_dev, pin.gp_pin,
&pin.gp_flags);
/* Fail early */
- if (res)
+ if (res != 0)
break;
res = devfs_get_cdevpriv((void **)&priv);
- if (res)
+ if (res != 0)
break;
pin.gp_flags |= gpioc_get_intr_config(sc, priv,
pin.gp_pin);
- GPIO_PIN_GETCAPS(sc->sc_pdev, pin.gp_pin, &pin.gp_caps);
- GPIOBUS_PIN_GETNAME(bus, pin.gp_pin, pin.gp_name);
+ res = GPIOBUS_PIN_GETCAPS(sc->sc_pdev, sc->sc_dev, pin.gp_pin,
+ &pin.gp_caps);
+ if (res != 0)
+ break;
+ res = GPIOBUS_PIN_GETNAME(sc->sc_pdev, pin.gp_pin, pin.gp_name);
+ if (res != 0)
+ break;
bcopy(&pin, arg, sizeof(pin));
break;
case GPIOSETCONFIG:
@@ -821,7 +829,8 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
res = devfs_get_cdevpriv((void **)&priv);
if (res != 0)
break;
- res = GPIO_PIN_GETCAPS(sc->sc_pdev, pin.gp_pin, &caps);
+ res = GPIOBUS_PIN_GETCAPS(sc->sc_pdev, sc->sc_dev,
+ pin.gp_pin, &caps);
if (res != 0)
break;
res = gpio_check_flags(caps, pin.gp_flags);
@@ -847,8 +856,8 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
}
if (res != 0)
break;
- res = GPIO_PIN_SETFLAGS(sc->sc_pdev, pin.gp_pin,
- (pin.gp_flags & ~GPIO_INTR_MASK));
+ res = GPIOBUS_PIN_SETFLAGS(sc->sc_pdev, sc->sc_dev, pin.gp_pin,
+ pin.gp_flags & ~GPIO_INTR_MASK);
if (res != 0)
break;
res = gpioc_set_intr_config(sc, priv, pin.gp_pin,
@@ -856,40 +865,43 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
break;
case GPIOGET:
bcopy(arg, &req, sizeof(req));
- res = GPIO_PIN_GET(sc->sc_pdev, req.gp_pin,
+ res = GPIOBUS_PIN_GET(sc->sc_pdev, sc->sc_dev, req.gp_pin,
&req.gp_value);
- dprintf("read pin %d -> %d\n",
+ if (res != 0)
+ break;
+ dprintf("read pin %d -> %d\n",
req.gp_pin, req.gp_value);
bcopy(&req, arg, sizeof(req));
break;
case GPIOSET:
bcopy(arg, &req, sizeof(req));
- res = GPIO_PIN_SET(sc->sc_pdev, req.gp_pin,
+ res = GPIOBUS_PIN_SET(sc->sc_pdev, sc->sc_dev, req.gp_pin,
req.gp_value);
- dprintf("write pin %d -> %d\n",
+ dprintf("write pin %d -> %d\n",
req.gp_pin, req.gp_value);
break;
case GPIOTOGGLE:
bcopy(arg, &req, sizeof(req));
- dprintf("toggle pin %d\n",
+ dprintf("toggle pin %d\n",
req.gp_pin);
- res = GPIO_PIN_TOGGLE(sc->sc_pdev, req.gp_pin);
+ res = GPIOBUS_PIN_TOGGLE(sc->sc_pdev, sc->sc_dev, req.gp_pin);
break;
case GPIOSETNAME:
bcopy(arg, &pin, sizeof(pin));
dprintf("set name on pin %d\n", pin.gp_pin);
- res = GPIOBUS_PIN_SETNAME(bus, pin.gp_pin,
+ res = GPIOBUS_PIN_SETNAME(sc->sc_pdev, pin.gp_pin,
pin.gp_name);
break;
case GPIOACCESS32:
a32 = (struct gpio_access_32 *)arg;
- res = GPIO_PIN_ACCESS_32(sc->sc_pdev, a32->first_pin,
- a32->clear_pins, a32->change_pins, &a32->orig_pins);
+ res = GPIOBUS_PIN_ACCESS_32(sc->sc_pdev, sc->sc_dev,
+ a32->first_pin, a32->clear_pins, a32->change_pins,
+ &a32->orig_pins);
break;
case GPIOCONFIG32:
c32 = (struct gpio_config_32 *)arg;
- res = GPIO_PIN_CONFIG_32(sc->sc_pdev, c32->first_pin,
- c32->num_pins, c32->pin_flags);
+ res = GPIOBUS_PIN_CONFIG_32(sc->sc_pdev, sc->sc_dev,
+ c32->first_pin, c32->num_pins, c32->pin_flags);
break;
case GPIOCONFIGEVENTS:
evcfg = (struct gpio_event_config *)arg;
@@ -1050,9 +1062,6 @@ static device_method_t gpioc_methods[] = {
DEVMETHOD(device_probe, gpioc_probe),
DEVMETHOD(device_attach, gpioc_attach),
DEVMETHOD(device_detach, gpioc_detach),
- DEVMETHOD(device_shutdown, bus_generic_shutdown),
- DEVMETHOD(device_suspend, bus_generic_suspend),
- DEVMETHOD(device_resume, bus_generic_resume),
DEVMETHOD_END
};
@@ -1063,5 +1072,5 @@ driver_t gpioc_driver = {
sizeof(struct gpioc_softc)
};
-DRIVER_MODULE(gpioc, gpio, gpioc_driver, 0, 0);
+DRIVER_MODULE(gpioc, gpiobus, gpioc_driver, 0, 0);
MODULE_VERSION(gpioc, 1);
diff --git a/sys/dev/gpio/gpioled.c b/sys/dev/gpio/gpioled.c
index ba53cb733971..a36c2faef379 100644
--- a/sys/dev/gpio/gpioled.c
+++ b/sys/dev/gpio/gpioled.c
@@ -55,13 +55,13 @@
device_get_nameunit((_sc)->sc_dev), "gpioled", MTX_DEF)
#define GPIOLED_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_mtx)
-struct gpioled_softc
+struct gpioled_softc
{
device_t sc_dev;
device_t sc_busdev;
struct mtx sc_mtx;
struct cdev *sc_leddev;
- int sc_invert;
+ int sc_softinvert;
};
static void gpioled_control(void *, int);
@@ -69,20 +69,17 @@ static int gpioled_probe(device_t);
static int gpioled_attach(device_t);
static int gpioled_detach(device_t);
-static void
+static void
gpioled_control(void *priv, int onoff)
{
struct gpioled_softc *sc;
sc = (struct gpioled_softc *)priv;
+ if (sc->sc_softinvert)
+ onoff = !onoff;
GPIOLED_LOCK(sc);
- if (GPIOBUS_PIN_SETFLAGS(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
- GPIO_PIN_OUTPUT) == 0) {
- if (sc->sc_invert)
- onoff = !onoff;
- GPIOBUS_PIN_SET(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
- onoff ? GPIO_PIN_HIGH : GPIO_PIN_LOW);
- }
+ GPIOBUS_PIN_SET(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
+ onoff ? GPIO_PIN_HIGH : GPIO_PIN_LOW);
GPIOLED_UNLOCK(sc);
}
@@ -95,26 +92,101 @@ gpioled_probe(device_t dev)
}
static int
+gpioled_inv(device_t dev, uint32_t *pin_flags)
+{
+ struct gpioled_softc *sc;
+ int invert;
+ uint32_t pin_caps;
+
+ sc = device_get_softc(dev);
+
+ if (resource_int_value(device_get_name(dev),
+ device_get_unit(dev), "invert", &invert))
+ invert = 0;
+
+ if (GPIOBUS_PIN_GETCAPS(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
+ &pin_caps) != 0) {
+ if (bootverbose)
+ device_printf(sc->sc_dev, "unable to get pin caps\n");
+ return (-1);
+ }
+ if (pin_caps & GPIO_PIN_INVOUT)
+ *pin_flags &= ~GPIO_PIN_INVOUT;
+ sc->sc_softinvert = 0;
+ if (invert) {
+ const char *invmode;
+
+ if (resource_string_value(device_get_name(dev),
+ device_get_unit(dev), "invmode", &invmode))
+ invmode = NULL;
+
+ if (invmode) {
+ if (!strcmp(invmode, "sw"))
+ sc->sc_softinvert = 1;
+ else if (!strcmp(invmode, "hw")) {
+ if (pin_caps & GPIO_PIN_INVOUT)
+ *pin_flags |= GPIO_PIN_INVOUT;
+ else {
+ device_printf(sc->sc_dev, "hardware pin inversion not supported\n");
+ return (-1);
+ }
+ } else {
+ if (strcmp(invmode, "auto") != 0)
+ device_printf(sc->sc_dev, "invalid pin inversion mode\n");
+ invmode = NULL;
+ }
+ }
+ /*
+		 * auto inversion mode: use hardware support if available, else fall
+		 * back to software emulation.
+ */
+ if (invmode == NULL) {
+ if (pin_caps & GPIO_PIN_INVOUT)
+ *pin_flags |= GPIO_PIN_INVOUT;
+ else
+ sc->sc_softinvert = 1;
+ }
+ }
+ MPASS(!invert ||
+ (((*pin_flags & GPIO_PIN_INVOUT) != 0) && !sc->sc_softinvert) ||
+ (((*pin_flags & GPIO_PIN_INVOUT) == 0) && sc->sc_softinvert));
+ return (invert);
+}
+
+static int
gpioled_attach(device_t dev)
{
struct gpioled_softc *sc;
int state;
const char *name;
+ uint32_t pin_flags;
+ int invert;
sc = device_get_softc(dev);
sc->sc_dev = dev;
sc->sc_busdev = device_get_parent(dev);
GPIOLED_LOCK_INIT(sc);
- state = 0;
-
- if (resource_string_value(device_get_name(dev),
+ if (resource_string_value(device_get_name(dev),
device_get_unit(dev), "name", &name))
name = NULL;
- resource_int_value(device_get_name(dev),
- device_get_unit(dev), "invert", &sc->sc_invert);
- resource_int_value(device_get_name(dev),
- device_get_unit(dev), "state", &state);
+
+ if (resource_int_value(device_get_name(dev),
+ device_get_unit(dev), "state", &state))
+ state = 0;
+
+ pin_flags = GPIO_PIN_OUTPUT;
+ invert = gpioled_inv(dev, &pin_flags);
+ if (invert < 0)
+ return (ENXIO);
+ device_printf(sc->sc_dev, "state %d invert %s\n",
+ state, (invert ? (sc->sc_softinvert ? "sw" : "hw") : "no"));
+ if (GPIOBUS_PIN_SETFLAGS(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
+ pin_flags) != 0) {
+ if (bootverbose)
+ device_printf(sc->sc_dev, "unable to set pin flags, %#x\n", pin_flags);
+ return (ENXIO);
+ }
sc->sc_leddev = led_create_state(gpioled_control, sc, name ? name :
device_get_nameunit(dev), state);
diff --git a/sys/dev/gpio/ofw_gpiobus.c b/sys/dev/gpio/ofw_gpiobus.c
index b12b78fac18c..da1bfbc268b8 100644
--- a/sys/dev/gpio/ofw_gpiobus.c
+++ b/sys/dev/gpio/ofw_gpiobus.c
@@ -426,6 +426,9 @@ ofw_gpiobus_attach(device_t dev)
err = gpiobus_init_softc(dev);
if (err != 0)
return (err);
+ err = gpiobus_add_gpioc(dev);
+ if (err != 0)
+ return (err);
bus_identify_children(dev);
bus_enumerate_hinted_children(dev);
/*
diff --git a/sys/dev/hpt27xx/hptintf.h b/sys/dev/hpt27xx/hptintf.h
index 558b479ec2ee..eb8105ec5666 100644
--- a/sys/dev/hpt27xx/hptintf.h
+++ b/sys/dev/hpt27xx/hptintf.h
@@ -155,8 +155,8 @@ typedef HPT_U32 DEVICEID;
#define ARRAY_FLAG_NEED_AUTOREBUILD 0x00000080 /* auto-rebuild should start */
#define ARRAY_FLAG_VERIFYING 0x00000100 /* is being verified */
#define ARRAY_FLAG_INITIALIZING 0x00000200 /* is being initialized */
-#define ARRAY_FLAG_TRANSFORMING 0x00000400 /* tranform in progress */
-#define ARRAY_FLAG_NEEDTRANSFORM 0x00000800 /* array need tranform */
+#define ARRAY_FLAG_TRANSFORMING 0x00000400 /* transform in progress */
+#define ARRAY_FLAG_NEEDTRANSFORM 0x00000800 /* array need transform */
#define ARRAY_FLAG_NEEDINITIALIZING 0x00001000 /* the array's initialization hasn't finished*/
#define ARRAY_FLAG_BROKEN_REDUNDANT 0x00002000 /* broken but redundant (raid6) */
#define ARRAY_FLAG_RAID15PLUS 0x80000000 /* display this RAID 1 as RAID 1.5 */
@@ -2018,7 +2018,7 @@ DEVICEID hpt_create_transform_v2(DEVICEID idArray, PCREATE_ARRAY_PARAMS_V3 destI
#endif
/* hpt_step_transform
- * move a block in a tranform progress.
+ * move a block in a transform progress.
* This function is called by mid-layer, not GUI (which uses set_array_state instead).
* Version compatibility: v2.0.0.0 or later
* Parameters:
diff --git a/sys/dev/hwpmc/hwpmc_mod.c b/sys/dev/hwpmc/hwpmc_mod.c
index 9b85c989dc96..a6a6ae68996c 100644
--- a/sys/dev/hwpmc/hwpmc_mod.c
+++ b/sys/dev/hwpmc/hwpmc_mod.c
@@ -210,7 +210,7 @@ static int pmc_attach_one_process(struct proc *p, struct pmc *pm);
static bool pmc_can_allocate_row(int ri, enum pmc_mode mode);
static bool pmc_can_allocate_rowindex(struct proc *p, unsigned int ri,
int cpu);
-static int pmc_can_attach(struct pmc *pm, struct proc *p);
+static bool pmc_can_attach(struct pmc *pm, struct proc *p);
static void pmc_capture_user_callchain(int cpu, int soft,
struct trapframe *tf);
static void pmc_cleanup(void);
@@ -1029,19 +1029,19 @@ pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp)
* Check if PMC 'pm' may be attached to target process 't'.
*/
-static int
+static bool
pmc_can_attach(struct pmc *pm, struct proc *t)
{
struct proc *o; /* pmc owner */
struct ucred *oc, *tc; /* owner, target credentials */
- int decline_attach, i;
+ bool decline_attach;
/*
* A PMC's owner can always attach that PMC to itself.
*/
if ((o = pm->pm_owner->po_owner) == t)
- return 0;
+ return (true);
PROC_LOCK(o);
oc = o->p_ucred;
@@ -1066,18 +1066,17 @@ pmc_can_attach(struct pmc *pm, struct proc *t)
* Every one of the target's group ids, must be in the owner's
* group list.
*/
- for (i = 0; !decline_attach && i < tc->cr_ngroups; i++)
+ for (int i = 0; !decline_attach && i < tc->cr_ngroups; i++)
decline_attach = !groupmember(tc->cr_groups[i], oc);
-
- /* check the read and saved gids too */
- if (decline_attach == 0)
- decline_attach = !groupmember(tc->cr_rgid, oc) ||
+ if (!decline_attach)
+ decline_attach = !groupmember(tc->cr_gid, oc) ||
+ !groupmember(tc->cr_rgid, oc) ||
!groupmember(tc->cr_svgid, oc);
crfree(tc);
crfree(oc);
- return !decline_attach;
+ return (!decline_attach);
}
/*
@@ -1412,7 +1411,7 @@ pmc_process_exec(struct thread *td, struct pmckern_procexec *pk)
*/
for (ri = 0; ri < md->pmd_npmc; ri++) {
if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
- if (pmc_can_attach(pm, td->td_proc) != 0) {
+ if (pmc_can_attach(pm, td->td_proc)) {
pmc_detach_one_process(td->td_proc, pm,
PMC_FLAG_NONE);
}
diff --git a/sys/dev/hwt/hwt_ioctl.c b/sys/dev/hwt/hwt_ioctl.c
index 592db4931bb4..184c7e72f986 100644
--- a/sys/dev/hwt/hwt_ioctl.c
+++ b/sys/dev/hwt/hwt_ioctl.c
@@ -112,12 +112,11 @@ hwt_priv_check(struct proc *o, struct proc *t)
error = EPERM;
goto done;
}
-
- /* Check the read and saved GIDs too. */
- if (!groupmember(tc->cr_rgid, oc) ||
+ if (!groupmember(tc->cr_gid, oc) ||
+ !groupmember(tc->cr_rgid, oc) ||
!groupmember(tc->cr_svgid, oc)) {
- error = EPERM;
- goto done;
+ error = EPERM;
+ goto done;
}
done:
diff --git a/sys/dev/ice/ice_fw_logging.c b/sys/dev/ice/ice_fw_logging.c
index 0025a65d73fc..16a9ab6823bf 100644
--- a/sys/dev/ice/ice_fw_logging.c
+++ b/sys/dev/ice/ice_fw_logging.c
@@ -48,7 +48,7 @@ SDT_PROVIDER_DEFINE(ice_fwlog);
/*
* SDT DTrace probe fired when a firmware log message is received over the
- * AdminQ. It passes the buffer of the firwmare log message along with its
+ * AdminQ. It passes the buffer of the firmware log message along with its
* length in bytes to the DTrace framework.
*/
SDT_PROBE_DEFINE2(ice_fwlog, , , message, "uint8_t *", "int");
diff --git a/sys/dev/ice/ice_lib.c b/sys/dev/ice/ice_lib.c
index 442111e5ffaf..8b6349f686eb 100644
--- a/sys/dev/ice/ice_lib.c
+++ b/sys/dev/ice/ice_lib.c
@@ -7818,7 +7818,8 @@ ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter)
case IFCOUNTER_OPACKETS:
return (es->tx_unicast + es->tx_multicast + es->tx_broadcast);
case IFCOUNTER_OERRORS:
- return (es->tx_errors);
+ return (if_get_counter_default(vsi->sc->ifp, counter) +
+ es->tx_errors);
case IFCOUNTER_COLLISIONS:
return (0);
case IFCOUNTER_IBYTES:
@@ -7832,7 +7833,8 @@ ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter)
case IFCOUNTER_IQDROPS:
return (es->rx_discards);
case IFCOUNTER_OQDROPS:
- return (hs->tx_dropped_link_down);
+ return (if_get_counter_default(vsi->sc->ifp, counter) +
+ hs->tx_dropped_link_down);
case IFCOUNTER_NOPROTO:
return (es->rx_unknown_protocol);
default:
diff --git a/sys/dev/ichsmb/ichsmb_pci.c b/sys/dev/ichsmb/ichsmb_pci.c
index 728bb942d503..e4d87fe1fed2 100644
--- a/sys/dev/ichsmb/ichsmb_pci.c
+++ b/sys/dev/ichsmb/ichsmb_pci.c
@@ -107,6 +107,7 @@
#define ID_COMETLAKE2 0x06a3
#define ID_TIGERLAKE 0xa0a3
#define ID_TIGERLAKE2 0x43a3
+#define ID_ELKHARTLAKE 0x4b23
#define ID_GEMINILAKE 0x31d4
#define ID_CEDARFORK 0x18df
#define ID_ICELAKE 0x34a3
@@ -206,6 +207,8 @@ static const struct pci_device_table ichsmb_devices[] = {
PCI_DESCR("Intel Tiger Lake SMBus controller") },
{ PCI_DEV(PCI_VENDOR_INTEL, ID_TIGERLAKE2),
PCI_DESCR("Intel Tiger Lake SMBus controller") },
+ { PCI_DEV(PCI_VENDOR_INTEL, ID_ELKHARTLAKE),
+ PCI_DESCR("Intel Elkhart Lake SMBus controller") },
{ PCI_DEV(PCI_VENDOR_INTEL, ID_GEMINILAKE),
PCI_DESCR("Intel Gemini Lake SMBus controller") },
{ PCI_DEV(PCI_VENDOR_INTEL, ID_CEDARFORK),
diff --git a/sys/dev/ichwd/i6300esbwd.c b/sys/dev/ichwd/i6300esbwd.c
new file mode 100644
index 000000000000..03d504a350aa
--- /dev/null
+++ b/sys/dev/ichwd/i6300esbwd.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+/*
+ * Reference: Intel 6300ESB Controller Hub Datasheet Section 16
+ */
+
+#include <sys/param.h>
+#include <sys/eventhandler.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/sysctl.h>
+#include <sys/errno.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+#include <sys/watchdog.h>
+
+#include <dev/pci/pcireg.h>
+
+#include <dev/ichwd/ichwd.h>
+#include <dev/ichwd/i6300esbwd.h>
+
+#include <x86/pci_cfgreg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pci_private.h>
+
+struct i6300esbwd_softc {
+ device_t dev;
+ int res_id;
+ struct resource *res;
+ eventhandler_tag ev_tag;
+ bool locked;
+};
+
+static const struct i6300esbwd_pci_id {
+ uint16_t id;
+ const char *name;
+} i6300esbwd_pci_devices[] = {
+ { DEVICEID_6300ESB_2, "6300ESB Watchdog Timer" },
+};
+
+static uint16_t __unused
+i6300esbwd_cfg_read(struct i6300esbwd_softc *sc)
+{
+ return (pci_read_config(sc->dev, WDT_CONFIG_REG, 2));
+}
+
+static void
+i6300esbwd_cfg_write(struct i6300esbwd_softc *sc, uint16_t val)
+{
+ pci_write_config(sc->dev, WDT_CONFIG_REG, val, 2);
+}
+
+static uint8_t
+i6300esbwd_lock_read(struct i6300esbwd_softc *sc)
+{
+ return (pci_read_config(sc->dev, WDT_LOCK_REG, 1));
+}
+
+static void
+i6300esbwd_lock_write(struct i6300esbwd_softc *sc, uint8_t val)
+{
+ pci_write_config(sc->dev, WDT_LOCK_REG, val, 1);
+}
+
+/*
+ * According to Intel 6300ESB I/O Controller Hub Datasheet 16.5.2,
+ * the resource should be unlocked before modifying any registers.
+ * The way to unlock is by writing 0x80 and 0x86 to the reload register.
+ */
+static void
+i6300esbwd_unlock_res(struct i6300esbwd_softc *sc)
+{
+ bus_write_2(sc->res, WDT_RELOAD_REG, WDT_UNLOCK_SEQ_1_VAL);
+ bus_write_2(sc->res, WDT_RELOAD_REG, WDT_UNLOCK_SEQ_2_VAL);
+}
+
+static int
+i6300esbwd_sysctl_locked(SYSCTL_HANDLER_ARGS)
+{
+ struct i6300esbwd_softc *sc = (struct i6300esbwd_softc *)arg1;
+ int error;
+ int result;
+
+ result = sc->locked;
+ error = sysctl_handle_int(oidp, &result, 0, req);
+
+ if (error || !req->newptr)
+ return (error);
+
+ if (result == 1 && !sc->locked) {
+ i6300esbwd_lock_write(sc, i6300esbwd_lock_read(sc) | WDT_LOCK);
+ sc->locked = true;
+ }
+
+ return (0);
+}
+
+static void
+i6300esbwd_event(void *arg, unsigned int cmd, int *error)
+{
+ struct i6300esbwd_softc *sc = arg;
+ uint32_t timeout;
+ uint16_t regval;
+
+ cmd &= WD_INTERVAL;
+ if (cmd != 0 &&
+ (cmd < WD_TO_1MS || (cmd - WD_TO_1MS) >= WDT_PRELOAD_BIT)) {
+ *error = EINVAL;
+ return;
+ }
+ timeout = 1 << (cmd - WD_TO_1MS);
+
+ /* reset the timer to prevent a timeout that is about to occur */
+ i6300esbwd_unlock_res(sc);
+ bus_write_2(sc->res, WDT_RELOAD_REG, WDT_RELOAD);
+
+ if (!cmd) {
+ /*
+ * when the lock is enabled, we are unable to overwrite LOCK
+ * register
+ */
+ if (sc->locked)
+ *error = EPERM;
+ else
+ i6300esbwd_lock_write(sc,
+ i6300esbwd_lock_read(sc) & ~WDT_ENABLE);
+ return;
+ }
+
+ i6300esbwd_unlock_res(sc);
+ bus_write_4(sc->res, WDT_PRELOAD_1_REG, timeout);
+
+ i6300esbwd_unlock_res(sc);
+ bus_write_4(sc->res, WDT_PRELOAD_2_REG, timeout);
+
+ i6300esbwd_unlock_res(sc);
+ bus_write_2(sc->res, WDT_RELOAD_REG, WDT_RELOAD);
+
+ if (!sc->locked) {
+ i6300esbwd_lock_write(sc, WDT_ENABLE);
+ regval = i6300esbwd_lock_read(sc);
+ sc->locked = regval & WDT_LOCK;
+ }
+}
+
+static int
+i6300esbwd_probe(device_t dev)
+{
+ const struct i6300esbwd_pci_id *pci_id;
+ uint16_t pci_dev_id;
+ int err = ENXIO;
+
+ if (pci_get_vendor(dev) != VENDORID_INTEL)
+ goto end;
+
+ pci_dev_id = pci_get_device(dev);
+ for (pci_id = i6300esbwd_pci_devices;
+ pci_id < i6300esbwd_pci_devices + nitems(i6300esbwd_pci_devices);
+ ++pci_id) {
+ if (pci_id->id == pci_dev_id) {
+ device_set_desc(dev, pci_id->name);
+ err = BUS_PROBE_DEFAULT;
+ break;
+ }
+ }
+
+end:
+ return (err);
+}
+
+static int
+i6300esbwd_attach(device_t dev)
+{
+ struct i6300esbwd_softc *sc = device_get_softc(dev);
+ uint16_t regval;
+
+ sc->dev = dev;
+ sc->res_id = PCIR_BAR(0);
+ sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->res_id,
+ RF_ACTIVE);
+ if (sc->res == NULL) {
+ device_printf(dev, "unable to map memory region\n");
+ return (ENXIO);
+ }
+
+ i6300esbwd_cfg_write(sc, WDT_INT_TYPE_DISABLED_VAL);
+ regval = i6300esbwd_lock_read(sc);
+ if (regval & WDT_LOCK)
+ sc->locked = true;
+ else {
+ sc->locked = false;
+ i6300esbwd_lock_write(sc, WDT_TOUT_CNF_WT_MODE);
+ }
+
+ i6300esbwd_unlock_res(sc);
+ bus_write_2(sc->res, WDT_RELOAD_REG, WDT_RELOAD | WDT_TIMEOUT);
+
+ sc->ev_tag = EVENTHANDLER_REGISTER(watchdog_list, i6300esbwd_event, sc,
+ 0);
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "locked",
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
+ i6300esbwd_sysctl_locked, "I",
+ "Lock the timer so that we cannot disable it");
+
+ return (0);
+}
+
+static int
+i6300esbwd_detach(device_t dev)
+{
+ struct i6300esbwd_softc *sc = device_get_softc(dev);
+
+ if (sc->ev_tag)
+ EVENTHANDLER_DEREGISTER(watchdog_list, sc->ev_tag);
+
+ if (sc->res)
+ bus_release_resource(dev, SYS_RES_MEMORY, sc->res_id, sc->res);
+
+ return (0);
+}
+
+static device_method_t i6300esbwd_methods[] = {
+ DEVMETHOD(device_probe, i6300esbwd_probe),
+ DEVMETHOD(device_attach, i6300esbwd_attach),
+ DEVMETHOD(device_detach, i6300esbwd_detach),
+ DEVMETHOD(device_shutdown, i6300esbwd_detach),
+ DEVMETHOD_END
+};
+
+static driver_t i6300esbwd_driver = {
+ "i6300esbwd",
+ i6300esbwd_methods,
+ sizeof(struct i6300esbwd_softc),
+};
+
+DRIVER_MODULE(i6300esbwd, pci, i6300esbwd_driver, NULL, NULL);
diff --git a/sys/dev/ichwd/i6300esbwd.h b/sys/dev/ichwd/i6300esbwd.h
new file mode 100644
index 000000000000..39ed5d5a84f6
--- /dev/null
+++ b/sys/dev/ichwd/i6300esbwd.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#ifndef _I6300ESBWD_H_
+#define _I6300ESBWD_H_
+
+#define WDT_CONFIG_REG 0x60
+#define WDT_LOCK_REG 0x68
+
+#define WDT_PRELOAD_1_REG 0x00
+#define WDT_PRELOAD_2_REG 0x04
+#define WDT_INTR_REG 0x08
+#define WDT_RELOAD_REG 0x0C
+
+/* For config register */
+#define WDT_OUTPUT_EN (0x1 << 5)
+#define WDT_PRE_SEL (0x1 << 2)
+#define WDT_INT_TYPE_BITS (0x3)
+#define WDT_INT_TYPE_IRQ_VAL (0x0)
+#define WDT_INT_TYPE_RES_VAL (0x1)
+#define WDT_INT_TYPE_SMI_VAL (0x2)
+#define WDT_INT_TYPE_DISABLED_VAL (0x3)
+
+/* For lock register */
+#define WDT_TOUT_CNF_WT_MODE (0x0 << 2)
+#define WDT_TOUT_CNF_FR_MODE (0x1 << 2)
+#define WDT_ENABLE (0x02)
+#define WDT_LOCK (0x01)
+
+/* For preload 1/2 registers */
+#define WDT_PRELOAD_BIT 20
+#define WDT_PRELOAD_BITS ((0x1 << WDT_PRELOAD_BIT) - 1)
+
+/* For interrupt register */
+#define WDT_INTR_ACT (0x01 << 0)
+
+/* For reload register */
+#define WDT_TIMEOUT (0x01 << 9)
+#define WDT_RELOAD (0x01 << 8)
+#define WDT_UNLOCK_SEQ_1_VAL 0x80
+#define WDT_UNLOCK_SEQ_2_VAL 0x86
+
+#endif /* _I6300ESBWD_H_ */
diff --git a/sys/dev/ichwd/ichwd.c b/sys/dev/ichwd/ichwd.c
index cade2cc4fb45..5481553cc175 100644
--- a/sys/dev/ichwd/ichwd.c
+++ b/sys/dev/ichwd/ichwd.c
@@ -90,7 +90,7 @@ static struct ichwd_device ichwd_devices[] = {
{ DEVICEID_82801E, "Intel 82801E watchdog timer", 5, 1 },
{ DEVICEID_82801EB, "Intel 82801EB watchdog timer", 5, 1 },
{ DEVICEID_82801EBR, "Intel 82801EB/ER watchdog timer", 5, 1 },
- { DEVICEID_6300ESB, "Intel 6300ESB watchdog timer", 5, 1 },
+ { DEVICEID_6300ESB_1, "Intel 6300ESB watchdog timer", 5, 1 },
{ DEVICEID_82801FBR, "Intel 82801FB/FR watchdog timer", 6, 2 },
{ DEVICEID_ICH6M, "Intel ICH6M watchdog timer", 6, 2 },
{ DEVICEID_ICH6W, "Intel ICH6W watchdog timer", 6, 2 },
diff --git a/sys/dev/ichwd/ichwd.h b/sys/dev/ichwd/ichwd.h
index 90fda08b74c1..72d0ca1cd6aa 100644
--- a/sys/dev/ichwd/ichwd.h
+++ b/sys/dev/ichwd/ichwd.h
@@ -151,7 +151,8 @@ struct ichwd_softc {
#define DEVICEID_82801E 0x2450
#define DEVICEID_82801EB 0x24dc
#define DEVICEID_82801EBR 0x24d0
-#define DEVICEID_6300ESB 0x25a1
+#define DEVICEID_6300ESB_1 0x25a1
+#define DEVICEID_6300ESB_2 0x25ab
#define DEVICEID_82801FBR 0x2640
#define DEVICEID_ICH6M 0x2641
#define DEVICEID_ICH6W 0x2642
diff --git a/sys/dev/igc/if_igc.c b/sys/dev/igc/if_igc.c
index a1ae35c7aa43..f199a128c783 100644
--- a/sys/dev/igc/if_igc.c
+++ b/sys/dev/igc/if_igc.c
@@ -2599,8 +2599,8 @@ igc_if_get_counter(if_ctx_t ctx, ift_counter cnt)
sc->stats.ruc + sc->stats.roc +
sc->stats.mpc + sc->stats.htdpmc);
case IFCOUNTER_OERRORS:
- return (sc->stats.ecol + sc->stats.latecol +
- sc->watchdog_events);
+ return (if_get_counter_default(ifp, cnt) +
+ sc->stats.ecol + sc->stats.latecol + sc->watchdog_events);
default:
return (if_get_counter_default(ifp, cnt));
}
diff --git a/sys/dev/iicbus/iicbb.c b/sys/dev/iicbus/iicbb.c
index c344bda930b0..5f6423135f46 100644
--- a/sys/dev/iicbus/iicbb.c
+++ b/sys/dev/iicbus/iicbb.c
@@ -331,7 +331,7 @@ iicbb_getack(device_t dev)
{
struct iicbb_softc *sc = device_get_softc(dev);
int noack, err;
- int t;
+ int t = 0;
/* Release SDA so that the slave can drive it. */
err = iicbb_clockin(dev, 1);
@@ -341,12 +341,13 @@ iicbb_getack(device_t dev)
}
/* Sample SDA until ACK (low) or udelay runs out. */
- for (t = 0; t < sc->udelay; t++) {
+ do {
noack = I2C_GETSDA(dev);
if (!noack)
break;
DELAY(1);
- }
+ t++;
+ } while(t < sc->udelay);
DELAY(sc->udelay - t);
iicbb_clockout(dev);
diff --git a/sys/dev/iommu/busdma_iommu.c b/sys/dev/iommu/busdma_iommu.c
index 6856b0551dde..668ccf056463 100644
--- a/sys/dev/iommu/busdma_iommu.c
+++ b/sys/dev/iommu/busdma_iommu.c
@@ -114,8 +114,8 @@ iommu_bus_dma_is_dev_disabled(int domain, int bus, int slot, int func)
* domain, and must collectively be assigned to use either IOMMU or
* bounce mapping.
*/
-device_t
-iommu_get_requester(device_t dev, uint16_t *rid)
+int
+iommu_get_requester(device_t dev, device_t *requesterp, uint16_t *rid)
{
devclass_t pci_class;
device_t l, pci, pcib, pcip, pcibp, requester;
@@ -129,7 +129,8 @@ iommu_get_requester(device_t dev, uint16_t *rid)
pci = device_get_parent(dev);
if (pci == NULL || device_get_devclass(pci) != pci_class) {
*rid = 0; /* XXXKIB: Could be ACPI HID */
- return (requester);
+ *requesterp = NULL;
+ return (ENOTTY);
}
*rid = pci_get_rid(dev);
@@ -141,16 +142,39 @@ iommu_get_requester(device_t dev, uint16_t *rid)
*/
for (;;) {
pci = device_get_parent(l);
- KASSERT(pci != NULL, ("iommu_get_requester(%s): NULL parent "
- "for %s", device_get_name(dev), device_get_name(l)));
- KASSERT(device_get_devclass(pci) == pci_class,
- ("iommu_get_requester(%s): non-pci parent %s for %s",
- device_get_name(dev), device_get_name(pci),
- device_get_name(l)));
+ if (pci == NULL) {
+ if (bootverbose) {
+ printf(
+ "iommu_get_requester(%s): NULL parent for %s\n",
+ device_get_name(dev), device_get_name(l));
+ }
+ *rid = 0;
+ *requesterp = NULL;
+ return (ENXIO);
+ }
+ if (device_get_devclass(pci) != pci_class) {
+ if (bootverbose) {
+ printf(
+ "iommu_get_requester(%s): non-pci parent %s for %s\n",
+ device_get_name(dev), device_get_name(pci),
+ device_get_name(l));
+ }
+ *rid = 0;
+ *requesterp = NULL;
+ return (ENXIO);
+ }
pcib = device_get_parent(pci);
- KASSERT(pcib != NULL, ("iommu_get_requester(%s): NULL bridge "
- "for %s", device_get_name(dev), device_get_name(pci)));
+ if (pcib == NULL) {
+ if (bootverbose) {
+ printf(
+ "iommu_get_requester(%s): NULL bridge for %s\n",
+ device_get_name(dev), device_get_name(pci));
+ }
+ *rid = 0;
+ *requesterp = NULL;
+ return (ENXIO);
+ }
/*
* The parent of our "bridge" isn't another PCI bus,
@@ -229,7 +253,8 @@ iommu_get_requester(device_t dev, uint16_t *rid)
}
}
}
- return (requester);
+ *requesterp = requester;
+ return (0);
}
struct iommu_ctx *
@@ -237,10 +262,13 @@ iommu_instantiate_ctx(struct iommu_unit *unit, device_t dev, bool rmrr)
{
device_t requester;
struct iommu_ctx *ctx;
+ int error;
bool disabled;
uint16_t rid;
- requester = iommu_get_requester(dev, &rid);
+ error = iommu_get_requester(dev, &requester, &rid);
+ if (error != 0)
+ return (NULL);
/*
* If the user requested the IOMMU disabled for the device, we
diff --git a/sys/dev/iommu/iommu.h b/sys/dev/iommu/iommu.h
index b1858f0df9f7..55044042c5d2 100644
--- a/sys/dev/iommu/iommu.h
+++ b/sys/dev/iommu/iommu.h
@@ -170,7 +170,7 @@ void iommu_domain_unload(struct iommu_domain *domain,
void iommu_unit_pre_instantiate_ctx(struct iommu_unit *iommu);
struct iommu_ctx *iommu_instantiate_ctx(struct iommu_unit *iommu,
device_t dev, bool rmrr);
-device_t iommu_get_requester(device_t dev, uint16_t *rid);
+int iommu_get_requester(device_t dev, device_t *requester, uint16_t *rid);
int iommu_init_busdma(struct iommu_unit *unit);
void iommu_fini_busdma(struct iommu_unit *unit);
diff --git a/sys/dev/ipw/if_ipw.c b/sys/dev/ipw/if_ipw.c
index 01d713cdae18..9db562669487 100644
--- a/sys/dev/ipw/if_ipw.c
+++ b/sys/dev/ipw/if_ipw.c
@@ -283,6 +283,8 @@ ipw_attach(device_t dev)
| IEEE80211_C_WPA /* 802.11i supported */
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
/* read MAC address from EEPROM */
val = ipw_read_prom_word(sc, IPW_EEPROM_MAC + 0);
ic->ic_macaddr[0] = val >> 8;
@@ -1557,6 +1559,7 @@ ipw_tx_start(struct ipw_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
wh = mtod(m0, struct ieee80211_frame *);
+ ieee80211_output_seqno_assign(ni, -1, m0);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
diff --git a/sys/dev/irdma/irdma_cm.c b/sys/dev/irdma/irdma_cm.c
index 450fae662dd8..d4d4f328fb43 100644
--- a/sys/dev/irdma/irdma_cm.c
+++ b/sys/dev/irdma/irdma_cm.c
@@ -1316,7 +1316,7 @@ irdma_cm_timer_tick(struct timer_list *t)
struct irdma_timer_entry *send_entry, *close_entry;
struct list_head *list_core_temp;
struct list_head *list_node;
- struct irdma_cm_core *cm_core = from_timer(cm_core, t, tcp_timer);
+ struct irdma_cm_core *cm_core = timer_container_of(cm_core, t, tcp_timer);
struct irdma_sc_vsi *vsi;
u32 settimer = 0;
unsigned long timetosend;
diff --git a/sys/dev/irdma/irdma_utils.c b/sys/dev/irdma/irdma_utils.c
index 5fc37022981f..038f1980082b 100644
--- a/sys/dev/irdma/irdma_utils.c
+++ b/sys/dev/irdma/irdma_utils.c
@@ -876,7 +876,7 @@ irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred)
static void
irdma_terminate_timeout(struct timer_list *t)
{
- struct irdma_qp *iwqp = from_timer(iwqp, t, terminate_timer);
+ struct irdma_qp *iwqp = timer_container_of(iwqp, t, terminate_timer);
struct irdma_sc_qp *qp = &iwqp->sc_qp;
irdma_terminate_done(qp, 1);
@@ -1528,7 +1528,7 @@ static void
irdma_hw_stats_timeout(struct timer_list *t)
{
struct irdma_vsi_pestat *pf_devstat =
- from_timer(pf_devstat, t, stats_timer);
+ timer_container_of(pf_devstat, t, stats_timer);
struct irdma_sc_vsi *sc_vsi = pf_devstat->vsi;
if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
diff --git a/sys/dev/iwi/if_iwi.c b/sys/dev/iwi/if_iwi.c
index 3a410a5cbf2c..26b8037186a6 100644
--- a/sys/dev/iwi/if_iwi.c
+++ b/sys/dev/iwi/if_iwi.c
@@ -371,6 +371,8 @@ iwi_attach(device_t dev)
#endif
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
/* read MAC address from EEPROM */
val = iwi_read_prom_word(sc, IWI_EEPROM_MAC + 0);
ic->ic_macaddr[0] = val & 0xff;
@@ -1834,6 +1836,8 @@ iwi_tx_start(struct iwi_softc *sc, struct mbuf *m0, struct ieee80211_node *ni,
} else
staid = 0;
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
diff --git a/sys/dev/iwm/if_iwm.c b/sys/dev/iwm/if_iwm.c
index 1e9090310ece..9f590f11864c 100644
--- a/sys/dev/iwm/if_iwm.c
+++ b/sys/dev/iwm/if_iwm.c
@@ -6142,7 +6142,8 @@ iwm_attach(device_t dev)
// IEEE80211_C_BGSCAN /* capable of bg scanning */
;
/* Advertise full-offload scanning */
- ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SCAN_OFFLOAD;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
sc->sc_phyctxt[i].id = i;
sc->sc_phyctxt[i].color = 0;
diff --git a/sys/dev/iwn/if_iwn.c b/sys/dev/iwn/if_iwn.c
index b7c452a4f074..a949103f20d4 100644
--- a/sys/dev/iwn/if_iwn.c
+++ b/sys/dev/iwn/if_iwn.c
@@ -584,6 +584,11 @@ iwn_attach(device_t dev)
| IEEE80211_C_PMGT /* Station-side power mgmt */
;
+ /* Driver / firmware assigned sequence numbers */
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+ /* Don't originate null data frames in net80211 */
+ ic->ic_flags_ext |= IEEE80211_FEXT_NO_NULLDATA;
+
/* Read MAC address, channels, etc from EEPROM. */
if ((error = iwn_read_eeprom(sc, ic->ic_macaddr)) != 0) {
device_printf(dev, "could not read EEPROM, error %d\n",
@@ -4577,6 +4582,9 @@ iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
* XXX TODO: Group addressed frames aren't aggregated and must
* go to the normal non-aggregation queue, and have a NONQOS TID
* assigned from net80211.
+ *
+ * TODO: same with NULL QOS frames, which we shouldn't be sending
+ * anyway ourselves (and should stub out / warn / etc.)
*/
ac = M_WME_GETAC(m);
@@ -4589,6 +4597,10 @@ iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
ac = *(int *)tap->txa_private;
}
+ /* Only assign if not A-MPDU; the A-MPDU TX path will do its own */
+ if ((m->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m);
+
/* Encrypt the frame if need be. */
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
/* Retrieve key for TX. */
diff --git a/sys/dev/iwx/if_iwx.c b/sys/dev/iwx/if_iwx.c
index 1fe531d69933..3b29c8e78b97 100644
--- a/sys/dev/iwx/if_iwx.c
+++ b/sys/dev/iwx/if_iwx.c
@@ -10474,6 +10474,8 @@ iwx_attach(device_t dev)
ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
/* Enable seqno offload */
ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+ /* Don't send null data frames; let firmware do it */
+ ic->ic_flags_ext |= IEEE80211_FEXT_NO_NULLDATA;
ic->ic_txstream = 2;
ic->ic_rxstream = 2;
diff --git a/sys/dev/ixgbe/if_ix.c b/sys/dev/ixgbe/if_ix.c
index 73c0fd1ab16f..6d08bd49bc04 100644
--- a/sys/dev/ixgbe/if_ix.c
+++ b/sys/dev/ixgbe/if_ix.c
@@ -184,6 +184,7 @@ static int ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
int);
static void ixgbe_if_queues_free(if_ctx_t);
static void ixgbe_if_timer(if_ctx_t, uint16_t);
+static const char *ixgbe_link_speed_to_str(u32 link_speed);
static void ixgbe_if_update_admin_status(if_ctx_t);
static void ixgbe_if_vlan_register(if_ctx_t, u16);
static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
@@ -1349,8 +1350,6 @@ ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
return (0);
case IFCOUNTER_IQDROPS:
return (sc->iqdrops);
- case IFCOUNTER_OQDROPS:
- return (0);
case IFCOUNTER_IERRORS:
return (sc->ierrors);
default:
@@ -4027,6 +4026,33 @@ ixgbe_if_stop(if_ctx_t ctx)
} /* ixgbe_if_stop */
/************************************************************************
+ * ixgbe_link_speed_to_str - Convert link speed to string
+ *
+ * Helper function to convert link speed constants to human-readable
+ * string representations in conventional Gbps or Mbps.
+ ************************************************************************/
+static const char *
+ixgbe_link_speed_to_str(u32 link_speed)
+{
+ switch (link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ return "10 Gbps";
+ case IXGBE_LINK_SPEED_5GB_FULL:
+ return "5 Gbps";
+ case IXGBE_LINK_SPEED_2_5GB_FULL:
+ return "2.5 Gbps";
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ return "1 Gbps";
+ case IXGBE_LINK_SPEED_100_FULL:
+ return "100 Mbps";
+ case IXGBE_LINK_SPEED_10_FULL:
+ return "10 Mbps";
+ default:
+ return "Unknown";
+ }
+} /* ixgbe_link_speed_to_str */
+
+/************************************************************************
* ixgbe_update_link_status - Update OS on link state
*
* Note: Only updates the OS on the cached link state.
@@ -4042,9 +4068,9 @@ ixgbe_if_update_admin_status(if_ctx_t ctx)
if (sc->link_up) {
if (sc->link_active == false) {
if (bootverbose)
- device_printf(dev, "Link is up %d Gbps %s \n",
- ((sc->link_speed == 128) ? 10 : 1),
- "Full Duplex");
+ device_printf(dev,
+ "Link is up %s Full Duplex\n",
+ ixgbe_link_speed_to_str(sc->link_speed));
sc->link_active = true;
/* Update any Flow Control changes */
ixgbe_fc_enable(&sc->hw);
diff --git a/sys/dev/ixgbe/ixgbe_e610.c b/sys/dev/ixgbe/ixgbe_e610.c
index 95c6dca416c6..18c4612446e0 100644
--- a/sys/dev/ixgbe/ixgbe_e610.c
+++ b/sys/dev/ixgbe/ixgbe_e610.c
@@ -1400,40 +1400,6 @@ s32 ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
}
/**
- * ixgbe_is_media_cage_present - check if media cage is present
- * @hw: pointer to the HW struct
- *
- * Identify presence of media cage using the ACI command (0x06E0).
- *
- * Return: true if media cage is present, else false. If no cage, then
- * media type is backplane or BASE-T.
- */
-static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw)
-{
- struct ixgbe_aci_cmd_get_link_topo *cmd;
- struct ixgbe_aci_desc desc;
-
- cmd = &desc.params.get_link_topo;
-
- ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
-
- cmd->addr.topo_params.node_type_ctx =
- (IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT <<
- IXGBE_ACI_LINK_TOPO_NODE_CTX_S);
-
- /* set node type */
- cmd->addr.topo_params.node_type_ctx |=
- (IXGBE_ACI_LINK_TOPO_NODE_TYPE_M &
- IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE);
-
- /* Node type cage can be used to determine if cage is present. If AQC
- * returns error (ENOENT), then no cage present. If no cage present then
- * connection type is backplane or BASE-T.
- */
- return ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL);
-}
-
-/**
* ixgbe_get_media_type_from_phy_type - Gets media type based on phy type
* @hw: pointer to the HW struct
*
diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c
index 43c3af056b67..261f76055901 100644
--- a/sys/dev/ixl/if_ixl.c
+++ b/sys/dev/ixl/if_ixl.c
@@ -1785,7 +1785,7 @@ ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
case IFCOUNTER_OPACKETS:
return (vsi->opackets);
case IFCOUNTER_OERRORS:
- return (vsi->oerrors);
+ return (if_get_counter_default(ifp, cnt) + vsi->oerrors);
case IFCOUNTER_COLLISIONS:
/* Collisions are by standard impossible in 40G/10G Ethernet */
return (0);
@@ -1800,7 +1800,7 @@ ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
case IFCOUNTER_IQDROPS:
return (vsi->iqdrops);
case IFCOUNTER_OQDROPS:
- return (vsi->oqdrops);
+ return (if_get_counter_default(ifp, cnt) + vsi->oqdrops);
case IFCOUNTER_NOPROTO:
return (vsi->noproto);
default:
diff --git a/sys/dev/malo/if_malo.c b/sys/dev/malo/if_malo.c
index 79a3213c6802..2e4f3967ace4 100644
--- a/sys/dev/malo/if_malo.c
+++ b/sys/dev/malo/if_malo.c
@@ -263,6 +263,8 @@ malo_attach(uint16_t devid, struct malo_softc *sc)
;
IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->malo_hwspecs.macaddr);
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
/*
* Transmit requires space in the packet for a special format transmit
* record and optional padding between this record and the payload.
@@ -1040,6 +1042,8 @@ malo_tx_start(struct malo_softc *sc, struct ieee80211_node *ni,
} else
qos = 0;
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (iswep) {
struct ieee80211_key *k;
diff --git a/sys/dev/mpi3mr/mpi3mr.c b/sys/dev/mpi3mr/mpi3mr.c
index 99edd3542619..bcf8f46ddf5d 100644
--- a/sys/dev/mpi3mr/mpi3mr.c
+++ b/sys/dev/mpi3mr/mpi3mr.c
@@ -2799,10 +2799,11 @@ retry_init:
U32 fault = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) &
MPI3_SYSIF_FAULT_CODE_MASK;
- if (fault == MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER)
+ if (fault == MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER) {
mpi3mr_dprint(sc, MPI3MR_INFO,
"controller faulted due to insufficient power, try by connecting it in a different slot\n");
goto err;
+ }
U32 host_diagnostic;
timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
@@ -4486,7 +4487,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_softc *sc,
Mpi3SuccessReplyDescriptor_t *success_desc;
Mpi3DefaultReply_t *def_reply = NULL;
struct mpi3mr_drvr_cmd *cmdptr = NULL;
- Mpi3SCSIIOReply_t *scsi_reply;
+ Mpi3SCSIIOReply_t *scsi_reply = NULL;
U8 *sense_buf = NULL;
*reply_dma = 0;
@@ -4589,7 +4590,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_softc *sc,
}
}
out:
- if (sense_buf != NULL)
+ if (scsi_reply != NULL && sense_buf != NULL)
mpi3mr_repost_sense_buf(sc,
scsi_reply->SenseDataBufferAddress);
return;
@@ -6161,7 +6162,7 @@ static int mpi3mr_issue_reset(struct mpi3mr_softc *sc, U16 reset_type,
{
int retval = -1;
U8 unlock_retry_count = 0;
- U32 host_diagnostic, ioc_status, ioc_config, scratch_pad0;
+ U32 host_diagnostic = 0, ioc_status, ioc_config, scratch_pad0;
U32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
diff --git a/sys/dev/mpi3mr/mpi3mr_cam.c b/sys/dev/mpi3mr/mpi3mr_cam.c
index 77e25339a1a9..a5120e2788db 100644
--- a/sys/dev/mpi3mr/mpi3mr_cam.c
+++ b/sys/dev/mpi3mr/mpi3mr_cam.c
@@ -1856,10 +1856,11 @@ int mpi3mr_remove_device_from_os(struct mpi3mr_softc *sc, U16 handle)
"Poll reply queue once\n", target_outstanding, target->per_id);
mpi3mr_poll_pend_io_completions(sc);
target_outstanding = mpi3mr_atomic_read(&target->outstanding);
- if (target_outstanding)
+ if (target_outstanding) {
target_outstanding = mpi3mr_atomic_read(&target->outstanding);
mpi3mr_dprint(sc, MPI3MR_ERROR, "[%2d] outstanding IOs present on target: %d "
- "despite poll\n", target_outstanding, target->per_id);
+ "despite poll\n", target_outstanding, target->per_id);
+ }
}
if (target->exposed_to_os && !sc->reset_in_progress) {
diff --git a/sys/dev/mpr/mpr.c b/sys/dev/mpr/mpr.c
index d1c572e40669..262d6b58b705 100644
--- a/sys/dev/mpr/mpr.c
+++ b/sys/dev/mpr/mpr.c
@@ -1729,6 +1729,7 @@ mpr_get_tunables(struct mpr_softc *sc)
sc->enable_ssu = MPR_SSU_ENABLE_SSD_DISABLE_HDD;
sc->spinup_wait_time = DEFAULT_SPINUP_WAIT;
sc->use_phynum = 1;
+ sc->encl_min_slots = 0;
sc->max_reqframes = MPR_REQ_FRAMES;
sc->max_prireqframes = MPR_PRI_REQ_FRAMES;
sc->max_replyframes = MPR_REPLY_FRAMES;
@@ -1748,6 +1749,7 @@ mpr_get_tunables(struct mpr_softc *sc)
TUNABLE_INT_FETCH("hw.mpr.enable_ssu", &sc->enable_ssu);
TUNABLE_INT_FETCH("hw.mpr.spinup_wait_time", &sc->spinup_wait_time);
TUNABLE_INT_FETCH("hw.mpr.use_phy_num", &sc->use_phynum);
+ TUNABLE_INT_FETCH("hw.mpr.encl_min_slots", &sc->encl_min_slots);
TUNABLE_INT_FETCH("hw.mpr.max_reqframes", &sc->max_reqframes);
TUNABLE_INT_FETCH("hw.mpr.max_prireqframes", &sc->max_prireqframes);
TUNABLE_INT_FETCH("hw.mpr.max_replyframes", &sc->max_replyframes);
@@ -1797,6 +1799,10 @@ mpr_get_tunables(struct mpr_softc *sc)
device_get_unit(sc->mpr_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->use_phynum);
+ snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.encl_min_slots",
+ device_get_unit(sc->mpr_dev));
+ TUNABLE_INT_FETCH(tmpstr, &sc->encl_min_slots);
+
snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_reqframes",
device_get_unit(sc->mpr_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->max_reqframes);
@@ -1951,6 +1957,10 @@ mpr_setup_sysctl(struct mpr_softc *sc)
SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "prp_page_alloc_fail", CTLFLAG_RD,
&sc->prp_page_alloc_fail, "PRP page allocation failures");
+
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "encl_min_slots", CTLFLAG_RW, &sc->encl_min_slots, 0,
+ "force enclosure minimum slots");
}
static struct mpr_debug_string {
diff --git a/sys/dev/mpr/mpr_mapping.c b/sys/dev/mpr/mpr_mapping.c
index f9a9ac1c53d0..38aa4dfc7ef2 100644
--- a/sys/dev/mpr/mpr_mapping.c
+++ b/sys/dev/mpr/mpr_mapping.c
@@ -2785,6 +2785,8 @@ mpr_mapping_enclosure_dev_status_change_event(struct mpr_softc *sc,
* DPM, if it's being used.
*/
if (enc_idx != MPR_ENCTABLE_BAD_IDX) {
+ u16 new_num_slots;
+
et_entry = &sc->enclosure_table[enc_idx];
if (et_entry->init_complete &&
!et_entry->missing_count) {
@@ -2796,6 +2798,17 @@ mpr_mapping_enclosure_dev_status_change_event(struct mpr_softc *sc,
et_entry->enc_handle = le16toh(event_data->
EnclosureHandle);
et_entry->start_slot = le16toh(event_data->StartSlot);
+ new_num_slots = le16toh(event_data->NumSlots);
+ if (new_num_slots < sc->encl_min_slots) {
+ mpr_dprint(sc, MPR_MAPPING, "%s: Enclosure %d num_slots %d, overriding with %d.\n",
+ __func__, enc_idx, new_num_slots, sc->encl_min_slots);
+ new_num_slots = sc->encl_min_slots;
+ }
+ if (et_entry->num_slots != new_num_slots) {
+ mpr_dprint(sc, MPR_MAPPING, "%s: Enclosure %d old num_slots %d, new %d.\n",
+ __func__, enc_idx, et_entry->num_slots, sc->encl_min_slots);
+ et_entry->num_slots = new_num_slots;
+ }
saved_phy_bits = et_entry->phy_bits;
et_entry->phy_bits |= le32toh(event_data->PhyBits);
if (saved_phy_bits != et_entry->phy_bits)
@@ -2858,6 +2871,11 @@ mpr_mapping_enclosure_dev_status_change_event(struct mpr_softc *sc,
et_entry->start_index = MPR_MAPTABLE_BAD_IDX;
et_entry->dpm_entry_num = MPR_DPM_BAD_IDX;
et_entry->num_slots = le16toh(event_data->NumSlots);
+ if (et_entry->num_slots < sc->encl_min_slots) {
+ mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: Enclosure %d num_slots is %d, overriding with %d.\n",
+ __func__, enc_idx, et_entry->num_slots, sc->encl_min_slots);
+ et_entry->num_slots = sc->encl_min_slots;
+ }
et_entry->start_slot = le16toh(event_data->StartSlot);
et_entry->phy_bits = le32toh(event_data->PhyBits);
}
diff --git a/sys/dev/mpr/mprvar.h b/sys/dev/mpr/mprvar.h
index 0f1743f4266e..93f3fbffe079 100644
--- a/sys/dev/mpr/mprvar.h
+++ b/sys/dev/mpr/mprvar.h
@@ -366,6 +366,7 @@ struct mpr_softc {
int spinup_wait_time;
int use_phynum;
int dump_reqs_alltypes;
+ int encl_min_slots;
uint64_t chain_alloc_fail;
uint64_t prp_page_alloc_fail;
struct sysctl_ctx_list sysctl_ctx;
diff --git a/sys/dev/mwl/if_mwl.c b/sys/dev/mwl/if_mwl.c
index 2570cbce525b..9f3d34f4f50d 100644
--- a/sys/dev/mwl/if_mwl.c
+++ b/sys/dev/mwl/if_mwl.c
@@ -433,6 +433,8 @@ mwl_attach(uint16_t devid, struct mwl_softc *sc)
| IEEE80211_HTC_SMPS /* SMPS available */
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
/*
* Mark h/w crypto support.
* XXX no way to query h/w support.
@@ -1797,7 +1799,7 @@ mwl_updateslot(struct ieee80211com *ic)
return;
/*
- * Calculate the ERP flags. The firwmare will use
+ * Calculate the ERP flags. The firmware will use
* this to carry out the appropriate measures.
*/
prot = 0;
@@ -3087,6 +3089,8 @@ mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *
} else
qos = 0;
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (iswep) {
const struct ieee80211_cipher *cip;
struct ieee80211_key *k;
@@ -4017,7 +4021,7 @@ mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
- if (ni->ni_chw != IEEE80211_STA_RX_BW_40)
+ if (ni->ni_chw != NET80211_STA_RX_BW_40)
pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
}
return pi;
diff --git a/sys/dev/nvme/nvme.c b/sys/dev/nvme/nvme.c
index 84f365024f13..ead91f0d01fe 100644
--- a/sys/dev/nvme/nvme.c
+++ b/sys/dev/nvme/nvme.c
@@ -295,7 +295,6 @@ nvme_register_consumer(nvme_cons_ns_fn_t ns_fn, nvme_cons_ctrlr_fn_t ctrlr_fn,
void
nvme_unregister_consumer(struct nvme_consumer *consumer)
{
-
consumer->id = INVALID_CONSUMER_ID;
}
diff --git a/sys/dev/nvme/nvme_ahci.c b/sys/dev/nvme/nvme_ahci.c
index 888207a454f7..b06661226d34 100644
--- a/sys/dev/nvme/nvme_ahci.c
+++ b/sys/dev/nvme/nvme_ahci.c
@@ -124,6 +124,5 @@ bad:
static int
nvme_ahci_detach(device_t dev)
{
-
return (nvme_detach(dev));
}
diff --git a/sys/dev/nvme/nvme_ctrlr.c b/sys/dev/nvme/nvme_ctrlr.c
index fd7f00ced14b..3a1894bf754d 100644
--- a/sys/dev/nvme/nvme_ctrlr.c
+++ b/sys/dev/nvme/nvme_ctrlr.c
@@ -41,6 +41,9 @@
#include <sys/endian.h>
#include <sys/stdarg.h>
#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_map.h>
#include "nvme_private.h"
#include "nvme_linux.h"
@@ -597,7 +600,6 @@ nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
static bool
is_log_page_id_valid(uint8_t page_id)
{
-
switch (page_id) {
case NVME_LOG_ERROR:
case NVME_LOG_HEALTH_INFORMATION:
@@ -653,7 +655,6 @@ static void
nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
uint8_t state)
{
-
if (state & NVME_CRIT_WARN_ST_AVAILABLE_SPARE)
nvme_printf(ctrlr, "SMART WARNING: available spare space below threshold\n");
@@ -781,7 +782,6 @@ nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{
-
ctrlr->int_coal_time = 0;
TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
&ctrlr->int_coal_time);
@@ -1268,6 +1268,34 @@ nvme_ctrlr_shared_handler(void *arg)
nvme_mmio_write_4(ctrlr, intmc, 1);
}
+#define NVME_MAX_PAGES (int)(1024 / sizeof(vm_page_t))
+
+static int
+nvme_user_ioctl_req(vm_offset_t addr, size_t len, bool is_read,
+ vm_page_t *upages, int max_pages, int *npagesp, struct nvme_request **req,
+ nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ vm_prot_t prot = VM_PROT_READ;
+ int err;
+
+ if (is_read)
+ prot |= VM_PROT_WRITE; /* Device will write to host memory */
+ err = vm_fault_hold_pages(&curproc->p_vmspace->vm_map,
+ addr, len, prot, upages, max_pages, npagesp);
+ if (err != 0)
+ return (err);
+ *req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg);
+ (*req)->payload = memdesc_vmpages(upages, len, addr & PAGE_MASK);
+ (*req)->payload_valid = true;
+ return (0);
+}
+
+static void
+nvme_user_ioctl_free(vm_page_t *pages, int npage)
+{
+ vm_page_unhold_pages(pages, npage);
+}
+
static void
nvme_pt_done(void *arg, const struct nvme_completion *cpl)
{
@@ -1290,30 +1318,28 @@ nvme_pt_done(void *arg, const struct nvme_completion *cpl)
int
nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
- struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
+ struct nvme_pt_command *pt, uint32_t nsid, int is_user,
int is_admin_cmd)
{
- struct nvme_request *req;
- struct mtx *mtx;
- struct buf *buf = NULL;
- int ret = 0;
+ struct nvme_request *req;
+ struct mtx *mtx;
+ int ret = 0;
+ int npages = 0;
+ vm_page_t upages[NVME_MAX_PAGES];
if (pt->len > 0) {
if (pt->len > ctrlr->max_xfer_size) {
- nvme_printf(ctrlr, "pt->len (%d) "
- "exceeds max_xfer_size (%d)\n", pt->len,
- ctrlr->max_xfer_size);
- return EIO;
+ nvme_printf(ctrlr,
+ "len (%d) exceeds max_xfer_size (%d)\n",
+ pt->len, ctrlr->max_xfer_size);
+ return (EIO);
}
- if (is_user_buffer) {
- buf = uma_zalloc(pbuf_zone, M_WAITOK);
- buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
- if (vmapbuf(buf, pt->buf, pt->len, 1) < 0) {
- ret = EFAULT;
- goto err;
- }
- req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
- M_WAITOK, nvme_pt_done, pt);
+ if (is_user) {
+ ret = nvme_user_ioctl_req((vm_offset_t)pt->buf, pt->len,
+ pt->is_read, upages, nitems(upages), &npages, &req,
+ nvme_pt_done, pt);
+ if (ret != 0)
+ return (ret);
} else
req = nvme_allocate_request_vaddr(pt->buf, pt->len,
M_WAITOK, nvme_pt_done, pt);
@@ -1347,11 +1373,8 @@ nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
mtx_unlock(mtx);
- if (buf != NULL) {
- vunmapbuf(buf);
-err:
- uma_zfree(pbuf_zone, buf);
- }
+ if (npages > 0)
+ nvme_user_ioctl_free(upages, npages);
return (ret);
}
@@ -1377,8 +1400,9 @@ nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
{
struct nvme_request *req;
struct mtx *mtx;
- struct buf *buf = NULL;
int ret = 0;
+ int npages = 0;
+ vm_page_t upages[NVME_MAX_PAGES];
/*
* We don't support metadata.
@@ -1389,28 +1413,16 @@ nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
if (npc->data_len > 0 && npc->addr != 0) {
if (npc->data_len > ctrlr->max_xfer_size) {
nvme_printf(ctrlr,
- "npc->data_len (%d) exceeds max_xfer_size (%d)\n",
+ "data_len (%d) exceeds max_xfer_size (%d)\n",
npc->data_len, ctrlr->max_xfer_size);
return (EIO);
}
- /*
- * We only support data out or data in commands, but not both at
- * once. However, there's some comands with lower bit cleared
- * that are really read commands, so we should filter & 3 == 0,
- * but don't.
- */
- if ((npc->opcode & 0x3) == 3)
- return (EINVAL);
if (is_user) {
- buf = uma_zalloc(pbuf_zone, M_WAITOK);
- buf->b_iocmd = npc->opcode & 1 ? BIO_WRITE : BIO_READ;
- if (vmapbuf(buf, (void *)(uintptr_t)npc->addr,
- npc->data_len, 1) < 0) {
- ret = EFAULT;
- goto err;
- }
- req = nvme_allocate_request_vaddr(buf->b_data,
- npc->data_len, M_WAITOK, nvme_npc_done, npc);
+ ret = nvme_user_ioctl_req(npc->addr, npc->data_len,
+ npc->opcode & 0x1, upages, nitems(upages), &npages,
+ &req, nvme_npc_done, npc);
+ if (ret != 0)
+ return (ret);
} else
req = nvme_allocate_request_vaddr(
(void *)(uintptr_t)npc->addr, npc->data_len,
@@ -1420,8 +1432,8 @@ nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
req->cmd.opc = npc->opcode;
req->cmd.fuse = npc->flags;
- req->cmd.rsvd2 = htole16(npc->cdw2);
- req->cmd.rsvd3 = htole16(npc->cdw3);
+ req->cmd.rsvd2 = htole32(npc->cdw2);
+ req->cmd.rsvd3 = htole32(npc->cdw3);
req->cmd.cdw10 = htole32(npc->cdw10);
req->cmd.cdw11 = htole32(npc->cdw11);
req->cmd.cdw12 = htole32(npc->cdw12);
@@ -1445,11 +1457,8 @@ nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
mtx_sleep(npc, mtx, PRIBIO, "nvme_npc", 0);
mtx_unlock(mtx);
- if (buf != NULL) {
- vunmapbuf(buf);
-err:
- uma_zfree(pbuf_zone, buf);
- }
+ if (npages > 0)
+ nvme_user_ioctl_free(upages, npages);
return (ret);
}
@@ -1776,7 +1785,6 @@ void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
struct nvme_request *req)
{
-
nvme_qpair_submit_request(&ctrlr->adminq, req);
}
@@ -1793,14 +1801,12 @@ nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
device_t
nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
{
-
return (ctrlr->dev);
}
const struct nvme_controller_data *
nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
{
-
return (&ctrlr->cdata);
}
@@ -1853,7 +1859,6 @@ nvme_ctrlr_suspend(struct nvme_controller *ctrlr)
int
nvme_ctrlr_resume(struct nvme_controller *ctrlr)
{
-
/*
* Can't touch failed controllers, so nothing to do to resume.
*/
diff --git a/sys/dev/nvme/nvme_ctrlr_cmd.c b/sys/dev/nvme/nvme_ctrlr_cmd.c
index 993a7718356d..5a44ed425acb 100644
--- a/sys/dev/nvme/nvme_ctrlr_cmd.c
+++ b/sys/dev/nvme/nvme_ctrlr_cmd.c
@@ -281,7 +281,6 @@ nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
struct nvme_error_information_entry *payload, uint32_t num_entries,
nvme_cb_fn_t cb_fn, void *cb_arg)
{
-
KASSERT(num_entries > 0, ("%s called with num_entries==0\n", __func__));
/* Controller's error log page entries is 0-based. */
@@ -302,7 +301,6 @@ nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
uint32_t nsid, struct nvme_health_information_page *payload,
nvme_cb_fn_t cb_fn, void *cb_arg)
{
-
nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_HEALTH_INFORMATION,
nsid, payload, sizeof(*payload), cb_fn, cb_arg);
}
@@ -311,7 +309,6 @@ void
nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
struct nvme_firmware_page *payload, nvme_cb_fn_t cb_fn, void *cb_arg)
{
-
nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_FIRMWARE_SLOT,
NVME_GLOBAL_NAMESPACE_TAG, payload, sizeof(*payload), cb_fn,
cb_arg);
diff --git a/sys/dev/nvme/nvme_ns.c b/sys/dev/nvme/nvme_ns.c
index 3f29382fe42f..e84d2066930e 100644
--- a/sys/dev/nvme/nvme_ns.c
+++ b/sys/dev/nvme/nvme_ns.c
@@ -129,7 +129,6 @@ static int
nvme_ns_close(struct cdev *dev __unused, int flags, int fmt __unused,
struct thread *td)
{
-
return (0);
}
@@ -231,7 +230,6 @@ nvme_ns_get_model_number(struct nvme_namespace *ns)
const struct nvme_namespace_data *
nvme_ns_get_data(struct nvme_namespace *ns)
{
-
return (&ns->data);
}
@@ -631,7 +629,6 @@ nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
void
nvme_ns_destruct(struct nvme_namespace *ns)
{
-
if (ns->cdev != NULL) {
if (ns->cdev->si_drv2 != NULL)
destroy_dev(ns->cdev->si_drv2);
diff --git a/sys/dev/nvme/nvme_pci.c b/sys/dev/nvme/nvme_pci.c
index 29b49b7df403..c07a68d2f0dc 100644
--- a/sys/dev/nvme/nvme_pci.c
+++ b/sys/dev/nvme/nvme_pci.c
@@ -151,7 +151,6 @@ nvme_pci_probe (device_t device)
static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{
-
ctrlr->resource_id = PCIR_BAR(0);
ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
diff --git a/sys/dev/nvme/nvme_private.h b/sys/dev/nvme/nvme_private.h
index 36f00fedc48e..52f9e12f8f9a 100644
--- a/sys/dev/nvme/nvme_private.h
+++ b/sys/dev/nvme/nvme_private.h
@@ -459,8 +459,7 @@ int nvme_detach(device_t dev);
* vast majority of these without waiting for a tick plus scheduling delays. Since
* these are on startup, this drastically reduces startup time.
*/
-static __inline
-void
+static __inline void
nvme_completion_poll(struct nvme_completion_poll_status *status)
{
int timeout = ticks + 10 * hz;
diff --git a/sys/dev/nvme/nvme_qpair.c b/sys/dev/nvme/nvme_qpair.c
index bd8626e32209..4f2c44da3b4f 100644
--- a/sys/dev/nvme/nvme_qpair.c
+++ b/sys/dev/nvme/nvme_qpair.c
@@ -793,7 +793,6 @@ nvme_admin_qpair_destroy(struct nvme_qpair *qpair)
void
nvme_io_qpair_destroy(struct nvme_qpair *qpair)
{
-
nvme_qpair_destroy(qpair);
}
@@ -1202,7 +1201,6 @@ _nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
void
nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{
-
mtx_lock(&qpair->lock);
_nvme_qpair_submit_request(qpair, req);
mtx_unlock(&qpair->lock);
@@ -1226,7 +1224,6 @@ nvme_qpair_enable(struct nvme_qpair *qpair)
void
nvme_qpair_reset(struct nvme_qpair *qpair)
{
-
qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0;
/*
diff --git a/sys/dev/nvme/nvme_sim.c b/sys/dev/nvme/nvme_sim.c
index 4974bb718222..a06774a64761 100644
--- a/sys/dev/nvme/nvme_sim.c
+++ b/sys/dev/nvme/nvme_sim.c
@@ -301,7 +301,6 @@ nvme_sim_action(struct cam_sim *sim, union ccb *ccb)
static void
nvme_sim_poll(struct cam_sim *sim)
{
-
nvme_ctrlr_poll(sim2ctrlr(sim));
}
diff --git a/sys/dev/nvme/nvme_sysctl.c b/sys/dev/nvme/nvme_sysctl.c
index a5a44721f9f9..50d19e730a16 100644
--- a/sys/dev/nvme/nvme_sysctl.c
+++ b/sys/dev/nvme/nvme_sysctl.c
@@ -153,7 +153,6 @@ nvme_sysctl_timeout_period(SYSCTL_HANDLER_ARGS)
static void
nvme_qpair_reset_stats(struct nvme_qpair *qpair)
{
-
/*
* Reset the values. Due to sanity checks in
* nvme_qpair_process_completions, we reset the number of interrupt
diff --git a/sys/dev/nvme/nvme_util.c b/sys/dev/nvme/nvme_util.c
index 0a07653a7378..cb0ba729ac96 100644
--- a/sys/dev/nvme/nvme_util.c
+++ b/sys/dev/nvme/nvme_util.c
@@ -208,31 +208,33 @@ nvme_opcode_sbuf(bool admin, uint8_t opc, struct sbuf *sb)
if (s == NULL)
sbuf_printf(sb, "%s (%02x)", type, opc);
else
- sbuf_printf(sb, "%s", s);
+ sbuf_printf(sb, "%s (%02x)", s, opc);
}
void
nvme_sc_sbuf(const struct nvme_completion *cpl, struct sbuf *sb)
{
const char *s, *type;
- uint16_t status;
+ uint16_t status, sc, sct;
status = le16toh(cpl->status);
- switch (NVME_STATUS_GET_SCT(status)) {
+ sc = NVME_STATUS_GET_SC(status);
+ sct = NVME_STATUS_GET_SCT(status);
+ switch (sct) {
case NVME_SCT_GENERIC:
- s = generic_status[NVME_STATUS_GET_SC(status)];
+ s = generic_status[sc];
type = "GENERIC";
break;
case NVME_SCT_COMMAND_SPECIFIC:
- s = command_specific_status[NVME_STATUS_GET_SC(status)];
+ s = command_specific_status[sc];
type = "COMMAND SPECIFIC";
break;
case NVME_SCT_MEDIA_ERROR:
- s = media_error_status[NVME_STATUS_GET_SC(status)];
+ s = media_error_status[sc];
type = "MEDIA ERROR";
break;
case NVME_SCT_PATH_RELATED:
- s = path_related_status[NVME_STATUS_GET_SC(status)];
+ s = path_related_status[sc];
type = "PATH RELATED";
break;
case NVME_SCT_VENDOR_SPECIFIC:
@@ -246,12 +248,11 @@ nvme_sc_sbuf(const struct nvme_completion *cpl, struct sbuf *sb)
}
if (type == NULL)
- sbuf_printf(sb, "RESERVED (%02x/%02x)",
- NVME_STATUS_GET_SCT(status), NVME_STATUS_GET_SC(status));
+ sbuf_printf(sb, "RESERVED (%02x/%02x)", sct, sc);
else if (s == NULL)
- sbuf_printf(sb, "%s (%02x)", type, NVME_STATUS_GET_SC(status));
+ sbuf_printf(sb, "%s (%02x/%02x)", type, sct, sc);
else
- sbuf_printf(sb, "%s", s);
+ sbuf_printf(sb, "%s (%02x/%02x)", s, sct, sc);
}
void
diff --git a/sys/dev/nvmf/nvmf_tcp.c b/sys/dev/nvmf/nvmf_tcp.c
index 6ad5229f6043..e50d7ff48d2b 100644
--- a/sys/dev/nvmf/nvmf_tcp.c
+++ b/sys/dev/nvmf/nvmf_tcp.c
@@ -970,7 +970,7 @@ nvmf_tcp_handle_r2t(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu)
}
/*
- * XXX: The spec does not specify how to handle R2T tranfers
+ * XXX: The spec does not specify how to handle R2T transfers
* out of range of the original command.
*/
data_len = le32toh(r2t->r2tl);
diff --git a/sys/dev/otus/if_otus.c b/sys/dev/otus/if_otus.c
index 5919e75a59cf..f6c4a0118b68 100644
--- a/sys/dev/otus/if_otus.c
+++ b/sys/dev/otus/if_otus.c
@@ -728,6 +728,12 @@ otus_attachhook(struct otus_softc *sc)
IEEE80211_C_SWAMSDUTX | /* Do software A-MSDU TX */
IEEE80211_C_WPA; /* WPA/RSN. */
+ /*
+ * Although A-MPDU RX is fine, A-MPDU TX apparently has some
+ * hardware bugs. Looking at Linux carl9170, it has a work-around
+ * that forces all frames into the AC_BE queue regardless of
+ * the actual QoS queue.
+ */
ic->ic_htcaps =
IEEE80211_HTC_HT |
#if 0
@@ -737,6 +743,8 @@ otus_attachhook(struct otus_softc *sc)
IEEE80211_HTCAP_MAXAMSDU_3839 |
IEEE80211_HTCAP_SMPS_OFF;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
otus_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -2232,6 +2240,9 @@ otus_tx(struct otus_softc *sc, struct ieee80211_node *ni, struct mbuf *m,
int hasqos, xferlen, type, ismcast;
wh = mtod(m, struct ieee80211_frame *);
+
+ ieee80211_output_seqno_assign(ni, -1, m);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m);
if (k == NULL) {
diff --git a/sys/dev/pci/pci_user.c b/sys/dev/pci/pci_user.c
index f68b5b7e71ff..9768030995e7 100644
--- a/sys/dev/pci/pci_user.c
+++ b/sys/dev/pci/pci_user.c
@@ -79,6 +79,9 @@ struct pci_conf32 {
u_int8_t pc_revid; /* chip revision ID */
char pd_name[PCI_MAXNAMELEN + 1]; /* device name */
u_int32_t pd_unit; /* device unit number */
+ int pd_numa_domain; /* device NUMA domain */
+ u_int32_t pc_reported_len;/* length of PCI data reported */
+ char pc_spare[64]; /* space for future fields */
};
struct pci_match_conf32 {
@@ -502,11 +505,58 @@ pci_conf_match_freebsd6_32(struct pci_match_conf_freebsd6_32 *matches, int num_m
#endif /* COMPAT_FREEBSD32 */
#endif /* !PRE7_COMPAT */
+#ifdef COMPAT_FREEBSD14
+struct pci_conf_freebsd14 {
+ struct pcisel pc_sel; /* domain+bus+slot+function */
+ u_int8_t pc_hdr; /* PCI header type */
+ u_int16_t pc_subvendor; /* card vendor ID */
+ u_int16_t pc_subdevice; /* card device ID, assigned by
+ card vendor */
+ u_int16_t pc_vendor; /* chip vendor ID */
+ u_int16_t pc_device; /* chip device ID, assigned by
+ chip vendor */
+ u_int8_t pc_class; /* chip PCI class */
+ u_int8_t pc_subclass; /* chip PCI subclass */
+ u_int8_t pc_progif; /* chip PCI programming interface */
+ u_int8_t pc_revid; /* chip revision ID */
+ char pd_name[PCI_MAXNAMELEN + 1]; /* device name */
+ u_long pd_unit; /* device unit number */
+};
+#define PCIOCGETCONF_FREEBSD14 _IOWR('p', 5, struct pci_conf_io)
+
+#ifdef COMPAT_FREEBSD32
+struct pci_conf_freebsd14_32 {
+ struct pcisel pc_sel; /* domain+bus+slot+function */
+ u_int8_t pc_hdr; /* PCI header type */
+ u_int16_t pc_subvendor; /* card vendor ID */
+ u_int16_t pc_subdevice; /* card device ID, assigned by
+ card vendor */
+ u_int16_t pc_vendor; /* chip vendor ID */
+ u_int16_t pc_device; /* chip device ID, assigned by
+ chip vendor */
+ u_int8_t pc_class; /* chip PCI class */
+ u_int8_t pc_subclass; /* chip PCI subclass */
+ u_int8_t pc_progif; /* chip PCI programming interface */
+ u_int8_t pc_revid; /* chip revision ID */
+ char pd_name[PCI_MAXNAMELEN + 1]; /* device name */
+ u_int32_t pd_unit; /* device unit number */
+};
+#define PCIOCGETCONF_FREEBSD14_32 \
+ _IOC_NEWTYPE(PCIOCGETCONF_FREEBSD14, struct pci_conf_io32)
+#endif /* COMPAT_FREEBSD32 */
+#endif /* COMPAT_FREEBSD14 */
+
union pci_conf_union {
struct pci_conf pc;
#ifdef COMPAT_FREEBSD32
struct pci_conf32 pc32;
#endif
+#ifdef COMPAT_FREEBSD14
+ struct pci_conf_freebsd14 pc14;
+#ifdef COMPAT_FREEBSD32
+ struct pci_conf_freebsd14_32 pc14_32;
+#endif
+#endif
#ifdef PRE7_COMPAT
struct pci_conf_freebsd6 pco;
#ifdef COMPAT_FREEBSD32
@@ -522,10 +572,16 @@ pci_conf_match(u_long cmd, struct pci_match_conf *matches, int num_matches,
switch (cmd) {
case PCIOCGETCONF:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#endif
return (pci_conf_match_native(
(struct pci_match_conf *)matches, num_matches, match_buf));
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
return (pci_conf_match32((struct pci_match_conf32 *)matches,
num_matches, match_buf));
#endif
@@ -645,9 +701,15 @@ pci_match_conf_size(u_long cmd)
switch (cmd) {
case PCIOCGETCONF:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#endif
return (sizeof(struct pci_match_conf));
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
return (sizeof(struct pci_match_conf32));
#endif
#ifdef PRE7_COMPAT
@@ -675,6 +737,14 @@ pci_conf_size(u_long cmd)
case PCIOCGETCONF32:
return (sizeof(struct pci_conf32));
#endif
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+ return (sizeof(struct pci_conf_freebsd14));
+#ifdef COMPAT_FREEBSD32
+ case PCIOCGETCONF_FREEBSD14_32:
+ return (sizeof(struct pci_conf_freebsd14_32));
+#endif
+#endif
#ifdef PRE7_COMPAT
case PCIOCGETCONF_FREEBSD6:
return (sizeof(struct pci_conf_freebsd6));
@@ -698,6 +768,9 @@ pci_conf_io_init(struct pci_conf_io *cio, caddr_t data, u_long cmd)
switch (cmd) {
case PCIOCGETCONF:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#endif
#ifdef PRE7_COMPAT
case PCIOCGETCONF_FREEBSD6:
#endif
@@ -706,6 +779,9 @@ pci_conf_io_init(struct pci_conf_io *cio, caddr_t data, u_long cmd)
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
#ifdef PRE7_COMPAT
case PCIOCGETCONF_FREEBSD6_32:
#endif
@@ -739,6 +815,9 @@ pci_conf_io_update_data(const struct pci_conf_io *cio, caddr_t data,
switch (cmd) {
case PCIOCGETCONF:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#endif
#ifdef PRE7_COMPAT
case PCIOCGETCONF_FREEBSD6:
#endif
@@ -751,6 +830,9 @@ pci_conf_io_update_data(const struct pci_conf_io *cio, caddr_t data,
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
#ifdef PRE7_COMPAT
case PCIOCGETCONF_FREEBSD6_32:
#endif
@@ -781,8 +863,17 @@ pci_conf_for_copyout(const struct pci_conf *pcp, union pci_conf_union *pcup,
pcup->pc = *pcp;
return;
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+ memcpy(&pcup->pc14, pcp, sizeof(pcup->pc14));
+ return;
+#endif
+
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
pcup->pc32.pc_sel = pcp->pc_sel;
pcup->pc32.pc_hdr = pcp->pc_hdr;
pcup->pc32.pc_subvendor = pcp->pc_subvendor;
@@ -796,8 +887,13 @@ pci_conf_for_copyout(const struct pci_conf *pcp, union pci_conf_union *pcup,
strlcpy(pcup->pc32.pd_name, pcp->pd_name,
sizeof(pcup->pc32.pd_name));
pcup->pc32.pd_unit = (uint32_t)pcp->pd_unit;
+ if (cmd == PCIOCGETCONF32) {
+ pcup->pc32.pd_numa_domain = pcp->pd_numa_domain;
+ pcup->pc32.pc_reported_len =
+ (uint32_t)offsetof(struct pci_conf32, pc_spare);
+ }
return;
-#endif
+#endif /* COMPAT_FREEBSD32 */
#ifdef PRE7_COMPAT
#ifdef COMPAT_FREEBSD32
@@ -1024,7 +1120,7 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
struct pci_map *pm;
struct pci_bar_mmap *pbm;
size_t confsz, iolen;
- int error, ionum, i, num_patterns;
+ int domain, error, ionum, i, num_patterns;
union pci_conf_union pcu;
#ifdef PRE7_COMPAT
struct pci_io iodata;
@@ -1044,6 +1140,12 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
#endif
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#ifdef COMPAT_FREEBSD32
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
+#endif
#ifdef PRE7_COMPAT
case PCIOCGETCONF_FREEBSD6:
#ifdef COMPAT_FREEBSD32
@@ -1069,6 +1171,12 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
#endif
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#ifdef COMPAT_FREEBSD32
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
+#endif
#ifdef PRE7_COMPAT
case PCIOCGETCONF_FREEBSD6:
#ifdef COMPAT_FREEBSD32
@@ -1201,6 +1309,12 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
dinfo->conf.pd_unit = 0;
}
+ if (dinfo->cfg.dev != NULL &&
+ bus_get_domain(dinfo->cfg.dev, &domain) == 0)
+ dinfo->conf.pd_numa_domain = domain;
+ else
+ dinfo->conf.pd_numa_domain = 0;
+
if (pattern_buf == NULL ||
pci_conf_match(cmd, pattern_buf, num_patterns,
&dinfo->conf) == 0) {
@@ -1217,6 +1331,9 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
break;
}
+ dinfo->conf.pc_reported_len =
+ offsetof(struct pci_conf, pc_spare);
+
pci_conf_for_copyout(&dinfo->conf, &pcu, cmd);
error = copyout(&pcu,
(caddr_t)cio->matches +
diff --git a/sys/dev/puc/pucdata.c b/sys/dev/puc/pucdata.c
index e911a407cca9..436af76001da 100644
--- a/sys/dev/puc/pucdata.c
+++ b/sys/dev/puc/pucdata.c
@@ -64,6 +64,7 @@ static puc_config_f puc_config_quatech;
static puc_config_f puc_config_syba;
static puc_config_f puc_config_siig;
static puc_config_f puc_config_sunix;
+static puc_config_f puc_config_systembase;
static puc_config_f puc_config_timedia;
static puc_config_f puc_config_titan;
@@ -1705,6 +1706,23 @@ const struct puc_cfg puc_pci_devices[] = {
PUC_PORT_4S, 0x10, 0, 8,
.config_function = puc_config_icbook
},
+
+ /*
+ * Systembase cards using SB16C1050 UARTs:
+ */
+ { 0x14a1, 0x0008, 0x14a1, 0x0008,
+ "Systembase SB16C1058",
+ DEFAULT_RCLK * 8,
+ PUC_PORT_8S, 0x10, 0, 8,
+ .config_function = puc_config_systembase,
+ },
+ { 0x14a1, 0x0004, 0x14a1, 0x0004,
+ "Systembase SB16C1054",
+ DEFAULT_RCLK * 8,
+ PUC_PORT_4S, 0x10, 0, 8,
+ .config_function = puc_config_systembase,
+ },
+
{ 0xffff, 0, 0xffff, 0, NULL, 0 }
};
@@ -2294,3 +2312,28 @@ puc_config_titan(struct puc_softc *sc __unused, enum puc_cfg_cmd cmd,
}
return (ENXIO);
}
+
+static int
+puc_config_systembase(struct puc_softc *sc __unused,
+ enum puc_cfg_cmd cmd, int port, intptr_t *res)
+{
+ struct puc_bar *bar;
+
+ switch (cmd) {
+ case PUC_CFG_SETUP:
+ bar = puc_get_bar(sc, 0x14);
+ if (bar == NULL)
+ return (ENXIO);
+
+ /*
+ * The Systembase SB16C1058 (and probably other devices
+ * based on the SB16C1050 UART core) require poking a
+ * register in the *other* RID to turn on interrupts.
+ */
+ bus_write_1(bar->b_res, /* OPT_IMRREG0 */ 0xc, 0xff);
+ return (0);
+ default:
+ break;
+ }
+ return (ENXIO);
+}
diff --git a/sys/dev/qat/include/common/adf_accel_devices.h b/sys/dev/qat/include/common/adf_accel_devices.h
index c09aee8ea4bd..eeffc6a9132c 100644
--- a/sys/dev/qat/include/common/adf_accel_devices.h
+++ b/sys/dev/qat/include/common/adf_accel_devices.h
@@ -39,12 +39,16 @@
#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
#define ADF_401XX_PCI_DEVICE_ID 0x4942
#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
+#define ADF_402XX_PCI_DEVICE_ID 0x4944
+#define ADF_402XXIOV_PCI_DEVICE_ID 0x4945
#define IS_QAT_GEN3(ID) ({ (ID == ADF_C4XXX_PCI_DEVICE_ID); })
static inline bool
IS_QAT_GEN4(const unsigned int id)
{
return (id == ADF_4XXX_PCI_DEVICE_ID || id == ADF_401XX_PCI_DEVICE_ID ||
+ id == ADF_402XX_PCI_DEVICE_ID ||
+ id == ADF_402XXIOV_PCI_DEVICE_ID ||
id == ADF_4XXXIOV_PCI_DEVICE_ID ||
id == ADF_401XXIOV_PCI_DEVICE_ID);
}
diff --git a/sys/dev/qat/qat_api/include/icp_sal_versions.h b/sys/dev/qat/qat_api/include/icp_sal_versions.h
index 03bcef4fcbbb..0eb227ade09c 100644
--- a/sys/dev/qat/qat_api/include/icp_sal_versions.h
+++ b/sys/dev/qat/qat_api/include/icp_sal_versions.h
@@ -26,7 +26,7 @@
/* Part name and number of the accelerator device */
#define SAL_INFO2_DRIVER_SW_VERSION_MAJ_NUMBER 3
-#define SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER 15
+#define SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER 16
#define SAL_INFO2_DRIVER_SW_VERSION_PATCH_NUMBER 0
/**
diff --git a/sys/dev/qat/qat_common/adf_gen4_timer.c b/sys/dev/qat/qat_common/adf_gen4_timer.c
index 96b65cdff181..2c74d09418e5 100644
--- a/sys/dev/qat/qat_common/adf_gen4_timer.c
+++ b/sys/dev/qat/qat_common/adf_gen4_timer.c
@@ -57,7 +57,7 @@ end:
static void
timer_handler(struct timer_list *tl)
{
- struct adf_int_timer *int_timer = from_timer(int_timer, tl, timer);
+ struct adf_int_timer *int_timer = timer_container_of(int_timer, tl, timer);
struct adf_accel_dev *accel_dev = int_timer->accel_dev;
struct adf_hb_timer_data *hb_timer_data = NULL;
u64 timeout_val = adf_get_next_timeout(int_timer->timeout_val);
diff --git a/sys/dev/qat/qat_common/qat_uclo.c b/sys/dev/qat/qat_common/qat_uclo.c
index 54e8e8eb7421..b17020286d24 100644
--- a/sys/dev/qat/qat_common/qat_uclo.c
+++ b/sys/dev/qat/qat_common/qat_uclo.c
@@ -892,6 +892,7 @@ qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
return ICP_QAT_AC_C4XXX_DEV_TYPE;
case ADF_4XXX_PCI_DEVICE_ID:
case ADF_401XX_PCI_DEVICE_ID:
+ case ADF_402XX_PCI_DEVICE_ID:
return ICP_QAT_AC_4XXX_A_DEV_TYPE;
default:
pr_err("QAT: unsupported device 0x%x\n",
diff --git a/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c b/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c
index d730efd5952b..49e1e1859e78 100644
--- a/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c
+++ b/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c
@@ -536,8 +536,8 @@ adf_exit_accel_units(struct adf_accel_dev *accel_dev)
}
static const char *
-get_obj_name(struct adf_accel_dev *accel_dev,
- enum adf_accel_unit_services service)
+get_obj_name_4xxx(struct adf_accel_dev *accel_dev,
+ enum adf_accel_unit_services service)
{
switch (service) {
case ADF_ACCEL_ASYM:
@@ -553,6 +553,24 @@ get_obj_name(struct adf_accel_dev *accel_dev,
}
}
+static const char *
+get_obj_name_402xx(struct adf_accel_dev *accel_dev,
+ enum adf_accel_unit_services service)
+{
+ switch (service) {
+ case ADF_ACCEL_ASYM:
+ return ADF_402XX_ASYM_OBJ;
+ case ADF_ACCEL_CRYPTO:
+ return ADF_402XX_SYM_OBJ;
+ case ADF_ACCEL_COMPRESSION:
+ return ADF_402XX_DC_OBJ;
+ case ADF_ACCEL_ADMIN:
+ return ADF_402XX_ADMIN_OBJ;
+ default:
+ return NULL;
+ }
+}
+
static uint32_t
get_objs_num(struct adf_accel_dev *accel_dev)
{
@@ -982,8 +1000,23 @@ adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 id)
hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
hw_data->get_sku = get_sku;
hw_data->heartbeat_ctr_num = ADF_NUM_HB_CNT_PER_AE;
- hw_data->fw_name = ADF_4XXX_FW;
- hw_data->fw_mmp_name = ADF_4XXX_MMP;
+ switch (id) {
+ case ADF_402XX_PCI_DEVICE_ID:
+ hw_data->fw_name = ADF_402XX_FW;
+ hw_data->fw_mmp_name = ADF_402XX_MMP;
+ hw_data->asym_ae_active_thd_mask = DEFAULT_4XXX_ASYM_AE_MASK;
+ break;
+ case ADF_401XX_PCI_DEVICE_ID:
+ hw_data->fw_name = ADF_4XXX_FW;
+ hw_data->fw_mmp_name = ADF_4XXX_MMP;
+ hw_data->asym_ae_active_thd_mask = DEFAULT_401XX_ASYM_AE_MASK;
+ break;
+
+ default:
+ hw_data->fw_name = ADF_4XXX_FW;
+ hw_data->fw_mmp_name = ADF_4XXX_MMP;
+ hw_data->asym_ae_active_thd_mask = DEFAULT_4XXX_ASYM_AE_MASK;
+ }
hw_data->init_admin_comms = adf_init_admin_comms;
hw_data->exit_admin_comms = adf_exit_admin_comms;
hw_data->send_admin_init = adf_4xxx_send_admin_init;
@@ -1002,7 +1035,13 @@ adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 id)
hw_data->get_ring_svc_map_data = get_ring_svc_map_data;
hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
hw_data->get_objs_num = get_objs_num;
- hw_data->get_obj_name = get_obj_name;
+ switch (id) {
+ case ADF_402XX_PCI_DEVICE_ID:
+ hw_data->get_obj_name = get_obj_name_402xx;
+ break;
+ default:
+ hw_data->get_obj_name = get_obj_name_4xxx;
+ }
hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask;
hw_data->get_service_type = adf_4xxx_get_service_type;
hw_data->set_msix_rttable = set_msix_default_rttable;
@@ -1022,15 +1061,6 @@ adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 id)
hw_data->query_storage_cap = 1;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
- switch (id) {
- case ADF_401XX_PCI_DEVICE_ID:
- hw_data->asym_ae_active_thd_mask = DEFAULT_401XX_ASYM_AE_MASK;
- break;
- case ADF_4XXX_PCI_DEVICE_ID:
- default:
- hw_data->asym_ae_active_thd_mask = DEFAULT_4XXX_ASYM_AE_MASK;
- }
-
adf_gen4_init_hw_csr_info(&hw_data->csr_info);
adf_gen4_init_pf_pfvf_ops(&hw_data->csr_info.pfvf_ops);
}
diff --git a/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.h b/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.h
index c35ebbcadcd7..fa7249dca596 100644
--- a/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.h
+++ b/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.h
@@ -87,6 +87,12 @@
#define ADF_4XXX_SYM_OBJ "qat_4xxx_sym.bin"
#define ADF_4XXX_ASYM_OBJ "qat_4xxx_asym.bin"
#define ADF_4XXX_ADMIN_OBJ "qat_4xxx_admin.bin"
+#define ADF_402XX_FW "qat_402xx_fw"
+#define ADF_402XX_MMP "qat_402xx_mmp_fw"
+#define ADF_402XX_DC_OBJ "qat_402xx_dc.bin"
+#define ADF_402XX_SYM_OBJ "qat_402xx_sym.bin"
+#define ADF_402XX_ASYM_OBJ "qat_402xx_asym.bin"
+#define ADF_402XX_ADMIN_OBJ "qat_402xx_admin.bin"
/* Only 3 types of images can be loaded including the admin image */
#define ADF_4XXX_MAX_OBJ 3
diff --git a/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c b/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c
index cb534dd03b86..f9ad39fa45f0 100644
--- a/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c
+++ b/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c
@@ -22,12 +22,14 @@ static MALLOC_DEFINE(M_QAT_4XXX, "qat_4xxx", "qat_4xxx");
PCI_VENDOR_ID_INTEL, device_id \
}
-static const struct pci_device_id adf_pci_tbl[] =
- { ADF_SYSTEM_DEVICE(ADF_4XXX_PCI_DEVICE_ID),
- ADF_SYSTEM_DEVICE(ADF_401XX_PCI_DEVICE_ID),
- {
- 0,
- } };
+static const struct pci_device_id adf_pci_tbl[] = {
+ ADF_SYSTEM_DEVICE(ADF_4XXX_PCI_DEVICE_ID),
+ ADF_SYSTEM_DEVICE(ADF_401XX_PCI_DEVICE_ID),
+ ADF_SYSTEM_DEVICE(ADF_402XX_PCI_DEVICE_ID),
+ {
+ 0,
+ }
+};
static int
adf_probe(device_t dev)
@@ -135,6 +137,7 @@ adf_cleanup_accel(struct adf_accel_dev *accel_dev)
switch (pci_get_device(accel_pci_dev->pci_dev)) {
case ADF_4XXX_PCI_DEVICE_ID:
case ADF_401XX_PCI_DEVICE_ID:
+ case ADF_402XX_PCI_DEVICE_ID:
adf_clean_hw_data_4xxx(accel_dev->hw_device);
break;
default:
diff --git a/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c b/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c
index 2bbccb4d6b17..dbe40835ccbf 100644
--- a/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c
+++ b/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c
@@ -22,12 +22,14 @@ static MALLOC_DEFINE(M_QAT_4XXXVF, "qat_4xxxvf", "qat_4xxxvf");
PCI_VENDOR_ID_INTEL, device_id \
}
-static const struct pci_device_id adf_pci_tbl[] =
- { ADF_SYSTEM_DEVICE(ADF_4XXXIOV_PCI_DEVICE_ID),
- ADF_SYSTEM_DEVICE(ADF_401XXIOV_PCI_DEVICE_ID),
- {
- 0,
- } };
+static const struct pci_device_id adf_pci_tbl[] = {
+ ADF_SYSTEM_DEVICE(ADF_4XXXIOV_PCI_DEVICE_ID),
+ ADF_SYSTEM_DEVICE(ADF_401XXIOV_PCI_DEVICE_ID),
+ ADF_SYSTEM_DEVICE(ADF_402XXIOV_PCI_DEVICE_ID),
+ {
+ 0,
+ }
+};
static int
adf_probe(device_t dev)
@@ -76,6 +78,7 @@ adf_cleanup_accel(struct adf_accel_dev *accel_dev)
switch (pci_get_device(accel_pci_dev->pci_dev)) {
case ADF_4XXXIOV_PCI_DEVICE_ID:
case ADF_401XXIOV_PCI_DEVICE_ID:
+ case ADF_402XXIOV_PCI_DEVICE_ID:
adf_clean_hw_data_4xxxiov(accel_dev->hw_device);
break;
default:
diff --git a/sys/dev/qlnx/qlnxe/ecore_dev.c b/sys/dev/qlnx/qlnxe/ecore_dev.c
index 6187ecdbc446..389a95a4164c 100644
--- a/sys/dev/qlnx/qlnxe/ecore_dev.c
+++ b/sys/dev/qlnx/qlnxe/ecore_dev.c
@@ -5268,7 +5268,7 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
}
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n",
+ "Read default link: Speed %u Mb/sec, Adv. Speeds 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%u usec]\n",
link->speed.forced_speed, link->speed.advertised_speeds,
link->speed.autoneg, link->pause.autoneg,
p_caps->default_eee, p_caps->eee_lpi_timer);
@@ -6860,7 +6860,7 @@ int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
p_hwfn->qm_info.pf_rl);
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Configured MAX bandwidth to be %08x Mb/sec\n",
+ "Configured MAX bandwidth to be %u Mb/sec\n",
p_link->speed);
return rc;
@@ -6918,7 +6918,7 @@ int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Configured MIN bandwidth to be %d Mb/sec\n",
+ "Configured MIN bandwidth to be %u Mb/sec\n",
p_link->min_pf_rate);
return rc;
diff --git a/sys/dev/qlnx/qlnxe/ecore_mcp.c b/sys/dev/qlnx/qlnxe/ecore_mcp.c
index ab14b1eb5186..6d1e5fe24d06 100644
--- a/sys/dev/qlnx/qlnxe/ecore_mcp.c
+++ b/sys/dev/qlnx/qlnxe/ecore_mcp.c
@@ -1638,7 +1638,7 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
if (b_up)
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
+ "Configuring Link: Speed %u Mb/sec, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
phy_cfg.loopback_mode);
else
diff --git a/sys/dev/qlnx/qlnxe/qlnx_def.h b/sys/dev/qlnx/qlnxe/qlnx_def.h
index 4342bba89587..796845f3f8c6 100644
--- a/sys/dev/qlnx/qlnxe/qlnx_def.h
+++ b/sys/dev/qlnx/qlnxe/qlnx_def.h
@@ -696,22 +696,6 @@ extern int qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info,
* Some OS specific stuff
*/
-#if (defined IFM_100G_SR4)
-#define QLNX_IFM_100G_SR4 IFM_100G_SR4
-#define QLNX_IFM_100G_LR4 IFM_100G_LR4
-#define QLNX_IFM_100G_CR4 IFM_100G_CR4
-#else
-#define QLNX_IFM_100G_SR4 IFM_UNKNOWN
-#define QLNX_IFM_100G_LR4 IFM_UNKNOWN
-#endif /* #if (defined IFM_100G_SR4) */
-
-#if (defined IFM_25G_SR)
-#define QLNX_IFM_25G_SR IFM_25G_SR
-#define QLNX_IFM_25G_CR IFM_25G_CR
-#else
-#define QLNX_IFM_25G_SR IFM_UNKNOWN
-#define QLNX_IFM_25G_CR IFM_UNKNOWN
-#endif /* #if (defined IFM_25G_SR) */
#define QLNX_INC_IERRORS(ifp) if_inc_counter(ifp, IFCOUNTER_IERRORS, 1)
#define QLNX_INC_IQDROPS(ifp) if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1)
diff --git a/sys/dev/qlnx/qlnxe/qlnx_os.c b/sys/dev/qlnx/qlnxe/qlnx_os.c
index 4ad190374f87..9963f472c615 100644
--- a/sys/dev/qlnx/qlnxe/qlnx_os.c
+++ b/sys/dev/qlnx/qlnxe/qlnx_os.c
@@ -2375,18 +2375,15 @@ qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
} else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
(device_id == QLOGIC_PCI_DEVICE_ID_8070)) {
- ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
- ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
+ ifmedia_add(&ha->media, (IFM_ETHER | IFM_25G_SR), 0, NULL);
+ ifmedia_add(&ha->media, (IFM_ETHER | IFM_25G_CR), 0, NULL);
} else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
} else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
- ifmedia_add(&ha->media,
- (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
- ifmedia_add(&ha->media,
- (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
- ifmedia_add(&ha->media,
- (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
+ ifmedia_add(&ha->media, (IFM_ETHER | IFM_100G_LR4), 0, NULL);
+ ifmedia_add(&ha->media, (IFM_ETHER | IFM_100G_SR4), 0, NULL);
+ ifmedia_add(&ha->media, (IFM_ETHER | IFM_100G_CR4), 0, NULL);
}
ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
@@ -2724,7 +2721,9 @@ qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data)
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
- QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
+ case SIOCGIFXMEDIA:
+ QL_DPRINT4(ha,
+ "SIOCSIFMEDIA/SIOCGIFMEDIA/SIOCGIFXMEDIA (0x%lx)\n", cmd);
ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
break;
@@ -3808,11 +3807,11 @@ qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
case MEDIA_MODULE_FIBER:
case MEDIA_UNSPECIFIED:
if (if_link->speed == (100 * 1000))
- ifm_type = QLNX_IFM_100G_SR4;
+ ifm_type = IFM_100G_SR4;
else if (if_link->speed == (40 * 1000))
ifm_type = IFM_40G_SR4;
else if (if_link->speed == (25 * 1000))
- ifm_type = QLNX_IFM_25G_SR;
+ ifm_type = IFM_25G_SR;
else if (if_link->speed == (10 * 1000))
ifm_type = (IFM_10G_LR | IFM_10G_SR);
else if (if_link->speed == (1 * 1000))
@@ -3822,11 +3821,11 @@ qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
case MEDIA_DA_TWINAX:
if (if_link->speed == (100 * 1000))
- ifm_type = QLNX_IFM_100G_CR4;
+ ifm_type = IFM_100G_CR4;
else if (if_link->speed == (40 * 1000))
ifm_type = IFM_40G_CR4;
else if (if_link->speed == (25 * 1000))
- ifm_type = QLNX_IFM_25G_CR;
+ ifm_type = IFM_25G_CR;
else if (if_link->speed == (10 * 1000))
ifm_type = IFM_10G_TWINAX;
diff --git a/sys/dev/ral/rt2560.c b/sys/dev/ral/rt2560.c
index 09b01ea55be9..7feb324eb21d 100644
--- a/sys/dev/ral/rt2560.c
+++ b/sys/dev/ral/rt2560.c
@@ -281,6 +281,8 @@ rt2560_attach(device_t dev, int id)
#endif
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
rt2560_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1516,6 +1518,8 @@ rt2560_tx_mgt(struct rt2560_softc *sc, struct mbuf *m0,
wh = mtod(m0, struct ieee80211_frame *);
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
diff --git a/sys/dev/ral/rt2661.c b/sys/dev/ral/rt2661.c
index 38cd99d899ed..c9c86d4f089a 100644
--- a/sys/dev/ral/rt2661.c
+++ b/sys/dev/ral/rt2661.c
@@ -282,6 +282,8 @@ rt2661_attach(device_t dev, int id)
#endif
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
rt2661_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1284,7 +1286,7 @@ rt2661_tx_mgt(struct rt2661_softc *sc, struct mbuf *m0,
rate = ni->ni_txparms->mgmtrate;
wh = mtod(m0, struct ieee80211_frame *);
-
+ ieee80211_output_seqno_assign(ni, -1, m0);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
diff --git a/sys/dev/ral/rt2860.c b/sys/dev/ral/rt2860.c
index 1449df683a93..76fe4652839d 100644
--- a/sys/dev/ral/rt2860.c
+++ b/sys/dev/ral/rt2860.c
@@ -323,6 +323,8 @@ rt2860_attach(device_t dev, int id)
| IEEE80211_C_WME /* 802.11e */
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
rt2860_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1471,6 +1473,7 @@ rt2860_tx(struct rt2860_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
wh = mtod(m, struct ieee80211_frame *);
+ ieee80211_output_seqno_assign(ni, -1, m);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m);
if (k == NULL) {
diff --git a/sys/dev/random/fenestrasX/fx_pool.c b/sys/dev/random/fenestrasX/fx_pool.c
index d2e6f0db71ee..f4ad1e295d54 100644
--- a/sys/dev/random/fenestrasX/fx_pool.c
+++ b/sys/dev/random/fenestrasX/fx_pool.c
@@ -164,6 +164,9 @@ static const struct fxrng_ent_char {
[RANDOM_CALLOUT] = {
.entc_cls = &fxrng_lo_push,
},
+ [RANDOM_RANDOMDEV] = {
+ .entc_cls = &fxrng_lo_push,
+ },
[RANDOM_PURE_OCTEON] = {
.entc_cls = &fxrng_hi_push, /* Could be made pull. */
},
diff --git a/sys/dev/random/random_harvestq.c b/sys/dev/random/random_harvestq.c
index 84ec174bd08e..2d7af254c52c 100644
--- a/sys/dev/random/random_harvestq.c
+++ b/sys/dev/random/random_harvestq.c
@@ -103,8 +103,10 @@ static const char *random_source_descr[ENTROPYSOURCE];
volatile int random_kthread_control;
-/* Allow the sysadmin to select the broad category of
- * entropy types to harvest.
+/*
+ * Allow the sysadmin to select the broad category of entropy types to harvest.
+ *
+ * Updates are synchronized by the harvest mutex.
*/
__read_frequently u_int hc_source_mask;
@@ -278,8 +280,15 @@ random_sources_feed(void)
epoch_enter_preempt(rs_epoch, &et);
CK_LIST_FOREACH(rrs, &source_list, rrs_entries) {
for (i = 0; i < npools; i++) {
+ if (rrs->rrs_source->rs_read == NULL) {
+ /* Source pushes entropy asynchronously. */
+ continue;
+ }
n = rrs->rrs_source->rs_read(entropy, sizeof(entropy));
- KASSERT((n <= sizeof(entropy)), ("%s: rs_read returned too much data (%u > %zu)", __func__, n, sizeof(entropy)));
+ KASSERT((n <= sizeof(entropy)),
+ ("%s: rs_read returned too much data (%u > %zu)",
+ __func__, n, sizeof(entropy)));
+
/*
* Sometimes the HW entropy source doesn't have anything
* ready for us. This isn't necessarily untrustworthy.
@@ -334,7 +343,17 @@ copy_event(uint32_t dst[static HARVESTSIZE + 1],
{
memset(dst, 0, sizeof(uint32_t) * (HARVESTSIZE + 1));
memcpy(dst, event->he_entropy, event->he_size);
- dst[HARVESTSIZE] = event->he_somecounter;
+ if (event->he_source <= RANDOM_ENVIRONMENTAL_END) {
+ /*
+ * For pure entropy sources the timestamp counter is generally
+		 * quite deterministic since samples are taken at regular
+ * intervals, so does not contribute much to the entropy. To
+ * make health tests more effective, exclude it from the sample,
+ * since it might otherwise defeat the health tests in a
+ * scenario where the source is stuck.
+ */
+ dst[HARVESTSIZE] = event->he_somecounter;
+ }
}
static void
@@ -464,11 +483,12 @@ SYSCTL_BOOL(_kern_random, OID_AUTO, nist_healthtest_enabled,
"Enable NIST SP 800-90B health tests for noise sources");
static void
-random_healthtest_init(enum random_entropy_source source)
+random_healthtest_init(enum random_entropy_source source, int min_entropy)
{
struct health_test_softc *ht;
ht = &healthtest[source];
+ memset(ht, 0, sizeof(*ht));
KASSERT(ht->ht_state == INIT,
("%s: health test state is %d for source %d",
__func__, ht->ht_state, source));
@@ -485,20 +505,62 @@ random_healthtest_init(enum random_entropy_source source)
}
/*
- * Set cutoff values for the two tests, assuming that each sample has
- * min-entropy of 1 bit and allowing for an error rate of 1 in 2^{34}.
- * With a sample rate of RANDOM_KTHREAD_HZ, we expect to see an false
- * positive once in ~54.5 years.
+ * Set cutoff values for the two tests, given a min-entropy estimate for
+ * the source and allowing for an error rate of 1 in 2^{34}. With a
+ * min-entropy estimate of 1 bit and a sample rate of RANDOM_KTHREAD_HZ,
+	 * we expect to see a false positive once in ~54.5 years.
*
* The RCT limit comes from the formula in section 4.4.1.
*
- * The APT cutoff is calculated using the formula in section 4.4.2
+ * The APT cutoffs are calculated using the formula in section 4.4.2
* footnote 10 with the number of Bernoulli trials changed from W to
* W-1, since the test as written counts the number of samples equal to
- * the first sample in the window, and thus tests W-1 samples.
+ * the first sample in the window, and thus tests W-1 samples. We
+ * provide cutoffs for estimates up to sizeof(uint32_t)*HARVESTSIZE*8
+ * bits.
*/
- ht->ht_rct_limit = 35;
- ht->ht_apt_cutoff = 330;
+ const int apt_cutoffs[] = {
+ [1] = 329,
+ [2] = 195,
+ [3] = 118,
+ [4] = 73,
+ [5] = 48,
+ [6] = 33,
+ [7] = 23,
+ [8] = 17,
+ [9] = 13,
+ [10] = 11,
+ [11] = 9,
+ [12] = 8,
+ [13] = 7,
+ [14] = 6,
+ [15] = 5,
+ [16] = 5,
+ [17 ... 19] = 4,
+ [20 ... 25] = 3,
+ [26 ... 42] = 2,
+ [43 ... 64] = 1,
+ };
+ const int error_rate = 34;
+
+ if (min_entropy == 0) {
+ /*
+ * For environmental sources, the main source of entropy is the
+ * associated timecounter value. Since these sources can be
+ * influenced by unprivileged users, we conservatively use a
+ * min-entropy estimate of 1 bit per sample. For "pure"
+ * sources, we assume 8 bits per sample, as such sources provide
+ * a variable amount of data per read and in particular might
+ * only provide a single byte at a time.
+ */
+ min_entropy = source >= RANDOM_PURE_START ? 8 : 1;
+ } else if (min_entropy < 0 || min_entropy >= nitems(apt_cutoffs)) {
+ panic("invalid min_entropy %d for %s", min_entropy,
+ random_source_descr[source]);
+ }
+
+ ht->ht_rct_limit = 1 + howmany(error_rate, min_entropy);
+ ht->ht_apt_cutoff = apt_cutoffs[min_entropy];
}
static int
@@ -533,9 +595,9 @@ random_check_uint_harvestmask(SYSCTL_HANDLER_ARGS)
_RANDOM_HARVEST_ETHER_OFF | _RANDOM_HARVEST_UMA_OFF;
int error;
- u_int value, orig_value;
+ u_int value;
- orig_value = value = hc_source_mask;
+ value = atomic_load_int(&hc_source_mask);
error = sysctl_handle_int(oidp, &value, 0, req);
if (error != 0 || req->newptr == NULL)
return (error);
@@ -546,12 +608,14 @@ random_check_uint_harvestmask(SYSCTL_HANDLER_ARGS)
/*
* Disallow userspace modification of pure entropy sources.
*/
+ RANDOM_HARVEST_LOCK();
hc_source_mask = (value & ~user_immutable_mask) |
- (orig_value & user_immutable_mask);
+ (hc_source_mask & user_immutable_mask);
+ RANDOM_HARVEST_UNLOCK();
return (0);
}
SYSCTL_PROC(_kern_random_harvest, OID_AUTO, mask,
- CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0,
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
random_check_uint_harvestmask, "IU",
"Entropy harvesting mask");
@@ -563,9 +627,16 @@ random_print_harvestmask(SYSCTL_HANDLER_ARGS)
error = sysctl_wire_old_buffer(req, 0);
if (error == 0) {
+ u_int mask;
+
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
- for (i = ENTROPYSOURCE - 1; i >= 0; i--)
- sbuf_cat(&sbuf, (hc_source_mask & (1 << i)) ? "1" : "0");
+ mask = atomic_load_int(&hc_source_mask);
+ for (i = ENTROPYSOURCE - 1; i >= 0; i--) {
+ bool present;
+
+ present = (mask & (1u << i)) != 0;
+ sbuf_cat(&sbuf, present ? "1" : "0");
+ }
error = sbuf_finish(&sbuf);
sbuf_delete(&sbuf);
}
@@ -619,16 +690,21 @@ random_print_harvestmask_symbolic(SYSCTL_HANDLER_ARGS)
first = true;
error = sysctl_wire_old_buffer(req, 0);
if (error == 0) {
+ u_int mask;
+
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
+ mask = atomic_load_int(&hc_source_mask);
for (i = ENTROPYSOURCE - 1; i >= 0; i--) {
- if (i >= RANDOM_PURE_START &&
- (hc_source_mask & (1 << i)) == 0)
+ bool present;
+
+ present = (mask & (1u << i)) != 0;
+ if (i >= RANDOM_PURE_START && !present)
continue;
if (!first)
sbuf_cat(&sbuf, ",");
- sbuf_cat(&sbuf, !(hc_source_mask & (1 << i)) ? "[" : "");
+ sbuf_cat(&sbuf, !present ? "[" : "");
sbuf_cat(&sbuf, random_source_descr[i]);
- sbuf_cat(&sbuf, !(hc_source_mask & (1 << i)) ? "]" : "");
+ sbuf_cat(&sbuf, !present ? "]" : "");
first = false;
}
error = sbuf_finish(&sbuf);
@@ -652,8 +728,8 @@ random_harvestq_init(void *unused __unused)
RANDOM_HARVEST_INIT_LOCK();
harvest_context.hc_active_buf = 0;
- for (int i = 0; i < ENTROPYSOURCE; i++)
- random_healthtest_init(i);
+ for (int i = RANDOM_START; i <= RANDOM_ENVIRONMENTAL_END; i++)
+ random_healthtest_init(i, 0);
}
SYSINIT(random_device_h_init, SI_SUB_RANDOM, SI_ORDER_THIRD, random_harvestq_init, NULL);
@@ -835,20 +911,6 @@ random_harvest_direct_(const void *entropy, u_int size, enum random_entropy_sour
}
void
-random_harvest_register_source(enum random_entropy_source source)
-{
-
- hc_source_mask |= (1 << source);
-}
-
-void
-random_harvest_deregister_source(enum random_entropy_source source)
-{
-
- hc_source_mask &= ~(1 << source);
-}
-
-void
random_source_register(const struct random_source *rsource)
{
struct random_sources *rrs;
@@ -858,11 +920,12 @@ random_source_register(const struct random_source *rsource)
rrs = malloc(sizeof(*rrs), M_ENTROPY, M_WAITOK);
rrs->rrs_source = rsource;
- random_harvest_register_source(rsource->rs_source);
-
printf("random: registering fast source %s\n", rsource->rs_ident);
+ random_healthtest_init(rsource->rs_source, rsource->rs_min_entropy);
+
RANDOM_HARVEST_LOCK();
+ hc_source_mask |= (1 << rsource->rs_source);
CK_LIST_INSERT_HEAD(&source_list, rrs, rrs_entries);
RANDOM_HARVEST_UNLOCK();
}
@@ -874,9 +937,8 @@ random_source_deregister(const struct random_source *rsource)
KASSERT(rsource != NULL, ("invalid input to %s", __func__));
- random_harvest_deregister_source(rsource->rs_source);
-
RANDOM_HARVEST_LOCK();
+ hc_source_mask &= ~(1 << rsource->rs_source);
CK_LIST_FOREACH(rrs, &source_list, rrs_entries)
if (rrs->rrs_source == rsource) {
CK_LIST_REMOVE(rrs, rrs_entries);
diff --git a/sys/dev/random/randomdev.h b/sys/dev/random/randomdev.h
index 6d742447ea8b..a6ca66c7d92e 100644
--- a/sys/dev/random/randomdev.h
+++ b/sys/dev/random/randomdev.h
@@ -52,7 +52,9 @@ random_check_uint_##name(SYSCTL_HANDLER_ARGS) \
}
#endif /* SYSCTL_DECL */
+#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_ENTROPY);
+#endif
extern bool random_bypass_before_seeding;
extern bool read_random_bypassed_before_seeding;
@@ -101,6 +103,7 @@ struct random_source {
const char *rs_ident;
enum random_entropy_source rs_source;
random_source_read_t *rs_read;
+ int rs_min_entropy;
};
void random_source_register(const struct random_source *);
diff --git a/sys/dev/re/if_re.c b/sys/dev/re/if_re.c
index 091ab2db72ec..d56c975a43d2 100644
--- a/sys/dev/re/if_re.c
+++ b/sys/dev/re/if_re.c
@@ -353,6 +353,8 @@ static driver_t re_driver = {
DRIVER_MODULE(re, pci, re_driver, 0, 0);
DRIVER_MODULE(miibus, re, miibus_driver, 0, 0);
+MODULE_PNP_INFO("U16:vendor;U16:device;U32:#;D:#", pci, re, re_devs,
+ nitems(re_devs) - 1);
#define EE_SET(x) \
CSR_WRITE_1(sc, RL_EECMD, \
@@ -3558,6 +3560,7 @@ re_ioctl(if_t ifp, u_long command, caddr_t data)
static void
re_watchdog(struct rl_softc *sc)
{
+ struct epoch_tracker et;
if_t ifp;
RL_LOCK_ASSERT(sc);
@@ -3578,7 +3581,9 @@ re_watchdog(struct rl_softc *sc)
if_printf(ifp, "watchdog timeout\n");
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ NET_EPOCH_ENTER(et);
re_rxeof(sc, NULL);
+ NET_EPOCH_EXIT(et);
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
re_init_locked(sc);
if (!if_sendq_empty(ifp))
diff --git a/sys/dev/rtwn/if_rtwn.c b/sys/dev/rtwn/if_rtwn.c
index 7a547e13cafa..c5889937fb08 100644
--- a/sys/dev/rtwn/if_rtwn.c
+++ b/sys/dev/rtwn/if_rtwn.c
@@ -268,6 +268,14 @@ rtwn_attach(struct rtwn_softc *sc)
ic->ic_flags_ext |= IEEE80211_FEXT_WATCHDOG;
#endif
+ /* Enable seqno offload */
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
+#ifdef RTWN_WITHOUT_UCODE
+ /* Don't originate NULL data frames - let firmware do this */
+ ic->ic_flags_ext |= IEEE80211_FEXT_NO_NULLDATA;
+#endif
+
/* Adjust capabilities. */
rtwn_adj_devcaps(sc);
diff --git a/sys/dev/rtwn/if_rtwn_tx.c b/sys/dev/rtwn/if_rtwn_tx.c
index 2c9c246dfbb4..fa7f35f2de83 100644
--- a/sys/dev/rtwn/if_rtwn_tx.c
+++ b/sys/dev/rtwn/if_rtwn_tx.c
@@ -183,6 +183,10 @@ rtwn_tx_data(struct rtwn_softc *sc, struct ieee80211_node *ni,
}
}
+ /* seqno allocate, only if AMPDU isn't running */
+ if ((m->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m);
+
cipher = IEEE80211_CIPHER_NONE;
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m);
@@ -229,6 +233,10 @@ rtwn_tx_raw(struct rtwn_softc *sc, struct ieee80211_node *ni,
uint8_t type;
u_int cipher;
+ /* seqno allocate, only if AMPDU isn't running */
+ if ((m->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m);
+
/* Encrypt the frame if need be. */
cipher = IEEE80211_CIPHER_NONE;
if (params->ibp_flags & IEEE80211_BPF_CRYPTO) {
diff --git a/sys/dev/rtwn/rtl8192c/r92c_tx.c b/sys/dev/rtwn/rtl8192c/r92c_tx.c
index 6b013de0c536..ba2f60bd9295 100644
--- a/sys/dev/rtwn/rtl8192c/r92c_tx.c
+++ b/sys/dev/rtwn/rtl8192c/r92c_tx.c
@@ -452,11 +452,10 @@ r92c_fill_tx_desc(struct rtwn_softc *sc, struct ieee80211_node *ni,
} else {
uint16_t seqno;
- if (m->m_flags & M_AMPDU_MPDU) {
- seqno = ni->ni_txseqs[tid] % IEEE80211_SEQ_RANGE;
- ni->ni_txseqs[tid]++;
- } else
- seqno = M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE;
+ if (m->m_flags & M_AMPDU_MPDU)
+ ieee80211_output_seqno_assign(ni, -1, m);
+
+ seqno = M_SEQNO_GET(m);
/* Set sequence number. */
txd->txdseq = htole16(seqno);
@@ -511,7 +510,7 @@ r92c_fill_tx_desc_raw(struct rtwn_softc *sc, struct ieee80211_node *ni,
rtwn_r92c_tx_setup_hwseq(sc, txd);
} else {
/* Set sequence number. */
- txd->txdseq |= htole16(M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE);
+ txd->txdseq |= htole16(M_SEQNO_GET(m));
}
}
diff --git a/sys/dev/rtwn/rtl8812a/r12a_tx.c b/sys/dev/rtwn/rtl8812a/r12a_tx.c
index acb238316559..6a7af0a9b674 100644
--- a/sys/dev/rtwn/rtl8812a/r12a_tx.c
+++ b/sys/dev/rtwn/rtl8812a/r12a_tx.c
@@ -101,12 +101,12 @@ r12a_tx_set_vht_bw(struct rtwn_softc *sc, void *buf, struct ieee80211_node *ni)
prim_chan = r12a_get_primary_channel(sc, ni->ni_chan);
- if (ieee80211_vht_check_tx_bw(ni, IEEE80211_STA_RX_BW_80)) {
+ if (ieee80211_vht_check_tx_bw(ni, NET80211_STA_RX_BW_80)) {
txd->txdw5 |= htole32(SM(R12A_TXDW5_DATA_BW,
R12A_TXDW5_DATA_BW80));
txd->txdw5 |= htole32(SM(R12A_TXDW5_DATA_PRIM_CHAN,
prim_chan));
- } else if (ieee80211_vht_check_tx_bw(ni, IEEE80211_STA_RX_BW_40)) {
+ } else if (ieee80211_vht_check_tx_bw(ni, NET80211_STA_RX_BW_40)) {
txd->txdw5 |= htole32(SM(R12A_TXDW5_DATA_BW,
R12A_TXDW5_DATA_BW40));
txd->txdw5 |= htole32(SM(R12A_TXDW5_DATA_PRIM_CHAN,
@@ -433,12 +433,9 @@ r12a_fill_tx_desc(struct rtwn_softc *sc, struct ieee80211_node *ni,
} else {
uint16_t seqno;
- if (m->m_flags & M_AMPDU_MPDU) {
- seqno = ni->ni_txseqs[tid];
- ni->ni_txseqs[tid]++;
- } else
- seqno = M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE;
-
+ if (m->m_flags & M_AMPDU_MPDU)
+ ieee80211_output_seqno_assign(ni, -1, m);
+ seqno = M_SEQNO_GET(m);
/* Set sequence number. */
txd->txdw9 |= htole32(SM(R12A_TXDW9_SEQ, seqno));
}
@@ -493,8 +490,7 @@ r12a_fill_tx_desc_raw(struct rtwn_softc *sc, struct ieee80211_node *ni,
txd->txdw3 |= htole32(SM(R12A_TXDW3_SEQ_SEL, uvp->id));
} else {
/* Set sequence number. */
- txd->txdw9 |= htole32(SM(R12A_TXDW9_SEQ,
- M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE));
+ txd->txdw9 |= htole32(SM(R12A_TXDW9_SEQ, M_SEQNO_GET(m)));
}
}
diff --git a/sys/dev/sound/pci/hda/hdaa_patches.c b/sys/dev/sound/pci/hda/hdaa_patches.c
index 8967cb49125c..91bb244578c7 100644
--- a/sys/dev/sound/pci/hda/hdaa_patches.c
+++ b/sys/dev/sound/pci/hda/hdaa_patches.c
@@ -362,8 +362,10 @@ hdac_pin_patch(struct hdaa_widget *w)
patch_str = "as=3 seq=15 color=Black loc=Left";
break;
}
- } else if (id == HDA_CODEC_ALC295 &&
- subid == FRAMEWORK_LAPTOP_0005_SUBVENDOR) {
+ } else if ((id == HDA_CODEC_ALC295 &&
+ subid == FRAMEWORK_LAPTOP_0005_SUBVENDOR) ||
+ (id == HDA_CODEC_ALC285 &&
+ subid == FRAMEWORK_LAPTOP_000D_SUBVENDOR)) {
switch (nid) {
case 20:
/*
diff --git a/sys/dev/sound/pci/hda/hdac.c b/sys/dev/sound/pci/hda/hdac.c
index 900578b73de4..80028063bb0d 100644
--- a/sys/dev/sound/pci/hda/hdac.c
+++ b/sys/dev/sound/pci/hda/hdac.c
@@ -133,6 +133,7 @@ static const struct {
{ HDA_INTEL_PCH, "Intel Ibex Peak", 0, 0 },
{ HDA_INTEL_PCH2, "Intel Ibex Peak", 0, 0 },
{ HDA_INTEL_ELLK, "Intel Elkhart Lake", 0, 0 },
+ { HDA_INTEL_ELLK2, "Intel Elkhart Lake", 0, 0 },
{ HDA_INTEL_JLK2, "Intel Jasper Lake", 0, 0 },
{ HDA_INTEL_BXTNP, "Intel Broxton-P", 0, 0 },
{ HDA_INTEL_SCH, "Intel SCH", 0, 0 },
@@ -1773,17 +1774,17 @@ hdac_detach(device_t dev)
struct hdac_softc *sc = device_get_softc(dev);
int i, error;
+ callout_drain(&sc->poll_callout);
+ hdac_irq_free(sc);
+ taskqueue_drain(taskqueue_thread, &sc->unsolq_task);
+
error = bus_generic_detach(dev);
if (error != 0)
return (error);
hdac_lock(sc);
- callout_stop(&sc->poll_callout);
hdac_reset(sc, false);
hdac_unlock(sc);
- callout_drain(&sc->poll_callout);
- taskqueue_drain(taskqueue_thread, &sc->unsolq_task);
- hdac_irq_free(sc);
for (i = 0; i < sc->num_ss; i++)
hdac_dma_free(sc, &sc->streams[i].bdl);
@@ -2206,4 +2207,4 @@ static driver_t hdac_driver = {
sizeof(struct hdac_softc),
};
-DRIVER_MODULE(snd_hda, pci, hdac_driver, NULL, NULL);
+DRIVER_MODULE_ORDERED(snd_hda, pci, hdac_driver, NULL, NULL, SI_ORDER_ANY);
diff --git a/sys/dev/sound/pci/hda/hdac.h b/sys/dev/sound/pci/hda/hdac.h
index 223434a214b1..c11e6b2d6810 100644
--- a/sys/dev/sound/pci/hda/hdac.h
+++ b/sys/dev/sound/pci/hda/hdac.h
@@ -66,6 +66,7 @@
#define HDA_INTEL_PCH HDA_MODEL_CONSTRUCT(INTEL, 0x3b56)
#define HDA_INTEL_PCH2 HDA_MODEL_CONSTRUCT(INTEL, 0x3b57)
#define HDA_INTEL_ELLK HDA_MODEL_CONSTRUCT(INTEL, 0x4b55)
+#define HDA_INTEL_ELLK2 HDA_MODEL_CONSTRUCT(INTEL, 0x4b58)
#define HDA_INTEL_JLK2 HDA_MODEL_CONSTRUCT(INTEL, 0x4dc8)
#define HDA_INTEL_BXTNP HDA_MODEL_CONSTRUCT(INTEL, 0x5a98)
#define HDA_INTEL_MACBOOKPRO92 HDA_MODEL_CONSTRUCT(INTEL, 0x7270)
@@ -535,6 +536,7 @@
#define FRAMEWORK_LAPTOP_0003_SUBVENDOR HDA_MODEL_CONSTRUCT(FRAMEWORK, 0x0003)
#define FRAMEWORK_LAPTOP_0005_SUBVENDOR HDA_MODEL_CONSTRUCT(FRAMEWORK, 0x0005)
#define FRAMEWORK_LAPTOP_0006_SUBVENDOR HDA_MODEL_CONSTRUCT(FRAMEWORK, 0x0006)
+#define FRAMEWORK_LAPTOP_000D_SUBVENDOR HDA_MODEL_CONSTRUCT(FRAMEWORK, 0x000d)
/* All codecs you can eat... */
#define HDA_CODEC_CONSTRUCT(vendor, id) \
diff --git a/sys/dev/sound/pcm/channel.h b/sys/dev/sound/pcm/channel.h
index fab182b22774..9ad21d219001 100644
--- a/sys/dev/sound/pcm/channel.h
+++ b/sys/dev/sound/pcm/channel.h
@@ -408,7 +408,7 @@ enum {
#define CHN_F_RESET (CHN_F_BUSY | CHN_F_DEAD | \
CHN_F_VIRTUAL | CHN_F_HAS_VCHAN | \
- CHN_F_VCHAN_DYNAMIC | \
+ CHN_F_VCHAN_DYNAMIC | CHN_F_NBIO | \
CHN_F_PASSTHROUGH | CHN_F_EXCLUSIVE)
#define CHN_F_MMAP_INVALID (CHN_F_DEAD | CHN_F_RUNNING)
diff --git a/sys/dev/sound/pcm/dsp.c b/sys/dev/sound/pcm/dsp.c
index aa6ad4a59778..da38f52021ae 100644
--- a/sys/dev/sound/pcm/dsp.c
+++ b/sys/dev/sound/pcm/dsp.c
@@ -299,7 +299,7 @@ dsp_close(void *data)
CHN_LOCK(rdch);
chn_abort(rdch); /* won't sleep */
rdch->flags &= ~(CHN_F_RUNNING | CHN_F_MMAP |
- CHN_F_DEAD | CHN_F_EXCLUSIVE);
+ CHN_F_DEAD | CHN_F_EXCLUSIVE | CHN_F_NBIO);
chn_reset(rdch, 0, 0);
chn_release(rdch);
if (rdch->flags & CHN_F_VIRTUAL) {
@@ -323,7 +323,7 @@ dsp_close(void *data)
CHN_LOCK(wrch);
chn_flush(wrch); /* may sleep */
wrch->flags &= ~(CHN_F_RUNNING | CHN_F_MMAP |
- CHN_F_DEAD | CHN_F_EXCLUSIVE);
+ CHN_F_DEAD | CHN_F_EXCLUSIVE | CHN_F_NBIO);
chn_reset(wrch, 0, 0);
chn_release(wrch);
if (wrch->flags & CHN_F_VIRTUAL) {
diff --git a/sys/dev/tpm/tpm20.c b/sys/dev/tpm/tpm20.c
index 876dd0bcc40d..067e7ccae8f9 100644
--- a/sys/dev/tpm/tpm20.c
+++ b/sys/dev/tpm/tpm20.c
@@ -25,8 +25,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
#include <sys/random.h>
+#include <dev/random/randomdev.h>
#include "tpm20.h"
@@ -184,6 +184,13 @@ tpm20_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
return (ENOTTY);
}
+#ifdef TPM_HARVEST
+static const struct random_source random_tpm = {
+ .rs_ident = "TPM",
+ .rs_source = RANDOM_PURE_TPM,
+};
+#endif
+
int
tpm20_init(struct tpm_sc *sc)
{
@@ -206,7 +213,7 @@ tpm20_init(struct tpm_sc *sc)
tpm20_release(sc);
#ifdef TPM_HARVEST
- random_harvest_register_source(RANDOM_PURE_TPM);
+ random_source_register(&random_tpm);
TIMEOUT_TASK_INIT(taskqueue_thread, &sc->harvest_task, 0,
tpm20_harvest, sc);
taskqueue_enqueue_timeout(taskqueue_thread, &sc->harvest_task, 0);
@@ -223,7 +230,7 @@ tpm20_release(struct tpm_sc *sc)
#ifdef TPM_HARVEST
if (device_is_attached(sc->dev))
taskqueue_drain_timeout(taskqueue_thread, &sc->harvest_task);
- random_harvest_deregister_source(RANDOM_PURE_TPM);
+ random_source_deregister(&random_tpm);
#endif
if (sc->buf != NULL)
diff --git a/sys/dev/tpm/tpm_tis_core.c b/sys/dev/tpm/tpm_tis_core.c
index d8421f8156c9..4159de4daf3b 100644
--- a/sys/dev/tpm/tpm_tis_core.c
+++ b/sys/dev/tpm/tpm_tis_core.c
@@ -97,6 +97,7 @@ tpmtis_attach(device_t dev)
{
struct tpm_sc *sc;
int result;
+ int poll = 0;
sc = device_get_softc(dev);
sc->dev = dev;
@@ -105,6 +106,12 @@ tpmtis_attach(device_t dev)
sx_init(&sc->dev_lock, "TPM driver lock");
sc->buf = malloc(TPM_BUFSIZE, M_TPM20, M_WAITOK);
+ resource_int_value("tpm", device_get_unit(dev), "use_polling", &poll);
+ if (poll != 0) {
+ device_printf(dev, "Using poll method to get TPM operation status \n");
+ goto skip_irq;
+ }
+
sc->irq_rid = 0;
sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
RF_ACTIVE | RF_SHAREABLE);
diff --git a/sys/dev/uart/uart_bus_pci.c b/sys/dev/uart/uart_bus_pci.c
index 14ac213066b8..22af8ee8663c 100644
--- a/sys/dev/uart/uart_bus_pci.c
+++ b/sys/dev/uart/uart_bus_pci.c
@@ -141,6 +141,8 @@ static const struct pci_id pci_ns8250_ids[] = {
0x10, 16384000 },
{ 0x1415, 0xc120, 0xffff, 0, "Oxford Semiconductor OXPCIe952 PCIe 16950 UART",
0x10 },
+{ 0x14a1, 0x0008, 0x14a1, 0x0008, "Systembase SB16C1058",
+ 0x10, 8 * DEFAULT_RCLK, },
{ 0x14e4, 0x160a, 0xffff, 0, "Broadcom TruManage UART", 0x10,
128 * DEFAULT_RCLK, 2},
{ 0x14e4, 0x4344, 0xffff, 0, "Sony Ericsson GC89 PC Card", 0x10},
diff --git a/sys/dev/ufshci/ufshci.h b/sys/dev/ufshci/ufshci.h
index b96d82ff836e..b055d2d2d769 100644
--- a/sys/dev/ufshci/ufshci.h
+++ b/sys/dev/ufshci/ufshci.h
@@ -716,6 +716,42 @@ struct ufshci_device_descriptor {
_Static_assert(sizeof(struct ufshci_device_descriptor) == 89,
"bad size for ufshci_device_descriptor");
+/* Defines the bit field of dExtendedUfsFeaturesSupport. */
+enum ufshci_desc_wb_ext_ufs_feature {
+ UFSHCI_DESC_EXT_UFS_FEATURE_FFU = (1 << 0),
+ UFSHCI_DESC_EXT_UFS_FEATURE_PSA = (1 << 1),
+ UFSHCI_DESC_EXT_UFS_FEATURE_DEV_LIFE_SPAN = (1 << 2),
+ UFSHCI_DESC_EXT_UFS_FEATURE_REFRESH_OP = (1 << 3),
+ UFSHCI_DESC_EXT_UFS_FEATURE_TOO_HIGH_TEMP = (1 << 4),
+ UFSHCI_DESC_EXT_UFS_FEATURE_TOO_LOW_TEMP = (1 << 5),
+ UFSHCI_DESC_EXT_UFS_FEATURE_EXT_TEMP = (1 << 6),
+ UFSHCI_DESC_EXT_UFS_FEATURE_HPB_SUPPORT = (1 << 7),
+ UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER = (1 << 8),
+ UFSHCI_DESC_EXT_UFS_FEATURE_PERF_THROTTLING = (1 << 9),
+ UFSHCI_DESC_EXT_UFS_FEATURE_ADVANCED_RPMB = (1 << 10),
+ UFSHCI_DESC_EXT_UFS_FEATURE_ZONED_UFS_EXTENSION = (1 << 11),
+ UFSHCI_DESC_EXT_UFS_FEATURE_DEV_LEVEL_EXCEPTION = (1 << 12),
+ UFSHCI_DESC_EXT_UFS_FEATURE_HID = (1 << 13),
+ UFSHCI_DESC_EXT_UFS_FEATURE_BARRIER = (1 << 14),
+ UFSHCI_DESC_EXT_UFS_FEATURE_CLEAR_ERROR_HISTORY = (1 << 15),
+ UFSHCI_DESC_EXT_UFS_FEATURE_EXT_IID = (1 << 16),
+ UFSHCI_DESC_EXT_UFS_FEATURE_FBO = (1 << 17),
+ UFSHCI_DESC_EXT_UFS_FEATURE_FAST_RECOVERY_MODE = (1 << 18),
+ UFSHCI_DESC_EXT_UFS_FEATURE_RPMB_VENDOR_CMD = (1 << 19),
+};
+
+/* Defines the bit field of bWriteBoosterBufferType. */
+enum ufshci_desc_wb_buffer_type {
+ UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED = 0x00,
+ UFSHCI_DESC_WB_BUF_TYPE_SINGLE_SHARED = 0x01,
+};
+
+/* Defines the bit field of bWriteBoosterBufferPreserveUserSpaceEn. */
+enum ufshci_desc_user_space_config {
+ UFSHCI_DESC_WB_BUF_USER_SPACE_REDUCTION = 0x00,
+ UFSHCI_DESC_WB_BUF_PRESERVE_USER_SPACE = 0x01,
+};
+
/*
* UFS Spec 4.1, section 14.1.5.3 "Configuration Descriptor"
* ConfigurationDescriptor use big-endian byte ordering.
@@ -1014,4 +1050,37 @@ enum ufshci_attributes {
UFSHCI_ATTR_B_REFRESH_METHOD = 0x2f,
};
+/* bAvailableWriteBoosterBufferSize codes (UFS WriteBooster available buffer
+ * left %) */
+enum ufshci_wb_available_buffer_Size {
+ UFSHCI_ATTR_WB_AVAILABLE_0 = 0x00, /* 0% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_10 = 0x01, /* 10% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_20 = 0x02, /* 20% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_30 = 0x03, /* 30% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_40 = 0x04, /* 40% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_50 = 0x05, /* 50% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_60 = 0x06, /* 60% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_70 = 0x07, /* 70% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_80 = 0x08, /* 80% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_90 = 0x09, /* 90% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_100 = 0x0A, /* 100% buffer remains */
+};
+
+/* bWriteBoosterBufferLifeTimeEst codes (UFS WriteBooster buffer life %) */
+enum ufshci_wb_lifetime {
+ UFSHCI_ATTR_WB_LIFE_DISABLED = 0x00, /* Info not available */
+ UFSHCI_ATTR_WB_LIFE_0_10 = 0x01, /* 0%–10% used */
+ UFSHCI_ATTR_WB_LIFE_10_20 = 0x02, /* 10%–20% used */
+ UFSHCI_ATTR_WB_LIFE_20_30 = 0x03, /* 20%–30% used */
+ UFSHCI_ATTR_WB_LIFE_30_40 = 0x04, /* 30%–40% used */
+ UFSHCI_ATTR_WB_LIFE_40_50 = 0x05, /* 40%–50% used */
+ UFSHCI_ATTR_WB_LIFE_50_60 = 0x06, /* 50%–60% used */
+ UFSHCI_ATTR_WB_LIFE_60_70 = 0x07, /* 60%–70% used */
+ UFSHCI_ATTR_WB_LIFE_70_80 = 0x08, /* 70%–80% used */
+ UFSHCI_ATTR_WB_LIFE_80_90 = 0x09, /* 80%–90% used */
+ UFSHCI_ATTR_WB_LIFE_90_100 = 0x0A, /* 90%–100% used */
+ UFSHCI_ATTR_WB_LIFE_EXCEEDED =
+ 0x0B, /* Exceeded estimated life (treat as WB disabled) */
+};
+
#endif /* __UFSHCI_H__ */
diff --git a/sys/dev/ufshci/ufshci_ctrlr.c b/sys/dev/ufshci/ufshci_ctrlr.c
index 37bd32665b2b..35663b480cfa 100644
--- a/sys/dev/ufshci/ufshci_ctrlr.c
+++ b/sys/dev/ufshci/ufshci_ctrlr.c
@@ -12,8 +12,108 @@
#include "ufshci_private.h"
#include "ufshci_reg.h"
+static void
+ufshci_ctrlr_fail(struct ufshci_controller *ctrlr)
+{
+ ctrlr->is_failed = true;
+
+ ufshci_req_queue_fail(ctrlr,
+ ctrlr->task_mgmt_req_queue.qops.get_hw_queue(
+ &ctrlr->task_mgmt_req_queue));
+ ufshci_req_queue_fail(ctrlr,
+ ctrlr->transfer_req_queue.qops.get_hw_queue(
+ &ctrlr->transfer_req_queue));
+}
+
+static void
+ufshci_ctrlr_start(struct ufshci_controller *ctrlr, bool resetting)
+{
+ TSENTER();
+
+ /*
+ * If `resetting` is true, we are on the reset path.
+ * Re-enable request queues here because ufshci_ctrlr_reset_task()
+ * disables them during reset.
+ */
+ if (resetting) {
+ if (ufshci_utmr_req_queue_enable(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+ if (ufshci_utr_req_queue_enable(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+ }
+
+ if (ufshci_ctrlr_send_nop(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+	/* Initialize UFS target device */
+ if (ufshci_dev_init(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Initialize Reference Clock */
+ if (ufshci_dev_init_reference_clock(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Initialize unipro */
+ if (ufshci_dev_init_unipro(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /*
+ * Initialize UIC Power Mode
+ * QEMU UFS devices do not support unipro and power mode.
+ */
+ if (!(ctrlr->quirks & UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE) &&
+ ufshci_dev_init_uic_power_mode(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Initialize UFS Power Mode */
+ if (ufshci_dev_init_ufs_power_mode(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Read Controller Descriptor (Device, Geometry) */
+ if (ufshci_dev_get_descriptor(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ if (ufshci_dev_config_write_booster(ctrlr)) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* TODO: Configure Write Protect */
+
+ /* TODO: Configure Background Operations */
+
+ /*
+ * If the reset is due to a timeout, it is already attached to the SIM
+ * and does not need to be attached again.
+ */
+ if (!resetting && ufshci_sim_attach(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ TSEXIT();
+}
+
static int
-ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
+ufshci_ctrlr_disable_host_ctrlr(struct ufshci_controller *ctrlr)
{
int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
sbintime_t delta_t = SBT_1US;
@@ -27,6 +127,35 @@ ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
ufshci_mmio_write_4(ctrlr, hce, hce);
}
+ /* Wait for the HCE flag to change */
+ while (1) {
+ hce = ufshci_mmio_read_4(ctrlr, hce);
+ if (!UFSHCIV(UFSHCI_HCE_REG_HCE, hce))
+ break;
+ if (timeout - ticks < 0) {
+ ufshci_printf(ctrlr,
+ "host controller failed to disable "
+ "within %d ms\n",
+ ctrlr->device_init_timeout_in_ms);
+ return (ENXIO);
+ }
+
+ pause_sbt("ufshci_disable_hce", delta_t, 0, C_PREL(1));
+ delta_t = min(SBT_1MS, delta_t * 3 / 2);
+ }
+
+ return (0);
+}
+
+static int
+ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
+{
+ int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
+ sbintime_t delta_t = SBT_1US;
+ uint32_t hce;
+
+ hce = ufshci_mmio_read_4(ctrlr, hce);
+
/* Enable UFS host controller */
hce |= UFSHCIM(UFSHCI_HCE_REG_HCE);
ufshci_mmio_write_4(ctrlr, hce, hce);
@@ -36,7 +165,7 @@ ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
* unstable, so we need to read the HCE value after some time after
* initialization is complete.
*/
- pause_sbt("ufshci_hce", ustosbt(100), 0, C_PREL(1));
+ pause_sbt("ufshci_enable_hce", ustosbt(100), 0, C_PREL(1));
/* Wait for the HCE flag to change */
while (1) {
@@ -51,17 +180,103 @@ ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
return (ENXIO);
}
- pause_sbt("ufshci_hce", delta_t, 0, C_PREL(1));
+ pause_sbt("ufshci_enable_hce", delta_t, 0, C_PREL(1));
delta_t = min(SBT_1MS, delta_t * 3 / 2);
}
return (0);
}
+static int
+ufshci_ctrlr_disable(struct ufshci_controller *ctrlr)
+{
+ int error;
+
+ /* Disable all interrupts */
+ ufshci_mmio_write_4(ctrlr, ie, 0);
+
+ error = ufshci_ctrlr_disable_host_ctrlr(ctrlr);
+ return (error);
+}
+
+static int
+ufshci_ctrlr_enable(struct ufshci_controller *ctrlr)
+{
+ uint32_t ie, hcs;
+ int error;
+
+ error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
+ if (error)
+ return (error);
+
+ /* Send DME_LINKSTARTUP command to start the link startup procedure */
+ error = ufshci_uic_send_dme_link_startup(ctrlr);
+ if (error)
+ return (error);
+
+ /*
+ * The device_present(UFSHCI_HCS_REG_DP) bit becomes true if the host
+ * controller has successfully received a Link Startup UIC command
+ * response and the UFS device has found a physical link to the
+ * controller.
+ */
+ hcs = ufshci_mmio_read_4(ctrlr, hcs);
+ if (!UFSHCIV(UFSHCI_HCS_REG_DP, hcs)) {
+ ufshci_printf(ctrlr, "UFS device not found\n");
+ return (ENXIO);
+ }
+
+ /* Enable additional interrupts by programming the IE register. */
+ ie = ufshci_mmio_read_4(ctrlr, ie);
+ ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE); /* UTR Completion */
+ ie |= UFSHCIM(UFSHCI_IE_REG_UEE); /* UIC Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_UTMRCE); /* UTMR Completion */
+ ie |= UFSHCIM(UFSHCI_IE_REG_DFEE); /* Device Fatal Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_UTPEE); /* UTP Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_HCFEE); /* Host Ctrlr Fatal Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_SBFEE); /* System Bus Fatal Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_CEFEE); /* Crypto Engine Fatal Error */
+ ufshci_mmio_write_4(ctrlr, ie, ie);
+
+ /* TODO: Initialize interrupt Aggregation Control Register (UTRIACR) */
+
+ return (0);
+}
+
+static int
+ufshci_ctrlr_hw_reset(struct ufshci_controller *ctrlr)
+{
+ int error;
+
+ error = ufshci_ctrlr_disable(ctrlr);
+ if (error)
+ return (error);
+
+ error = ufshci_ctrlr_enable(ctrlr);
+ return (error);
+}
+
+static void
+ufshci_ctrlr_reset_task(void *arg, int pending)
+{
+ struct ufshci_controller *ctrlr = arg;
+ int error;
+
+ /* Release resources */
+ ufshci_utmr_req_queue_disable(ctrlr);
+ ufshci_utr_req_queue_disable(ctrlr);
+
+ error = ufshci_ctrlr_hw_reset(ctrlr);
+ if (error)
+ return (ufshci_ctrlr_fail(ctrlr));
+
+ ufshci_ctrlr_start(ctrlr, true);
+}
+
int
ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
{
- uint32_t ver, cap, hcs, ie;
+ uint32_t ver, cap, ahit;
uint32_t timeout_period, retry_count;
int error;
@@ -114,44 +329,24 @@ ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
TUNABLE_INT_FETCH("hw.ufshci.retry_count", &retry_count);
ctrlr->retry_count = retry_count;
- /* Disable all interrupts */
- ufshci_mmio_write_4(ctrlr, ie, 0);
+ ctrlr->enable_aborts = 1;
+ if (ctrlr->quirks & UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK)
+ ctrlr->enable_aborts = 0;
+ else
+ TUNABLE_INT_FETCH("hw.ufshci.enable_aborts",
+ &ctrlr->enable_aborts);
- /* Enable Host Controller */
- error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
+ /* Reset the UFSHCI controller */
+ error = ufshci_ctrlr_hw_reset(ctrlr);
if (error)
return (error);
- /* Send DME_LINKSTARTUP command to start the link startup procedure */
- error = ufshci_uic_send_dme_link_startup(ctrlr);
- if (error)
- return (error);
+ /* Read the UECPA register to clear */
+ ufshci_mmio_read_4(ctrlr, uecpa);
- /*
- * The device_present(UFSHCI_HCS_REG_DP) bit becomes true if the host
- * controller has successfully received a Link Startup UIC command
- * response and the UFS device has found a physical link to the
- * controller.
- */
- hcs = ufshci_mmio_read_4(ctrlr, hcs);
- if (!UFSHCIV(UFSHCI_HCS_REG_DP, hcs)) {
- ufshci_printf(ctrlr, "UFS device not found\n");
- return (ENXIO);
- }
-
- /* Enable additional interrupts by programming the IE register. */
- ie = ufshci_mmio_read_4(ctrlr, ie);
- ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE); /* UTR Completion */
- ie |= UFSHCIM(UFSHCI_IE_REG_UEE); /* UIC Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_UTMRCE); /* UTMR Completion */
- ie |= UFSHCIM(UFSHCI_IE_REG_DFEE); /* Device Fatal Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_UTPEE); /* UTP Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_HCFEE); /* Host Ctrlr Fatal Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_SBFEE); /* System Bus Fatal Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_CEFEE); /* Crypto Engine Fatal Error */
- ufshci_mmio_write_4(ctrlr, ie, ie);
-
- /* TODO: Initialize interrupt Aggregation Control Register (UTRIACR) */
+	/* Disable Auto-hibernate */
+ ahit = 0;
+ ufshci_mmio_write_4(ctrlr, ahit, ahit);
/* Allocate and initialize UTP Task Management Request List. */
error = ufshci_utmr_req_queue_construct(ctrlr);
@@ -164,8 +359,19 @@ ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
return (error);
/* TODO: Separate IO and Admin slot */
- /* max_hw_pend_io is the number of slots in the transfer_req_queue */
- ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries;
+
+ /*
+ * max_hw_pend_io is the number of slots in the transfer_req_queue.
+ * Reduce num_entries by one to reserve an admin slot.
+ */
+ ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries - 1;
+
+ /* Create a thread for the taskqueue. */
+ ctrlr->taskqueue = taskqueue_create("ufshci_taskq", M_WAITOK,
+ taskqueue_thread_enqueue, &ctrlr->taskqueue);
+ taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "ufshci taskq");
+
+ TASK_INIT(&ctrlr->reset_task, 0, ufshci_ctrlr_reset_task, ctrlr);
return (0);
}
@@ -198,50 +404,21 @@ ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev)
bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id,
ctrlr->resource);
nores:
+ KASSERT(!mtx_owned(&ctrlr->uic_cmd_lock),
+ ("destroying uic_cmd_lock while still owned"));
mtx_destroy(&ctrlr->uic_cmd_lock);
+
+ KASSERT(!mtx_owned(&ctrlr->sc_mtx),
+ ("destroying sc_mtx while still owned"));
mtx_destroy(&ctrlr->sc_mtx);
return;
}
-int
+void
ufshci_ctrlr_reset(struct ufshci_controller *ctrlr)
{
- uint32_t ie;
- int error;
-
- /* Backup and disable all interrupts */
- ie = ufshci_mmio_read_4(ctrlr, ie);
- ufshci_mmio_write_4(ctrlr, ie, 0);
-
- /* Release resources */
- ufshci_utmr_req_queue_destroy(ctrlr);
- ufshci_utr_req_queue_destroy(ctrlr);
-
- /* Reset Host Controller */
- error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
- if (error)
- return (error);
-
- /* Send DME_LINKSTARTUP command to start the link startup procedure */
- error = ufshci_uic_send_dme_link_startup(ctrlr);
- if (error)
- return (error);
-
- /* Enable interrupts */
- ufshci_mmio_write_4(ctrlr, ie, ie);
-
- /* Allocate and initialize UTP Task Management Request List. */
- error = ufshci_utmr_req_queue_construct(ctrlr);
- if (error)
- return (error);
-
- /* Allocate and initialize UTP Transfer Request List or SQ/CQ. */
- error = ufshci_utr_req_queue_construct(ctrlr);
- if (error)
- return (error);
-
- return (0);
+ taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}
int
@@ -285,83 +462,6 @@ ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr)
return (0);
}
-static void
-ufshci_ctrlr_fail(struct ufshci_controller *ctrlr, bool admin_also)
-{
- printf("ufshci(4): ufshci_ctrlr_fail\n");
-
- ctrlr->is_failed = true;
-
- /* TODO: task_mgmt_req_queue should be handled as fail */
-
- ufshci_req_queue_fail(ctrlr,
- &ctrlr->transfer_req_queue.hwq[UFSHCI_SDB_Q]);
-}
-
-static void
-ufshci_ctrlr_start(struct ufshci_controller *ctrlr)
-{
- TSENTER();
-
- if (ufshci_ctrlr_send_nop(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Initialize UFS target drvice */
- if (ufshci_dev_init(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Initialize Reference Clock */
- if (ufshci_dev_init_reference_clock(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Initialize unipro */
- if (ufshci_dev_init_unipro(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /*
- * Initialize UIC Power Mode
- * QEMU UFS devices do not support unipro and power mode.
- */
- if (!(ctrlr->quirks & UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE) &&
- ufshci_dev_init_uic_power_mode(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Initialize UFS Power Mode */
- if (ufshci_dev_init_ufs_power_mode(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Read Controller Descriptor (Device, Geometry)*/
- if (ufshci_dev_get_descriptor(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* TODO: Configure Write Protect */
-
- /* TODO: Configure Background Operations */
-
- /* TODO: Configure Write Booster */
-
- if (ufshci_sim_attach(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- TSEXIT();
-}
-
void
ufshci_ctrlr_start_config_hook(void *arg)
{
@@ -371,9 +471,9 @@ ufshci_ctrlr_start_config_hook(void *arg)
if (ufshci_utmr_req_queue_enable(ctrlr) == 0 &&
ufshci_utr_req_queue_enable(ctrlr) == 0)
- ufshci_ctrlr_start(ctrlr);
+ ufshci_ctrlr_start(ctrlr, false);
else
- ufshci_ctrlr_fail(ctrlr, false);
+ ufshci_ctrlr_fail(ctrlr);
ufshci_sysctl_initialize_ctrlr(ctrlr);
config_intrhook_disestablish(&ctrlr->config_hook);
diff --git a/sys/dev/ufshci/ufshci_ctrlr_cmd.c b/sys/dev/ufshci/ufshci_ctrlr_cmd.c
index 71d163d998af..253f31a93c2e 100644
--- a/sys/dev/ufshci/ufshci_ctrlr_cmd.c
+++ b/sys/dev/ufshci/ufshci_ctrlr_cmd.c
@@ -15,7 +15,7 @@ ufshci_ctrlr_cmd_send_task_mgmt_request(struct ufshci_controller *ctrlr,
struct ufshci_request *req;
struct ufshci_task_mgmt_request_upiu *upiu;
- req = ufshci_allocate_request_vaddr(NULL, 0, M_WAITOK, cb_fn, cb_arg);
+ req = ufshci_allocate_request_vaddr(NULL, 0, M_NOWAIT, cb_fn, cb_arg);
req->request_size = sizeof(struct ufshci_task_mgmt_request_upiu);
req->response_size = sizeof(struct ufshci_task_mgmt_response_upiu);
diff --git a/sys/dev/ufshci/ufshci_dev.c b/sys/dev/ufshci/ufshci_dev.c
index a0e32914e2aa..975468e5156f 100644
--- a/sys/dev/ufshci/ufshci_dev.c
+++ b/sys/dev/ufshci/ufshci_dev.c
@@ -60,6 +60,14 @@ ufshci_dev_read_geometry_descriptor(struct ufshci_controller *ctrlr,
}
static int
+ufshci_dev_read_unit_descriptor(struct ufshci_controller *ctrlr, uint8_t lun,
+ struct ufshci_unit_descriptor *desc)
+{
+ return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_UNIT, lun, 0,
+ desc, sizeof(struct ufshci_unit_descriptor)));
+}
+
+static int
ufshci_dev_read_flag(struct ufshci_controller *ctrlr,
enum ufshci_flags flag_type, uint8_t *flag)
{
@@ -114,6 +122,61 @@ ufshci_dev_set_flag(struct ufshci_controller *ctrlr,
}
static int
+ufshci_dev_clear_flag(struct ufshci_controller *ctrlr,
+ enum ufshci_flags flag_type)
+{
+ struct ufshci_completion_poll_status status;
+ struct ufshci_query_param param;
+
+ param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ param.opcode = UFSHCI_QUERY_OPCODE_CLEAR_FLAG;
+ param.type = flag_type;
+ param.index = 0;
+ param.selector = 0;
+ param.value = 0;
+
+ status.done = 0;
+ ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
+ &status, param);
+ ufshci_completion_poll(&status);
+ if (status.error) {
+ ufshci_printf(ctrlr, "ufshci_dev_clear_flag failed!\n");
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static int
+ufshci_dev_read_attribute(struct ufshci_controller *ctrlr,
+ enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
+ uint64_t *value)
+{
+ struct ufshci_completion_poll_status status;
+ struct ufshci_query_param param;
+
+ param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
+ param.opcode = UFSHCI_QUERY_OPCODE_READ_ATTRIBUTE;
+ param.type = attr_type;
+ param.index = index;
+ param.selector = selector;
+ param.value = 0;
+
+ status.done = 0;
+ ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
+ &status, param);
+ ufshci_completion_poll(&status);
+ if (status.error) {
+ ufshci_printf(ctrlr, "ufshci_dev_read_attribute failed!\n");
+ return (ENXIO);
+ }
+
+ *value = status.cpl.response_upiu.query_response_upiu.value_64;
+
+ return (0);
+}
+
+static int
ufshci_dev_write_attribute(struct ufshci_controller *ctrlr,
enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
uint64_t value)
@@ -270,7 +333,7 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
*/
const uint32_t fast_mode = 1;
const uint32_t rx_bit_shift = 4;
- const uint32_t power_mode = (fast_mode << rx_bit_shift) | fast_mode;
+ uint32_t power_mode, peer_granularity;
/* Update lanes with available TX/RX lanes */
if (ufshci_uic_send_dme_get(ctrlr, PA_AvailTxDataLanes,
@@ -295,6 +358,20 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
ctrlr->rx_lanes))
return (ENXIO);
+ if (ctrlr->quirks & UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY) {
+ /* Before changing gears, first change the number of lanes. */
+ if (ufshci_uic_send_dme_get(ctrlr, PA_PWRMode, &power_mode))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode))
+ return (ENXIO);
+
+ /* Wait for power mode changed. */
+ if (ufshci_uic_power_mode_ready(ctrlr)) {
+ ufshci_reg_dump(ctrlr);
+ return (ENXIO);
+ }
+ }
+
/* Set HS-GEAR to max gear */
ctrlr->hs_gear = ctrlr->max_rx_hs_gear;
if (ufshci_uic_send_dme_set(ctrlr, PA_TxGear, ctrlr->hs_gear))
@@ -346,6 +423,7 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
return (ENXIO);
/* Set TX/RX PWRMode */
+ power_mode = (fast_mode << rx_bit_shift) | fast_mode;
if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode))
return (ENXIO);
@@ -366,7 +444,8 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
pause_sbt("ufshci", ustosbt(1250), 0, C_PREL(1));
/* Test with dme_peer_get to make sure there are no errors. */
- if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity, NULL))
+ if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity,
+ &peer_granularity))
return (ENXIO);
}
@@ -398,7 +477,7 @@ ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr)
return (error);
ver = be16toh(device->dev_desc.wSpecVersion);
- ufshci_printf(ctrlr, "UFS device spec version %u.%u%u\n",
+ ufshci_printf(ctrlr, "UFS device spec version %u.%u.%u\n",
UFSHCIV(UFSHCI_VER_REG_MJR, ver), UFSHCIV(UFSHCI_VER_REG_MNR, ver),
UFSHCIV(UFSHCI_VER_REG_VS, ver));
ufshci_printf(ctrlr, "%u enabled LUNs found\n",
@@ -426,3 +505,272 @@ ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr)
return (0);
}
+
+static int
+ufshci_dev_enable_write_booster(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ int error;
+
+ /* Enable WriteBooster */
+ error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN);
+ if (error) {
+ ufshci_printf(ctrlr, "Failed to enable WriteBooster\n");
+ return (error);
+ }
+ dev->is_wb_enabled = true;
+
+ /* Enable WriteBooster buffer flush during hibernate */
+ error = ufshci_dev_set_flag(ctrlr,
+ UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE);
+ if (error) {
+ ufshci_printf(ctrlr,
+ "Failed to enable WriteBooster buffer flush during hibernate\n");
+ return (error);
+ }
+
+ /* Enable WriteBooster buffer flush */
+ error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN);
+ if (error) {
+ ufshci_printf(ctrlr,
+ "Failed to enable WriteBooster buffer flush\n");
+ return (error);
+ }
+ dev->is_wb_flush_enabled = true;
+
+ return (0);
+}
+
+static int
+ufshci_dev_disable_write_booster(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ int error;
+
+ /* Disable WriteBooster buffer flush */
+ error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN);
+ if (error) {
+ ufshci_printf(ctrlr,
+ "Failed to disable WriteBooster buffer flush\n");
+ return (error);
+ }
+ dev->is_wb_flush_enabled = false;
+
+ /* Disable WriteBooster buffer flush during hibernate */
+ error = ufshci_dev_clear_flag(ctrlr,
+ UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE);
+ if (error) {
+ ufshci_printf(ctrlr,
+ "Failed to disable WriteBooster buffer flush during hibernate\n");
+ return (error);
+ }
+
+ /* Disable WriteBooster */
+ error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN);
+ if (error) {
+ ufshci_printf(ctrlr, "Failed to disable WriteBooster\n");
+ return (error);
+ }
+ dev->is_wb_enabled = false;
+
+ return (0);
+}
+
+static int
+ufshci_dev_is_write_booster_buffer_life_time_left(
+ struct ufshci_controller *ctrlr, bool *is_life_time_left)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ uint8_t buffer_lun;
+ uint64_t life_time;
+ uint32_t error;
+
+ if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED)
+ buffer_lun = dev->wb_dedicated_lu;
+ else
+ buffer_lun = 0;
+
+ error = ufshci_dev_read_attribute(ctrlr,
+ UFSHCI_ATTR_B_WB_BUFFER_LIFE_TIME_EST, buffer_lun, 0, &life_time);
+ if (error)
+ return (error);
+
+ *is_life_time_left = (life_time != UFSHCI_ATTR_WB_LIFE_EXCEEDED);
+
+ return (0);
+}
+
+/*
+ * This function is not yet in use. It will be used when suspend/resume is
+ * implemented.
+ */
+static __unused int
+ufshci_dev_need_write_booster_buffer_flush(struct ufshci_controller *ctrlr,
+ bool *need_flush)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ bool is_life_time_left = false;
+ uint64_t available_buffer_size, current_buffer_size;
+ uint8_t buffer_lun;
+ uint32_t error;
+
+ *need_flush = false;
+
+ if (!dev->is_wb_enabled)
+ return (0);
+
+ error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr,
+ &is_life_time_left);
+ if (error)
+ return (error);
+
+ if (!is_life_time_left)
+ return (ufshci_dev_disable_write_booster(ctrlr));
+
+ if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED)
+ buffer_lun = dev->wb_dedicated_lu;
+ else
+ buffer_lun = 0;
+
+ error = ufshci_dev_read_attribute(ctrlr,
+ UFSHCI_ATTR_B_AVAILABLE_WB_BUFFER_SIZE, buffer_lun, 0,
+ &available_buffer_size);
+ if (error)
+ return (error);
+
+ switch (dev->wb_user_space_config_option) {
+ case UFSHCI_DESC_WB_BUF_USER_SPACE_REDUCTION:
+ *need_flush = (available_buffer_size <=
+ UFSHCI_ATTR_WB_AVAILABLE_10);
+ break;
+ case UFSHCI_DESC_WB_BUF_PRESERVE_USER_SPACE:
+ /*
+ * In PRESERVE USER SPACE mode, flush should be performed when
+ * the current buffer is greater than 0 and the available buffer
+ * below write_booster_flush_threshold is left.
+ */
+ error = ufshci_dev_read_attribute(ctrlr,
+ UFSHCI_ATTR_D_CURRENT_WB_BUFFER_SIZE, buffer_lun, 0,
+ &current_buffer_size);
+ if (error)
+ return (error);
+
+ if (current_buffer_size == 0)
+ return (0);
+
+ *need_flush = (available_buffer_size <
+ dev->write_booster_flush_threshold);
+ break;
+ default:
+ ufshci_printf(ctrlr,
+ "Invalid bWriteBoosterBufferPreserveUserSpaceEn value");
+ return (EINVAL);
+ }
+
+ /*
+ * TODO: Need to handle WRITEBOOSTER_FLUSH_NEEDED exception case from
+ * wExceptionEventStatus attribute.
+ */
+
+ return (0);
+}
+
+int
+ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ uint32_t extended_ufs_feature_support;
+ uint32_t alloc_units;
+ struct ufshci_unit_descriptor unit_desc;
+ uint8_t lun;
+ bool is_life_time_left;
+ uint32_t mega_byte = 1024 * 1024;
+ uint32_t error = 0;
+
+ extended_ufs_feature_support = be32toh(
+ dev->dev_desc.dExtendedUfsFeaturesSupport);
+ if (!(extended_ufs_feature_support &
+ UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER)) {
+ /* This device does not support Write Booster */
+ return (0);
+ }
+
+ if (ufshci_dev_enable_write_booster(ctrlr))
+ return (0);
+
+ /* Get WriteBooster buffer parameters */
+ dev->wb_buffer_type = dev->dev_desc.bWriteBoosterBufferType;
+ dev->wb_user_space_config_option =
+ dev->dev_desc.bWriteBoosterBufferPreserveUserSpaceEn;
+
+ /*
+ * Find the size of the write buffer.
+ * With LU-dedicated (00h), the WriteBooster buffer is assigned
+ * exclusively to one chosen LU (not one-per-LU), whereas Shared (01h)
+ * uses a single device-wide buffer shared by multiple LUs.
+ */
+ if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_SINGLE_SHARED) {
+ alloc_units = be32toh(
+ dev->dev_desc.dNumSharedWriteBoosterBufferAllocUnits);
+ ufshci_printf(ctrlr,
+ "WriteBooster buffer type = Shared, alloc_units=%d\n",
+ alloc_units);
+ } else if (dev->wb_buffer_type ==
+ UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED) {
+ ufshci_printf(ctrlr, "WriteBooster buffer type = Dedicated\n");
+ for (lun = 0; lun < ctrlr->max_lun_count; lun++) {
+ /* Find a dedicated buffer using a unit descriptor */
+ if (ufshci_dev_read_unit_descriptor(ctrlr, lun,
+ &unit_desc))
+ continue;
+
+ alloc_units = be32toh(
+ unit_desc.dLUNumWriteBoosterBufferAllocUnits);
+ if (alloc_units) {
+ dev->wb_dedicated_lu = lun;
+ break;
+ }
+ }
+ } else {
+ ufshci_printf(ctrlr,
+ "Not supported WriteBooster buffer type: 0x%x\n",
+ dev->wb_buffer_type);
+ goto out;
+ }
+
+ if (alloc_units == 0) {
+ ufshci_printf(ctrlr, "The WriteBooster buffer size is zero\n");
+ goto out;
+ }
+
+ dev->wb_buffer_size_mb = alloc_units *
+ dev->geo_desc.bAllocationUnitSize *
+ (be32toh(dev->geo_desc.dSegmentSize)) /
+ (mega_byte / UFSHCI_SECTOR_SIZE);
+
+ /* Set to flush when 40% of the available buffer size remains */
+ dev->write_booster_flush_threshold = UFSHCI_ATTR_WB_AVAILABLE_40;
+
+ /*
+ * Check if WriteBooster Buffer lifetime is left.
+ * WriteBooster Buffer lifetime — percent of life used based on P/E
+ * cycles. If "preserve user space" is enabled, writes to normal user
+ * space also consume WB life since the area is shared.
+ */
+ error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr,
+ &is_life_time_left);
+ if (error)
+ goto out;
+
+ if (!is_life_time_left) {
+ ufshci_printf(ctrlr,
+ "There is no WriteBooster buffer life time left.\n");
+ goto out;
+ }
+
+ ufshci_printf(ctrlr, "WriteBooster Enabled\n");
+ return (0);
+out:
+ ufshci_dev_disable_write_booster(ctrlr);
+ return (error);
+}
diff --git a/sys/dev/ufshci/ufshci_pci.c b/sys/dev/ufshci/ufshci_pci.c
index 65a69ee0b518..992026fd4f4d 100644
--- a/sys/dev/ufshci/ufshci_pci.c
+++ b/sys/dev/ufshci/ufshci_pci.c
@@ -49,11 +49,13 @@ static struct _pcsid {
uint32_t ref_clk;
uint32_t quirks;
} pci_ids[] = { { 0x131b36, "QEMU UFS Host Controller", UFSHCI_REF_CLK_19_2MHz,
- UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE },
+ UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE |
+ UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK },
{ 0x98fa8086, "Intel Lakefield UFS Host Controller",
UFSHCI_REF_CLK_19_2MHz,
UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE |
- UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE },
+ UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE |
+ UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY },
{ 0x54ff8086, "Intel UFS Host Controller", UFSHCI_REF_CLK_19_2MHz },
{ 0x00000000, NULL } };
diff --git a/sys/dev/ufshci/ufshci_private.h b/sys/dev/ufshci/ufshci_private.h
index 1a2742ae2e80..ec388c06e248 100644
--- a/sys/dev/ufshci/ufshci_private.h
+++ b/sys/dev/ufshci/ufshci_private.h
@@ -46,6 +46,8 @@ MALLOC_DECLARE(M_UFSHCI);
#define UFSHCI_UTR_ENTRIES (32)
#define UFSHCI_UTRM_ENTRIES (8)
+#define UFSHCI_SECTOR_SIZE (512)
+
struct ufshci_controller;
struct ufshci_completion_poll_status {
@@ -66,7 +68,6 @@ struct ufshci_request {
bool is_admin;
int32_t retries;
bool payload_valid;
- bool timeout;
bool spare[2]; /* Future use */
STAILQ_ENTRY(ufshci_request) stailq;
};
@@ -80,6 +81,7 @@ enum ufshci_slot_state {
};
struct ufshci_tracker {
+ TAILQ_ENTRY(ufshci_tracker) tailq;
struct ufshci_request *req;
struct ufshci_req_queue *req_queue;
struct ufshci_hw_queue *hwq;
@@ -119,6 +121,8 @@ struct ufshci_qops {
struct ufshci_req_queue *req_queue);
int (*enable)(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue);
+ void (*disable)(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue);
int (*reserve_slot)(struct ufshci_req_queue *req_queue,
struct ufshci_tracker **tr);
int (*reserve_admin_slot)(struct ufshci_req_queue *req_queue,
@@ -135,16 +139,27 @@ struct ufshci_qops {
#define UFSHCI_SDB_Q 0 /* Queue number for a single doorbell queue */
+enum ufshci_recovery {
+ RECOVERY_NONE = 0, /* Normal operations */
+ RECOVERY_WAITING, /* waiting for the reset to complete */
+};
+
/*
* Generic queue container used by both SDB (fixed 32-slot bitmap) and MCQ
* (ring buffer) modes. Fields are shared; some such as sq_head, sq_tail and
* cq_head are not used in SDB but used in MCQ.
*/
struct ufshci_hw_queue {
+ struct ufshci_controller *ctrlr;
+ struct ufshci_req_queue *req_queue;
uint32_t id;
int domain;
int cpu;
+ struct callout timer; /* recovery lock */
+ bool timer_armed; /* recovery lock */
+ enum ufshci_recovery recovery_state; /* recovery lock */
+
union {
struct ufshci_utp_xfer_req_desc *utrd;
struct ufshci_utp_task_mgmt_req_desc *utmrd;
@@ -159,6 +174,9 @@ struct ufshci_hw_queue {
uint32_t num_entries;
uint32_t num_trackers;
+ TAILQ_HEAD(, ufshci_tracker) free_tr;
+ TAILQ_HEAD(, ufshci_tracker) outstanding_tr;
+
/*
* A Request List using the single doorbell method uses a dedicated
* ufshci_tracker, one per slot.
@@ -175,7 +193,13 @@ struct ufshci_hw_queue {
int64_t num_retries;
int64_t num_failures;
+ /*
+ * Each lock may be acquired independently.
+ * When both are required, acquire them in this order to avoid
+ * deadlocks. (recovery_lock -> qlock)
+ */
struct mtx_padalign qlock;
+ struct mtx_padalign recovery_lock;
};
struct ufshci_req_queue {
@@ -214,6 +238,15 @@ struct ufshci_device {
struct ufshci_geometry_descriptor geo_desc;
uint32_t unipro_version;
+
+ /* WriteBooster */
+ bool is_wb_enabled;
+ bool is_wb_flush_enabled;
+ uint32_t wb_buffer_type;
+ uint32_t wb_buffer_size_mb;
+ uint32_t wb_user_space_config_option;
+ uint8_t wb_dedicated_lu;
+ uint32_t write_booster_flush_threshold;
};
/*
@@ -229,6 +262,10 @@ struct ufshci_controller {
2 /* Need an additional 200 ms of PA_TActivate */
#define UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE \
4 /* Need to wait 1250us after power mode change */
+#define UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY \
+ 8 /* Need to change the number of lanes before changing HS-GEAR. */
+#define UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK \
+ 16 /* QEMU does not support Task Management Request */
uint32_t ref_clk;
@@ -252,6 +289,9 @@ struct ufshci_controller {
/* Fields for tracking progress during controller initialization. */
struct intr_config_hook config_hook;
+ struct task reset_task;
+ struct taskqueue *taskqueue;
+
/* For shared legacy interrupt. */
int rid;
struct resource *res;
@@ -260,6 +300,8 @@ struct ufshci_controller {
uint32_t major_version;
uint32_t minor_version;
+ uint32_t enable_aborts;
+
uint32_t num_io_queues;
uint32_t max_hw_pend_io;
@@ -333,7 +375,7 @@ void ufshci_sim_detach(struct ufshci_controller *ctrlr);
/* Controller */
int ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev);
void ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev);
-int ufshci_ctrlr_reset(struct ufshci_controller *ctrlr);
+void ufshci_ctrlr_reset(struct ufshci_controller *ctrlr);
/* ctrlr defined as void * to allow use with config_intrhook. */
void ufshci_ctrlr_start_config_hook(void *arg);
void ufshci_ctrlr_poll(struct ufshci_controller *ctrlr);
@@ -356,6 +398,7 @@ int ufshci_dev_init_unipro(struct ufshci_controller *ctrlr);
int ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr);
int ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr);
int ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr);
+int ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr);
/* Controller Command */
void ufshci_ctrlr_cmd_send_task_mgmt_request(struct ufshci_controller *ctrlr,
@@ -375,7 +418,9 @@ int ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr);
int ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr);
void ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr);
void ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr);
+void ufshci_utmr_req_queue_disable(struct ufshci_controller *ctrlr);
int ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr);
+void ufshci_utr_req_queue_disable(struct ufshci_controller *ctrlr);
int ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr);
void ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
struct ufshci_hw_queue *hwq);
@@ -391,6 +436,8 @@ void ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue);
struct ufshci_hw_queue *ufshci_req_sdb_get_hw_queue(
struct ufshci_req_queue *req_queue);
+void ufshci_req_sdb_disable(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
@@ -476,13 +523,12 @@ _ufshci_allocate_request(const int how, ufshci_cb_fn_t cb_fn, void *cb_arg)
struct ufshci_request *req;
KASSERT(how == M_WAITOK || how == M_NOWAIT,
- ("nvme_allocate_request: invalid how %d", how));
+ ("ufshci_allocate_request: invalid how %d", how));
req = malloc(sizeof(*req), M_UFSHCI, how | M_ZERO);
if (req != NULL) {
req->cb_fn = cb_fn;
req->cb_arg = cb_arg;
- req->timeout = true;
}
return (req);
}
diff --git a/sys/dev/ufshci/ufshci_reg.h b/sys/dev/ufshci/ufshci_reg.h
index 6c9b3e2c8c04..6d5768505102 100644
--- a/sys/dev/ufshci/ufshci_reg.h
+++ b/sys/dev/ufshci/ufshci_reg.h
@@ -274,7 +274,7 @@ struct ufshci_registers {
#define UFSHCI_HCS_REG_UTMRLRDY_MASK (0x1)
#define UFSHCI_HCS_REG_UCRDY_SHIFT (3)
#define UFSHCI_HCS_REG_UCRDY_MASK (0x1)
-#define UFSHCI_HCS_REG_UPMCRS_SHIFT (7)
+#define UFSHCI_HCS_REG_UPMCRS_SHIFT (8)
#define UFSHCI_HCS_REG_UPMCRS_MASK (0x7)
#define UFSHCI_HCS_REG_UTPEC_SHIFT (12)
#define UFSHCI_HCS_REG_UTPEC_MASK (0xF)
diff --git a/sys/dev/ufshci/ufshci_req_queue.c b/sys/dev/ufshci/ufshci_req_queue.c
index bb6efa6d2ccc..7aa164d00bec 100644
--- a/sys/dev/ufshci/ufshci_req_queue.c
+++ b/sys/dev/ufshci/ufshci_req_queue.c
@@ -24,6 +24,7 @@ static const struct ufshci_qops sdb_utmr_qops = {
.destroy = ufshci_req_sdb_destroy,
.get_hw_queue = ufshci_req_sdb_get_hw_queue,
.enable = ufshci_req_sdb_enable,
+ .disable = ufshci_req_sdb_disable,
.reserve_slot = ufshci_req_sdb_reserve_slot,
.reserve_admin_slot = ufshci_req_sdb_reserve_slot,
.ring_doorbell = ufshci_req_sdb_utmr_ring_doorbell,
@@ -38,6 +39,7 @@ static const struct ufshci_qops sdb_utr_qops = {
.destroy = ufshci_req_sdb_destroy,
.get_hw_queue = ufshci_req_sdb_get_hw_queue,
.enable = ufshci_req_sdb_enable,
+ .disable = ufshci_req_sdb_disable,
.reserve_slot = ufshci_req_sdb_reserve_slot,
.reserve_admin_slot = ufshci_req_sdb_reserve_slot,
.ring_doorbell = ufshci_req_sdb_utr_ring_doorbell,
@@ -74,6 +76,13 @@ ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr)
&ctrlr->task_mgmt_req_queue);
}
+void
+ufshci_utmr_req_queue_disable(struct ufshci_controller *ctrlr)
+{
+ ctrlr->task_mgmt_req_queue.qops.disable(ctrlr,
+ &ctrlr->task_mgmt_req_queue);
+}
+
int
ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr)
{
@@ -109,6 +118,13 @@ ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr)
&ctrlr->transfer_req_queue);
}
+void
+ufshci_utr_req_queue_disable(struct ufshci_controller *ctrlr)
+{
+ ctrlr->transfer_req_queue.qops.disable(ctrlr,
+ &ctrlr->transfer_req_queue);
+}
+
int
ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr)
{
@@ -226,31 +242,30 @@ void
ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
{
struct ufshci_req_queue *req_queue = tr->req_queue;
+ struct ufshci_hw_queue *hwq = tr->hwq;
struct ufshci_request *req = tr->req;
struct ufshci_completion cpl;
uint8_t ocs;
bool retry, error, retriable;
- mtx_assert(&tr->hwq->qlock, MA_NOTOWNED);
+ mtx_assert(&hwq->qlock, MA_NOTOWNED);
/* Copy the response from the Request Descriptor or UTP Command
* Descriptor. */
+ cpl.size = tr->response_size;
if (req_queue->is_task_mgmt) {
- cpl.size = tr->response_size;
memcpy(&cpl.response_upiu,
- (void *)tr->hwq->utmrd[tr->slot_num].response_upiu,
- cpl.size);
+ (void *)hwq->utmrd[tr->slot_num].response_upiu, cpl.size);
- ocs = tr->hwq->utmrd[tr->slot_num].overall_command_status;
+ ocs = hwq->utmrd[tr->slot_num].overall_command_status;
} else {
bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- cpl.size = tr->response_size;
memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu,
cpl.size);
- ocs = tr->hwq->utrd[tr->slot_num].overall_command_status;
+ ocs = hwq->utrd[tr->slot_num].overall_command_status;
}
error = ufshci_req_queue_response_is_error(req_queue, ocs,
@@ -262,9 +277,9 @@ ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
retry = error && retriable &&
req->retries < req_queue->ctrlr->retry_count;
if (retry)
- tr->hwq->num_retries++;
+ hwq->num_retries++;
if (error && req->retries >= req_queue->ctrlr->retry_count && retriable)
- tr->hwq->num_failures++;
+ hwq->num_failures++;
KASSERT(tr->req, ("there is no request assigned to the tracker\n"));
KASSERT(cpl.response_upiu.header.task_tag ==
@@ -282,7 +297,7 @@ ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
req->cb_fn(req->cb_arg, &cpl, error);
}
- mtx_lock(&tr->hwq->qlock);
+ mtx_lock(&hwq->qlock);
/* Clear the UTRL Completion Notification register */
req_queue->qops.clear_cpl_ntf(req_queue->ctrlr, tr);
@@ -301,6 +316,9 @@ ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
ufshci_free_request(req);
tr->req = NULL;
tr->slot_state = UFSHCI_SLOT_STATE_FREE;
+
+ TAILQ_REMOVE(&hwq->outstanding_tr, tr, tailq);
+ TAILQ_INSERT_HEAD(&hwq->free_tr, tr, tailq);
}
mtx_unlock(&tr->hwq->qlock);
@@ -309,7 +327,16 @@ ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
bool
ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue)
{
- return (req_queue->qops.process_cpl(req_queue));
+ struct ufshci_hw_queue *hwq;
+ bool done;
+
+ hwq = req_queue->qops.get_hw_queue(req_queue);
+
+ mtx_lock(&hwq->recovery_lock);
+ done = req_queue->qops.process_cpl(req_queue);
+ mtx_unlock(&hwq->recovery_lock);
+
+ return (done);
}
static void
@@ -427,6 +454,225 @@ ufshci_req_queue_fill_utr_descriptor(struct ufshci_utp_xfer_req_desc *desc,
desc->prdt_length = prdt_entry_cnt;
}
+static void
+ufshci_req_queue_timeout_recovery(struct ufshci_controller *ctrlr,
+ struct ufshci_hw_queue *hwq)
+{
+ /* TODO: Step 2. Logical unit reset */
+ /* TODO: Step 3. Target device reset */
+ /* TODO: Step 4. Bus reset */
+
+ /*
+ * Step 5. All previous commands were timeout.
+ * Recovery failed, reset the host controller.
+ */
+ ufshci_printf(ctrlr,
+ "Recovery step 5: Resetting controller due to a timeout.\n");
+ hwq->recovery_state = RECOVERY_WAITING;
+
+ ufshci_ctrlr_reset(ctrlr);
+}
+
+static void
+ufshci_abort_complete(void *arg, const struct ufshci_completion *status,
+ bool error)
+{
+ struct ufshci_tracker *tr = arg;
+
+ /*
+ * We still need to check the active tracker array, to cover race where
+ * I/O timed out at same time controller was completing the I/O. An
+ * abort request always is on the Task Management Request queue, but
+ * affects either a Task Management Request or an I/O (UTRL) queue, so
+ * take the appropriate queue lock for the original command's queue,
+ * since we'll need it to avoid races with the completion code and to
+ * complete the command manually.
+ */
+ mtx_lock(&tr->hwq->qlock);
+ if (tr->slot_state != UFSHCI_SLOT_STATE_FREE) {
+ mtx_unlock(&tr->hwq->qlock);
+ /*
+ * An I/O has timed out, and the controller was unable to abort
+ * it for some reason. And we've not processed a completion for
+ * it yet. Construct a fake completion status, and then complete
+ * the I/O's tracker manually.
+ */
+ ufshci_printf(tr->hwq->ctrlr,
+ "abort task request failed, aborting task manually\n");
+ ufshci_req_queue_manual_complete_tracker(tr,
+ UFSHCI_DESC_ABORTED, UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
+
+ if ((status->response_upiu.task_mgmt_response_upiu
+ .output_param1 ==
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_COMPLETE) ||
+ (status->response_upiu.task_mgmt_response_upiu
+ .output_param1 ==
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_SUCCEEDED)) {
+ ufshci_printf(tr->hwq->ctrlr,
+ "Warning: the abort task request completed \
+ successfully, but the original task is still incomplete.");
+ return;
+ }
+
+ /* Abort Task failed. Perform recovery steps 2-5 */
+ ufshci_req_queue_timeout_recovery(tr->hwq->ctrlr, tr->hwq);
+ } else {
+ mtx_unlock(&tr->hwq->qlock);
+ }
+}
+
+static void
+ufshci_req_queue_timeout(void *arg)
+{
+ struct ufshci_hw_queue *hwq = arg;
+ struct ufshci_controller *ctrlr = hwq->ctrlr;
+ struct ufshci_tracker *tr;
+ sbintime_t now;
+ bool idle = true;
+ bool fast;
+
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+
+ /*
+ * If the controller is failed, then stop polling. This ensures that any
+ * failure processing that races with the hwq timeout will fail safely.
+ */
+ if (ctrlr->is_failed) {
+ ufshci_printf(ctrlr,
+ "Failed controller, stopping watchdog timeout.\n");
+ hwq->timer_armed = false;
+ return;
+ }
+
+ /*
+ * Shutdown condition: We set hwq->timer_armed to false in
+ * ufshci_req_sdb_destroy before calling callout_drain. When we call
+ * that, this routine might get called one last time. Exit w/o setting a
+ * timeout. None of the watchdog stuff needs to be done since we're
+ * destroying the hwq.
+ */
+ if (!hwq->timer_armed) {
+ ufshci_printf(ctrlr,
+ "Timeout fired during ufshci_utr_req_queue_destroy\n");
+ return;
+ }
+
+ switch (hwq->recovery_state) {
+ case RECOVERY_NONE:
+ /*
+ * See if there's any recovery needed. First, do a fast check to
+ * see if anything could have timed out. If not, then skip
+ * everything else.
+ */
+ fast = false;
+ mtx_lock(&hwq->qlock);
+ now = getsbinuptime();
+ TAILQ_FOREACH(tr, &hwq->outstanding_tr, tailq) {
+ /*
+ * If the first real transaction is not in timeout, then
+ * we're done. Otherwise, we try recovery.
+ */
+ idle = false;
+ if (now <= tr->deadline)
+ fast = true;
+ break;
+ }
+ mtx_unlock(&hwq->qlock);
+ if (idle || fast)
+ break;
+
+ /*
+ * There's a stale transaction at the start of the queue whose
+ * deadline has passed. Poll the completions as a last-ditch
+ * effort in case an interrupt has been missed.
+ */
+ hwq->req_queue->qops.process_cpl(hwq->req_queue);
+
+ /*
+ * Now that we've run the ISR, re-check to see if there's any
+ * timed out commands and abort them or reset the card if so.
+ */
+ mtx_lock(&hwq->qlock);
+ idle = true;
+ TAILQ_FOREACH(tr, &hwq->outstanding_tr, tailq) {
+ /*
+ * If we know this tracker hasn't timed out, we also
+ * know all subsequent ones haven't timed out. The tr
+ * queue is in submission order and all normal commands
+ * in a queue have the same timeout (or the timeout was
+ * changed by the user, but we eventually timeout then).
+ */
+ idle = false;
+ if (now <= tr->deadline)
+ break;
+
+ /*
+ * Timeout recovery is performed in five steps. If
+ * recovery fails at any step, the process continues to
+ * the next step:
+ *
+ * Step 1. Abort task
+ * Step 2. Logical unit reset (TODO)
+ * Step 3. Target device reset (TODO)
+ * Step 4. Bus reset (TODO)
+ * Step 5. Host controller reset
+ *
+ * If the timeout occurred in the Task Management
+ * Request queue, ignore Step 1.
+ */
+ if (ctrlr->enable_aborts &&
+ !hwq->req_queue->is_task_mgmt &&
+ tr->req->cb_fn != ufshci_abort_complete) {
+ /*
+ * Step 1. Timeout expired, abort the task.
+ *
+ * This isn't an abort command, ask for a
+ * hardware abort. This goes to the Task
+ * Management Request queue which will reset the
+ * task if it times out.
+ */
+ ufshci_printf(ctrlr,
+ "Recovery step 1: Timeout occurred. aborting the task(%d).\n",
+ tr->req->request_upiu.header.task_tag);
+ ufshci_ctrlr_cmd_send_task_mgmt_request(ctrlr,
+ ufshci_abort_complete, tr,
+ UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK,
+ tr->req->request_upiu.header.lun,
+ tr->req->request_upiu.header.task_tag, 0);
+ } else {
+ /* Recovery Step 2-5 */
+ ufshci_req_queue_timeout_recovery(ctrlr, hwq);
+ idle = false;
+ break;
+ }
+ }
+ mtx_unlock(&hwq->qlock);
+ break;
+
+ case RECOVERY_WAITING:
+ /*
+ * These messages aren't interesting while we're suspended. We
+ * put the queues into waiting state while suspending.
+ * Suspending takes a while, so we'll see these during that time
+ * and they aren't diagnostic. At other times, they indicate a
+ * problem that's worth complaining about.
+ */
+ if (!device_is_suspended(ctrlr->dev))
+ ufshci_printf(ctrlr, "Waiting for reset to complete\n");
+ idle = false; /* We want to keep polling */
+ break;
+ }
+
+ /*
+ * Rearm the timeout.
+ */
+ if (!idle) {
+ callout_schedule_sbt(&hwq->timer, SBT_1S / 2, SBT_1S / 2, 0);
+ } else {
+ hwq->timer_armed = false;
+ }
+}
+
/*
* Submit the tracker to the hardware.
*/
@@ -436,13 +682,30 @@ ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
{
struct ufshci_controller *ctrlr = req_queue->ctrlr;
struct ufshci_request *req = tr->req;
+ struct ufshci_hw_queue *hwq;
uint64_t ucd_paddr;
uint16_t request_len, response_off, response_len;
uint8_t slot_num = tr->slot_num;
+ int timeout;
- mtx_assert(&req_queue->qops.get_hw_queue(req_queue)->qlock, MA_OWNED);
+ hwq = req_queue->qops.get_hw_queue(req_queue);
+
+ mtx_assert(&hwq->qlock, MA_OWNED);
- /* TODO: Check timeout */
+ if (req->cb_fn == ufshci_completion_poll_cb)
+ timeout = 1;
+ else
+ timeout = ctrlr->timeout_period;
+ tr->deadline = getsbinuptime() + timeout * SBT_1S;
+ if (!hwq->timer_armed) {
+ hwq->timer_armed = true;
+ /*
+ * It wakes up once every 0.5 seconds to check if the deadline
+ * has passed.
+ */
+ callout_reset_sbt_on(&hwq->timer, SBT_1S / 2, SBT_1S / 2,
+ ufshci_req_queue_timeout, hwq, hwq->cpu, 0);
+ }
if (req_queue->is_task_mgmt) {
/* Prepare UTP Task Management Request Descriptor. */
@@ -508,6 +771,9 @@ _ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
tr->deadline = SBT_MAX;
tr->req = req;
+ TAILQ_REMOVE(&tr->hwq->free_tr, tr, tailq);
+ TAILQ_INSERT_TAIL(&tr->hwq->outstanding_tr, tr, tailq);
+
ufshci_req_queue_submit_tracker(req_queue, tr, req->data_direction);
return (0);
diff --git a/sys/dev/ufshci/ufshci_req_sdb.c b/sys/dev/ufshci/ufshci_req_sdb.c
index 834a459d48e3..ca47aa159c5b 100644
--- a/sys/dev/ufshci/ufshci_req_sdb.c
+++ b/sys/dev/ufshci/ufshci_req_sdb.c
@@ -40,6 +40,8 @@ ufshci_req_sdb_cmd_desc_destroy(struct ufshci_req_queue *req_queue)
bus_dma_tag_destroy(req_queue->dma_tag_ucd);
req_queue->dma_tag_ucd = NULL;
}
+
+ free(req_queue->hwq->ucd_bus_addr, M_UFSHCI);
}
static void
@@ -74,6 +76,10 @@ ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue,
uint8_t *ucdmem;
int i, error;
+ req_queue->hwq->ucd_bus_addr = malloc(sizeof(bus_addr_t) *
+ req_queue->num_trackers,
+ M_UFSHCI, M_ZERO | M_NOWAIT);
+
/*
* Each component must be page aligned, and individual PRP lists
* cannot cross a page boundary.
@@ -152,6 +158,9 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
uint64_t queuemem_phys;
uint8_t *queuemem;
struct ufshci_tracker *tr;
+ const size_t lock_name_len = 32;
+ char qlock_name[lock_name_len], recovery_lock_name[lock_name_len];
+ char *base;
int i, error;
req_queue->ctrlr = ctrlr;
@@ -169,11 +178,21 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
hwq = &req_queue->hwq[UFSHCI_SDB_Q];
hwq->num_entries = req_queue->num_entries;
hwq->num_trackers = req_queue->num_trackers;
- req_queue->hwq->ucd_bus_addr = malloc(sizeof(bus_addr_t) *
- req_queue->num_trackers,
- M_UFSHCI, M_ZERO | M_NOWAIT);
+ hwq->ctrlr = ctrlr;
+ hwq->req_queue = req_queue;
+
+ base = is_task_mgmt ? "ufshci utmrq" : "ufshci utrq";
+ snprintf(qlock_name, sizeof(qlock_name), "%s #%d lock", base,
+ UFSHCI_SDB_Q);
+ snprintf(recovery_lock_name, sizeof(recovery_lock_name),
+ "%s #%d recovery lock", base, UFSHCI_SDB_Q);
- mtx_init(&hwq->qlock, "ufshci req_queue lock", NULL, MTX_DEF);
+ mtx_init(&hwq->qlock, qlock_name, NULL, MTX_DEF);
+ mtx_init(&hwq->recovery_lock, recovery_lock_name, NULL, MTX_DEF);
+
+ callout_init_mtx(&hwq->timer, &hwq->recovery_lock, 0);
+ hwq->timer_armed = false;
+ hwq->recovery_state = RECOVERY_WAITING;
/*
* Allocate physical memory for request queue (UTP Transfer Request
@@ -219,6 +238,9 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
req_queue->num_entries,
M_UFSHCI, DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
+ TAILQ_INIT(&hwq->free_tr);
+ TAILQ_INIT(&hwq->outstanding_tr);
+
for (i = 0; i < req_queue->num_trackers; i++) {
tr = malloc_domainset(sizeof(struct ufshci_tracker), M_UFSHCI,
DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
@@ -226,6 +248,7 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
tr->req_queue = req_queue;
tr->slot_num = i;
tr->slot_state = UFSHCI_SLOT_STATE_FREE;
+ TAILQ_INSERT_HEAD(&hwq->free_tr, tr, tailq);
hwq->act_tr[i] = tr;
}
@@ -255,8 +278,6 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
ctrlr) != 0) {
ufshci_printf(ctrlr,
"failed to construct cmd descriptor memory\n");
- bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd,
- hwq->queuemem_map);
goto out;
}
@@ -280,6 +301,11 @@ ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr;
int i;
+ mtx_lock(&hwq->recovery_lock);
+ hwq->timer_armed = false;
+ mtx_unlock(&hwq->recovery_lock);
+ callout_drain(&hwq->timer);
+
if (!req_queue->is_task_mgmt)
ufshci_req_sdb_cmd_desc_destroy(&ctrlr->transfer_req_queue);
@@ -305,10 +331,11 @@ ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
hwq->dma_tag_queue = NULL;
}
+ if (mtx_initialized(&hwq->recovery_lock))
+ mtx_destroy(&hwq->recovery_lock);
if (mtx_initialized(&hwq->qlock))
mtx_destroy(&hwq->qlock);
- free(req_queue->hwq->ucd_bus_addr, M_UFSHCI);
free(req_queue->hwq, M_UFSHCI);
}
@@ -318,10 +345,36 @@ ufshci_req_sdb_get_hw_queue(struct ufshci_req_queue *req_queue)
return &req_queue->hwq[UFSHCI_SDB_Q];
}
+void
+ufshci_req_sdb_disable(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue)
+{
+ struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+ struct ufshci_tracker *tr, *tr_temp;
+
+ mtx_lock(&hwq->recovery_lock);
+ mtx_lock(&hwq->qlock);
+
+ if (mtx_initialized(&hwq->recovery_lock))
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+ if (mtx_initialized(&hwq->qlock))
+ mtx_assert(&hwq->qlock, MA_OWNED);
+
+ hwq->recovery_state = RECOVERY_WAITING;
+ TAILQ_FOREACH_SAFE(tr, &hwq->outstanding_tr, tailq, tr_temp) {
+ tr->deadline = SBT_MAX;
+ }
+
+ mtx_unlock(&hwq->qlock);
+ mtx_unlock(&hwq->recovery_lock);
+}
+
int
ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue)
{
+ struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+
if (req_queue->is_task_mgmt) {
uint32_t hcs, utmrldbr, utmrlrsr;
@@ -373,6 +426,14 @@ ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
ufshci_mmio_write_4(ctrlr, utrlrsr, utrlrsr);
}
+ if (mtx_initialized(&hwq->recovery_lock))
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+ if (mtx_initialized(&hwq->qlock))
+ mtx_assert(&hwq->qlock, MA_OWNED);
+ KASSERT(!req_queue->ctrlr->is_failed, ("Enabling a failed hwq\n"));
+
+ hwq->recovery_state = RECOVERY_NONE;
+
return (0);
}
@@ -466,6 +527,8 @@ ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue)
uint8_t slot;
bool done = false;
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+
hwq->num_intr_handler_calls++;
bus_dmamap_sync(hwq->dma_tag_queue, hwq->queuemem_map,
diff --git a/sys/dev/ufshci/ufshci_sim.c b/sys/dev/ufshci/ufshci_sim.c
index db24561f4169..828b520614a5 100644
--- a/sys/dev/ufshci/ufshci_sim.c
+++ b/sys/dev/ufshci/ufshci_sim.c
@@ -241,7 +241,6 @@ ufshci_cam_action(struct cam_sim *sim, union ccb *ccb)
ccb->ccb_h.status = CAM_REQ_CMP;
break;
case XPT_ABORT:
- /* TODO: Implement Task Management CMD*/
ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
break;
case XPT_SET_TRAN_SETTINGS:
diff --git a/sys/dev/ufshci/ufshci_sysctl.c b/sys/dev/ufshci/ufshci_sysctl.c
index 5e5069f12e5f..56bc06b13f3c 100644
--- a/sys/dev/ufshci/ufshci_sysctl.c
+++ b/sys/dev/ufshci/ufshci_sysctl.c
@@ -152,6 +152,7 @@ ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr)
struct sysctl_ctx_list *ctrlr_ctx;
struct sysctl_oid *ctrlr_tree, *que_tree, *ioq_tree;
struct sysctl_oid_list *ctrlr_list, *ioq_list;
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
#define QUEUE_NAME_LENGTH 16
char queue_name[QUEUE_NAME_LENGTH];
int i;
@@ -177,6 +178,25 @@ ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr)
SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "cap", CTLFLAG_RD,
&ctrlr->cap, 0, "Number of I/O queue pairs");
+ SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_enabled",
+ CTLFLAG_RD, &dev->is_wb_enabled, 0, "WriteBooster enable/disable");
+
+ SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_flush_enabled",
+ CTLFLAG_RD, &dev->is_wb_flush_enabled, 0,
+ "WriteBooster flush enable/disable");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_buffer_type",
+ CTLFLAG_RD, &dev->wb_buffer_type, 0, "WriteBooster type");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_buffer_size_mb",
+ CTLFLAG_RD, &dev->wb_buffer_size_mb, 0,
+ "WriteBooster buffer size in MB");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "wb_user_space_config_option", CTLFLAG_RD,
+ &dev->wb_user_space_config_option, 0,
+ "WriteBooster preserve user space mode");
+
SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "timeout_period",
CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, &ctrlr->timeout_period,
0, ufshci_sysctl_timeout_period, "IU",
diff --git a/sys/dev/ufshci/ufshci_uic_cmd.c b/sys/dev/ufshci/ufshci_uic_cmd.c
index 2c5f635dc11e..b9c867ff7065 100644
--- a/sys/dev/ufshci/ufshci_uic_cmd.c
+++ b/sys/dev/ufshci/ufshci_uic_cmd.c
@@ -14,7 +14,7 @@
int
ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr)
{
- uint32_t is;
+ uint32_t is, hcs;
int timeout;
/* Wait for the IS flag to change */
@@ -40,6 +40,15 @@ ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr)
DELAY(10);
}
+ /* Check HCS power mode change request status */
+ hcs = ufshci_mmio_read_4(ctrlr, hcs);
+ if (UFSHCIV(UFSHCI_HCS_REG_UPMCRS, hcs) != 0x01) {
+ ufshci_printf(ctrlr,
+ "Power mode change request status error: 0x%x\n",
+ UFSHCIV(UFSHCI_HCS_REG_UPMCRS, hcs));
+ return (ENXIO);
+ }
+
return (0);
}
@@ -112,6 +121,7 @@ ufshci_uic_send_cmd(struct ufshci_controller *ctrlr,
struct ufshci_uic_cmd *uic_cmd, uint32_t *return_value)
{
int error;
+ uint32_t config_result_code;
mtx_lock(&ctrlr->uic_cmd_lock);
@@ -134,6 +144,13 @@ ufshci_uic_send_cmd(struct ufshci_controller *ctrlr,
if (error)
return (ENXIO);
+ config_result_code = ufshci_mmio_read_4(ctrlr, ucmdarg2);
+ if (config_result_code) {
+ ufshci_printf(ctrlr,
+ "Failed to send UIC command. (config result code = 0x%x)\n",
+ config_result_code);
+ }
+
if (return_value != NULL)
*return_value = ufshci_mmio_read_4(ctrlr, ucmdarg3);
diff --git a/sys/dev/usb/controller/xhci.c b/sys/dev/usb/controller/xhci.c
index 5be592512196..788b2b718062 100644
--- a/sys/dev/usb/controller/xhci.c
+++ b/sys/dev/usb/controller/xhci.c
@@ -156,6 +156,7 @@ struct xhci_std_temp {
static void xhci_do_poll(struct usb_bus *);
static void xhci_device_done(struct usb_xfer *, usb_error_t);
+static void xhci_get_xecp(struct xhci_softc *);
static void xhci_root_intr(struct xhci_softc *);
static void xhci_free_device_ext(struct usb_device *);
static struct xhci_endpoint_ext *xhci_get_endpoint_ext(struct usb_device *,
@@ -566,6 +567,8 @@ xhci_init(struct xhci_softc *sc, device_t self, uint8_t dma32)
device_printf(self, "%d bytes context size, %d-bit DMA\n",
sc->sc_ctx_is_64_byte ? 64 : 32, (int)sc->sc_bus.dma_bits);
+ xhci_get_xecp(sc);
+
/* enable 64Kbyte control endpoint quirk */
sc->sc_bus.control_ep_quirk = (xhcictlquirk ? 1 : 0);
@@ -654,6 +657,88 @@ xhci_uninit(struct xhci_softc *sc)
}
static void
+xhci_get_xecp(struct xhci_softc *sc)
+{
+
+ uint32_t hccp1;
+ uint32_t eec;
+ uint32_t eecp;
+ bool first = true;
+
+ hccp1 = XREAD4(sc, capa, XHCI_HCSPARAMS0);
+
+ if (XHCI_HCS0_XECP(hccp1) == 0) {
+ device_printf(sc->sc_bus.parent,
+ "xECP: no capabilities found\n");
+ return;
+ }
+
+ /*
+ * Parse the xECP Capabilities table and print known caps.
+ * Implemented, vendor and reserved xECP Capabilities values are
+ * documented in Table 7.2 of eXtensible Host Controller Interface for
+ * Universal Serial Bus (xHCI) Rev 1.2b 2023.
+ */
+ device_printf(sc->sc_bus.parent, "xECP capabilities <");
+
+ eec = -1;
+ for (eecp = XHCI_HCS0_XECP(hccp1) << 2;
+ eecp != 0 && XHCI_XECP_NEXT(eec) != 0;
+ eecp += XHCI_XECP_NEXT(eec) << 2) {
+ eec = XREAD4(sc, capa, eecp);
+
+ uint8_t xecpid = XHCI_XECP_ID(eec);
+
+ if ((xecpid >= 11 && xecpid <= 16) ||
+ (xecpid >= 19 && xecpid <= 191)) {
+ if (!first)
+ printf(",");
+ printf("RES(%x)", xecpid);
+ } else if (xecpid > 191) {
+ if (!first)
+ printf(",");
+ printf("VEND(%x)", xecpid);
+ } else {
+ if (!first)
+ printf(",");
+ switch (xecpid)
+ {
+ case XHCI_ID_USB_LEGACY:
+ printf("LEGACY");
+ break;
+ case XHCI_ID_PROTOCOLS:
+ printf("PROTO");
+ break;
+ case XHCI_ID_POWER_MGMT:
+ printf("POWER");
+ break;
+ case XHCI_ID_VIRTUALIZATION:
+ printf("VIRT");
+ break;
+ case XHCI_ID_MSG_IRQ:
+ printf("MSG IRQ");
+ break;
+ case XHCI_ID_USB_LOCAL_MEM:
+ printf("LOCAL MEM");
+ break;
+ case XHCI_ID_USB_DEBUG:
+ printf("DEBUG");
+ break;
+ case XHCI_ID_EXT_MSI:
+ printf("EXT MSI");
+ break;
+ case XHCI_ID_USB3_TUN:
+ printf("TUN");
+ break;
+
+ }
+ }
+ first = false;
+ }
+ printf(">\n");
+}
+
+static void
xhci_set_hw_power_sleep(struct usb_bus *bus, uint32_t state)
{
struct xhci_softc *sc = XHCI_BUS2SC(bus);
diff --git a/sys/dev/usb/controller/xhci_pci.c b/sys/dev/usb/controller/xhci_pci.c
index d5cfd228a429..820fb2f738a1 100644
--- a/sys/dev/usb/controller/xhci_pci.c
+++ b/sys/dev/usb/controller/xhci_pci.c
@@ -178,6 +178,8 @@ xhci_pci_match(device_t self)
return ("Intel Tiger Lake-H USB 3.2 controller");
case 0x461e8086:
return ("Intel Alder Lake-P Thunderbolt 4 USB controller");
+ case 0x4b7d8086:
+ return ("Intel Elkhart Lake USB 3.1 controller");
case 0x51ed8086:
return ("Intel Alder Lake USB 3.2 controller");
case 0x5aa88086:
diff --git a/sys/dev/usb/controller/xhcireg.h b/sys/dev/usb/controller/xhcireg.h
index 9d0b6e2f4b4b..821897155544 100644
--- a/sys/dev/usb/controller/xhcireg.h
+++ b/sys/dev/usb/controller/xhcireg.h
@@ -205,6 +205,11 @@
#define XHCI_ID_VIRTUALIZATION 0x0004
#define XHCI_ID_MSG_IRQ 0x0005
#define XHCI_ID_USB_LOCAL_MEM 0x0006
+/* values 7-9 are reserved */
+#define XHCI_ID_USB_DEBUG 0x000a
+/* values 11-16 are reserved */
+#define XHCI_ID_EXT_MSI 0x0011
+#define XHCI_ID_USB3_TUN 0x0012
/* XHCI register R/W wrappers */
#define XREAD1(sc, what, a) \
diff --git a/sys/dev/usb/net/if_umb.c b/sys/dev/usb/net/if_umb.c
index 5703bc03dd39..b1082b117259 100644
--- a/sys/dev/usb/net/if_umb.c
+++ b/sys/dev/usb/net/if_umb.c
@@ -177,9 +177,7 @@ static void umb_ncm_setup(struct umb_softc *, struct usb_config *);
static void umb_close_bulkpipes(struct umb_softc *);
static int umb_ioctl(if_t , u_long, caddr_t);
static void umb_init(void *);
-#ifdef DEV_NETMAP
static void umb_input(if_t , struct mbuf *);
-#endif
static int umb_output(if_t , struct mbuf *,
const struct sockaddr *, struct route *);
static void umb_start(if_t );
@@ -585,9 +583,7 @@ umb_attach_task(struct usb_proc_msg *msg)
if_setsoftc(ifp, sc);
if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_POINTOPOINT);
if_setioctlfn(ifp, umb_ioctl);
-#ifdef DEV_NETMAP
if_setinputfn(ifp, umb_input);
-#endif
if_setoutputfn(ifp, umb_output);
if_setstartfn(ifp, umb_start);
if_setinitfn(ifp, umb_init);
@@ -666,7 +662,7 @@ umb_ncm_setup(struct umb_softc *sc, struct usb_config * config)
struct ncm_ntb_parameters np;
usb_error_t error;
- /* Query NTB tranfers sizes */
+ /* Query NTB transfers sizes */
req.bmRequestType = UT_READ_CLASS_INTERFACE;
req.bRequest = NCM_GET_NTB_PARAMETERS;
USETW(req.wValue, 0);
diff --git a/sys/dev/usb/serial/udbc.c b/sys/dev/usb/serial/udbc.c
new file mode 100644
index 000000000000..d7ca6b25bf32
--- /dev/null
+++ b/sys/dev/usb/serial/udbc.c
@@ -0,0 +1,404 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-NetBSD
+ *
+ * Copyright (c) 2000 The NetBSD Foundation, Inc.
+ * Copyright (c) 2016-2024 Hiroki Sato <hrs@FreeBSD.org>
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Lennart Augustsson (lennart@augustsson.net).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/callout.h>
+#include <sys/condvar.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/priv.h>
+#include <sys/queue.h>
+#include <sys/stddef.h>
+#include <sys/stdint.h>
+#include <sys/sx.h>
+#include <sys/sysctl.h>
+#include <sys/unistd.h>
+
+#include <dev/usb/usb.h>
+#include <dev/usb/usb_ioctl.h>
+#include <dev/usb/usbdi.h>
+#include <dev/usb/usbdi_util.h>
+#include <dev/usb/usb_core.h>
+
+#include "usbdevs.h"
+
+#define USB_DEBUG_VAR udbc_debug
+#include <dev/usb/usb_process.h>
+#include <dev/usb/serial/usb_serial.h>
+#include <dev/usb/usb_debug.h>
+
+static SYSCTL_NODE(_hw_usb, OID_AUTO, udbc, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+ "USB DbC Client");
+
+#ifdef USB_DEBUG
+static int udbc_debug = 0;
+SYSCTL_INT(_hw_usb_udbc, OID_AUTO, debug, CTLFLAG_RWTUN, &udbc_debug, 0,
+ "Debug level");
+#endif
+
+#define UDBC_CONFIG_INDEX 0
+
+#define UDBC_IBUFSIZE 1024
+#define UDBC_OBUFSIZE 1024
+
+enum {
+ UDBC_BULK_DT_WR,
+ UDBC_BULK_DT_RD,
+	UDBC_N_TRANSFER,		/* number of endpoints/transfers */
+};
+
+struct udbc_softc {
+ struct ucom_super_softc sc_super_ucom;
+ struct ucom_softc sc_ucom;
+
+ struct usb_device *sc_udev;
+ struct usb_xfer *sc_xfer[UDBC_N_TRANSFER];
+ device_t sc_dev;
+ struct mtx sc_mtx;
+
+ uint32_t sc_unit;
+};
+
+/* prototypes */
+
+static device_probe_t udbc_probe;
+static device_attach_t udbc_attach;
+static device_detach_t udbc_detach;
+static void udbc_free_softc(struct udbc_softc *);
+
+static usb_callback_t udbc_write_callback;
+static usb_callback_t udbc_read_callback;
+
+static void udbc_free(struct ucom_softc *);
+static void udbc_cfg_open(struct ucom_softc *);
+static void udbc_cfg_close(struct ucom_softc *);
+static int udbc_pre_param(struct ucom_softc *, struct termios *);
+static int udbc_ioctl(struct ucom_softc *, uint32_t, caddr_t, int,
+ struct thread *);
+static void udbc_start_read(struct ucom_softc *);
+static void udbc_stop_read(struct ucom_softc *);
+static void udbc_start_write(struct ucom_softc *);
+static void udbc_stop_write(struct ucom_softc *);
+static void udbc_poll(struct ucom_softc *ucom);
+
+static const struct usb_config udbc_config[UDBC_N_TRANSFER] = {
+ [UDBC_BULK_DT_WR] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_OUT,
+ .bufsize = UDBC_OBUFSIZE,
+ .flags = {.pipe_bof = 1,},
+ .callback = &udbc_write_callback,
+ },
+
+ [UDBC_BULK_DT_RD] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_IN,
+ .bufsize = UDBC_IBUFSIZE,
+ .flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
+ .callback = &udbc_read_callback,
+ },
+};
+
+static const struct ucom_callback udbc_callback = {
+ .ucom_cfg_open = &udbc_cfg_open,
+ .ucom_cfg_close = &udbc_cfg_close,
+ .ucom_pre_param = &udbc_pre_param,
+ .ucom_ioctl = &udbc_ioctl,
+ .ucom_start_read = &udbc_start_read,
+ .ucom_stop_read = &udbc_stop_read,
+ .ucom_start_write = &udbc_start_write,
+ .ucom_stop_write = &udbc_stop_write,
+ .ucom_poll = &udbc_poll,
+ .ucom_free = &udbc_free,
+};
+
+static device_method_t udbc_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, udbc_probe),
+ DEVMETHOD(device_attach, udbc_attach),
+ DEVMETHOD(device_detach, udbc_detach),
+ DEVMETHOD_END
+};
+
+static int
+udbc_probe(device_t dev)
+{
+ struct usb_attach_arg *uaa = device_get_ivars(dev);
+
+ if (uaa->usb_mode != USB_MODE_HOST)
+ return (ENXIO);
+ if (uaa->info.bConfigIndex != UDBC_CONFIG_INDEX)
+ return (ENXIO);
+ if (uaa->info.bInterfaceClass != UICLASS_DIAGNOSTIC)
+ return (ENXIO);
+ if (uaa->info.bDeviceProtocol != 0x00) /* GNU GDB == 1 */
+ return (ENXIO);
+
+ return (BUS_PROBE_SPECIFIC);
+}
+
+static int
+udbc_attach(device_t dev)
+{
+ struct usb_attach_arg *uaa = device_get_ivars(dev);
+ struct udbc_softc *sc = device_get_softc(dev);
+ int error;
+
+ DPRINTF("\n");
+
+ sc->sc_udev = uaa->device;
+ sc->sc_dev = dev;
+ sc->sc_unit = device_get_unit(dev);
+
+ device_set_usb_desc(dev);
+ mtx_init(&sc->sc_mtx, "udbc", NULL, MTX_DEF);
+ ucom_ref(&sc->sc_super_ucom);
+
+ sc->sc_ucom.sc_portno = 0;
+
+ error = usbd_transfer_setup(uaa->device, &uaa->info.bIfaceIndex,
+ sc->sc_xfer, udbc_config, UDBC_N_TRANSFER, sc, &sc->sc_mtx);
+
+ if (error) {
+ device_printf(dev,
+ "allocating USB transfers failed\n");
+ goto detach;
+ }
+ /* clear stall at first run */
+ mtx_lock(&sc->sc_mtx);
+ usbd_xfer_set_stall(sc->sc_xfer[UDBC_BULK_DT_WR]);
+ usbd_xfer_set_stall(sc->sc_xfer[UDBC_BULK_DT_RD]);
+ mtx_unlock(&sc->sc_mtx);
+
+ error = ucom_attach(&sc->sc_super_ucom, &sc->sc_ucom, 1, sc,
+ &udbc_callback, &sc->sc_mtx);
+ if (error)
+ goto detach;
+ ucom_set_pnpinfo_usb(&sc->sc_super_ucom, dev);
+
+ return (0); /* success */
+
+detach:
+ udbc_detach(dev);
+ return (ENXIO);
+}
+
+static int
+udbc_detach(device_t dev)
+{
+ struct udbc_softc *sc = device_get_softc(dev);
+
+ ucom_detach(&sc->sc_super_ucom, &sc->sc_ucom);
+ usbd_transfer_unsetup(sc->sc_xfer, UDBC_N_TRANSFER);
+
+ device_claim_softc(dev);
+
+ udbc_free_softc(sc);
+
+ return (0);
+}
+
+UCOM_UNLOAD_DRAIN(udbc);
+
+static void
+udbc_free_softc(struct udbc_softc *sc)
+{
+ if (ucom_unref(&sc->sc_super_ucom)) {
+ mtx_destroy(&sc->sc_mtx);
+ device_free_softc(sc);
+ }
+}
+
+static void
+udbc_free(struct ucom_softc *ucom)
+{
+ udbc_free_softc(ucom->sc_parent);
+}
+
+static void
+udbc_cfg_open(struct ucom_softc *ucom)
+{
+ /*
+ * This do-nothing open routine exists for the sole purpose of this
+ * DPRINTF() so that you can see the point at which open gets called
+ * when debugging is enabled.
+ */
+ DPRINTF("\n");
+}
+
+static void
+udbc_cfg_close(struct ucom_softc *ucom)
+{
+ /*
+ * This do-nothing close routine exists for the sole purpose of this
+ * DPRINTF() so that you can see the point at which close gets called
+ * when debugging is enabled.
+ */
+ DPRINTF("\n");
+}
+
+static void
+udbc_write_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct udbc_softc *sc = usbd_xfer_softc(xfer);
+ struct usb_page_cache *pc;
+ uint32_t buflen;
+
+ DPRINTFN(3, "\n");
+
+ switch (USB_GET_STATE(xfer)) {
+ default: /* Error */
+ if (error != USB_ERR_CANCELLED) {
+ /* try to clear stall first */
+ usbd_xfer_set_stall(xfer);
+ }
+ /* FALLTHROUGH */
+ case USB_ST_SETUP:
+ case USB_ST_TRANSFERRED:
+ pc = usbd_xfer_get_frame(xfer, 0);
+ if (ucom_get_data(&sc->sc_ucom, pc, 0, UDBC_OBUFSIZE,
+ &buflen) == 0)
+ break;
+ if (buflen != 0) {
+ usbd_xfer_set_frame_len(xfer, 0, buflen);
+ usbd_transfer_submit(xfer);
+ }
+ break;
+ }
+}
+
+static void
+udbc_read_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct udbc_softc *sc = usbd_xfer_softc(xfer);
+ struct usb_page_cache *pc;
+ int buflen;
+
+ DPRINTFN(3, "\n");
+
+ usbd_xfer_status(xfer, &buflen, NULL, NULL, NULL);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+ pc = usbd_xfer_get_frame(xfer, 0);
+ ucom_put_data(&sc->sc_ucom, pc, 0, buflen);
+ /* FALLTHROUGH */
+ case USB_ST_SETUP:
+tr_setup:
+ usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
+ usbd_transfer_submit(xfer);
+ return;
+
+ default: /* Error */
+ if (error != USB_ERR_CANCELLED) {
+ /* try to clear stall first */
+ usbd_xfer_set_stall(xfer);
+ goto tr_setup;
+ }
+ return;
+ }
+}
+
+static int
+udbc_pre_param(struct ucom_softc *ucom, struct termios *t)
+{
+ DPRINTF("\n");
+
+ return (0);
+}
+
+static int
+udbc_ioctl(struct ucom_softc *ucom, uint32_t cmd, caddr_t data, int flag,
+ struct thread *td)
+{
+ return (ENOIOCTL);
+}
+
+static void
+udbc_start_read(struct ucom_softc *ucom)
+{
+ struct udbc_softc *sc = ucom->sc_parent;
+
+ usbd_transfer_start(sc->sc_xfer[UDBC_BULK_DT_RD]);
+}
+
+static void
+udbc_stop_read(struct ucom_softc *ucom)
+{
+ struct udbc_softc *sc = ucom->sc_parent;
+
+ usbd_transfer_stop(sc->sc_xfer[UDBC_BULK_DT_RD]);
+}
+
+static void
+udbc_start_write(struct ucom_softc *ucom)
+{
+ struct udbc_softc *sc = ucom->sc_parent;
+
+ usbd_transfer_start(sc->sc_xfer[UDBC_BULK_DT_WR]);
+}
+
+static void
+udbc_stop_write(struct ucom_softc *ucom)
+{
+ struct udbc_softc *sc = ucom->sc_parent;
+
+ usbd_transfer_stop(sc->sc_xfer[UDBC_BULK_DT_WR]);
+}
+
+static void
+udbc_poll(struct ucom_softc *ucom)
+{
+ struct udbc_softc *sc = ucom->sc_parent;
+
+ usbd_transfer_poll(sc->sc_xfer, UDBC_N_TRANSFER);
+}
+
+static driver_t udbc_driver = {
+ .name = "udbc",
+ .methods = udbc_methods,
+ .size = sizeof(struct udbc_softc),
+};
+
+DRIVER_MODULE(udbc, uhub, udbc_driver, NULL, NULL);
+MODULE_DEPEND(udbc, ucom, 1, 1, 1);
+MODULE_DEPEND(udbc, usb, 1, 1, 1);
+MODULE_VERSION(udbc, 1);
diff --git a/sys/dev/usb/usb_hub.c b/sys/dev/usb/usb_hub.c
index e3509862ef54..ee9d8ab0c9bb 100644
--- a/sys/dev/usb/usb_hub.c
+++ b/sys/dev/usb/usb_hub.c
@@ -954,7 +954,8 @@ done:
* packet. This function is called having the "bus_mtx" locked.
*------------------------------------------------------------------------*/
void
-uhub_root_intr(struct usb_bus *bus, const uint8_t *ptr, uint8_t len)
+uhub_root_intr(struct usb_bus *bus,
+ const uint8_t *ptr __unused, uint8_t len __unused)
{
USB_BUS_LOCK_ASSERT(bus, MA_OWNED);
diff --git a/sys/dev/usb/wlan/if_mtw.c b/sys/dev/usb/wlan/if_mtw.c
index 137590651948..6967e5081542 100644
--- a/sys/dev/usb/wlan/if_mtw.c
+++ b/sys/dev/usb/wlan/if_mtw.c
@@ -638,6 +638,7 @@ mtw_attach(device_t self)
ic->ic_flags |= IEEE80211_F_DATAPAD;
ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
mtw_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -3131,6 +3132,8 @@ mtw_tx(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
data->ni = ni;
data->ridx = ridx;
+ ieee80211_output_seqno_assign(ni, -1, m);
+
mtw_set_tx_desc(sc, data);
/*
@@ -3390,6 +3393,8 @@ mtw_tx_param(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
break;
data->ridx = ridx;
+ ieee80211_output_seqno_assign(ni, -1, m);
+
mtw_set_tx_desc(sc, data);
MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending raw frame len=%u rate=%u\n",
diff --git a/sys/dev/usb/wlan/if_rsu.c b/sys/dev/usb/wlan/if_rsu.c
index 07f7b6f3a708..e976948f6849 100644
--- a/sys/dev/usb/wlan/if_rsu.c
+++ b/sys/dev/usb/wlan/if_rsu.c
@@ -371,18 +371,16 @@ rsu_update_chw(struct ieee80211com *ic)
/*
* notification from net80211 that it'd like to do A-MPDU on the given TID.
- *
- * Note: this actually hangs traffic at the present moment, so don't use it.
- * The firmware debug does indiciate it's sending and establishing a TX AMPDU
- * session, but then no traffic flows.
*/
static int
rsu_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
-#if 0
struct rsu_softc *sc = ni->ni_ic->ic_softc;
struct r92s_add_ba_req req;
+ RSU_DPRINTF(sc, RSU_DEBUG_AMPDU, "%s: called, tid=%d\n",
+ __func__, tap->txa_tid);
+
/* Don't enable if it's requested or running */
if (IEEE80211_AMPDU_REQUESTED(tap))
return (0);
@@ -397,23 +395,30 @@ rsu_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
return (0);
/* Send the firmware command */
- RSU_DPRINTF(sc, RSU_DEBUG_AMPDU, "%s: establishing AMPDU TX for TID %d\n",
+ RSU_DPRINTF(sc, RSU_DEBUG_AMPDU,
+ "%s: establishing AMPDU TX for TID %d\n",
__func__,
tap->txa_tid);
RSU_LOCK(sc);
- if (rsu_fw_cmd(sc, R92S_CMD_ADDBA_REQ, &req, sizeof(req)) != 1) {
+ if (rsu_fw_cmd(sc, R92S_CMD_ADDBA_REQ, &req, sizeof(req)) != 0) {
RSU_UNLOCK(sc);
+ RSU_DPRINTF(sc, RSU_DEBUG_AMPDU, "%s: AMPDU TX cmd failure\n",
+ __func__);
/* Mark failure */
- (void) ieee80211_ampdu_tx_request_active_ext(ni, tap->txa_tid, 0);
+ ieee80211_ampdu_tx_request_active_ext(ni, tap->txa_tid, 0);
+ /* Return 0, we've been driving this ourselves */
return (0);
}
RSU_UNLOCK(sc);
+ RSU_DPRINTF(sc, RSU_DEBUG_AMPDU, "%s: AMPDU TX cmd success\n",
+ __func__);
+
/* Mark success; we don't get any further notifications */
- (void) ieee80211_ampdu_tx_request_active_ext(ni, tap->txa_tid, 1);
-#endif
- /* Return 0, we're driving this ourselves */
+ ieee80211_ampdu_tx_request_active_ext(ni, tap->txa_tid, 1);
+
+ /* Return 0, we've been driving this ourselves */
return (0);
}
@@ -563,9 +568,7 @@ rsu_attach(device_t self)
/* Enable basic HT */
ic->ic_htcaps = IEEE80211_HTC_HT |
-#if 0
IEEE80211_HTC_AMPDU |
-#endif
IEEE80211_HTC_AMSDU |
IEEE80211_HTCAP_MAXAMSDU_3839 |
IEEE80211_HTCAP_SMPS_OFF;
@@ -576,6 +579,7 @@ rsu_attach(device_t self)
ic->ic_rxstream = sc->sc_nrxstream;
}
ic->ic_flags_ext |= IEEE80211_FEXT_SCAN_OFFLOAD;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
rsu_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1537,6 +1541,10 @@ rsu_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
is_checked = 1;
k->wk_flags |= IEEE80211_KEY_SWCRYPT;
} else
+ /*
+ * TODO: should allocate these from the CAM space;
+ * skipping over the fixed slots and _BC / _BSS.
+ */
*keyix = R92S_MACID_BSS;
}
@@ -2166,7 +2174,7 @@ rsu_event_addba_req_report(struct rsu_softc *sc, uint8_t *buf, int len)
__func__,
ether_sprintf(ba->mac_addr),
(int) ba->tid,
- (int) le16toh(ba->ssn));
+ (int) le16toh(ba->ssn) >> 4);
/* XXX do node lookup; this is STA specific */
@@ -2212,6 +2220,11 @@ rsu_rx_event(struct rsu_softc *sc, uint8_t code, uint8_t *buf, int len)
if (vap->iv_state == IEEE80211_S_AUTH)
rsu_event_join_bss(sc, buf, len);
break;
+
+ /* TODO: what about R92S_EVT_ADD_STA? and decoding macid? */
+ /* It likely is required for IBSS/AP mode */
+
+ /* TODO: should I be doing this transition in AP mode? */
case R92S_EVT_DEL_STA:
RSU_DPRINTF(sc, RSU_DEBUG_FWCMD | RSU_DEBUG_STATE,
"%s: disassociated from %s\n", __func__,
@@ -2229,6 +2242,7 @@ rsu_rx_event(struct rsu_softc *sc, uint8_t code, uint8_t *buf, int len)
break;
case R92S_EVT_FWDBG:
buf[60] = '\0';
+ /* TODO: some are \n terminated, some aren't, sigh */
RSU_DPRINTF(sc, RSU_DEBUG_FWDBG, "FWDBG: %s\n", (char *)buf);
break;
case R92S_EVT_ADDBA_REQ_REPORT:
@@ -2782,6 +2796,9 @@ rsu_tx_start(struct rsu_softc *sc, struct ieee80211_node *ni,
if (rate != 0)
ridx = rate2ridx(rate);
+ /* Assign sequence number, A-MPDU or otherwise */
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
@@ -2838,8 +2855,10 @@ rsu_tx_start(struct rsu_softc *sc, struct ieee80211_node *ni,
SM(R92S_TXDW0_OFFSET, sizeof(*txd)) |
R92S_TXDW0_OWN | R92S_TXDW0_FSG | R92S_TXDW0_LSG);
+ /* TODO: correct macid here? It should be in the node */
txd->txdw1 |= htole32(
SM(R92S_TXDW1_MACID, R92S_MACID_BSS) | SM(R92S_TXDW1_QSEL, qid));
+
if (!hasqos)
txd->txdw1 |= htole32(R92S_TXDW1_NONQOS);
if (k != NULL && !(k->wk_flags & IEEE80211_KEY_SWENCRYPT)) {
@@ -2860,8 +2879,13 @@ rsu_tx_start(struct rsu_softc *sc, struct ieee80211_node *ni,
SM(R92S_TXDW1_CIPHER, cipher) |
SM(R92S_TXDW1_KEYIDX, k->wk_keyix));
}
- /* XXX todo: set AGGEN bit if appropriate? */
- txd->txdw2 |= htole32(R92S_TXDW2_BK);
+
+ /*
+ * Note: no need to set TXDW2_AGGEN/TXDW2_BK to mark
+ * A-MPDU and non-AMPDU candidates; the firmware will
+ * handle this for us.
+ */
+
if (ismcast)
txd->txdw2 |= htole32(R92S_TXDW2_BMCAST);
@@ -2880,8 +2904,11 @@ rsu_tx_start(struct rsu_softc *sc, struct ieee80211_node *ni,
}
/*
- * Firmware will use and increment the sequence number for the
- * specified priority.
+ * Pass in prio here, NOT the sequence number.
+ *
+ * The hardware is in theory incrementing sequence numbers
+ * for us, but I haven't yet figured out exactly when/how
+ * it's supposed to work.
*/
txd->txdw3 |= htole32(SM(R92S_TXDW3_SEQ, prio));
@@ -3481,7 +3508,8 @@ rsu_load_firmware(struct rsu_softc *sc)
dmem.vcs_mode = R92S_VCS_MODE_RTS_CTS;
dmem.turbo_mode = 0;
dmem.bw40_en = !! (ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40);
- dmem.amsdu2ampdu_en = !! (sc->sc_ht);
+ /* net80211 handles AMSDUs just fine */
+ dmem.amsdu2ampdu_en = 0;
dmem.ampdu_en = !! (sc->sc_ht);
dmem.agg_offload = !! (sc->sc_ht);
dmem.qos_en = 1;
diff --git a/sys/dev/usb/wlan/if_rsureg.h b/sys/dev/usb/wlan/if_rsureg.h
index fb706a4d9b1a..e2074e1dd2ad 100644
--- a/sys/dev/usb/wlan/if_rsureg.h
+++ b/sys/dev/usb/wlan/if_rsureg.h
@@ -593,7 +593,14 @@ struct r92s_event_join_bss {
struct ndis_wlan_bssid_ex bss;
} __packed;
-#define R92S_MACID_BSS 5 /* XXX hardcoded somewhere */
+/*
+ * This is hard-coded in the firmware for a STA mode
+ * BSS join. If you turn on FWDEBUG, you'll see this
+ * in the logs:
+ *
+ * rsu0: FWDBG: mac id #5: 0000005b, 000fffff, 00000000
+ */
+#define R92S_MACID_BSS 5
/* Rx MAC descriptor. */
struct r92s_rx_stat {
diff --git a/sys/dev/usb/wlan/if_run.c b/sys/dev/usb/wlan/if_run.c
index 97c790dd5b81..147aa4044057 100644
--- a/sys/dev/usb/wlan/if_run.c
+++ b/sys/dev/usb/wlan/if_run.c
@@ -882,6 +882,7 @@ run_attach(device_t self)
ic->ic_flags |= IEEE80211_F_DATAPAD;
ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
run_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -3522,6 +3523,9 @@ run_tx(struct run_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
data->ni = ni;
data->ridx = ridx;
+ /* Assign sequence number now, regardless of A-MPDU TX or otherwise (for now) */
+ ieee80211_output_seqno_assign(ni, -1, m);
+
run_set_tx_desc(sc, data);
/*
@@ -3627,6 +3631,9 @@ run_tx_mgt(struct run_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
data->ni = ni;
data->ridx = ridx;
+ /* Assign sequence number now, regardless of A-MPDU TX or otherwise (for now) */
+ ieee80211_output_seqno_assign(ni, -1, m);
+
run_set_tx_desc(sc, data);
RUN_DPRINTF(sc, RUN_DEBUG_XMIT, "sending mgt frame len=%d rate=%d\n",
@@ -3771,6 +3778,9 @@ run_tx_param(struct run_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
break;
data->ridx = ridx;
+ /* Assign sequence number now, regardless of A-MPDU TX or otherwise (for now) */
+ ieee80211_output_seqno_assign(ni, -1, m);
+
run_set_tx_desc(sc, data);
RUN_DPRINTF(sc, RUN_DEBUG_XMIT, "sending raw frame len=%u rate=%u\n",
@@ -6416,6 +6426,10 @@ run_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
/* For now, no A-MPDU TX support in the driver */
+ /*
+ * TODO: maybe we needed to enable seqno generation too?
+ * What other TX desc bits are missing/needed?
+ */
return (0);
}
diff --git a/sys/dev/usb/wlan/if_uath.c b/sys/dev/usb/wlan/if_uath.c
index b49c75032d77..cc303e565bca 100644
--- a/sys/dev/usb/wlan/if_uath.c
+++ b/sys/dev/usb/wlan/if_uath.c
@@ -432,6 +432,8 @@ uath_attach(device_t dev)
/* put a regulatory domain to reveal informations. */
uath_regdomain = sc->sc_devcap.regDomain;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
memset(bands, 0, sizeof(bands));
setbit(bands, IEEE80211_MODE_11B);
setbit(bands, IEEE80211_MODE_11G);
@@ -1548,6 +1550,8 @@ uath_tx_start(struct uath_softc *sc, struct mbuf *m0, struct ieee80211_node *ni,
ieee80211_radiotap_tx(vap, m0);
}
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
wh = mtod(m0, struct ieee80211_frame *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
diff --git a/sys/dev/usb/wlan/if_upgt.c b/sys/dev/usb/wlan/if_upgt.c
index 642631ae34b7..1ab833301b3c 100644
--- a/sys/dev/usb/wlan/if_upgt.c
+++ b/sys/dev/usb/wlan/if_upgt.c
@@ -354,6 +354,8 @@ upgt_attach(device_t dev)
ic->ic_transmit = upgt_transmit;
ic->ic_parent = upgt_parent;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
ieee80211_radiotap_attach(ic,
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
UPGT_TX_RADIOTAP_PRESENT,
@@ -2116,6 +2118,9 @@ upgt_tx_start(struct upgt_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
upgt_set_led(sc, UPGT_LED_BLINK);
+ /* Assign sequence number */
+ ieee80211_output_seqno_assign(ni, -1, m);
+
/*
* Software crypto.
*/
diff --git a/sys/dev/usb/wlan/if_ural.c b/sys/dev/usb/wlan/if_ural.c
index 260d75a9821d..adef924a085c 100644
--- a/sys/dev/usb/wlan/if_ural.c
+++ b/sys/dev/usb/wlan/if_ural.c
@@ -473,6 +473,8 @@ ural_attach(device_t self)
| IEEE80211_C_WPA /* 802.11i */
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
ural_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1073,6 +1075,8 @@ ural_tx_mgt(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
STAILQ_REMOVE_HEAD(&sc->tx_free, next);
sc->tx_nfree--;
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
wh = mtod(m0, struct ieee80211_frame *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
@@ -1229,6 +1233,8 @@ ural_tx_data(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
rate = ieee80211_node_get_txrate_dot11rate(ni);
}
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
diff --git a/sys/dev/usb/wlan/if_urtw.c b/sys/dev/usb/wlan/if_urtw.c
index 439faeefc408..86cf4c653ae7 100644
--- a/sys/dev/usb/wlan/if_urtw.c
+++ b/sys/dev/usb/wlan/if_urtw.c
@@ -884,6 +884,8 @@ urtw_attach(device_t dev)
/* XXX TODO: setup regdomain if URTW_EPROM_CHANPLAN_BY_HW bit is set.*/
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
urtw_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1699,6 +1701,10 @@ urtw_tx_start(struct urtw_softc *sc, struct ieee80211_node *ni, struct mbuf *m0,
ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+
+ /* Assign sequence number */
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
/*
* Software crypto.
*/
diff --git a/sys/dev/usb/wlan/if_zyd.c b/sys/dev/usb/wlan/if_zyd.c
index 1a698caef3c5..7affdcdce089 100644
--- a/sys/dev/usb/wlan/if_zyd.c
+++ b/sys/dev/usb/wlan/if_zyd.c
@@ -384,6 +384,8 @@ zyd_attach(device_t dev)
| IEEE80211_C_WPA /* 802.11i */
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
zyd_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -2463,6 +2465,8 @@ zyd_tx_start(struct zyd_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
}
}
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
diff --git a/sys/dev/virtio/network/if_vtnet.c b/sys/dev/virtio/network/if_vtnet.c
index ecb3dbb370e5..528ff3372097 100644
--- a/sys/dev/virtio/network/if_vtnet.c
+++ b/sys/dev/virtio/network/if_vtnet.c
@@ -133,12 +133,14 @@ static int vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *,
static int vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
static int vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
static int vtnet_rxq_new_buf(struct vtnet_rxq *);
+#if defined(INET) || defined(INET6)
static int vtnet_rxq_csum_needs_csum(struct vtnet_rxq *, struct mbuf *,
- uint16_t, int, struct virtio_net_hdr *);
-static int vtnet_rxq_csum_data_valid(struct vtnet_rxq *, struct mbuf *,
- uint16_t, int, struct virtio_net_hdr *);
+ bool, int, struct virtio_net_hdr *);
+static void vtnet_rxq_csum_data_valid(struct vtnet_rxq *, struct mbuf *,
+ int);
static int vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
struct virtio_net_hdr *);
+#endif
static void vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
static void vtnet_rxq_discard_buf(struct vtnet_rxq *, struct mbuf *);
static int vtnet_rxq_merged_eof(struct vtnet_rxq *, struct mbuf *, int);
@@ -1178,6 +1180,7 @@ vtnet_setup_interface(struct vtnet_softc *sc)
if (sc->vtnet_max_mtu >= ETHERMTU_JUMBO)
if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0);
if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
+ if_setcapabilitiesbit(ifp, IFCAP_HWSTATS, 0);
/*
* Capabilities after here are not enabled by default.
@@ -1760,164 +1763,165 @@ vtnet_rxq_new_buf(struct vtnet_rxq *rxq)
return (error);
}
+#if defined(INET) || defined(INET6)
static int
-vtnet_rxq_csum_needs_csum(struct vtnet_rxq *rxq, struct mbuf *m, uint16_t etype,
- int hoff, struct virtio_net_hdr *hdr)
+vtnet_rxq_csum_needs_csum(struct vtnet_rxq *rxq, struct mbuf *m, bool isipv6,
+ int protocol, struct virtio_net_hdr *hdr)
{
struct vtnet_softc *sc;
- int error;
- sc = rxq->vtnrx_sc;
+ /*
+	 * The packet is likely from another VM on the same host, or from a
+	 * host that itself performed checksum offloading, so Tx/Rx is
+	 * basically a memcpy and the checksum has little value.
+ */
+
+ KASSERT(protocol == IPPROTO_TCP || protocol == IPPROTO_UDP,
+ ("%s: unsupported IP protocol %d", __func__, protocol));
/*
- * NEEDS_CSUM corresponds to Linux's CHECKSUM_PARTIAL, but FreeBSD does
- * not have an analogous CSUM flag. The checksum has been validated,
- * but is incomplete (TCP/UDP pseudo header).
- *
- * The packet is likely from another VM on the same host that itself
- * performed checksum offloading so Tx/Rx is basically a memcpy and
- * the checksum has little value.
- *
- * Default to receiving the packet as-is for performance reasons, but
- * this can cause issues if the packet is to be forwarded because it
- * does not contain a valid checksum. This patch may be helpful:
- * https://reviews.freebsd.org/D6611. In the meantime, have the driver
- * compute the checksum if requested.
- *
- * BMV: Need to add an CSUM_PARTIAL flag?
+	 * If the user doesn't want us to fix it up here by computing the
+	 * checksum, ask the stack to compute it later by setting the
+	 * corresponding mbuf checksum flag (e.g., CSUM_TCP).
+	 */
+ sc = rxq->vtnrx_sc;
if ((sc->vtnet_flags & VTNET_FLAG_FIXUP_NEEDS_CSUM) == 0) {
- error = vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr);
- return (error);
+ switch (protocol) {
+ case IPPROTO_TCP:
+ m->m_pkthdr.csum_flags |=
+ (isipv6 ? CSUM_TCP_IPV6 : CSUM_TCP);
+ break;
+ case IPPROTO_UDP:
+ m->m_pkthdr.csum_flags |=
+ (isipv6 ? CSUM_UDP_IPV6 : CSUM_UDP);
+ break;
+ }
+ m->m_pkthdr.csum_data = hdr->csum_offset;
+ return (0);
}
/*
* Compute the checksum in the driver so the packet will contain a
* valid checksum. The checksum is at csum_offset from csum_start.
*/
- switch (etype) {
-#if defined(INET) || defined(INET6)
- case ETHERTYPE_IP:
- case ETHERTYPE_IPV6: {
- int csum_off, csum_end;
- uint16_t csum;
-
- csum_off = hdr->csum_start + hdr->csum_offset;
- csum_end = csum_off + sizeof(uint16_t);
+ int csum_off, csum_end;
+ uint16_t csum;
- /* Assume checksum will be in the first mbuf. */
- if (m->m_len < csum_end || m->m_pkthdr.len < csum_end)
- return (1);
+ csum_off = hdr->csum_start + hdr->csum_offset;
+ csum_end = csum_off + sizeof(uint16_t);
- /*
- * Like in_delayed_cksum()/in6_delayed_cksum(), compute the
- * checksum and write it at the specified offset. We could
- * try to verify the packet: csum_start should probably
- * correspond to the start of the TCP/UDP header.
- *
- * BMV: Need to properly handle UDP with zero checksum. Is
- * the IPv4 header checksum implicitly validated?
- */
- csum = in_cksum_skip(m, m->m_pkthdr.len, hdr->csum_start);
- *(uint16_t *)(mtodo(m, csum_off)) = csum;
- m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
- m->m_pkthdr.csum_data = 0xFFFF;
- break;
- }
-#endif
- default:
- sc->vtnet_stats.rx_csum_bad_ethtype++;
+ /* Assume checksum will be in the first mbuf. */
+ if (m->m_len < csum_end || m->m_pkthdr.len < csum_end) {
+ sc->vtnet_stats.rx_csum_bad_offset++;
return (1);
}
+ /*
+ * Like in_delayed_cksum()/in6_delayed_cksum(), compute the
+ * checksum and write it at the specified offset. We could
+ * try to verify the packet: csum_start should probably
+ * correspond to the start of the TCP/UDP header.
+ *
+ * BMV: Need to properly handle UDP with zero checksum. Is
+ * the IPv4 header checksum implicitly validated?
+ */
+ csum = in_cksum_skip(m, m->m_pkthdr.len, hdr->csum_start);
+ *(uint16_t *)(mtodo(m, csum_off)) = csum;
+ m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ m->m_pkthdr.csum_data = 0xFFFF;
+
return (0);
}
+static void
+vtnet_rxq_csum_data_valid(struct vtnet_rxq *rxq, struct mbuf *m, int protocol)
+{
+ KASSERT(protocol == IPPROTO_TCP || protocol == IPPROTO_UDP,
+ ("%s: unsupported IP protocol %d", __func__, protocol));
+
+ m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ m->m_pkthdr.csum_data = 0xFFFF;
+}
+
static int
-vtnet_rxq_csum_data_valid(struct vtnet_rxq *rxq, struct mbuf *m,
- uint16_t etype, int hoff, struct virtio_net_hdr *hdr __unused)
+vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
+ struct virtio_net_hdr *hdr)
{
-#if 0
+ const struct ether_header *eh;
struct vtnet_softc *sc;
-#endif
- int protocol;
+ int hoff, protocol;
+ uint16_t etype;
+ bool isipv6;
+
+ KASSERT(hdr->flags &
+ (VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID),
+ ("%s: missing checksum offloading flag %x", __func__, hdr->flags));
+
+ eh = mtod(m, const struct ether_header *);
+ etype = ntohs(eh->ether_type);
+ if (etype == ETHERTYPE_VLAN) {
+ /* TODO BMV: Handle QinQ. */
+ const struct ether_vlan_header *evh =
+ mtod(m, const struct ether_vlan_header *);
+ etype = ntohs(evh->evl_proto);
+ hoff = sizeof(struct ether_vlan_header);
+ } else
+ hoff = sizeof(struct ether_header);
-#if 0
sc = rxq->vtnrx_sc;
-#endif
+ /* Check whether ethernet type is IP or IPv6, and get protocol. */
switch (etype) {
#if defined(INET)
case ETHERTYPE_IP:
- if (__predict_false(m->m_len < hoff + sizeof(struct ip)))
- protocol = IPPROTO_DONE;
- else {
+ if (__predict_false(m->m_len < hoff + sizeof(struct ip))) {
+ sc->vtnet_stats.rx_csum_inaccessible_ipproto++;
+ return (1);
+ } else {
struct ip *ip = (struct ip *)(m->m_data + hoff);
protocol = ip->ip_p;
}
+ isipv6 = false;
break;
#endif
#if defined(INET6)
case ETHERTYPE_IPV6:
if (__predict_false(m->m_len < hoff + sizeof(struct ip6_hdr))
- || ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0)
- protocol = IPPROTO_DONE;
+ || ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0) {
+ sc->vtnet_stats.rx_csum_inaccessible_ipproto++;
+ return (1);
+ }
+ isipv6 = true;
break;
#endif
default:
- protocol = IPPROTO_DONE;
- break;
+ sc->vtnet_stats.rx_csum_bad_ethtype++;
+ return (1);
}
+ /* Check whether protocol is TCP or UDP. */
switch (protocol) {
case IPPROTO_TCP:
case IPPROTO_UDP:
- m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
- m->m_pkthdr.csum_data = 0xFFFF;
break;
default:
/*
* FreeBSD does not support checksum offloading of this
- * protocol. Let the stack re-verify the checksum later
- * if the protocol is supported.
+ * protocol here.
*/
-#if 0
- if_printf(sc->vtnet_ifp,
- "%s: checksum offload of unsupported protocol "
- "etype=%#x protocol=%d csum_start=%d csum_offset=%d\n",
- __func__, etype, protocol, hdr->csum_start,
- hdr->csum_offset);
-#endif
- break;
+ sc->vtnet_stats.rx_csum_bad_ipproto++;
+ return (1);
}
- return (0);
-}
-
-static int
-vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
- struct virtio_net_hdr *hdr)
-{
- const struct ether_header *eh;
- int hoff;
- uint16_t etype;
-
- eh = mtod(m, const struct ether_header *);
- etype = ntohs(eh->ether_type);
- if (etype == ETHERTYPE_VLAN) {
- /* TODO BMV: Handle QinQ. */
- const struct ether_vlan_header *evh =
- mtod(m, const struct ether_vlan_header *);
- etype = ntohs(evh->evl_proto);
- hoff = sizeof(struct ether_vlan_header);
- } else
- hoff = sizeof(struct ether_header);
-
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
- return (vtnet_rxq_csum_needs_csum(rxq, m, etype, hoff, hdr));
+ return (vtnet_rxq_csum_needs_csum(rxq, m, isipv6, protocol,
+ hdr));
else /* VIRTIO_NET_HDR_F_DATA_VALID */
- return (vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr));
+ vtnet_rxq_csum_data_valid(rxq, m, protocol);
+
+ return (0);
}
+#endif
static void
vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *rxq, int nbufs)
@@ -2040,10 +2044,15 @@ vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
if (hdr->flags &
(VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID)) {
+#if defined(INET) || defined(INET6)
if (vtnet_rxq_csum(rxq, m, hdr) == 0)
rxq->vtnrx_stats.vrxs_csum++;
else
rxq->vtnrx_stats.vrxs_csum_failed++;
+#else
+ sc->vtnet_stats.rx_csum_bad_ethtype++;
+ rxq->vtnrx_stats.vrxs_csum_failed++;
+#endif
}
if (hdr->gso_size != 0) {
@@ -2497,6 +2506,10 @@ vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m,
hdr->csum_start = vtnet_gtoh16(sc, csum_start);
hdr->csum_offset = vtnet_gtoh16(sc, m->m_pkthdr.csum_data);
txq->vtntx_stats.vtxs_csum++;
+ } else if ((flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) &&
+ (proto == IPPROTO_TCP || proto == IPPROTO_UDP) &&
+ (m->m_pkthdr.csum_data == 0xFFFF)) {
+ hdr->flags |= VIRTIO_NET_HDR_F_DATA_VALID;
}
if (flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) {
@@ -2551,8 +2564,10 @@ vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
error = sglist_append_mbuf(sg, m);
if (error) {
m = m_defrag(m, M_NOWAIT);
- if (m == NULL)
+ if (m == NULL) {
+ sc->vtnet_stats.tx_defrag_failed++;
goto fail;
+ }
*m_head = m;
sc->vtnet_stats.tx_defragged++;
@@ -2568,7 +2583,6 @@ vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
return (error);
fail:
- sc->vtnet_stats.tx_defrag_failed++;
m_freem(*m_head);
*m_head = NULL;
@@ -2609,7 +2623,8 @@ vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head, int flags)
m->m_flags &= ~M_VLANTAG;
}
- if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) {
+ if (m->m_pkthdr.csum_flags &
+ (VTNET_CSUM_ALL_OFFLOAD | CSUM_DATA_VALID)) {
m = vtnet_txq_offload(txq, m, hdr);
if ((*m_head = m) == NULL) {
error = ENOBUFS;
@@ -3031,16 +3046,14 @@ vtnet_get_counter(if_t ifp, ift_counter cnt)
return (rxaccum.vrxs_iqdrops);
case IFCOUNTER_IERRORS:
return (rxaccum.vrxs_ierrors);
+ case IFCOUNTER_IBYTES:
+ return (rxaccum.vrxs_ibytes);
case IFCOUNTER_OPACKETS:
return (txaccum.vtxs_opackets);
case IFCOUNTER_OBYTES:
- if (!VTNET_ALTQ_ENABLED)
- return (txaccum.vtxs_obytes);
- /* FALLTHROUGH */
+ return (txaccum.vtxs_obytes);
case IFCOUNTER_OMCASTS:
- if (!VTNET_ALTQ_ENABLED)
- return (txaccum.vtxs_omcasts);
- /* FALLTHROUGH */
+ return (txaccum.vtxs_omcasts);
default:
return (if_get_counter_default(ifp, cnt));
}
@@ -3813,9 +3826,9 @@ vtnet_rx_filter_mac(struct vtnet_softc *sc)
if_printf(ifp, "error setting host MAC filter table\n");
out:
- if (promisc != 0 && vtnet_set_promisc(sc, true) != 0)
+ if (promisc && vtnet_set_promisc(sc, true) != 0)
if_printf(ifp, "cannot enable promiscuous mode\n");
- if (allmulti != 0 && vtnet_set_allmulti(sc, true) != 0)
+ if (allmulti && vtnet_set_allmulti(sc, true) != 0)
if_printf(ifp, "cannot enable all-multicast mode\n");
}
@@ -4100,21 +4113,29 @@ vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
stats = &rxq->vtnrx_stats;
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_ipackets, "Receive packets");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_ibytes, "Receive bytes");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_iqdrops, "Receive drops");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_ierrors, "Receive errors");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_csum, "Receive checksum offloaded");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_csum_failed, "Receive checksum offload failed");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "host_lro", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "host_lro",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_host_lro, "Receive host segmentation offloaded");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_rescheduled,
"Receive interrupt handler rescheduled");
}
@@ -4135,17 +4156,23 @@ vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx,
stats = &txq->vtntx_stats;
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_opackets, "Transmit packets");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_obytes, "Transmit bytes");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_omcasts, "Transmit multicasts");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_csum, "Transmit checksum offloaded");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_tso, "Transmit TCP segmentation offloaded");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_rescheduled,
"Transmit interrupt handler rescheduled");
}
@@ -4170,6 +4197,102 @@ vtnet_setup_queue_sysctl(struct vtnet_softc *sc)
}
}
+static int
+vtnet_sysctl_rx_csum_failed(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_rxq_stats *rxst;
+ int i;
+
+ stats->rx_csum_failed = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
+ stats->rx_csum_failed += rxst->vrxs_csum_failed;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->rx_csum_failed, req));
+}
+
+static int
+vtnet_sysctl_rx_csum_offloaded(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_rxq_stats *rxst;
+ int i;
+
+ stats->rx_csum_offloaded = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
+ stats->rx_csum_offloaded += rxst->vrxs_csum;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->rx_csum_offloaded, req));
+}
+
+static int
+vtnet_sysctl_rx_task_rescheduled(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_rxq_stats *rxst;
+ int i;
+
+ stats->rx_task_rescheduled = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
+ stats->rx_task_rescheduled += rxst->vrxs_rescheduled;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->rx_task_rescheduled, req));
+}
+
+static int
+vtnet_sysctl_tx_csum_offloaded(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_txq_stats *txst;
+ int i;
+
+ stats->tx_csum_offloaded = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ txst = &sc->vtnet_txqs[i].vtntx_stats;
+ stats->tx_csum_offloaded += txst->vtxs_csum;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->tx_csum_offloaded, req));
+}
+
+static int
+vtnet_sysctl_tx_tso_offloaded(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_txq_stats *txst;
+ int i;
+
+ stats->tx_tso_offloaded = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ txst = &sc->vtnet_txqs[i].vtntx_stats;
+ stats->tx_tso_offloaded += txst->vtxs_tso;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->tx_tso_offloaded, req));
+}
+
+static int
+vtnet_sysctl_tx_task_rescheduled(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_txq_stats *txst;
+ int i;
+
+ stats->tx_task_rescheduled = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ txst = &sc->vtnet_txqs[i].vtntx_stats;
+ stats->tx_task_rescheduled += txst->vtxs_rescheduled;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->tx_task_rescheduled, req));
+}
+
static void
vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child, struct vtnet_softc *sc)
@@ -4189,69 +4312,75 @@ vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
stats->tx_task_rescheduled = txaccum.vtxs_rescheduled;
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
- CTLFLAG_RD, &stats->mbuf_alloc_failed,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->mbuf_alloc_failed,
"Mbuf cluster allocation failures");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
- CTLFLAG_RD, &stats->rx_frame_too_large,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_frame_too_large,
"Received frame larger than the mbuf chain");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
- CTLFLAG_RD, &stats->rx_enq_replacement_failed,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_enq_replacement_failed,
"Enqueuing the replacement receive mbuf failed");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
- CTLFLAG_RD, &stats->rx_mergeable_failed,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_mergeable_failed,
"Mergeable buffers receive failures");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
- CTLFLAG_RD, &stats->rx_csum_bad_ethtype,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_bad_ethtype,
"Received checksum offloaded buffer with unsupported "
"Ethernet type");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
- CTLFLAG_RD, &stats->rx_csum_bad_ipproto,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_bad_ipproto,
"Received checksum offloaded buffer with incorrect IP protocol");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
- CTLFLAG_RD, &stats->rx_csum_bad_offset,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_bad_offset,
"Received checksum offloaded buffer with incorrect offset");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_proto",
- CTLFLAG_RD, &stats->rx_csum_bad_proto,
- "Received checksum offloaded buffer with incorrect protocol");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
- CTLFLAG_RD, &stats->rx_csum_failed,
+ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_inaccessible_ipproto",
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_inaccessible_ipproto,
+ "Received checksum offloaded buffer with inaccessible IP protocol");
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_csum_failed",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_rx_csum_failed, "QU",
"Received buffer checksum offload failed");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
- CTLFLAG_RD, &stats->rx_csum_offloaded,
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_csum_offloaded",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_rx_csum_offloaded, "QU",
"Received buffer checksum offload succeeded");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
- CTLFLAG_RD, &stats->rx_task_rescheduled,
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_task_rescheduled",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_rx_task_rescheduled, "QU",
"Times the receive interrupt task rescheduled itself");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_unknown_ethtype",
- CTLFLAG_RD, &stats->tx_csum_unknown_ethtype,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_csum_unknown_ethtype,
"Aborted transmit of checksum offloaded buffer with unknown "
"Ethernet type");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_proto_mismatch",
- CTLFLAG_RD, &stats->tx_csum_proto_mismatch,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_csum_proto_mismatch,
"Aborted transmit of checksum offloaded buffer because mismatched "
"protocols");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
- CTLFLAG_RD, &stats->tx_tso_not_tcp,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_tso_not_tcp,
"Aborted transmit of TSO buffer with non TCP protocol");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_without_csum",
- CTLFLAG_RD, &stats->tx_tso_without_csum,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_tso_without_csum,
"Aborted transmit of TSO buffer without TCP checksum offload");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
- CTLFLAG_RD, &stats->tx_defragged,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_defragged,
"Transmit mbufs defragged");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
- CTLFLAG_RD, &stats->tx_defrag_failed,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_defrag_failed,
"Aborted transmit of buffer because defrag failed");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
- CTLFLAG_RD, &stats->tx_csum_offloaded,
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_csum_offloaded",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_tx_csum_offloaded, "QU",
"Offloaded checksum of transmitted buffer");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
- CTLFLAG_RD, &stats->tx_tso_offloaded,
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_tso_offloaded",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_tx_tso_offloaded, "QU",
"Segmentation offload of transmitted buffer");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
- CTLFLAG_RD, &stats->tx_task_rescheduled,
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_task_rescheduled",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_tx_task_rescheduled, "QU",
"Times the transmit interrupt task rescheduled itself");
}
diff --git a/sys/dev/virtio/network/if_vtnetvar.h b/sys/dev/virtio/network/if_vtnetvar.h
index 0144b0f3232d..cab7ced639a7 100644
--- a/sys/dev/virtio/network/if_vtnetvar.h
+++ b/sys/dev/virtio/network/if_vtnetvar.h
@@ -46,7 +46,7 @@ struct vtnet_statistics {
uint64_t rx_csum_bad_ethtype;
uint64_t rx_csum_bad_ipproto;
uint64_t rx_csum_bad_offset;
- uint64_t rx_csum_bad_proto;
+ uint64_t rx_csum_inaccessible_ipproto;
uint64_t tx_csum_unknown_ethtype;
uint64_t tx_csum_proto_mismatch;
uint64_t tx_tso_not_tcp;
diff --git a/sys/dev/vmgenc/vmgenc_acpi.c b/sys/dev/vmgenc/vmgenc_acpi.c
index 2ad8929dfd34..18519a8e4f22 100644
--- a/sys/dev/vmgenc/vmgenc_acpi.c
+++ b/sys/dev/vmgenc/vmgenc_acpi.c
@@ -56,6 +56,7 @@
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
+#include <dev/random/randomdev.h>
#include <dev/random/random_harvestq.h>
#include <dev/vmgenc/vmgenc_acpi.h>
@@ -210,6 +211,11 @@ acpi_GetPackedUINT64(device_t dev, ACPI_HANDLE handle, char *path,
}
+static const struct random_source random_vmgenid = {
+ .rs_ident = "VM Generation ID",
+ .rs_source = RANDOM_PURE_VMGENID,
+};
+
static int
vmgenc_attach(device_t dev)
{
@@ -234,7 +240,7 @@ vmgenc_attach(device_t dev)
memcpy(sc->vmg_cache_guid, __DEVOLATILE(void *, sc->vmg_pguid),
sizeof(sc->vmg_cache_guid));
- random_harvest_register_source(RANDOM_PURE_VMGENID);
+ random_source_register(&random_vmgenid);
vmgenc_harvest_all(sc->vmg_cache_guid, sizeof(sc->vmg_cache_guid));
AcpiInstallNotifyHandler(h, ACPI_DEVICE_NOTIFY, vmgenc_notify, dev);
diff --git a/sys/dev/vmm/vmm_dev.c b/sys/dev/vmm/vmm_dev.c
index 9f2b009d02ec..460a508a60dc 100644
--- a/sys/dev/vmm/vmm_dev.c
+++ b/sys/dev/vmm/vmm_dev.c
@@ -901,6 +901,7 @@ vmmdev_lookup_and_destroy(const char *name, struct ucred *cred)
sc->cdev = NULL;
sx_xunlock(&vmmdev_mtx);
+ vm_suspend(sc->vm, VM_SUSPEND_DESTROY);
destroy_dev(cdev);
vmmdev_destroy(sc);