Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/include/cpufunc.h | 2
-rw-r--r--  sys/arm/arm/generic_timer.c | 9
-rw-r--r--  sys/arm/include/atomic.h | 8
-rw-r--r--  sys/arm64/arm64/cpu_feat.c | 52
-rw-r--r--  sys/arm64/arm64/identcpu.c | 2
-rw-r--r--  sys/arm64/arm64/locore.S | 41
-rw-r--r--  sys/arm64/arm64/machdep.c | 14
-rw-r--r--  sys/arm64/arm64/pmap.c | 4
-rw-r--r--  sys/arm64/arm64/ptrauth.c | 34
-rw-r--r--  sys/arm64/include/armreg.h | 2
-rw-r--r--  sys/arm64/include/cpu_feat.h | 8
-rw-r--r--  sys/arm64/rockchip/rk_tsadc.c | 2
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c | 25
-rw-r--r--  sys/cddl/dev/dtrace/aarch64/dtrace_subr.c | 27
-rw-r--r--  sys/cddl/dev/dtrace/amd64/dtrace_subr.c | 25
-rw-r--r--  sys/cddl/dev/dtrace/arm/dtrace_subr.c | 25
-rw-r--r--  sys/cddl/dev/dtrace/i386/dtrace_subr.c | 25
-rw-r--r--  sys/cddl/dev/dtrace/powerpc/dtrace_subr.c | 25
-rw-r--r--  sys/cddl/dev/dtrace/riscv/dtrace_subr.c | 27
-rw-r--r--  sys/compat/linux/linux_misc.c | 41
-rw-r--r--  sys/compat/linux/linux_socket.c | 12
-rw-r--r--  sys/compat/linux/linux_uid16.c | 39
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/string_choices.h | 71
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/string_helpers.h | 67
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c | 40
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c | 4
-rw-r--r--  sys/dev/acpica/acpi.c | 378
-rw-r--r--  sys/dev/acpica/acpi_lid.c | 4
-rw-r--r--  sys/dev/acpica/acpivar.h | 15
-rw-r--r--  sys/dev/ath/if_ath.c | 3
-rw-r--r--  sys/dev/ath/if_ath_tx.c | 10
-rw-r--r--  sys/dev/bwi/if_bwi.c | 4
-rw-r--r--  sys/dev/bwn/if_bwn.c | 2
-rw-r--r--  sys/dev/cpuctl/cpuctl.c | 22
-rw-r--r--  sys/dev/gpio/gpioled.c | 2
-rw-r--r--  sys/dev/ipw/if_ipw.c | 3
-rw-r--r--  sys/dev/iwi/if_iwi.c | 4
-rw-r--r--  sys/dev/iwm/if_iwm.c | 7
-rw-r--r--  sys/dev/iwn/if_iwn.c | 12
-rw-r--r--  sys/dev/iwx/if_iwx.c | 7
-rw-r--r--  sys/dev/malo/if_malo.c | 4
-rw-r--r--  sys/dev/mwl/if_mwl.c | 4
-rw-r--r--  sys/dev/otus/if_otus.c | 11
-rw-r--r--  sys/dev/ral/rt2560.c | 4
-rw-r--r--  sys/dev/ral/rt2661.c | 4
-rw-r--r--  sys/dev/ral/rt2860.c | 3
-rw-r--r--  sys/dev/random/fenestrasX/fx_pool.c | 3
-rw-r--r--  sys/dev/rtwn/if_rtwn.c | 5
-rw-r--r--  sys/dev/ufshci/ufshci_ctrlr.c | 399
-rw-r--r--  sys/dev/ufshci/ufshci_ctrlr_cmd.c | 2
-rw-r--r--  sys/dev/ufshci/ufshci_dev.c | 1
-rw-r--r--  sys/dev/ufshci/ufshci_pci.c | 3
-rw-r--r--  sys/dev/ufshci/ufshci_private.h | 41
-rw-r--r--  sys/dev/ufshci/ufshci_req_queue.c | 292
-rw-r--r--  sys/dev/ufshci/ufshci_req_sdb.c | 77
-rw-r--r--  sys/dev/ufshci/ufshci_sim.c | 1
-rw-r--r--  sys/dev/usb/wlan/if_mtw.c | 5
-rw-r--r--  sys/dev/usb/wlan/if_uath.c | 4
-rw-r--r--  sys/dev/usb/wlan/if_upgt.c | 5
-rw-r--r--  sys/dev/usb/wlan/if_ural.c | 6
-rw-r--r--  sys/dev/usb/wlan/if_urtw.c | 6
-rw-r--r--  sys/dev/usb/wlan/if_zyd.c | 4
-rw-r--r--  sys/fs/fuse/fuse_ipc.c | 4
-rw-r--r--  sys/fs/fuse/fuse_vnops.c | 10
-rw-r--r--  sys/fs/nfsserver/nfs_nfsdport.c | 5
-rw-r--r--  sys/i386/include/cpufunc.h | 2
-rw-r--r--  sys/kern/kern_descrip.c | 3
-rw-r--r--  sys/kern/kern_prot.c | 47
-rw-r--r--  sys/kern/kern_sig.c | 2
-rw-r--r--  sys/kern/subr_asan.c | 1
-rw-r--r--  sys/kern/subr_msan.c | 1
-rw-r--r--  sys/kern/subr_param.c | 13
-rw-r--r--  sys/kern/subr_power.c | 46
-rw-r--r--  sys/kern/subr_witness.c | 10
-rw-r--r--  sys/kern/syscalls.master | 4
-rw-r--r--  sys/kern/uipc_usrreq.c | 9
-rw-r--r--  sys/kern/vfs_subr.c | 5
-rw-r--r--  sys/kern/vfs_vnops.c | 184
-rw-r--r--  sys/net/pfvar.h | 9
-rw-r--r--  sys/netinet6/in6.h | 3
-rw-r--r--  sys/netinet6/in6_ifattach.c | 275
-rw-r--r--  sys/netinet6/in6_ifattach.h | 2
-rw-r--r--  sys/netinet6/in6_proto.c | 10
-rw-r--r--  sys/netinet6/ip6_input.c | 1
-rw-r--r--  sys/netinet6/ip6_var.h | 12
-rw-r--r--  sys/netinet6/nd6.c | 9
-rw-r--r--  sys/netinet6/nd6.h | 2
-rw-r--r--  sys/netinet6/nd6_nbr.c | 35
-rw-r--r--  sys/netinet6/nd6_rtr.c | 128
-rw-r--r--  sys/netpfil/pf/pf.c | 55
-rw-r--r--  sys/netpfil/pf/pf_ioctl.c | 5
-rw-r--r--  sys/netpfil/pf/pf_ruleset.c | 12
-rw-r--r--  sys/netpfil/pf/pf_syncookies.c | 8
-rw-r--r--  sys/powerpc/include/atomic.h | 33
-rw-r--r--  sys/riscv/include/atomic.h | 3
-rw-r--r--  sys/security/mac_bsdextended/mac_bsdextended.c | 4
-rw-r--r--  sys/security/mac_do/mac_do.c | 2
-rw-r--r--  sys/sys/_atomic_subword.h | 28
-rw-r--r--  sys/sys/file.h | 10
-rw-r--r--  sys/sys/power.h | 3
-rw-r--r--  sys/sys/ucred.h | 14
-rw-r--r--  sys/x86/acpica/acpi_apm.c | 25
-rw-r--r--  sys/x86/include/ucode.h | 2
-rw-r--r--  sys/x86/x86/ucode.c | 3
-rw-r--r--  sys/x86/x86/ucode_subr.c | 10
105 files changed, 1924 insertions(+), 1113 deletions(-)
diff --git a/sys/amd64/include/cpufunc.h b/sys/amd64/include/cpufunc.h
index d180f5c76afb..e6c9f47ea261 100644
--- a/sys/amd64/include/cpufunc.h
+++ b/sys/amd64/include/cpufunc.h
@@ -76,7 +76,7 @@ static __inline void
clflushopt(u_long addr)
{
- __asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
+ __asm __volatile("clflushopt %0" : : "m" (*(char *)addr));
}
static __inline void
diff --git a/sys/arm/arm/generic_timer.c b/sys/arm/arm/generic_timer.c
index dacef8de2257..c4a1f44a0079 100644
--- a/sys/arm/arm/generic_timer.c
+++ b/sys/arm/arm/generic_timer.c
@@ -905,8 +905,15 @@ wfxt_enable(const struct cpu_feat *feat __unused,
return (true);
}
+static void
+wfxt_disabled(const struct cpu_feat *feat __unused)
+{
+ if (PCPU_GET(cpuid) == 0)
+ update_special_reg(ID_AA64ISAR2_EL1, ID_AA64ISAR2_WFxT_MASK, 0);
+}
+
CPU_FEAT(feat_wfxt, "WFE and WFI instructions with timeout",
- wfxt_check, NULL, wfxt_enable,
+ wfxt_check, NULL, wfxt_enable, wfxt_disabled,
CPU_FEAT_AFTER_DEV | CPU_FEAT_SYSTEM);
#endif
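
The wfxt_disabled() hook above follows the new cpu_feat contract: when a feature ends up disabled, the callback runs so the boot CPU can mask the matching ID-register fields. A minimal sketch of a registration using the extended six-callback CPU_FEAT() macro (feat_foo and the bodies are illustrative; the signatures match the typedefs in the cpu_feat.h diff further down):

static cpu_feat_en
foo_check(const struct cpu_feat *feat __unused, u_int midr __unused)
{
	/* Probe ID registers here; FEAT_DEFAULT_ENABLE opts the feature in. */
	return (FEAT_DEFAULT_ENABLE);
}

static bool
foo_enable(const struct cpu_feat *feat __unused,
    cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
    u_int errata_count __unused)
{
	/* Flip the controlling bits; returning true marks feat_enabled. */
	return (true);
}

static void
foo_disabled(const struct cpu_feat *feat __unused)
{
	/*
	 * Runs whenever the feature ends up disabled; typically masks the
	 * ID-register fields on the boot CPU, as wfxt_disabled() does above.
	 */
}

CPU_FEAT(feat_foo, "Illustrative feature",
    foo_check, NULL, foo_enable, foo_disabled,
    CPU_FEAT_AFTER_DEV | CPU_FEAT_SYSTEM);
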
diff --git a/sys/arm/include/atomic.h b/sys/arm/include/atomic.h
index f3313b136656..f66953710615 100644
--- a/sys/arm/include/atomic.h
+++ b/sys/arm/include/atomic.h
@@ -1103,11 +1103,9 @@ atomic_thread_fence_seq_cst(void)
#define atomic_store_rel_int atomic_store_rel_32
#define atomic_swap_int atomic_swap_32
-/*
- * For:
- * - atomic_load_acq_8
- * - atomic_load_acq_16
- */
#include <sys/_atomic_subword.h>
+#define atomic_set_short atomic_set_16
+#define atomic_clear_short atomic_clear_16
+
#endif /* _MACHINE_ATOMIC_H_ */
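
The two aliases expose the subword helpers pulled in via <sys/_atomic_subword.h> (extended by this commit, per the diffstat) under the _short names that MI code expects. A hedged usage sketch:

static volatile uint16_t flags;

static void
flag_toggle(void)
{
	atomic_set_short(&flags, 0x0001);	/* atomically OR in bit 0 */
	atomic_clear_short(&flags, 0x0001);	/* atomically AND it back out */
}
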
diff --git a/sys/arm64/arm64/cpu_feat.c b/sys/arm64/arm64/cpu_feat.c
index 986d5079e980..94114d47f846 100644
--- a/sys/arm64/arm64/cpu_feat.c
+++ b/sys/arm64/arm64/cpu_feat.c
@@ -54,6 +54,21 @@ enable_cpu_feat(uint32_t stage)
SET_FOREACH(featp, cpu_feat_set) {
feat = *featp;
+ /* Read any tunable the user may have set */
+ if (stage == CPU_FEAT_EARLY_BOOT && PCPU_GET(cpuid) == 0) {
+ snprintf(tunable, sizeof(tunable), "hw.feat.%s",
+ feat->feat_name);
+ if (TUNABLE_BOOL_FETCH(tunable, &val)) {
+ if (val) {
+ feat->feat_flags |=
+ CPU_FEAT_USER_ENABLED;
+ } else {
+ feat->feat_flags |=
+ CPU_FEAT_USER_DISABLED;
+ }
+ }
+ }
+
/* Run the enablement code at the correct stage of boot */
if ((feat->feat_flags & CPU_FEAT_STAGE_MASK) != stage)
continue;
@@ -63,25 +78,26 @@ enable_cpu_feat(uint32_t stage)
PCPU_GET(cpuid) != 0)
continue;
- if (feat->feat_check != NULL)
- continue;
-
- check_status = feat->feat_check(feat, midr);
+ if (feat->feat_check != NULL) {
+ check_status = feat->feat_check(feat, midr);
+ } else {
+ check_status = FEAT_DEFAULT_ENABLE;
+ }
/* Ignore features that are not present */
if (check_status == FEAT_ALWAYS_DISABLE)
- continue;
+ goto next;
- snprintf(tunable, sizeof(tunable), "hw.feat.%s",
- feat->feat_name);
- if (TUNABLE_BOOL_FETCH(tunable, &val)) {
- /* Is the feature disabled by the tunable? */
- if (!val)
- continue;
- /* If enabled by the tunable then enable it */
- } else if (check_status == FEAT_DEFAULT_DISABLE) {
- /* No tunable set and disabled by default */
- continue;
- }
+ /* The user disabled the feature */
+ if ((feat->feat_flags & CPU_FEAT_USER_DISABLED) != 0)
+ goto next;
+
+ /*
+ * The feature was disabled by default and the user
+ * didn't enable it then skip.
+ */
+ if (check_status == FEAT_DEFAULT_DISABLE &&
+ (feat->feat_flags & CPU_FEAT_USER_ENABLED) == 0)
+ goto next;
/*
* Check if the feature has any errata that may need a
@@ -122,6 +138,10 @@ enable_cpu_feat(uint32_t stage)
if (feat->feat_enable(feat, errata_status, errata_list,
errata_count))
feat->feat_enabled = true;
+
+next:
+ if (!feat->feat_enabled && feat->feat_disabled != NULL)
+ feat->feat_disabled(feat);
}
}
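
Centralizing the fetch means every feature now gets one boolean loader tunable named hw.feat.<feat_name>, read once on the boot CPU during CPU_FEAT_EARLY_BOOT and latched into CPU_FEAT_USER_ENABLED/CPU_FEAT_USER_DISABLED. A hedged kernel-side sketch of the lookup (the feature name is illustrative; in loader.conf the knob would read hw.feat.feat_pan="0" to force-disable, or "1" to force-enable a default-off feature):

static void
report_user_override(const char *name)
{
	char tunable[64];
	bool val;

	snprintf(tunable, sizeof(tunable), "hw.feat.%s", name);
	if (TUNABLE_BOOL_FETCH(tunable, &val))
		printf("user %s %s\n", val ? "enabled" : "disabled", name);
	else
		printf("no override set for %s\n", name);
}
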
diff --git a/sys/arm64/arm64/identcpu.c b/sys/arm64/arm64/identcpu.c
index 01b4ece59861..2d07420bcdb0 100644
--- a/sys/arm64/arm64/identcpu.c
+++ b/sys/arm64/arm64/identcpu.c
@@ -2353,7 +2353,7 @@ user_ctr_enable(const struct cpu_feat *feat __unused,
}
CPU_FEAT(trap_ctr, "Trap CTR_EL0",
- user_ctr_check, user_ctr_has_errata, user_ctr_enable,
+ user_ctr_check, user_ctr_has_errata, user_ctr_enable, NULL,
CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);
static bool
diff --git a/sys/arm64/arm64/locore.S b/sys/arm64/arm64/locore.S
index 4a10a2b4f2d3..50a3eda846da 100644
--- a/sys/arm64/arm64/locore.S
+++ b/sys/arm64/arm64/locore.S
@@ -39,6 +39,23 @@
#define VIRT_BITS 48
+/*
+ * Loads a 64-bit value into reg using 1 to 4 mov/movk instructions.
+ * This can be used early on when we don't know the CPUs endianness.
+ */
+.macro mov_q reg, val
+ mov \reg, :abs_g0_nc:\val
+.if (\val >> 16) & 0xffff != 0
+ movk \reg, :abs_g1_nc:\val
+.endif
+.if (\val >> 32) & 0xffff != 0
+ movk \reg, :abs_g2_nc:\val
+.endif
+.if (\val >> 48) & 0xffff != 0
+ movk \reg, :abs_g3:\val
+.endif
+.endm
+
#if PAGE_SIZE == PAGE_SIZE_16K
/*
* The number of level 3 tables to create. 32 will allow for 1G of address
@@ -324,15 +341,23 @@ LENTRY(enter_kernel_el)
cmp x23, #(CURRENTEL_EL_EL2)
b.eq 1f
- ldr x2, =SCTLR_MMU_OFF
+ /*
+ * Ensure there are no memory operations here. If the boot loader
+ * enters the kernel in big-endian mode then loading sctlr will
+ * be incorrect. As instructions are the same in both endians it is
+ * safe to use mov instructions.
+ */
+ mov_q x2, SCTLR_MMU_OFF
msr sctlr_el1, x2
- /* SCTLR_EOS is set so eret is a context synchronizing event so we
+ /*
+ * SCTLR_EOS is set to make eret a context synchronizing event. We
* need an isb here to ensure it's observed by later instructions,
* but don't need it in the eret below.
*/
isb
- /* Ensure SPSR_EL1 and pstate are in sync. The only wat to set the
+ /*
+ * Ensure SPSR_EL1 and pstate are in sync. The only way to set the
* latter is to set the former and return from an exception with eret.
*/
mov x2, #(PSR_DAIF | PSR_M_EL1h)
@@ -346,11 +371,19 @@ LENTRY(enter_kernel_el)
* Set just the reserved bits in sctlr_el2. This will disable the
* MMU which may have broken the kernel if we enter the kernel in
* EL2, e.g. when using VHE.
+ *
+ * As with sctlr_el1 above use mov instructions to ensure there are
+ * no memory operations.
*/
- ldr x2, =(SCTLR_EL2_RES1 | SCTLR_EL2_EIS | SCTLR_EL2_EOS)
+ mov_q x2, (SCTLR_EL2_RES1 | SCTLR_EL2_EIS | SCTLR_EL2_EOS)
msr sctlr_el2, x2
isb
+ /*
+ * The hardware is now in little-endian mode so memory operations
+ * are safe.
+ */
+
/* Configure the Hypervisor */
ldr x2, =(HCR_RW | HCR_APK | HCR_API)
msr hcr_el2, x2
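
mov_q matters here because a ldr from a literal pool is a data access, and data fetched while the CPU's endianness is still unknown would be byte-swapped, whereas instructions decode the same either way (as the comments above note). The macro simply splits the constant into 16-bit groups. A hedged C model of that decomposition:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t val = 0x0123456789abcdefULL;

	/*
	 * For this value mov_q would emit (relocations shown for reference):
	 *   mov  x2, #0xcdef           // :abs_g0_nc: bits [15:0]
	 *   movk x2, #0x89ab, lsl #16  // :abs_g1_nc: bits [31:16]
	 *   movk x2, #0x4567, lsl #32  // :abs_g2_nc: bits [47:32]
	 *   movk x2, #0x0123, lsl #48  // :abs_g3:    bits [63:48]
	 * Halfwords that are zero are skipped by the .if guards.
	 */
	for (int g = 0; g < 4; g++)
		printf("g%d = %#06x\n", g,
		    (unsigned)((val >> (16 * g)) & 0xffff));
	return (0);
}
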
diff --git a/sys/arm64/arm64/machdep.c b/sys/arm64/arm64/machdep.c
index 47c701e8588c..322bad273a08 100644
--- a/sys/arm64/arm64/machdep.c
+++ b/sys/arm64/arm64/machdep.c
@@ -178,7 +178,8 @@ pan_check(const struct cpu_feat *feat __unused, u_int midr __unused)
{
uint64_t id_aa64mfr1;
- id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
+ if (!get_kernel_reg(ID_AA64MMFR1_EL1, &id_aa64mfr1))
+ return (FEAT_ALWAYS_DISABLE);
if (ID_AA64MMFR1_PAN_VAL(id_aa64mfr1) == ID_AA64MMFR1_PAN_NONE)
return (FEAT_ALWAYS_DISABLE);
@@ -207,9 +208,16 @@ pan_enable(const struct cpu_feat *feat __unused,
return (true);
}
+static void
+pan_disabled(const struct cpu_feat *feat __unused)
+{
+ if (PCPU_GET(cpuid) == 0)
+ update_special_reg(ID_AA64MMFR1_EL1, ID_AA64MMFR1_PAN_MASK, 0);
+}
+
CPU_FEAT(feat_pan, "Privileged access never",
- pan_check, NULL, pan_enable,
- CPU_FEAT_EARLY_BOOT | CPU_FEAT_PER_CPU);
+ pan_check, NULL, pan_enable, pan_disabled,
+ CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);
bool
has_hyp(void)
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 8a4395aa1c89..dbf5c820d20b 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -1722,7 +1722,7 @@ pmap_dbm_enable(const struct cpu_feat *feat __unused,
}
CPU_FEAT(feat_hafdbs, "Hardware management of the Access flag and dirty state",
- pmap_dbm_check, pmap_dbm_has_errata, pmap_dbm_enable,
+ pmap_dbm_check, pmap_dbm_has_errata, pmap_dbm_enable, NULL,
CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);
static cpu_feat_en
@@ -1767,7 +1767,7 @@ pmap_multiple_tlbi_enable(const struct cpu_feat *feat __unused,
}
CPU_FEAT(errata_multi_tlbi, "Multiple TLBI errata",
- pmap_multiple_tlbi_check, NULL, pmap_multiple_tlbi_enable,
+ pmap_multiple_tlbi_check, NULL, pmap_multiple_tlbi_enable, NULL,
CPU_FEAT_EARLY_BOOT | CPU_FEAT_PER_CPU);
/*
diff --git a/sys/arm64/arm64/ptrauth.c b/sys/arm64/arm64/ptrauth.c
index fdab5414e24c..ab40b72887e9 100644
--- a/sys/arm64/arm64/ptrauth.c
+++ b/sys/arm64/arm64/ptrauth.c
@@ -97,11 +97,11 @@ ptrauth_check(const struct cpu_feat *feat __unused, u_int midr __unused)
if (!pac_enable) {
if (boothowto & RB_VERBOSE)
printf("Pointer authentication is disabled\n");
- goto out;
+ return (FEAT_ALWAYS_DISABLE);
}
if (ptrauth_disable())
- goto out;
+ return (FEAT_ALWAYS_DISABLE);
/*
* This assumes if there is pointer authentication on the boot CPU
@@ -127,17 +127,6 @@ ptrauth_check(const struct cpu_feat *feat __unused, u_int midr __unused)
}
}
-out:
- /*
- * Pointer authentication may be disabled, mask out the ID fields we
- * expose to userspace and the rest of the kernel so they don't try
- * to use it.
- */
- update_special_reg(ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_MASK |
- ID_AA64ISAR1_APA_MASK | ID_AA64ISAR1_GPA_MASK |
- ID_AA64ISAR1_GPI_MASK, 0);
- update_special_reg(ID_AA64ISAR2_EL1, ID_AA64ISAR2_APA3_MASK, 0);
-
return (FEAT_ALWAYS_DISABLE);
}
@@ -157,8 +146,25 @@ ptrauth_enable(const struct cpu_feat *feat __unused,
return (true);
}
+static void
+ptrauth_disabled(const struct cpu_feat *feat __unused)
+{
+ /*
+ * Pointer authentication may be disabled, mask out the ID fields we
+ * expose to userspace and the rest of the kernel so they don't try
+ * to use it.
+ */
+ if (PCPU_GET(cpuid) == 0) {
+ update_special_reg(ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_MASK |
+ ID_AA64ISAR1_APA_MASK | ID_AA64ISAR1_GPA_MASK |
+ ID_AA64ISAR1_GPI_MASK, 0);
+ update_special_reg(ID_AA64ISAR2_EL1, ID_AA64ISAR2_APA3_MASK, 0);
+ }
+
+}
+
CPU_FEAT(feat_pauth, "Pointer Authentication",
- ptrauth_check, NULL, ptrauth_enable,
+ ptrauth_check, NULL, ptrauth_enable, ptrauth_disabled,
CPU_FEAT_EARLY_BOOT | CPU_FEAT_SYSTEM);
/* Copy the keys when forking a new process */
diff --git a/sys/arm64/include/armreg.h b/sys/arm64/include/armreg.h
index 500f35c48787..c2065fdb3f8c 100644
--- a/sys/arm64/include/armreg.h
+++ b/sys/arm64/include/armreg.h
@@ -2612,10 +2612,12 @@
(SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_EIS | SCTLR_TSCXT | SCTLR_EOS)
#define SCTLR_MMU_ON \
(SCTLR_MMU_OFF | \
+ SCTLR_EPAN | \
SCTLR_BT1 | \
SCTLR_BT0 | \
SCTLR_UCI | \
SCTLR_SPAN | \
+ SCTLR_IESB | \
SCTLR_nTWE | \
SCTLR_nTWI | \
SCTLR_UCT | \
diff --git a/sys/arm64/include/cpu_feat.h b/sys/arm64/include/cpu_feat.h
index 6a554b6baedf..6a311d4000bb 100644
--- a/sys/arm64/include/cpu_feat.h
+++ b/sys/arm64/include/cpu_feat.h
@@ -73,6 +73,9 @@ typedef enum {
#define CPU_FEAT_PER_CPU 0x00000000
#define CPU_FEAT_SYSTEM 0x00000010
+#define CPU_FEAT_USER_ENABLED 0x40000000
+#define CPU_FEAT_USER_DISABLED 0x80000000
+
struct cpu_feat;
typedef cpu_feat_en (cpu_feat_check)(const struct cpu_feat *, u_int);
@@ -80,12 +83,14 @@ typedef bool (cpu_feat_has_errata)(const struct cpu_feat *, u_int,
u_int **, u_int *);
typedef bool (cpu_feat_enable)(const struct cpu_feat *, cpu_feat_errata,
u_int *, u_int);
+typedef void (cpu_feat_disabled)(const struct cpu_feat *);
struct cpu_feat {
const char *feat_name;
cpu_feat_check *feat_check;
cpu_feat_has_errata *feat_has_errata;
cpu_feat_enable *feat_enable;
+ cpu_feat_disabled *feat_disabled;
uint32_t feat_flags;
bool feat_enabled;
};
@@ -93,12 +98,13 @@ SET_DECLARE(cpu_feat_set, struct cpu_feat);
SYSCTL_DECL(_hw_feat);
-#define CPU_FEAT(name, descr, check, has_errata, enable, flags) \
+#define CPU_FEAT(name, descr, check, has_errata, enable, disabled, flags) \
static struct cpu_feat name = { \
.feat_name = #name, \
.feat_check = check, \
.feat_has_errata = has_errata, \
.feat_enable = enable, \
+ .feat_disabled = disabled, \
.feat_flags = flags, \
.feat_enabled = false, \
}; \
diff --git a/sys/arm64/rockchip/rk_tsadc.c b/sys/arm64/rockchip/rk_tsadc.c
index e6cbad36f697..d83b09480a0c 100644
--- a/sys/arm64/rockchip/rk_tsadc.c
+++ b/sys/arm64/rockchip/rk_tsadc.c
@@ -484,7 +484,7 @@ tsadc_init_tsensor(struct tsadc_softc *sc, struct tsensor *sensor)
WR4(sc, TSADC_INT_EN, val);
/* Shutdown temperature */
- val = tsadc_raw_to_temp(sc, sc->shutdown_temp);
+ val = tsadc_temp_to_raw(sc, sc->shutdown_temp);
WR4(sc, TSADC_COMP_SHUT(sensor->channel), val);
val = RD4(sc, TSADC_AUTO_CON);
val |= TSADC_AUTO_SRC_EN(sensor->channel);
diff --git a/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c b/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c
index 8078f3f6d4b1..853cfb845878 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c
@@ -707,6 +707,31 @@ dtrace_error(uint32_t *counter)
} while (dtrace_cas32(counter, oval, nval) != oval);
}
+void
+dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
+{
+ cpuset_t cpus;
+
+ if (cpu == DTRACE_CPUALL)
+ cpus = all_cpus;
+ else
+ CPU_SETOF(cpu, &cpus);
+
+ smp_rendezvous_cpus(cpus, smp_no_rendezvous_barrier, func,
+ smp_no_rendezvous_barrier, arg);
+}
+
+static void
+dtrace_sync_func(void)
+{
+}
+
+void
+dtrace_sync(void)
+{
+ dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
+}
+
/*
* Use the DTRACE_LOADFUNC macro to define functions for each of loading a
* uint8_t, a uint16_t, a uint32_t and a uint64_t.
diff --git a/sys/cddl/dev/dtrace/aarch64/dtrace_subr.c b/sys/cddl/dev/dtrace/aarch64/dtrace_subr.c
index 32e84d8fbfe9..ff880e804770 100644
--- a/sys/cddl/dev/dtrace/aarch64/dtrace_subr.c
+++ b/sys/cddl/dev/dtrace/aarch64/dtrace_subr.c
@@ -124,33 +124,6 @@ dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
(*func)(0, (uintptr_t)VM_MIN_KERNEL_ADDRESS);
}
-void
-dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
-{
- cpuset_t cpus;
-
- if (cpu == DTRACE_CPUALL)
- cpus = all_cpus;
- else
- CPU_SETOF(cpu, &cpus);
-
- smp_rendezvous_cpus(cpus, smp_no_rendezvous_barrier, func,
- smp_no_rendezvous_barrier, arg);
-}
-
-static void
-dtrace_sync_func(void)
-{
-
-}
-
-void
-dtrace_sync(void)
-{
-
- dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
-}
-
static uint64_t nsec_scale;
#define SCALE_SHIFT 25
diff --git a/sys/cddl/dev/dtrace/amd64/dtrace_subr.c b/sys/cddl/dev/dtrace/amd64/dtrace_subr.c
index 81aa53d00bd8..877d52fe18a7 100644
--- a/sys/cddl/dev/dtrace/amd64/dtrace_subr.c
+++ b/sys/cddl/dev/dtrace/amd64/dtrace_subr.c
@@ -142,31 +142,6 @@ dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
(*func)(0, la57 ? (uintptr_t)addr_P5Tmap : (uintptr_t)addr_P4Tmap);
}
-void
-dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
-{
- cpuset_t cpus;
-
- if (cpu == DTRACE_CPUALL)
- cpus = all_cpus;
- else
- CPU_SETOF(cpu, &cpus);
-
- smp_rendezvous_cpus(cpus, smp_no_rendezvous_barrier, func,
- smp_no_rendezvous_barrier, arg);
-}
-
-static void
-dtrace_sync_func(void)
-{
-}
-
-void
-dtrace_sync(void)
-{
- dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
-}
-
#ifdef notyet
void
dtrace_safe_synchronous_signal(void)
diff --git a/sys/cddl/dev/dtrace/arm/dtrace_subr.c b/sys/cddl/dev/dtrace/arm/dtrace_subr.c
index bb42044aa477..10e9281709b6 100644
--- a/sys/cddl/dev/dtrace/arm/dtrace_subr.c
+++ b/sys/cddl/dev/dtrace/arm/dtrace_subr.c
@@ -138,31 +138,6 @@ dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
*/
}
-void
-dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
-{
- cpuset_t cpus;
-
- if (cpu == DTRACE_CPUALL)
- cpus = all_cpus;
- else
- CPU_SETOF(cpu, &cpus);
-
- smp_rendezvous_cpus(cpus, smp_no_rendezvous_barrier, func,
- smp_no_rendezvous_barrier, arg);
-}
-
-static void
-dtrace_sync_func(void)
-{
-}
-
-void
-dtrace_sync(void)
-{
- dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
-}
-
/*
* DTrace needs a high resolution time function which can
* be called from a probe context and guaranteed not to have
diff --git a/sys/cddl/dev/dtrace/i386/dtrace_subr.c b/sys/cddl/dev/dtrace/i386/dtrace_subr.c
index 026581f5a899..ebe2194a4b2f 100644
--- a/sys/cddl/dev/dtrace/i386/dtrace_subr.c
+++ b/sys/cddl/dev/dtrace/i386/dtrace_subr.c
@@ -139,31 +139,6 @@ dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
(*func)(0, kernelbase);
}
-void
-dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
-{
- cpuset_t cpus;
-
- if (cpu == DTRACE_CPUALL)
- cpus = all_cpus;
- else
- CPU_SETOF(cpu, &cpus);
-
- smp_rendezvous_cpus(cpus, smp_no_rendezvous_barrier, func,
- smp_no_rendezvous_barrier, arg);
-}
-
-static void
-dtrace_sync_func(void)
-{
-}
-
-void
-dtrace_sync(void)
-{
- dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
-}
-
#ifdef notyet
void
dtrace_safe_synchronous_signal(void)
diff --git a/sys/cddl/dev/dtrace/powerpc/dtrace_subr.c b/sys/cddl/dev/dtrace/powerpc/dtrace_subr.c
index 5dd083310e6f..ee8be8da642f 100644
--- a/sys/cddl/dev/dtrace/powerpc/dtrace_subr.c
+++ b/sys/cddl/dev/dtrace/powerpc/dtrace_subr.c
@@ -123,31 +123,6 @@ dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
*/
}
-void
-dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
-{
- cpuset_t cpus;
-
- if (cpu == DTRACE_CPUALL)
- cpus = all_cpus;
- else
- CPU_SETOF(cpu, &cpus);
-
- smp_rendezvous_cpus(cpus, smp_no_rendezvous_barrier, func,
- smp_no_rendezvous_barrier, arg);
-}
-
-static void
-dtrace_sync_func(void)
-{
-}
-
-void
-dtrace_sync(void)
-{
- dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
-}
-
static int64_t tgt_cpu_tsc;
static int64_t hst_cpu_tsc;
static int64_t timebase_skew[MAXCPU];
diff --git a/sys/cddl/dev/dtrace/riscv/dtrace_subr.c b/sys/cddl/dev/dtrace/riscv/dtrace_subr.c
index 3a6aacd86fcd..ed2c0bdba7e2 100644
--- a/sys/cddl/dev/dtrace/riscv/dtrace_subr.c
+++ b/sys/cddl/dev/dtrace/riscv/dtrace_subr.c
@@ -127,33 +127,6 @@ dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
(*func)(0, (uintptr_t)VM_MIN_KERNEL_ADDRESS);
}
-void
-dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
-{
- cpuset_t cpus;
-
- if (cpu == DTRACE_CPUALL)
- cpus = all_cpus;
- else
- CPU_SETOF(cpu, &cpus);
-
- smp_rendezvous_cpus(cpus, smp_no_rendezvous_barrier, func,
- smp_no_rendezvous_barrier, arg);
-}
-
-static void
-dtrace_sync_func(void)
-{
-
-}
-
-void
-dtrace_sync(void)
-{
-
- dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
-}
-
/*
* DTrace needs a high resolution time function which can
* be called from a probe context and guaranteed not to have
diff --git a/sys/compat/linux/linux_misc.c b/sys/compat/linux/linux_misc.c
index 5e32353c6b8e..0925ffb64480 100644
--- a/sys/compat/linux/linux_misc.c
+++ b/sys/compat/linux/linux_misc.c
@@ -1028,24 +1028,24 @@ linux_nice(struct thread *td, struct linux_nice_args *args)
int
linux_setgroups(struct thread *td, struct linux_setgroups_args *args)
{
+ const int ngrp = args->gidsetsize;
struct ucred *newcred, *oldcred;
l_gid_t *linux_gidset;
- int ngrp, error;
+ int error;
struct proc *p;
- ngrp = args->gidsetsize;
- if (ngrp < 0 || ngrp >= ngroups_max)
+ if (ngrp < 0 || ngrp > ngroups_max)
return (EINVAL);
linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_LINUX, M_WAITOK);
error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t));
if (error)
goto out;
+
newcred = crget();
crextend(newcred, ngrp);
p = td->td_proc;
PROC_LOCK(p);
- oldcred = p->p_ucred;
- crcopy(newcred, oldcred);
+ oldcred = crcopysafe(p, newcred);
if ((error = priv_check_cred(oldcred, PRIV_CRED_SETGROUPS)) != 0) {
PROC_UNLOCK(p);
@@ -1071,34 +1071,29 @@ out:
int
linux_getgroups(struct thread *td, struct linux_getgroups_args *args)
{
- struct ucred *cred;
+ const struct ucred *const cred = td->td_ucred;
l_gid_t *linux_gidset;
- gid_t *bsd_gidset;
- int bsd_gidsetsz, ngrp, error;
+ int ngrp, error;
- cred = td->td_ucred;
- bsd_gidset = cred->cr_groups;
- bsd_gidsetsz = cred->cr_ngroups;
+ ngrp = args->gidsetsize;
- if ((ngrp = args->gidsetsize) == 0) {
- td->td_retval[0] = bsd_gidsetsz;
+ if (ngrp == 0) {
+ td->td_retval[0] = cred->cr_ngroups;
return (0);
}
-
- if (ngrp < bsd_gidsetsz)
+ if (ngrp < cred->cr_ngroups)
return (EINVAL);
- ngrp = 0;
- linux_gidset = malloc(bsd_gidsetsz * sizeof(*linux_gidset),
- M_LINUX, M_WAITOK);
- while (ngrp < bsd_gidsetsz) {
- linux_gidset[ngrp] = bsd_gidset[ngrp];
- ngrp++;
- }
+ ngrp = cred->cr_ngroups;
+
+ linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_LINUX, M_WAITOK);
+ for (int i = 0; i < ngrp; ++i)
+ linux_gidset[i] = cred->cr_groups[i];
error = copyout(linux_gidset, args->grouplist, ngrp * sizeof(l_gid_t));
free(linux_gidset, M_LINUX);
- if (error)
+
+ if (error != 0)
return (error);
td->td_retval[0] = ngrp;
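
The rewritten linux_getgroups() preserves the Linux/POSIX contract: a gidsetsize of 0 reports the count without copying, a buffer smaller than the count is EINVAL, and otherwise the whole set is exported. The matching two-step idiom from userspace, using the standard getgroups(2):

#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	/* First call: size 0 only reports how many groups there are. */
	int ngrp = getgroups(0, NULL);
	if (ngrp < 0)
		return (1);

	gid_t *set = malloc(sizeof(gid_t) * ngrp);
	if (set == NULL)
		return (1);

	/* Second call: the buffer is large enough, so the set is copied. */
	ngrp = getgroups(ngrp, set);
	for (int i = 0; i < ngrp; i++)
		printf("%d\n", (int)set[i]);
	free(set);
	return (0);
}
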
diff --git a/sys/compat/linux/linux_socket.c b/sys/compat/linux/linux_socket.c
index 539d153431c4..0e07b0a60ced 100644
--- a/sys/compat/linux/linux_socket.c
+++ b/sys/compat/linux/linux_socket.c
@@ -2179,6 +2179,7 @@ static int
linux_getsockopt_so_peergroups(struct thread *td,
struct linux_getsockopt_args *args)
{
+ l_gid_t *out = PTRIN(args->optval);
struct xucred xu;
socklen_t xulen, len;
int error, i;
@@ -2197,13 +2198,12 @@ linux_getsockopt_so_peergroups(struct thread *td,
return (error);
}
- /*
- * "- 1" to skip the primary group.
- */
+ /* "- 1" to skip the primary group. */
for (i = 0; i < xu.cr_ngroups - 1; i++) {
- error = copyout(xu.cr_groups + i + 1,
- (void *)(args->optval + i * sizeof(l_gid_t)),
- sizeof(l_gid_t));
+ /* Copy to cope with a possible type discrepancy. */
+ const l_gid_t g = xu.cr_groups[i + 1];
+
+ error = copyout(&g, out + i, sizeof(l_gid_t));
if (error != 0)
return (error);
}
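
The temporary g exists because l_gid_t and the native gid_t are only guaranteed to agree on values, not on width, so copying the array wholesale could export the wrong layout. A minimal model of the per-element conversion (both typedefs are illustrative stand-ins):

#include <stdint.h>

typedef uint32_t native_gid_t;	/* stand-in for gid_t */
typedef uint16_t wire_gid_t;	/* stand-in for a narrower l_gid16_t */

static void
export_gids(const native_gid_t *in, wire_gid_t *out, int n)
{
	/*
	 * A memcpy() would lay out 32-bit entries where 16-bit ones are
	 * expected; converting one element at a time preserves the values.
	 */
	for (int i = 0; i < n; i++)
		out[i] = (wire_gid_t)in[i];
}
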
diff --git a/sys/compat/linux/linux_uid16.c b/sys/compat/linux/linux_uid16.c
index 1d9a19916412..8ac093e004d0 100644
--- a/sys/compat/linux/linux_uid16.c
+++ b/sys/compat/linux/linux_uid16.c
@@ -85,13 +85,13 @@ linux_lchown16(struct thread *td, struct linux_lchown16_args *args)
int
linux_setgroups16(struct thread *td, struct linux_setgroups16_args *args)
{
+ const int ngrp = args->gidsetsize;
struct ucred *newcred, *oldcred;
l_gid16_t *linux_gidset;
- int ngrp, error;
+ int error;
struct proc *p;
- ngrp = args->gidsetsize;
- if (ngrp < 0 || ngrp >= ngroups_max)
+ if (ngrp < 0 || ngrp > ngroups_max)
return (EINVAL);
linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_LINUX, M_WAITOK);
error = copyin(args->gidset, linux_gidset, ngrp * sizeof(l_gid16_t));
@@ -100,7 +100,9 @@ linux_setgroups16(struct thread *td, struct linux_setgroups16_args *args)
free(linux_gidset, M_LINUX);
return (error);
}
+
newcred = crget();
+ crextend(newcred, ngrp);
p = td->td_proc;
PROC_LOCK(p);
oldcred = crcopysafe(p, newcred);
@@ -133,34 +135,29 @@ out:
int
linux_getgroups16(struct thread *td, struct linux_getgroups16_args *args)
{
- struct ucred *cred;
+ const struct ucred *const cred = td->td_ucred;
l_gid16_t *linux_gidset;
- gid_t *bsd_gidset;
- int bsd_gidsetsz, ngrp, error;
+ int ngrp, error;
- cred = td->td_ucred;
- bsd_gidset = cred->cr_groups;
- bsd_gidsetsz = cred->cr_ngroups;
+ ngrp = args->gidsetsize;
- if ((ngrp = args->gidsetsize) == 0) {
- td->td_retval[0] = bsd_gidsetsz;
+ if (ngrp == 0) {
+ td->td_retval[0] = cred->cr_ngroups;
return (0);
}
-
- if (ngrp < bsd_gidsetsz)
+ if (ngrp < cred->cr_ngroups)
return (EINVAL);
- ngrp = 0;
- linux_gidset = malloc(bsd_gidsetsz * sizeof(*linux_gidset),
- M_LINUX, M_WAITOK);
- while (ngrp < bsd_gidsetsz) {
- linux_gidset[ngrp] = bsd_gidset[ngrp];
- ngrp++;
- }
+ ngrp = cred->cr_ngroups;
+
+ linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_LINUX, M_WAITOK);
+ for (int i = 0; i < ngrp; ++i)
+ linux_gidset[i] = cred->cr_groups[i];
error = copyout(linux_gidset, args->gidset, ngrp * sizeof(l_gid16_t));
free(linux_gidset, M_LINUX);
- if (error) {
+
+ if (error != 0) {
LIN_SDT_PROBE1(uid16, linux_getgroups16, copyout_error, error);
return (error);
}
diff --git a/sys/compat/linuxkpi/common/include/linux/string_choices.h b/sys/compat/linuxkpi/common/include/linux/string_choices.h
new file mode 100644
index 000000000000..74aa3fd019b2
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/string_choices.h
@@ -0,0 +1,71 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023 Jean-Sébastien Pédron <dumbbell@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_STRING_CHOICES_H_
+#define _LINUXKPI_LINUX_STRING_CHOICES_H_
+
+#include <sys/types.h>
+
+static inline const char *
+str_yes_no(bool value)
+{
+ if (value)
+ return "yes";
+ else
+ return "no";
+}
+
+static inline const char *
+str_on_off(bool value)
+{
+ if (value)
+ return "on";
+ else
+ return "off";
+}
+
+static inline const char *
+str_enabled_disabled(bool value)
+{
+ if (value)
+ return "enabled";
+ else
+ return "disabled";
+}
+
+static inline const char *
+str_enable_disable(bool value)
+{
+ if (value)
+ return "enable";
+ else
+ return "disable";
+}
+
+#define str_disable_enable(_v) str_enable_disable(!(_v))
+
+#endif
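
These helpers keep callers from open-coding value ? "yes" : "no" in log messages; str_disable_enable() is the inverted spelling for prose that runs the other way around. A hedged usage sketch (printf stands in for whatever logging the caller uses):

static void
report(bool active)
{
	printf("power save: %s\n", str_on_off(active));
	printf("feature %s\n", str_enabled_disabled(active));
	/* Prints "disable" when active is true, "enable" when false. */
	printf("write 1 to %s\n", str_disable_enable(active));
}
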
diff --git a/sys/compat/linuxkpi/common/include/linux/string_helpers.h b/sys/compat/linuxkpi/common/include/linux/string_helpers.h
index 2c6fe0b1708d..07d113c0cb21 100644
--- a/sys/compat/linuxkpi/common/include/linux/string_helpers.h
+++ b/sys/compat/linuxkpi/common/include/linux/string_helpers.h
@@ -1,71 +1,12 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2023 Jean-Sébastien Pédron <dumbbell@FreeBSD.org>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice unmodified, this list of conditions, and the following
- * disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
+/*
+ * Copyright (c) 2025 The FreeBSD Foundation
*
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-2-Clause
*/
#ifndef _LINUXKPI_LINUX_STRING_HELPERS_H_
#define _LINUXKPI_LINUX_STRING_HELPERS_H_
-#include <sys/types.h>
-
-static inline const char *
-str_yes_no(bool value)
-{
- if (value)
- return "yes";
- else
- return "no";
-}
-
-static inline const char *
-str_on_off(bool value)
-{
- if (value)
- return "on";
- else
- return "off";
-}
-
-static inline const char *
-str_enabled_disabled(bool value)
-{
- if (value)
- return "enabled";
- else
- return "disabled";
-}
-
-static inline const char *
-str_enable_disable(bool value)
-{
- if (value)
- return "enable";
- else
- return "disable";
-}
-
-#define str_disable_enable(_v) str_enable_disable(!(_v))
+#include <linux/string_choices.h>
#endif
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
index a222c5de4a2a..4de48e013ec4 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
@@ -674,7 +674,6 @@ zfsctl_root_readdir(struct vop_readdir_args *ap)
zfs_uio_t uio;
int *eofp = ap->a_eofflag;
off_t dots_offset;
- ssize_t orig_resid;
int error;
zfs_uio_init(&uio, ap->a_uio);
@@ -694,11 +693,13 @@ zfsctl_root_readdir(struct vop_readdir_args *ap)
return (0);
}
- orig_resid = zfs_uio_resid(&uio);
error = sfs_readdir_common(zfsvfs->z_root, ZFSCTL_INO_ROOT, ap, &uio,
&dots_offset);
- if (error != 0)
- goto err;
+ if (error != 0) {
+ if (error == ENAMETOOLONG) /* ran out of destination space */
+ error = 0;
+ return (error);
+ }
if (zfs_uio_offset(&uio) != dots_offset)
return (SET_ERROR(EINVAL));
@@ -711,11 +712,8 @@ zfsctl_root_readdir(struct vop_readdir_args *ap)
entry.d_reclen = sizeof (entry);
error = vfs_read_dirent(ap, &entry, zfs_uio_offset(&uio));
if (error != 0) {
-err:
- if (error == ENAMETOOLONG) {
- error = orig_resid == zfs_uio_resid(&uio) ?
- EINVAL : 0;
- }
+ if (error == ENAMETOOLONG)
+ error = 0;
return (SET_ERROR(error));
}
if (eofp != NULL)
@@ -1060,21 +1058,17 @@ zfsctl_snapdir_readdir(struct vop_readdir_args *ap)
zfs_uio_t uio;
int *eofp = ap->a_eofflag;
off_t dots_offset;
- ssize_t orig_resid;
int error;
zfs_uio_init(&uio, ap->a_uio);
- orig_resid = zfs_uio_resid(&uio);
ASSERT3S(vp->v_type, ==, VDIR);
error = sfs_readdir_common(ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, ap,
&uio, &dots_offset);
if (error != 0) {
- if (error == ENAMETOOLONG) { /* ran out of destination space */
- error = orig_resid == zfs_uio_resid(&uio) ?
- EINVAL : 0;
- }
+ if (error == ENAMETOOLONG) /* ran out of destination space */
+ error = 0;
return (error);
}
@@ -1092,13 +1086,9 @@ zfsctl_snapdir_readdir(struct vop_readdir_args *ap)
dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG);
if (error != 0) {
if (error == ENOENT) {
- if (orig_resid == zfs_uio_resid(&uio)) {
- error = EINVAL;
- } else {
- error = 0;
- if (eofp != NULL)
- *eofp = 1;
- }
+ if (eofp != NULL)
+ *eofp = 1;
+ error = 0;
}
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -1111,10 +1101,8 @@ zfsctl_snapdir_readdir(struct vop_readdir_args *ap)
entry.d_reclen = sizeof (entry);
error = vfs_read_dirent(ap, &entry, zfs_uio_offset(&uio));
if (error != 0) {
- if (error == ENAMETOOLONG) {
- error = orig_resid == zfs_uio_resid(&uio) ?
- EINVAL : 0;
- }
+ if (error == ENAMETOOLONG)
+ error = 0;
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(error));
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
index 8dce97baba66..411225786089 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
@@ -1698,7 +1698,6 @@ zfs_readdir(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, int *eofp,
objset_t *os;
caddr_t outbuf;
size_t bufsize;
- ssize_t orig_resid;
zap_cursor_t zc;
zap_attribute_t *zap;
uint_t bytes_wanted;
@@ -1747,7 +1746,6 @@ zfs_readdir(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, int *eofp,
error = 0;
os = zfsvfs->z_os;
offset = zfs_uio_offset(uio);
- orig_resid = zfs_uio_resid(uio);
prefetch = zp->z_zn_prefetch;
zap = zap_attribute_long_alloc();
@@ -1927,7 +1925,7 @@ update:
kmem_free(outbuf, bufsize);
if (error == ENOENT)
- error = orig_resid == zfs_uio_resid(uio) ? EINVAL : 0;
+ error = 0;
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c
index 175bfe835e6f..574d3aacbcde 100644
--- a/sys/dev/acpica/acpi.c
+++ b/sys/dev/acpica/acpi.c
@@ -111,9 +111,8 @@ struct callout acpi_sleep_timer;
/* Bitmap of device quirks. */
int acpi_quirks;
-/* Supported sleep states and types. */
-static bool acpi_supported_stypes[POWER_STYPE_COUNT];
-static bool acpi_supported_sstates[ACPI_S_STATE_COUNT];
+/* Supported sleep states. */
+static BOOLEAN acpi_sleep_states[ACPI_S_STATE_COUNT];
static void acpi_lookup(void *arg, const char *name, device_t *dev);
static int acpi_modevent(struct module *mod, int event, void *junk);
@@ -170,27 +169,21 @@ static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level,
void *context, void **status);
static void acpi_sleep_enable(void *arg);
static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc);
-static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc,
- enum power_stype stype);
+static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state);
static void acpi_shutdown_final(void *arg, int howto);
static void acpi_enable_fixed_events(struct acpi_softc *sc);
static void acpi_resync_clock(struct acpi_softc *sc);
-static int acpi_wake_sleep_prep(ACPI_HANDLE handle,
- enum power_stype stype);
-static int acpi_wake_run_prep(ACPI_HANDLE handle, enum power_stype stype);
-static int acpi_wake_prep_walk(enum power_stype stype);
+static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate);
+static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate);
+static int acpi_wake_prep_walk(int sstate);
static int acpi_wake_sysctl_walk(device_t dev);
static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS);
+static void acpi_system_eventhandler_sleep(void *arg, int state);
+static void acpi_system_eventhandler_wakeup(void *arg, int state);
+static int acpi_sname2sstate(const char *sname);
+static const char *acpi_sstate2sname(int sstate);
static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
-static void acpi_system_eventhandler_sleep(void *arg,
- enum power_stype stype);
-static void acpi_system_eventhandler_wakeup(void *arg,
- enum power_stype stype);
-static enum power_stype acpi_sstate_to_stype(int sstate);
-static int acpi_sname_to_sstate(const char *sname);
-static const char *acpi_sstate_to_sname(int sstate);
static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
-static int acpi_stype_sysctl(SYSCTL_HANDLER_ARGS);
static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS);
static int acpi_stype_to_sstate(struct acpi_softc *sc, enum power_stype stype);
static int acpi_pm_func(u_long cmd, void *arg, enum power_stype stype);
@@ -484,7 +477,6 @@ acpi_attach(device_t dev)
UINT32 flags;
UINT8 TypeA, TypeB;
char *env;
- enum power_stype stype;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
@@ -597,30 +589,31 @@ acpi_attach(device_t dev)
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
OID_AUTO, "power_button_state",
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
- &sc->acpi_power_button_stype, 0, acpi_stype_sysctl, "A",
+ &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A",
"Power button ACPI sleep state.");
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
OID_AUTO, "sleep_button_state",
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
- &sc->acpi_sleep_button_stype, 0, acpi_stype_sysctl, "A",
+ &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A",
"Sleep button ACPI sleep state.");
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
OID_AUTO, "lid_switch_state",
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
- &sc->acpi_lid_switch_stype, 0, acpi_stype_sysctl, "A",
- "Lid ACPI sleep state. Set to s2idle or s2mem if you want to suspend "
- "your laptop when close the lid.");
+ &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A",
+ "Lid ACPI sleep state. Set to S3 if you want to suspend your laptop when close the Lid.");
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
OID_AUTO, "standby_state",
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
- &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A",
- "ACPI Sx state to use when going standby (S1 or S2).");
+ &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", "");
+ SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
+ OID_AUTO, "suspend_state",
+ CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ &sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", "");
SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
OID_AUTO, "sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0,
"sleep delay in seconds");
SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
- OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0,
- "Use S4BIOS when hibernating.");
+ OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode");
SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
OID_AUTO, "verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode");
SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
@@ -666,38 +659,31 @@ acpi_attach(device_t dev)
sc->acpi_s4bios = 1;
#endif
- /*
- * Probe all supported ACPI sleep states. Awake (S0) is always supported.
- */
- acpi_supported_sstates[ACPI_STATE_S0] = TRUE;
- acpi_supported_stypes[POWER_STYPE_AWAKE] = true;
- for (state = ACPI_STATE_S1; state <= ACPI_STATE_S5; state++)
+ /* Probe all supported sleep states. */
+ acpi_sleep_states[ACPI_STATE_S0] = TRUE;
+ for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
if (ACPI_SUCCESS(AcpiEvaluateObject(ACPI_ROOT_OBJECT,
__DECONST(char *, AcpiGbl_SleepStateNames[state]), NULL, NULL)) &&
- ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB))) {
- acpi_supported_sstates[state] = TRUE;
- acpi_supported_stypes[acpi_sstate_to_stype(state)] = true;
- }
+ ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB)))
+ acpi_sleep_states[state] = TRUE;
/*
- * Dispatch the default sleep type to devices. The lid switch is set
+ * Dispatch the default sleep state to devices. The lid switch is set
* to UNKNOWN by default to avoid surprising users.
*/
- sc->acpi_power_button_stype = acpi_supported_stypes[POWER_STYPE_POWEROFF] ?
- POWER_STYPE_POWEROFF : POWER_STYPE_UNKNOWN;
- sc->acpi_lid_switch_stype = POWER_STYPE_UNKNOWN;
-
- sc->acpi_standby_sx = ACPI_STATE_UNKNOWN;
- if (acpi_supported_sstates[ACPI_STATE_S1])
- sc->acpi_standby_sx = ACPI_STATE_S1;
- else if (acpi_supported_sstates[ACPI_STATE_S2])
- sc->acpi_standby_sx = ACPI_STATE_S2;
-
- /* Pick the first valid sleep type for the sleep button default. */
- sc->acpi_sleep_button_stype = POWER_STYPE_UNKNOWN;
- for (stype = POWER_STYPE_STANDBY; stype <= POWER_STYPE_HIBERNATE; stype++)
- if (acpi_supported_stypes[stype]) {
- sc->acpi_sleep_button_stype = stype;
+ sc->acpi_power_button_sx = acpi_sleep_states[ACPI_STATE_S5] ?
+ ACPI_STATE_S5 : ACPI_STATE_UNKNOWN;
+ sc->acpi_lid_switch_sx = ACPI_STATE_UNKNOWN;
+ sc->acpi_standby_sx = acpi_sleep_states[ACPI_STATE_S1] ?
+ ACPI_STATE_S1 : ACPI_STATE_UNKNOWN;
+ sc->acpi_suspend_sx = acpi_sleep_states[ACPI_STATE_S3] ?
+ ACPI_STATE_S3 : ACPI_STATE_UNKNOWN;
+
+ /* Pick the first valid sleep state for the sleep button default. */
+ sc->acpi_sleep_button_sx = ACPI_STATE_UNKNOWN;
+ for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++)
+ if (acpi_sleep_states[state]) {
+ sc->acpi_sleep_button_sx = state;
break;
}
@@ -722,7 +708,7 @@ acpi_attach(device_t dev)
/* Flag our initial states. */
sc->acpi_enabled = TRUE;
- sc->acpi_stype = POWER_STYPE_AWAKE;
+ sc->acpi_sstate = ACPI_STATE_S0;
sc->acpi_sleep_disabled = TRUE;
/* Create the control device */
@@ -734,8 +720,7 @@ acpi_attach(device_t dev)
goto out;
/* Register ACPI again to pass the correct argument of pm_func. */
- power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc,
- acpi_supported_stypes);
+ power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc);
acpi_platform_osc(dev);
@@ -783,36 +768,6 @@ acpi_stype_to_sstate(struct acpi_softc *sc, enum power_stype stype)
return (ACPI_STATE_UNKNOWN);
}
-/*
- * XXX It would be nice if we didn't need this function, but we'd need
- * acpi_EnterSleepState and acpi_ReqSleepState to take in actual ACPI S-states,
- * which won't be possible at the moment because suspend-to-idle (which is not
- * an ACPI S-state nor maps to one) will be implemented here.
- *
- * In the future, we should make generic a lot of the logic in these functions
- * to enable suspend-to-idle on non-ACPI builds, and then make
- * acpi_EnterSleepState and acpi_ReqSleepState truly take in ACPI S-states
- * again.
- */
-static enum power_stype
-acpi_sstate_to_stype(int sstate)
-{
- switch (sstate) {
- case ACPI_STATE_S0:
- return (POWER_STYPE_AWAKE);
- case ACPI_STATE_S1:
- case ACPI_STATE_S2:
- return (POWER_STYPE_STANDBY);
- case ACPI_STATE_S3:
- return (POWER_STYPE_SUSPEND_TO_MEM);
- case ACPI_STATE_S4:
- return (POWER_STYPE_HIBERNATE);
- case ACPI_STATE_S5:
- return (POWER_STYPE_POWEROFF);
- }
- return (POWER_STYPE_UNKNOWN);
-}
-
static void
acpi_set_power_children(device_t dev, int state)
{
@@ -2108,7 +2063,7 @@ acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate)
* Note illegal _S0D is evaluated because some systems expect this.
*/
sc = device_get_softc(bus);
- snprintf(sxd, sizeof(sxd), "_S%dD", acpi_stype_to_sstate(sc, sc->acpi_stype));
+ snprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate);
status = acpi_GetInteger(handle, sxd, dstate);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
device_printf(dev, "failed to get %s on %s: %s\n", sxd,
@@ -3206,9 +3161,9 @@ acpi_sleep_force_task(void *context)
{
struct acpi_softc *sc = (struct acpi_softc *)context;
- if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_stype)))
- device_printf(sc->acpi_dev, "force sleep state %s failed\n",
- power_stype_to_name(sc->acpi_next_stype));
+ if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
+ device_printf(sc->acpi_dev, "force sleep state S%d failed\n",
+ sc->acpi_next_sstate);
}
static void
@@ -3235,24 +3190,24 @@ acpi_sleep_force(void *arg)
* acks are in.
*/
int
-acpi_ReqSleepState(struct acpi_softc *sc, enum power_stype stype)
+acpi_ReqSleepState(struct acpi_softc *sc, int state)
{
#if defined(__amd64__) || defined(__i386__)
struct apm_clone_data *clone;
ACPI_STATUS status;
- if (stype < POWER_STYPE_AWAKE || stype >= POWER_STYPE_COUNT)
+ if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
return (EINVAL);
- if (!acpi_supported_stypes[stype])
+ if (!acpi_sleep_states[state])
return (EOPNOTSUPP);
/*
* If a reboot/shutdown/suspend request is already in progress or
* suspend is blocked due to an upcoming shutdown, just return.
*/
- if (rebooting || sc->acpi_next_stype != POWER_STYPE_AWAKE ||
- suspend_blocked)
+ if (rebooting || sc->acpi_next_sstate != 0 || suspend_blocked) {
return (0);
+ }
/* Wait until sleep is enabled. */
while (sc->acpi_sleep_disabled) {
@@ -3261,12 +3216,12 @@ acpi_ReqSleepState(struct acpi_softc *sc, enum power_stype stype)
ACPI_LOCK(acpi);
- sc->acpi_next_stype = stype;
+ sc->acpi_next_sstate = state;
/* S5 (soft-off) should be entered directly with no waiting. */
- if (stype == POWER_STYPE_POWEROFF) {
+ if (state == ACPI_STATE_S5) {
ACPI_UNLOCK(acpi);
- status = acpi_EnterSleepState(sc, stype);
+ status = acpi_EnterSleepState(sc, state);
return (ACPI_SUCCESS(status) ? 0 : ENXIO);
}
@@ -3282,7 +3237,7 @@ acpi_ReqSleepState(struct acpi_softc *sc, enum power_stype stype)
/* If devd(8) is not running, immediately enter the sleep state. */
if (!devctl_process_running()) {
ACPI_UNLOCK(acpi);
- status = acpi_EnterSleepState(sc, stype);
+ status = acpi_EnterSleepState(sc, state);
return (ACPI_SUCCESS(status) ? 0 : ENXIO);
}
@@ -3297,7 +3252,7 @@ acpi_ReqSleepState(struct acpi_softc *sc, enum power_stype stype)
ACPI_UNLOCK(acpi);
/* Now notify devd(8) also. */
- acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, stype);
+ acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state);
return (0);
#else
@@ -3320,17 +3275,17 @@ acpi_AckSleepState(struct apm_clone_data *clone, int error)
struct acpi_softc *sc;
int ret, sleeping;
- /* If no pending sleep type, return an error. */
+ /* If no pending sleep state, return an error. */
ACPI_LOCK(acpi);
sc = clone->acpi_sc;
- if (sc->acpi_next_stype == POWER_STYPE_AWAKE) {
+ if (sc->acpi_next_sstate == 0) {
ACPI_UNLOCK(acpi);
return (ENXIO);
}
/* Caller wants to abort suspend process. */
if (error) {
- sc->acpi_next_stype = POWER_STYPE_AWAKE;
+ sc->acpi_next_sstate = 0;
callout_stop(&sc->susp_force_to);
device_printf(sc->acpi_dev,
"listener on %s cancelled the pending suspend\n",
@@ -3360,7 +3315,7 @@ acpi_AckSleepState(struct apm_clone_data *clone, int error)
ACPI_UNLOCK(acpi);
ret = 0;
if (sleeping) {
- if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_stype)))
+ if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
ret = ENODEV;
}
return (ret);
@@ -3417,7 +3372,7 @@ enum acpi_sleep_state {
* Currently we support S1-S5 but S4 is only S4BIOS
*/
static ACPI_STATUS
-acpi_EnterSleepState(struct acpi_softc *sc, enum power_stype stype)
+acpi_EnterSleepState(struct acpi_softc *sc, int state)
{
register_t intr;
ACPI_STATUS status;
@@ -3425,13 +3380,13 @@ acpi_EnterSleepState(struct acpi_softc *sc, enum power_stype stype)
enum acpi_sleep_state slp_state;
int sleep_result;
- ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, stype);
+ ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
- if (stype <= POWER_STYPE_AWAKE || stype >= POWER_STYPE_COUNT)
+ if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
return_ACPI_STATUS (AE_BAD_PARAMETER);
- if (!acpi_supported_stypes[stype]) {
- device_printf(sc->acpi_dev, "Sleep type %s not supported on this "
- "platform\n", power_stype_to_name(stype));
+ if (!acpi_sleep_states[state]) {
+ device_printf(sc->acpi_dev, "Sleep state S%d not supported by BIOS\n",
+ state);
return (AE_SUPPORT);
}
@@ -3443,7 +3398,7 @@ acpi_EnterSleepState(struct acpi_softc *sc, enum power_stype stype)
return (status);
}
- if (stype == POWER_STYPE_POWEROFF) {
+ if (state == ACPI_STATE_S5) {
/*
* Shut down cleanly and power off. This will call us back through the
* shutdown handlers.
@@ -3471,16 +3426,16 @@ acpi_EnterSleepState(struct acpi_softc *sc, enum power_stype stype)
#endif
/*
- * Be sure to hold bus topology lock across DEVICE_SUSPEND/RESUME.
+ * Be sure to hold Giant across DEVICE_SUSPEND/RESUME
*/
bus_topo_lock();
slp_state = ACPI_SS_NONE;
- sc->acpi_stype = stype;
+ sc->acpi_sstate = state;
/* Enable any GPEs as appropriate and requested by the user. */
- acpi_wake_prep_walk(stype);
+ acpi_wake_prep_walk(state);
slp_state = ACPI_SS_GPE_SET;
/*
@@ -3497,7 +3452,7 @@ acpi_EnterSleepState(struct acpi_softc *sc, enum power_stype stype)
}
slp_state = ACPI_SS_DEV_SUSPEND;
- status = AcpiEnterSleepStatePrep(stype);
+ status = AcpiEnterSleepStatePrep(state);
if (ACPI_FAILURE(status)) {
device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
AcpiFormatException(status));
@@ -3510,9 +3465,9 @@ acpi_EnterSleepState(struct acpi_softc *sc, enum power_stype stype)
suspendclock();
intr = intr_disable();
- if (stype != POWER_STYPE_STANDBY) {
- sleep_result = acpi_sleep_machdep(sc, stype);
- acpi_wakeup_machdep(sc, stype, sleep_result, 0);
+ if (state != ACPI_STATE_S1) {
+ sleep_result = acpi_sleep_machdep(sc, state);
+ acpi_wakeup_machdep(sc, state, sleep_result, 0);
/*
* XXX According to ACPI specification SCI_EN bit should be restored
@@ -3523,10 +3478,10 @@ acpi_EnterSleepState(struct acpi_softc *sc, enum power_stype stype)
* This hack is picked up from Linux, which claims that it follows
* Windows behavior.
*/
- if (sleep_result == 1 && stype != POWER_STYPE_HIBERNATE)
+ if (sleep_result == 1 && state != ACPI_STATE_S4)
AcpiWriteBitRegister(ACPI_BITREG_SCI_ENABLE, ACPI_ENABLE_EVENT);
- if (sleep_result == 1 && stype == POWER_STYPE_SUSPEND_TO_MEM) {
+ if (sleep_result == 1 && state == ACPI_STATE_S3) {
/*
* Prevent mis-interpretation of the wakeup by power button
* as a request for power off.
@@ -3552,20 +3507,20 @@ acpi_EnterSleepState(struct acpi_softc *sc, enum power_stype stype)
intr_restore(intr);
/* call acpi_wakeup_machdep() again with interrupt enabled */
- acpi_wakeup_machdep(sc, stype, sleep_result, 1);
+ acpi_wakeup_machdep(sc, state, sleep_result, 1);
- AcpiLeaveSleepStatePrep(stype);
+ AcpiLeaveSleepStatePrep(state);
if (sleep_result == -1)
goto backout;
- /* Re-enable ACPI hardware on wakeup from hibernate. */
- if (stype == POWER_STYPE_HIBERNATE)
+ /* Re-enable ACPI hardware on wakeup from sleep state 4. */
+ if (state == ACPI_STATE_S4)
AcpiEnable();
} else {
- status = AcpiEnterSleepState(stype);
+ status = AcpiEnterSleepState(state);
intr_restore(intr);
- AcpiLeaveSleepStatePrep(stype);
+ AcpiLeaveSleepStatePrep(state);
if (ACPI_FAILURE(status)) {
device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n",
AcpiFormatException(status));
@@ -3582,13 +3537,13 @@ backout:
if (slp_state >= ACPI_SS_SLP_PREP)
resumeclock();
if (slp_state >= ACPI_SS_GPE_SET) {
- acpi_wake_prep_walk(stype);
- sc->acpi_stype = POWER_STYPE_AWAKE;
+ acpi_wake_prep_walk(state);
+ sc->acpi_sstate = ACPI_STATE_S0;
}
if (slp_state >= ACPI_SS_DEV_SUSPEND)
DEVICE_RESUME(root_bus);
if (slp_state >= ACPI_SS_SLP_PREP)
- AcpiLeaveSleepState(stype);
+ AcpiLeaveSleepState(state);
if (slp_state >= ACPI_SS_SLEPT) {
#if defined(__i386__) || defined(__amd64__)
/* NB: we are still using ACPI timecounter at this point. */
@@ -3597,7 +3552,7 @@ backout:
acpi_resync_clock(sc);
acpi_enable_fixed_events(sc);
}
- sc->acpi_next_stype = POWER_STYPE_AWAKE;
+ sc->acpi_next_sstate = 0;
bus_topo_unlock();
@@ -3623,7 +3578,7 @@ backout:
/* Run /etc/rc.resume after we are back. */
if (devctl_process_running())
- acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, stype);
+ acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state);
return_ACPI_STATUS (status);
}
@@ -3674,21 +3629,16 @@ acpi_wake_set_enable(device_t dev, int enable)
}
static int
-acpi_wake_sleep_prep(ACPI_HANDLE handle, enum power_stype stype)
+acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate)
{
- int sstate;
struct acpi_prw_data prw;
device_t dev;
- struct acpi_softc *sc;
/* Check that this is a wake-capable device and get its GPE. */
if (acpi_parse_prw(handle, &prw) != 0)
return (ENXIO);
dev = acpi_get_device(handle);
- sc = device_get_softc(dev);
- sstate = acpi_stype_to_sstate(sc, stype);
-
/*
* The destination sleep state must be less than (i.e., higher power)
* or equal to the value specified by _PRW. If this GPE cannot be
@@ -3699,26 +3649,24 @@ acpi_wake_sleep_prep(ACPI_HANDLE handle, enum power_stype stype)
if (sstate > prw.lowest_wake) {
AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE);
if (bootverbose)
- device_printf(dev, "wake_prep disabled wake for %s (%s)\n",
- acpi_name(handle), power_stype_to_name(stype));
+ device_printf(dev, "wake_prep disabled wake for %s (S%d)\n",
+ acpi_name(handle), sstate);
} else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) {
acpi_pwr_wake_enable(handle, 1);
acpi_SetInteger(handle, "_PSW", 1);
if (bootverbose)
- device_printf(dev, "wake_prep enabled for %s (%s)\n",
- acpi_name(handle), power_stype_to_name(stype));
+ device_printf(dev, "wake_prep enabled for %s (S%d)\n",
+ acpi_name(handle), sstate);
}
return (0);
}
static int
-acpi_wake_run_prep(ACPI_HANDLE handle, enum power_stype stype)
+acpi_wake_run_prep(ACPI_HANDLE handle, int sstate)
{
- int sstate;
struct acpi_prw_data prw;
device_t dev;
- struct acpi_softc *sc;
/*
* Check that this is a wake-capable device and get its GPE. Return
@@ -3730,9 +3678,6 @@ acpi_wake_run_prep(ACPI_HANDLE handle, enum power_stype stype)
if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0)
return (0);
- sc = device_get_softc(dev);
- sstate = acpi_stype_to_sstate(sc, stype);
-
/*
* If this GPE couldn't be enabled for the previous sleep state, it was
* disabled before going to sleep so re-enable it. If it was enabled,
@@ -3756,26 +3701,26 @@ acpi_wake_run_prep(ACPI_HANDLE handle, enum power_stype stype)
static ACPI_STATUS
acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
{
- enum power_stype stype;
+ int sstate;
/* If suspending, run the sleep prep function, otherwise wake. */
- stype = *(enum power_stype *)context;
+ sstate = *(int *)context;
if (AcpiGbl_SystemAwakeAndRunning)
- acpi_wake_sleep_prep(handle, stype);
+ acpi_wake_sleep_prep(handle, sstate);
else
- acpi_wake_run_prep(handle, stype);
+ acpi_wake_run_prep(handle, sstate);
return (AE_OK);
}
/* Walk the tree rooted at acpi0 to prep devices for suspend/resume. */
static int
-acpi_wake_prep_walk(enum power_stype stype)
+acpi_wake_prep_walk(int sstate)
{
ACPI_HANDLE sb_handle;
if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle)))
AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100,
- acpi_wake_prep, NULL, &stype, NULL);
+ acpi_wake_prep, NULL, &sstate, NULL);
return (0);
}
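
[Editorial note: acpi_wake_prep_walk() above follows ACPICA's standard namespace-walk convention: AcpiWalkNamespace() invokes the callback once per device and passes the opaque context pointer through, here the integer sleep state. A minimal sketch of the pattern, assuming only documented ACPICA calls; the example_ names are hypothetical, not part of this change.]

    #include <contrib/dev/acpica/include/acpi.h>
    #include <dev/acpica/acpivar.h>

    /* Hypothetical callback: runs once per device under \_SB_. */
    static ACPI_STATUS
    example_walk_cb(ACPI_HANDLE handle, UINT32 level, void *context,
        void **status)
    {
    	int sstate = *(int *)context;

    	printf("prepping %s for S%d\n", acpi_name(handle), sstate);
    	return (AE_OK);		/* AE_OK keeps the walk going. */
    }

    static void
    example_walk(int sstate)
    {
    	ACPI_HANDLE sb_handle;

    	if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_",
    	    &sb_handle)))
    		AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100,
    		    example_walk_cb, NULL, &sstate, NULL);
    }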
@@ -3934,35 +3879,31 @@ out:
/* System Event Handlers (registered by EVENTHANDLER_REGISTER) */
static void
-acpi_system_eventhandler_sleep(void *arg, enum power_stype stype)
+acpi_system_eventhandler_sleep(void *arg, int state)
{
struct acpi_softc *sc = (struct acpi_softc *)arg;
int ret;
- ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, stype);
+ ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
/* Check if button action is disabled or unknown. */
- if (stype == ACPI_STATE_UNKNOWN)
+ if (state == ACPI_STATE_UNKNOWN)
return;
- /*
- * Request that the system prepare to enter the given suspend state. We can
- * totally pass an ACPI S-state to an enum power_stype.
- */
- ret = acpi_ReqSleepState(sc, stype);
+ /* Request that the system prepare to enter the given suspend state. */
+ ret = acpi_ReqSleepState(sc, state);
if (ret != 0)
device_printf(sc->acpi_dev,
- "request to enter state %s failed (err %d)\n",
- power_stype_to_name(stype), ret);
+ "request to enter state S%d failed (err %d)\n", state, ret);
return_VOID;
}
static void
-acpi_system_eventhandler_wakeup(void *arg, enum power_stype stype)
+acpi_system_eventhandler_wakeup(void *arg, int state)
{
- ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, stype);
+ ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
/* Currently, nothing to do for wakeup. */
@@ -3976,14 +3917,14 @@ static void
acpi_invoke_sleep_eventhandler(void *context)
{
- EVENTHANDLER_INVOKE(acpi_sleep_event, *(enum power_stype *)context);
+ EVENTHANDLER_INVOKE(acpi_sleep_event, *(int *)context);
}
static void
acpi_invoke_wake_eventhandler(void *context)
{
- EVENTHANDLER_INVOKE(acpi_wakeup_event, *(enum power_stype *)context);
+ EVENTHANDLER_INVOKE(acpi_wakeup_event, *(int *)context);
}
UINT32
@@ -3999,7 +3940,7 @@ acpi_event_power_button_sleep(void *context)
#if defined(__amd64__) || defined(__i386__)
if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
- acpi_invoke_sleep_eventhandler, &sc->acpi_power_button_stype)))
+ acpi_invoke_sleep_eventhandler, &sc->acpi_power_button_sx)))
return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
#else
shutdown_nice(RB_POWEROFF);
@@ -4016,7 +3957,7 @@ acpi_event_power_button_wake(void *context)
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
- acpi_invoke_wake_eventhandler, &sc->acpi_power_button_stype)))
+ acpi_invoke_wake_eventhandler, &sc->acpi_power_button_sx)))
return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
return_VALUE (ACPI_INTERRUPT_HANDLED);
}
@@ -4029,7 +3970,7 @@ acpi_event_sleep_button_sleep(void *context)
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
- acpi_invoke_sleep_eventhandler, &sc->acpi_sleep_button_stype)))
+ acpi_invoke_sleep_eventhandler, &sc->acpi_sleep_button_sx)))
return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
return_VALUE (ACPI_INTERRUPT_HANDLED);
}
@@ -4042,7 +3983,7 @@ acpi_event_sleep_button_wake(void *context)
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
- acpi_invoke_wake_eventhandler, &sc->acpi_sleep_button_stype)))
+ acpi_invoke_wake_eventhandler, &sc->acpi_sleep_button_sx)))
return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
return_VALUE (ACPI_INTERRUPT_HANDLED);
}
@@ -4238,8 +4179,7 @@ acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *t
{
struct acpi_softc *sc;
struct acpi_ioctl_hook *hp;
- int error;
- int sstate;
+ int error, state;
error = 0;
hp = NULL;
@@ -4269,9 +4209,9 @@ acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *t
/* Core system ioctls. */
switch (cmd) {
case ACPIIO_REQSLPSTATE:
- sstate = *(int *)addr;
- if (sstate != ACPI_STATE_S5)
- return (acpi_ReqSleepState(sc, acpi_sstate_to_stype(sstate)));
+ state = *(int *)addr;
+ if (state != ACPI_STATE_S5)
+ return (acpi_ReqSleepState(sc, state));
device_printf(sc->acpi_dev, "power off via acpi ioctl not supported\n");
error = EOPNOTSUPP;
break;
@@ -4280,12 +4220,12 @@ acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *t
error = acpi_AckSleepState(sc->acpi_clone, error);
break;
case ACPIIO_SETSLPSTATE: /* DEPRECATED */
- sstate = *(int *)addr;
- if (sstate < ACPI_STATE_S0 || sstate > ACPI_STATE_S5)
+ state = *(int *)addr;
+ if (state < ACPI_STATE_S0 || state > ACPI_S_STATES_MAX)
return (EINVAL);
- if (!acpi_supported_sstates[sstate])
+ if (!acpi_sleep_states[state])
return (EOPNOTSUPP);
- if (ACPI_FAILURE(acpi_SetSleepState(sc, acpi_sstate_to_stype(sstate))))
+ if (ACPI_FAILURE(acpi_SetSleepState(sc, state)))
error = ENXIO;
break;
default:
@@ -4297,7 +4237,7 @@ acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *t
}
static int
-acpi_sname_to_sstate(const char *sname)
+acpi_sname2sstate(const char *sname)
{
int sstate;
@@ -4312,15 +4252,14 @@ acpi_sname_to_sstate(const char *sname)
}
static const char *
-acpi_sstate_to_sname(int state)
+acpi_sstate2sname(int sstate)
{
- static const char *snames[ACPI_S_STATE_COUNT] = {"S0", "S1", "S2", "S3",
- "S4", "S5"};
+ static const char *snames[] = { "S0", "S1", "S2", "S3", "S4", "S5" };
- if (state == ACPI_STATE_UNKNOWN)
+ if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5)
+ return (snames[sstate]);
+ else if (sstate == ACPI_STATE_UNKNOWN)
return ("NONE");
- if (state >= ACPI_STATE_S0 && state < ACPI_S_STATE_COUNT)
- return (snames[state]);
return (NULL);
}
@@ -4333,8 +4272,8 @@ acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
- if (acpi_supported_sstates[state])
- sbuf_printf(&sb, "%s ", acpi_sstate_to_sname(state));
+ if (acpi_sleep_states[state])
+ sbuf_printf(&sb, "%s ", acpi_sstate2sname(state));
sbuf_trim(&sb);
sbuf_finish(&sb);
error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
@@ -4342,64 +4281,27 @@ acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
return (error);
}
-
static int
acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
{
char sleep_state[10];
- int error;
- int new_sstate, old_sstate;
+ int error, new_state, old_state;
- old_sstate = *(int *)oidp->oid_arg1;
- strlcpy(sleep_state, acpi_sstate_to_sname(old_sstate), sizeof(sleep_state));
+ old_state = *(int *)oidp->oid_arg1;
+ strlcpy(sleep_state, acpi_sstate2sname(old_state), sizeof(sleep_state));
error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req);
if (error == 0 && req->newptr != NULL) {
- new_sstate = acpi_sname_to_sstate(sleep_state);
- if (new_sstate < 0)
+ new_state = acpi_sname2sstate(sleep_state);
+ if (new_state < ACPI_STATE_S1)
return (EINVAL);
- if (new_sstate < ACPI_S_STATE_COUNT &&
- !acpi_supported_sstates[new_sstate])
+ if (new_state < ACPI_S_STATE_COUNT && !acpi_sleep_states[new_state])
return (EOPNOTSUPP);
- if (new_sstate != old_sstate)
- *(int *)oidp->oid_arg1 = new_sstate;
+ if (new_state != old_state)
+ *(int *)oidp->oid_arg1 = new_state;
}
return (error);
}
-static int
-acpi_stype_sysctl(SYSCTL_HANDLER_ARGS)
-{
- char name[10];
- int err;
- int sstate;
- enum power_stype new_stype, old_stype;
-
- old_stype = *(enum power_stype *)oidp->oid_arg1;
- strlcpy(name, power_stype_to_name(old_stype), sizeof(name));
- err = sysctl_handle_string(oidp, name, sizeof(name), req);
- if (err != 0 || req->newptr == NULL)
- return (err);
-
- new_stype = power_name_to_stype(name);
- if (new_stype == POWER_STYPE_UNKNOWN) {
- sstate = acpi_sname_to_sstate(name);
- if (sstate < 0)
- return (EINVAL);
- printf("warning: this sysctl expects a sleep type, but an ACPI S-state has "
- "been passed to it. This functionality is deprecated; see acpi(4).\n");
- MPASS(sstate < ACPI_S_STATE_COUNT);
- if (acpi_supported_sstates[sstate] == false)
- return (EOPNOTSUPP);
- new_stype = acpi_sstate_to_stype(sstate);
- }
-
- if (acpi_supported_stypes[new_stype] == false)
- return (EOPNOTSUPP);
- if (new_stype != old_stype)
- *(enum power_stype *)oidp->oid_arg1 = new_stype;
- return (0);
-}
-
/* Inform devctl(4) when we receive a Notify. */
void
acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify)
@@ -4748,7 +4650,7 @@ acpi_reset_interfaces(device_t dev)
static int
acpi_pm_func(u_long cmd, void *arg, enum power_stype stype)
{
- int error;
+ int error, sstate;
struct acpi_softc *sc;
error = 0;
@@ -4759,7 +4661,8 @@ acpi_pm_func(u_long cmd, void *arg, enum power_stype stype)
error = EINVAL;
goto out;
}
- if (ACPI_FAILURE(acpi_EnterSleepState(sc, stype)))
+ sstate = acpi_stype_to_sstate(sc, stype);
+ if (ACPI_FAILURE(acpi_EnterSleepState(sc, sstate)))
error = ENXIO;
break;
default:
@@ -4777,8 +4680,7 @@ acpi_pm_register(void *arg)
if (!cold || resource_disabled("acpi", 0))
return;
- power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL,
- acpi_supported_stypes);
+ power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL);
}
SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, NULL);
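
[Editorial note: with the supported-stypes array dropped, power_pm_register() is back to its three-argument form, while the registered function still receives POWER_CMD_SUSPEND with a sleep type, as the acpi_pm_func() hunk above shows. A hedged sketch of a registrant; the example_ names are illustrative only.]

    #include <sys/param.h>
    #include <sys/errno.h>
    #include <sys/kernel.h>
    #include <sys/systm.h>
    #include <sys/power.h>

    /* Illustrative power-management hook, modeled on acpi_pm_func(). */
    static int
    example_pm_func(u_long cmd, void *arg, enum power_stype stype)
    {
    	switch (cmd) {
    	case POWER_CMD_SUSPEND:
    		/* Start the platform suspend path here. */
    		return (0);
    	default:
    		return (EINVAL);
    	}
    }

    static void
    example_pm_register(void *arg)
    {
    	/* Three-argument form, as restored by this change. */
    	power_pm_register(POWER_PM_TYPE_ACPI, example_pm_func, NULL);
    }
    SYSINIT(example_power, SI_SUB_KLD, SI_ORDER_ANY, example_pm_register, NULL);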
diff --git a/sys/dev/acpica/acpi_lid.c b/sys/dev/acpica/acpi_lid.c
index fb8755d9f0fe..142791f7282a 100644
--- a/sys/dev/acpica/acpi_lid.c
+++ b/sys/dev/acpica/acpi_lid.c
@@ -235,9 +235,9 @@ acpi_lid_notify_status_changed(void *arg)
sc->lid_status ? "opened" : "closed");
if (sc->lid_status == 0)
- EVENTHANDLER_INVOKE(acpi_sleep_event, acpi_sc->acpi_lid_switch_stype);
+ EVENTHANDLER_INVOKE(acpi_sleep_event, acpi_sc->acpi_lid_switch_sx);
else
- EVENTHANDLER_INVOKE(acpi_wakeup_event, acpi_sc->acpi_lid_switch_stype);
+ EVENTHANDLER_INVOKE(acpi_wakeup_event, acpi_sc->acpi_lid_switch_sx);
out:
ACPI_SERIAL_END(lid);
diff --git a/sys/dev/acpica/acpivar.h b/sys/dev/acpica/acpivar.h
index 4c789dd3e9f2..fac32d832598 100644
--- a/sys/dev/acpica/acpivar.h
+++ b/sys/dev/acpica/acpivar.h
@@ -54,19 +54,20 @@ struct acpi_softc {
struct cdev *acpi_dev_t;
int acpi_enabled;
- enum power_stype acpi_stype;
+ int acpi_sstate;
int acpi_sleep_disabled;
struct sysctl_ctx_list acpi_sysctl_ctx;
struct sysctl_oid *acpi_sysctl_tree;
- enum power_stype acpi_power_button_stype;
- enum power_stype acpi_sleep_button_stype;
- enum power_stype acpi_lid_switch_stype;
+ int acpi_power_button_sx;
+ int acpi_sleep_button_sx;
+ int acpi_lid_switch_sx;
int acpi_standby_sx;
- int acpi_s4bios;
+ int acpi_suspend_sx;
int acpi_sleep_delay;
+ int acpi_s4bios;
int acpi_do_disable;
int acpi_verbose;
int acpi_handle_reboot;
@@ -74,7 +75,7 @@ struct acpi_softc {
vm_offset_t acpi_wakeaddr;
vm_paddr_t acpi_wakephys;
- enum power_stype acpi_next_stype; /* Next suspend sleep type. */
+ int acpi_next_sstate; /* Next suspend Sx state. */
struct apm_clone_data *acpi_clone; /* Pseudo-dev for devd(8). */
STAILQ_HEAD(,apm_clone_data) apm_cdevs; /* All apm/apmctl/acpi cdevs. */
struct callout susp_force_to; /* Force suspend if no acks. */
@@ -411,7 +412,7 @@ ACPI_STATUS acpi_EvaluateOSC(ACPI_HANDLE handle, uint8_t *uuid,
uint32_t *caps_out, bool query);
ACPI_STATUS acpi_OverrideInterruptLevel(UINT32 InterruptNumber);
ACPI_STATUS acpi_SetIntrModel(int model);
-int acpi_ReqSleepState(struct acpi_softc *sc, enum power_stype stype);
+int acpi_ReqSleepState(struct acpi_softc *sc, int state);
int acpi_AckSleepState(struct apm_clone_data *clone, int error);
ACPI_STATUS acpi_SetSleepState(struct acpi_softc *sc, int state);
int acpi_wake_set_enable(device_t dev, int enable);
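
[Editorial note: the acpi_ReqSleepState() prototype above is what userland reaches through the /dev/acpi ioctl interface (the path acpiconf -s takes). A hedged userland sketch, assuming only the ACPIIO_REQSLPSTATE ioctl shown in the acpiioctl() hunk earlier.]

    #include <sys/ioctl.h>
    #include <dev/acpica/acpiio.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* Ask the kernel to begin an S3 suspend; returns 0 on success. */
    int
    request_s3(void)
    {
    	int fd, state = 3;	/* S3, as a plain integer again */

    	fd = open("/dev/acpi", O_WRONLY);
    	if (fd < 0)
    		return (-1);
    	/* Kernel side: acpiioctl() -> acpi_ReqSleepState(sc, state). */
    	if (ioctl(fd, ACPIIO_REQSLPSTATE, &state) == -1) {
    		close(fd);
    		return (-1);
    	}
    	close(fd);
    	return (0);
    }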
diff --git a/sys/dev/ath/if_ath.c b/sys/dev/ath/if_ath.c
index 934024ddfbcf..1304b597c545 100644
--- a/sys/dev/ath/if_ath.c
+++ b/sys/dev/ath/if_ath.c
@@ -924,6 +924,9 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
| IEEE80211_C_PMGT /* Station side power mgmt */
| IEEE80211_C_SWSLEEP
;
+
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
/*
* Query the hal to figure out h/w crypto support.
*/
diff --git a/sys/dev/ath/if_ath_tx.c b/sys/dev/ath/if_ath_tx.c
index 1559b66a7c7d..deadd63c3d18 100644
--- a/sys/dev/ath/if_ath_tx.c
+++ b/sys/dev/ath/if_ath_tx.c
@@ -1588,6 +1588,10 @@ ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
*/
pktlen = m0->m_pkthdr.len - (hdrlen & 3);
+ /* seqno allocate, only if AMPDU isn't running */
+ if ((m0->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
/* Handle encryption twiddling if needed */
if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
&pktlen, &keyix)) {
@@ -2201,6 +2205,10 @@ ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
* for QoS frames.
*/
+ /* seqno allocate, only if AMPDU isn't running */
+ if ((m0->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
/* Handle encryption twiddling if needed */
if (! ath_tx_tag_crypto(sc, ni,
m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0,
@@ -2981,6 +2989,8 @@ ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,
ATH_TX_LOCK_ASSERT(sc);
+ /* TODO: can this use ieee80211_output_seqno_assign() now? */
+
/*
* Is it a QOS NULL Data frame? Give it a sequence number from
* the default TID (IEEE80211_NONQOS_TID.)
diff --git a/sys/dev/bwi/if_bwi.c b/sys/dev/bwi/if_bwi.c
index 1087ca813d65..85146d4c4010 100644
--- a/sys/dev/bwi/if_bwi.c
+++ b/sys/dev/bwi/if_bwi.c
@@ -498,6 +498,9 @@ bwi_attach(struct bwi_softc *sc)
IEEE80211_C_BGSCAN |
IEEE80211_C_MONITOR;
ic->ic_opmode = IEEE80211_M_STA;
+
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
ieee80211_ifattach(ic);
ic->ic_headroom = sizeof(struct bwi_txbuf_hdr);
@@ -1361,6 +1364,7 @@ bwi_start_locked(struct bwi_softc *sc)
(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
wh = mtod(m, struct ieee80211_frame *);
+ ieee80211_output_seqno_assign(ni, -1, m);
if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) != 0 &&
ieee80211_crypto_encap(ni, m) == NULL) {
if_inc_counter(ni->ni_vap->iv_ifp,
diff --git a/sys/dev/bwn/if_bwn.c b/sys/dev/bwn/if_bwn.c
index 38bf6f5d31a3..ec9d56661034 100644
--- a/sys/dev/bwn/if_bwn.c
+++ b/sys/dev/bwn/if_bwn.c
@@ -774,6 +774,7 @@ bwn_attach_post(struct bwn_softc *sc)
;
ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS; /* s/w bmiss */
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
/* Determine the NVRAM variable containing our MAC address */
core_unit = bhnd_get_core_unit(sc->sc_dev);
@@ -999,6 +1000,7 @@ bwn_start(struct bwn_softc *sc)
continue;
}
wh = mtod(m, struct ieee80211_frame *);
+ ieee80211_output_seqno_assign(ni, -1, m);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m);
if (k == NULL) {
diff --git a/sys/dev/cpuctl/cpuctl.c b/sys/dev/cpuctl/cpuctl.c
index 9253b17a259d..b0ab3467df69 100644
--- a/sys/dev/cpuctl/cpuctl.c
+++ b/sys/dev/cpuctl/cpuctl.c
@@ -402,19 +402,20 @@ out:
* its workings.
*/
static void
-amd_ucode_wrmsr(void *ucode_ptr)
+amd_ucode_wrmsr(void *arg)
{
+ struct ucode_update_data *d = arg;
uint32_t tmp[4];
- wrmsr_safe(MSR_K8_UCODE_UPDATE, (uintptr_t)ucode_ptr);
+ if (PCPU_GET(cpuid) == d->cpu)
+ d->ret = wrmsr_safe(MSR_K8_UCODE_UPDATE, (uintptr_t)d->ptr);
do_cpuid(0, tmp);
}
static int
update_amd(int cpu, cpuctl_update_args_t *args, struct thread *td)
{
- void *ptr;
- int ret;
+ struct ucode_update_data d = { .cpu = cpu };
if (args->size == 0 || args->data == NULL) {
DPRINTF("[cpuctl,%d]: zero-sized firmware image", __LINE__);
@@ -430,18 +431,17 @@ update_amd(int cpu, cpuctl_update_args_t *args, struct thread *td)
* malloc(9) always returns the pointer aligned at least on
* the size of the allocation.
*/
- ptr = malloc(args->size + 16, M_CPUCTL, M_ZERO | M_WAITOK);
- if (copyin(args->data, ptr, args->size) != 0) {
+ d.ptr = malloc(args->size + 16, M_CPUCTL, M_ZERO | M_WAITOK);
+ if (copyin(args->data, d.ptr, args->size) != 0) {
DPRINTF("[cpuctl,%d]: copyin %p->%p of %zd bytes failed",
__LINE__, args->data, ptr, args->size);
- ret = EFAULT;
+ d.ret = EFAULT;
goto fail;
}
- smp_rendezvous(NULL, amd_ucode_wrmsr, NULL, ptr);
- ret = 0;
+ smp_rendezvous(NULL, amd_ucode_wrmsr, NULL, &d);
fail:
- free(ptr, M_CPUCTL);
- return (ret);
+ free(d.ptr, M_CPUCTL);
+ return (d.ret);
}
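
[Editorial note: the update_amd() fix is an instance of a common pattern: smp_rendezvous() runs the action on every CPU, so the payload carries the target CPU id and only that CPU performs the privileged write, while all CPUs still execute the serializing CPUID. A condensed sketch of the pattern; the struct and names are illustrative stand-ins, the real ucode_update_data is defined elsewhere in cpuctl.c.]

    #include <sys/param.h>
    #include <sys/pcpu.h>
    #include <sys/smp.h>

    struct target_cpu_work {	/* illustrative stand-in */
    	int	cpu;		/* CPU that must do the work */
    	int	ret;		/* result, written only by that CPU */
    	void	*ptr;		/* payload */
    };

    static void
    target_cpu_action(void *arg)
    {
    	struct target_cpu_work *w = arg;

    	/* Runs on all CPUs; only the chosen one acts. */
    	if (PCPU_GET(cpuid) == w->cpu)
    		w->ret = 0;	/* e.g. wrmsr_safe(...) */
    }

    static int
    run_on_cpu(int cpu, void *payload)
    {
    	struct target_cpu_work w = { .cpu = cpu, .ptr = payload };

    	smp_rendezvous(NULL, target_cpu_action, NULL, &w);
    	return (w.ret);
    }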
static int
diff --git a/sys/dev/gpio/gpioled.c b/sys/dev/gpio/gpioled.c
index 71af5741b2fe..a36c2faef379 100644
--- a/sys/dev/gpio/gpioled.c
+++ b/sys/dev/gpio/gpioled.c
@@ -75,8 +75,6 @@ gpioled_control(void *priv, int onoff)
struct gpioled_softc *sc;
sc = (struct gpioled_softc *)priv;
- if (onoff == -1) /* Keep the current state. */
- return;
if (sc->sc_softinvert)
onoff = !onoff;
GPIOLED_LOCK(sc);
diff --git a/sys/dev/ipw/if_ipw.c b/sys/dev/ipw/if_ipw.c
index 01d713cdae18..9db562669487 100644
--- a/sys/dev/ipw/if_ipw.c
+++ b/sys/dev/ipw/if_ipw.c
@@ -283,6 +283,8 @@ ipw_attach(device_t dev)
| IEEE80211_C_WPA /* 802.11i supported */
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
/* read MAC address from EEPROM */
val = ipw_read_prom_word(sc, IPW_EEPROM_MAC + 0);
ic->ic_macaddr[0] = val >> 8;
@@ -1557,6 +1559,7 @@ ipw_tx_start(struct ipw_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
wh = mtod(m0, struct ieee80211_frame *);
+ ieee80211_output_seqno_assign(ni, -1, m0);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
diff --git a/sys/dev/iwi/if_iwi.c b/sys/dev/iwi/if_iwi.c
index 3a410a5cbf2c..26b8037186a6 100644
--- a/sys/dev/iwi/if_iwi.c
+++ b/sys/dev/iwi/if_iwi.c
@@ -371,6 +371,8 @@ iwi_attach(device_t dev)
#endif
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
/* read MAC address from EEPROM */
val = iwi_read_prom_word(sc, IWI_EEPROM_MAC + 0);
ic->ic_macaddr[0] = val & 0xff;
@@ -1834,6 +1836,8 @@ iwi_tx_start(struct iwi_softc *sc, struct mbuf *m0, struct ieee80211_node *ni,
} else
staid = 0;
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
diff --git a/sys/dev/iwm/if_iwm.c b/sys/dev/iwm/if_iwm.c
index 1e9090310ece..6840c6a4d00a 100644
--- a/sys/dev/iwm/if_iwm.c
+++ b/sys/dev/iwm/if_iwm.c
@@ -3773,6 +3773,10 @@ iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
+ /* Offloaded sequence number assignment; non-AMPDU case */
+ if ((m->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m);
+
/* Encrypt the frame if need be. */
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
/* Retrieve key for TX && do software encryption. */
@@ -6142,7 +6146,8 @@ iwm_attach(device_t dev)
// IEEE80211_C_BGSCAN /* capable of bg scanning */
;
/* Advertise full-offload scanning */
- ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SCAN_OFFLOAD;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
sc->sc_phyctxt[i].id = i;
sc->sc_phyctxt[i].color = 0;
diff --git a/sys/dev/iwn/if_iwn.c b/sys/dev/iwn/if_iwn.c
index b7c452a4f074..a949103f20d4 100644
--- a/sys/dev/iwn/if_iwn.c
+++ b/sys/dev/iwn/if_iwn.c
@@ -584,6 +584,11 @@ iwn_attach(device_t dev)
| IEEE80211_C_PMGT /* Station-side power mgmt */
;
+ /* Driver / firmware assigned sequence numbers */
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+ /* Don't originate null data frames in net80211 */
+ ic->ic_flags_ext |= IEEE80211_FEXT_NO_NULLDATA;
+
/* Read MAC address, channels, etc from EEPROM. */
if ((error = iwn_read_eeprom(sc, ic->ic_macaddr)) != 0) {
device_printf(dev, "could not read EEPROM, error %d\n",
@@ -4577,6 +4582,9 @@ iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
* XXX TODO: Group addressed frames aren't aggregated and must
* go to the normal non-aggregation queue, and have a NONQOS TID
* assigned from net80211.
+ *
+ * TODO: same with NULL QOS frames, which we shouldn't be sending
+ * anyway ourselves (and should stub out / warn / etc.)
*/
ac = M_WME_GETAC(m);
@@ -4589,6 +4597,10 @@ iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
ac = *(int *)tap->txa_private;
}
+ /* Only assign if not A-MPDU; the A-MPDU TX path will do its own */
+ if ((m->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m);
+
/* Encrypt the frame if need be. */
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
/* Retrieve key for TX. */
diff --git a/sys/dev/iwx/if_iwx.c b/sys/dev/iwx/if_iwx.c
index 1fe531d69933..8422fcb787c3 100644
--- a/sys/dev/iwx/if_iwx.c
+++ b/sys/dev/iwx/if_iwx.c
@@ -5673,8 +5673,9 @@ iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
if (rinfo == NULL)
return EINVAL;
- /* Offloaded sequence number assignment */
- /* Note: Should be done in firmware on all supported devices */
+ /* Offloaded sequence number assignment; non-AMPDU case */
+ if ((m->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m);
/* Radiotap */
if (ieee80211_radiotap_active_vap(vap)) {
@@ -10474,6 +10475,8 @@ iwx_attach(device_t dev)
ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
/* Enable seqno offload */
ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+ /* Don't send null data frames; let firmware do it */
+ ic->ic_flags_ext |= IEEE80211_FEXT_NO_NULLDATA;
ic->ic_txstream = 2;
ic->ic_rxstream = 2;
diff --git a/sys/dev/malo/if_malo.c b/sys/dev/malo/if_malo.c
index 79a3213c6802..2e4f3967ace4 100644
--- a/sys/dev/malo/if_malo.c
+++ b/sys/dev/malo/if_malo.c
@@ -263,6 +263,8 @@ malo_attach(uint16_t devid, struct malo_softc *sc)
;
IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->malo_hwspecs.macaddr);
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
/*
* Transmit requires space in the packet for a special format transmit
* record and optional padding between this record and the payload.
@@ -1040,6 +1042,8 @@ malo_tx_start(struct malo_softc *sc, struct ieee80211_node *ni,
} else
qos = 0;
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (iswep) {
struct ieee80211_key *k;
diff --git a/sys/dev/mwl/if_mwl.c b/sys/dev/mwl/if_mwl.c
index c885968dfe15..9f3d34f4f50d 100644
--- a/sys/dev/mwl/if_mwl.c
+++ b/sys/dev/mwl/if_mwl.c
@@ -433,6 +433,8 @@ mwl_attach(uint16_t devid, struct mwl_softc *sc)
| IEEE80211_HTC_SMPS /* SMPS available */
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
/*
* Mark h/w crypto support.
* XXX no way to query h/w support.
@@ -3087,6 +3089,8 @@ mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *
} else
qos = 0;
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (iswep) {
const struct ieee80211_cipher *cip;
struct ieee80211_key *k;
diff --git a/sys/dev/otus/if_otus.c b/sys/dev/otus/if_otus.c
index 5919e75a59cf..f6c4a0118b68 100644
--- a/sys/dev/otus/if_otus.c
+++ b/sys/dev/otus/if_otus.c
@@ -728,6 +728,12 @@ otus_attachhook(struct otus_softc *sc)
IEEE80211_C_SWAMSDUTX | /* Do software A-MSDU TX */
IEEE80211_C_WPA; /* WPA/RSN. */
+ /*
+ * Although A-MPDU RX is fine, A-MPDU TX apparently has some
+ * hardware bugs. The Linux carl9170 driver has a work-around
+ * that forces all frames into the AC_BE queue regardless of
+ * the actual QoS queue.
+ */
ic->ic_htcaps =
IEEE80211_HTC_HT |
#if 0
@@ -737,6 +743,8 @@ otus_attachhook(struct otus_softc *sc)
IEEE80211_HTCAP_MAXAMSDU_3839 |
IEEE80211_HTCAP_SMPS_OFF;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
otus_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -2232,6 +2240,9 @@ otus_tx(struct otus_softc *sc, struct ieee80211_node *ni, struct mbuf *m,
int hasqos, xferlen, type, ismcast;
wh = mtod(m, struct ieee80211_frame *);
+
+ ieee80211_output_seqno_assign(ni, -1, m);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m);
if (k == NULL) {
diff --git a/sys/dev/ral/rt2560.c b/sys/dev/ral/rt2560.c
index 09b01ea55be9..7feb324eb21d 100644
--- a/sys/dev/ral/rt2560.c
+++ b/sys/dev/ral/rt2560.c
@@ -281,6 +281,8 @@ rt2560_attach(device_t dev, int id)
#endif
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
rt2560_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1516,6 +1518,8 @@ rt2560_tx_mgt(struct rt2560_softc *sc, struct mbuf *m0,
wh = mtod(m0, struct ieee80211_frame *);
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
diff --git a/sys/dev/ral/rt2661.c b/sys/dev/ral/rt2661.c
index 38cd99d899ed..c9c86d4f089a 100644
--- a/sys/dev/ral/rt2661.c
+++ b/sys/dev/ral/rt2661.c
@@ -282,6 +282,8 @@ rt2661_attach(device_t dev, int id)
#endif
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
rt2661_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1284,7 +1286,7 @@ rt2661_tx_mgt(struct rt2661_softc *sc, struct mbuf *m0,
rate = ni->ni_txparms->mgmtrate;
wh = mtod(m0, struct ieee80211_frame *);
-
+ ieee80211_output_seqno_assign(ni, -1, m0);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
diff --git a/sys/dev/ral/rt2860.c b/sys/dev/ral/rt2860.c
index 1449df683a93..76fe4652839d 100644
--- a/sys/dev/ral/rt2860.c
+++ b/sys/dev/ral/rt2860.c
@@ -323,6 +323,8 @@ rt2860_attach(device_t dev, int id)
| IEEE80211_C_WME /* 802.11e */
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
rt2860_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1471,6 +1473,7 @@ rt2860_tx(struct rt2860_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
wh = mtod(m, struct ieee80211_frame *);
+ ieee80211_output_seqno_assign(ni, -1, m);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m);
if (k == NULL) {
diff --git a/sys/dev/random/fenestrasX/fx_pool.c b/sys/dev/random/fenestrasX/fx_pool.c
index d2e6f0db71ee..f4ad1e295d54 100644
--- a/sys/dev/random/fenestrasX/fx_pool.c
+++ b/sys/dev/random/fenestrasX/fx_pool.c
@@ -164,6 +164,9 @@ static const struct fxrng_ent_char {
[RANDOM_CALLOUT] = {
.entc_cls = &fxrng_lo_push,
},
+ [RANDOM_RANDOMDEV] = {
+ .entc_cls = &fxrng_lo_push,
+ },
[RANDOM_PURE_OCTEON] = {
.entc_cls = &fxrng_hi_push, /* Could be made pull. */
},
diff --git a/sys/dev/rtwn/if_rtwn.c b/sys/dev/rtwn/if_rtwn.c
index 25287f222270..c5889937fb08 100644
--- a/sys/dev/rtwn/if_rtwn.c
+++ b/sys/dev/rtwn/if_rtwn.c
@@ -271,6 +271,11 @@ rtwn_attach(struct rtwn_softc *sc)
/* Enable seqno offload */
ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+#ifdef RTWN_WITHOUT_UCODE
+ /* Don't originate NULL data frames - let firmware do this */
+ ic->ic_flags_ext |= IEEE80211_FEXT_NO_NULLDATA;
+#endif
+
/* Adjust capabilities. */
rtwn_adj_devcaps(sc);
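
[Editorial note: the same two-part pattern repeats across every wireless driver touched in this change: advertise sequence-number offload at attach time, then assign a sequence number on the driver's TX path just before crypto encapsulation, skipping A-MPDU frames (the aggregation path numbers those itself). Condensed into one hedged sketch using the net80211 API as exercised above.]

    #include <sys/param.h>
    #include <sys/mbuf.h>
    #include <net80211/ieee80211_var.h>

    /* At attach: this driver (or its firmware) assigns TX sequence numbers. */
    static void
    example_attach_caps(struct ieee80211com *ic)
    {
    	ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
    }

    /*
     * On the TX path, before ieee80211_crypto_encap(); A-MPDU frames are
     * numbered by the aggregation code instead.
     */
    static void
    example_tx_seqno(struct ieee80211_node *ni, struct mbuf *m)
    {
    	if ((m->m_flags & M_AMPDU_MPDU) == 0)
    		ieee80211_output_seqno_assign(ni, -1, m);
    }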
diff --git a/sys/dev/ufshci/ufshci_ctrlr.c b/sys/dev/ufshci/ufshci_ctrlr.c
index 36be94b8b8b7..35663b480cfa 100644
--- a/sys/dev/ufshci/ufshci_ctrlr.c
+++ b/sys/dev/ufshci/ufshci_ctrlr.c
@@ -12,8 +12,108 @@
#include "ufshci_private.h"
#include "ufshci_reg.h"
+static void
+ufshci_ctrlr_fail(struct ufshci_controller *ctrlr)
+{
+ ctrlr->is_failed = true;
+
+ ufshci_req_queue_fail(ctrlr,
+ ctrlr->task_mgmt_req_queue.qops.get_hw_queue(
+ &ctrlr->task_mgmt_req_queue));
+ ufshci_req_queue_fail(ctrlr,
+ ctrlr->transfer_req_queue.qops.get_hw_queue(
+ &ctrlr->transfer_req_queue));
+}
+
+static void
+ufshci_ctrlr_start(struct ufshci_controller *ctrlr, bool resetting)
+{
+ TSENTER();
+
+ /*
+ * If `resetting` is true, we are on the reset path.
+ * Re-enable request queues here because ufshci_ctrlr_reset_task()
+ * disables them during reset.
+ */
+ if (resetting) {
+ if (ufshci_utmr_req_queue_enable(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+ if (ufshci_utr_req_queue_enable(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+ }
+
+ if (ufshci_ctrlr_send_nop(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Initialize UFS target device */
+ if (ufshci_dev_init(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Initialize Reference Clock */
+ if (ufshci_dev_init_reference_clock(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Initialize unipro */
+ if (ufshci_dev_init_unipro(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /*
+ * Initialize UIC Power Mode
+ * QEMU UFS devices do not support unipro and power mode.
+ */
+ if (!(ctrlr->quirks & UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE) &&
+ ufshci_dev_init_uic_power_mode(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Initialize UFS Power Mode */
+ if (ufshci_dev_init_ufs_power_mode(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Read Controller Descriptor (Device, Geometry) */
+ if (ufshci_dev_get_descriptor(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ if (ufshci_dev_config_write_booster(ctrlr)) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* TODO: Configure Write Protect */
+
+ /* TODO: Configure Background Operations */
+
+ /*
+ * If the reset is due to a timeout, the controller is already
+ * attached to the SIM and does not need to be attached again.
+ */
+ if (!resetting && ufshci_sim_attach(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ TSEXIT();
+}
+
static int
-ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
+ufshci_ctrlr_disable_host_ctrlr(struct ufshci_controller *ctrlr)
{
int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
sbintime_t delta_t = SBT_1US;
@@ -27,6 +127,35 @@ ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
ufshci_mmio_write_4(ctrlr, hce, hce);
}
+ /* Wait for the HCE flag to change */
+ while (1) {
+ hce = ufshci_mmio_read_4(ctrlr, hce);
+ if (!UFSHCIV(UFSHCI_HCE_REG_HCE, hce))
+ break;
+ if (timeout - ticks < 0) {
+ ufshci_printf(ctrlr,
+ "host controller failed to disable "
+ "within %d ms\n",
+ ctrlr->device_init_timeout_in_ms);
+ return (ENXIO);
+ }
+
+ pause_sbt("ufshci_disable_hce", delta_t, 0, C_PREL(1));
+ delta_t = min(SBT_1MS, delta_t * 3 / 2);
+ }
+
+ return (0);
+}
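
[Editorial note: both HCE handshakes use the same poll-with-backoff idiom: read the register, sleep starting at 1us, grow the interval by 1.5x up to 1ms, and give up after the configured device-init timeout. A generic hedged sketch of the idiom; register access through a bare pointer is for illustration only, the driver goes through ufshci_mmio_read_4().]

    #include <sys/param.h>
    #include <sys/callout.h>
    #include <sys/errno.h>
    #include <sys/kernel.h>
    #include <sys/systm.h>
    #include <sys/time.h>

    /* Wait for all bits in mask to clear, with exponential backoff. */
    static int
    example_poll_clear(volatile uint32_t *reg, uint32_t mask, int timeout_ms)
    {
    	int timeout = ticks + MSEC_2_TICKS(timeout_ms);
    	sbintime_t delta_t = SBT_1US;

    	while ((*reg & mask) != 0) {
    		if (timeout - ticks < 0)
    			return (ENXIO);
    		pause_sbt("examplepoll", delta_t, 0, C_PREL(1));
    		delta_t = min(SBT_1MS, delta_t * 3 / 2);
    	}
    	return (0);
    }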
+
+static int
+ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
+{
+ int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
+ sbintime_t delta_t = SBT_1US;
+ uint32_t hce;
+
+ hce = ufshci_mmio_read_4(ctrlr, hce);
+
/* Enable UFS host controller */
hce |= UFSHCIM(UFSHCI_HCE_REG_HCE);
ufshci_mmio_write_4(ctrlr, hce, hce);
@@ -36,7 +165,7 @@ ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
* unstable, so we need to read the HCE value after some time after
* initialization is complete.
*/
- pause_sbt("ufshci_hce", ustosbt(100), 0, C_PREL(1));
+ pause_sbt("ufshci_enable_hce", ustosbt(100), 0, C_PREL(1));
/* Wait for the HCE flag to change */
while (1) {
@@ -51,17 +180,103 @@ ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
return (ENXIO);
}
- pause_sbt("ufshci_hce", delta_t, 0, C_PREL(1));
+ pause_sbt("ufshci_enable_hce", delta_t, 0, C_PREL(1));
delta_t = min(SBT_1MS, delta_t * 3 / 2);
}
return (0);
}
+static int
+ufshci_ctrlr_disable(struct ufshci_controller *ctrlr)
+{
+ int error;
+
+ /* Disable all interrupts */
+ ufshci_mmio_write_4(ctrlr, ie, 0);
+
+ error = ufshci_ctrlr_disable_host_ctrlr(ctrlr);
+ return (error);
+}
+
+static int
+ufshci_ctrlr_enable(struct ufshci_controller *ctrlr)
+{
+ uint32_t ie, hcs;
+ int error;
+
+ error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
+ if (error)
+ return (error);
+
+ /* Send DME_LINKSTARTUP command to start the link startup procedure */
+ error = ufshci_uic_send_dme_link_startup(ctrlr);
+ if (error)
+ return (error);
+
+ /*
+ * The device_present(UFSHCI_HCS_REG_DP) bit becomes true if the host
+ * controller has successfully received a Link Startup UIC command
+ * response and the UFS device has found a physical link to the
+ * controller.
+ */
+ hcs = ufshci_mmio_read_4(ctrlr, hcs);
+ if (!UFSHCIV(UFSHCI_HCS_REG_DP, hcs)) {
+ ufshci_printf(ctrlr, "UFS device not found\n");
+ return (ENXIO);
+ }
+
+ /* Enable additional interrupts by programming the IE register. */
+ ie = ufshci_mmio_read_4(ctrlr, ie);
+ ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE); /* UTR Completion */
+ ie |= UFSHCIM(UFSHCI_IE_REG_UEE); /* UIC Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_UTMRCE); /* UTMR Completion */
+ ie |= UFSHCIM(UFSHCI_IE_REG_DFEE); /* Device Fatal Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_UTPEE); /* UTP Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_HCFEE); /* Host Ctrlr Fatal Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_SBFEE); /* System Bus Fatal Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_CEFEE); /* Crypto Engine Fatal Error */
+ ufshci_mmio_write_4(ctrlr, ie, ie);
+
+ /* TODO: Initialize interrupt Aggregation Control Register (UTRIACR) */
+
+ return (0);
+}
+
+static int
+ufshci_ctrlr_hw_reset(struct ufshci_controller *ctrlr)
+{
+ int error;
+
+ error = ufshci_ctrlr_disable(ctrlr);
+ if (error)
+ return (error);
+
+ error = ufshci_ctrlr_enable(ctrlr);
+ return (error);
+}
+
+static void
+ufshci_ctrlr_reset_task(void *arg, int pending)
+{
+ struct ufshci_controller *ctrlr = arg;
+ int error;
+
+ /* Release resources */
+ ufshci_utmr_req_queue_disable(ctrlr);
+ ufshci_utr_req_queue_disable(ctrlr);
+
+ error = ufshci_ctrlr_hw_reset(ctrlr);
+ if (error)
+ return (ufshci_ctrlr_fail(ctrlr));
+
+ ufshci_ctrlr_start(ctrlr, true);
+}
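
[Editorial note: ufshci_ctrlr_reset_task() runs from a taskqueue thread, so the recovery path may sleep; the timeout callout only enqueues the task (see ufshci_ctrlr_reset() later in this diff). The setup/enqueue pairing, reduced to its shape; the example_ names are illustrative.]

    #include <sys/param.h>
    #include <sys/kernel.h>
    #include <sys/malloc.h>
    #include <sys/priority.h>
    #include <sys/taskqueue.h>

    struct example_softc {
    	struct taskqueue	*tq;
    	struct task		reset_task;
    };

    static void
    example_reset_task(void *arg, int pending)
    {
    	/* Taskqueue context: sleeping is allowed here, unlike callouts. */
    }

    static void
    example_init(struct example_softc *sc)
    {
    	sc->tq = taskqueue_create("example_taskq", M_WAITOK,
    	    taskqueue_thread_enqueue, &sc->tq);
    	taskqueue_start_threads(&sc->tq, 1, PI_DISK, "example taskq");
    	TASK_INIT(&sc->reset_task, 0, example_reset_task, sc);
    }

    static void
    example_request_reset(struct example_softc *sc)
    {
    	/* Cheap and safe from timeout/interrupt context. */
    	taskqueue_enqueue(sc->tq, &sc->reset_task);
    }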
+
int
ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
{
- uint32_t ver, cap, hcs, ie, ahit;
+ uint32_t ver, cap, ahit;
uint32_t timeout_period, retry_count;
int error;
@@ -114,16 +329,15 @@ ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
TUNABLE_INT_FETCH("hw.ufshci.retry_count", &retry_count);
ctrlr->retry_count = retry_count;
- /* Disable all interrupts */
- ufshci_mmio_write_4(ctrlr, ie, 0);
-
- /* Enable Host Controller */
- error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
- if (error)
- return (error);
+ ctrlr->enable_aborts = 1;
+ if (ctrlr->quirks & UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK)
+ ctrlr->enable_aborts = 0;
+ else
+ TUNABLE_INT_FETCH("hw.ufshci.enable_aborts",
+ &ctrlr->enable_aborts);
- /* Send DME_LINKSTARTUP command to start the link startup procedure */
- error = ufshci_uic_send_dme_link_startup(ctrlr);
+ /* Reset the UFSHCI controller */
+ error = ufshci_ctrlr_hw_reset(ctrlr);
if (error)
return (error);
@@ -134,18 +348,6 @@ ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
ahit = 0;
ufshci_mmio_write_4(ctrlr, ahit, ahit);
- /*
- * The device_present(UFSHCI_HCS_REG_DP) bit becomes true if the host
- * controller has successfully received a Link Startup UIC command
- * response and the UFS device has found a physical link to the
- * controller.
- */
- hcs = ufshci_mmio_read_4(ctrlr, hcs);
- if (!UFSHCIV(UFSHCI_HCS_REG_DP, hcs)) {
- ufshci_printf(ctrlr, "UFS device not found\n");
- return (ENXIO);
- }
-
/* Allocate and initialize UTP Task Management Request List. */
error = ufshci_utmr_req_queue_construct(ctrlr);
if (error)
@@ -156,27 +358,21 @@ ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
if (error)
return (error);
- /* Enable additional interrupts by programming the IE register. */
- ie = ufshci_mmio_read_4(ctrlr, ie);
- ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE); /* UTR Completion */
- ie |= UFSHCIM(UFSHCI_IE_REG_UEE); /* UIC Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_UTMRCE); /* UTMR Completion */
- ie |= UFSHCIM(UFSHCI_IE_REG_DFEE); /* Device Fatal Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_UTPEE); /* UTP Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_HCFEE); /* Host Ctrlr Fatal Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_SBFEE); /* System Bus Fatal Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_CEFEE); /* Crypto Engine Fatal Error */
- ufshci_mmio_write_4(ctrlr, ie, ie);
-
- /* TODO: Initialize interrupt Aggregation Control Register (UTRIACR) */
-
/* TODO: Separate IO and Admin slot */
+
/*
* max_hw_pend_io is the number of slots in the transfer_req_queue.
* Reduce num_entries by one to reserve an admin slot.
*/
ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries - 1;
+ /* Create a thread for the taskqueue. */
+ ctrlr->taskqueue = taskqueue_create("ufshci_taskq", M_WAITOK,
+ taskqueue_thread_enqueue, &ctrlr->taskqueue);
+ taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "ufshci taskq");
+
+ TASK_INIT(&ctrlr->reset_task, 0, ufshci_ctrlr_reset_task, ctrlr);
+
return (0);
}
@@ -208,50 +404,21 @@ ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev)
bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id,
ctrlr->resource);
nores:
+ KASSERT(!mtx_owned(&ctrlr->uic_cmd_lock),
+ ("destroying uic_cmd_lock while still owned"));
mtx_destroy(&ctrlr->uic_cmd_lock);
+
+ KASSERT(!mtx_owned(&ctrlr->sc_mtx),
+ ("destroying sc_mtx while still owned"));
mtx_destroy(&ctrlr->sc_mtx);
return;
}
-int
+void
ufshci_ctrlr_reset(struct ufshci_controller *ctrlr)
{
- uint32_t ie;
- int error;
-
- /* Backup and disable all interrupts */
- ie = ufshci_mmio_read_4(ctrlr, ie);
- ufshci_mmio_write_4(ctrlr, ie, 0);
-
- /* Release resources */
- ufshci_utmr_req_queue_destroy(ctrlr);
- ufshci_utr_req_queue_destroy(ctrlr);
-
- /* Reset Host Controller */
- error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
- if (error)
- return (error);
-
- /* Send DME_LINKSTARTUP command to start the link startup procedure */
- error = ufshci_uic_send_dme_link_startup(ctrlr);
- if (error)
- return (error);
-
- /* Enable interrupts */
- ufshci_mmio_write_4(ctrlr, ie, ie);
-
- /* Allocate and initialize UTP Task Management Request List. */
- error = ufshci_utmr_req_queue_construct(ctrlr);
- if (error)
- return (error);
-
- /* Allocate and initialize UTP Transfer Request List or SQ/CQ. */
- error = ufshci_utr_req_queue_construct(ctrlr);
- if (error)
- return (error);
-
- return (0);
+ taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}
int
@@ -295,84 +462,6 @@ ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr)
return (0);
}
-static void
-ufshci_ctrlr_fail(struct ufshci_controller *ctrlr, bool admin_also)
-{
- printf("ufshci(4): ufshci_ctrlr_fail\n");
-
- ctrlr->is_failed = true;
-
- /* TODO: task_mgmt_req_queue should be handled as fail */
-
- ufshci_req_queue_fail(ctrlr,
- &ctrlr->transfer_req_queue.hwq[UFSHCI_SDB_Q]);
-}
-
-static void
-ufshci_ctrlr_start(struct ufshci_controller *ctrlr)
-{
- TSENTER();
-
- if (ufshci_ctrlr_send_nop(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Initialize UFS target drvice */
- if (ufshci_dev_init(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Initialize Reference Clock */
- if (ufshci_dev_init_reference_clock(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Initialize unipro */
- if (ufshci_dev_init_unipro(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /*
- * Initialize UIC Power Mode
- * QEMU UFS devices do not support unipro and power mode.
- */
- if (!(ctrlr->quirks & UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE) &&
- ufshci_dev_init_uic_power_mode(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Initialize UFS Power Mode */
- if (ufshci_dev_init_ufs_power_mode(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Read Controller Descriptor (Device, Geometry) */
- if (ufshci_dev_get_descriptor(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- if (ufshci_dev_config_write_booster(ctrlr)) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* TODO: Configure Background Operations */
-
- if (ufshci_sim_attach(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- TSEXIT();
-}
-
void
ufshci_ctrlr_start_config_hook(void *arg)
{
@@ -382,9 +471,9 @@ ufshci_ctrlr_start_config_hook(void *arg)
if (ufshci_utmr_req_queue_enable(ctrlr) == 0 &&
ufshci_utr_req_queue_enable(ctrlr) == 0)
- ufshci_ctrlr_start(ctrlr);
+ ufshci_ctrlr_start(ctrlr, false);
else
- ufshci_ctrlr_fail(ctrlr, false);
+ ufshci_ctrlr_fail(ctrlr);
ufshci_sysctl_initialize_ctrlr(ctrlr);
config_intrhook_disestablish(&ctrlr->config_hook);
diff --git a/sys/dev/ufshci/ufshci_ctrlr_cmd.c b/sys/dev/ufshci/ufshci_ctrlr_cmd.c
index 71d163d998af..253f31a93c2e 100644
--- a/sys/dev/ufshci/ufshci_ctrlr_cmd.c
+++ b/sys/dev/ufshci/ufshci_ctrlr_cmd.c
@@ -15,7 +15,7 @@ ufshci_ctrlr_cmd_send_task_mgmt_request(struct ufshci_controller *ctrlr,
struct ufshci_request *req;
struct ufshci_task_mgmt_request_upiu *upiu;
- req = ufshci_allocate_request_vaddr(NULL, 0, M_WAITOK, cb_fn, cb_arg);
+ req = ufshci_allocate_request_vaddr(NULL, 0, M_NOWAIT, cb_fn, cb_arg);
req->request_size = sizeof(struct ufshci_task_mgmt_request_upiu);
req->response_size = sizeof(struct ufshci_task_mgmt_response_upiu);
diff --git a/sys/dev/ufshci/ufshci_dev.c b/sys/dev/ufshci/ufshci_dev.c
index dd196b1d638b..975468e5156f 100644
--- a/sys/dev/ufshci/ufshci_dev.c
+++ b/sys/dev/ufshci/ufshci_dev.c
@@ -774,4 +774,3 @@ out:
ufshci_dev_disable_write_booster(ctrlr);
return (error);
}
-
diff --git a/sys/dev/ufshci/ufshci_pci.c b/sys/dev/ufshci/ufshci_pci.c
index d64b7526f713..992026fd4f4d 100644
--- a/sys/dev/ufshci/ufshci_pci.c
+++ b/sys/dev/ufshci/ufshci_pci.c
@@ -49,7 +49,8 @@ static struct _pcsid {
uint32_t ref_clk;
uint32_t quirks;
} pci_ids[] = { { 0x131b36, "QEMU UFS Host Controller", UFSHCI_REF_CLK_19_2MHz,
- UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE },
+ UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE |
+ UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK },
{ 0x98fa8086, "Intel Lakefield UFS Host Controller",
UFSHCI_REF_CLK_19_2MHz,
UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE |
diff --git a/sys/dev/ufshci/ufshci_private.h b/sys/dev/ufshci/ufshci_private.h
index 2e033f84c373..ec388c06e248 100644
--- a/sys/dev/ufshci/ufshci_private.h
+++ b/sys/dev/ufshci/ufshci_private.h
@@ -68,7 +68,6 @@ struct ufshci_request {
bool is_admin;
int32_t retries;
bool payload_valid;
- bool timeout;
bool spare[2]; /* Future use */
STAILQ_ENTRY(ufshci_request) stailq;
};
@@ -82,6 +81,7 @@ enum ufshci_slot_state {
};
struct ufshci_tracker {
+ TAILQ_ENTRY(ufshci_tracker) tailq;
struct ufshci_request *req;
struct ufshci_req_queue *req_queue;
struct ufshci_hw_queue *hwq;
@@ -121,6 +121,8 @@ struct ufshci_qops {
struct ufshci_req_queue *req_queue);
int (*enable)(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue);
+ void (*disable)(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue);
int (*reserve_slot)(struct ufshci_req_queue *req_queue,
struct ufshci_tracker **tr);
int (*reserve_admin_slot)(struct ufshci_req_queue *req_queue,
@@ -137,16 +139,27 @@ struct ufshci_qops {
#define UFSHCI_SDB_Q 0 /* Queue number for a single doorbell queue */
+enum ufshci_recovery {
+ RECOVERY_NONE = 0, /* Normal operations */
+ RECOVERY_WAITING, /* Waiting for the reset to complete */
+};
+
/*
* Generic queue container used by both SDB (fixed 32-slot bitmap) and MCQ
* (ring buffer) modes. Fields are shared; some such as sq_head, sq_tail and
* cq_head are not used in SDB but used in MCQ.
*/
struct ufshci_hw_queue {
+ struct ufshci_controller *ctrlr;
+ struct ufshci_req_queue *req_queue;
uint32_t id;
int domain;
int cpu;
+ struct callout timer; /* Protected by recovery_lock */
+ bool timer_armed; /* Protected by recovery_lock */
+ enum ufshci_recovery recovery_state; /* Protected by recovery_lock */
+
union {
struct ufshci_utp_xfer_req_desc *utrd;
struct ufshci_utp_task_mgmt_req_desc *utmrd;
@@ -161,6 +174,9 @@ struct ufshci_hw_queue {
uint32_t num_entries;
uint32_t num_trackers;
+ TAILQ_HEAD(, ufshci_tracker) free_tr;
+ TAILQ_HEAD(, ufshci_tracker) outstanding_tr;
+
/*
* A Request List using the single doorbell method uses a dedicated
* ufshci_tracker, one per slot.
@@ -177,7 +193,13 @@ struct ufshci_hw_queue {
int64_t num_retries;
int64_t num_failures;
+ /*
+ * Each lock may be acquired independently.
+ * When both are required, acquire them in this order to avoid
+ * deadlocks. (recovery_lock -> qlock)
+ */
struct mtx_padalign qlock;
+ struct mtx_padalign recovery_lock;
};
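
[Editorial note: the comment above fixes the lock order as recovery_lock before qlock; a short sketch of the discipline when both are needed, assuming the driver's own header.]

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>

    #include "ufshci_private.h"

    /* Illustrative: honoring the documented order (recovery_lock -> qlock). */
    static void
    example_with_both_locks(struct ufshci_hw_queue *hwq)
    {
    	mtx_lock(&hwq->recovery_lock);	/* First: recovery state machine. */
    	mtx_lock(&hwq->qlock);		/* Second: submission/tracker state. */
    	/* ... inspect outstanding trackers, decide on recovery ... */
    	mtx_unlock(&hwq->qlock);
    	mtx_unlock(&hwq->recovery_lock);
    }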
struct ufshci_req_queue {
@@ -242,6 +264,9 @@ struct ufshci_controller {
4 /* Need to wait 1250us after power mode change */
#define UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY \
8 /* Need to change the number of lanes before changing HS-GEAR. */
+#define UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK \
+ 16 /* QEMU does not support Task Management Request */
+
uint32_t ref_clk;
struct cam_sim *ufshci_sim;
@@ -264,6 +289,9 @@ struct ufshci_controller {
/* Fields for tracking progress during controller initialization. */
struct intr_config_hook config_hook;
+ struct task reset_task;
+ struct taskqueue *taskqueue;
+
/* For shared legacy interrupt. */
int rid;
struct resource *res;
@@ -272,6 +300,8 @@ struct ufshci_controller {
uint32_t major_version;
uint32_t minor_version;
+ uint32_t enable_aborts;
+
uint32_t num_io_queues;
uint32_t max_hw_pend_io;
@@ -345,7 +375,7 @@ void ufshci_sim_detach(struct ufshci_controller *ctrlr);
/* Controller */
int ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev);
void ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev);
-int ufshci_ctrlr_reset(struct ufshci_controller *ctrlr);
+void ufshci_ctrlr_reset(struct ufshci_controller *ctrlr);
/* ctrlr defined as void * to allow use with config_intrhook. */
void ufshci_ctrlr_start_config_hook(void *arg);
void ufshci_ctrlr_poll(struct ufshci_controller *ctrlr);
@@ -388,7 +418,9 @@ int ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr);
int ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr);
void ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr);
void ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr);
+void ufshci_utmr_req_queue_disable(struct ufshci_controller *ctrlr);
int ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr);
+void ufshci_utr_req_queue_disable(struct ufshci_controller *ctrlr);
int ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr);
void ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
struct ufshci_hw_queue *hwq);
@@ -404,6 +436,8 @@ void ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue);
struct ufshci_hw_queue *ufshci_req_sdb_get_hw_queue(
struct ufshci_req_queue *req_queue);
+void ufshci_req_sdb_disable(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
@@ -489,13 +523,12 @@ _ufshci_allocate_request(const int how, ufshci_cb_fn_t cb_fn, void *cb_arg)
struct ufshci_request *req;
KASSERT(how == M_WAITOK || how == M_NOWAIT,
- ("nvme_allocate_request: invalid how %d", how));
+ ("ufshci_allocate_request: invalid how %d", how));
req = malloc(sizeof(*req), M_UFSHCI, how | M_ZERO);
if (req != NULL) {
req->cb_fn = cb_fn;
req->cb_arg = cb_arg;
- req->timeout = true;
}
return (req);
}
diff --git a/sys/dev/ufshci/ufshci_req_queue.c b/sys/dev/ufshci/ufshci_req_queue.c
index bb6efa6d2ccc..7aa164d00bec 100644
--- a/sys/dev/ufshci/ufshci_req_queue.c
+++ b/sys/dev/ufshci/ufshci_req_queue.c
@@ -24,6 +24,7 @@ static const struct ufshci_qops sdb_utmr_qops = {
.destroy = ufshci_req_sdb_destroy,
.get_hw_queue = ufshci_req_sdb_get_hw_queue,
.enable = ufshci_req_sdb_enable,
+ .disable = ufshci_req_sdb_disable,
.reserve_slot = ufshci_req_sdb_reserve_slot,
.reserve_admin_slot = ufshci_req_sdb_reserve_slot,
.ring_doorbell = ufshci_req_sdb_utmr_ring_doorbell,
@@ -38,6 +39,7 @@ static const struct ufshci_qops sdb_utr_qops = {
.destroy = ufshci_req_sdb_destroy,
.get_hw_queue = ufshci_req_sdb_get_hw_queue,
.enable = ufshci_req_sdb_enable,
+ .disable = ufshci_req_sdb_disable,
.reserve_slot = ufshci_req_sdb_reserve_slot,
.reserve_admin_slot = ufshci_req_sdb_reserve_slot,
.ring_doorbell = ufshci_req_sdb_utr_ring_doorbell,
@@ -74,6 +76,13 @@ ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr)
&ctrlr->task_mgmt_req_queue);
}
+void
+ufshci_utmr_req_queue_disable(struct ufshci_controller *ctrlr)
+{
+ ctrlr->task_mgmt_req_queue.qops.disable(ctrlr,
+ &ctrlr->task_mgmt_req_queue);
+}
+
int
ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr)
{
@@ -109,6 +118,13 @@ ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr)
&ctrlr->transfer_req_queue);
}
+void
+ufshci_utr_req_queue_disable(struct ufshci_controller *ctrlr)
+{
+ ctrlr->transfer_req_queue.qops.disable(ctrlr,
+ &ctrlr->transfer_req_queue);
+}
+
int
ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr)
{
@@ -226,31 +242,30 @@ void
ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
{
struct ufshci_req_queue *req_queue = tr->req_queue;
+ struct ufshci_hw_queue *hwq = tr->hwq;
struct ufshci_request *req = tr->req;
struct ufshci_completion cpl;
uint8_t ocs;
bool retry, error, retriable;
- mtx_assert(&tr->hwq->qlock, MA_NOTOWNED);
+ mtx_assert(&hwq->qlock, MA_NOTOWNED);
/* Copy the response from the Request Descriptor or UTP Command
* Descriptor. */
+ cpl.size = tr->response_size;
if (req_queue->is_task_mgmt) {
- cpl.size = tr->response_size;
memcpy(&cpl.response_upiu,
- (void *)tr->hwq->utmrd[tr->slot_num].response_upiu,
- cpl.size);
+ (void *)hwq->utmrd[tr->slot_num].response_upiu, cpl.size);
- ocs = tr->hwq->utmrd[tr->slot_num].overall_command_status;
+ ocs = hwq->utmrd[tr->slot_num].overall_command_status;
} else {
bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- cpl.size = tr->response_size;
memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu,
cpl.size);
- ocs = tr->hwq->utrd[tr->slot_num].overall_command_status;
+ ocs = hwq->utrd[tr->slot_num].overall_command_status;
}
error = ufshci_req_queue_response_is_error(req_queue, ocs,
@@ -262,9 +277,9 @@ ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
retry = error && retriable &&
req->retries < req_queue->ctrlr->retry_count;
if (retry)
- tr->hwq->num_retries++;
+ hwq->num_retries++;
if (error && req->retries >= req_queue->ctrlr->retry_count && retriable)
- tr->hwq->num_failures++;
+ hwq->num_failures++;
KASSERT(tr->req, ("there is no request assigned to the tracker\n"));
KASSERT(cpl.response_upiu.header.task_tag ==
@@ -282,7 +297,7 @@ ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
req->cb_fn(req->cb_arg, &cpl, error);
}
- mtx_lock(&tr->hwq->qlock);
+ mtx_lock(&hwq->qlock);
/* Clear the UTRL Completion Notification register */
req_queue->qops.clear_cpl_ntf(req_queue->ctrlr, tr);
@@ -301,6 +316,9 @@ ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
ufshci_free_request(req);
tr->req = NULL;
tr->slot_state = UFSHCI_SLOT_STATE_FREE;
+
+ TAILQ_REMOVE(&hwq->outstanding_tr, tr, tailq);
+ TAILQ_INSERT_HEAD(&hwq->free_tr, tr, tailq);
}
mtx_unlock(&tr->hwq->qlock);
@@ -309,7 +327,16 @@ ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
bool
ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue)
{
- return (req_queue->qops.process_cpl(req_queue));
+ struct ufshci_hw_queue *hwq;
+ bool done;
+
+ hwq = req_queue->qops.get_hw_queue(req_queue);
+
+ mtx_lock(&hwq->recovery_lock);
+ done = req_queue->qops.process_cpl(req_queue);
+ mtx_unlock(&hwq->recovery_lock);
+
+ return (done);
}
static void
@@ -427,6 +454,225 @@ ufshci_req_queue_fill_utr_descriptor(struct ufshci_utp_xfer_req_desc *desc,
desc->prdt_length = prdt_entry_cnt;
}
+static void
+ufshci_req_queue_timeout_recovery(struct ufshci_controller *ctrlr,
+ struct ufshci_hw_queue *hwq)
+{
+ /* TODO: Step 2. Logical unit reset */
+ /* TODO: Step 3. Target device reset */
+ /* TODO: Step 4. Bus reset */
+
+ /*
+ * Step 5. All previous recovery steps have failed or timed out.
+ * Reset the host controller as a last resort.
+ */
+ ufshci_printf(ctrlr,
+ "Recovery step 5: Resetting controller due to a timeout.\n");
+ hwq->recovery_state = RECOVERY_WAITING;
+
+ ufshci_ctrlr_reset(ctrlr);
+}
+
+static void
+ufshci_abort_complete(void *arg, const struct ufshci_completion *status,
+ bool error)
+{
+ struct ufshci_tracker *tr = arg;
+
+ /*
+ * We still need to check the active tracker array, to cover race where
+ * I/O timed out at same time controller was completing the I/O. An
+ * abort request always is on the Task Management Request queue, but
+ * affects either an Task Management Request or an I/O (UTRL) queue, so
+ * take the appropriate queue lock for the original command's queue,
+ * since we'll need it to avoid races with the completion code and to
+ * complete the command manually.
+ */
+ mtx_lock(&tr->hwq->qlock);
+ if (tr->slot_state != UFSHCI_SLOT_STATE_FREE) {
+ mtx_unlock(&tr->hwq->qlock);
+ /*
+ * An I/O has timed out, and the controller was unable to abort
+ * it for some reason. And we've not processed a completion for
+ * it yet. Construct a fake completion status, and then complete
+ * the I/O's tracker manually.
+ */
+ ufshci_printf(tr->hwq->ctrlr,
+ "abort task request failed, aborting task manually\n");
+ ufshci_req_queue_manual_complete_tracker(tr,
+ UFSHCI_DESC_ABORTED, UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
+
+ if ((status->response_upiu.task_mgmt_response_upiu
+ .output_param1 ==
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_COMPLETE) ||
+ (status->response_upiu.task_mgmt_response_upiu
+ .output_param1 ==
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_SUCCEEDED)) {
+ ufshci_printf(tr->hwq->ctrlr,
+ "Warning: the abort task request completed \
+ successfully, but the original task is still incomplete.");
+ return;
+ }
+
+ /* Abort Task failed. Perform recovery steps 2-5 */
+ ufshci_req_queue_timeout_recovery(tr->hwq->ctrlr, tr->hwq);
+ } else {
+ mtx_unlock(&tr->hwq->qlock);
+ }
+}
+
+static void
+ufshci_req_queue_timeout(void *arg)
+{
+ struct ufshci_hw_queue *hwq = arg;
+ struct ufshci_controller *ctrlr = hwq->ctrlr;
+ struct ufshci_tracker *tr;
+ sbintime_t now;
+ bool idle = true;
+ bool fast;
+
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+
+ /*
+ * If the controller has failed, stop polling. This ensures that any
+ * failure processing that races with the hwq timeout will fail safely.
+ */
+ if (ctrlr->is_failed) {
+ ufshci_printf(ctrlr,
+ "Failed controller, stopping watchdog timeout.\n");
+ hwq->timer_armed = false;
+ return;
+ }
+
+ /*
+ * Shutdown condition: We set hwq->timer_armed to false in
+ * ufshci_req_sdb_destroy before calling callout_drain. When we call
+ * that, this routine might get called one last time. Exit w/o setting a
+ * timeout. None of the watchdog stuff needs to be done since we're
+ * destroying the hwq.
+ */
+ if (!hwq->timer_armed) {
+ ufshci_printf(ctrlr,
+ "Timeout fired during ufshci_utr_req_queue_destroy\n");
+ return;
+ }
+
+ switch (hwq->recovery_state) {
+ case RECOVERY_NONE:
+ /*
+ * See if there's any recovery needed. First, do a fast check to
+ * see if anything could have timed out. If not, then skip
+ * everything else.
+ */
+ fast = false;
+ mtx_lock(&hwq->qlock);
+ now = getsbinuptime();
+ TAILQ_FOREACH(tr, &hwq->outstanding_tr, tailq) {
+ /*
+ * If the first real transaction has not timed out, then
+ * we're done. Otherwise, we try recovery.
+ */
+ idle = false;
+ if (now <= tr->deadline)
+ fast = true;
+ break;
+ }
+ mtx_unlock(&hwq->qlock);
+ if (idle || fast)
+ break;
+
+ /*
+ * There's a stale transaction at the start of the queue whose
+ * deadline has passed. Poll the competions as a last-ditch
+ * effort in case an interrupt has been missed.
+ */
+ hwq->req_queue->qops.process_cpl(hwq->req_queue);
+
+ /*
+ * Now that we've run the ISR, re-check to see if there are any
+ * timed-out commands and abort them or reset the controller if so.
+ */
+ mtx_lock(&hwq->qlock);
+ idle = true;
+ TAILQ_FOREACH(tr, &hwq->outstanding_tr, tailq) {
+ /*
+ * If we know this tracker hasn't timed out, we also
+ * know all subsequent ones haven't timed out. The tr
+ * queue is in submission order and all normal commands
+ * in a queue have the same timeout (or the user changed the
+ * timeout, in which case we still time out eventually).
+ */
+ idle = false;
+ if (now <= tr->deadline)
+ break;
+
+ /*
+ * Timeout recovery is performed in five steps. If
+ * recovery fails at any step, the process continues to
+ * the next one:
+ * Step 1. Abort task
+ * Step 2. Logical unit reset (TODO)
+ * Step 3. Target device reset (TODO)
+ * Step 4. Bus reset (TODO)
+ * Step 5. Host controller reset
+ *
+ * If the timeout occurred in the Task Management
+ * Request queue, ignore Step 1.
+ */
+ if (ctrlr->enable_aborts &&
+ !hwq->req_queue->is_task_mgmt &&
+ tr->req->cb_fn != ufshci_abort_complete) {
+ /*
+ * Step 1. Timeout expired, abort the task.
+ *
+ * This isn't an abort command, ask for a
+ * hardware abort. This goes to the Task
+ * Management Request queue which will reset the
+ * task if it times out.
+ */
+ ufshci_printf(ctrlr,
+ "Recovery step 1: Timeout occurred. aborting the task(%d).\n",
+ tr->req->request_upiu.header.task_tag);
+ ufshci_ctrlr_cmd_send_task_mgmt_request(ctrlr,
+ ufshci_abort_complete, tr,
+ UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK,
+ tr->req->request_upiu.header.lun,
+ tr->req->request_upiu.header.task_tag, 0);
+ } else {
+ /* Recovery Step 2-5 */
+ ufshci_req_queue_timeout_recovery(ctrlr, hwq);
+ idle = false;
+ break;
+ }
+ }
+ mtx_unlock(&hwq->qlock);
+ break;
+
+ case RECOVERY_WAITING:
+ /*
+ * These messages aren't interesting while we're suspended. We
+ * put the queues into waiting state while suspending.
+ * Suspending takes a while, so we'll see these during that time
+ * and they aren't diagnostic. At other times, they indicate a
+ * problem that's worth complaining about.
+ */
+ if (!device_is_suspended(ctrlr->dev))
+ ufshci_printf(ctrlr, "Waiting for reset to complete\n");
+ idle = false; /* We want to keep polling */
+ break;
+ }
+
+ /*
+ * Rearm the timeout.
+ */
+ if (!idle) {
+ callout_schedule_sbt(&hwq->timer, SBT_1S / 2, SBT_1S / 2, 0);
+ } else {
+ hwq->timer_armed = false;
+ }
+}
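
The five-step ladder in the comment above is escalating recovery: try the cheapest action first and fall through to progressively heavier resets only when the previous step fails. A minimal, self-contained sketch of that control flow (the function names are hypothetical stand-ins, not the driver's API):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical recovery actions, cheapest first (steps 1..5 above). */
    static bool try_abort_task(void)   { return false; }
    static bool try_lun_reset(void)    { return false; }
    static bool try_device_reset(void) { return false; }
    static bool try_bus_reset(void)    { return false; }
    static bool try_host_reset(void)   { return true; } /* last resort */

    int
    main(void)
    {
        bool (*steps[])(void) = { try_abort_task, try_lun_reset,
            try_device_reset, try_bus_reset, try_host_reset };

        for (size_t i = 0; i < sizeof(steps) / sizeof(steps[0]); i++) {
            if (steps[i]()) {
                printf("recovered at step %zu\n", i + 1);
                return (0);
            }
        }
        printf("recovery failed\n");
        return (1);
    }
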
+
/*
* Submit the tracker to the hardware.
*/
@@ -436,13 +682,30 @@ ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
{
struct ufshci_controller *ctrlr = req_queue->ctrlr;
struct ufshci_request *req = tr->req;
+ struct ufshci_hw_queue *hwq;
uint64_t ucd_paddr;
uint16_t request_len, response_off, response_len;
uint8_t slot_num = tr->slot_num;
+ int timeout;
- mtx_assert(&req_queue->qops.get_hw_queue(req_queue)->qlock, MA_OWNED);
+ hwq = req_queue->qops.get_hw_queue(req_queue);
+
+ mtx_assert(&hwq->qlock, MA_OWNED);
- /* TODO: Check timeout */
+ if (req->cb_fn == ufshci_completion_poll_cb)
+ timeout = 1;
+ else
+ timeout = ctrlr->timeout_period;
+ tr->deadline = getsbinuptime() + timeout * SBT_1S;
+ if (!hwq->timer_armed) {
+ hwq->timer_armed = true;
+ /*
+ * It wakes up once every 0.5 seconds to check if the deadline
+ * has passed.
+ */
+ callout_reset_sbt_on(&hwq->timer, SBT_1S / 2, SBT_1S / 2,
+ ufshci_req_queue_timeout, hwq, hwq->cpu, 0);
+ }
if (req_queue->is_task_mgmt) {
/* Prepare UTP Task Management Request Descriptor. */
@@ -508,6 +771,9 @@ _ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
tr->deadline = SBT_MAX;
tr->req = req;
+ TAILQ_REMOVE(&tr->hwq->free_tr, tr, tailq);
+ TAILQ_INSERT_TAIL(&tr->hwq->outstanding_tr, tr, tailq);
+
ufshci_req_queue_submit_tracker(req_queue, tr, req->data_direction);
return (0);
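
A note on the deadline arithmetic above: sbintime_t is a signed 64-bit Q32.32 fixed-point count of seconds, and SBT_1S is 1 << 32, so timeout * SBT_1S converts a whole-second timeout into that format. The sketch below reproduces the watchdog's per-tracker deadline test in portable userland C; the clock source is swapped for CLOCK_MONOTONIC, so this is an illustration rather than kernel code:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    typedef int64_t sbt_t;               /* Q32.32 seconds, like sbintime_t */
    #define DEMO_SBT_1S ((sbt_t)1 << 32)

    static sbt_t
    now_sbt(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        /* Seconds in the high 32 bits, fraction in the low 32 bits. */
        return ((sbt_t)ts.tv_sec << 32) +
            ((sbt_t)ts.tv_nsec << 32) / 1000000000;
    }

    int
    main(void)
    {
        int timeout = 30;                /* seconds */
        sbt_t deadline = now_sbt() + timeout * DEMO_SBT_1S;

        /* The watchdog's per-tracker test: timed out once now > deadline. */
        printf("timed out: %s\n", now_sbt() > deadline ? "yes" : "no");
        return (0);
    }
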
diff --git a/sys/dev/ufshci/ufshci_req_sdb.c b/sys/dev/ufshci/ufshci_req_sdb.c
index 834a459d48e3..ca47aa159c5b 100644
--- a/sys/dev/ufshci/ufshci_req_sdb.c
+++ b/sys/dev/ufshci/ufshci_req_sdb.c
@@ -40,6 +40,8 @@ ufshci_req_sdb_cmd_desc_destroy(struct ufshci_req_queue *req_queue)
bus_dma_tag_destroy(req_queue->dma_tag_ucd);
req_queue->dma_tag_ucd = NULL;
}
+
+ free(req_queue->hwq->ucd_bus_addr, M_UFSHCI);
}
static void
@@ -74,6 +76,10 @@ ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue,
uint8_t *ucdmem;
int i, error;
+ req_queue->hwq->ucd_bus_addr = malloc(sizeof(bus_addr_t) *
+ req_queue->num_trackers,
+ M_UFSHCI, M_ZERO | M_NOWAIT);
+
/*
* Each component must be page aligned, and individual PRP lists
* cannot cross a page boundary.
@@ -152,6 +158,9 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
uint64_t queuemem_phys;
uint8_t *queuemem;
struct ufshci_tracker *tr;
+ const size_t lock_name_len = 32;
+ char qlock_name[lock_name_len], recovery_lock_name[lock_name_len];
+ const char *base;
int i, error;
req_queue->ctrlr = ctrlr;
@@ -169,11 +178,21 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
hwq = &req_queue->hwq[UFSHCI_SDB_Q];
hwq->num_entries = req_queue->num_entries;
hwq->num_trackers = req_queue->num_trackers;
- req_queue->hwq->ucd_bus_addr = malloc(sizeof(bus_addr_t) *
- req_queue->num_trackers,
- M_UFSHCI, M_ZERO | M_NOWAIT);
+ hwq->ctrlr = ctrlr;
+ hwq->req_queue = req_queue;
+
+ base = is_task_mgmt ? "ufshci utmrq" : "ufshci utrq";
+ snprintf(qlock_name, sizeof(qlock_name), "%s #%d lock", base,
+ UFSHCI_SDB_Q);
+ snprintf(recovery_lock_name, sizeof(recovery_lock_name),
+ "%s #%d recovery lock", base, UFSHCI_SDB_Q);
- mtx_init(&hwq->qlock, "ufshci req_queue lock", NULL, MTX_DEF);
+ mtx_init(&hwq->qlock, qlock_name, NULL, MTX_DEF);
+ mtx_init(&hwq->recovery_lock, recovery_lock_name, NULL, MTX_DEF);
+
+ callout_init_mtx(&hwq->timer, &hwq->recovery_lock, 0);
+ hwq->timer_armed = false;
+ hwq->recovery_state = RECOVERY_WAITING;
/*
* Allocate physical memory for request queue (UTP Transfer Request
@@ -219,6 +238,9 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
req_queue->num_entries,
M_UFSHCI, DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
+ TAILQ_INIT(&hwq->free_tr);
+ TAILQ_INIT(&hwq->outstanding_tr);
+
for (i = 0; i < req_queue->num_trackers; i++) {
tr = malloc_domainset(sizeof(struct ufshci_tracker), M_UFSHCI,
DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
@@ -226,6 +248,7 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
tr->req_queue = req_queue;
tr->slot_num = i;
tr->slot_state = UFSHCI_SLOT_STATE_FREE;
+ TAILQ_INSERT_HEAD(&hwq->free_tr, tr, tailq);
hwq->act_tr[i] = tr;
}
@@ -255,8 +278,6 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
ctrlr) != 0) {
ufshci_printf(ctrlr,
"failed to construct cmd descriptor memory\n");
- bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd,
- hwq->queuemem_map);
goto out;
}
@@ -280,6 +301,11 @@ ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr;
int i;
+ mtx_lock(&hwq->recovery_lock);
+ hwq->timer_armed = false;
+ mtx_unlock(&hwq->recovery_lock);
+ callout_drain(&hwq->timer);
+
if (!req_queue->is_task_mgmt)
ufshci_req_sdb_cmd_desc_destroy(&ctrlr->transfer_req_queue);
@@ -305,10 +331,11 @@ ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
hwq->dma_tag_queue = NULL;
}
+ if (mtx_initialized(&hwq->recovery_lock))
+ mtx_destroy(&hwq->recovery_lock);
if (mtx_initialized(&hwq->qlock))
mtx_destroy(&hwq->qlock);
- free(req_queue->hwq->ucd_bus_addr, M_UFSHCI);
free(req_queue->hwq, M_UFSHCI);
}
@@ -318,10 +345,36 @@ ufshci_req_sdb_get_hw_queue(struct ufshci_req_queue *req_queue)
return &req_queue->hwq[UFSHCI_SDB_Q];
}
+void
+ufshci_req_sdb_disable(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue)
+{
+ struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+ struct ufshci_tracker *tr, *tr_temp;
+
+ mtx_lock(&hwq->recovery_lock);
+ mtx_lock(&hwq->qlock);
+
+ if (mtx_initialized(&hwq->recovery_lock))
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+ if (mtx_initialized(&hwq->qlock))
+ mtx_assert(&hwq->qlock, MA_OWNED);
+
+ hwq->recovery_state = RECOVERY_WAITING;
+ TAILQ_FOREACH_SAFE(tr, &hwq->outstanding_tr, tailq, tr_temp) {
+ tr->deadline = SBT_MAX;
+ }
+
+ mtx_unlock(&hwq->qlock);
+ mtx_unlock(&hwq->recovery_lock);
+}
+
int
ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue)
{
+ struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+
if (req_queue->is_task_mgmt) {
uint32_t hcs, utmrldbr, utmrlrsr;
@@ -373,6 +426,14 @@ ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
ufshci_mmio_write_4(ctrlr, utrlrsr, utrlrsr);
}
+ if (mtx_initialized(&hwq->recovery_lock))
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+ if (mtx_initialized(&hwq->qlock))
+ mtx_assert(&hwq->qlock, MA_OWNED);
+ KASSERT(!req_queue->ctrlr->is_failed, ("Enabling a failed hwq\n"));
+
+ hwq->recovery_state = RECOVERY_NONE;
+
return (0);
}
@@ -466,6 +527,8 @@ ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue)
uint8_t slot;
bool done = false;
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+
hwq->num_intr_handler_calls++;
bus_dmamap_sync(hwq->dma_tag_queue, hwq->queuemem_map,
diff --git a/sys/dev/ufshci/ufshci_sim.c b/sys/dev/ufshci/ufshci_sim.c
index db24561f4169..828b520614a5 100644
--- a/sys/dev/ufshci/ufshci_sim.c
+++ b/sys/dev/ufshci/ufshci_sim.c
@@ -241,7 +241,6 @@ ufshci_cam_action(struct cam_sim *sim, union ccb *ccb)
ccb->ccb_h.status = CAM_REQ_CMP;
break;
case XPT_ABORT:
- /* TODO: Implement Task Management CMD*/
ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
break;
case XPT_SET_TRAN_SETTINGS:
diff --git a/sys/dev/usb/wlan/if_mtw.c b/sys/dev/usb/wlan/if_mtw.c
index 137590651948..6967e5081542 100644
--- a/sys/dev/usb/wlan/if_mtw.c
+++ b/sys/dev/usb/wlan/if_mtw.c
@@ -638,6 +638,7 @@ mtw_attach(device_t self)
ic->ic_flags |= IEEE80211_F_DATAPAD;
ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
mtw_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -3131,6 +3132,8 @@ mtw_tx(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
data->ni = ni;
data->ridx = ridx;
+ ieee80211_output_seqno_assign(ni, -1, m);
+
mtw_set_tx_desc(sc, data);
/*
@@ -3390,6 +3393,8 @@ mtw_tx_param(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
break;
data->ridx = ridx;
+ ieee80211_output_seqno_assign(ni, -1, m);
+
mtw_set_tx_desc(sc, data);
MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending raw frame len=%u rate=%u\n",
diff --git a/sys/dev/usb/wlan/if_uath.c b/sys/dev/usb/wlan/if_uath.c
index b49c75032d77..cc303e565bca 100644
--- a/sys/dev/usb/wlan/if_uath.c
+++ b/sys/dev/usb/wlan/if_uath.c
@@ -432,6 +432,8 @@ uath_attach(device_t dev)
/* put a regulatory domain to reveal informations. */
uath_regdomain = sc->sc_devcap.regDomain;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
memset(bands, 0, sizeof(bands));
setbit(bands, IEEE80211_MODE_11B);
setbit(bands, IEEE80211_MODE_11G);
@@ -1548,6 +1550,8 @@ uath_tx_start(struct uath_softc *sc, struct mbuf *m0, struct ieee80211_node *ni,
ieee80211_radiotap_tx(vap, m0);
}
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
wh = mtod(m0, struct ieee80211_frame *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
diff --git a/sys/dev/usb/wlan/if_upgt.c b/sys/dev/usb/wlan/if_upgt.c
index 642631ae34b7..1ab833301b3c 100644
--- a/sys/dev/usb/wlan/if_upgt.c
+++ b/sys/dev/usb/wlan/if_upgt.c
@@ -354,6 +354,8 @@ upgt_attach(device_t dev)
ic->ic_transmit = upgt_transmit;
ic->ic_parent = upgt_parent;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
ieee80211_radiotap_attach(ic,
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
UPGT_TX_RADIOTAP_PRESENT,
@@ -2116,6 +2118,9 @@ upgt_tx_start(struct upgt_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
upgt_set_led(sc, UPGT_LED_BLINK);
+ /* Assign sequence number */
+ ieee80211_output_seqno_assign(ni, -1, m);
+
/*
* Software crypto.
*/
diff --git a/sys/dev/usb/wlan/if_ural.c b/sys/dev/usb/wlan/if_ural.c
index 260d75a9821d..adef924a085c 100644
--- a/sys/dev/usb/wlan/if_ural.c
+++ b/sys/dev/usb/wlan/if_ural.c
@@ -473,6 +473,8 @@ ural_attach(device_t self)
| IEEE80211_C_WPA /* 802.11i */
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
ural_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1073,6 +1075,8 @@ ural_tx_mgt(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
STAILQ_REMOVE_HEAD(&sc->tx_free, next);
sc->tx_nfree--;
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
wh = mtod(m0, struct ieee80211_frame *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
@@ -1229,6 +1233,8 @@ ural_tx_data(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
rate = ieee80211_node_get_txrate_dot11rate(ni);
}
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
diff --git a/sys/dev/usb/wlan/if_urtw.c b/sys/dev/usb/wlan/if_urtw.c
index 439faeefc408..86cf4c653ae7 100644
--- a/sys/dev/usb/wlan/if_urtw.c
+++ b/sys/dev/usb/wlan/if_urtw.c
@@ -884,6 +884,8 @@ urtw_attach(device_t dev)
/* XXX TODO: setup regdomain if URTW_EPROM_CHANPLAN_BY_HW bit is set.*/
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
urtw_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1699,6 +1701,10 @@ urtw_tx_start(struct urtw_softc *sc, struct ieee80211_node *ni, struct mbuf *m0,
ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+
+ /* Assign sequence number */
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
/*
* Software crypto.
*/
diff --git a/sys/dev/usb/wlan/if_zyd.c b/sys/dev/usb/wlan/if_zyd.c
index 1a698caef3c5..7affdcdce089 100644
--- a/sys/dev/usb/wlan/if_zyd.c
+++ b/sys/dev/usb/wlan/if_zyd.c
@@ -384,6 +384,8 @@ zyd_attach(device_t dev)
| IEEE80211_C_WPA /* 802.11i */
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
zyd_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -2463,6 +2465,8 @@ zyd_tx_start(struct zyd_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
}
}
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
diff --git a/sys/fs/fuse/fuse_ipc.c b/sys/fs/fuse/fuse_ipc.c
index a751c09159ff..7f754ab7f1d4 100644
--- a/sys/fs/fuse/fuse_ipc.c
+++ b/sys/fs/fuse/fuse_ipc.c
@@ -193,7 +193,6 @@ fuse_interrupt_send(struct fuse_ticket *otick, int err)
struct fuse_data *data = otick->tk_data;
struct fuse_ticket *tick, *xtick;
struct ucred reused_creds;
- gid_t reused_groups[1];
if (otick->irq_unique == 0) {
/*
@@ -237,8 +236,7 @@ fuse_interrupt_send(struct fuse_ticket *otick, int err)
*/
ftick_hdr = fticket_in_header(otick);
reused_creds.cr_uid = ftick_hdr->uid;
- reused_groups[0] = ftick_hdr->gid;
- reused_creds.cr_groups = reused_groups;
+ reused_creds.cr_gid = ftick_hdr->gid;
fdisp_init(&fdi, sizeof(*fii));
fdisp_make_pid(&fdi, FUSE_INTERRUPT, data, ftick_hdr->nodeid,
ftick_hdr->pid, &reused_creds);
diff --git a/sys/fs/fuse/fuse_vnops.c b/sys/fs/fuse/fuse_vnops.c
index 8712679375c6..5c28db29fc63 100644
--- a/sys/fs/fuse/fuse_vnops.c
+++ b/sys/fs/fuse/fuse_vnops.c
@@ -795,11 +795,15 @@ fuse_vnop_close(struct vop_close_args *ap)
struct mount *mp = vnode_mount(vp);
struct ucred *cred = ap->a_cred;
int fflag = ap->a_fflag;
- struct thread *td = ap->a_td;
- pid_t pid = td->td_proc->p_pid;
+ struct thread *td;
struct fuse_vnode_data *fvdat = VTOFUD(vp);
+ pid_t pid;
int err = 0;
+ /* NB: a_td will be NULL from some async kernel contexts */
+ td = ap->a_td ? ap->a_td : curthread;
+ pid = td->td_proc->p_pid;
+
if (fuse_isdeadfs(vp))
return 0;
if (vnode_isdir(vp))
@@ -838,7 +842,7 @@ fuse_vnop_close(struct vop_close_args *ap)
}
/* TODO: close the file handle, if we're sure it's no longer used */
if ((fvdat->flag & FN_SIZECHANGE) != 0) {
- fuse_vnode_savesize(vp, cred, td->td_proc->p_pid);
+ fuse_vnode_savesize(vp, cred, pid);
}
return err;
}
diff --git a/sys/fs/nfsserver/nfs_nfsdport.c b/sys/fs/nfsserver/nfs_nfsdport.c
index 7040c4afb797..5ba86314ead1 100644
--- a/sys/fs/nfsserver/nfs_nfsdport.c
+++ b/sys/fs/nfsserver/nfs_nfsdport.c
@@ -3478,11 +3478,6 @@ nfsd_excred(struct nfsrv_descript *nd, struct nfsexstuff *exp,
(nd->nd_flag & ND_AUTHNONE) != 0) {
nd->nd_cred->cr_uid = credanon->cr_uid;
nd->nd_cred->cr_gid = credanon->cr_gid;
- /*
- * 'credanon' is already a 'struct ucred' that was built
- * internally with calls to crsetgroups_and_egid(), so
- * we don't need a fallback here.
- */
crsetgroups(nd->nd_cred, credanon->cr_ngroups,
credanon->cr_groups);
} else if ((nd->nd_flag & ND_GSS) == 0) {
diff --git a/sys/i386/include/cpufunc.h b/sys/i386/include/cpufunc.h
index 4bed57b5afbf..b200588b0739 100644
--- a/sys/i386/include/cpufunc.h
+++ b/sys/i386/include/cpufunc.h
@@ -74,7 +74,7 @@ static __inline void
clflushopt(u_long addr)
{
- __asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
+ __asm __volatile("clflushopt %0" : : "m" (*(char *)addr));
}
static __inline void
diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c
index 2a833d2eafbe..19118eb7f275 100644
--- a/sys/kern/kern_descrip.c
+++ b/sys/kern/kern_descrip.c
@@ -658,6 +658,7 @@ kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg)
error = EBADF;
break;
}
+ fsetfl_lock(fp);
do {
tmp = flg = fp->f_flag;
tmp &= ~FCNTLFLAGS;
@@ -677,6 +678,7 @@ kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg)
if (error != 0)
goto revert_nonblock;
}
+ fsetfl_unlock(fp);
fdrop(fp, td);
break;
revert_nonblock:
@@ -691,6 +693,7 @@ revert_flags:
tmp |= got_cleared;
tmp &= ~got_set;
} while (atomic_cmpset_int(&fp->f_flag, flg, tmp) == 0);
+ fsetfl_unlock(fp);
fdrop(fp, td);
break;
diff --git a/sys/kern/kern_prot.c b/sys/kern/kern_prot.c
index 0ca42d640767..a4c5bcc52529 100644
--- a/sys/kern/kern_prot.c
+++ b/sys/kern/kern_prot.c
@@ -291,11 +291,6 @@ sys_getgid(struct thread *td, struct getgid_args *uap)
return (0);
}
-/*
- * Get effective group ID. The "egid" is groups[0], and could be obtained
- * via getgroups. This syscall exists because it is somewhat painful to do
- * correctly in a library function.
- */
#ifndef _SYS_SYSPROTO_H_
struct getegid_args {
int dummy;
@@ -1803,12 +1798,6 @@ groupmember(gid_t gid, const struct ucred *cred)
bool
realgroupmember(gid_t gid, const struct ucred *cred)
{
- /*
- * Although the equality test on 'cr_rgid' below doesn't access
- * 'cr_groups', we check for the latter's length here as we assume that,
- * if 'cr_ngroups' is 0, the passed 'struct ucred' is invalid, and
- * 'cr_rgid' may not have been filled.
- */
groups_check_positive_len(cred->cr_ngroups);
if (gid == cred->cr_rgid)
@@ -1896,19 +1885,22 @@ SYSCTL_INT(_security_bsd, OID_AUTO, see_other_gids, CTLFLAG_RW,
static int
cr_canseeothergids(struct ucred *u1, struct ucred *u2)
{
- if (!see_other_gids) {
- if (realgroupmember(u1->cr_rgid, u2))
- return (0);
+ if (see_other_gids)
+ return (0);
- for (int i = 1; i < u1->cr_ngroups; i++)
- if (realgroupmember(u1->cr_groups[i], u2))
- return (0);
+ /* Restriction in force. */
- if (priv_check_cred(u1, PRIV_SEEOTHERGIDS) != 0)
- return (ESRCH);
- }
+ if (realgroupmember(u1->cr_rgid, u2))
+ return (0);
- return (0);
+ for (int i = 0; i < u1->cr_ngroups; i++)
+ if (realgroupmember(u1->cr_groups[i], u2))
+ return (0);
+
+ if (priv_check_cred(u1, PRIV_SEEOTHERGIDS) == 0)
+ return (0);
+
+ return (ESRCH);
}
/*
@@ -2276,6 +2268,7 @@ cr_xids_subset(struct ucred *active_cred, struct ucred *obj_cred)
}
}
grpsubset = grpsubset &&
+ groupmember(obj_cred->cr_gid, active_cred) &&
groupmember(obj_cred->cr_rgid, active_cred) &&
groupmember(obj_cred->cr_svgid, active_cred);
@@ -2921,8 +2914,8 @@ crextend(struct ucred *cr, int n)
* Normalizes a set of groups to be applied to a 'struct ucred'.
*
* Normalization ensures that the supplementary groups are sorted in ascending
- * order and do not contain duplicates. This allows group_is_supplementary
- * to do a binary search.
+ * order and do not contain duplicates. This allows group_is_supplementary() to
+ * do a binary search.
*/
static void
groups_normalize(int *ngrp, gid_t *groups)
@@ -2985,9 +2978,9 @@ crsetgroups_internal(struct ucred *cr, int ngrp, const gid_t *groups)
* Copy groups in to a credential after expanding it if required.
*
* May sleep in order to allocate memory (except if, e.g., crextend() was called
- * before with 'ngrp' or greater). Truncates the list to ngroups_max if
+ * before with 'ngrp' or greater). Truncates the list to 'ngroups_max' if
* it is too large. Array 'groups' doesn't need to be sorted. 'ngrp' must be
- * strictly positive.
+ * positive.
*/
void
crsetgroups(struct ucred *cr, int ngrp, const gid_t *groups)
@@ -3018,8 +3011,8 @@ crsetgroups(struct ucred *cr, int ngrp, const gid_t *groups)
* Same as crsetgroups() but sets the effective GID as well.
*
* This function ensures that an effective GID is always present in credentials.
- * An empty array will only set the effective GID to the default_egid, while a
- * non-empty array will peel off groups[0] to set as the effective GID and use
+ * An empty array will only set the effective GID to 'default_egid', while
+ * a non-empty array will peel off groups[0] to set as the effective GID and use
* the remainder, if any, as supplementary groups.
*/
void
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index da0efac0598d..8efc0886988b 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -2656,9 +2656,11 @@ ptrace_coredumpreq(struct thread *td, struct proc *p,
return;
}
+ memset(&wctx, 0, sizeof(wctx));
wctx.vp = tcq->tc_vp;
wctx.fcred = NOCRED;
+ memset(&cdw, 0, sizeof(cdw));
cdw.ctx = &wctx;
cdw.write_fn = core_vn_write;
cdw.extend_fn = core_vn_extend;
diff --git a/sys/kern/subr_asan.c b/sys/kern/subr_asan.c
index 464efda1e91a..fee6c1a844e2 100644
--- a/sys/kern/subr_asan.c
+++ b/sys/kern/subr_asan.c
@@ -835,6 +835,7 @@ ASAN_ATOMIC_FUNC_TESTANDSET(32, uint32_t);
ASAN_ATOMIC_FUNC_TESTANDSET(64, uint64_t);
ASAN_ATOMIC_FUNC_TESTANDSET(int, u_int);
ASAN_ATOMIC_FUNC_TESTANDSET(long, u_long);
+ASAN_ATOMIC_FUNC_TESTANDSET(acq_long, u_long);
ASAN_ATOMIC_FUNC_TESTANDSET(ptr, uintptr_t);
ASAN_ATOMIC_FUNC_SWAP(32, uint32_t);
diff --git a/sys/kern/subr_msan.c b/sys/kern/subr_msan.c
index a3238b61482b..883dbd2b7604 100644
--- a/sys/kern/subr_msan.c
+++ b/sys/kern/subr_msan.c
@@ -1301,6 +1301,7 @@ MSAN_ATOMIC_FUNC_TESTANDSET(32, uint32_t);
MSAN_ATOMIC_FUNC_TESTANDSET(64, uint64_t);
MSAN_ATOMIC_FUNC_TESTANDSET(int, u_int);
MSAN_ATOMIC_FUNC_TESTANDSET(long, u_long);
+MSAN_ATOMIC_FUNC_TESTANDSET(acq_long, u_long);
MSAN_ATOMIC_FUNC_TESTANDSET(ptr, uintptr_t);
MSAN_ATOMIC_FUNC_SWAP(32, uint32_t);
diff --git a/sys/kern/subr_param.c b/sys/kern/subr_param.c
index 471640c290a7..a67e5fa6cbff 100644
--- a/sys/kern/subr_param.c
+++ b/sys/kern/subr_param.c
@@ -235,14 +235,11 @@ init_param1(void)
* specification for <limits.h>, paragraph "Runtime Increasable
* Values").
*
- * On the other hand, INT_MAX would result in an overflow for the common
- * 'ngroups_max + 1' computation (to obtain the size of the internal
- * groups array, its first element being reserved for the effective
- * GID). Also, the number of allocated bytes for the group array must
- * not overflow on 32-bit machines. For all these reasons, we limit the
- * number of supplementary groups to some very high number that we
- * expect will never be reached in all practical uses and ensures we
- * avoid the problems just exposed, even if 'gid_t' was to be enlarged
+ * On the other hand, a too high value would result in an overflow when
+ * computing the number of bytes to allocate for the groups array. We
+ * thus limit the number of supplementary groups to some very high
+ * number that we expect will never be reached in all practical uses,
+ * avoiding the problem just exposed even if 'gid_t' were to be enlarged
* by a magnitude.
*/
ngroups_max = NGROUPS_MAX;
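
The overflow the rewritten comment refers to is the byte count 'ngroups * sizeof(gid_t)' wrapping on 32-bit machines when the limit is very large. A hedged illustration of the usual guard against that product overflowing (the helper name is ours, not the kernel's):

    #include <sys/types.h>

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Allocate an array of n gids, refusing a byte count that would wrap. */
    static void *
    alloc_groups(size_t n)
    {
        if (n > SIZE_MAX / sizeof(gid_t))
            return (NULL);           /* n * sizeof(gid_t) would overflow */
        return (malloc(n * sizeof(gid_t)));
    }

    int
    main(void)
    {
        printf("small: %p\n", alloc_groups(16));
        printf("huge:  %p\n", alloc_groups(SIZE_MAX / 2)); /* rejected */
        return (0);
    }
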
diff --git a/sys/kern/subr_power.c b/sys/kern/subr_power.c
index 44ad82860649..eb5bd03f5018 100644
--- a/sys/kern/subr_power.c
+++ b/sys/kern/subr_power.c
@@ -39,14 +39,13 @@
#include <sys/systm.h>
#include <sys/taskqueue.h>
-enum power_stype power_standby_stype = POWER_STYPE_UNKNOWN;
-enum power_stype power_suspend_stype = POWER_STYPE_UNKNOWN;
-enum power_stype power_hibernate_stype = POWER_STYPE_UNKNOWN;
+enum power_stype power_standby_stype = POWER_STYPE_STANDBY;
+enum power_stype power_suspend_stype = POWER_STYPE_SUSPEND_TO_IDLE;
+enum power_stype power_hibernate_stype = POWER_STYPE_HIBERNATE;
static u_int power_pm_type = POWER_PM_TYPE_NONE;
static power_pm_fn_t power_pm_fn = NULL;
static void *power_pm_arg = NULL;
-static bool power_pm_supported[POWER_STYPE_COUNT] = {0};
static struct task power_pm_task;
enum power_stype
@@ -72,26 +71,6 @@ power_stype_to_name(enum power_stype stype)
}
static int
-sysctl_supported_stypes(SYSCTL_HANDLER_ARGS)
-{
- int error;
- struct sbuf sb;
- enum power_stype stype;
-
- sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
- for (stype = 0; stype < POWER_STYPE_COUNT; stype++) {
- if (power_pm_supported[stype])
- sbuf_printf(&sb, "%s ", power_stype_to_name(stype));
- }
- sbuf_trim(&sb);
- sbuf_finish(&sb);
- error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
- sbuf_delete(&sb);
-
- return (error);
-}
-
-static int
power_sysctl_stype(SYSCTL_HANDLER_ARGS)
{
char name[10];
@@ -107,8 +86,7 @@ power_sysctl_stype(SYSCTL_HANDLER_ARGS)
new_stype = power_name_to_stype(name);
if (new_stype == POWER_STYPE_UNKNOWN)
return (EINVAL);
- if (!power_pm_supported[new_stype])
- return (EOPNOTSUPP);
+ /* TODO Check to see if the new stype is supported. */
if (new_stype != old_stype)
*(enum power_stype *)oidp->oid_arg1 = new_stype;
return (0);
@@ -117,9 +95,6 @@ power_sysctl_stype(SYSCTL_HANDLER_ARGS)
static SYSCTL_NODE(_kern, OID_AUTO, power, CTLFLAG_RW, 0,
"Generic power management related sysctls");
-SYSCTL_PROC(_kern_power, OID_AUTO, supported_stype,
- CTLTYPE_STRING | CTLFLAG_RD, 0, 0, sysctl_supported_stypes, "A",
- "List supported sleep types");
SYSCTL_PROC(_kern_power, OID_AUTO, standby, CTLTYPE_STRING | CTLFLAG_RW,
&power_standby_stype, 0, power_sysctl_stype, "A",
"Sleep type to enter on standby");
@@ -139,8 +114,7 @@ power_pm_deferred_fn(void *arg, int pending)
}
int
-power_pm_register(u_int pm_type, power_pm_fn_t pm_fn, void *pm_arg,
- bool pm_supported[static POWER_STYPE_COUNT])
+power_pm_register(u_int pm_type, power_pm_fn_t pm_fn, void *pm_arg)
{
int error;
@@ -149,16 +123,6 @@ power_pm_register(u_int pm_type, power_pm_fn_t pm_fn, void *pm_arg,
power_pm_type = pm_type;
power_pm_fn = pm_fn;
power_pm_arg = pm_arg;
- memcpy(power_pm_supported, pm_supported,
- sizeof(power_pm_supported));
- if (power_pm_supported[POWER_STYPE_STANDBY])
- power_standby_stype = POWER_STYPE_STANDBY;
- if (power_pm_supported[POWER_STYPE_SUSPEND_TO_IDLE])
- power_suspend_stype = POWER_STYPE_SUSPEND_TO_IDLE;
- else if (power_pm_supported[POWER_STYPE_SUSPEND_TO_MEM])
- power_suspend_stype = POWER_STYPE_SUSPEND_TO_MEM;
- if (power_pm_supported[POWER_STYPE_HIBERNATE])
- power_hibernate_stype = POWER_STYPE_HIBERNATE;
error = 0;
TASK_INIT(&power_pm_task, 0, power_pm_deferred_fn, NULL);
} else {
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index a65c3ca128d9..c937f6a82757 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -1515,6 +1515,10 @@ witness_lock(struct lock_object *lock, int flags, const char *file, int line)
else
lock_list = PCPU_PTR(spinlocks);
+ /* Update per-witness last file and line acquire. */
+ w->w_file = file;
+ w->w_line = line;
+
/* Check to see if we are recursing on a lock we already own. */
instance = find_instance(*lock_list, lock);
if (instance != NULL) {
@@ -1522,15 +1526,9 @@ witness_lock(struct lock_object *lock, int flags, const char *file, int line)
CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
td->td_proc->p_pid, lock->lo_name,
instance->li_flags & LI_RECURSEMASK);
- instance->li_file = file;
- instance->li_line = line;
return;
}
- /* Update per-witness last file and line acquire. */
- w->w_file = file;
- w->w_line = line;
-
/* Find the next open lock instance in the list and fill it. */
lle = *lock_list;
if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
diff --git a/sys/kern/syscalls.master b/sys/kern/syscalls.master
index 911f9093824b..967af1f5313c 100644
--- a/sys/kern/syscalls.master
+++ b/sys/kern/syscalls.master
@@ -552,13 +552,13 @@
_Out_writes_bytes_(len/PAGE_SIZE) char *vec
);
}
-79 AUE_GETGROUPS STD|CAPENABLED|COMPAT14 {
+79 AUE_GETGROUPS COMPAT14|CAPENABLED {
int getgroups(
int gidsetsize,
_Out_writes_opt_(gidsetsize) gid_t *gidset
);
}
-80 AUE_SETGROUPS STD|COMPAT14 {
+80 AUE_SETGROUPS COMPAT14 {
int setgroups(
int gidsetsize,
_In_reads_(gidsetsize) const gid_t *gidset
diff --git a/sys/kern/uipc_usrreq.c b/sys/kern/uipc_usrreq.c
index 6138e543fae7..340d84666459 100644
--- a/sys/kern/uipc_usrreq.c
+++ b/sys/kern/uipc_usrreq.c
@@ -3667,11 +3667,14 @@ unp_internalize(struct mbuf *control, struct mchain *mc, struct thread *td)
cmcred->cmcred_uid = td->td_ucred->cr_ruid;
cmcred->cmcred_gid = td->td_ucred->cr_rgid;
cmcred->cmcred_euid = td->td_ucred->cr_uid;
- cmcred->cmcred_ngroups = MIN(td->td_ucred->cr_ngroups,
+ _Static_assert(CMGROUP_MAX >= 1,
+ "Room needed for the effective GID.");
+ cmcred->cmcred_ngroups = MIN(td->td_ucred->cr_ngroups + 1,
CMGROUP_MAX);
- for (i = 0; i < cmcred->cmcred_ngroups; i++)
+ cmcred->cmcred_groups[0] = td->td_ucred->cr_gid;
+ for (i = 1; i < cmcred->cmcred_ngroups; i++)
cmcred->cmcred_groups[i] =
- td->td_ucred->cr_groups[i];
+ td->td_ucred->cr_groups[i - 1];
break;
case SCM_RIGHTS:
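
Since the effective GID no longer lives in cr_groups[0], the hunk above rebuilds the SCM_CREDS wire layout by hand: cmcred_groups[0] carries cr_gid and the supplementary groups follow, shifted up by one. A small userland sketch of the same packing rule (the struct is a simplified stand-in; on FreeBSD, CMGROUP_MAX is 16):

    #include <sys/types.h>

    #include <stdio.h>

    #define DEMO_CMGROUP_MAX 16

    struct cmsgcred_demo {
        int   ngroups;
        gid_t groups[DEMO_CMGROUP_MAX];
    };

    /* Pack the egid first, then as many supplementary groups as fit. */
    static void
    pack_creds(struct cmsgcred_demo *cm, gid_t egid, const gid_t *supp,
        int nsupp)
    {
        cm->ngroups = (nsupp + 1 < DEMO_CMGROUP_MAX) ?
            nsupp + 1 : DEMO_CMGROUP_MAX;
        cm->groups[0] = egid;
        for (int i = 1; i < cm->ngroups; i++)
            cm->groups[i] = supp[i - 1];
    }

    int
    main(void)
    {
        gid_t supp[] = { 20, 5, 100 };
        struct cmsgcred_demo cm;

        pack_creds(&cm, 0, supp, 3);
        for (int i = 0; i < cm.ngroups; i++)
            printf("groups[%d] = %d\n", i, (int)cm.groups[i]);
        return (0);
    }
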
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index f86bda2aa6f0..fe299ecc9c56 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -3567,11 +3567,6 @@ enum vput_op { VRELE, VPUT, VUNREF };
* exclusive lock on the vnode, while it is legal to call here with only a
* shared lock (or no locks). If locking the vnode in an expected manner fails,
* inactive processing gets deferred to the syncer.
- *
- * XXX Some filesystems pass in an exclusively locked vnode and strongly depend
- * on the lock being held all the way until VOP_INACTIVE. This in particular
- * happens with UFS which adds half-constructed vnodes to the hash, where they
- * can be found by other code.
*/
static void
vput_final(struct vnode *vp, enum vput_op func)
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index a4f41192f684..3d4567b6ab1e 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -798,58 +798,82 @@ vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, void *base, size_t len,
}
#if OFF_MAX <= LONG_MAX
-off_t
-foffset_lock(struct file *fp, int flags)
+static void
+file_v_lock(struct file *fp, short lock_bit, short lock_wait_bit)
{
- volatile short *flagsp;
- off_t res;
+ short *flagsp;
short state;
- KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));
-
- if ((flags & FOF_NOLOCK) != 0)
- return (atomic_load_long(&fp->f_offset));
-
- /*
- * According to McKusick the vn lock was protecting f_offset here.
- * It is now protected by the FOFFSET_LOCKED flag.
- */
- flagsp = &fp->f_vnread_flags;
- if (atomic_cmpset_acq_16(flagsp, 0, FOFFSET_LOCKED))
- return (atomic_load_long(&fp->f_offset));
+ flagsp = &fp->f_vflags;
+ state = atomic_load_16(flagsp);
+ if ((state & lock_bit) == 0 &&
+ atomic_cmpset_acq_16(flagsp, state, state | lock_bit))
+ return;
- sleepq_lock(&fp->f_vnread_flags);
+ sleepq_lock(flagsp);
state = atomic_load_16(flagsp);
for (;;) {
- if ((state & FOFFSET_LOCKED) == 0) {
+ if ((state & lock_bit) == 0) {
if (!atomic_fcmpset_acq_16(flagsp, &state,
- FOFFSET_LOCKED))
+ state | lock_bit))
continue;
break;
}
- if ((state & FOFFSET_LOCK_WAITING) == 0) {
+ if ((state & lock_wait_bit) == 0) {
if (!atomic_fcmpset_acq_16(flagsp, &state,
- state | FOFFSET_LOCK_WAITING))
+ state | lock_wait_bit))
continue;
}
DROP_GIANT();
- sleepq_add(&fp->f_vnread_flags, NULL, "vofflock", 0, 0);
- sleepq_wait(&fp->f_vnread_flags, PRI_MAX_KERN);
+ sleepq_add(flagsp, NULL, "vofflock", 0, 0);
+ sleepq_wait(flagsp, PRI_MAX_KERN);
PICKUP_GIANT();
- sleepq_lock(&fp->f_vnread_flags);
+ sleepq_lock(flagsp);
state = atomic_load_16(flagsp);
}
- res = atomic_load_long(&fp->f_offset);
- sleepq_release(&fp->f_vnread_flags);
- return (res);
+ sleepq_release(flagsp);
}
-void
-foffset_unlock(struct file *fp, off_t val, int flags)
+static void
+file_v_unlock(struct file *fp, short lock_bit, short lock_wait_bit)
{
- volatile short *flagsp;
+ short *flagsp;
short state;
+ flagsp = &fp->f_vflags;
+ state = atomic_load_16(flagsp);
+ if ((state & lock_wait_bit) == 0 &&
+ atomic_cmpset_rel_16(flagsp, state, state & ~lock_bit))
+ return;
+
+ sleepq_lock(flagsp);
+ MPASS((*flagsp & lock_bit) != 0);
+ MPASS((*flagsp & lock_wait_bit) != 0);
+ atomic_clear_16(flagsp, lock_bit | lock_wait_bit);
+ sleepq_broadcast(flagsp, SLEEPQ_SLEEP, 0, 0);
+ sleepq_release(flagsp);
+}
+
+off_t
+foffset_lock(struct file *fp, int flags)
+{
+ KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));
+
+ if ((flags & FOF_NOLOCK) == 0) {
+ file_v_lock(fp, FILE_V_FOFFSET_LOCKED,
+ FILE_V_FOFFSET_LOCK_WAITING);
+ }
+
+ /*
+ * According to McKusick the vn lock was protecting f_offset here.
+ * It is now protected by the FOFFSET_LOCKED flag.
+ */
+ return (atomic_load_long(&fp->f_offset));
+}
+
+void
+foffset_unlock(struct file *fp, off_t val, int flags)
+{
KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));
if ((flags & FOF_NOUPDATE) == 0)
@@ -859,21 +883,10 @@ foffset_unlock(struct file *fp, off_t val, int flags)
if ((flags & FOF_NEXTOFF_W) != 0)
fp->f_nextoff[UIO_WRITE] = val;
- if ((flags & FOF_NOLOCK) != 0)
- return;
-
- flagsp = &fp->f_vnread_flags;
- state = atomic_load_16(flagsp);
- if ((state & FOFFSET_LOCK_WAITING) == 0 &&
- atomic_cmpset_rel_16(flagsp, state, 0))
- return;
-
- sleepq_lock(&fp->f_vnread_flags);
- MPASS((fp->f_vnread_flags & FOFFSET_LOCKED) != 0);
- MPASS((fp->f_vnread_flags & FOFFSET_LOCK_WAITING) != 0);
- fp->f_vnread_flags = 0;
- sleepq_broadcast(&fp->f_vnread_flags, SLEEPQ_SLEEP, 0, 0);
- sleepq_release(&fp->f_vnread_flags);
+ if ((flags & FOF_NOLOCK) == 0) {
+ file_v_unlock(fp, FILE_V_FOFFSET_LOCKED,
+ FILE_V_FOFFSET_LOCK_WAITING);
+ }
}
static off_t
@@ -882,7 +895,47 @@ foffset_read(struct file *fp)
return (atomic_load_long(&fp->f_offset));
}
-#else
+
+void
+fsetfl_lock(struct file *fp)
+{
+ file_v_lock(fp, FILE_V_SETFL_LOCKED, FILE_V_SETFL_LOCK_WAITING);
+}
+
+void
+fsetfl_unlock(struct file *fp)
+{
+ file_v_unlock(fp, FILE_V_SETFL_LOCKED, FILE_V_SETFL_LOCK_WAITING);
+}
+
+#else /* OFF_MAX <= LONG_MAX */
+
+static void
+file_v_lock_mtxp(struct file *fp, struct mtx *mtxp, short lock_bit,
+ short lock_wait_bit)
+{
+ mtx_assert(mtxp, MA_OWNED);
+
+ while ((fp->f_vflags & lock_bit) != 0) {
+ fp->f_vflags |= lock_wait_bit;
+ msleep(&fp->f_vflags, mtxp, PRI_MAX_KERN,
+ "vofflock", 0);
+ }
+ fp->f_vflags |= lock_bit;
+}
+
+static void
+file_v_unlock_mtxp(struct file *fp, struct mtx *mtxp, short lock_bit,
+ short lock_wait_bit)
+{
+ mtx_assert(mtxp, MA_OWNED);
+
+ KASSERT((fp->f_vflags & lock_bit) != 0, ("Lost lock_bit"));
+ if ((fp->f_vflags & lock_wait_bit) != 0)
+ wakeup(&fp->f_vflags);
+ fp->f_vflags &= ~(lock_bit | lock_wait_bit);
+}
+
off_t
foffset_lock(struct file *fp, int flags)
{
@@ -894,12 +947,8 @@ foffset_lock(struct file *fp, int flags)
mtxp = mtx_pool_find(mtxpool_sleep, fp);
mtx_lock(mtxp);
if ((flags & FOF_NOLOCK) == 0) {
- while (fp->f_vnread_flags & FOFFSET_LOCKED) {
- fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
- msleep(&fp->f_vnread_flags, mtxp, PRI_MAX_KERN,
- "vofflock", 0);
- }
- fp->f_vnread_flags |= FOFFSET_LOCKED;
+ file_v_lock_mtxp(fp, mtxp, FILE_V_FOFFSET_LOCKED,
+ FILE_V_FOFFSET_LOCK_WAITING);
}
res = fp->f_offset;
mtx_unlock(mtxp);
@@ -922,11 +971,8 @@ foffset_unlock(struct file *fp, off_t val, int flags)
if ((flags & FOF_NEXTOFF_W) != 0)
fp->f_nextoff[UIO_WRITE] = val;
if ((flags & FOF_NOLOCK) == 0) {
- KASSERT((fp->f_vnread_flags & FOFFSET_LOCKED) != 0,
- ("Lost FOFFSET_LOCKED"));
- if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING)
- wakeup(&fp->f_vnread_flags);
- fp->f_vnread_flags = 0;
+ file_v_unlock_mtxp(fp, mtxp, FILE_V_FOFFSET_LOCKED,
+ FILE_V_FOFFSET_LOCK_WAITING);
}
mtx_unlock(mtxp);
}
@@ -937,6 +983,30 @@ foffset_read(struct file *fp)
return (foffset_lock(fp, FOF_NOLOCK));
}
+
+void
+fsetfl_lock(struct file *fp)
+{
+ struct mtx *mtxp;
+
+ mtxp = mtx_pool_find(mtxpool_sleep, fp);
+ mtx_lock(mtxp);
+ file_v_lock_mtxp(fp, mtxp, FILE_V_SETFL_LOCKED,
+ FILE_V_SETFL_LOCK_WAITING);
+ mtx_unlock(mtxp);
+}
+
+void
+fsetfl_unlock(struct file *fp)
+{
+ struct mtx *mtxp;
+
+ mtxp = mtx_pool_find(mtxpool_sleep, fp);
+ mtx_lock(mtxp);
+ file_v_unlock_mtxp(fp, mtxp, FILE_V_SETFL_LOCKED,
+ FILE_V_SETFL_LOCK_WAITING);
+ mtx_unlock(mtxp);
+}
#endif
void
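
The file_v_lock()/file_v_unlock() helpers above generalize the old FOFFSET_LOCKED protocol into a reusable bit lock: an acquire CAS on the flags word is the fast path, and the companion wait bit plus a sleepqueue handle contention. The sketch below shows only the fast-path shape with C11 atomics; the sleepqueue slow path is replaced by a plain retry loop, for illustration only:

    #include <stdatomic.h>
    #include <stdio.h>

    #define LOCK_BIT 0x0001
    #define WAIT_BIT 0x0002 /* set under contention; unused in this sketch */

    static _Atomic unsigned short vflags;

    static void
    bit_lock(void)
    {
        unsigned short state = atomic_load(&vflags);

        for (;;) {
            if ((state & LOCK_BIT) == 0 &&
                atomic_compare_exchange_weak_explicit(&vflags, &state,
                state | LOCK_BIT, memory_order_acquire,
                memory_order_relaxed))
                return;
            /* Contended: the kernel sets WAIT_BIT and sleeps here. */
            state = atomic_load(&vflags);
        }
    }

    static void
    bit_unlock(void)
    {
        /* The kernel wakes sleepers when WAIT_BIT is set; none here. */
        atomic_fetch_and_explicit(&vflags, (unsigned short)~LOCK_BIT,
            memory_order_release);
    }

    int
    main(void)
    {
        bit_lock();
        puts("locked");
        bit_unlock();
        puts("unlocked");
        return (0);
    }
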
diff --git a/sys/net/pfvar.h b/sys/net/pfvar.h
index e0fda082fefe..8b102f198de8 100644
--- a/sys/net/pfvar.h
+++ b/sys/net/pfvar.h
@@ -2543,22 +2543,23 @@ struct mbuf *pf_build_tcp(const struct pf_krule *, sa_family_t,
const struct pf_addr *, const struct pf_addr *,
u_int16_t, u_int16_t, u_int32_t, u_int32_t,
u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
- u_int16_t, u_int16_t, u_int, int);
+ u_int16_t, u_int16_t, u_int, int, u_short *);
void pf_send_tcp(const struct pf_krule *, sa_family_t,
const struct pf_addr *, const struct pf_addr *,
u_int16_t, u_int16_t, u_int32_t, u_int32_t,
u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
- u_int16_t, u_int16_t, int);
+ u_int16_t, u_int16_t, int, u_short *);
void pf_syncookies_init(void);
void pf_syncookies_cleanup(void);
int pf_get_syncookies(struct pfioc_nv *);
int pf_set_syncookies(struct pfioc_nv *);
int pf_synflood_check(struct pf_pdesc *);
-void pf_syncookie_send(struct pf_pdesc *);
+void pf_syncookie_send(struct pf_pdesc *, u_short *);
bool pf_syncookie_check(struct pf_pdesc *);
u_int8_t pf_syncookie_validate(struct pf_pdesc *);
-struct mbuf * pf_syncookie_recreate_syn(struct pf_pdesc *);
+struct mbuf * pf_syncookie_recreate_syn(struct pf_pdesc *,
+ u_short *);
VNET_DECLARE(struct pf_kstatus, pf_status);
#define V_pf_status VNET(pf_status)
diff --git a/sys/netinet6/in6.h b/sys/netinet6/in6.h
index 67c3ccbb1be8..a7fe03b9c3d7 100644
--- a/sys/netinet6/in6.h
+++ b/sys/netinet6/in6.h
@@ -609,6 +609,8 @@ struct ip6_mtuinfo {
/* IPV6CTL_RTMINEXPIRE 26 deprecated */
/* IPV6CTL_RTMAXCACHE 27 deprecated */
+#define IPV6CTL_STABLEADDR_NETIFSRC 30 /* Net_Iface source for semantically opaque addresses (RFC 7217) */
+#define IPV6CTL_STABLEADDR_MAXRETRIES 31 /* max DAD retries for semantically opaque addresses (RFC 7217) */
#define IPV6CTL_USETEMPADDR 32 /* use temporary addresses (RFC3041) */
#define IPV6CTL_TEMPPLTIME 33 /* preferred lifetime for tmpaddrs */
#define IPV6CTL_TEMPVLTIME 34 /* valid lifetime for tmpaddrs */
@@ -617,6 +619,7 @@ struct ip6_mtuinfo {
#define IPV6CTL_PREFER_TEMPADDR 37 /* prefer temporary addr as src */
#define IPV6CTL_ADDRCTLPOLICY 38 /* get/set address selection policy */
#define IPV6CTL_USE_DEFAULTZONE 39 /* use default scope zone */
+#define IPV6CTL_USESTABLEADDR 40 /* use semantically opaque addresses (RFC7217) */
#define IPV6CTL_MAXFRAGS 41 /* max fragments */
#if 0
diff --git a/sys/netinet6/in6_ifattach.c b/sys/netinet6/in6_ifattach.c
index cc149616006e..57fe12a1c93b 100644
--- a/sys/netinet6/in6_ifattach.c
+++ b/sys/netinet6/in6_ifattach.c
@@ -33,6 +33,7 @@
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/counter.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
@@ -43,6 +44,7 @@
#include <sys/rmlock.h>
#include <sys/syslog.h>
#include <sys/md5.h>
+#include <crypto/sha2/sha256.h>
#include <net/if.h>
#include <net/if_var.h>
@@ -79,6 +81,8 @@ VNET_DEFINE(int, ip6_auto_linklocal) = 1; /* enabled by default */
VNET_DEFINE(struct callout, in6_tmpaddrtimer_ch);
#define V_in6_tmpaddrtimer_ch VNET(in6_tmpaddrtimer_ch)
+/* Use the interface name by default. */
+VNET_DEFINE(int, ip6_stableaddr_netifsource) = IP6_STABLEADDR_NETIFSRC_NAME;
+
VNET_DECLARE(struct inpcbinfo, ripcbinfo);
#define V_ripcbinfo VNET(ripcbinfo)
@@ -98,6 +102,9 @@ static void in6_purgemaddrs(struct ifnet *);
#define IFID_LOCAL(in6) (!EUI64_LOCAL(in6))
#define IFID_UNIVERSAL(in6) (!EUI64_UNIVERSAL(in6))
+#define HMAC_IPAD 0x36
+#define HMAC_OPAD 0x5C
+
/*
* Generate a last-resort interface identifier, when the machine has no
* IEEE802/EUI64 address sources.
@@ -147,22 +154,14 @@ get_rand_ifid(struct ifnet *ifp, struct in6_addr *in6)
}
-/*
- * Get interface identifier for the specified interface.
- * XXX assumes single sockaddr_dl (AF_LINK address) per an interface
- *
- * in6 - upper 64bits are preserved
+/*
+ * Get the link-level (AF_LINK) sockaddr for the interface.
+ */
-int
-in6_get_hw_ifid(struct ifnet *ifp, struct in6_addr *in6)
+static struct sockaddr_dl *
+get_interface_link_level(struct ifnet *ifp)
{
struct ifaddr *ifa;
struct sockaddr_dl *sdl;
- u_int8_t *addr;
- size_t addrlen;
- static u_int8_t allzero[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
- static u_int8_t allone[8] =
- { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
NET_EPOCH_ASSERT();
@@ -175,14 +174,30 @@ in6_get_hw_ifid(struct ifnet *ifp, struct in6_addr *in6)
if (sdl->sdl_alen == 0)
continue;
- goto found;
+ return sdl;
}
- return -1;
+ return NULL;
+}
+
+/*
+ * Get the hardware address from the interface's link-level sockaddr.
+ */
+static uint8_t *
+in6_get_interface_hwaddr(struct ifnet *ifp, size_t *len)
+{
+ struct sockaddr_dl *sdl;
+ u_int8_t *addr;
+ static u_int8_t allzero[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+ static u_int8_t allone[8] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+ sdl = get_interface_link_level(ifp);
+ if (sdl == NULL)
+ return (NULL);
-found:
addr = LLADDR(sdl);
- addrlen = sdl->sdl_alen;
+ *len = sdl->sdl_alen;
/* get EUI64 */
switch (ifp->if_type) {
@@ -193,36 +208,21 @@ found:
case IFT_IEEE1394:
/* IEEE802/EUI64 cases - what others? */
/* IEEE1394 uses 16byte length address starting with EUI64 */
- if (addrlen > 8)
- addrlen = 8;
+ if (*len > 8)
+ *len = 8;
/* look at IEEE802/EUI64 only */
- if (addrlen != 8 && addrlen != 6)
- return -1;
+ if (*len != 8 && *len != 6)
+ return (NULL);
/*
* check for invalid MAC address - on bsdi, we see it a lot
* since wildboar configures all-zero MAC on pccard before
* card insertion.
*/
- if (bcmp(addr, allzero, addrlen) == 0)
- return -1;
- if (bcmp(addr, allone, addrlen) == 0)
- return -1;
-
- /* make EUI64 address */
- if (addrlen == 8)
- bcopy(addr, &in6->s6_addr[8], 8);
- else if (addrlen == 6) {
- in6->s6_addr[8] = addr[0];
- in6->s6_addr[9] = addr[1];
- in6->s6_addr[10] = addr[2];
- in6->s6_addr[11] = 0xff;
- in6->s6_addr[12] = 0xfe;
- in6->s6_addr[13] = addr[3];
- in6->s6_addr[14] = addr[4];
- in6->s6_addr[15] = addr[5];
- }
+ if (memcmp(addr, allzero, *len) == 0 ||
+     memcmp(addr, allone, *len) == 0)
+ return (NULL);
+
break;
case IFT_GIF:
@@ -233,16 +233,51 @@ found:
* identifier source (can be renumbered).
* we don't do this.
*/
- return -1;
+ return (NULL);
case IFT_INFINIBAND:
- if (addrlen != 20)
- return -1;
- bcopy(addr + 12, &in6->s6_addr[8], 8);
+ if (*len != 20)
+ return (NULL);
+ *len = 8;
+ addr += 12;
break;
default:
+ return (NULL);
+ }
+
+ return addr;
+}
+
+/*
+ * Get interface identifier for the specified interface.
+ * XXX assumes a single sockaddr_dl (AF_LINK address) per interface
+ *
+ * in6 - upper 64 bits are preserved
+ */
+int
+in6_get_hw_ifid(struct ifnet *ifp, struct in6_addr *in6)
+{
+ size_t hwaddr_len;
+ uint8_t *hwaddr;
+ static u_int8_t allzero[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ hwaddr = in6_get_interface_hwaddr(ifp, &hwaddr_len);
+ if (hwaddr == NULL || (hwaddr_len != 6 && hwaddr_len != 8))
return -1;
+
+ /* make EUI64 address */
+ if (hwaddr_len == 8)
+ memcpy(&in6->s6_addr[8], hwaddr, 8);
+ else if (hwaddr_len == 6) {
+ in6->s6_addr[8] = hwaddr[0];
+ in6->s6_addr[9] = hwaddr[1];
+ in6->s6_addr[10] = hwaddr[2];
+ in6->s6_addr[11] = 0xff;
+ in6->s6_addr[12] = 0xfe;
+ in6->s6_addr[13] = hwaddr[3];
+ in6->s6_addr[14] = hwaddr[4];
+ in6->s6_addr[15] = hwaddr[5];
}
/* sanity check: g bit must not indicate "group" */
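
For reference, the expansion preserved by the refactoring above turns a 48-bit MAC into the low 64 bits of the address by splitting it around the bytes 0xff 0xfe. RFC 4291's modified EUI-64 form additionally inverts the universal/local bit; that, like the group-bit sanity check mentioned here, is handled later in in6_get_hw_ifid(), outside this hunk. A standalone sketch of the raw expansion:

    #include <stdint.h>
    #include <stdio.h>

    /* Expand a 48-bit MAC into an 8-byte interface ID, as above. */
    static void
    mac_to_eui64(const uint8_t mac[6], uint8_t iid[8])
    {
        iid[0] = mac[0];
        iid[1] = mac[1];
        iid[2] = mac[2];
        iid[3] = 0xff;
        iid[4] = 0xfe;
        iid[5] = mac[3];
        iid[6] = mac[4];
        iid[7] = mac[5];
    }

    int
    main(void)
    {
        uint8_t mac[6] = { 0x00, 0x25, 0x90, 0x12, 0x34, 0x56 };
        uint8_t iid[8];

        mac_to_eui64(mac, iid);
        for (int i = 0; i < 8; i++)
            printf("%02x%s", iid[i], i == 7 ? "\n" : ":");
        return (0);
    }
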
@@ -264,6 +299,153 @@ found:
}
/*
+ * Validate a generated interface ID to make sure it does not fall in any
+ * reserved range:
+ *
+ * https://www.iana.org/assignments/ipv6-interface-ids/ipv6-interface-ids.xhtml
+ */
+static bool
+validate_ifid(uint8_t *iid)
+{
+ static uint8_t allzero[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+ static uint8_t reserved_eth[5] = { 0x02, 0x00, 0x5E, 0xFF, 0xFE };
+ static uint8_t reserved_anycast[7] = { 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+
+ /* Subnet-Router Anycast (RFC 4291) */
+ if (memcmp(iid, allzero, 8) == 0)
+ return (false);
+
+ /*
+ * Reserved IPv6 Interface Identifiers corresponding to the IANA Ethernet Block (RFC 4291)
+ * and
+ * Proxy Mobile IPv6 (RFC 6543)
+ */
+ if (memcmp(iid, reserved_eth, 5) == 0)
+ return (false);
+
+ /* Reserved Subnet Anycast Addresses (RFC 2526) */
+ if (memcmp(iid, reserved_anycast, 7) == 0 && iid[7] >= 0x80)
+ return (false);
+
+ return (true);
+}
+
+/*
+ * Get interface identifier for the specified interface, according to
+ * RFC 7217 Stable and Opaque IDs with SLAAC, using HMAC-SHA256 digest.
+ *
+ * in6 - upper 64bits are preserved
+ */
+bool
+in6_get_stableifid(struct ifnet *ifp, struct in6_addr *in6, int prefixlen)
+{
+ struct sockaddr_dl *sdl;
+ const uint8_t *netiface;
+ size_t netiface_len, hostuuid_len;
+ uint8_t hostuuid[HOSTUUIDLEN + 1], hmac_key[SHA256_BLOCK_LENGTH],
+ hk_ipad[SHA256_BLOCK_LENGTH], hk_opad[SHA256_BLOCK_LENGTH];
+ uint64_t dad_failures;
+ SHA256_CTX ctxt;
+
+ switch (V_ip6_stableaddr_netifsource) {
+ case IP6_STABLEADDR_NETIFSRC_ID:
+ sdl = get_interface_link_level(ifp);
+ if (sdl == NULL)
+ return (false);
+ netiface = (uint8_t *)&LLINDEX(sdl);
+ netiface_len = sizeof(u_short); /* real return type of LLINDEX */
+ break;
+
+ case IP6_STABLEADDR_NETIFSRC_MAC:
+ netiface = in6_get_interface_hwaddr(ifp, &netiface_len);
+ if (netiface == NULL)
+ return (false);
+ break;
+
+ case IP6_STABLEADDR_NETIFSRC_NAME:
+ default:
+ netiface = (const uint8_t *)if_name(ifp);
+ netiface_len = strlen(if_name(ifp));
+ break;
+ }
+
+ /* Use hostuuid as constant "secret" key */
+ getcredhostuuid(curthread->td_ucred, hostuuid, sizeof(hostuuid));
+ if (strncmp(hostuuid, DEFAULT_HOSTUUID, sizeof(hostuuid)) == 0) {
+ /* If hostuuid is not set, use a random value. */
+ arc4rand(hostuuid, HOSTUUIDLEN, 0);
+ hostuuid[HOSTUUIDLEN] = '\0';
+ }
+ hostuuid_len = strlen(hostuuid);
+
+ dad_failures = counter_u64_fetch(ND_IFINFO(ifp)->dad_failures);
+
+ /*
+ * Give up once the DAD retry limit (RFC 7217 section 7) has
+ * been exhausted.
+ */
+ if (dad_failures > V_ip6_stableaddr_maxretries)
+ return (false);
+
+ /*
+ * Use hostuuid as basis for HMAC key
+ */
+ memset(hmac_key, 0, sizeof(hmac_key));
+ if (hostuuid_len <= SHA256_BLOCK_LENGTH) {
+ /* copy to hmac key variable, zero padded */
+ memcpy(hmac_key, hostuuid, hostuuid_len);
+ } else {
+ /* if longer than block length, use hash of the value, zero padded */
+ SHA256_Init(&ctxt);
+ SHA256_Update(&ctxt, hostuuid, hostuuid_len);
+ SHA256_Final(hmac_key, &ctxt);
+ }
+ /* XOR key with ipad and opad values */
+ for (uint16_t i = 0; i < sizeof(hmac_key); i++) {
+ hk_ipad[i] = hmac_key[i] ^ HMAC_IPAD;
+ hk_opad[i] = hmac_key[i] ^ HMAC_OPAD;
+ }
+
+ /*
+ * Generate interface id in a loop, adding an offset to be factored in the hash function.
+ * This is necessary, because if the generated interface id happens to be invalid we
+ * want to force the hash function to generate a different one, otherwise we would end up
+ * in an infinite loop trying the same invalid interface id over and over again.
+ *
+ * A uint8_t counter caps the iteration at UINT8_MAX; in practice this
+ * loop runs only once or twice.
+ */
+ for (uint8_t offset = 0; offset < UINT8_MAX; offset++) {
+ uint8_t digest[SHA256_DIGEST_LENGTH];
+
+ /* Calculate inner hash */
+ SHA256_Init(&ctxt);
+ SHA256_Update(&ctxt, hk_ipad, sizeof(hk_ipad));
+ SHA256_Update(&ctxt, in6->s6_addr, prefixlen / 8);
+ SHA256_Update(&ctxt, netiface, netiface_len);
+ SHA256_Update(&ctxt, (uint8_t *)&dad_failures, 8);
+ SHA256_Update(&ctxt, hostuuid, hostuuid_len);
+ SHA256_Update(&ctxt, &offset, 1);
+ SHA256_Final(digest, &ctxt);
+
+ /* Calculate outer hash */
+ SHA256_Init(&ctxt);
+ SHA256_Update(&ctxt, hk_opad, sizeof(hk_opad));
+ SHA256_Update(&ctxt, digest, sizeof(digest));
+ SHA256_Final(digest, &ctxt);
+
+ if (validate_ifid(digest)) {
+ /* assumes sizeof(digest) > sizeof(ifid) */
+ memcpy(&in6->s6_addr[8], digest, 8);
+
+ return (true);
+ }
+ }
+
+ return (false);
+}
+
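
in6_get_stableifid() above open-codes HMAC-SHA256 via the ipad/opad construction, presumably because the in-kernel sha256.h interface provides no HMAC wrapper. For experimenting outside the kernel, the same RFC 7217 computation can be expressed with OpenSSL's one-shot HMAC() (link with -lcrypto). The sketch below mirrors the input layout of the function above, with the host UUID replaced by an arbitrary secret and the retry/validation loop omitted:

    #include <openssl/evp.h>
    #include <openssl/hmac.h>

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * RFC 7217-style IID: HMAC-SHA256 over (prefix, net_iface, dad_counter),
     * truncated to 64 bits.
     */
    static int
    stable_iid(const uint8_t prefix[8], const char *ifname,
        uint64_t dad_counter, const char *secret, uint8_t iid[8])
    {
        uint8_t msg[64], digest[EVP_MAX_MD_SIZE];
        unsigned int dlen;
        size_t off = 0, iflen = strlen(ifname);

        if (8 + iflen + sizeof(dad_counter) > sizeof(msg))
            return (-1);
        memcpy(msg + off, prefix, 8); off += 8;
        memcpy(msg + off, ifname, iflen); off += iflen;
        memcpy(msg + off, &dad_counter, sizeof(dad_counter));
        off += sizeof(dad_counter);

        HMAC(EVP_sha256(), secret, (int)strlen(secret), msg, off,
            digest, &dlen);
        memcpy(iid, digest, 8);  /* SHA-256 digest is wider than the IID */
        return (0);
    }

    int
    main(void)
    {
        uint8_t prefix[8] = { 0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0 };
        uint8_t iid[8];

        if (stable_iid(prefix, "em0", 0, "example-host-uuid", iid) == 0) {
            for (int i = 0; i < 8; i++)
                printf("%02x", iid[i]);
            printf("\n");
        }
        return (0);
    }
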
+/*
* Get interface identifier for the specified interface. If it is not
* available on ifp0, borrow interface identifier from other information
* sources.
@@ -278,7 +460,14 @@ in6_get_ifid(struct ifnet *ifp0, struct ifnet *altifp,
NET_EPOCH_ASSERT();
- /* first, try to get it from the interface itself */
+ /* first, try the stable (RFC 7217) algorithm on the interface itself, if configured */
+ if ((ND_IFINFO(ifp0)->flags & ND6_IFF_STABLEADDR) && in6_get_stableifid(ifp0, in6, 64)) {
+ nd6log((LOG_DEBUG, "%s: got interface identifier from itself (stable private)\n",
+ if_name(ifp0)));
+ goto success;
+ }
+
+ /* otherwise, try to get it from the interface itself */
if (in6_get_hw_ifid(ifp0, in6) == 0) {
nd6log((LOG_DEBUG, "%s: got interface identifier from itself\n",
if_name(ifp0)));
diff --git a/sys/netinet6/in6_ifattach.h b/sys/netinet6/in6_ifattach.h
index fd52422b10be..75b2ca4fa018 100644
--- a/sys/netinet6/in6_ifattach.h
+++ b/sys/netinet6/in6_ifattach.h
@@ -39,6 +39,8 @@ void in6_ifattach(struct ifnet *, struct ifnet *);
void in6_ifattach_destroy(void);
void in6_ifdetach(struct ifnet *);
void in6_ifdetach_destroy(struct ifnet *);
+int in6_get_tmpifid(struct ifnet *, u_int8_t *, const u_int8_t *, int);
+bool in6_get_stableifid(struct ifnet *, struct in6_addr *, int);
void in6_tmpaddrtimer(void *);
int in6_get_hw_ifid(struct ifnet *, struct in6_addr *);
int in6_get_ifid(struct ifnet *, struct ifnet *, struct in6_addr *);
diff --git a/sys/netinet6/in6_proto.c b/sys/netinet6/in6_proto.c
index b289d4eeb0a2..6669a2ba56ce 100644
--- a/sys/netinet6/in6_proto.c
+++ b/sys/netinet6/in6_proto.c
@@ -167,6 +167,7 @@ VNET_DEFINE(int, ip6_rr_prune) = 5; /* router renumbering prefix
* walk list every 5 sec. */
VNET_DEFINE(int, ip6_mcast_pmtu) = 0; /* enable pMTU discovery for multicast? */
VNET_DEFINE(int, ip6_v6only) = 1;
+VNET_DEFINE(int, ip6_stableaddr_maxretries) = IP6_IDGEN_RETRIES;
#ifdef IPSTEALTH
VNET_DEFINE(int, ip6stealth) = 0;
@@ -313,6 +314,15 @@ SYSCTL_INT(_net_inet6_ip6, IPV6CTL_RR_PRUNE, rr_prune,
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_USETEMPADDR, use_tempaddr,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_use_tempaddr), 0,
"Create RFC3041 temporary addresses for autoconfigured addresses");
+SYSCTL_BOOL(_net_inet6_ip6, IPV6CTL_USESTABLEADDR, use_stableaddr,
+ CTLFLAG_VNET | CTLFLAG_RWTUN, &VNET_NAME(ip6_use_stableaddr), 0,
+ "Create RFC7217 semantically opaque address for autoconfigured addresses (default for new interfaces)");
+SYSCTL_INT(_net_inet6_ip6, IPV6CTL_STABLEADDR_MAXRETRIES, stableaddr_maxretries,
+ CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_stableaddr_maxretries), IP6_IDGEN_RETRIES,
+ "RFC7217 semantically opaque address DAD max retries");
+SYSCTL_INT(_net_inet6_ip6, IPV6CTL_STABLEADDR_NETIFSRC, stableaddr_netifsource,
+ CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_stableaddr_netifsource), IP6_STABLEADDR_NETIFSRC_NAME,
+ "RFC7217 semantically opaque address Net_Iface source (0 - name, 1 - ID, 2 - MAC addr)");
SYSCTL_PROC(_net_inet6_ip6, IPV6CTL_TEMPPLTIME, temppltime,
CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
NULL, 0, sysctl_ip6_temppltime, "I",
diff --git a/sys/netinet6/ip6_input.c b/sys/netinet6/ip6_input.c
index b22491a6007f..99dad1e7c309 100644
--- a/sys/netinet6/ip6_input.c
+++ b/sys/netinet6/ip6_input.c
@@ -235,6 +235,7 @@ ip6_vnet_init(void *arg __unused)
&V_ip6_auto_linklocal);
TUNABLE_INT_FETCH("net.inet6.ip6.accept_rtadv", &V_ip6_accept_rtadv);
TUNABLE_INT_FETCH("net.inet6.ip6.no_radr", &V_ip6_no_radr);
+ TUNABLE_BOOL_FETCH("net.inet6.ip6.use_stableaddr", &V_ip6_use_stableaddr);
CK_STAILQ_INIT(&V_in6_ifaddrhead);
V_in6_ifaddrhashtbl = hashinit(IN6ADDR_NHASH, M_IFADDR,
diff --git a/sys/netinet6/ip6_var.h b/sys/netinet6/ip6_var.h
index 12b00d4f9934..e1a4e8678ebb 100644
--- a/sys/netinet6/ip6_var.h
+++ b/sys/netinet6/ip6_var.h
@@ -338,8 +338,20 @@ VNET_DECLARE(int, ip6_use_tempaddr); /* Whether to use temporary addresses */
VNET_DECLARE(int, ip6_prefer_tempaddr); /* Whether to prefer temporary
* addresses in the source address
* selection */
+VNET_DECLARE(bool, ip6_use_stableaddr); /* Whether to use stable address generation (RFC 7217) */
#define V_ip6_use_tempaddr VNET(ip6_use_tempaddr)
#define V_ip6_prefer_tempaddr VNET(ip6_prefer_tempaddr)
+#define V_ip6_use_stableaddr VNET(ip6_use_stableaddr)
+
+#define IP6_IDGEN_RETRIES 3 /* RFC 7217 section 7 default max retries */
+VNET_DECLARE(int, ip6_stableaddr_maxretries);
+#define V_ip6_stableaddr_maxretries VNET(ip6_stableaddr_maxretries)
+
+#define IP6_STABLEADDR_NETIFSRC_NAME 0
+#define IP6_STABLEADDR_NETIFSRC_ID 1
+#define IP6_STABLEADDR_NETIFSRC_MAC 2
+VNET_DECLARE(int, ip6_stableaddr_netifsource);
+#define V_ip6_stableaddr_netifsource VNET(ip6_stableaddr_netifsource)
VNET_DECLARE(int, ip6_use_defzone); /* Whether to use the default scope
* zone when unspecified */
diff --git a/sys/netinet6/nd6.c b/sys/netinet6/nd6.c
index 8480e7fc90e3..938d411711f0 100644
--- a/sys/netinet6/nd6.c
+++ b/sys/netinet6/nd6.c
@@ -324,6 +324,13 @@ nd6_ifattach(struct ifnet *ifp)
/* XXX: we cannot call nd6_setmtu since ifp is not fully initialized */
nd6_setmtu0(ifp, nd);
+ /* Default to the stable-address algorithm; skip the loopback interface. */
+ if (V_ip6_use_stableaddr && !(ifp->if_flags & IFF_LOOPBACK)) {
+ nd->flags |= ND6_IFF_STABLEADDR;
+ }
+
+ nd->dad_failures = counter_u64_alloc(M_WAITOK);
+
return nd;
}
@@ -343,6 +350,8 @@ nd6_ifdetach(struct ifnet *ifp, struct nd_ifinfo *nd)
}
NET_EPOCH_EXIT(et);
+ counter_u64_free(nd->dad_failures);
+
free(nd, M_IP6NDP);
}
diff --git a/sys/netinet6/nd6.h b/sys/netinet6/nd6.h
index 9cb2571da58b..1de2a77ddf6d 100644
--- a/sys/netinet6/nd6.h
+++ b/sys/netinet6/nd6.h
@@ -76,6 +76,7 @@ struct nd_ifinfo {
u_int8_t randomseed0[8]; /* upper 64 bits of MD5 digest */
u_int8_t randomseed1[8]; /* lower 64 bits (usually the EUI64 IFID) */
u_int8_t randomid[8]; /* current random ID */
+ counter_u64_t dad_failures; /* DAD failures when using RFC 7217 stable addresses */
};
#define ND6_IFF_PERFORMNUD 0x1
@@ -89,6 +90,7 @@ struct nd_ifinfo {
#define ND6_IFF_NO_RADR 0x40
#define ND6_IFF_NO_PREFER_IFACE 0x80 /* XXX: not related to ND. */
#define ND6_IFF_NO_DAD 0x100
+#define ND6_IFF_STABLEADDR 0x800
#ifdef EXPERIMENTAL
/* XXX: not related to ND. */
#define ND6_IFF_IPV6_ONLY 0x200 /* draft-ietf-6man-ipv6only-flag */
diff --git a/sys/netinet6/nd6_nbr.c b/sys/netinet6/nd6_nbr.c
index 640348a1d198..76b1fd86ee08 100644
--- a/sys/netinet6/nd6_nbr.c
+++ b/sys/netinet6/nd6_nbr.c
@@ -38,6 +38,7 @@
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/counter.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/libkern.h>
@@ -1466,9 +1467,14 @@ nd6_dad_timer(void *arg)
* No duplicate address found. Check IFDISABLED flag
* again in case that it is changed between the
* beginning of this function and here.
+ *
+ * Reset DAD failures counter if using stable addresses.
*/
- if ((ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED) == 0)
+ if ((ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED) == 0) {
ia->ia6_flags &= ~IN6_IFF_TENTATIVE;
+ if ((ND_IFINFO(ifp)->flags & ND6_IFF_STABLEADDR) && !(ia->ia6_flags & IN6_IFF_TEMPORARY))
+ counter_u64_zero(ND_IFINFO(ifp)->dad_failures);
+ }
nd6log((LOG_DEBUG,
"%s: DAD complete for %s - no duplicates found\n",
@@ -1497,20 +1503,39 @@ nd6_dad_duplicated(struct ifaddr *ifa, struct dadq *dp)
struct ifnet *ifp;
char ip6buf[INET6_ADDRSTRLEN];
+ ifp = ifa->ifa_ifp;
+
log(LOG_ERR, "%s: DAD detected duplicate IPv6 address %s: "
"NS in/out/loopback=%d/%d/%d, NA in=%d\n",
- if_name(ifa->ifa_ifp), ip6_sprintf(ip6buf, &ia->ia_addr.sin6_addr),
+ if_name(ifp), ip6_sprintf(ip6buf, &ia->ia_addr.sin6_addr),
dp->dad_ns_icount, dp->dad_ns_ocount, dp->dad_ns_lcount,
dp->dad_na_icount);
ia->ia6_flags &= ~IN6_IFF_TENTATIVE;
ia->ia6_flags |= IN6_IFF_DUPLICATED;
- ifp = ifa->ifa_ifp;
log(LOG_ERR, "%s: DAD complete for %s - duplicate found\n",
if_name(ifp), ip6_sprintf(ip6buf, &ia->ia_addr.sin6_addr));
- log(LOG_ERR, "%s: manual intervention required\n",
- if_name(ifp));
+
+ /*
+ * For RFC 7217 stable addresses, increment failure counter here if we still have retries.
+ * More addresses will be generated as long as retries are not exhausted.
+ */
+ if ((ND_IFINFO(ifp)->flags & ND6_IFF_STABLEADDR) && !(ia->ia6_flags & IN6_IFF_TEMPORARY)) {
+ uint64_t dad_failures = counter_u64_fetch(ND_IFINFO(ifp)->dad_failures);
+
+ if (dad_failures <= V_ip6_stableaddr_maxretries) {
+ counter_u64_add(ND_IFINFO(ifp)->dad_failures, 1);
+ /* if retries exhausted, output an informative error message */
+ if (dad_failures == V_ip6_stableaddr_maxretries)
+ log(LOG_ERR, "%s: manual intervention required, consider disabling \"stableaddr\" on the interface"
+ " or checking hostuuid for uniqueness\n",
+ if_name(ifp));
+ }
+ } else {
+ log(LOG_ERR, "%s: manual intervention required\n",
+ if_name(ifp));
+ }
/*
* If the address is a link-local address formed from an interface
diff --git a/sys/netinet6/nd6_rtr.c b/sys/netinet6/nd6_rtr.c
index 6fe78083df23..01623a4506be 100644
--- a/sys/netinet6/nd6_rtr.c
+++ b/sys/netinet6/nd6_rtr.c
@@ -92,6 +92,7 @@ VNET_DEFINE(int, nd6_defifindex);
#define V_nd6_defifp VNET(nd6_defifp)
VNET_DEFINE(int, ip6_use_tempaddr) = 0;
+VNET_DEFINE(bool, ip6_use_stableaddr) = false;
VNET_DEFINE(int, ip6_desync_factor);
VNET_DEFINE(uint32_t, ip6_temp_max_desync_factor) = TEMP_MAX_DESYNC_FACTOR_BASE;
@@ -1184,7 +1185,7 @@ in6_ifadd(struct nd_prefixctl *pr, int mcast)
struct in6_aliasreq ifra;
struct in6_ifaddr *ia = NULL, *ib = NULL;
int error, plen0;
- struct in6_addr *ifid_addr = NULL, mask;
+ struct in6_addr *ifid_addr = NULL, mask, newaddr;
int prefixlen = pr->ndpr_plen;
int updateflags;
char ip6buf[INET6_ADDRSTRLEN];
@@ -1210,61 +1211,70 @@ in6_ifadd(struct nd_prefixctl *pr, int mcast)
* (4) it is easier to manage when an interface has addresses
* with the same interface identifier, than to have multiple addresses
* with different interface identifiers.
+ *
+ * If stable privacy addresses are in use, generate a new address with
+ * the algorithm specified in RFC 7217 section 5.
*/
- ifa = (struct ifaddr *)in6ifa_ifpforlinklocal(ifp, 0); /* 0 is OK? */
- if (ifa) {
- ib = (struct in6_ifaddr *)ifa;
- ifid_addr = &ib->ia_addr.sin6_addr;
-
- /* prefixlen + ifidlen must be equal to 128 */
- plen0 = in6_mask2len(&ib->ia_prefixmask.sin6_addr, NULL);
- if (prefixlen != plen0) {
- ifa_free(ifa);
- ifid_addr = NULL;
- nd6log((LOG_DEBUG,
- "%s: wrong prefixlen for %s (prefix=%d ifid=%d)\n",
- __func__, if_name(ifp), prefixlen, 128 - plen0));
- }
- }
- /* No suitable LL address, get the ifid directly */
- if (ifid_addr == NULL) {
- struct in6_addr taddr;
- ifa = ifa_alloc(sizeof(taddr), M_WAITOK);
+ /* make ifaddr */
+ in6_prepare_ifra(&ifra, &pr->ndpr_prefix.sin6_addr, &mask);
+
+ if (ND_IFINFO(ifp)->flags & ND6_IFF_STABLEADDR) {
+ memcpy(&newaddr, &pr->ndpr_prefix.sin6_addr, sizeof(pr->ndpr_prefix.sin6_addr));
+
+ if (!in6_get_stableifid(ifp, &newaddr, prefixlen))
+ return NULL;
+ } else {
+ ifa = (struct ifaddr *)in6ifa_ifpforlinklocal(ifp, 0); /* 0 is OK? */
if (ifa) {
ib = (struct in6_ifaddr *)ifa;
ifid_addr = &ib->ia_addr.sin6_addr;
- if(in6_get_ifid(ifp, NULL, ifid_addr) != 0) {
- nd6log((LOG_DEBUG,
- "%s: failed to get ifid for %s\n",
- __func__, if_name(ifp)));
+
+ /* prefixlen + ifidlen must be equal to 128 */
+ plen0 = in6_mask2len(&ib->ia_prefixmask.sin6_addr, NULL);
+ if (prefixlen != plen0) {
ifa_free(ifa);
ifid_addr = NULL;
+ nd6log((LOG_DEBUG,
+ "%s: wrong prefixlen for %s (prefix=%d ifid=%d)\n",
+ __func__, if_name(ifp), prefixlen, 128 - plen0));
}
}
- }
- if (ifid_addr == NULL) {
- nd6log((LOG_INFO,
- "%s: could not determine ifid for %s\n",
- __func__, if_name(ifp)));
- return NULL;
- }
+ /* No suitable LL address, get the ifid directly */
+ if (ifid_addr == NULL) {
+ struct in6_addr taddr;
+ ifa = ifa_alloc(sizeof(taddr), M_WAITOK);
+ if (ifa) {
+ ib = (struct in6_ifaddr *)ifa;
+ ifid_addr = &ib->ia_addr.sin6_addr;
+ if(in6_get_ifid(ifp, NULL, ifid_addr) != 0) {
+ nd6log((LOG_DEBUG,
+ "%s: failed to get ifid for %s\n",
+ __func__, if_name(ifp)));
+ ifa_free(ifa);
+ ifid_addr = NULL;
+ }
+ }
+ }
- /* make ifaddr */
- in6_prepare_ifra(&ifra, &pr->ndpr_prefix.sin6_addr, &mask);
+ if (ifid_addr == NULL) {
+ nd6log((LOG_INFO,
+ "%s: could not determine ifid for %s\n",
+ __func__, if_name(ifp)));
+ return NULL;
+ }
+
+ memcpy(&newaddr, &ib->ia_addr.sin6_addr, sizeof(ib->ia_addr.sin6_addr));
+ ifa_free(ifa);
+ }
IN6_MASK_ADDR(&ifra.ifra_addr.sin6_addr, &mask);
/* interface ID */
- ifra.ifra_addr.sin6_addr.s6_addr32[0] |=
- (ifid_addr->s6_addr32[0] & ~mask.s6_addr32[0]);
- ifra.ifra_addr.sin6_addr.s6_addr32[1] |=
- (ifid_addr->s6_addr32[1] & ~mask.s6_addr32[1]);
- ifra.ifra_addr.sin6_addr.s6_addr32[2] |=
- (ifid_addr->s6_addr32[2] & ~mask.s6_addr32[2]);
- ifra.ifra_addr.sin6_addr.s6_addr32[3] |=
- (ifid_addr->s6_addr32[3] & ~mask.s6_addr32[3]);
- ifa_free(ifa);
+ ifra.ifra_addr.sin6_addr.s6_addr32[0] |= (newaddr.s6_addr32[0] & ~mask.s6_addr32[0]);
+ ifra.ifra_addr.sin6_addr.s6_addr32[1] |= (newaddr.s6_addr32[1] & ~mask.s6_addr32[1]);
+ ifra.ifra_addr.sin6_addr.s6_addr32[2] |= (newaddr.s6_addr32[2] & ~mask.s6_addr32[2]);
+ ifra.ifra_addr.sin6_addr.s6_addr32[3] |= (newaddr.s6_addr32[3] & ~mask.s6_addr32[3]);
/* lifetimes. */
ifra.ifra_lifetime.ia6t_vltime = pr->ndpr_vltime;
@@ -1495,6 +1505,7 @@ prelist_update(struct nd_prefixctl *new, struct nd_defrouter *dr,
int auth;
struct in6_addrlifetime lt6_tmp;
char ip6buf[INET6_ADDRSTRLEN];
+ bool has_temporary = false;
NET_EPOCH_ASSERT();
@@ -1640,9 +1651,6 @@ prelist_update(struct nd_prefixctl *new, struct nd_defrouter *dr,
if (ifa6->ia6_ndpr != pr)
continue;
- if (ia6_match == NULL) /* remember the first one */
- ia6_match = ifa6;
-
/*
* An already autoconfigured address matched. Now that we
* are sure there is at least one matched address, we can
@@ -1702,6 +1710,13 @@ prelist_update(struct nd_prefixctl *new, struct nd_defrouter *dr,
if ((ifa6->ia6_flags & IN6_IFF_TEMPORARY) != 0) {
u_int32_t maxvltime, maxpltime;
+ /*
+ * If stable addresses (RFC 7217) are enabled, mark that a temporary address has been
+ * found to avoid generating unneeded extra ones.
+ */
+ if (ND_IFINFO(ifp)->flags & ND6_IFF_STABLEADDR)
+ has_temporary = true;
+
if (V_ip6_temp_valid_lifetime >
(u_int32_t)((time_uptime - ifa6->ia6_createtime) +
V_ip6_desync_factor)) {
@@ -1730,6 +1745,24 @@ prelist_update(struct nd_prefixctl *new, struct nd_defrouter *dr,
}
ifa6->ia6_lifetime = lt6_tmp;
ifa6->ia6_updatetime = time_uptime;
+
+ /*
+ * If using stable addresses (RFC 7217) and we still have retries to perform, ignore
+ * addresses already marked as duplicated, since a new one will be generated.
+ * Also ignore addresses marked as temporary, since their generation is orthogonal to
+ * opaque stable ones.
+ *
+ * There is a small race condition, in that the dad_counter could be incremented
+ * between here and when a new address is generated, but that generation will then
+ * fail and no further retries will be attempted.
+ */
+ if (ND_IFINFO(ifp)->flags & ND6_IFF_STABLEADDR &&
+ counter_u64_fetch(ND_IFINFO(ifp)->dad_failures) <= V_ip6_stableaddr_maxretries &&
+ ifa6->ia6_flags & (IN6_IFF_DUPLICATED | IN6_IFF_TEMPORARY))
+ continue;
+
+ if (ia6_match == NULL) /* remember the first one */
+ ia6_match = ifa6;
}
if (ia6_match == NULL && new->ndpr_vltime) {
int ifidlen;
@@ -1780,8 +1813,11 @@ prelist_update(struct nd_prefixctl *new, struct nd_defrouter *dr,
* immediately together with a new set of temporary
* addresses. Thus, we specify 1 as the 2nd arg of
* in6_tmpifadd().
+ *
+ * Skip this if a temporary address has been marked as
+ * found (happens only if stable addresses (RFC 7217) are in use).
*/
- if (V_ip6_use_tempaddr) {
+ if (V_ip6_use_tempaddr && !has_temporary) {
int e;
if ((e = in6_tmpifadd(ia6, 1, 1)) != 0) {
nd6log((LOG_NOTICE, "%s: failed to "
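in6_get_stableifid() is supplied elsewhere in this change; RFC 7217 section 5 specifies the interface ID as F(Prefix, Net_Iface, Network_ID, DAD_Counter, secret_key) for a pseudorandom F. A condensed sketch of that computation, assuming SHA-256 as F and a byte-aligned prefix (the function name and argument set are illustrative, not the patch's; userland-style includes shown for brevity):

#include <sys/types.h>
#include <netinet/in.h>
#include <crypto/sha2/sha256.h>	/* SHA256_Init/Update/Final */
#include <string.h>

static void
stable_iid_sketch(uint8_t iid[8], const struct in6_addr *prefix, int prefixlen,
    const char *ifname, uint64_t dad_counter, const uint8_t *key, size_t keylen)
{
	SHA256_CTX ctx;
	uint8_t digest[SHA256_DIGEST_LENGTH];

	SHA256_Init(&ctx);
	SHA256_Update(&ctx, prefix->s6_addr, prefixlen / 8);	/* Prefix */
	SHA256_Update(&ctx, ifname, strlen(ifname));		/* Net_Iface */
	SHA256_Update(&ctx, &dad_counter, sizeof(dad_counter));	/* DAD_Counter */
	SHA256_Update(&ctx, key, keylen);			/* secret_key */
	SHA256_Final(digest, &ctx);
	memcpy(iid, digest, 8);		/* low 64 bits become the interface ID */
}

Each DAD failure increments the counter input (the dad_failures counter above), so a duplicate yields a different, but still stable, address on the next attempt.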
diff --git a/sys/netpfil/pf/pf.c b/sys/netpfil/pf/pf.c
index 5889bb9d68e6..2705df61a1f7 100644
--- a/sys/netpfil/pf/pf.c
+++ b/sys/netpfil/pf/pf.c
@@ -2834,7 +2834,7 @@ pf_remove_state(struct pf_kstate *s)
s->key[PF_SK_WIRE]->port[0],
s->src.seqhi, s->src.seqlo + 1,
TH_RST|TH_ACK, 0, 0, 0, M_SKIP_FIREWALL, s->tag, 0,
- s->act.rtableid);
+ s->act.rtableid, NULL);
}
LIST_REMOVE(s, entry);
@@ -4080,7 +4080,7 @@ pf_build_tcp(const struct pf_krule *r, sa_family_t af,
u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
u_int8_t tcp_flags, u_int16_t win, u_int16_t mss, u_int8_t ttl,
int mbuf_flags, u_int16_t mtag_tag, u_int16_t mtag_flags, u_int sack,
- int rtableid)
+ int rtableid, u_short *reason)
{
struct mbuf *m;
int len, tlen;
@@ -4120,13 +4120,16 @@ pf_build_tcp(const struct pf_krule *r, sa_family_t af,
}
m = m_gethdr(M_NOWAIT, MT_DATA);
- if (m == NULL)
+ if (m == NULL) {
+ REASON_SET(reason, PFRES_MEMORY);
return (NULL);
+ }
#ifdef MAC
mac_netinet_firewall_send(m);
#endif
if ((pf_mtag = pf_get_mtag(m)) == NULL) {
+ REASON_SET(reason, PFRES_MEMORY);
m_freem(m);
return (NULL);
}
@@ -4346,13 +4349,14 @@ pf_send_tcp(const struct pf_krule *r, sa_family_t af,
const struct pf_addr *saddr, const struct pf_addr *daddr,
u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
u_int8_t tcp_flags, u_int16_t win, u_int16_t mss, u_int8_t ttl,
- int mbuf_flags, u_int16_t mtag_tag, u_int16_t mtag_flags, int rtableid)
+ int mbuf_flags, u_int16_t mtag_tag, u_int16_t mtag_flags, int rtableid,
+ u_short *reason)
{
struct pf_send_entry *pfse;
struct mbuf *m;
m = pf_build_tcp(r, af, saddr, daddr, sport, dport, seq, ack, tcp_flags,
- win, mss, ttl, mbuf_flags, mtag_tag, mtag_flags, 0, rtableid);
+ win, mss, ttl, mbuf_flags, mtag_tag, mtag_flags, 0, rtableid, reason);
if (m == NULL)
return;
@@ -4360,6 +4364,7 @@ pf_send_tcp(const struct pf_krule *r, sa_family_t af,
pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
if (pfse == NULL) {
m_freem(m);
+ REASON_SET(reason, PFRES_MEMORY);
return;
}
@@ -4421,9 +4426,10 @@ pf_return(struct pf_krule *r, struct pf_krule *nr, struct pf_pdesc *pd,
if (tcp_get_flags(th) & TH_FIN)
ack++;
pf_send_tcp(r, pd->af, pd->dst,
- pd->src, th->th_dport, th->th_sport,
- ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
- r->return_ttl, M_SKIP_FIREWALL, 0, 0, rtableid);
+ pd->src, th->th_dport, th->th_sport,
+ ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
+ r->return_ttl, M_SKIP_FIREWALL, 0, 0, rtableid,
+ reason);
}
} else if (pd->proto == IPPROTO_SCTP &&
(r->rule_flag & PFRULE_RETURN)) {
@@ -4474,7 +4480,8 @@ pf_icmp_to_bandlim(uint8_t type)
static void
pf_send_challenge_ack(struct pf_pdesc *pd, struct pf_kstate *s,
- struct pf_state_peer *src, struct pf_state_peer *dst)
+ struct pf_state_peer *src, struct pf_state_peer *dst,
+ u_short *reason)
{
/*
* We are sending challenge ACK as a response to SYN packet, which
@@ -4488,7 +4495,7 @@ pf_send_challenge_ack(struct pf_pdesc *pd, struct pf_kstate *s,
pf_send_tcp(s->rule, pd->af, pd->dst, pd->src,
pd->hdr.tcp.th_dport, pd->hdr.tcp.th_sport, dst->seqlo,
src->seqlo, TH_ACK, 0, 0, s->rule->return_ttl, 0, 0, 0,
- s->rule->rtableid);
+ s->rule->rtableid, reason);
}
static void
@@ -6320,7 +6327,7 @@ pf_create_state(struct pf_krule *r, struct pf_test_ctx *ctx,
pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport,
th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
TH_SYN|TH_ACK, 0, s->src.mss, 0, M_SKIP_FIREWALL, 0, 0,
- pd->act.rtableid);
+ pd->act.rtableid, &ctx->reason);
REASON_SET(&ctx->reason, PFRES_SYNPROXY);
return (PF_SYNPROXY_DROP);
}
@@ -6768,8 +6775,12 @@ pf_tcp_track_full(struct pf_kstate *state, struct pf_pdesc *pd,
(ackskew <= (MAXACKWINDOW << sws)) &&
/* Acking not more than one window forward */
((tcp_get_flags(th) & TH_RST) == 0 || orig_seq == src->seqlo ||
- (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo))) {
+ (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
+ /* Allow resets to match sequence window if ack is perfect match */
+ (SEQ_GEQ(orig_seq, src->seqlo - (dst->max_win << dws)) &&
+ SEQ_LEQ(orig_seq, src->seqlo + 1) && ackskew == 0 &&
+ (tcp_get_flags(th) & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)))) {
/* Require an exact/+1 sequence match on resets when possible */
if (dst->scrub || src->scrub) {
if (pf_normalize_tcp_stateful(pd, reason, th,
@@ -6910,7 +6921,7 @@ pf_tcp_track_full(struct pf_kstate *state, struct pf_pdesc *pd,
th->th_sport, ntohl(th->th_ack), 0,
TH_RST, 0, 0,
state->rule->return_ttl, M_SKIP_FIREWALL,
- 0, 0, state->act.rtableid);
+ 0, 0, state->act.rtableid, reason);
src->seqlo = 0;
src->seqhi = 1;
src->max_win = 1;
@@ -7035,7 +7046,8 @@ pf_synproxy(struct pf_pdesc *pd, struct pf_kstate *state, u_short *reason)
pd->src, th->th_dport, th->th_sport,
state->src.seqhi, ntohl(th->th_seq) + 1,
TH_SYN|TH_ACK, 0, state->src.mss, 0,
- M_SKIP_FIREWALL, 0, 0, state->act.rtableid);
+ M_SKIP_FIREWALL, 0, 0, state->act.rtableid,
+ reason);
REASON_SET(reason, PFRES_SYNPROXY);
return (PF_SYNPROXY_DROP);
} else if ((tcp_get_flags(th) & (TH_ACK|TH_RST|TH_FIN)) != TH_ACK ||
@@ -7068,7 +7080,8 @@ pf_synproxy(struct pf_pdesc *pd, struct pf_kstate *state, u_short *reason)
state->dst.seqhi, 0, TH_SYN, 0,
state->src.mss, 0,
state->orig_kif->pfik_ifp == V_loif ? M_LOOP : 0,
- state->tag, 0, state->act.rtableid);
+ state->tag, 0, state->act.rtableid,
+ reason);
REASON_SET(reason, PFRES_SYNPROXY);
return (PF_SYNPROXY_DROP);
} else if (((tcp_get_flags(th) & (TH_SYN|TH_ACK)) !=
@@ -7083,13 +7096,15 @@ pf_synproxy(struct pf_pdesc *pd, struct pf_kstate *state, u_short *reason)
pd->src, th->th_dport, th->th_sport,
ntohl(th->th_ack), ntohl(th->th_seq) + 1,
TH_ACK, state->src.max_win, 0, 0, 0,
- state->tag, 0, state->act.rtableid);
+ state->tag, 0, state->act.rtableid,
+ reason);
pf_send_tcp(state->rule, pd->af,
&sk->addr[pd->sidx], &sk->addr[pd->didx],
sk->port[pd->sidx], sk->port[pd->didx],
state->src.seqhi + 1, state->src.seqlo + 1,
TH_ACK, state->dst.max_win, 0, 0,
- M_SKIP_FIREWALL, 0, 0, state->act.rtableid);
+ M_SKIP_FIREWALL, 0, 0, state->act.rtableid,
+ reason);
state->src.seqdiff = state->dst.seqhi -
state->src.seqlo;
state->dst.seqdiff = state->src.seqhi -
@@ -7189,7 +7204,7 @@ pf_test_state(struct pf_kstate **state, struct pf_pdesc *pd, u_short *reason)
* ACK enables all parties (firewall and peers)
* to get in sync again.
*/
- pf_send_challenge_ack(pd, *state, src, dst);
+ pf_send_challenge_ack(pd, *state, src, dst, reason);
return (PF_DROP);
}
}
@@ -10899,7 +10914,7 @@ pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0
/* Respond to SYN with a syncookie. */
if ((tcp_get_flags(&pd.hdr.tcp) & (TH_SYN|TH_ACK|TH_RST)) == TH_SYN &&
pd.dir == PF_IN && pf_synflood_check(&pd)) {
- pf_syncookie_send(&pd);
+ pf_syncookie_send(&pd, &reason);
action = PF_DROP;
break;
}
@@ -10923,7 +10938,7 @@ pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0
pd.dir == PF_IN) {
struct mbuf *msyn;
- msyn = pf_syncookie_recreate_syn(&pd);
+ msyn = pf_syncookie_recreate_syn(&pd, &reason);
if (msyn == NULL) {
action = PF_DROP;
break;
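The remaining pf.c churn is mechanical: a u_short *reason out-parameter is threaded down to the packet-building allocation sites so that mbuf or mtag exhaustion is reported as PFRES_MEMORY instead of failing silently. The caller-visible pattern, sketched under the simplifying assumption that REASON_SET only stores the code (in pf it also bumps counters):

/* Illustrative only; not pf's actual helpers. */
static struct mbuf *
build_pkt_sketch(u_short *reason)
{
	struct mbuf *m = m_gethdr(M_NOWAIT, MT_DATA);

	if (m == NULL) {
		if (reason != NULL)
			*reason = PFRES_MEMORY;	/* surfaced to pf_test() */
		return (NULL);
	}
	return (m);
}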
diff --git a/sys/netpfil/pf/pf_ioctl.c b/sys/netpfil/pf/pf_ioctl.c
index 5bfbb2c83f0e..bd506c092da2 100644
--- a/sys/netpfil/pf/pf_ioctl.c
+++ b/sys/netpfil/pf/pf_ioctl.c
@@ -6551,6 +6551,11 @@ shutdown_pf(void)
pf_kill_srcnodes(NULL);
+ for (int i = 0; i < PF_RULESET_MAX; i++) {
+ pf_rule_tree_free(pf_main_ruleset.rules[i].active.tree);
+ pf_rule_tree_free(pf_main_ruleset.rules[i].inactive.tree);
+ }
+
/* status does not use malloced mem so no need to cleanup */
/* fingerprints and interfaces have their own cleanup code */
} while(0);
diff --git a/sys/netpfil/pf/pf_ruleset.c b/sys/netpfil/pf/pf_ruleset.c
index 1711e690f6bb..4e16eaa76f9d 100644
--- a/sys/netpfil/pf/pf_ruleset.c
+++ b/sys/netpfil/pf/pf_ruleset.c
@@ -336,12 +336,6 @@ pf_remove_if_empty_kruleset(struct pf_kruleset *ruleset)
int i;
while (ruleset != NULL) {
- for (int i = 0; i < PF_RULESET_MAX; i++) {
- pf_rule_tree_free(ruleset->rules[i].active.tree);
- ruleset->rules[i].active.tree = NULL;
- pf_rule_tree_free(ruleset->rules[i].inactive.tree);
- ruleset->rules[i].inactive.tree = NULL;
- }
if (ruleset == &pf_main_ruleset ||
!RB_EMPTY(&ruleset->anchor->children) ||
ruleset->anchor->refcnt > 0 || ruleset->tables > 0 ||
@@ -352,6 +346,12 @@ pf_remove_if_empty_kruleset(struct pf_kruleset *ruleset)
!TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
ruleset->rules[i].inactive.open)
return;
+ for (int i = 0; i < PF_RULESET_MAX; i++) {
+ pf_rule_tree_free(ruleset->rules[i].active.tree);
+ ruleset->rules[i].active.tree = NULL;
+ pf_rule_tree_free(ruleset->rules[i].inactive.tree);
+ ruleset->rules[i].inactive.tree = NULL;
+ }
RB_REMOVE(pf_kanchor_global, &V_pf_anchors, ruleset->anchor);
if ((parent = ruleset->anchor->parent) != NULL)
RB_REMOVE(pf_kanchor_node, &parent->children,
diff --git a/sys/netpfil/pf/pf_syncookies.c b/sys/netpfil/pf/pf_syncookies.c
index 4a935bc65767..d11551ffb6ae 100644
--- a/sys/netpfil/pf/pf_syncookies.c
+++ b/sys/netpfil/pf/pf_syncookies.c
@@ -287,7 +287,7 @@ pf_synflood_check(struct pf_pdesc *pd)
}
void
-pf_syncookie_send(struct pf_pdesc *pd)
+pf_syncookie_send(struct pf_pdesc *pd, u_short *reason)
{
uint16_t mss;
uint32_t iss;
@@ -297,7 +297,7 @@ pf_syncookie_send(struct pf_pdesc *pd)
pf_send_tcp(NULL, pd->af, pd->dst, pd->src, *pd->dport, *pd->sport,
iss, ntohl(pd->hdr.tcp.th_seq) + 1, TH_SYN|TH_ACK, 0, mss,
0, M_SKIP_FIREWALL | (pd->m->m_flags & M_LOOP), 0, 0,
- pd->act.rtableid);
+ pd->act.rtableid, reason);
counter_u64_add(V_pf_status.lcounters[KLCNT_SYNCOOKIES_SENT], 1);
/* XXX Maybe only in adaptive mode? */
atomic_add_64(&V_pf_status.syncookies_inflight[V_pf_syncookie_status.oddeven],
@@ -495,7 +495,7 @@ pf_syncookie_generate(struct pf_pdesc *pd, uint16_t mss)
}
struct mbuf *
-pf_syncookie_recreate_syn(struct pf_pdesc *pd)
+pf_syncookie_recreate_syn(struct pf_pdesc *pd, u_short *reason)
{
uint8_t wscale;
uint16_t mss;
@@ -516,5 +516,5 @@ pf_syncookie_recreate_syn(struct pf_pdesc *pd)
return (pf_build_tcp(NULL, pd->af, pd->src, pd->dst, *pd->sport,
*pd->dport, seq, 0, TH_SYN, wscale, mss, pd->ttl,
(pd->m->m_flags & M_LOOP), 0, PF_MTAG_FLAG_SYNCOOKIE_RECREATED,
- cookie.flags.sack_ok, pd->act.rtableid));
+ cookie.flags.sack_ok, pd->act.rtableid, reason));
}
diff --git a/sys/powerpc/include/atomic.h b/sys/powerpc/include/atomic.h
index 015a283e2de7..b2d7549e5bd0 100644
--- a/sys/powerpc/include/atomic.h
+++ b/sys/powerpc/include/atomic.h
@@ -1137,7 +1137,38 @@ atomic_thread_fence_seq_cst(void)
#define atomic_cmpset_short atomic_cmpset_16
#define atomic_fcmpset_char atomic_fcmpset_8
#define atomic_fcmpset_short atomic_fcmpset_16
-#endif
+#define atomic_set_short atomic_set_16
+#define atomic_clear_short atomic_clear_16
+#else
+
+static __inline void
+atomic_set_short(volatile u_short *p, u_short bit)
+{
+ u_short v;
+
+ v = atomic_load_short(p);
+ for (;;) {
+ if (atomic_fcmpset_16(p, &v, v | bit))
+ break;
+ }
+}
+
+static __inline void
+atomic_clear_short(volatile u_short *p, u_short bit)
+{
+ u_short v;
+
+ v = atomic_load_short(p);
+ for (;;) {
+ if (atomic_fcmpset_16(p, &v, v & ~bit))
+ break;
+ }
+}
+
+#define atomic_set_16 atomic_set_short
+#define atomic_clear_16 atomic_clear_short
+
+#endif /* ISA_206_ATOMICS */
/* These need sys/_atomic_subword.h on non-ISA-2.06-atomic platforms. */
ATOMIC_CMPSET_ACQ_REL(char);
diff --git a/sys/riscv/include/atomic.h b/sys/riscv/include/atomic.h
index 74ffc171b028..c90cb02c482c 100644
--- a/sys/riscv/include/atomic.h
+++ b/sys/riscv/include/atomic.h
@@ -656,4 +656,7 @@ atomic_thread_fence_seq_cst(void)
#include <sys/_atomic_subword.h>
+#define atomic_set_short atomic_set_16
+#define atomic_clear_short atomic_clear_16
+
#endif /* _MACHINE_ATOMIC_H_ */
diff --git a/sys/security/mac_bsdextended/mac_bsdextended.c b/sys/security/mac_bsdextended/mac_bsdextended.c
index 8a6549214380..bf95c008e2f2 100644
--- a/sys/security/mac_bsdextended/mac_bsdextended.c
+++ b/sys/security/mac_bsdextended/mac_bsdextended.c
@@ -246,7 +246,9 @@ ugidfw_rulecheck(struct mac_bsdextended_rule *rule,
}
if (rule->mbr_subject.mbs_flags & MBS_GID_DEFINED) {
- match = ((cred->cr_rgid <= rule->mbr_subject.mbs_gid_max &&
+ match = ((cred->cr_gid <= rule->mbr_subject.mbs_gid_max &&
+ cred->cr_gid >= rule->mbr_subject.mbs_gid_min) ||
+ (cred->cr_rgid <= rule->mbr_subject.mbs_gid_max &&
cred->cr_rgid >= rule->mbr_subject.mbs_gid_min) ||
(cred->cr_svgid <= rule->mbr_subject.mbs_gid_max &&
cred->cr_svgid >= rule->mbr_subject.mbs_gid_min));
diff --git a/sys/security/mac_do/mac_do.c b/sys/security/mac_do/mac_do.c
index 7a5ac2e01f75..6f3e63d06198 100644
--- a/sys/security/mac_do/mac_do.c
+++ b/sys/security/mac_do/mac_do.c
@@ -1650,7 +1650,7 @@ rule_grant_supplementary_groups(const struct rule *const rule,
const bool current_has_supp = (gid_flags & MDF_CURRENT) != 0 &&
(gid_flags & MDF_SUPP_MASK) != 0;
id_nb_t rule_idx = 0;
- int old_idx = 1, new_idx = 1;
+ int old_idx = 0, new_idx = 0;
if ((gid_flags & MDF_ANY_SUPP) != 0 &&
(gid_flags & MDF_MAY_REJ_SUPP) == 0)
diff --git a/sys/sys/_atomic_subword.h b/sys/sys/_atomic_subword.h
index dee5a3bed871..284e2bfa340f 100644
--- a/sys/sys/_atomic_subword.h
+++ b/sys/sys/_atomic_subword.h
@@ -205,4 +205,32 @@ atomic_load_acq_16(const volatile uint16_t *p)
#undef _ATOMIC_BYTE_SHIFT
#undef _ATOMIC_HWORD_SHIFT
+#ifndef atomic_set_16
+static __inline void
+atomic_set_16(volatile uint16_t *p, uint16_t bit)
+{
+ uint16_t v;
+
+ v = atomic_load_16(p);
+ for (;;) {
+ if (atomic_fcmpset_16(p, &v, v | bit))
+ break;
+ }
+}
+#endif
+
+#ifndef atomic_clear_16
+static __inline void
+atomic_clear_16(volatile uint16_t *p, uint16_t bit)
+{
+ uint16_t v;
+
+ v = atomic_load_16(p);
+ for (;;) {
+ if (atomic_fcmpset_16(p, &v, v & ~bit))
+ break;
+ }
+}
+#endif
+
#endif /* _SYS__ATOMIC_SUBWORD_H_ */
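Both new 16-bit primitives use the usual load-then-fcmpset retry loop, so they are lock-free on any target that provides atomic_fcmpset_16. Usage matches the word-sized variants; a trivial sketch:

#include <sys/types.h>
#include <machine/atomic.h>

static volatile uint16_t flags;

static void
toggle_bit_sketch(void)
{
	atomic_set_16(&flags, 0x0004);		/* OR the bit in atomically */
	atomic_clear_16(&flags, 0x0004);	/* AND it back out atomically */
}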
diff --git a/sys/sys/file.h b/sys/sys/file.h
index cc3c733580fd..c44fd0f28929 100644
--- a/sys/sys/file.h
+++ b/sys/sys/file.h
@@ -93,6 +93,8 @@ void foffset_lock_pair(struct file *fp1, off_t *off1p, struct file *fp2,
void foffset_lock_uio(struct file *fp, struct uio *uio, int flags);
void foffset_unlock(struct file *fp, off_t val, int flags);
void foffset_unlock_uio(struct file *fp, struct uio *uio, int flags);
+void fsetfl_lock(struct file *fp);
+void fsetfl_unlock(struct file *fp);
static inline off_t
foffset_get(struct file *fp)
@@ -197,7 +199,7 @@ struct file {
struct vnode *f_vnode; /* NULL or applicable vnode */
struct ucred *f_cred; /* associated credentials. */
short f_type; /* descriptor type */
- short f_vnread_flags; /* (f) Sleep lock for f_offset */
+ short f_vflags; /* (f) Sleep lock flags for members */
/*
* DTYPE_VNODE specific fields.
*/
@@ -220,8 +222,10 @@ struct file {
#define f_cdevpriv f_vnun.fvn_cdevpriv
#define f_advice f_vnun.fvn_advice
-#define FOFFSET_LOCKED 0x1
-#define FOFFSET_LOCK_WAITING 0x2
+#define FILE_V_FOFFSET_LOCKED 0x0001
+#define FILE_V_FOFFSET_LOCK_WAITING 0x0002
+#define FILE_V_SETFL_LOCKED 0x0004
+#define FILE_V_SETFL_LOCK_WAITING 0x0008
#endif /* __BSD_VISIBLE */
#endif /* _KERNEL || _WANT_FILE */
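fsetfl_lock()/fsetfl_unlock() themselves are implemented elsewhere in this change; assuming they mirror the existing foffset_lock() slow path, which serializes on a pool mutex and sleeps on the flag word, the protocol looks roughly like:

/* Schematic sketch only, modeled on foffset_lock(); not the committed code. */
static void
fsetfl_lock_sketch(struct file *fp)
{
	struct mtx *mtxp;

	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	while ((fp->f_vflags & FILE_V_SETFL_LOCKED) != 0) {
		fp->f_vflags |= FILE_V_SETFL_LOCK_WAITING;
		msleep(&fp->f_vflags, mtxp, PUSER - 1, "fsetfl", 0);
	}
	fp->f_vflags |= FILE_V_SETFL_LOCKED;
	mtx_unlock(mtxp);
}

Renaming f_vnread_flags to f_vflags and widening the flag namespace to FILE_V_* lets one short carry both the f_offset lock and the new F_SETFL lock.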
diff --git a/sys/sys/power.h b/sys/sys/power.h
index 33ace400bfd2..44d7fc354423 100644
--- a/sys/sys/power.h
+++ b/sys/sys/power.h
@@ -91,8 +91,7 @@ extern const char *power_stype_to_name(enum power_stype _stype);
typedef int (*power_pm_fn_t)(u_long _cmd, void* _arg, enum power_stype _stype);
extern int power_pm_register(u_int _pm_type, power_pm_fn_t _pm_fn,
- void *_pm_arg,
- bool _pm_supported[static POWER_STYPE_COUNT]);
+ void *_pm_arg);
extern u_int power_pm_get_type(void);
extern void power_pm_suspend(int);
diff --git a/sys/sys/ucred.h b/sys/sys/ucred.h
index 9c1d8545af34..254f58841993 100644
--- a/sys/sys/ucred.h
+++ b/sys/sys/ucred.h
@@ -112,15 +112,21 @@ struct xucred {
short cr_ngroups; /* number of groups (incl. cr_gid). */
union {
/*
- * Special little hack to avoid needing a cr_gid macro, which
- * would cause problems if one were to use it with struct ucred
- * which also has a cr_groups member.
+ * The effective GID has been the first element of cr_groups[]
+ * for historical reasons. It should be accessed using the
+ * 'cr_gid' identifier. Supplementary groups should be accessed
+ * using cr_sgroups[]. Note that 'cr_ngroups' currently
+ * includes the effective GID.
+ *
+ * XXXOC: On the next API change (requires versioning), please
+ * replace this union with a true unaliased field 'cr_gid' and
+ * make sure that cr_groups[]/'cr_ngroups' only account for
+ * supplementary groups.
*/
struct {
gid_t cr_gid; /* effective group id */
gid_t cr_sgroups[XU_NGROUPS - 1];
};
-
gid_t cr_groups[XU_NGROUPS]; /* groups */
};
union {
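The union keeps the historical layout intact: cr_gid occupies the same storage as cr_groups[0], and cr_sgroups[] overlays cr_groups[1..]. A small illustrative demonstration of the aliasing:

#include <sys/types.h>
#include <sys/ucred.h>
#include <assert.h>

static void
xucred_alias_demo(void)
{
	struct xucred xuc;

	xuc.cr_groups[0] = 10;
	assert(xuc.cr_gid == 10);		/* same storage */
	assert((void *)&xuc.cr_sgroups[0] ==
	    (void *)&xuc.cr_groups[1]);		/* supplementary groups */
}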
diff --git a/sys/x86/acpica/acpi_apm.c b/sys/x86/acpica/acpi_apm.c
index 8e5785cf0ed6..be161cd6171b 100644
--- a/sys/x86/acpica/acpi_apm.c
+++ b/sys/x86/acpica/acpi_apm.c
@@ -235,7 +235,7 @@ apmdtor(void *data)
acpi_sc = clone->acpi_sc;
/* We are about to lose a reference so check if suspend should occur */
- if (acpi_sc->acpi_next_stype != POWER_STYPE_AWAKE &&
+ if (acpi_sc->acpi_next_sstate != 0 &&
clone->notify_status != APM_EV_ACKED)
acpi_AckSleepState(clone, 0);
@@ -283,10 +283,10 @@ apmioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td
case APMIO_SUSPEND:
if ((flag & FWRITE) == 0)
return (EPERM);
- if (acpi_sc->acpi_next_stype == POWER_STYPE_AWAKE) {
- if (power_suspend_stype != POWER_STYPE_POWEROFF) {
+ if (acpi_sc->acpi_next_sstate == 0) {
+ if (acpi_sc->acpi_suspend_sx != ACPI_STATE_S5) {
error = acpi_ReqSleepState(acpi_sc,
- power_suspend_stype);
+ acpi_sc->acpi_suspend_sx);
} else {
printf(
"power off via apm suspend not supported\n");
@@ -298,10 +298,10 @@ apmioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td
case APMIO_STANDBY:
if ((flag & FWRITE) == 0)
return (EPERM);
- if (acpi_sc->acpi_next_stype == POWER_STYPE_AWAKE) {
- if (power_standby_stype != POWER_STYPE_POWEROFF) {
+ if (acpi_sc->acpi_next_sstate == 0) {
+ if (acpi_sc->acpi_standby_sx != ACPI_STATE_S5) {
error = acpi_ReqSleepState(acpi_sc,
- power_standby_stype);
+ acpi_sc->acpi_standby_sx);
} else {
printf(
"power off via apm standby not supported\n");
@@ -313,11 +313,10 @@ apmioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td
case APMIO_NEXTEVENT:
printf("apm nextevent start\n");
ACPI_LOCK(acpi);
- if (acpi_sc->acpi_next_stype != POWER_STYPE_AWAKE &&
- clone->notify_status == APM_EV_NONE) {
+ if (acpi_sc->acpi_next_sstate != 0 && clone->notify_status ==
+ APM_EV_NONE) {
ev_info = (struct apm_event_info *)addr;
- /* XXX Check this. */
- if (acpi_sc->acpi_next_stype == POWER_STYPE_STANDBY)
+ if (acpi_sc->acpi_next_sstate <= ACPI_STATE_S3)
ev_info->type = PMEV_STANDBYREQ;
else
ev_info->type = PMEV_SUSPENDREQ;
@@ -393,7 +392,7 @@ apmpoll(struct cdev *dev, int events, struct thread *td)
revents = 0;
devfs_get_cdevpriv((void **)&clone);
ACPI_LOCK(acpi);
- if (clone->acpi_sc->acpi_next_stype != POWER_STYPE_AWAKE)
+ if (clone->acpi_sc->acpi_next_sstate)
revents |= events & (POLLIN | POLLRDNORM);
else
selrecord(td, &clone->sel_read);
@@ -434,7 +433,7 @@ apmreadfilt(struct knote *kn, long hint)
ACPI_LOCK(acpi);
clone = kn->kn_hook;
- sleeping = clone->acpi_sc->acpi_next_stype != POWER_STYPE_AWAKE;
+ sleeping = clone->acpi_sc->acpi_next_sstate ? 1 : 0;
ACPI_UNLOCK(acpi);
return (sleeping);
}
diff --git a/sys/x86/include/ucode.h b/sys/x86/include/ucode.h
index 0338d48a0832..75b9ff3afbd0 100644
--- a/sys/x86/include/ucode.h
+++ b/sys/x86/include/ucode.h
@@ -63,7 +63,7 @@ struct ucode_intel_extsig_table {
};
const void *ucode_amd_find(const char *path, uint32_t signature,
- uint32_t revision, const uint8_t *fw_data, size_t fw_size,
+ uint32_t *revision, const uint8_t *fw_data, size_t fw_size,
size_t *selected_sizep);
int ucode_intel_load(const void *data, bool unsafe,
uint64_t *nrevp, uint64_t *orevp);
diff --git a/sys/x86/x86/ucode.c b/sys/x86/x86/ucode.c
index 0c153c0b656c..1973047fafd1 100644
--- a/sys/x86/x86/ucode.c
+++ b/sys/x86/x86/ucode.c
@@ -277,7 +277,8 @@ ucode_amd_match(const uint8_t *data, size_t *len)
signature = regs[0];
revision = rdmsr(MSR_BIOS_SIGN);
- return (ucode_amd_find("loader blob", signature, revision, data, *len, len));
+ return (ucode_amd_find("loader blob", signature, &revision, data, *len,
+ len));
}
/*
diff --git a/sys/x86/x86/ucode_subr.c b/sys/x86/x86/ucode_subr.c
index 9e128ad2bf04..53d7cfc06769 100644
--- a/sys/x86/x86/ucode_subr.c
+++ b/sys/x86/x86/ucode_subr.c
@@ -94,7 +94,7 @@ typedef struct container_header {
* source code.
*/
const void *
-ucode_amd_find(const char *path, uint32_t signature, uint32_t revision,
+ucode_amd_find(const char *path, uint32_t signature, uint32_t *revision,
const uint8_t *fw_data, size_t fw_size, size_t *selected_sizep)
{
const amd_10h_fw_header_t *fw_header;
@@ -112,7 +112,7 @@ ucode_amd_find(const char *path, uint32_t signature, uint32_t revision,
(signature >> 4) & 0x0f,
(signature >> 0) & 0x0f, (signature >> 20) & 0xff,
(signature >> 16) & 0x0f);
- WARNX(1, "microcode revision %#x", revision);
+ WARNX(1, "microcode revision %#x", *revision);
nextfile:
WARNX(1, "checking %s for update.", path);
@@ -212,9 +212,9 @@ nextfile:
fw_header->processor_rev_id, equiv_id);
continue; /* different cpu */
}
- if (fw_header->patch_id <= revision) {
+ if (fw_header->patch_id <= *revision) {
WARNX(1, "patch_id %x, revision %x",
- fw_header->patch_id, revision);
+ fw_header->patch_id, *revision);
continue; /* not newer revision */
}
if (fw_header->nb_dev_id != 0 || fw_header->sb_dev_id != 0) {
@@ -222,7 +222,7 @@ nextfile:
}
WARNX(3, "selecting revision: %x", fw_header->patch_id);
- revision = fw_header->patch_id;
+ *revision = fw_header->patch_id;
selected_fw = fw_header;
selected_size = section_header->size;
}
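With revision now passed by pointer, ucode_amd_find() reports the patch level it selected back to the caller, as the ucode.c hunk above already exploits. A usage sketch (the surrounding function is hypothetical; rdmsr/printf assume kernel or loader context):

static const void *
select_update_sketch(uint32_t signature, const uint8_t *fw_data, size_t fw_size)
{
	uint32_t revision = rdmsr(MSR_BIOS_SIGN);
	size_t len = fw_size;
	const void *sel;

	sel = ucode_amd_find("loader blob", signature, &revision, fw_data,
	    fw_size, &len);
	if (sel != NULL)
		printf("selected patch_id %#x\n", revision);	/* updated value */
	return (sel);
}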