aboutsummaryrefslogtreecommitdiff
path: root/sys
diff options
context:
space:
mode:
authorStefan Eßer <se@FreeBSD.org>2021-12-30 11:20:32 +0000
committerStefan Eßer <se@FreeBSD.org>2021-12-30 11:20:32 +0000
commite2650af157bc7489deaf2c9054995f0f88a6e5da (patch)
tree21f452bd8fa6cfb4196cd06bab318092c9ea157b /sys
parent133180557479cd9676758e6f3f93a9d3e1c6b532 (diff)
downloadsrc-e2650af157bc7489deaf2c9054995f0f88a6e5da.tar.gz
src-e2650af157bc7489deaf2c9054995f0f88a6e5da.zip
Make CPU_SET macros compliant with other implementations
The introduction of <sched.h> improved compatibility with some 3rd party software, but caused the configure scripts of some ports to assume that they were run in a GLIBC compatible environment. Parts of sched.h were made conditional on -D_WITH_CPU_SET_T being added to ports, but there still were compatibility issues due to invalid assumptions made in autoconfigure scripts. The differences between the FreeBSD version of macros like CPU_AND, CPU_OR, etc. and the GLIBC versions were in the number of arguments: FreeBSD used a 2-address scheme (one source argument is also used as the destination of the operation), while GLIBC uses a 3-address scheme (2 source operands and a separately passed destination). The GLIBC scheme provides a super-set of the functionality of the FreeBSD macros, since it does not prevent passing the same variable as source and destination arguments. In code that wanted to preserve both source arguments, the FreeBSD macros required a temporary copy of one of the source arguments. This patch set allows unconditionally providing functions and macros expected by 3rd party software written for GLIBC based systems, but breaks builds of externally maintained sources that use any of the following macros: CPU_AND, CPU_ANDNOT, CPU_OR, CPU_XOR. One contributed driver (contrib/ofed/libmlx5) has been patched to support both the old and the new CPU_OR signatures. If this commit is merged to -STABLE, the version test will have to be extended to cover more ranges. Ports that have added -D_WITH_CPU_SET_T to build on -CURRENT no longer require that option. The FreeBSD version has been bumped to 1400046 to reflect this incompatible change. Reviewed by: kib MFC after: 2 weeks Relnotes: yes Differential Revision: https://reviews.freebsd.org/D33451
Diffstat (limited to 'sys')
-rw-r--r--sys/amd64/amd64/pmap.c2
-rw-r--r--sys/amd64/vmm/vmm.c2
-rw-r--r--sys/dev/acpica/acpi.c2
-rw-r--r--sys/i386/i386/pmap.c6
-rw-r--r--sys/i386/i386/vm_machdep.c4
-rw-r--r--sys/kern/kern_cpuset.c15
-rw-r--r--sys/kern/kern_rmlock.c3
-rw-r--r--sys/kern/sched_4bsd.c10
-rw-r--r--sys/kern/subr_kdb.c5
-rw-r--r--sys/kern/subr_smp.c2
-rw-r--r--sys/net/iflib.c2
-rw-r--r--sys/powerpc/ofw/ofw_pcibus.c2
-rw-r--r--sys/sys/_cpuset.h9
-rw-r--r--sys/sys/bitset.h8
-rw-r--r--sys/sys/cpuset.h22
-rw-r--r--sys/sys/param.h2
-rw-r--r--sys/x86/x86/cpu_machdep.c2
17 files changed, 58 insertions, 40 deletions
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 42ad1bd24136..a2d55b219505 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -8290,7 +8290,7 @@ pmap_remove_pages(pmap_t pmap)
other_cpus = all_cpus;
critical_enter();
CPU_CLR(PCPU_GET(cpuid), &other_cpus);
- CPU_AND(&other_cpus, &pmap->pm_active);
+ CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
critical_exit();
KASSERT(CPU_EMPTY(&other_cpus), ("pmap active %p", pmap));
}
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index 80fcde8d80fa..f0674784903f 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -1304,7 +1304,7 @@ vm_handle_rendezvous(struct vm *vm, int vcpuid)
mtx_lock(&vm->rendezvous_mtx);
while (vm->rendezvous_func != NULL) {
/* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
- CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);
+ CPU_AND(&vm->rendezvous_req_cpus, &vm->rendezvous_req_cpus, &vm->active_cpus);
if (vcpuid != -1 &&
CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c
index 0d03fa063988..cd465fdce72b 100644
--- a/sys/dev/acpica/acpi.c
+++ b/sys/dev/acpica/acpi.c
@@ -1173,7 +1173,7 @@ acpi_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize,
return (error);
if (setsize != sizeof(cpuset_t))
return (EINVAL);
- CPU_AND(cpuset, &cpuset_domain[d]);
+ CPU_AND(cpuset, cpuset, &cpuset_domain[d]);
return (0);
default:
return (bus_generic_get_cpus(dev, child, op, setsize, cpuset));
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 3b8e335701d2..f7c0cb6b68ec 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -1243,7 +1243,7 @@ pmap_invalidate_page_int(pmap_t pmap, vm_offset_t va)
cpuid = PCPU_GET(cpuid);
other_cpus = all_cpus;
CPU_CLR(cpuid, &other_cpus);
- CPU_AND(&other_cpus, &pmap->pm_active);
+ CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
mask = &other_cpus;
}
smp_masked_invlpg(*mask, va, pmap, pmap_curcpu_cb_dummy);
@@ -1276,7 +1276,7 @@ pmap_invalidate_range_int(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
cpuid = PCPU_GET(cpuid);
other_cpus = all_cpus;
CPU_CLR(cpuid, &other_cpus);
- CPU_AND(&other_cpus, &pmap->pm_active);
+ CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
mask = &other_cpus;
}
smp_masked_invlpg_range(*mask, sva, eva, pmap, pmap_curcpu_cb_dummy);
@@ -1299,7 +1299,7 @@ pmap_invalidate_all_int(pmap_t pmap)
cpuid = PCPU_GET(cpuid);
other_cpus = all_cpus;
CPU_CLR(cpuid, &other_cpus);
- CPU_AND(&other_cpus, &pmap->pm_active);
+ CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
mask = &other_cpus;
}
smp_masked_invltlb(*mask, pmap, pmap_curcpu_cb_dummy);
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index 9dbcb9c909ce..ba1bc996bda4 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -604,9 +604,9 @@ sf_buf_shootdown(struct sf_buf *sf, int flags)
if ((flags & SFB_CPUPRIVATE) == 0) {
other_cpus = all_cpus;
CPU_CLR(cpuid, &other_cpus);
- CPU_ANDNOT(&other_cpus, &sf->cpumask);
+ CPU_ANDNOT(&other_cpus, &other_cpus, &sf->cpumask);
if (!CPU_EMPTY(&other_cpus)) {
- CPU_OR(&sf->cpumask, &other_cpus);
+ CPU_OR(&sf->cpumask, &sf->cpumask, &other_cpus);
smp_masked_invlpg(other_cpus, sf->kva, kernel_pmap,
sf_buf_shootdown_curcpu_cb);
}
diff --git a/sys/kern/kern_cpuset.c b/sys/kern/kern_cpuset.c
index 119df57c7b02..962e7abb44b7 100644
--- a/sys/kern/kern_cpuset.c
+++ b/sys/kern/kern_cpuset.c
@@ -326,7 +326,7 @@ cpuset_init(struct cpuset *set, struct cpuset *parent,
set->cs_flags = 0;
mtx_lock_spin(&cpuset_lock);
set->cs_domain = domain;
- CPU_AND(&set->cs_mask, &parent->cs_mask);
+ CPU_AND(&set->cs_mask, &set->cs_mask, &parent->cs_mask);
set->cs_id = id;
set->cs_parent = cpuset_ref(parent);
LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
@@ -645,8 +645,7 @@ cpuset_testupdate(struct cpuset *set, cpuset_t *mask, int augment_mask)
if (set->cs_flags & CPU_SET_RDONLY)
return (EPERM);
if (augment_mask) {
- CPU_COPY(&set->cs_mask, &newmask);
- CPU_AND(&newmask, mask);
+ CPU_AND(&newmask, &set->cs_mask, mask);
} else
CPU_COPY(mask, &newmask);
@@ -668,7 +667,7 @@ cpuset_update(struct cpuset *set, cpuset_t *mask)
struct cpuset *nset;
mtx_assert(&cpuset_lock, MA_OWNED);
- CPU_AND(&set->cs_mask, mask);
+ CPU_AND(&set->cs_mask, &set->cs_mask, mask);
LIST_FOREACH(nset, &set->cs_children, cs_siblings)
cpuset_update(nset, &set->cs_mask);
@@ -1083,8 +1082,7 @@ cpuset_setproc_setthread_mask(struct cpuset *tdset, struct cpuset *set,
* restriction to the new set, otherwise take it wholesale.
*/
if (CPU_CMP(&tdset->cs_mask, &parent->cs_mask) != 0) {
- CPU_COPY(&tdset->cs_mask, mask);
- CPU_AND(mask, &set->cs_mask);
+ CPU_AND(mask, &tdset->cs_mask, &set->cs_mask);
} else
CPU_COPY(&set->cs_mask, mask);
@@ -1153,8 +1151,7 @@ cpuset_setproc_newbase(struct thread *td, struct cpuset *set,
pbase = cpuset_getbase(td->td_cpuset);
/* Copy process mask, then further apply the new root mask. */
- CPU_COPY(&pbase->cs_mask, &nmask);
- CPU_AND(&nmask, &nroot->cs_mask);
+ CPU_AND(&nmask, &pbase->cs_mask, &nroot->cs_mask);
domainset_copy(pbase->cs_domain, &ndomain);
DOMAINSET_AND(&ndomain.ds_mask, &set->cs_domain->ds_mask);
@@ -1946,7 +1943,7 @@ kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
case CPU_WHICH_PID:
FOREACH_THREAD_IN_PROC(p, ttd) {
thread_lock(ttd);
- CPU_OR(mask, &ttd->td_cpuset->cs_mask);
+ CPU_OR(mask, mask, &ttd->td_cpuset->cs_mask);
thread_unlock(ttd);
}
break;
diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
index f9b5559a648c..5e64f79e3836 100644
--- a/sys/kern/kern_rmlock.c
+++ b/sys/kern/kern_rmlock.c
@@ -548,8 +548,7 @@ _rm_wlock(struct rmlock *rm)
if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
/* Get all read tokens back */
- readcpus = all_cpus;
- CPU_ANDNOT(&readcpus, &rm->rm_writecpus);
+ CPU_ANDNOT(&readcpus, &all_cpus, &rm->rm_writecpus);
rm->rm_writecpus = all_cpus;
/*
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 25f8bfc04d48..ae8c77f3aa4a 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -1165,8 +1165,8 @@ forward_wakeup(int cpunum)
return (0);
CPU_SETOF(me, &dontuse);
- CPU_OR(&dontuse, &stopped_cpus);
- CPU_OR(&dontuse, &hlt_cpus_mask);
+ CPU_OR(&dontuse, &dontuse, &stopped_cpus);
+ CPU_OR(&dontuse, &dontuse, &hlt_cpus_mask);
CPU_ZERO(&map2);
if (forward_wakeup_use_loop) {
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
@@ -1179,8 +1179,7 @@ forward_wakeup(int cpunum)
}
if (forward_wakeup_use_mask) {
- map = idle_cpus_mask;
- CPU_ANDNOT(&map, &dontuse);
+ CPU_ANDNOT(&map, &idle_cpus_mask, &dontuse);
/* If they are both on, compare and use loop if different. */
if (forward_wakeup_use_loop) {
@@ -1366,8 +1365,7 @@ sched_add(struct thread *td, int flags)
kick_other_cpu(td->td_priority, cpu);
} else {
if (!single_cpu) {
- tidlemsk = idle_cpus_mask;
- CPU_ANDNOT(&tidlemsk, &hlt_cpus_mask);
+ CPU_ANDNOT(&tidlemsk, &idle_cpus_mask, &hlt_cpus_mask);
CPU_CLR(cpuid, &tidlemsk);
if (!CPU_ISSET(cpuid, &idle_cpus_mask) &&
diff --git a/sys/kern/subr_kdb.c b/sys/kern/subr_kdb.c
index 4f439ff103d7..2218c20586f2 100644
--- a/sys/kern/subr_kdb.c
+++ b/sys/kern/subr_kdb.c
@@ -707,8 +707,7 @@ kdb_trap(int type, int code, struct trapframe *tf)
if (!SCHEDULER_STOPPED()) {
#ifdef SMP
- other_cpus = all_cpus;
- CPU_ANDNOT(&other_cpus, &stopped_cpus);
+ CPU_ANDNOT(&other_cpus, &all_cpus, &stopped_cpus);
CPU_CLR(PCPU_GET(cpuid), &other_cpus);
stop_cpus_hard(other_cpus);
#endif
@@ -746,7 +745,7 @@ kdb_trap(int type, int code, struct trapframe *tf)
if (did_stop_cpus) {
curthread->td_stopsched = 0;
#ifdef SMP
- CPU_AND(&other_cpus, &stopped_cpus);
+ CPU_AND(&other_cpus, &other_cpus, &stopped_cpus);
restart_cpus(other_cpus);
#endif
}
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index bfe890d773f9..a677075937e6 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -761,7 +761,7 @@ smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
parent,
cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
- CPU_OR(&parent->cg_mask, &child->cg_mask);
+ CPU_OR(&parent->cg_mask, &parent->cg_mask, &child->cg_mask);
parent->cg_count += child->cg_count;
}
diff --git a/sys/net/iflib.c b/sys/net/iflib.c
index 599d9f93bcbc..3fcab699f9e0 100644
--- a/sys/net/iflib.c
+++ b/sys/net/iflib.c
@@ -4970,7 +4970,7 @@ get_ctx_core_offset(if_ctx_t ctx)
for (i = 0; i < scctx->isc_nrxqsets; i++)
CPU_SET(get_cpuid_for_queue(ctx, first_valid, i, false),
&assigned_cpus);
- CPU_AND(&assigned_cpus, &ctx->ifc_cpus);
+ CPU_AND(&assigned_cpus, &assigned_cpus, &ctx->ifc_cpus);
cores_consumed = CPU_COUNT(&assigned_cpus);
mtx_lock(&cpu_offset_mtx);
diff --git a/sys/powerpc/ofw/ofw_pcibus.c b/sys/powerpc/ofw/ofw_pcibus.c
index 2c0a78812cc6..cdda3ee779f4 100644
--- a/sys/powerpc/ofw/ofw_pcibus.c
+++ b/sys/powerpc/ofw/ofw_pcibus.c
@@ -405,7 +405,7 @@ ofw_pcibus_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsi
return (error);
if (setsize != sizeof(cpuset_t))
return (EINVAL);
- CPU_AND(cpuset, &cpuset_domain[d]);
+ CPU_AND(cpuset, cpuset, &cpuset_domain[d]);
return (0);
default:
return (bus_generic_get_cpus(dev, child, op, setsize, cpuset));
diff --git a/sys/sys/_cpuset.h b/sys/sys/_cpuset.h
index 9033acb9ba14..fb39420238c8 100644
--- a/sys/sys/_cpuset.h
+++ b/sys/sys/_cpuset.h
@@ -1,4 +1,4 @@
-/*-
+#/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
@@ -49,4 +49,11 @@
__BITSET_DEFINE(_cpuset, CPU_SETSIZE);
typedef struct _cpuset cpuset_t;
+#ifndef _KERNEL
+__BEGIN_DECLS
+cpuset_t *__cpuset_alloc(size_t set_size);
+void __cpuset_free(cpuset_t *ptr);
+__END_DECLS
+#endif
+
#endif /* !_SYS__CPUSET_H_ */
diff --git a/sys/sys/bitset.h b/sys/sys/bitset.h
index 47c4106d4127..4952d48b1b3c 100644
--- a/sys/sys/bitset.h
+++ b/sys/sys/bitset.h
@@ -312,8 +312,6 @@
/*
* Dynamically allocate a bitset.
*/
-#define __BITSET_ALLOC(_s, mt, mf) malloc(__BITSET_SIZE((_s)), mt, (mf))
-
#define BIT_AND(_s, d, s) __BIT_AND(_s, d, s)
#define BIT_AND2(_s, d, s1, s2) __BIT_AND2(_s, d, s1, s2)
#define BIT_ANDNOT(_s, d, s) __BIT_ANDNOT(_s, d, s)
@@ -351,7 +349,11 @@
#define BIT_XOR2(_s, d, s1, s2) __BIT_XOR2(_s, d, s1, s2)
#define BIT_ZERO(_s, p) __BIT_ZERO(_s, p)
-#define BITSET_ALLOC(_s, mt, mf) __BITSET_ALLOC(_s, mt, mf)
+#if defined(_KERNEL)
+#define BITSET_ALLOC(_s, mt, mf) malloc(__BITSET_SIZE((_s)), mt, (mf))
+#define BITSET_FREE(p, mt) free(p, mt)
+#endif /* _KERNEL */
+
#define BITSET_FSET(n) __BITSET_FSET(n)
#define BITSET_SIZE(_s) __BITSET_SIZE(_s)
#define BITSET_T_INITIALIZER(x) __BITSET_T_INITIALIZER(x)
diff --git a/sys/sys/cpuset.h b/sys/sys/cpuset.h
index 71317670c593..9f777e3af523 100644
--- a/sys/sys/cpuset.h
+++ b/sys/sys/cpuset.h
@@ -36,6 +36,7 @@
#include <sys/_cpuset.h>
+#include <sys/_bitset.h>
#include <sys/bitset.h>
#define _NCPUBITS _BITSET_BITS
@@ -56,9 +57,10 @@
#define CPU_SUBSET(p, c) __BIT_SUBSET(CPU_SETSIZE, p, c)
#define CPU_OVERLAP(p, c) __BIT_OVERLAP(CPU_SETSIZE, p, c)
#define CPU_CMP(p, c) __BIT_CMP(CPU_SETSIZE, p, c)
-#define CPU_OR(d, s) __BIT_OR(CPU_SETSIZE, d, s)
-#define CPU_AND(d, s) __BIT_AND(CPU_SETSIZE, d, s)
-#define CPU_ANDNOT(d, s) __BIT_ANDNOT(CPU_SETSIZE, d, s)
+#define CPU_OR(d, s1, s2) __BIT_OR2(CPU_SETSIZE, d, s1, s2)
+#define CPU_AND(d, s1, s2) __BIT_AND2(CPU_SETSIZE, d, s1, s2)
+#define CPU_ANDNOT(d, s1, s2) __BIT_ANDNOT2(CPU_SETSIZE, d, s1, s2)
+#define CPU_XOR(d, s1, s2) __BIT_XOR2(CPU_SETSIZE, d, s1, s2)
#define CPU_CLR_ATOMIC(n, p) __BIT_CLR_ATOMIC(CPU_SETSIZE, n, p)
#define CPU_SET_ATOMIC(n, p) __BIT_SET_ATOMIC(CPU_SETSIZE, n, p)
#define CPU_SET_ATOMIC_ACQ(n, p) __BIT_SET_ATOMIC_ACQ(CPU_SETSIZE, n, p)
@@ -73,6 +75,20 @@
#define CPUSET_FSET __BITSET_FSET(_NCPUWORDS)
#define CPUSET_T_INITIALIZER(x) __BITSET_T_INITIALIZER(x)
+#if !defined(_KERNEL)
+#define CPU_ALLOC_SIZE(_s) __BITSET_SIZE(_s)
+#define CPU_ALLOC(_s) __cpuset_alloc(_s)
+#define CPU_FREE(p) __cpuset_free(p)
+
+#define CPU_ISSET_S(n, _s, p) __BIT_ISSET(_s, n, p)
+#define CPU_SET_S(n, _s, p) __BIT_SET(_s, n, p)
+#define CPU_ZERO_S(_s, p) __BIT_ZERO(_s, p)
+
+#define CPU_OR_S(_s, d, s1, s2) __BIT_OR2(_s, d, s1, s2)
+#define CPU_AND_S(_s, d, s1, s2) __BIT_AND2(_s, d, s1, s2)
+#define CPU_XOR_S(_s, d, s1, s2) __BIT_XOR2(_s, d, s1, s2)
+#endif
+
/*
* Valid cpulevel_t values.
*/
diff --git a/sys/sys/param.h b/sys/sys/param.h
index 03c66c2d1b79..d661ff40162a 100644
--- a/sys/sys/param.h
+++ b/sys/sys/param.h
@@ -76,7 +76,7 @@
* cannot include sys/param.h and should only be updated here.
*/
#undef __FreeBSD_version
-#define __FreeBSD_version 1400045
+#define __FreeBSD_version 1400046
/*
* __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,
diff --git a/sys/x86/x86/cpu_machdep.c b/sys/x86/x86/cpu_machdep.c
index c5026ed8acda..f20611ffa20e 100644
--- a/sys/x86/x86/cpu_machdep.c
+++ b/sys/x86/x86/cpu_machdep.c
@@ -475,7 +475,7 @@ cpu_reset(void)
if (smp_started) {
map = all_cpus;
CPU_CLR(PCPU_GET(cpuid), &map);
- CPU_ANDNOT(&map, &stopped_cpus);
+ CPU_ANDNOT(&map, &map, &stopped_cpus);
if (!CPU_EMPTY(&map)) {
printf("cpu_reset: Stopping other CPUs\n");
stop_cpus(map);