author     Mark Johnston <markj@FreeBSD.org>   2021-09-21 15:36:55 +0000
committer  Mark Johnston <markj@FreeBSD.org>   2021-09-21 16:07:47 +0000
commit     9068f6ea697b1b28ad1326a4c7a9ba86f08b985e (patch)
tree       b27f5bdbc2ccb1d346c7a315560957a76ee288d8
parent     dfd3bde5775ecf88851d5dffd6a8ed6076b53566 (diff)
cpuset(9): Add CPU_FOREACH_IS(SET|CLR) and modify consumers to use it

This implementation is faster and doesn't modify the cpuset, so it lets
us avoid some unnecessary copying as well.

No functional change intended.

Reviewed by:	cem, kib, jhb
MFC after:	2 weeks
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D32029
-rw-r--r--   sys/amd64/amd64/mp_machdep.c   11
-rw-r--r--   sys/amd64/vmm/io/vlapic.c      13
-rw-r--r--   sys/amd64/vmm/vmm_lapic.c       4
-rw-r--r--   sys/i386/i386/mp_machdep.c      4
-rw-r--r--   sys/sys/cpuset.h                2
-rw-r--r--   sys/x86/x86/mp_x86.c            4

6 files changed, 11 insertions, 27 deletions
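
Each consumer below is converted the same way: the destructive CPU_FFS()
loop, which has to clear bits (and therefore work on a copy of the set),
becomes a non-destructive CPU_FOREACH_ISSET() loop. The following is an
illustrative, userland-flavored sketch of that pattern, not kernel code;
old_style(), new_style() and do_work() are placeholder names, and it
assumes FreeBSD's <sys/cpuset.h> with the CPU_FOREACH_ISSET() macro added
by this change.

/*
 * Sketch of the conversion pattern applied by this commit.
 * Placeholder names; not taken from the kernel sources.
 */
#include <sys/param.h>
#include <sys/cpuset.h>

#include <stdio.h>

static void
do_work(int cpu)
{
	printf("CPU %d\n", cpu);	/* stand-in for the per-CPU action */
}

static void
old_style(cpuset_t mask)
{
	int cpu;

	/*
	 * CPU_FFS() returns the lowest set bit (1-based), so the loop
	 * must clear each bit as it visits it.  That destroys 'mask',
	 * which is why the kernel consumers kept a scratch copy (e.g.
	 * 'mask1' in smp_targeted_tlb_shootdown()).
	 */
	while ((cpu = CPU_FFS(&mask)) != 0) {
		cpu--;
		CPU_CLR(cpu, &mask);
		do_work(cpu);
	}
}

static void
new_style(cpuset_t mask)
{
	int cpu;

	/* The iterator does not modify 'mask', so no copy is needed. */
	CPU_FOREACH_ISSET(cpu, &mask) {
		do_work(cpu);
	}
}

int
main(void)
{
	cpuset_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	CPU_SET(2, &mask);

	old_style(mask);	/* visits CPUs 0 and 2 */
	new_style(mask);	/* same result, no scratch copy */
	return (0);
}
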
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index 6c66bd622855..16ec277e9c34 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -618,7 +618,7 @@ static void
smp_targeted_tlb_shootdown(cpuset_t mask, pmap_t pmap, vm_offset_t addr1,
vm_offset_t addr2, smp_invl_cb_t curcpu_cb, enum invl_op_codes op)
{
- cpuset_t other_cpus, mask1;
+ cpuset_t other_cpus;
uint32_t generation, *p_cpudone;
int cpu;
bool is_all;
@@ -662,10 +662,7 @@ smp_targeted_tlb_shootdown(cpuset_t mask, pmap_t pmap, vm_offset_t addr1,
/* Fence between filling smp_tlb fields and clearing scoreboard. */
atomic_thread_fence_rel();
- mask1 = mask;
- while ((cpu = CPU_FFS(&mask1)) != 0) {
- cpu--;
- CPU_CLR(cpu, &mask1);
+ CPU_FOREACH_ISSET(cpu, &mask) {
KASSERT(*invl_scoreboard_slot(cpu) != 0,
("IPI scoreboard is zero, initiator %d target %d",
PCPU_GET(cpuid), cpu));
@@ -686,9 +683,7 @@ smp_targeted_tlb_shootdown(cpuset_t mask, pmap_t pmap, vm_offset_t addr1,
ipi_selected(mask, IPI_INVLOP);
}
curcpu_cb(pmap, addr1, addr2);
- while ((cpu = CPU_FFS(&other_cpus)) != 0) {
- cpu--;
- CPU_CLR(cpu, &other_cpus);
+ CPU_FOREACH_ISSET(cpu, &other_cpus) {
p_cpudone = invl_scoreboard_slot(cpu);
while (atomic_load_int(p_cpudone) != generation)
ia32_pause();
diff --git a/sys/amd64/vmm/io/vlapic.c b/sys/amd64/vmm/io/vlapic.c
index 06df1c1a87e5..4e7ddbafd447 100644
--- a/sys/amd64/vmm/io/vlapic.c
+++ b/sys/amd64/vmm/io/vlapic.c
@@ -860,10 +860,7 @@ vlapic_calcdest(struct vm *vm, cpuset_t *dmask, uint32_t dest, bool phys,
*/
CPU_ZERO(dmask);
amask = vm_active_cpus(vm);
- while ((vcpuid = CPU_FFS(&amask)) != 0) {
- vcpuid--;
- CPU_CLR(vcpuid, &amask);
-
+ CPU_FOREACH_ISSET(vcpuid, &amask) {
vlapic = vm_lapic(vm, vcpuid);
dfr = vlapic->apic_page->dfr;
ldr = vlapic->apic_page->ldr;
@@ -1003,9 +1000,7 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic, bool *retu)
break;
}
- while ((i = CPU_FFS(&dmask)) != 0) {
- i--;
- CPU_CLR(i, &dmask);
+ CPU_FOREACH_ISSET(i, &dmask) {
if (mode == APIC_DELMODE_FIXED) {
lapic_intr_edge(vlapic->vm, i, vec);
vmm_stat_array_incr(vlapic->vm, vlapic->vcpuid,
@@ -1554,9 +1549,7 @@ vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest, bool phys,
*/
vlapic_calcdest(vm, &dmask, dest, phys, lowprio, false);
- while ((vcpuid = CPU_FFS(&dmask)) != 0) {
- vcpuid--;
- CPU_CLR(vcpuid, &dmask);
+ CPU_FOREACH_ISSET(vcpuid, &dmask) {
if (delmode == IOART_DELEXINT) {
vm_inject_extint(vm, vcpuid);
} else {
diff --git a/sys/amd64/vmm/vmm_lapic.c b/sys/amd64/vmm/vmm_lapic.c
index 89a1ebc8eff9..8191da758100 100644
--- a/sys/amd64/vmm/vmm_lapic.c
+++ b/sys/amd64/vmm/vmm_lapic.c
@@ -87,9 +87,7 @@ lapic_set_local_intr(struct vm *vm, int cpu, int vector)
else
CPU_SETOF(cpu, &dmask);
error = 0;
- while ((cpu = CPU_FFS(&dmask)) != 0) {
- cpu--;
- CPU_CLR(cpu, &dmask);
+ CPU_FOREACH_ISSET(cpu, &dmask) {
vlapic = vm_lapic(vm, cpu);
error = vlapic_trigger_lvt(vlapic, vector);
if (error)
diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c
index 156702118c45..777aefa021b3 100644
--- a/sys/i386/i386/mp_machdep.c
+++ b/sys/i386/i386/mp_machdep.c
@@ -598,9 +598,7 @@ smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, pmap_t pmap,
ipi_selected(mask, vector);
}
curcpu_cb(pmap, addr1, addr2);
- while ((cpu = CPU_FFS(&other_cpus)) != 0) {
- cpu--;
- CPU_CLR(cpu, &other_cpus);
+ CPU_FOREACH_ISSET(cpu, &other_cpus) {
p_cpudone = &cpuid_to_pcpu[cpu]->pc_smp_tlb_done;
while (*p_cpudone != generation)
ia32_pause();
diff --git a/sys/sys/cpuset.h b/sys/sys/cpuset.h
index 1a96bb4766ce..9ef1a65f4506 100644
--- a/sys/sys/cpuset.h
+++ b/sys/sys/cpuset.h
@@ -66,6 +66,8 @@
#define CPU_COPY_STORE_REL(f, t) BIT_COPY_STORE_REL(CPU_SETSIZE, f, t)
#define CPU_FFS(p) BIT_FFS(CPU_SETSIZE, p)
#define CPU_FLS(p) BIT_FLS(CPU_SETSIZE, p)
+#define CPU_FOREACH_ISSET(i, p) BIT_FOREACH_ISSET(CPU_SETSIZE, i, p)
+#define CPU_FOREACH_ISCLR(i, p) BIT_FOREACH_ISCLR(CPU_SETSIZE, i, p)
#define CPU_COUNT(p) ((int)BIT_COUNT(CPU_SETSIZE, p))
#define CPUSET_FSET BITSET_FSET(_NCPUWORDS)
#define CPUSET_T_INITIALIZER BITSET_T_INITIALIZER
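
The two new macros are thin wrappers around BIT_FOREACH_ISSET() and
BIT_FOREACH_ISCLR() from sys/bitset.h. As a rough idea of why an iterator
over set bits avoids the copy-and-clear loop, here is a minimal
single-word sketch; WORD_FOREACH_ISSET is a made-up name for illustration
only, and the real sys/bitset.h macros, which also have to walk sets wider
than one word, are not reproduced here.

/*
 * Hypothetical single-word illustration; not the BIT_FOREACH_ISSET()
 * implementation from sys/bitset.h.
 */
#include <stdio.h>
#include <strings.h>		/* ffsl() */

/*
 * Visit each set bit of the word pointed to by wp, lowest first.  A
 * private copy (__w) is advanced with __w &= __w - 1, which clears its
 * lowest set bit, so *wp itself is never modified.
 */
#define WORD_FOREACH_ISSET(i, wp)					\
	for (unsigned long __w = *(wp);					\
	    ((i) = ffsl((long)__w) - 1), __w != 0;			\
	    __w &= __w - 1)

int
main(void)
{
	unsigned long mask = 0x29;	/* bits 0, 3 and 5 set */
	int bit;

	WORD_FOREACH_ISSET(bit, &mask)
		printf("bit %d is set\n", bit);	/* prints 0, 3, 5 */
	return (0);
}
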
diff --git a/sys/x86/x86/mp_x86.c b/sys/x86/x86/mp_x86.c
index 5e9a9735b09a..09d05d3f6de4 100644
--- a/sys/x86/x86/mp_x86.c
+++ b/sys/x86/x86/mp_x86.c
@@ -1290,9 +1290,7 @@ ipi_selected(cpuset_t cpus, u_int ipi)
if (ipi == IPI_STOP_HARD)
CPU_OR_ATOMIC(&ipi_stop_nmi_pending, &cpus);
- while ((cpu = CPU_FFS(&cpus)) != 0) {
- cpu--;
- CPU_CLR(cpu, &cpus);
+ CPU_FOREACH_ISSET(cpu, &cpus) {
CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
ipi_send_cpu(cpu, ipi);
}