path: root/sys/ia64/ia64/mp_machdep.c
author     Marcel Moolenaar <marcel@FreeBSD.org>  2010-03-17 00:37:15 +0000
committer  Marcel Moolenaar <marcel@FreeBSD.org>  2010-03-17 00:37:15 +0000
commit     3804454ac0e4fa672cade1e83fbed72f0b3ad61b (patch)
tree       23d71a6afc74f7cdf6522ea5d3b42da8cb8da732 /sys/ia64/ia64/mp_machdep.c
parent     d740b7c7e8cb6dd65356a99879c0ec91c4781e67 (diff)
Revamp the interrupt code based on the previous commit:
o Introduce XIV, eXternal Interrupt Vector, to differentiate from the
  interrupt vectors that are offsets in the IVT (Interrupt Vector Table).
  There's a vector for external interrupts, which are based on the XIVs.
o Keep track of allocated and reserved XIVs so that we can assign XIVs
  without hardcoding anything. When XIVs are allocated, an interrupt
  handler and a class are specified for the XIV. Classes are:
  1. architecture-defined: XIV 15 is returned when no external interrupts
     are pending,
  2. platform-defined: SAL reports which XIV is used to wake up an AP
     (typically 0xFF, but it's 0x12 for the Altix 350),
  3. inter-processor interrupts: allocated for SMP support and
     non-redirectable,
  4. device interrupts (i.e. IRQs): allocated when devices are discovered
     and redirectable.
o Rewrite the central interrupt handler to call the per-XIV interrupt
  handler and rename it to ia64_handle_intr(). Move the per-XIV handler
  implementation to the file where we have the XIV allocation/reservation.
  Clock interrupt handling is moved to clock.c. IPI handling is moved to
  mp_machdep.c.
o Drop support for the Intel 8259A because it was broken. When XIV 0 is
  received, the CPU should initiate an INTA cycle to obtain the interrupt
  vector of the 8259-based interrupt. In that case the interrupt controller
  we should be talking to with respect to masking and signalling EOI is the
  8259 and not the I/O SAPIC. This requires a driver for the Intel 8259A,
  which isn't available for ia64. Thus stop pretending to support ExtINTs
  and instead panic(), so that if we come across hardware that has an Intel
  8259A we have something real to work with.
o With XIVs for IPIs dynamically allocated and also based on priority,
  define the IPI_* symbols as variables rather than constants. The variable
  holds the XIV allocated for the IPI.
o IPI_STOP_HARD delivers an NMI if possible. Otherwise the XIV assigned to
  IPI_STOP is delivered.
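
For illustration only (not part of this commit), the per-XIV IPI pattern
described above looks roughly like the sketch below. The handler signature,
ia64_xiv_alloc(), IA64_XIV_IPI, the PI_* priority, ia64_set_eoi() and
ia64_srlz_d() are taken from the diff that follows; the ia64_ipi_example,
ia64_ih_example and ia64_ipi_example_init names are hypothetical, and the
usual kernel includes are omitted.

/* Hypothetical sketch of the XIV-based IPI pattern; not a commit excerpt. */
static int ia64_ipi_example;	/* XIV allocated for this IPI at SMP startup */

static u_int
ia64_ih_example(struct thread *td, u_int xiv, struct trapframe *tf)
{

	ia64_set_eoi(0);		/* acknowledge the external interrupt */
	/* ... perform the per-CPU work for this IPI here ... */
	ia64_srlz_d();			/* serialize the EOI before returning */
	return (0);
}

static void
ia64_ipi_example_init(void)
{

	/* Allocate a non-redirectable XIV for the IPI and bind its handler. */
	ia64_ipi_example = ia64_xiv_alloc(PI_SOFT, IA64_XIV_IPI,
	    ia64_ih_example);
}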
Notes: svn path=/head/; revision=205234
Diffstat (limited to 'sys/ia64/ia64/mp_machdep.c')
-rw-r--r--  sys/ia64/ia64/mp_machdep.c  126
1 file changed, 96 insertions, 30 deletions
diff --git a/sys/ia64/ia64/mp_machdep.c b/sys/ia64/ia64/mp_machdep.c
index c5ed48f501d6..b1d32f706c1a 100644
--- a/sys/ia64/ia64/mp_machdep.c
+++ b/sys/ia64/ia64/mp_machdep.c
@@ -46,11 +46,6 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/uuid.h>
-#include <vm/vm.h>
-#include <vm/pmap.h>
-#include <vm/vm_extern.h>
-#include <vm/vm_kern.h>
-
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/fpu.h>
@@ -59,10 +54,13 @@ __FBSDID("$FreeBSD$");
#include <machine/md_var.h>
#include <machine/pal.h>
#include <machine/pcb.h>
-#include <machine/pmap.h>
#include <machine/sal.h>
#include <machine/smp.h>
-#include <i386/include/specialreg.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
MALLOC_DEFINE(M_SMP, "SMP", "SMP related allocations");
@@ -81,7 +79,78 @@ volatile int ap_delay;
volatile int ap_awake;
volatile int ap_spin;
-static void cpu_mp_unleash(void *);
+int ia64_ipi_ast;
+int ia64_ipi_highfp;
+int ia64_ipi_nmi;
+int ia64_ipi_preempt;
+int ia64_ipi_rndzvs;
+int ia64_ipi_stop;
+
+static u_int
+ia64_ih_ast(struct thread *td, u_int xiv, struct trapframe *tf)
+{
+
+ ia64_set_eoi(0);
+ PCPU_INC(md.stats.pcs_nasts);
+ CTR1(KTR_SMP, "IPI_AST, cpuid=%d", PCPU_GET(cpuid));
+ ia64_srlz_d();
+ return (0);
+}
+
+static u_int
+ia64_ih_highfp(struct thread *td, u_int xiv, struct trapframe *tf)
+{
+
+ ia64_set_eoi(0);
+ PCPU_INC(md.stats.pcs_nhighfps);
+ ia64_highfp_save_ipi();
+ ia64_srlz_d();
+ return (0);
+}
+
+static u_int
+ia64_ih_preempt(struct thread *td, u_int xiv, struct trapframe *tf)
+{
+
+ ia64_set_eoi(0);
+ PCPU_INC(md.stats.pcs_npreempts);
+ CTR1(KTR_SMP, "IPI_PREEMPT, cpuid=%d", PCPU_GET(cpuid));
+ sched_preempt(curthread);
+ ia64_srlz_d();
+ return (0);
+}
+
+static u_int
+ia64_ih_rndzvs(struct thread *td, u_int xiv, struct trapframe *tf)
+{
+
+ ia64_set_eoi(0);
+ PCPU_INC(md.stats.pcs_nrdvs);
+ CTR1(KTR_SMP, "IPI_RENDEZVOUS, cpuid=%d", PCPU_GET(cpuid));
+ smp_rendezvous_action();
+ ia64_srlz_d();
+ return (0);
+}
+
+static u_int
+ia64_ih_stop(struct thread *td, u_int xiv, struct trapframe *tf)
+{
+ cpumask_t mybit;
+
+ ia64_set_eoi(0);
+ PCPU_INC(md.stats.pcs_nstops);
+ mybit = PCPU_GET(cpumask);
+ ia64_srlz_d();
+
+ savectx(PCPU_PTR(md.pcb));
+
+ atomic_set_int(&stopped_cpus, mybit);
+ while ((started_cpus & mybit) == 0)
+ cpu_spinwait();
+ atomic_clear_int(&started_cpus, mybit);
+ atomic_clear_int(&stopped_cpus, mybit);
+ return (0);
+}
struct cpu_group *
cpu_topo(void)
@@ -116,7 +185,6 @@ void
ia64_ap_startup(void)
{
uint64_t vhpt;
- int vector;
pcpup = ap_pcpu;
ia64_set_k4((intptr_t)pcpup);
@@ -148,18 +216,6 @@ ia64_ap_startup(void)
CTR1(KTR_SMP, "SMP: cpu%d launched", PCPU_GET(cpuid));
- /* Acknowledge and EOI all interrupts. */
- vector = ia64_get_ivr();
- while (vector != 15) {
- ia64_srlz_d();
- if (vector == 0)
- vector = (int)ia64_ld1(&ia64_pib->ib_inta);
- ia64_set_eoi(0);
- ia64_srlz_d();
- vector = ia64_get_ivr();
- }
- ia64_srlz_d();
-
/* kick off the clock on this AP */
pcpu_initclock();
@@ -200,7 +256,7 @@ cpu_mp_probe(void)
* case we can have multiple processors, but we simply can't wake
* them up...
*/
- return (mp_ncpus > 1 && ipi_vector[IPI_AP_WAKEUP] != 0);
+ return (mp_ncpus > 1 && ia64_ipi_wakeup != 0);
}
void
@@ -276,7 +332,7 @@ cpu_mp_start()
if (bootverbose)
printf("SMP: waking up cpu%d\n", pc->pc_cpuid);
- ipi_send(pc, IPI_AP_WAKEUP);
+ ipi_send(pc, ia64_ipi_wakeup);
do {
DELAY(1000);
@@ -300,6 +356,18 @@ cpu_mp_unleash(void *dummy)
if (mp_ncpus <= 1)
return;
+ /* Allocate XIVs for IPIs */
+ ia64_ipi_ast = ia64_xiv_alloc(PI_DULL, IA64_XIV_IPI, ia64_ih_ast);
+ ia64_ipi_highfp = ia64_xiv_alloc(PI_AV, IA64_XIV_IPI, ia64_ih_highfp);
+ ia64_ipi_preempt = ia64_xiv_alloc(PI_SOFT, IA64_XIV_IPI,
+ ia64_ih_preempt);
+ ia64_ipi_rndzvs = ia64_xiv_alloc(PI_AV, IA64_XIV_IPI, ia64_ih_rndzvs);
+ ia64_ipi_stop = ia64_xiv_alloc(PI_REALTIME, IA64_XIV_IPI, ia64_ih_stop);
+
+ /* Reserve the NMI vector for IPI_STOP_HARD if possible */
+ ia64_ipi_nmi = (ia64_xiv_reserve(2, IA64_XIV_IPI, ia64_ih_stop) != 0)
+ ? ia64_ipi_stop : 0x400; /* DM=NMI, Vector=n/a */
+
cpus = 0;
smp_cpus = 0;
SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
@@ -361,20 +429,18 @@ ipi_all_but_self(int ipi)
* fields are used here.
*/
void
-ipi_send(struct pcpu *cpu, int ipi)
+ipi_send(struct pcpu *cpu, int xiv)
{
u_int lid;
- uint8_t vector;
+
+ KASSERT(xiv != 0, ("ipi_send"));
lid = LID_SAPIC(cpu->pc_md.lid);
- vector = ipi_vector[ipi];
- KASSERT(vector != 0, ("IPI %d is not assigned a vector", ipi));
ia64_mf();
- ia64_st8(&(ia64_pib->ib_ipi[lid][0]), vector);
+ ia64_st8(&(ia64_pib->ib_ipi[lid][0]), xiv);
ia64_mf_a();
- CTR4(KTR_SMP, "ipi_send(%p, %ld): cpuid=%d, vector=%u", cpu, ipi,
- PCPU_GET(cpuid), vector);
+ CTR3(KTR_SMP, "ipi_send(%p, %d): cpuid=%d", cpu, xiv, PCPU_GET(cpuid));
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);
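
Usage note (illustrative, not part of the commit): with the reworked
interface a caller passes the allocated XIV variable directly to ipi_send().
Only ipi_send(), the ia64_ipi_* variables, cpuhead and pc_allcpu appear in
the diff above; the example function name and the cpumask-based selection
are assumptions for illustration.

/* Hypothetical caller; only ipi_send() and the XIV variables are from the commit. */
static void
ipi_selected_example(cpumask_t cpus, int xiv)
{
	struct pcpu *pc;

	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (cpus & pc->pc_cpumask)
			ipi_send(pc, xiv);	/* deliver the XIV to that CPU */
	}
}

/* e.g.: ipi_selected_example(all_cpus & ~PCPU_GET(cpumask), ia64_ipi_rndzvs); */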