aboutsummaryrefslogtreecommitdiff
path: root/sys
diff options
context:
space:
mode:
authorRobert Watson <rwatson@FreeBSD.org>2004-07-27 16:41:01 +0000
committerRobert Watson <rwatson@FreeBSD.org>2004-07-27 16:41:01 +0000
commit1a8cfbc45096bb34daeddc44ba75ff8377e9e5c8 (patch)
treede9f7b08c7ab02bd97e32e53b40a626c8216f214 /sys
parentf66145c6bd8178a68fb3e0e702d0473469722fba (diff)
Pass a thread argument into cpu_critical_{enter,exit}() rather than
dereference curthread. It is called only from critical_{enter,exit}(), which already dereferences curthread. This doesn't seem to affect SMP performance in my benchmarks, but improves MySQL transaction throughput by about 1% on UP on my Xeon. Head nodding: jhb, bmilekic
Notes
Notes: svn path=/head/; revision=132700
Diffstat (limited to 'sys')
-rw-r--r--sys/alpha/include/critical.h12
-rw-r--r--sys/amd64/include/critical.h13
-rw-r--r--sys/arm/include/critical.h8
-rw-r--r--sys/i386/include/critical.h12
-rw-r--r--sys/ia64/include/critical.h12
-rw-r--r--sys/kern/kern_switch.c4
-rw-r--r--sys/powerpc/include/critical.h10
-rw-r--r--sys/sparc64/include/critical.h12
8 files changed, 35 insertions, 48 deletions
diff --git a/sys/alpha/include/critical.h b/sys/alpha/include/critical.h
index 88d68e15be48..22ff08cecb22 100644
--- a/sys/alpha/include/critical.h
+++ b/sys/alpha/include/critical.h
@@ -55,11 +55,9 @@ void cpu_critical_fork_exit(void);
* of td_critnest, prior to it being incremented to 1.
*/
static __inline void
-cpu_critical_enter(void)
+cpu_critical_enter(struct thread *td)
{
- struct thread *td;
- td = curthread;
td->td_md.md_savecrit = intr_disable();
}
@@ -71,18 +69,16 @@ cpu_critical_enter(void)
* exiting the last critical section.
*/
static __inline void
-cpu_critical_exit(void)
+cpu_critical_exit(struct thread *td)
{
- struct thread *td;
- td = curthread;
intr_restore(td->td_md.md_savecrit);
}
#else /* !__GNUC__ */
-void cpu_critical_enter(void);
-void cpu_critical_exit(void);
+void cpu_critical_enter(struct thread *td);
+void cpu_critical_exit(struct thread *td);
#endif /* __GNUC__ */
diff --git a/sys/amd64/include/critical.h b/sys/amd64/include/critical.h
index 3b2d4727c062..0e02e1fab640 100644
--- a/sys/amd64/include/critical.h
+++ b/sys/amd64/include/critical.h
@@ -55,9 +55,10 @@ void cpu_critical_fork_exit(void);
* of td_critnest, prior to it being incremented to 1.
*/
static __inline void
-cpu_critical_enter(void)
+cpu_critical_enter(struct thread *td)
{
- curthread->td_md.md_savecrit = intr_disable();
+
+ td->td_md.md_savecrit = intr_disable();
}
/*
@@ -68,15 +69,15 @@ cpu_critical_enter(void)
* exiting the last critical section.
*/
static __inline void
-cpu_critical_exit(void)
+cpu_critical_exit(struct thread *td)
{
- intr_restore(curthread->td_md.md_savecrit);
+ intr_restore(td->td_md.md_savecrit);
}
#else /* !__GNUC__ */
-void cpu_critical_enter(void);
-void cpu_critical_exit(void);
+void cpu_critical_enter(struct thread *td);
+void cpu_critical_exit(struct thread *td);
#endif /* __GNUC__ */
diff --git a/sys/arm/include/critical.h b/sys/arm/include/critical.h
index 6d3d46d8ce28..0f05c9fc1926 100644
--- a/sys/arm/include/critical.h
+++ b/sys/arm/include/critical.h
@@ -40,15 +40,15 @@
#define MACHINE_CRITICAL_H
void cpu_critical_fork_exit(void);
static __inline void
-cpu_critical_enter(void)
+cpu_critical_enter(struct thread *td)
{
- curthread->td_md.md_savecrit = disable_interrupts(I32_bit | F32_bit);
+ td->td_md.md_savecrit = disable_interrupts(I32_bit | F32_bit);
}
static __inline void
-cpu_critical_exit(void)
+cpu_critical_exit(struct thread *td)
{
- restore_interrupts(curthread->td_md.md_savecrit);
+ restore_interrupts(td->td_md.md_savecrit);
}
#endif
diff --git a/sys/i386/include/critical.h b/sys/i386/include/critical.h
index 8ddc7ef1576d..9826988322d3 100644
--- a/sys/i386/include/critical.h
+++ b/sys/i386/include/critical.h
@@ -59,9 +59,9 @@ void cpu_critical_fork_exit(void);
* is non-zero will be deferred.
*/
static __inline void
-cpu_critical_enter(void)
+cpu_critical_enter(struct thread *td)
{
- curthread->td_md.md_savecrit = intr_disable();
+ td->td_md.md_savecrit = intr_disable();
}
/*
@@ -76,15 +76,15 @@ cpu_critical_enter(void)
* code for us, so we do not have to do anything fancy.
*/
static __inline void
-cpu_critical_exit(void)
+cpu_critical_exit(struct thread *td)
{
- intr_restore(curthread->td_md.md_savecrit);
+ intr_restore(td->td_md.md_savecrit);
}
#else /* !(__GNUC__ || __INTEL_COMPILER) */
-void cpu_critical_enter(void);
-void cpu_critical_exit(void);
+void cpu_critical_enter(struct thread *td);
+void cpu_critical_exit(struct thread *td);
#endif /* __GNUC__ || __INTEL_COMPILER */
diff --git a/sys/ia64/include/critical.h b/sys/ia64/include/critical.h
index 42e38b2b6206..08c7d61f9f1f 100644
--- a/sys/ia64/include/critical.h
+++ b/sys/ia64/include/critical.h
@@ -55,11 +55,9 @@ void cpu_critical_fork_exit(void);
* of td_critnest, prior to it being incremented to 1.
*/
static __inline void
-cpu_critical_enter(void)
+cpu_critical_enter(struct thread *td)
{
- struct thread *td;
- td = curthread;
td->td_md.md_savecrit = intr_disable();
}
@@ -71,19 +69,17 @@ cpu_critical_enter(void)
* exiting the last critical section.
*/
static __inline void
-cpu_critical_exit(void)
+cpu_critical_exit(struct thread *td)
{
- struct thread *td;
- td = curthread;
intr_restore(td->td_md.md_savecrit);
}
#else /* !__GNUC__ */
-void cpu_critical_enter(void);
-void cpu_critical_exit(void);
+void cpu_critical_enter(struct thread *td);
+void cpu_critical_exit(struct thread *td);
#endif /* __GNUC__ */
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 5183a355995b..b19de433f772 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -437,7 +437,7 @@ critical_enter(void)
td = curthread;
if (td->td_critnest == 0)
- cpu_critical_enter();
+ cpu_critical_enter(td);
td->td_critnest++;
}
@@ -459,7 +459,7 @@ critical_exit(void)
}
#endif
td->td_critnest = 0;
- cpu_critical_exit();
+ cpu_critical_exit(td);
} else {
td->td_critnest--;
}
diff --git a/sys/powerpc/include/critical.h b/sys/powerpc/include/critical.h
index 7e0eace10cb9..93a177b1fcac 100644
--- a/sys/powerpc/include/critical.h
+++ b/sys/powerpc/include/critical.h
@@ -56,10 +56,9 @@ void cpu_critical_fork_exit(void);
*/
static __inline void
-cpu_critical_enter(void)
+cpu_critical_enter(struct thread *td)
{
u_int msr;
- struct thread *td = curthread;
msr = mfmsr();
td->td_md.md_savecrit = msr;
@@ -75,9 +74,8 @@ cpu_critical_enter(void)
* exiting the last critical section.
*/
static __inline void
-cpu_critical_exit(void)
+cpu_critical_exit(struct thread *td)
{
- struct thread *td = curthread;
mtmsr(td->td_md.md_savecrit);
}
@@ -85,8 +83,8 @@ cpu_critical_exit(void)
#else /* !__GNUC__ */
-void cpu_critical_enter(void);
-void cpu_critical_exit(void);
+void cpu_critical_enter(struct thread *td);
+void cpu_critical_exit(struct thread *td);
#endif /* __GNUC__ */
diff --git a/sys/sparc64/include/critical.h b/sys/sparc64/include/critical.h
index fb5fa7088fc3..381ec453602a 100644
--- a/sys/sparc64/include/critical.h
+++ b/sys/sparc64/include/critical.h
@@ -55,12 +55,10 @@ void cpu_critical_fork_exit(void);
* of td_critnest, prior to it being incremented to 1.
*/
static __inline void
-cpu_critical_enter(void)
+cpu_critical_enter(struct thread *td)
{
- struct thread *td;
critical_t pil;
- td = curthread;
pil = rdpr(pil);
wrpr(pil, 0, 14);
td->td_md.md_savecrit = pil;
@@ -75,18 +73,16 @@ cpu_critical_enter(void)
* exiting the last critical section.
*/
static __inline void
-cpu_critical_exit(void)
+cpu_critical_exit(struct thread *td)
{
- struct thread *td;
- td = curthread;
wrpr(pil, td->td_md.md_savecrit, 0);
}
#else /* !__GNUC__ */
-void cpu_critical_enter(void);
-void cpu_critical_exit(void);
+void cpu_critical_enter(struct thread *td);
+void cpu_critical_exit(struct thread *td);
#endif /* __GNUC__ */