Diffstat (limited to 'sys/amd64/include')
-rw-r--r--  sys/amd64/include/atomic.h         | 120
-rw-r--r--  sys/amd64/include/cpufunc.h        |  73
-rw-r--r--  sys/amd64/include/ieeefp.h         |  10
-rw-r--r--  sys/amd64/include/in_cksum.h       |   6
-rw-r--r--  sys/amd64/include/limits.h         |   4
-rw-r--r--  sys/amd64/include/pcpu.h           |  12
-rw-r--r--  sys/amd64/include/profile.h        |   6
-rw-r--r--  sys/amd64/include/vmm.h            |   2
-rw-r--r--  sys/amd64/include/vmm_dev.h        |   1
-rw-r--r--  sys/amd64/include/xen/hypercall.h  |   3
10 files changed, 24 insertions(+), 213 deletions(-)
diff --git a/sys/amd64/include/atomic.h b/sys/amd64/include/atomic.h
index 4a9095ca831b..159d807f777c 100644
--- a/sys/amd64/include/atomic.h
+++ b/sys/amd64/include/atomic.h
@@ -30,10 +30,6 @@
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_
-#ifndef _SYS_CDEFS_H_
-#error this file needs sys/cdefs.h as a prerequisite
-#endif
-
/*
* To express interprocessor (as opposed to processor and device) memory
* ordering constraints, use the atomic_*() functions with acquire and release
@@ -103,56 +99,10 @@
*/
/*
- * The above functions are expanded inline in the statically-linked
- * kernel. Lock prefixes are generated if an SMP kernel is being
- * built.
+ * Always use lock prefixes. The result is slightly less optimal for
+ * UP systems, but it matters less now, and sometimes UP is emulated
+ * over SMP.
*
- * Kernel modules call real functions which are built into the kernel.
- * This allows kernel modules to be portable between UP and SMP systems.
- */
-#if !defined(__GNUCLIKE_ASM)
-#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V) \
-void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v); \
-void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
-
-int atomic_cmpset_char(volatile u_char *dst, u_char expect, u_char src);
-int atomic_cmpset_short(volatile u_short *dst, u_short expect, u_short src);
-int atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
-int atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src);
-int atomic_fcmpset_char(volatile u_char *dst, u_char *expect, u_char src);
-int atomic_fcmpset_short(volatile u_short *dst, u_short *expect,
- u_short src);
-int atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src);
-int atomic_fcmpset_long(volatile u_long *dst, u_long *expect, u_long src);
-u_int atomic_fetchadd_int(volatile u_int *p, u_int v);
-u_long atomic_fetchadd_long(volatile u_long *p, u_long v);
-int atomic_testandset_int(volatile u_int *p, u_int v);
-int atomic_testandset_long(volatile u_long *p, u_int v);
-int atomic_testandclear_int(volatile u_int *p, u_int v);
-int atomic_testandclear_long(volatile u_long *p, u_int v);
-void atomic_thread_fence_acq(void);
-void atomic_thread_fence_acq_rel(void);
-void atomic_thread_fence_rel(void);
-void atomic_thread_fence_seq_cst(void);
-
-#define ATOMIC_LOAD(TYPE) \
-u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p)
-#define ATOMIC_STORE(TYPE) \
-void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
-
-#else /* !KLD_MODULE && __GNUCLIKE_ASM */
-
-/*
- * For userland, always use lock prefixes so that the binaries will run
- * on both SMP and !SMP systems.
- */
-#if defined(SMP) || !defined(_KERNEL) || defined(KLD_MODULE)
-#define MPLOCKED "lock ; "
-#else
-#define MPLOCKED
-#endif
-
-/*
* The assembly is volatilized to avoid code chunk removal by the compiler.
* GCC aggressively reorders operations and memory clobbering is necessary
* in order to avoid that for memory barriers.
@@ -161,7 +111,7 @@ void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
static __inline void \
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{ \
- __asm __volatile(MPLOCKED OP \
+ __asm __volatile("lock; " OP \
: "+m" (*p) \
: CONS (V) \
: "cc"); \
@@ -170,7 +120,7 @@ atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
static __inline void \
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{ \
- __asm __volatile(MPLOCKED OP \
+ __asm __volatile("lock; " OP \
: "+m" (*p) \
: CONS (V) \
: "memory", "cc"); \
@@ -199,8 +149,7 @@ atomic_cmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE expect, u_##TYPE src) \
u_char res; \
\
__asm __volatile( \
- " " MPLOCKED " " \
- " cmpxchg %3,%1 ; " \
+ " lock; cmpxchg %3,%1 ; " \
"# atomic_cmpset_" #TYPE " " \
: "=@cce" (res), /* 0 */ \
"+m" (*dst), /* 1 */ \
@@ -216,8 +165,7 @@ atomic_fcmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE *expect, u_##TYPE src) \
u_char res; \
\
__asm __volatile( \
- " " MPLOCKED " " \
- " cmpxchg %3,%1 ; " \
+ " lock; cmpxchg %3,%1 ; " \
"# atomic_fcmpset_" #TYPE " " \
: "=@cce" (res), /* 0 */ \
"+m" (*dst), /* 1 */ \
@@ -241,8 +189,7 @@ atomic_fetchadd_int(volatile u_int *p, u_int v)
{
__asm __volatile(
- " " MPLOCKED " "
- " xaddl %0,%1 ; "
+ " lock; xaddl %0,%1 ; "
"# atomic_fetchadd_int"
: "+r" (v), /* 0 */
"+m" (*p) /* 1 */
@@ -259,8 +206,7 @@ atomic_fetchadd_long(volatile u_long *p, u_long v)
{
__asm __volatile(
- " " MPLOCKED " "
- " xaddq %0,%1 ; "
+ " lock; xaddq %0,%1 ; "
"# atomic_fetchadd_long"
: "+r" (v), /* 0 */
"+m" (*p) /* 1 */
@@ -274,8 +220,7 @@ atomic_testandset_int(volatile u_int *p, u_int v)
u_char res;
__asm __volatile(
- " " MPLOCKED " "
- " btsl %2,%1 ; "
+ " lock; btsl %2,%1 ; "
"# atomic_testandset_int"
: "=@ccc" (res), /* 0 */
"+m" (*p) /* 1 */
@@ -290,8 +235,7 @@ atomic_testandset_long(volatile u_long *p, u_int v)
u_char res;
__asm __volatile(
- " " MPLOCKED " "
- " btsq %2,%1 ; "
+ " lock; btsq %2,%1 ; "
"# atomic_testandset_long"
: "=@ccc" (res), /* 0 */
"+m" (*p) /* 1 */
@@ -306,8 +250,7 @@ atomic_testandclear_int(volatile u_int *p, u_int v)
u_char res;
__asm __volatile(
- " " MPLOCKED " "
- " btrl %2,%1 ; "
+ " lock; btrl %2,%1 ; "
"# atomic_testandclear_int"
: "=@ccc" (res), /* 0 */
"+m" (*p) /* 1 */
@@ -322,8 +265,7 @@ atomic_testandclear_long(volatile u_long *p, u_int v)
u_char res;
__asm __volatile(
- " " MPLOCKED " "
- " btrq %2,%1 ; "
+ " lock; btrq %2,%1 ; "
"# atomic_testandclear_long"
: "=@ccc" (res), /* 0 */
"+m" (*p) /* 1 */
@@ -344,39 +286,18 @@ atomic_testandclear_long(volatile u_long *p, u_int v)
* special address for "mem". In the kernel, we use a private per-cpu
* cache line. In user space, we use a word in the stack's red zone
* (-8(%rsp)).
- *
- * For UP kernels, however, the memory of the single processor is
- * always consistent, so we only need to stop the compiler from
- * reordering accesses in a way that violates the semantics of acquire
- * and release.
*/
-#if defined(_KERNEL)
-
-#if defined(SMP) || defined(KLD_MODULE)
static __inline void
__storeload_barrier(void)
{
-
+#if defined(_KERNEL)
__asm __volatile("lock; addl $0,%%gs:%0"
: "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");
-}
-#else /* _KERNEL && UP */
-static __inline void
-__storeload_barrier(void)
-{
-
- __compiler_membar();
-}
-#endif /* SMP */
#else /* !_KERNEL */
-static __inline void
-__storeload_barrier(void)
-{
-
__asm __volatile("lock; addl $0,-8(%%rsp)" : : : "memory", "cc");
-}
#endif /* _KERNEL */
+}
#define ATOMIC_LOAD(TYPE) \
static __inline u_##TYPE \
@@ -428,8 +349,6 @@ atomic_thread_fence_seq_cst(void)
__storeload_barrier();
}
-#endif /* KLD_MODULE || !__GNUCLIKE_ASM */
-
ATOMIC_ASM(set, char, "orb %b1,%0", "iq", v);
ATOMIC_ASM(clear, char, "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add, char, "addb %b1,%0", "iq", v);
@@ -466,8 +385,6 @@ ATOMIC_LOADSTORE(long);
#ifndef WANT_FUNCTIONS
/* Read the current value and store a new value in the destination. */
-#ifdef __GNUCLIKE_ASM
-
static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{
@@ -492,13 +409,6 @@ atomic_swap_long(volatile u_long *p, u_long v)
return (v);
}
-#else /* !__GNUCLIKE_ASM */
-
-u_int atomic_swap_int(volatile u_int *p, u_int v);
-u_long atomic_swap_long(volatile u_long *p, u_long v);
-
-#endif /* __GNUCLIKE_ASM */
-
#define atomic_set_acq_char atomic_set_barr_char
#define atomic_set_rel_char atomic_set_barr_char
#define atomic_clear_acq_char atomic_clear_barr_char
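
With MPLOCKED and the out-of-line module stubs gone, every expansion above hard-codes the lock prefix. As a minimal standalone sketch of the two idioms atomic.h now emits unconditionally (assuming GCC/Clang inline assembly; the *_demo helpers are illustrative, not the kernel macros):

#include <stdint.h>

/* Lock-prefixed read-modify-write: returns the old value, in the
 * style of atomic_fetchadd_int(). */
static inline uint32_t
fetchadd_demo(volatile uint32_t *p, uint32_t v)
{
	__asm__ __volatile__("lock; xaddl %0,%1"
	    : "+r" (v), "+m" (*p)
	    : : "cc");
	return (v);
}

/* Store/load barrier built from a dummy locked add; this is the
 * userland flavor that targets the stack red zone. */
static inline void
storeload_demo(void)
{
	__asm__ __volatile__("lock; addl $0,-8(%%rsp)"
	    : : : "memory", "cc");
}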
diff --git a/sys/amd64/include/cpufunc.h b/sys/amd64/include/cpufunc.h
index bca74d8ead67..99d8c82aa111 100644
--- a/sys/amd64/include/cpufunc.h
+++ b/sys/amd64/include/cpufunc.h
@@ -41,10 +41,6 @@
#ifndef _MACHINE_CPUFUNC_H_
#define _MACHINE_CPUFUNC_H_
-#ifndef _SYS_CDEFS_H_
-#error this file needs sys/cdefs.h as a prerequisite
-#endif
-
struct region_descriptor;
#define readb(va) (*(volatile uint8_t *) (va))
@@ -57,8 +53,6 @@ struct region_descriptor;
#define writel(va, d) (*(volatile uint32_t *) (va) = (d))
#define writeq(va, d) (*(volatile uint64_t *) (va) = (d))
-#if defined(__GNUCLIKE_ASM) && defined(__CC_SUPPORTS___INLINE)
-
static __inline void
breakpoint(void)
{
@@ -964,73 +958,6 @@ sgx_eremove(void *epc)
return (sgx_encls(SGX_EREMOVE, 0, (uint64_t)epc, 0));
}
-#else /* !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */
-
-int breakpoint(void);
-u_int bsfl(u_int mask);
-u_int bsrl(u_int mask);
-void clflush(u_long addr);
-void clts(void);
-void cpuid_count(u_int ax, u_int cx, u_int *p);
-void disable_intr(void);
-void do_cpuid(u_int ax, u_int *p);
-void enable_intr(void);
-void halt(void);
-void ia32_pause(void);
-u_char inb(u_int port);
-u_int inl(u_int port);
-void insb(u_int port, void *addr, size_t count);
-void insl(u_int port, void *addr, size_t count);
-void insw(u_int port, void *addr, size_t count);
-register_t intr_disable(void);
-void intr_restore(register_t rf);
-void invd(void);
-void invlpg(u_int addr);
-void invltlb(void);
-u_short inw(u_int port);
-void lidt(struct region_descriptor *addr);
-void lldt(u_short sel);
-void load_cr0(u_long cr0);
-void load_cr3(u_long cr3);
-void load_cr4(u_long cr4);
-void load_dr0(uint64_t dr0);
-void load_dr1(uint64_t dr1);
-void load_dr2(uint64_t dr2);
-void load_dr3(uint64_t dr3);
-void load_dr6(uint64_t dr6);
-void load_dr7(uint64_t dr7);
-void load_fs(u_short sel);
-void load_gs(u_short sel);
-void ltr(u_short sel);
-void outb(u_int port, u_char data);
-void outl(u_int port, u_int data);
-void outsb(u_int port, const void *addr, size_t count);
-void outsl(u_int port, const void *addr, size_t count);
-void outsw(u_int port, const void *addr, size_t count);
-void outw(u_int port, u_short data);
-u_long rcr0(void);
-u_long rcr2(void);
-u_long rcr3(void);
-u_long rcr4(void);
-uint64_t rdmsr(u_int msr);
-uint32_t rdmsr32(u_int msr);
-uint64_t rdpmc(u_int pmc);
-uint64_t rdr0(void);
-uint64_t rdr1(void);
-uint64_t rdr2(void);
-uint64_t rdr3(void);
-uint64_t rdr6(void);
-uint64_t rdr7(void);
-uint64_t rdtsc(void);
-u_long read_rflags(void);
-u_int rfs(void);
-u_int rgs(void);
-void wbinvd(void);
-void write_rflags(u_int rf);
-void wrmsr(u_int msr, uint64_t newval);
-
-#endif /* __GNUCLIKE_ASM && __CC_SUPPORTS___INLINE */
-
void reset_dbregs(void);
#ifdef _KERNEL
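
Dropping the !__GNUCLIKE_ASM prototypes means cpufunc.h now requires a compiler with GNU-style inline assembly and ships only the inline implementations. For flavor, a hedged sketch in the same style as the surviving inlines (rdtsc_demo is an illustrative name, not the header's rdtsc()):

#include <stdint.h>

static inline uint64_t
rdtsc_demo(void)
{
	uint32_t lo, hi;

	/* RDTSC returns the 64-bit TSC split across %edx:%eax. */
	__asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32 | lo);
}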
diff --git a/sys/amd64/include/ieeefp.h b/sys/amd64/include/ieeefp.h
index 96ee7e9040d8..48d879f0b80b 100644
--- a/sys/amd64/include/ieeefp.h
+++ b/sys/amd64/include/ieeefp.h
@@ -67,8 +67,6 @@
#define SSE_RND_OFF 13 /* rounding control offset */
#define SSE_FZ_OFF 15 /* flush to zero offset */
-#ifdef __GNUCLIKE_ASM
-
/*
* General notes about conflicting SSE vs FP status bits.
* This code assumes that software will not fiddle with the control
@@ -184,9 +182,7 @@ __fpgetsticky(void)
return ((fp_except_t)_ex);
}
-#endif /* __GNUCLIKE_ASM */
-
-#if !defined(__IEEEFP_NOINLINES__) && defined(__GNUCLIKE_ASM)
+#if !defined(__IEEEFP_NOINLINES__)
#define fpgetmask() __fpgetmask()
#define fpgetprec() __fpgetprec()
@@ -196,7 +192,7 @@ __fpgetsticky(void)
#define fpsetprec(m) __fpsetprec(m)
#define fpsetround(m) __fpsetround(m)
-#else /* !(!__IEEEFP_NOINLINES__ && __GNUCLIKE_ASM) */
+#else /* __IEEEFP_NOINLINES__ */
/* Augment the userland declarations. */
__BEGIN_DECLS
@@ -210,6 +206,6 @@ fp_prec_t fpgetprec(void);
fp_prec_t fpsetprec(fp_prec_t);
__END_DECLS
-#endif /* !__IEEEFP_NOINLINES__ && __GNUCLIKE_ASM */
+#endif /* !__IEEEFP_NOINLINES__ */
#endif /* !_MACHINE_IEEEFP_H_ */
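
The caller-visible API is unchanged: the __fp* inlines back the fp* macros unless __IEEEFP_NOINLINES__ asks for the out-of-line userland functions instead. A hedged usage sketch, assuming the documented fpsetround(3) semantics:

#include <ieeefp.h>

void
truncating_section_demo(void)
{
	fp_rnd_t old;

	old = fpsetround(FP_RZ);	/* returns the previous mode */
	/* ... FP work that wants round-toward-zero ... */
	fpsetround(old);
}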
diff --git a/sys/amd64/include/in_cksum.h b/sys/amd64/include/in_cksum.h
index 89ff1097f369..d9830168f5ab 100644
--- a/sys/amd64/include/in_cksum.h
+++ b/sys/amd64/include/in_cksum.h
@@ -37,12 +37,6 @@
#ifndef _MACHINE_IN_CKSUM_H_
#define _MACHINE_IN_CKSUM_H_ 1
-#ifndef _SYS_CDEFS_H_
-#error this file needs sys/cdefs.h as a prerequisite
-#endif
-
-#include <sys/cdefs.h>
-
#define in_cksum(m, len) in_cksum_skip(m, len, 0)
#ifdef _KERNEL
diff --git a/sys/amd64/include/limits.h b/sys/amd64/include/limits.h
index 5a7b831b3089..f2a4bf75fe2c 100644
--- a/sys/amd64/include/limits.h
+++ b/sys/amd64/include/limits.h
@@ -35,11 +35,7 @@
#ifndef _MACHINE_LIMITS_H_
#define _MACHINE_LIMITS_H_
-#include <sys/cdefs.h>
-
-#ifdef __CC_SUPPORTS_WARNING
#warning "machine/limits.h is deprecated. Include sys/limits.h instead."
-#endif
#include <sys/limits.h>
diff --git a/sys/amd64/include/pcpu.h b/sys/amd64/include/pcpu.h
index dc99d4249bd2..a671f01dbad5 100644
--- a/sys/amd64/include/pcpu.h
+++ b/sys/amd64/include/pcpu.h
@@ -31,10 +31,6 @@
#ifndef _MACHINE_PCPU_H_
#define _MACHINE_PCPU_H_
-#ifndef _SYS_CDEFS_H_
-#error "sys/cdefs.h is a prerequisite for this file"
-#endif
-
#include <machine/segments.h>
#include <machine/tss.h>
@@ -109,8 +105,6 @@ _Static_assert(sizeof(struct monitorbuf) == 128, "2x cache line");
#define MONITOR_STOPSTATE_RUNNING 0
#define MONITOR_STOPSTATE_STOPPED 1
-#if defined(__GNUCLIKE_ASM) && defined(__GNUCLIKE___TYPEOF)
-
/*
* Evaluates to the byte offset of the per-cpu variable name.
*/
@@ -277,12 +271,6 @@ _Static_assert(sizeof(struct monitorbuf) == 128, "2x cache line");
} \
} while (0);
-#else /* !__GNUCLIKE_ASM || !__GNUCLIKE___TYPEOF */
-
-#error "this file needs to be ported to your compiler"
-
-#endif /* __GNUCLIKE_ASM && __GNUCLIKE___TYPEOF */
-
#endif /* _KERNEL */
#endif /* !_MACHINE_PCPU_H_ */
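
Since every supported compiler is now assumed to provide GNU-style asm and __typeof, the #error fallback is gone and the %gs-relative accessors are unconditional. A hedged sketch of the access pattern those macros generate (offset handling simplified; pcpu_load32_demo is illustrative, not a kernel API):

#include <stdint.h>

static inline uint32_t
pcpu_load32_demo(uintptr_t off)
{
	uint32_t res;

	/* %gs points at the per-CPU area; 'off' is a field offset. */
	__asm__ __volatile__("movl %%gs:%1,%0"
	    : "=r" (res)
	    : "m" (*(uint32_t *)off));
	return (res);
}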
diff --git a/sys/amd64/include/profile.h b/sys/amd64/include/profile.h
index b0fb469f5354..e86fd582b407 100644
--- a/sys/amd64/include/profile.h
+++ b/sys/amd64/include/profile.h
@@ -45,7 +45,6 @@
static void _mcount(uintfptr_t frompc, uintfptr_t selfpc) __used; \
static void _mcount
-#ifdef __GNUCLIKE_ASM
#define MCOUNT __asm(" \n\
.text \n\
.p2align 4,0x90 \n\
@@ -101,9 +100,6 @@ mcount() \
_mcount(frompc, selfpc); \
}
#endif
-#else /* !__GNUCLIKE_ASM */
-#define MCOUNT
-#endif /* __GNUCLIKE_ASM */
typedef u_long uintfptr_t;
@@ -114,9 +110,7 @@ typedef u_long uintfptr_t;
typedef u_long fptrdiff_t;
__BEGIN_DECLS
-#ifdef __GNUCLIKE_ASM
void mcount(void) __asm(".mcount");
-#endif
__END_DECLS
#endif /* !_KERNEL */
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
index f265237a5303..d7d1509248f1 100644
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -469,7 +469,9 @@ void vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
int vcpu_trace_exceptions(struct vm *vm, int vcpuid);
#endif /* _KERNEL */
+#ifdef _KERNEL
#define VM_MAXCPU 16 /* maximum virtual cpus */
+#endif
/*
* Identifiers for optional vmm capabilities
diff --git a/sys/amd64/include/vmm_dev.h b/sys/amd64/include/vmm_dev.h
index a048e05d4b7c..9ed8f32302ae 100644
--- a/sys/amd64/include/vmm_dev.h
+++ b/sys/amd64/include/vmm_dev.h
@@ -174,6 +174,7 @@ struct vm_nmi {
#define MAX_VM_STATS 64
struct vm_stats {
int cpuid; /* in */
+ int index; /* in */
int num_entries; /* out */
struct timeval tv;
uint64_t statbuf[MAX_VM_STATS];
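
The new index field lets userland page through more counters than one statbuf can hold. A hedged sketch of a fetch loop (the VM_STATS ioctl and the keep-going-while-full convention are assumptions based on the structure layout, not shown in this diff):

#include <string.h>
#include <sys/ioctl.h>

static void
fetch_all_stats_demo(int fd, int vcpu)
{
	struct vm_stats vs;

	memset(&vs, 0, sizeof(vs));
	vs.cpuid = vcpu;
	vs.index = 0;				/* first chunk */
	do {
		if (ioctl(fd, VM_STATS, &vs) != 0)
			break;
		/* consume vs.num_entries values from vs.statbuf[] */
		vs.index += vs.num_entries;
	} while (vs.num_entries == MAX_VM_STATS);
}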
diff --git a/sys/amd64/include/xen/hypercall.h b/sys/amd64/include/xen/hypercall.h
index 6d00d4a6ebd8..60da390ef4c6 100644
--- a/sys/amd64/include/xen/hypercall.h
+++ b/sys/amd64/include/xen/hypercall.h
@@ -145,6 +145,9 @@ privcmd_hypercall(long op, long a1, long a2, long a3, long a4, long a5)
register long __arg5 __asm__("r8") = (long)(a5);
long __call = (long)&hypercall_page + (op * 32);
+ if (op >= PAGE_SIZE / 32)
+ return -EINVAL;
+
__asm__ volatile (
"call *%[call]"
: "=a" (__res), "=D" (__ign1), "=S" (__ign2),