From 89bf8575eeadb650aefa7fe48d0d9c9f58cd4d2a Mon Sep 17 00:00:00 2001
From: Jake Burkholder
Date: Tue, 31 Jul 2001 06:05:05 +0000
Subject: Flesh out the sparc64 port considerably.  This contains:

- mostly complete kernel pmap support, and tested but currently turned off
  userland pmap support
- low level assembly language trap, context switching and support code
- fully implemented atomic.h and supporting cpufunc.h
- some support for kernel debugging with ddb
- various header tweaks and filling out of machine dependent structures
---
 sys/sparc64/include/cpufunc.h | 126 +++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 125 insertions(+), 1 deletion(-)

(limited to 'sys/sparc64/include/cpufunc.h')

diff --git a/sys/sparc64/include/cpufunc.h b/sys/sparc64/include/cpufunc.h
index 57cf2ab2e19c..a3e5a6bb4a69 100644
--- a/sys/sparc64/include/cpufunc.h
+++ b/sys/sparc64/include/cpufunc.h
@@ -29,15 +29,139 @@
 #ifndef	_MACHINE_CPUFUNC_H_
 #define	_MACHINE_CPUFUNC_H_
 
+#include
+#include <machine/pstate.h>
+
+/*
+ * membar operand macros for use in other macros when # is a special
+ * character.  Keep these in sync with what the hardware expects.
+ */
+#define	C_Lookaside	(0)
+#define	C_MemIssue	(1)
+#define	C_Sync		(2)
+#define	M_LoadLoad	(0)
+#define	M_StoreLoad	(1)
+#define	M_LoadStore	(2)
+#define	M_StoreStore	(3)
+
+#define	CMASK_SHIFT	(4)
+#define	MMASK_SHIFT	(0)
+
+#define	CMASK_GEN(bit)	((1 << (bit)) << CMASK_SHIFT)
+#define	MMASK_GEN(bit)	((1 << (bit)) << MMASK_SHIFT)
+
+#define	Lookaside	CMASK_GEN(C_Lookaside)
+#define	MemIssue	CMASK_GEN(C_MemIssue)
+#define	Sync		CMASK_GEN(C_Sync)
+#define	LoadLoad	MMASK_GEN(M_LoadLoad)
+#define	StoreLoad	MMASK_GEN(M_StoreLoad)
+#define	LoadStore	MMASK_GEN(M_LoadStore)
+#define	StoreStore	MMASK_GEN(M_StoreStore)
+
+#define	casa(rs1, rs2, rd, asi) ({ \
+	u_int __rd = (u_int32_t)(rd); \
+	__asm __volatile("casa [%1] %2, %3, %0" \
+	    : "+r" (__rd) : "r" (rs1), "n" (asi), "r" (rs2)); \
+	__rd; \
+})
+
+#define	casxa(rs1, rs2, rd, asi) ({ \
+	u_long __rd = (u_int64_t)(rd); \
+	__asm __volatile("casxa [%1] %2, %3, %0" \
+	    : "+r" (__rd) : "r" (rs1), "n" (asi), "r" (rs2)); \
+	__rd; \
+})
+
+#define	flush(va) do { \
+	__asm __volatile("flush %0" : : "r" (va)); \
+} while (0)
+
+#define	ldxa(va, asi) ({ \
+	u_long __r; \
+	__asm __volatile("ldxa [%1] %2, %0" \
+	    : "=r" (__r) : "r" (va), "n" (asi)); \
+	__r; \
+})
+
+#define	stxa(va, asi, val) do { \
+	__asm __volatile("stxa %0, [%1] %2" \
+	    : : "r" (val), "r" (va), "n" (asi)); \
+} while (0)
+
+#define	membar(mask) do { \
+	__asm __volatile("membar %0" : : "n" (mask)); \
+} while (0)
+
+#define	rd(name) ({ \
+	u_int64_t __sr; \
+	__asm __volatile("rd %%" #name ", %0" : "=r" (__sr) :); \
+	__sr; \
+})
+
+#define	wr(name, val, xor) do { \
+	__asm __volatile("wr %0, %1, %%" #name \
+	    : : "r" (val), "rI" (xor)); \
+} while (0)
+
+#define	rdpr(name) ({ \
+	u_int64_t __pr; \
+	__asm __volatile("rdpr %%" #name", %0" : "=r" (__pr) :); \
+	__pr; \
+})
+
+#define	wrpr(name, val, xor) do { \
+	__asm __volatile("wrpr %0, %1, %%" #name \
+	    : : "r" (val), "rI" (xor)); \
+} while (0)
+
+static __inline void
+breakpoint(void)
+{
+	__asm __volatile("ta 1");
+}
+
+/*
+ * XXX use %pil for these.
+ */
 static __inline critical_t
 critical_enter(void)
 {
-	return (0);
+	critical_t ie;
+
+	ie = rdpr(pstate);
+	if (ie & PSTATE_IE)
+		wrpr(pstate, ie, PSTATE_IE);
+	return (ie);
 }
 
 static __inline void
 critical_exit(critical_t ie)
 {
+
+	if (ie & PSTATE_IE)
+		wrpr(pstate, ie, 0);
+}
+
+#if 0
+#define	HAVE_INLINE_FFS
+/*
+ * See page 202 of the SPARC v9 Architecture Manual.
+ */
+static __inline int
+ffs(int mask)
+{
+	int result;
+	int neg;
+	int tmp;
+
+	__asm __volatile(
+	"	neg	%3, %1 ; "
+	"	xnor	%3, %1, %2 ; "
+	"	popc	%2, %0 ; "
+	"	movrz	%3, %%g0, %0 ; "
+	    : "=r" (result), "=r" (neg), "=r" (tmp) : "r" (mask));
+	return (result);
 }
+#endif
 
 #endif /* !_MACHINE_CPUFUNC_H_ */
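
Usage note (not part of the patch): the sketch below shows how the critical_enter()/critical_exit() pair added above might bracket a short code sequence so it runs with interrupts disabled.  The function and variable names (example_bump, example_counter) are hypothetical; the sketch assumes only what the code above shows, namely that critical_enter() returns the saved %pstate and clears PSTATE.IE, and that critical_exit() sets PSTATE.IE again only if it was set on entry.

	#include <machine/cpufunc.h>	/* critical_enter(), critical_exit() */

	/* Hypothetical state that an interrupt handler might also touch. */
	static int example_counter;

	static __inline void
	example_bump(void)
	{
		critical_t s;

		s = critical_enter();	/* save %pstate, clear PSTATE.IE */
		example_counter++;	/* interrupts are disabled here */
		critical_exit(s);	/* restore PSTATE.IE if it was set */
	}

Because critical_enter() hands back the previous %pstate, these sections nest naturally: an inner pair leaves interrupts disabled until the outermost critical_exit() runs.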