author		Justin Hibbits <jhibbits@FreeBSD.org>	2022-01-14 03:46:29 +0000
committer	Justin Hibbits <jhibbits@FreeBSD.org>	2022-01-14 03:46:52 +0000
commit		b5d227b0b27adbb7b4db9f8f27a14df342344c50 (patch)
tree		0876fb1c9c6855c64a1c4098dc40f425c7a42a4b
parent		dd694648ff0f1c33ea05d9eb63fdbec527b27836 (diff)
powerpc: Add atomic_testand_{set,clear}_{int,long}
Add machine-optimized implementations for the following:

* atomic_testandset_int
* atomic_testandclear_int
* atomic_testandset_long
* atomic_testandclear_long

This fixes the build with ISA_206_ATOMICS enabled. Also add the associated atomic_testandset_32 and atomic_testandclear_32 so that ice(4) can potentially build.
-rw-r--r--	sys/powerpc/include/atomic.h	111
1 file changed, 111 insertions, 0 deletions
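For context, each of the new routines is a standard PowerPC load-reserve/store-conditional loop: lwarx (or ldarx) loads the word and takes a reservation, "and." extracts the requested bit into the result register, the word is updated with "or" or "andc", and "stwcx." (or "stdcx.") stores it back only if the reservation still holds, retrying via "bne- 1b" otherwise. Below is a minimal usage sketch, not part of this commit: the names slot_map, slot_claim, and slot_release are hypothetical, and it assumes a FreeBSD kernel context where <machine/atomic.h> supplies the primitives with the semantics added here (atomic_testandset_int(p, v) atomically sets bit (v & 0x1f) of *p and returns nonzero iff that bit was already set).

#include <sys/types.h>
#include <machine/atomic.h>

static volatile u_int slot_map;		/* bit i set => slot i is in use */

/* Claim the lowest free slot; return its index, or -1 if all are taken. */
static int
slot_claim(void)
{
	u_int i;

	for (i = 0; i < 32; i++) {
		/* A zero return means the bit was clear and is now ours. */
		if (atomic_testandset_int(&slot_map, i) == 0)
			return ((int)i);
	}
	return (-1);
}

/* Return a previously claimed slot to the pool. */
static void
slot_release(int i)
{
	(void)atomic_testandclear_int(&slot_map, (u_int)i);
}

Note that the plain variants impose no memory ordering beyond the atomic update itself; a caller needing lock-acquire semantics would use atomic_testandset_acq_long from this change, which issues __ATOMIC_ACQ() after the update.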
diff --git a/sys/powerpc/include/atomic.h b/sys/powerpc/include/atomic.h
index 0b383a4ebdf6..0d54ba4115c7 100644
--- a/sys/powerpc/include/atomic.h
+++ b/sys/powerpc/include/atomic.h
@@ -994,6 +994,117 @@ atomic_swap_64(volatile u_long *p, u_long v)
#define atomic_swap_ptr(p,v) atomic_swap_32((volatile u_int *)(p), v)
#endif
+static __inline int
+atomic_testandset_int(volatile u_int *p, u_int v)
+{
+ u_int m = (1u << (v & 0x1f));
+ u_int res;
+ u_int tmp;
+
+ __asm __volatile(
+ "1: lwarx %0,0,%3\n"
+ " and. %1,%0,%4\n"
+ " or %0,%0,%4\n"
+ " stwcx. %0,0,%3\n"
+ " bne- 1b\n"
+ : "=&r"(tmp), "=&r"(res), "+m"(*p)
+ : "r"(p), "r"(m)
+ : "cr0", "memory");
+
+ return (res != 0);
+}
+
+static __inline int
+atomic_testandclear_int(volatile u_int *p, u_int v)
+{
+ u_int m = (1u << (v & 0x1f));
+ u_int res;
+ u_int tmp;
+
+ __asm __volatile(
+ "1: lwarx %0,0,%3\n"
+ " and. %1,%0,%4\n"
+ " andc %0,%0,%4\n"
+ " stwcx. %0,0,%3\n"
+ " bne- 1b\n"
+ : "=&r"(tmp), "=&r"(res), "+m"(*p)
+ : "r"(p), "r"(m)
+ : "cr0", "memory");
+
+ return (res != 0);
+}
+
+#ifdef __powerpc64__
+static __inline int
+atomic_testandset_long(volatile u_long *p, u_int v)
+{
+ u_long m = (1ul << (v & 0x3f));
+ u_long res;
+ u_long tmp;
+
+ __asm __volatile(
+ "1: ldarx %0,0,%3\n"
+ " and. %1,%0,%4\n"
+ " or %0,%0,%4\n"
+ " stdcx. %0,0,%3\n"
+ " bne- 1b\n"
+ : "=&r"(tmp), "=&r"(res), "+m"(*(volatile u_long *)p)
+ : "r"(p), "r"(m)
+ : "cr0", "memory");
+
+ return (res != 0);
+}
+
+static __inline int
+atomic_testandclear_long(volatile u_long *p, u_int v)
+{
+ u_long m = (1ul << (v & 0x3f));
+ u_long res;
+ u_long tmp;
+
+ __asm __volatile(
+ "1: ldarx %0,0,%3\n"
+ " and. %1,%0,%4\n"
+ " andc %0,%0,%4\n"
+ " stdcx. %0,0,%3\n"
+ " bne- 1b\n"
+ : "=&r"(tmp), "=&r"(res), "+m"(*p)
+ : "r"(p), "r"(m)
+ : "cr0", "memory");
+
+ return (res != 0);
+}
+#else
+static __inline int
+atomic_testandset_long(volatile u_long *p, u_int v)
+{
+ return (atomic_testandset_int((volatile u_int *)p, v));
+}
+
+static __inline int
+atomic_testandclear_long(volatile u_long *p, u_int v)
+{
+ return (atomic_testandclear_int((volatile u_int *)p, v));
+}
+#endif
+
+#define atomic_testandclear_32 atomic_testandclear_int
+#define atomic_testandset_32 atomic_testandset_int
+
+static __inline int
+atomic_testandset_acq_long(volatile u_long *p, u_int v)
+{
+ u_int a = atomic_testandset_long(p, v);
+ __ATOMIC_ACQ();
+ return (a);
+}
+
+#define atomic_testandclear_int atomic_testandclear_int
+#define atomic_testandset_int atomic_testandset_int
+#define atomic_testandclear_long atomic_testandclear_long
+#define atomic_testandset_long atomic_testandset_long
+#define atomic_testandset_acq_long atomic_testandset_acq_long
+
static __inline void
atomic_thread_fence_acq(void)
{