author    Hans Petter Selasky <hselasky@FreeBSD.org>  2017-02-21 14:22:14 +0000
committer Hans Petter Selasky <hselasky@FreeBSD.org>  2017-02-21 14:22:14 +0000
commit    e560eab72c49d96cc6566ac6f7fc6c985421df91 (patch)
tree      c899cffacef3b6d0c16064605eb0ffa125809db4 /sys/compat/linuxkpi/common/include/linux
parent    ef23481a79936b733ee5c40fdc6c9ec3b0c01fca (diff)
Streamline the LinuxKPI spinlock wrappers.
1) Add better spinlock debug names when WITNESS_ALL is defined.
2) Make sure that the calling thread gets bound to the current CPU while a
   spinlock is locked. Some Linux kernel code depends on the CPU ID not
   changing while a spinlock is held.
3) Add support for using LinuxKPI spinlocks during a panic().

MFC after:	1 week
Sponsored by:	Mellanox Technologies
Notes:
	svn path=/head/; revision=314044
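
For orientation before the diff, the fragment below is a minimal usage sketch of the wrappers this commit streamlines, assuming a hypothetical driver structure (struct my_dev and its functions are illustrative, not part of the change):

	#include <linux/spinlock.h>

	/* Hypothetical per-device state; names are illustrative only. */
	struct my_dev {
		spinlock_t lock;	/* backed by a FreeBSD mtx(9) */
		int counter;
	};

	static void
	my_dev_setup(struct my_dev *dev)
	{
		/* With WITNESS_ALL, the lock name embeds __FILE__ and __LINE__. */
		spin_lock_init(&dev->lock);
	}

	static void
	my_dev_bump(struct my_dev *dev)
	{
		unsigned long flags;

		/*
		 * Per the commit message, the thread stays bound to the
		 * current CPU while the lock is held; with CONFIG_SPIN_SKIP
		 * defined the lock/unlock become no-ops during panic() or
		 * while the kernel debugger is active.
		 */
		spin_lock_irqsave(&dev->lock, flags);
		dev->counter++;
		spin_unlock_irqrestore(&dev->lock, flags);
	}

	static void
	my_dev_teardown(struct my_dev *dev)
	{
		spin_lock_destroy(&dev->lock);
	}

The init/destroy pair maps directly onto mtx_init()/mtx_destroy() in the diff below.
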
Diffstat (limited to 'sys/compat/linuxkpi/common/include/linux')
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/spinlock.h | 128
1 file changed, 109 insertions(+), 19 deletions(-)
diff --git a/sys/compat/linuxkpi/common/include/linux/spinlock.h b/sys/compat/linuxkpi/common/include/linux/spinlock.h
index 4beb6fe45f2d..dbd7a5a9e5ed 100644
--- a/sys/compat/linuxkpi/common/include/linux/spinlock.h
+++ b/sys/compat/linuxkpi/common/include/linux/spinlock.h
@@ -2,7 +2,7 @@
* Copyright (c) 2010 Isilon Systems, Inc.
* Copyright (c) 2010 iX Systems, Inc.
* Copyright (c) 2010 Panasas, Inc.
- * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,36 +35,126 @@
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
-#include <sys/libkern.h>
+#include <sys/kdb.h>
#include <linux/compiler.h>
#include <linux/rwlock.h>
+#include <linux/bottom_half.h>
typedef struct {
struct mtx m;
} spinlock_t;
-#define spin_lock(_l) mtx_lock(&(_l)->m)
-#define spin_unlock(_l) mtx_unlock(&(_l)->m)
-#define spin_trylock(_l) mtx_trylock(&(_l)->m)
-#define spin_lock_nested(_l, _n) mtx_lock_flags(&(_l)->m, MTX_DUPOK)
-#define spin_lock_irq(lock) spin_lock(lock)
-#define spin_unlock_irq(lock) spin_unlock(lock)
-#define spin_lock_irqsave(lock, flags) \
- do {(flags) = 0; spin_lock(lock); } while (0)
-#define spin_unlock_irqrestore(lock, flags) \
- do { spin_unlock(lock); } while (0)
+/*
+ * By defining CONFIG_SPIN_SKIP LinuxKPI spinlocks and asserts will be
+ * skipped during panic(). By default it is disabled due to
+ * performance reasons.
+ */
+#ifdef CONFIG_SPIN_SKIP
+#define SPIN_SKIP(void) unlikely(SCHEDULER_STOPPED() || kdb_active)
+#else
+#define SPIN_SKIP(void) 0
+#endif
+
+#define spin_lock(_l) do { \
+ if (SPIN_SKIP()) \
+ break; \
+ mtx_lock(&(_l)->m); \
+ local_bh_disable(); \
+} while (0)
+
+#define spin_lock_bh(_l) do { \
+ spin_lock(_l); \
+} while (0)
+
+#define spin_lock_irq(_l) do { \
+ spin_lock(_l); \
+} while (0)
+
+#define spin_unlock(_l) do { \
+ if (SPIN_SKIP()) \
+ break; \
+ local_bh_enable(); \
+ mtx_unlock(&(_l)->m); \
+} while (0)
+
+#define spin_unlock_bh(_l) do { \
+ spin_unlock(_l); \
+} while (0)
+
+#define spin_unlock_irq(_l) do { \
+ spin_unlock(_l); \
+} while (0)
+
+#define spin_trylock(_l) ({ \
+ int __ret; \
+ if (SPIN_SKIP()) { \
+ __ret = 1; \
+ } else { \
+ __ret = mtx_trylock(&(_l)->m); \
+ if (likely(__ret != 0)) \
+ local_bh_disable(); \
+ } \
+ __ret; \
+})
+
+#define spin_lock_nested(_l, _n) do { \
+ if (SPIN_SKIP()) \
+ break; \
+ mtx_lock_flags(&(_l)->m, MTX_DUPOK); \
+ local_bh_disable(); \
+} while (0)
+
+#define spin_lock_irqsave(_l, flags) do { \
+ (flags) = 0; \
+ spin_lock(_l); \
+} while (0)
+
+#define spin_lock_irqsave_nested(_l, flags, _n) do { \
+ (flags) = 0; \
+ spin_lock_nested(_l, _n); \
+} while (0)
+
+#define spin_unlock_irqrestore(_l, flags) do { \
+ spin_unlock(_l); \
+} while (0)
+
+#ifdef WITNESS_ALL
+/* NOTE: the maximum WITNESS name is 64 chars */
+#define __spin_lock_name(name, file, line) \
+ (((const char *){file ":" #line "-" name}) + \
+ (sizeof(file) > 16 ? sizeof(file) - 16 : 0))
+#else
+#define __spin_lock_name(name, file, line) name
+#endif
+#define _spin_lock_name(...) __spin_lock_name(__VA_ARGS__)
+#define spin_lock_name(name) _spin_lock_name(name, __FILE__, __LINE__)
+
+#define spin_lock_init(lock) linux_spin_lock_init(lock, spin_lock_name("lnxspin"))
+
+static inline void
+linux_spin_lock_init(spinlock_t *lock, const char *name)
+{
+
+ memset(lock, 0, sizeof(*lock));
+ mtx_init(&lock->m, name, NULL, MTX_DEF | MTX_NOWITNESS);
+}
static inline void
-spin_lock_init(spinlock_t *lock)
+spin_lock_destroy(spinlock_t *lock)
{
- memset(&lock->m, 0, sizeof(lock->m));
- mtx_init(&lock->m, "lnxspin", NULL, MTX_DEF | MTX_NOWITNESS);
+ mtx_destroy(&lock->m);
}
-#define DEFINE_SPINLOCK(lock) \
- spinlock_t lock; \
- MTX_SYSINIT(lock, &(lock).m, "lnxspin", MTX_DEF)
+#define DEFINE_SPINLOCK(lock) \
+ spinlock_t lock; \
+ MTX_SYSINIT(lock, &(lock).m, spin_lock_name("lnxspin"), MTX_DEF)
+
+#define assert_spin_locked(_l) do { \
+ if (SPIN_SKIP()) \
+ break; \
+ mtx_assert(&(_l)->m, MA_OWNED); \
+} while (0)
-#endif /* _LINUX_SPINLOCK_H_ */
+#endif /* _LINUX_SPINLOCK_H_ */
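
A note on the WITNESS_ALL naming macro added above: the program below is a standalone userland approximation of the same compile-time string-trimming trick, not the kernel macro itself. It shows how the generated "file:line-name" string is advanced past the front of a long path so the result stays within the 64-character WITNESS name limit mentioned in the comment.

	#include <stdio.h>

	/*
	 * Same idea as __spin_lock_name(): concatenate "file:line-name" at
	 * compile time, then skip all but the last 15 bytes of the file path
	 * when the path is long (compound literal plus pointer offset, C99).
	 */
	#define TRIMMED_LOCK_NAME(name, file, line)			\
		(((const char *){file ":" #line "-" name}) +		\
		 (sizeof(file) > 16 ? sizeof(file) - 16 : 0))

	int
	main(void)
	{
		/* Prints "inux/spinlock.h:141-lnxspin": last 15 path bytes kept. */
		printf("%s\n", TRIMMED_LOCK_NAME("lnxspin",
		    "sys/compat/linuxkpi/common/include/linux/spinlock.h", 141));
		return (0);
	}

The extra _spin_lock_name() indirection in the patch exists so that __FILE__ and __LINE__ are expanded before the # stringizer is applied.
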