path: root/sys/sys/smr.h
author     Jeff Roberson <jeff@FreeBSD.org>	2020-02-19 08:15:20 +0000
committer  Jeff Roberson <jeff@FreeBSD.org>	2020-02-19 08:15:20 +0000
commit     bf7dba0b91602a869e3f26ee910fb8e4a08f444f (patch)
tree       4103e318c296d67cf893cbe8d0024820de4dc4c9 /sys/sys/smr.h
parent     294de6bbd6dfa5eeb918bb1175905e5a88302095 (diff)
Type-validating SMR-protected pointer accessors.
This API is intended to provide some measure of safety with SMR protected pointers. A struct wrapper provides type checking and a guarantee that all access is mediated by the API unless abused. All modifying functions take an assert as an argument to guarantee that the required synchronization is present.

Reviewed by:	kib, markj, mjg
Differential Revision:	https://reviews.freebsd.org/D23711
Notes:
    svn path=/head/; revision=358096
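As a rough illustration of the type-wrapper idea described in the commit message, a reader-side consumer of the new accessors might look like the sketch below. This is not part of the commit; struct foo, foo_container, and foo_get_value are hypothetical names, and the sketch assumes the existing smr_enter()/smr_exit() read-section API from sys/sys/smr.h.

#include <sys/param.h>
#include <sys/smr.h>

struct foo {
	int	foo_value;
};

/* Wrap the raw pointer so it can only be touched through the smr_*() accessors. */
SMR_TYPE_DECLARE(foo_smr_t, struct foo *);

struct foo_container {
	foo_smr_t	fc_foo;		/* SMR-protected pointer */
};

static int
foo_get_value(struct foo_container *fc, smr_t smr)
{
	struct foo *p;
	int val;

	smr_enter(smr);				/* open the read section */
	p = smr_entered_load(&fc->fc_foo, smr);	/* checked against SMR_ENTERED(smr) */
	val = (p != NULL) ? p->foo_value : 0;	/* dereference only inside the section */
	smr_exit(smr);

	return (val);
}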
Diffstat (limited to 'sys/sys/smr.h')
-rw-r--r--	sys/sys/smr.h	92
1 file changed, 92 insertions, 0 deletions
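The serialized accessors added in the diff below take the caller's own synchronization assertion as an argument. Continuing the hypothetical sketch above (again not part of the commit; foo_lock and foo_replace are invented names), a writer serialized by a mutex might look like this:

#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx foo_lock;	/* assumed to be set up elsewhere with mtx_init() */

static struct foo *
foo_replace(struct foo_container *fc, struct foo *newp)
{
	struct foo *oldp;

	mtx_lock(&foo_lock);
	/* The last argument is the assert expression checked under INVARIANTS. */
	oldp = smr_serialized_load(&fc->fc_foo, mtx_owned(&foo_lock));
	smr_serialized_store(&fc->fc_foo, newp, mtx_owned(&foo_lock));
	mtx_unlock(&foo_lock);

	/*
	 * oldp may only be freed once readers are known to be done with it,
	 * e.g. after waiting on the SMR write sequence (smr_advance()/smr_poll())
	 * or via a deferred free.
	 */
	return (oldp);
}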
diff --git a/sys/sys/smr.h b/sys/sys/smr.h
index 19502020e6eb..4218558c7936 100644
--- a/sys/sys/smr.h
+++ b/sys/sys/smr.h
@@ -77,6 +77,98 @@ struct smr {
#define SMR_ASSERT_NOT_ENTERED(smr) \
KASSERT(!SMR_ENTERED(smr), ("In smr section."));
+#define SMR_ASSERT(ex, fn) \
+ KASSERT((ex), (fn ": Assertion " #ex " failed at %s:%d", __FILE__, __LINE__))
+
+/*
+ * SMR Accessors are meant to provide safe access to SMR protected
+ * pointers and prevent misuse and accidental access.
+ *
+ * Accessors are grouped by type:
+ * entered - Use while in a read section (between smr_enter/smr_exit())
+ * serialized - Use while holding a lock that serializes writers. Updates
+ * are synchronized with readers via included barriers.
+ * unserialized - Use after the memory is out of scope and not visible to
+ * readers.
+ *
+ * All accesses include a parameter for an assert to verify the required
+ * synchronization. For example, a writer might use:
+ *
+ * smr_serialized_store(pointer, value, mtx_owned(&writelock));
+ *
+ * These are only enabled in INVARIANTS kernels.
+ */
+
+/* Type restricting pointer access to force smr accessors. */
+#define SMR_TYPE_DECLARE(smrtype, type) \
+typedef struct { \
+ type __ptr; /* Do not access directly */ \
+} smrtype
+
+/*
+ * Read from an SMR protected pointer while in a read section.
+ */
+#define smr_entered_load(p, smr) ({ \
+ SMR_ASSERT(SMR_ENTERED((smr)), "smr_entered_load"); \
+ (__typeof((p)->__ptr))atomic_load_acq_ptr((uintptr_t *)&(p)->__ptr); \
+})
+
+/*
+ * Read from an SMR protected pointer while serialized by an
+ * external mechanism. 'ex' should contain an assert that the
+ * external mechanism is held, e.g. mtx_owned().
+ */
+#define smr_serialized_load(p, ex) ({ \
+ SMR_ASSERT(ex, "smr_serialized_load"); \
+ (__typeof((p)->__ptr))atomic_load_ptr((uintptr_t *)&(p)->__ptr);\
+})
+
+/*
+ * Store 'v' to an SMR protected pointer while serialized by an
+ * external mechanism. 'ex' should contain an assert that the
+ * external mechanism is held, e.g. mtx_owned().
+ */
+#define smr_serialized_store(p, v, ex) do { \
+ SMR_ASSERT(ex, "smr_serialized_store"); \
+ __typeof((p)->__ptr) _v = (v); \
+ atomic_store_rel_ptr((uintptr_t *)&(p)->__ptr, (uintptr_t)_v); \
+} while (0)
+
+/*
+ * Swap 'v' with an SMR protected pointer and return the old value
+ * while serialized by an external mechanism. 'ex' should contain
+ * an assert that the external mechanism is held, e.g. mtx_owned().
+ */
+#define smr_serialized_swap(p, v, ex) ({ \
+ SMR_ASSERT(ex, "smr_serialized_swap"); \
+ __typeof((p)->__ptr) _v = (v); \
+ /* Release barrier guarantees contents are visible to reader */ \
+ atomic_thread_fence_rel(); \
+ (__typeof((p)->__ptr))atomic_swap_ptr( \
+ (uintptr_t *)&(p)->__ptr, (uintptr_t)_v); \
+})
+
+/*
+ * Read from an SMR protected pointer when no serialization is required
+ * such as in the destructor callback or when the caller guarantees other
+ * synchronization.
+ */
+#define smr_unserialized_load(p, ex) ({ \
+ SMR_ASSERT(ex, "smr_unserialized_load"); \
+ (__typeof((p)->__ptr))atomic_load_ptr((uintptr_t *)&(p)->__ptr);\
+})
+
+/*
+ * Store to an SMR protected pointer when no serialization is required
+ * such as in the destructor callback or when the caller guarantees other
+ * synchronization.
+ */
+#define smr_unserialized_store(p, v, ex) do { \
+ SMR_ASSERT(ex, "smr_unserialized_store"); \
+ __typeof((p)->__ptr) _v = (v); \
+ atomic_store_ptr((uintptr_t *)&(p)->__ptr, (uintptr_t)_v); \
+} while (0)
+
/*
* Return the current write sequence number.
*/