/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019, 2020 Jeffrey Roberson <jeff@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _SYS_SMR_TYPES_H_
#define	_SYS_SMR_TYPES_H_

#include <sys/_smr.h>

/*
 * SMR Accessors are meant to provide safe access to SMR protected
 * pointers and prevent misuse and accidental access.
 *
 * Accessors are grouped by type:
 * entered	- Use while in a read section (between smr_enter() and
 *		  smr_exit()).
 * serialized	- Use while holding a lock that serializes writers.  Updates
 *		  are synchronized with readers via included barriers.
 * unserialized	- Use after the memory is out of scope and no longer visible
 *		  to readers.
 *
 * All accessors include a parameter for an assert to verify the required
 * synchronization.  For example, a writer might use:
 *
 * smr_serialized_store(pointer, value, mtx_owned(&writelock));
 *
 * These asserts are only enabled in INVARIANTS kernels.
 */

/* Type restricting pointer access to force use of the SMR accessors. */
#define	SMR_POINTER(type)						\
struct {								\
	type	__ptr;		/* Do not access directly */		\
}
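
/*
 * For instance (hypothetical names, not part of this header), a node
 * whose link is only dereferenced through the accessors below might be
 * declared as:
 *
 *	struct foo {
 *		SMR_POINTER(struct foo *) foo_next;
 *		int foo_value;
 *	};
 *
 * Wrapping the pointer in an anonymous struct makes a bare dereference
 * of foo_next a compile-time error.
 */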

/*
 * Read from an SMR protected pointer while in a read section.
 */
#define	smr_entered_load(p, smr) ({					\
	SMR_ASSERT(SMR_ENTERED((smr)), "smr_entered_load");		\
	(__typeof((p)->__ptr))atomic_load_acq_ptr((uintptr_t *)&(p)->__ptr); \
})
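
/*
 * Sketch of a lockless reader, assuming the hypothetical 'struct foo'
 * above and a 'foo_smr' smr_t created elsewhere:
 *
 *	smr_enter(foo_smr);
 *	fp = smr_entered_load(&parent->foo_next, foo_smr);
 *	if (fp != NULL)
 *		v = fp->foo_value;
 *	smr_exit(foo_smr);
 *
 * The acquire barrier in this load pairs with the release barrier in
 * smr_serialized_store()/smr_serialized_swap(), so a reader that
 * observes 'fp' also observes its initialized contents.
 */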

/*
 * Read from an SMR protected pointer while serialized by an
 * external mechanism.  'ex' should contain an assert that the
 * external mechanism is held, e.g. mtx_owned().
 */
#define	smr_serialized_load(p, ex) ({					\
	SMR_ASSERT(ex, "smr_serialized_load");				\
	(__typeof((p)->__ptr))atomic_load_ptr(&(p)->__ptr);		\
})
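
/*
 * E.g., a writer already holding the hypothetical 'foo_lock' mutex can
 * re-read the current value; the lock ordering makes the acquire
 * barrier unnecessary:
 *
 *	fp = smr_serialized_load(&parent->foo_next, mtx_owned(&foo_lock));
 */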

/*
 * Store 'v' to an SMR protected pointer while serialized by an
 * external mechanism.  'ex' should contain an assert that the
 * external mechanism is held, e.g. mtx_owned().
 *
 * Writers that are serialized with mutual exclusion or on a single
 * thread should use smr_serialized_store() rather than swap.
 */
#define	smr_serialized_store(p, v, ex) do {				\
	SMR_ASSERT(ex, "smr_serialized_store");				\
	__typeof((p)->__ptr) _v = (v);					\
	atomic_store_rel_ptr((uintptr_t *)&(p)->__ptr, (uintptr_t)_v);	\
} while (0)
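
/*
 * Sketch of a serialized writer (hypothetical names as above); the new
 * node is fully initialized before it is published:
 *
 *	mtx_lock(&foo_lock);
 *	new_fp->foo_value = v;
 *	smr_serialized_store(&parent->foo_next, new_fp,
 *	    mtx_owned(&foo_lock));
 *	mtx_unlock(&foo_lock);
 */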

/*
 * Swap 'v' with an SMR protected pointer and return the old value
 * while serialized by an external mechanism.  'ex' should contain
 * an assert that the external mechanism is held, e.g. mtx_owned().
 *
 * Swap permits multiple writers to update a pointer concurrently.
 */
#define	smr_serialized_swap(p, v, ex) ({				\
	SMR_ASSERT(ex, "smr_serialized_swap");				\
	__typeof((p)->__ptr) _v = (v);					\
	/* Release barrier guarantees contents are visible to reader */ \
	atomic_thread_fence_rel();					\
	(__typeof((p)->__ptr))atomic_swap_ptr(				\
	    (uintptr_t *)&(p)->__ptr, (uintptr_t)_v);			\
})
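
/*
 * Sketch (hypothetical names): with writers that may update the
 * pointer concurrently, each swap returns the prior value, which may
 * be reclaimed once readers drain.  'foo_writer_active()' stands in
 * for whatever assert the consumer's external mechanism provides, and
 * M_FOO for a malloc(9) type:
 *
 *	old = smr_serialized_swap(&parent->foo_next, new_fp,
 *	    foo_writer_active());
 *	if (old != NULL) {
 *		smr_synchronize(foo_smr);
 *		free(old, M_FOO);
 *	}
 */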

/*
 * Read from an SMR protected pointer when no serialization is required,
 * such as in the destructor callback or when the caller guarantees other
 * synchronization.
 */
#define	smr_unserialized_load(p, ex) ({					\
	SMR_ASSERT(ex, "smr_unserialized_load");			\
	(__typeof((p)->__ptr))atomic_load_ptr(&(p)->__ptr);		\
})

/*
 * Store to an SMR protected pointer when no serialization is required,
 * such as in the destructor callback or when the caller guarantees other
 * synchronization.
 */
#define	smr_unserialized_store(p, v, ex) do {				\
	SMR_ASSERT(ex, "smr_unserialized_store");			\
	__typeof((p)->__ptr) _v = (v);					\
	atomic_store_ptr((uintptr_t *)&(p)->__ptr, (uintptr_t)_v);	\
} while (0)
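
/*
 * Sketch of a reclamation callback, run after no readers can hold a
 * reference ('foo_dtor' is hypothetical); 'true' satisfies the assert
 * parameter since no further synchronization is required:
 *
 *	static void
 *	foo_dtor(struct foo *fp)
 *	{
 *		struct foo *next;
 *
 *		next = smr_unserialized_load(&fp->foo_next, true);
 *		smr_unserialized_store(&fp->foo_next, NULL, true);
 *		...
 *	}
 */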

#ifndef _KERNEL

/*
 * Load an SMR protected pointer when accessing kernel data structures through
 * libkvm.
 */
#define	smr_kvm_load(p) ((p)->__ptr)
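
/*
 * E.g. (illustrative), after kvm_read(3) has copied the enclosing
 * structure out of a kernel image, the embedded pointer can be read
 * directly since no live readers are involved:
 *
 *	struct foo kfoo;
 *
 *	kvm_read(kd, addr, &kfoo, sizeof(kfoo));
 *	next = smr_kvm_load(&kfoo.foo_next);
 */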

#endif /* !_KERNEL */
#endif /* !_SYS_SMR_TYPES_H_ */