author     Mateusz Guzik <mjg@FreeBSD.org>	2020-11-09 23:02:13 +0000
committer  Mateusz Guzik <mjg@FreeBSD.org>	2020-11-09 23:02:13 +0000
commit     f6dd1aefb7f69e2b9e1f5e621ac633d17d1427a4 (patch)
tree       50b5e0b6fb546a9b33d03b6ed2b9b6c933ee36be /sys/sys/mount.h
parent     6fcc846b59f97e3e91e0dcffd782b4f570efc046 (diff)
vfs: group mount per-cpu vars into one struct
While here move frequently read stuff into the same cacheline.

This shrinks struct mount by 64 bytes.

Tested by:	pho
Notes:
    svn path=/head/; revision=367535
Diffstat (limited to 'sys/sys/mount.h')
-rw-r--r--	sys/sys/mount.h	75
1 file changed, 44 insertions, 31 deletions
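
For orientation before the diff: the change replaces the four separately allocated per-CPU counter pointers in struct mount with a single pointer to a 16-byte struct mount_pcpu, and moves the hot, mostly-read fields (mnt_vfs_ops, mnt_flag, mnt_kern_flag, mnt_pcpu, mnt_rootvnode, ...) ahead of the now cacheline-aligned mnt_mtx. The stand-alone C sketch below is illustrative only; the fixed NCPU and the array-based "per-CPU" emulation are assumptions for the example, not the kernel's zpcpu machinery.

/*
 * Stand-alone illustration (not kernel code): the four per-CPU counter
 * pointers become one pointer to a grouped 16-byte struct.  "Per-CPU"
 * storage is emulated with a plain array indexed by a fake CPU id.
 */
#include <stdio.h>
#include <stdlib.h>

#define	NCPU	4		/* assumption: fixed CPU count for the sketch */

/* Old shape: one allocation and one pointer in struct mount per counter. */
struct mount_old_pcpu_ptrs {
	int *mnt_thread_in_ops_pcpu;
	int *mnt_ref_pcpu;
	int *mnt_lockref_pcpu;
	int *mnt_writeopcount_pcpu;
};

/* New shape: the counters travel together, one 16-byte slot per CPU. */
struct mount_pcpu {
	int mntp_thread_in_ops;
	int mntp_ref;
	int mntp_lockref;
	int mntp_writeopcount;
};

struct mount_new {
	struct mount_pcpu *mnt_pcpu;	/* single allocation, single pointer */
};

int
main(void)
{
	struct mount_new m;
	int cpu;

	m.mnt_pcpu = calloc(NCPU, sizeof(*m.mnt_pcpu));
	if (m.mnt_pcpu == NULL)
		return (1);

	/* A reference taken on CPU 2 touches only that CPU's 16-byte slot. */
	cpu = 2;
	m.mnt_pcpu[cpu].mntp_ref += 1;

	printf("sizeof(struct mount_pcpu) = %zu\n", sizeof(struct mount_pcpu));
	printf("pointer fields in struct mount: old %zu bytes, new %zu bytes\n",
	    4 * sizeof(int *), sizeof(struct mount_pcpu *));
	free(m.mnt_pcpu);
	return (0);
}

Grouping the counters also means any of them is reached through one pointer dereference into the same 16-byte per-CPU slot, which is what the _Static_assert added in the diff pins down against the pcpu 16 zone.
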
diff --git a/sys/sys/mount.h b/sys/sys/mount.h
index ee8d916c2432..f965dd72d7ba 100644
--- a/sys/sys/mount.h
+++ b/sys/sys/mount.h
@@ -177,6 +177,16 @@ struct vfsopt {
int seen;
};
+struct mount_pcpu {
+ int mntp_thread_in_ops;
+ int mntp_ref;
+ int mntp_lockref;
+ int mntp_writeopcount;
+};
+
+_Static_assert(sizeof(struct mount_pcpu) == 16,
+ "the struct is allocated from pcpu 16 zone");
+
/*
* Structure per mounted filesystem. Each mounted filesystem has an
* array of operations and an instance record. The filesystems are
@@ -192,20 +202,23 @@ struct vfsopt {
*
*/
struct mount {
- struct mtx mnt_mtx; /* mount structure interlock */
+ int mnt_vfs_ops; /* (i) pending vfs ops */
+ int mnt_kern_flag; /* (i) kernel only flags */
+ uint64_t mnt_flag; /* (i) flags shared with user */
+ struct mount_pcpu *mnt_pcpu; /* per-CPU data */
+ struct vnode *mnt_rootvnode;
+ struct vnode *mnt_vnodecovered; /* vnode we mounted on */
+ struct vfsops *mnt_op; /* operations on fs */
+ struct vfsconf *mnt_vfc; /* configuration info */
+ struct mtx __aligned(CACHE_LINE_SIZE) mnt_mtx; /* mount structure interlock */
int mnt_gen; /* struct mount generation */
#define mnt_startzero mnt_list
TAILQ_ENTRY(mount) mnt_list; /* (m) mount list */
- struct vfsops *mnt_op; /* operations on fs */
- struct vfsconf *mnt_vfc; /* configuration info */
- struct vnode *mnt_vnodecovered; /* vnode we mounted on */
struct vnode *mnt_syncer; /* syncer vnode */
int mnt_ref; /* (i) Reference count */
struct vnodelst mnt_nvnodelist; /* (i) list of vnodes */
int mnt_nvnodelistsize; /* (i) # of vnodes */
int mnt_writeopcount; /* (i) write syscalls pending */
- int mnt_kern_flag; /* (i) kernel only flags */
- uint64_t mnt_flag; /* (i) flags shared with user */
struct vfsoptlist *mnt_opt; /* current mount options */
struct vfsoptlist *mnt_optnew; /* new options passed to fs */
int mnt_maxsymlinklen; /* max size of short symlink */
@@ -229,12 +242,6 @@ struct mount {
struct lock mnt_explock; /* vfs_export walkers lock */
TAILQ_ENTRY(mount) mnt_upper_link; /* (m) we in the all uppers */
TAILQ_HEAD(, mount) mnt_uppers; /* (m) upper mounts over us*/
- int __aligned(CACHE_LINE_SIZE) mnt_vfs_ops;/* (i) pending vfs ops */
- int *mnt_thread_in_ops_pcpu;
- int *mnt_ref_pcpu;
- int *mnt_lockref_pcpu;
- int *mnt_writeopcount_pcpu;
- struct vnode *mnt_rootvnode;
};
/*
@@ -1054,7 +1061,7 @@ void resume_all_fs(void);
/*
* Code transitioning mnt_vfs_ops to > 0 issues IPIs until it observes
- * all CPUs not executing code enclosed by mnt_thread_in_ops_pcpu.
+ * all CPUs not executing code enclosed by thread_in_ops_pcpu variable.
*
* This provides an invariant that by the time the last CPU is observed not
* executing, everyone else entering will see the counter > 0 and exit.
@@ -1064,52 +1071,58 @@ void resume_all_fs(void);
* before making any changes or only make changes safe while the section is
* executed.
*/
+#define vfs_mount_pcpu(mp) zpcpu_get(mp->mnt_pcpu)
+#define vfs_mount_pcpu_remote(mp, cpu) zpcpu_get_cpu(mp->mnt_pcpu, cpu)
+
#define vfs_op_thread_entered(mp) ({ \
MPASS(curthread->td_critnest > 0); \
- *zpcpu_get(mp->mnt_thread_in_ops_pcpu) == 1; \
+ struct mount_pcpu *_mpcpu = vfs_mount_pcpu(mp); \
+ _mpcpu->mntp_thread_in_ops == 1; \
})
-#define vfs_op_thread_enter_crit(mp) ({ \
+#define vfs_op_thread_enter_crit(mp, _mpcpu) ({ \
bool _retval_crit = true; \
MPASS(curthread->td_critnest > 0); \
- MPASS(!vfs_op_thread_entered(mp)); \
- zpcpu_set_protected(mp->mnt_thread_in_ops_pcpu, 1); \
+ _mpcpu = vfs_mount_pcpu(mp); \
+ MPASS(_mpcpu->mntp_thread_in_ops == 0); \
+ _mpcpu->mntp_thread_in_ops = 1; \
__compiler_membar(); \
if (__predict_false(mp->mnt_vfs_ops > 0)) { \
- vfs_op_thread_exit_crit(mp); \
+ vfs_op_thread_exit_crit(mp, _mpcpu); \
_retval_crit = false; \
} \
_retval_crit; \
})
-#define vfs_op_thread_enter(mp) ({ \
+#define vfs_op_thread_enter(mp, _mpcpu) ({ \
bool _retval; \
critical_enter(); \
- _retval = vfs_op_thread_enter_crit(mp); \
+ _retval = vfs_op_thread_enter_crit(mp, _mpcpu); \
if (__predict_false(!_retval)) \
critical_exit(); \
_retval; \
})
-#define vfs_op_thread_exit_crit(mp) do { \
- MPASS(vfs_op_thread_entered(mp)); \
+#define vfs_op_thread_exit_crit(mp, _mpcpu) do { \
+ MPASS(_mpcpu == vfs_mount_pcpu(mp)); \
+ MPASS(_mpcpu->mntp_thread_in_ops == 1); \
__compiler_membar(); \
- zpcpu_set_protected(mp->mnt_thread_in_ops_pcpu, 0); \
+ _mpcpu->mntp_thread_in_ops = 0; \
} while (0)
-#define vfs_op_thread_exit(mp) do { \
- vfs_op_thread_exit_crit(mp); \
+#define vfs_op_thread_exit(mp, _mpcpu) do { \
+ vfs_op_thread_exit_crit(mp, _mpcpu); \
critical_exit(); \
} while (0)
-#define vfs_mp_count_add_pcpu(mp, count, val) do { \
- MPASS(vfs_op_thread_entered(mp)); \
- zpcpu_add_protected(mp->mnt_##count##_pcpu, val); \
+#define vfs_mp_count_add_pcpu(_mpcpu, count, val) do { \
+ MPASS(_mpcpu->mntp_thread_in_ops == 1); \
+ _mpcpu->mntp_##count += val; \
} while (0)
-#define vfs_mp_count_sub_pcpu(mp, count, val) do { \
- MPASS(vfs_op_thread_entered(mp)); \
- zpcpu_sub_protected(mp->mnt_##count##_pcpu, val); \
+#define vfs_mp_count_sub_pcpu(_mpcpu, count, val) do { \
+ MPASS(_mpcpu->mntp_thread_in_ops == 1); \
+ _mpcpu->mntp_##count -= val; \
} while (0)
#else /* !_KERNEL */
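
Finally, a hedged sketch of how a caller is expected to use the reworked macros after this change, modeled on the vfs_ref() fast path. The empty critical_enter()/critical_exit() stand-ins, the single-slot per-CPU storage, and the simplified macro bodies are assumptions made so the example builds in userland; the authoritative definitions are the ones added in the diff above, and real callers typically take the mount interlock on the slow path.

#include <stdbool.h>
#include <stdio.h>

struct mount_pcpu {
	int mntp_thread_in_ops;
	int mntp_ref;
	int mntp_lockref;
	int mntp_writeopcount;
};

struct mount {
	int mnt_vfs_ops;
	int mnt_ref;			/* slow-path counter, normally under mnt_mtx */
	struct mount_pcpu mnt_pcpu_storage;	/* stand-in for the pcpu zone allocation */
};

/* Stand-ins for kernel primitives; the real code uses critical sections and zpcpu_get(). */
static void critical_enter(void) { }
static void critical_exit(void) { }

static struct mount_pcpu *
vfs_mount_pcpu(struct mount *mp)
{
	return (&mp->mnt_pcpu_storage);
}

/* Simplified versions of the reworked macros; note the extra _mpcpu argument. */
#define	vfs_op_thread_enter(mp, _mpcpu) ({				\
	bool _retval = true;						\
	critical_enter();						\
	(_mpcpu) = vfs_mount_pcpu(mp);					\
	(_mpcpu)->mntp_thread_in_ops = 1;				\
	if ((mp)->mnt_vfs_ops > 0) {					\
		(_mpcpu)->mntp_thread_in_ops = 0;			\
		critical_exit();					\
		_retval = false;					\
	}								\
	_retval;							\
})

#define	vfs_op_thread_exit(mp, _mpcpu) do {				\
	(_mpcpu)->mntp_thread_in_ops = 0;				\
	critical_exit();						\
} while (0)

#define	vfs_mp_count_add_pcpu(_mpcpu, count, val)			\
	((_mpcpu)->mntp_##count += (val))

/* A vfs_ref()-style caller: the fast path bumps only this CPU's counter. */
static void
vfs_ref_sketch(struct mount *mp)
{
	struct mount_pcpu *mpcpu;

	if (vfs_op_thread_enter(mp, mpcpu)) {
		vfs_mp_count_add_pcpu(mpcpu, ref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		return;
	}
	/* Slow path: fall back to the shared counter (locking omitted here). */
	mp->mnt_ref++;
}

int
main(void)
{
	struct mount m = { 0 };

	vfs_ref_sketch(&m);
	printf("per-CPU ref = %d, shared ref = %d\n",
	    m.mnt_pcpu_storage.mntp_ref, m.mnt_ref);
	return (0);
}

The visible API difference is that the caller now receives and holds the struct mount_pcpu pointer for the whole critical section, so vfs_mp_count_add_pcpu()/vfs_mp_count_sub_pcpu() operate on that pointer instead of re-resolving a per-CPU area from the mount on every update.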