about summary refs log tree commit diff
path: root/sys
diff options
context:
space:
mode:
authorMateusz Guzik <mjg@FreeBSD.org>2021-01-23 13:45:01 +0000
committerMateusz Guzik <mjg@FreeBSD.org>2021-01-23 15:04:43 +0000
commit618029af508be2c01a84162c1bad02bfd000db27 (patch)
tree586a41591df686d42cd67b80150a7be6a5e02581 /sys
parentc892d60a1d3e11c7e0651705bc4167b218a7415c (diff)
downloadsrc-618029af508be2c01a84162c1bad02bfd000db27.tar.gz
src-618029af508be2c01a84162c1bad02bfd000db27.zip
tmpfs: add support for lockless symlink lookup
Reviewed by:	kib (previous version)
Tested by:	pho (previous version)
Differential Revision:	https://reviews.freebsd.org/D27488
Diffstat (limited to 'sys')
-rw-r--r--	sys/fs/tmpfs/tmpfs.h	8
-rw-r--r--	sys/fs/tmpfs/tmpfs_subr.c	50
-rw-r--r--	sys/fs/tmpfs/tmpfs_vnops.c	30
3 files changed, 81 insertions(+), 7 deletions(-)
diff --git a/sys/fs/tmpfs/tmpfs.h b/sys/fs/tmpfs/tmpfs.h
index 811df5b2936f..28493a550252 100644
--- a/sys/fs/tmpfs/tmpfs.h
+++ b/sys/fs/tmpfs/tmpfs.h
@@ -277,7 +277,10 @@ struct tmpfs_node {
/* Valid when tn_type == VLNK. */
/* The link's target, allocated from a string pool. */
- char * tn_link; /* (c) */
+ struct tn_link {
+ char * tn_link_target; /* (c) */
+ char tn_link_smr; /* (c) */
+ } tn_link;
/* Valid when tn_type == VREG. */
struct tn_reg {
@@ -300,7 +303,8 @@ LIST_HEAD(tmpfs_node_list, tmpfs_node);
#define tn_rdev tn_spec.tn_rdev
#define tn_dir tn_spec.tn_dir
-#define tn_link tn_spec.tn_link
+#define tn_link_target tn_spec.tn_link.tn_link_target
+#define tn_link_smr tn_spec.tn_link.tn_link_smr
#define tn_reg tn_spec.tn_reg
#define tn_fifo tn_spec.tn_fifo
diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
index c74981db0f58..07e7ea11ad6e 100644
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -252,6 +252,8 @@ tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *tmp, enum vtype type,
{
struct tmpfs_node *nnode;
vm_object_t obj;
+ char *symlink;
+ char symlink_smr;
/* If the root directory of the 'tmp' file system is not yet
* allocated, this must be the request to do it. */
@@ -327,9 +329,41 @@ tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *tmp, enum vtype type,
case VLNK:
MPASS(strlen(target) < MAXPATHLEN);
nnode->tn_size = strlen(target);
- nnode->tn_link = malloc(nnode->tn_size, M_TMPFSNAME,
- M_WAITOK);
- memcpy(nnode->tn_link, target, nnode->tn_size);
+
+ symlink = NULL;
+ if (!tmp->tm_nonc) {
+ symlink = cache_symlink_alloc(nnode->tn_size + 1, M_WAITOK);
+ symlink_smr = true;
+ }
+ if (symlink == NULL) {
+ symlink = malloc(nnode->tn_size + 1, M_TMPFSNAME, M_WAITOK);
+ symlink_smr = false;
+ }
+ memcpy(symlink, target, nnode->tn_size + 1);
+
+ /*
+ * Allow safe symlink resolving for lockless lookup.
+ * tmpfs_fplookup_symlink references this comment.
+ *
+ * 1. nnode is not yet visible to the world
+ * 2. both tn_link_target and tn_link_smr get populated
+ * 3. release fence publishes their content
+ * 4. tn_link_target content is immutable until node destruction,
+ * where the pointer gets set to NULL
+ * 5. tn_link_smr is never changed once set
+ *
+ * As a result it is sufficient to issue load consume on the node
+ * pointer to also get the above content in a stable manner.
+ * Worst case tn_link_smr flag may be set to true despite being stale,
+ * while the target buffer is already cleared out.
+ *
+ * TODO: Since there is no load consume primitive provided
+ * right now, the load is performed with an acquire fence.
+ */
+ atomic_store_ptr((uintptr_t *)&nnode->tn_link_target,
+ (uintptr_t)symlink);
+ atomic_store_char((char *)&nnode->tn_link_smr, symlink_smr);
+ atomic_thread_fence_rel();
break;
case VREG:
@@ -382,6 +416,7 @@ tmpfs_free_node_locked(struct tmpfs_mount *tmp, struct tmpfs_node *node,
bool detach)
{
vm_object_t uobj;
+ char *symlink;
bool last;
TMPFS_MP_ASSERT_LOCKED(tmp);
@@ -417,7 +452,14 @@ tmpfs_free_node_locked(struct tmpfs_mount *tmp, struct tmpfs_node *node,
break;
case VLNK:
- free(node->tn_link, M_TMPFSNAME);
+ symlink = node->tn_link_target;
+ atomic_store_ptr((uintptr_t *)&node->tn_link_target,
+ (uintptr_t)NULL);
+ if (atomic_load_char(&node->tn_link_smr)) {
+ cache_symlink_free(symlink, node->tn_size + 1);
+ } else {
+ free(symlink, M_TMPFSNAME);
+ }
break;
case VREG:
diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c
index 66568e07b4d7..c716b393efdd 100644
--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -1444,13 +1444,40 @@ tmpfs_readlink(struct vop_readlink_args *v)
node = VP_TO_TMPFS_NODE(vp);
- error = uiomove(node->tn_link, MIN(node->tn_size, uio->uio_resid),
+ error = uiomove(node->tn_link_target, MIN(node->tn_size, uio->uio_resid),
uio);
tmpfs_set_accessed(VFS_TO_TMPFS(vp->v_mount), node);
return (error);
}
+/*
+ * VOP_FPLOOKUP_SYMLINK routines are subject to special circumstances, see
+ * the comment above cache_fplookup for details.
+ *
+ * Check tmpfs_alloc_node for tmpfs-specific synchronisation notes.
+ */
+static int
+tmpfs_fplookup_symlink(struct vop_fplookup_symlink_args *v)
+{
+ struct vnode *vp;
+ struct tmpfs_node *node;
+ char *symlink;
+
+ vp = v->a_vp;
+ node = VP_TO_TMPFS_NODE_SMR(vp);
+ atomic_thread_fence_acq();
+ if (__predict_false(node == NULL))
+ return (EAGAIN);
+ if (!atomic_load_char(&node->tn_link_smr))
+ return (EAGAIN);
+ symlink = atomic_load_ptr(&node->tn_link_target);
+ if (symlink == NULL)
+ return (EAGAIN);
+
+ return (cache_symlink_resolve(v->a_fpl, symlink, node->tn_size));
+}
+
static int
tmpfs_inactive(struct vop_inactive_args *v)
{
@@ -1807,6 +1834,7 @@ struct vop_vector tmpfs_vnodeop_entries = {
.vop_open = tmpfs_open,
.vop_close = tmpfs_close,
.vop_fplookup_vexec = tmpfs_fplookup_vexec,
+ .vop_fplookup_symlink = tmpfs_fplookup_symlink,
.vop_access = tmpfs_access,
.vop_stat = tmpfs_stat,
.vop_getattr = tmpfs_getattr,