author     Matt Macy <mmacy@FreeBSD.org>  2021-01-07 23:27:17 +0000
committer  Matt Macy <mmacy@FreeBSD.org>  2021-01-08 00:55:59 +0000
commit     7877fdebeeb35fad1cbbafce22598b1bdf97c786 (patch)
tree       10ccc0bab059d6f48a221045b92416fc347fe784 /sys/contrib/openzfs/module/os
parent     84089de83e79a0f748c6e22b1aacb59156e153d2 (diff)
OpenZFS merge main-gf11b09
- add dRAID support
- fix duplicate close handling
- fix memory leak in prefetch
- fix problem with SIMD benchmarking on FreeBSD boot
...
Diffstat (limited to 'sys/contrib/openzfs/module/os')
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/spl/spl_policy.c    |    5
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c        |   43
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/arc_os.c        |   10
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c     |   12
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/vdev_file.c     |   70
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c     |   36
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c   |    6
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_onexit_os.c |   70
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c    |    3
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops.c     |  877
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode.c     |   18
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c     |   15
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c       |  248
-rw-r--r--  sys/contrib/openzfs/module/os/linux/spl/spl-taskq.c       |  132
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/Makefile.in       |    3
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/abd_os.c          |    2
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/arc_os.c          |   88
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/policy.c          |    5
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c       |   31
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/vdev_file.c       |   18
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c      |    1
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c      |    4
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops.c       | 1091
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c       |    5
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zio_crypt.c       |   15
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c      |   25
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c        |  354
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zpl_inode.c       |   10
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c       |   23
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c       |   24
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c         |  177
31 files changed, 927 insertions(+), 2494 deletions(-)
diff --git a/sys/contrib/openzfs/module/os/freebsd/spl/spl_policy.c b/sys/contrib/openzfs/module/os/freebsd/spl/spl_policy.c
index 5cd5c69efa71..5ecd3d310361 100644
--- a/sys/contrib/openzfs/module/os/freebsd/spl/spl_policy.c
+++ b/sys/contrib/openzfs/module/os/freebsd/spl/spl_policy.c
@@ -37,6 +37,7 @@ __FBSDID("$FreeBSD$");
#include <sys/jail.h>
#include <sys/policy.h>
#include <sys/zfs_vfsops.h>
+#include <sys/zfs_znode.h>
int
@@ -312,11 +313,11 @@ secpolicy_vnode_setids_setgids(vnode_t *vp, cred_t *cr, gid_t gid)
}
int
-secpolicy_vnode_setid_retain(vnode_t *vp, cred_t *cr,
+secpolicy_vnode_setid_retain(znode_t *zp, cred_t *cr,
boolean_t issuidroot __unused)
{
- if (secpolicy_fs_owner(vp->v_mount, cr) == 0)
+ if (secpolicy_fs_owner(ZTOV(zp)->v_mount, cr) == 0)
return (0);
return (spl_priv_check_cred(cr, PRIV_VFS_RETAINSUGID));
}
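
This signature change is representative of the whole merge: the shared ZFS vnops now traffic in znode_t, and each platform converts to its native vnode type at the VFS boundary (ZTOV/VTOZ on FreeBSD). A minimal userspace sketch of that round-trip follows; the types and macros below are mock stand-ins, not the kernel definitions:

    #include <assert.h>
    #include <stdio.h>

    /* Mock stand-ins for the kernel's vnode_t and znode_t. */
    typedef struct vnode {
        struct znode *v_data;
    } vnode_t;

    typedef struct znode {
        vnode_t *z_vnode;
    } znode_t;

    #define ZTOV(zp) ((zp)->z_vnode)    /* znode -> vnode */
    #define VTOZ(vp) ((vp)->v_data)     /* vnode -> znode */

    int
    main(void)
    {
        vnode_t vn;
        znode_t zn;

        vn.v_data = &zn;
        zn.z_vnode = &vn;
        /* The two conversions are mutual inverses at the VFS boundary. */
        assert(VTOZ(ZTOV(&zn)) == &zn);
        assert(ZTOV(VTOZ(&vn)) == &vn);
        printf("round-trip ok\n");
        return (0);
    }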
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c
index a7bda509bf54..0a323e8856a3 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c
@@ -106,13 +106,13 @@ abd_free_chunk(void *c)
kmem_cache_free(abd_chunk_cache, c);
}
-static size_t
+static uint_t
abd_chunkcnt_for_bytes(size_t size)
{
return (P2ROUNDUP(size, zfs_abd_chunk_size) / zfs_abd_chunk_size);
}
-static inline size_t
+static inline uint_t
abd_scatter_chunkcnt(abd_t *abd)
{
ASSERT(!abd_is_linear(abd));
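
abd_chunkcnt_for_bytes() above simply rounds the request up to whole chunks; the uint_t change narrows the return type without altering the math. As a worked example of the P2ROUNDUP arithmetic, using a hypothetical 4 KiB chunk size (the macro body mirrors the ZFS sysmacros definition):

    #include <stdio.h>

    /*
     * Round x up to the next multiple of align (a power of 2), as in the
     * ZFS sysmacros.
     */
    #define P2ROUNDUP(x, align) (-(-(x) & -(align)))

    int
    main(void)
    {
        size_t chunk = 4096;    /* hypothetical zfs_abd_chunk_size */
        size_t sizes[] = { 1, 4096, 4097, 12288 };

        /* Prints 1, 1, 2 and 3 chunks respectively. */
        for (int i = 0; i < 4; i++)
            printf("%zu bytes -> %zu chunk(s)\n", sizes[i],
                P2ROUNDUP(sizes[i], chunk) / chunk);
        return (0);
    }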
@@ -129,7 +129,7 @@ abd_size_alloc_linear(size_t size)
void
abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
{
- size_t n = abd_scatter_chunkcnt(abd);
+ uint_t n = abd_scatter_chunkcnt(abd);
ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
int waste = n * zfs_abd_chunk_size - abd->abd_size;
if (op == ABDSTAT_INCR) {
@@ -161,25 +161,28 @@ abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
void
abd_verify_scatter(abd_t *abd)
{
+ uint_t i, n;
+
/*
* There are no scatter linear pages in FreeBSD, so it is an
* error if the ABD has been marked as a linear page.
*/
- VERIFY(!abd_is_linear_page(abd));
+ ASSERT(!abd_is_linear_page(abd));
ASSERT3U(ABD_SCATTER(abd).abd_offset, <,
zfs_abd_chunk_size);
- size_t n = abd_scatter_chunkcnt(abd);
- for (int i = 0; i < n; i++) {
- ASSERT3P(
- ABD_SCATTER(abd).abd_chunks[i], !=, NULL);
+ n = abd_scatter_chunkcnt(abd);
+ for (i = 0; i < n; i++) {
+ ASSERT3P(ABD_SCATTER(abd).abd_chunks[i], !=, NULL);
}
}
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
- size_t n = abd_chunkcnt_for_bytes(size);
- for (int i = 0; i < n; i++) {
+ uint_t i, n;
+
+ n = abd_chunkcnt_for_bytes(size);
+ for (i = 0; i < n; i++) {
void *c = kmem_cache_alloc(abd_chunk_cache, KM_PUSHPAGE);
ASSERT3P(c, !=, NULL);
ABD_SCATTER(abd).abd_chunks[i] = c;
@@ -190,8 +193,10 @@ abd_alloc_chunks(abd_t *abd, size_t size)
void
abd_free_chunks(abd_t *abd)
{
- size_t n = abd_scatter_chunkcnt(abd);
- for (int i = 0; i < n; i++) {
+ uint_t i, n;
+
+ n = abd_scatter_chunkcnt(abd);
+ for (i = 0; i < n; i++) {
abd_free_chunk(ABD_SCATTER(abd).abd_chunks[i]);
}
}
@@ -199,7 +204,7 @@ abd_free_chunks(abd_t *abd)
abd_t *
abd_alloc_struct(size_t size)
{
- size_t chunkcnt = abd_chunkcnt_for_bytes(size);
+ uint_t chunkcnt = abd_chunkcnt_for_bytes(size);
/*
* In the event we are allocating a gang ABD, the size passed in
* will be 0. We must make sure to set abd_size to the size of an
@@ -221,9 +226,9 @@ abd_alloc_struct(size_t size)
void
abd_free_struct(abd_t *abd)
{
- size_t chunkcnt = abd_is_linear(abd) || abd_is_gang(abd) ? 0 :
+ uint_t chunkcnt = abd_is_linear(abd) || abd_is_gang(abd) ? 0 :
abd_scatter_chunkcnt(abd);
- int size = MAX(sizeof (abd_t),
+ ssize_t size = MAX(sizeof (abd_t),
offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]));
mutex_destroy(&abd->abd_mtx);
ASSERT(!list_link_active(&abd->abd_gang_link));
@@ -238,7 +243,9 @@ abd_free_struct(abd_t *abd)
static void
abd_alloc_zero_scatter(void)
{
- size_t n = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
+ uint_t i, n;
+
+ n = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
abd_zero_buf = kmem_zalloc(zfs_abd_chunk_size, KM_SLEEP);
abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
@@ -251,7 +258,7 @@ abd_alloc_zero_scatter(void)
ABD_SCATTER(abd_zero_scatter).abd_chunk_size =
zfs_abd_chunk_size;
- for (int i = 0; i < n; i++) {
+ for (i = 0; i < n; i++) {
ABD_SCATTER(abd_zero_scatter).abd_chunks[i] =
abd_zero_buf;
}
@@ -356,7 +363,7 @@ abd_get_offset_scatter(abd_t *sabd, size_t off)
ASSERT3U(off, <=, sabd->abd_size);
size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;
- size_t chunkcnt = abd_scatter_chunkcnt(sabd) -
+ uint_t chunkcnt = abd_scatter_chunkcnt(sabd) -
(new_offset / zfs_abd_chunk_size);
abd = abd_alloc_scatter_offset_chunkcnt(chunkcnt);
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/arc_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/arc_os.c
index 94df750035a4..4fc7468bfa47 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/arc_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/arc_os.c
@@ -243,3 +243,13 @@ arc_lowmem_fini(void)
if (arc_event_lowmem != NULL)
EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem);
}
+
+void
+arc_register_hotplug(void)
+{
+}
+
+void
+arc_unregister_hotplug(void)
+{
+}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
index 1b37ce0d7f6b..647c1463ba14 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
@@ -114,6 +114,7 @@ SYSCTL_NODE(_vfs_zfs, OID_AUTO, spa, CTLFLAG_RW, 0, "ZFS space allocation");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, trim, CTLFLAG_RW, 0, "ZFS TRIM");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, txg, CTLFLAG_RW, 0, "ZFS transaction group");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vdev, CTLFLAG_RW, 0, "ZFS VDEV");
+SYSCTL_NODE(_vfs_zfs, OID_AUTO, vnops, CTLFLAG_RW, 0, "ZFS VNOPS");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zevent, CTLFLAG_RW, 0, "ZFS event");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zil, CTLFLAG_RW, 0, "ZFS ZIL");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zio, CTLFLAG_RW, 0, "ZFS ZIO");
@@ -228,15 +229,14 @@ SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD,
static int
sysctl_vfs_zfs_arc_no_grow_shift(SYSCTL_HANDLER_ARGS)
{
- uint32_t val;
- int err;
+ int err, val;
val = arc_no_grow_shift;
- err = sysctl_handle_32(oidp, &val, 0, req);
+ err = sysctl_handle_int(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
- if (val >= arc_shrink_shift)
+ if (val < 0 || val >= arc_shrink_shift)
return (EINVAL);
arc_no_grow_shift = val;
@@ -244,8 +244,8 @@ sysctl_vfs_zfs_arc_no_grow_shift(SYSCTL_HANDLER_ARGS)
}
SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_no_grow_shift,
- CTLTYPE_U32 | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 0, sizeof (uint32_t),
- sysctl_vfs_zfs_arc_no_grow_shift, "U",
+ CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, NULL, sizeof (int),
+ sysctl_vfs_zfs_arc_no_grow_shift, "I",
"log2(fraction of ARC which must be free to allow growing)");
int
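
The new handler stores the tunable as a plain int and rejects negative values before committing. The copy-out / rewrite / validate / commit shape is the standard SYSCTL_PROC pattern; since the real handler only builds in-kernel, here is a userspace analogue with hypothetical names:

    #include <errno.h>
    #include <stdio.h>

    static int arc_no_grow_shift = 5;       /* the tunable's current value */
    static const int arc_shrink_shift = 7;  /* exclusive upper bound */

    /* Validate-then-commit, as in sysctl_vfs_zfs_arc_no_grow_shift(). */
    static int
    set_no_grow_shift(int val)
    {
        if (val < 0 || val >= arc_shrink_shift)
            return (EINVAL);    /* reject before touching the tunable */
        arc_no_grow_shift = val;
        return (0);
    }

    int
    main(void)
    {
        printf("set 3  -> %d\n", set_no_grow_shift(3));   /* 0 (accepted) */
        printf("set -1 -> %d\n", set_no_grow_shift(-1));  /* EINVAL */
        printf("value is now %d\n", arc_no_grow_shift);   /* 3 */
        return (0);
    }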
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_file.c b/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_file.c
index cf762c5fd61c..825bd706e0c0 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_file.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_file.c
@@ -292,19 +292,28 @@ vdev_file_io_done(zio_t *zio)
}
vdev_ops_t vdev_file_ops = {
- vdev_file_open,
- vdev_file_close,
- vdev_default_asize,
- vdev_file_io_start,
- vdev_file_io_done,
- NULL,
- NULL,
- vdev_file_hold,
- vdev_file_rele,
- NULL,
- vdev_default_xlate,
- VDEV_TYPE_FILE, /* name of this vdev type */
- B_TRUE /* leaf vdev */
+ .vdev_op_init = NULL,
+ .vdev_op_fini = NULL,
+ .vdev_op_open = vdev_file_open,
+ .vdev_op_close = vdev_file_close,
+ .vdev_op_asize = vdev_default_asize,
+ .vdev_op_min_asize = vdev_default_min_asize,
+ .vdev_op_min_alloc = NULL,
+ .vdev_op_io_start = vdev_file_io_start,
+ .vdev_op_io_done = vdev_file_io_done,
+ .vdev_op_state_change = NULL,
+ .vdev_op_need_resilver = NULL,
+ .vdev_op_hold = vdev_file_hold,
+ .vdev_op_rele = vdev_file_rele,
+ .vdev_op_remap = NULL,
+ .vdev_op_xlate = vdev_default_xlate,
+ .vdev_op_rebuild_asize = NULL,
+ .vdev_op_metaslab_init = NULL,
+ .vdev_op_config_generate = NULL,
+ .vdev_op_nparity = NULL,
+ .vdev_op_ndisks = NULL,
+ .vdev_op_type = VDEV_TYPE_FILE, /* name of this vdev type */
+ .vdev_op_leaf = B_TRUE /* leaf vdev */
};
/*
@@ -313,19 +322,28 @@ vdev_ops_t vdev_file_ops = {
#ifndef _KERNEL
vdev_ops_t vdev_disk_ops = {
- vdev_file_open,
- vdev_file_close,
- vdev_default_asize,
- vdev_file_io_start,
- vdev_file_io_done,
- NULL,
- NULL,
- vdev_file_hold,
- vdev_file_rele,
- NULL,
- vdev_default_xlate,
- VDEV_TYPE_DISK, /* name of this vdev type */
- B_TRUE /* leaf vdev */
+ .vdev_op_init = NULL,
+ .vdev_op_fini = NULL,
+ .vdev_op_open = vdev_file_open,
+ .vdev_op_close = vdev_file_close,
+ .vdev_op_asize = vdev_default_asize,
+ .vdev_op_min_asize = vdev_default_min_asize,
+ .vdev_op_min_alloc = NULL,
+ .vdev_op_io_start = vdev_file_io_start,
+ .vdev_op_io_done = vdev_file_io_done,
+ .vdev_op_state_change = NULL,
+ .vdev_op_need_resilver = NULL,
+ .vdev_op_hold = vdev_file_hold,
+ .vdev_op_rele = vdev_file_rele,
+ .vdev_op_remap = NULL,
+ .vdev_op_xlate = vdev_default_xlate,
+ .vdev_op_rebuild_asize = NULL,
+ .vdev_op_metaslab_init = NULL,
+ .vdev_op_config_generate = NULL,
+ .vdev_op_nparity = NULL,
+ .vdev_op_ndisks = NULL,
+ .vdev_op_type = VDEV_TYPE_DISK, /* name of this vdev type */
+ .vdev_op_leaf = B_TRUE /* leaf vdev */
};
#endif
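
Both tables above move from positional to C99 designated initializers, so new vdev operations (vdev_op_init, vdev_op_nparity and friends, added for dRAID) can be appended to vdev_ops_t without rewriting every table, and any member left unnamed is zero-initialized. A self-contained illustration of the difference:

    #include <stdio.h>

    struct ops {
        int (*op_open)(void);
        int (*op_close)(void);
        const char *op_type;
        int op_leaf;
    };

    static int my_open(void) { return (0); }
    static int my_close(void) { return (0); }

    /* Positional: every member must appear, in declaration order. */
    static struct ops positional = { my_open, my_close, "file", 1 };

    /*
     * Designated: order-independent, and members left unnamed (op_close
     * here) are zero-initialized, so struct ops can grow without every
     * table being touched.
     */
    static struct ops designated = {
        .op_open = my_open,
        .op_type = "file",
        .op_leaf = 1,
    };

    int
    main(void)
    {
        printf("%s %s\n", positional.op_type, designated.op_type);
        printf("designated.op_close is %s\n",
            designated.op_close == NULL ? "NULL" : "set");
        return (0);
    }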
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c b/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c
index f042eff7cd2e..c9e8e21982cf 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c
@@ -1141,7 +1141,6 @@ sendreq:
break;
case ZIO_TYPE_IOCTL:
bp->bio_cmd = BIO_FLUSH;
- bp->bio_flags |= BIO_ORDERED;
bp->bio_data = NULL;
bp->bio_offset = cp->provider->mediasize;
bp->bio_length = 0;
@@ -1190,17 +1189,26 @@ vdev_geom_rele(vdev_t *vd)
}
vdev_ops_t vdev_disk_ops = {
- vdev_geom_open,
- vdev_geom_close,
- vdev_default_asize,
- vdev_geom_io_start,
- vdev_geom_io_done,
- NULL,
- NULL,
- vdev_geom_hold,
- vdev_geom_rele,
- NULL,
- vdev_default_xlate,
- VDEV_TYPE_DISK, /* name of this vdev type */
- B_TRUE /* leaf vdev */
+ .vdev_op_init = NULL,
+ .vdev_op_fini = NULL,
+ .vdev_op_open = vdev_geom_open,
+ .vdev_op_close = vdev_geom_close,
+ .vdev_op_asize = vdev_default_asize,
+ .vdev_op_min_asize = vdev_default_min_asize,
+ .vdev_op_min_alloc = NULL,
+ .vdev_op_io_start = vdev_geom_io_start,
+ .vdev_op_io_done = vdev_geom_io_done,
+ .vdev_op_state_change = NULL,
+ .vdev_op_need_resilver = NULL,
+ .vdev_op_hold = vdev_geom_hold,
+ .vdev_op_rele = vdev_geom_rele,
+ .vdev_op_remap = NULL,
+ .vdev_op_xlate = vdev_default_xlate,
+ .vdev_op_rebuild_asize = NULL,
+ .vdev_op_metaslab_init = NULL,
+ .vdev_op_config_generate = NULL,
+ .vdev_op_nparity = NULL,
+ .vdev_op_ndisks = NULL,
+ .vdev_op_type = VDEV_TYPE_DISK, /* name of this vdev type */
+ .vdev_op_leaf = B_TRUE /* leaf vdev */
};
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c
index d7786d5136a2..8fb259f4ba76 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c
@@ -158,7 +158,8 @@ zfs_file_read_impl(zfs_file_t *fp, void *buf, size_t count, loff_t *offp,
rc = fo_read(fp, &auio, td->td_ucred, FOF_OFFSET, td);
if (rc)
return (SET_ERROR(rc));
- *resid = auio.uio_resid;
+ if (resid)
+ *resid = auio.uio_resid;
*offp += count - auio.uio_resid;
return (SET_ERROR(0));
}
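
The added guard makes resid an optional out-parameter: callers that do not need the residual count may pass NULL. A small sketch of the convention (hypothetical function, not the ZFS API):

    #include <stdio.h>

    /*
     * Hypothetical helper: reports how many of the requested bytes were
     * not consumed; resid may be NULL if the caller doesn't care.
     */
    static int
    consume(size_t want, size_t have, size_t *resid)
    {
        size_t left = (want > have) ? want - have : 0;

        if (resid != NULL)      /* the guard added in the hunk above */
            *resid = left;
        return (0);
    }

    int
    main(void)
    {
        size_t left;

        (void) consume(100, 60, &left);     /* caller wants the residual */
        printf("resid = %zu\n", left);      /* 40 */
        (void) consume(100, 60, NULL);      /* caller doesn't; no crash */
        return (0);
    }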
@@ -296,7 +297,8 @@ zfs_file_unlink(const char *fnamep)
rc = kern_funlinkat(curthread, AT_FDCWD, fnamep, FD_NONE, seg, 0, 0);
#else
#ifdef AT_BENEATH
- rc = kern_unlinkat(curthread, AT_FDCWD, fnamep, seg, 0, 0);
+ rc = kern_unlinkat(curthread, AT_FDCWD, __DECONST(char *, fnamep),
+ seg, 0, 0);
#else
rc = kern_unlinkat(curthread, AT_FDCWD, __DECONST(char *, fnamep),
seg, 0);
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_onexit_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_onexit_os.c
index 8b22f2fdc3b3..e69de29bb2d1 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_onexit_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_onexit_os.c
@@ -1,70 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2013 by Delphix. All rights reserved.
- */
-
-#include <sys/types.h>
-#include <sys/param.h>
-#include <sys/errno.h>
-#include <sys/kmem.h>
-#include <sys/sunddi.h>
-#include <sys/zfs_ioctl.h>
-#include <sys/zfs_onexit.h>
-
-static int
-zfs_onexit_minor_to_state(minor_t minor, zfs_onexit_t **zo)
-{
- *zo = zfsdev_get_state(minor, ZST_ONEXIT);
- if (*zo == NULL)
- return (SET_ERROR(EBADF));
-
- return (0);
-}
-
-int
-zfs_onexit_fd_hold(int fd, minor_t *minorp)
-{
- file_t *fp, *tmpfp;
- zfs_onexit_t *zo;
- void *data;
- int error;
-
- if ((error = zfs_file_get(fd, &fp)))
- return (error);
-
- tmpfp = curthread->td_fpop;
- curthread->td_fpop = fp;
- error = devfs_get_cdevpriv(&data);
- if (error == 0)
- *minorp = (minor_t)(uintptr_t)data;
- curthread->td_fpop = tmpfp;
- if (error != 0)
- return (SET_ERROR(EBADF));
- return (zfs_onexit_minor_to_state(*minorp, &zo));
-}
-
-void
-zfs_onexit_fd_rele(int fd)
-{
- zfs_file_put(fd);
-}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
index 54ebfa7532dd..7bc6b83d0272 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
@@ -42,6 +42,7 @@
#include <sys/mount.h>
#include <sys/cmn_err.h>
#include <sys/zfs_znode.h>
+#include <sys/zfs_vnops.h>
#include <sys/zfs_dir.h>
#include <sys/zil.h>
#include <sys/fs/zfs.h>
@@ -433,7 +434,7 @@ zfs_sync(vfs_t *vfsp, int waitfor)
} else {
/*
* Sync all ZFS filesystems. This is what happens when you
- * run sync(1M). Unlike other filesystems, ZFS honors the
+ * run sync(8). Unlike other filesystems, ZFS honors the
* request by waiting for all pools to commit all dirty data.
*/
spa_sync_allpools();
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops.c
index 3c3285f93389..2e8eadb5e16e 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops.c
@@ -29,6 +29,7 @@
/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
+
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
@@ -270,69 +271,13 @@ zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr)
return (0);
}
-/*
- * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and
- * data (cmd == _FIO_SEEK_DATA). "off" is an in/out parameter.
- */
-static int
-zfs_holey(vnode_t *vp, ulong_t cmd, offset_t *off)
-{
- znode_t *zp = VTOZ(vp);
- uint64_t noff = (uint64_t)*off; /* new offset */
- uint64_t file_sz;
- int error;
- boolean_t hole;
-
- file_sz = zp->z_size;
- if (noff >= file_sz) {
- return (SET_ERROR(ENXIO));
- }
-
- if (cmd == _FIO_SEEK_HOLE)
- hole = B_TRUE;
- else
- hole = B_FALSE;
-
- error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff);
-
- if (error == ESRCH)
- return (SET_ERROR(ENXIO));
-
- /* file was dirty, so fall back to using generic logic */
- if (error == EBUSY) {
- if (hole)
- *off = file_sz;
-
- return (0);
- }
-
- /*
- * We could find a hole that begins after the logical end-of-file,
- * because dmu_offset_next() only works on whole blocks. If the
- * EOF falls mid-block, then indicate that the "virtual hole"
- * at the end of the file begins at the logical EOF, rather than
- * at the end of the last block.
- */
- if (noff > file_sz) {
- ASSERT(hole);
- noff = file_sz;
- }
-
- if (noff < *off)
- return (error);
- *off = noff;
- return (error);
-}
-
/* ARGSUSED */
static int
zfs_ioctl(vnode_t *vp, ulong_t com, intptr_t data, int flag, cred_t *cred,
int *rvalp)
{
- offset_t off;
+ loff_t off;
int error;
- zfsvfs_t *zfsvfs;
- znode_t *zp;
switch (com) {
case _FIOFFS:
@@ -350,18 +295,12 @@ zfs_ioctl(vnode_t *vp, ulong_t com, intptr_t data, int flag, cred_t *cred,
return (0);
}
- case _FIO_SEEK_DATA:
- case _FIO_SEEK_HOLE:
+ case F_SEEK_DATA:
+ case F_SEEK_HOLE:
{
off = *(offset_t *)data;
- zp = VTOZ(vp);
- zfsvfs = zp->z_zfsvfs;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
/* offset parameter is in/out */
- error = zfs_holey(vp, com, &off);
- ZFS_EXIT(zfsvfs);
+ error = zfs_holey(VTOZ(vp), com, &off);
if (error)
return (error);
*(offset_t *)data = off;
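
zfs_holey() itself moved to the platform-independent zfs_vnops code, so the FreeBSD ioctl shim now simply forwards F_SEEK_DATA/F_SEEK_HOLE. Userspace reaches the same machinery through lseek(2); a minimal probe with error handling trimmed (SEEK_HOLE/SEEK_DATA are native on FreeBSD; Linux wants _GNU_SOURCE):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(int argc, char **argv)
    {
        if (argc < 2)
            return (1);
        int fd = open(argv[1], O_RDONLY);
        if (fd < 0)
            return (1);
        off_t data = lseek(fd, 0, SEEK_DATA);   /* first data at/after 0 */
        off_t hole = lseek(fd, 0, SEEK_HOLE);   /* first hole at/after 0 */
        /* ENXIO from SEEK_DATA means everything past the offset is hole. */
        printf("data at %jd, hole at %jd\n", (intmax_t)data, (intmax_t)hole);
        close(fd);
        return (0);
    }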
@@ -525,16 +464,15 @@ page_unhold(vm_page_t pp)
* On Write: If we find a memory mapped page, we write to *both*
* the page and the dmu buffer.
*/
-static void
-update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
- int segflg, dmu_tx_t *tx)
+void
+update_pages(znode_t *zp, int64_t start, int len, objset_t *os)
{
vm_object_t obj;
struct sf_buf *sf;
+ vnode_t *vp = ZTOV(zp);
caddr_t va;
int off;
- ASSERT(segflg != UIO_NOCOPY);
ASSERT(vp->v_mount != NULL);
obj = vp->v_object;
ASSERT(obj != NULL);
@@ -552,8 +490,8 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
zfs_vmobject_wunlock_12(obj);
va = zfs_map_page(pp, &sf);
- (void) dmu_read(os, oid, start+off, nbytes,
- va+off, DMU_READ_PREFETCH);
+ (void) dmu_read(os, zp->z_id, start + off, nbytes,
+ va + off, DMU_READ_PREFETCH);
zfs_unmap_page(sf);
zfs_vmobject_wlock_12(obj);
@@ -579,10 +517,10 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
* map them into contiguous KVA region and populate them
* in one single dmu_read() call.
*/
-static int
-mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
+int
+mappedread_sf(znode_t *zp, int nbytes, uio_t *uio)
{
- znode_t *zp = VTOZ(vp);
+ vnode_t *vp = ZTOV(zp);
objset_t *os = zp->z_zfsvfs->z_os;
struct sf_buf *sf;
vm_object_t obj;
@@ -664,10 +602,10 @@ mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
* NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
* the file is memory mapped.
*/
-static int
-mappedread(vnode_t *vp, int nbytes, uio_t *uio)
+int
+mappedread(znode_t *zp, int nbytes, uio_t *uio)
{
- znode_t *zp = VTOZ(vp);
+ vnode_t *vp = ZTOV(zp);
vm_object_t obj;
int64_t start;
int len = nbytes;
@@ -710,523 +648,6 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
return (error);
}
-offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */
-
-/*
- * Read bytes from specified file into supplied buffer.
- *
- * IN: vp - vnode of file to be read from.
- * uio - structure supplying read location, range info,
- * and return buffer.
- * ioflag - SYNC flags; used to provide FRSYNC semantics.
- * cr - credentials of caller.
- * ct - caller context
- *
- * OUT: uio - updated offset and range, buffer filled.
- *
- * RETURN: 0 on success, error code on failure.
- *
- * Side Effects:
- * vp - atime updated if byte count > 0
- */
-/* ARGSUSED */
-static int
-zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr)
-{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- ssize_t n, nbytes, start_resid;
- int error = 0;
- int64_t nread;
- zfs_locked_range_t *lr;
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- /* We don't copy out anything useful for directories. */
- if (vp->v_type == VDIR) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EISDIR));
- }
-
- if (zp->z_pflags & ZFS_AV_QUARANTINED) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EACCES));
- }
-
- /*
- * Validate file offset
- */
- if (uio->uio_loffset < (offset_t)0) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EINVAL));
- }
-
- /*
- * Fasttrack empty reads
- */
- if (uio->uio_resid == 0) {
- ZFS_EXIT(zfsvfs);
- return (0);
- }
-
- /*
- * If we're in FRSYNC mode, sync out this znode before reading it.
- */
- if (zfsvfs->z_log &&
- (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
- zil_commit(zfsvfs->z_log, zp->z_id);
-
- /*
- * Lock the range against changes.
- */
- lr = zfs_rangelock_enter(&zp->z_rangelock, uio->uio_loffset,
- uio->uio_resid, RL_READER);
-
- /*
- * If we are reading past end-of-file we can skip
- * to the end; but we might still need to set atime.
- */
- if (uio->uio_loffset >= zp->z_size) {
- error = 0;
- goto out;
- }
-
- ASSERT(uio->uio_loffset < zp->z_size);
- n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
- start_resid = n;
-
- while (n > 0) {
- nbytes = MIN(n, zfs_read_chunk_size -
- P2PHASE(uio->uio_loffset, zfs_read_chunk_size));
-
- if (uio->uio_segflg == UIO_NOCOPY)
- error = mappedread_sf(vp, nbytes, uio);
- else if (vn_has_cached_data(vp)) {
- error = mappedread(vp, nbytes, uio);
- } else {
- error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
- uio, nbytes);
- }
- if (error) {
- /* convert checksum errors into IO errors */
- if (error == ECKSUM)
- error = SET_ERROR(EIO);
- break;
- }
-
- n -= nbytes;
- }
-
- nread = start_resid - n;
- dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
-
-out:
- zfs_rangelock_exit(lr);
-
- ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
- ZFS_EXIT(zfsvfs);
- return (error);
-}
-
-/*
- * Write the bytes to a file.
- *
- * IN: vp - vnode of file to be written to.
- * uio - structure supplying write location, range info,
- * and data buffer.
- * ioflag - FAPPEND, FSYNC, and/or FDSYNC. FAPPEND is
- * set if in append mode.
- * cr - credentials of caller.
- * ct - caller context (NFS/CIFS fem monitor only)
- *
- * OUT: uio - updated offset and range.
- *
- * RETURN: 0 on success, error code on failure.
- *
- * Timestamps:
- * vp - ctime|mtime updated if byte count > 0
- */
-
-/* ARGSUSED */
-static int
-zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr)
-{
- znode_t *zp = VTOZ(vp);
- rlim64_t limit = MAXOFFSET_T;
- ssize_t start_resid = uio->uio_resid;
- ssize_t tx_bytes;
- uint64_t end_size;
- dmu_buf_impl_t *db;
- dmu_tx_t *tx;
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- zilog_t *zilog;
- offset_t woff;
- ssize_t n, nbytes;
- zfs_locked_range_t *lr;
- int max_blksz = zfsvfs->z_max_blksz;
- int error = 0;
- arc_buf_t *abuf;
- iovec_t *aiov = NULL;
- xuio_t *xuio = NULL;
- int i_iov = 0;
- int iovcnt __unused = uio->uio_iovcnt;
- iovec_t *iovp = uio->uio_iov;
- int write_eof;
- int count = 0;
- sa_bulk_attr_t bulk[4];
- uint64_t mtime[2], ctime[2];
- uint64_t uid, gid, projid;
- int64_t nwritten;
-
- /*
- * Fasttrack empty write
- */
- n = start_resid;
- if (n == 0)
- return (0);
-
- if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
- limit = MAXOFFSET_T;
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
- &zp->z_size, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
- &zp->z_pflags, 8);
-
- /*
- * Callers might not be able to detect properly that we are read-only,
- * so check it explicitly here.
- */
- if (zfs_is_readonly(zfsvfs)) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EROFS));
- }
-
- /*
- * If immutable or not appending then return EPERM.
- * Intentionally allow ZFS_READONLY through here.
- * See zfs_zaccess_common()
- */
- if ((zp->z_pflags & ZFS_IMMUTABLE) ||
- ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
- (uio->uio_loffset < zp->z_size))) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EPERM));
- }
-
- zilog = zfsvfs->z_log;
-
- /*
- * Validate file offset
- */
- woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
- if (woff < 0) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EINVAL));
- }
-
- /*
- * If in append mode, set the io offset pointer to eof.
- */
- if (ioflag & FAPPEND) {
- /*
- * Obtain an appending range lock to guarantee file append
- * semantics. We reset the write offset once we have the lock.
- */
- lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
- woff = lr->lr_offset;
- if (lr->lr_length == UINT64_MAX) {
- /*
- * We overlocked the file because this write will cause
- * the file block size to increase.
- * Note that zp_size cannot change with this lock held.
- */
- woff = zp->z_size;
- }
- uio->uio_loffset = woff;
- } else {
- /*
- * Note that if the file block size will change as a result of
- * this write, then this range lock will lock the entire file
- * so that we can re-write the block safely.
- */
- lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
- }
-
- if (vn_rlimit_fsize(vp, uio, uio->uio_td)) {
- zfs_rangelock_exit(lr);
- ZFS_EXIT(zfsvfs);
- return (EFBIG);
- }
-
- if (woff >= limit) {
- zfs_rangelock_exit(lr);
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EFBIG));
- }
-
- if ((woff + n) > limit || woff > (limit - n))
- n = limit - woff;
-
- /* Will this write extend the file length? */
- write_eof = (woff + n > zp->z_size);
-
- end_size = MAX(zp->z_size, woff + n);
-
- uid = zp->z_uid;
- gid = zp->z_gid;
- projid = zp->z_projid;
-
- /*
- * Write the file in reasonable size chunks. Each chunk is written
- * in a separate transaction; this keeps the intent log records small
- * and allows us to do more fine-grained space accounting.
- */
- while (n > 0) {
- woff = uio->uio_loffset;
-
- if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, uid) ||
- zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, gid) ||
- (projid != ZFS_DEFAULT_PROJID &&
- zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
- projid))) {
- error = SET_ERROR(EDQUOT);
- break;
- }
-
- abuf = NULL;
- if (xuio) {
- ASSERT(i_iov < iovcnt);
- aiov = &iovp[i_iov];
- abuf = dmu_xuio_arcbuf(xuio, i_iov);
- dmu_xuio_clear(xuio, i_iov);
- DTRACE_PROBE3(zfs_cp_write, int, i_iov,
- iovec_t *, aiov, arc_buf_t *, abuf);
- ASSERT((aiov->iov_base == abuf->b_data) ||
- ((char *)aiov->iov_base - (char *)abuf->b_data +
- aiov->iov_len == arc_buf_size(abuf)));
- i_iov++;
- } else if (n >= max_blksz &&
- woff >= zp->z_size &&
- P2PHASE(woff, max_blksz) == 0 &&
- zp->z_blksz == max_blksz) {
- /*
- * This write covers a full block. "Borrow" a buffer
- * from the dmu so that we can fill it before we enter
- * a transaction. This avoids the possibility of
- * holding up the transaction if the data copy hangs
- * up on a pagefault (e.g., from an NFS server mapping).
- */
- size_t cbytes;
-
- abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
- max_blksz);
- ASSERT(abuf != NULL);
- ASSERT(arc_buf_size(abuf) == max_blksz);
- if ((error = uiocopy(abuf->b_data, max_blksz,
- UIO_WRITE, uio, &cbytes))) {
- dmu_return_arcbuf(abuf);
- break;
- }
- ASSERT(cbytes == max_blksz);
- }
-
- /*
- * Start a transaction.
- */
- tx = dmu_tx_create(zfsvfs->z_os);
- dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
- db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
- DB_DNODE_ENTER(db);
- dmu_tx_hold_write_by_dnode(tx, DB_DNODE(db), woff,
- MIN(n, max_blksz));
- DB_DNODE_EXIT(db);
- zfs_sa_upgrade_txholds(tx, zp);
- error = dmu_tx_assign(tx, TXG_WAIT);
- if (error) {
- dmu_tx_abort(tx);
- if (abuf != NULL)
- dmu_return_arcbuf(abuf);
- break;
- }
-
- /*
- * If zfs_range_lock() over-locked we grow the blocksize
- * and then reduce the lock range. This will only happen
- * on the first iteration since zfs_range_reduce() will
- * shrink down r_len to the appropriate size.
- */
- if (lr->lr_length == UINT64_MAX) {
- uint64_t new_blksz;
-
- if (zp->z_blksz > max_blksz) {
- /*
- * File's blocksize is already larger than the
- * "recordsize" property. Only let it grow to
- * the next power of 2.
- */
- ASSERT(!ISP2(zp->z_blksz));
- new_blksz = MIN(end_size,
- 1 << highbit64(zp->z_blksz));
- } else {
- new_blksz = MIN(end_size, max_blksz);
- }
- zfs_grow_blocksize(zp, new_blksz, tx);
- zfs_rangelock_reduce(lr, woff, n);
- }
-
- /*
- * XXX - should we really limit each write to z_max_blksz?
- * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
- */
- nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
-
- if (woff + nbytes > zp->z_size)
- vnode_pager_setsize(vp, woff + nbytes);
-
- if (abuf == NULL) {
- tx_bytes = uio->uio_resid;
- error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
- uio, nbytes, tx);
- tx_bytes -= uio->uio_resid;
- } else {
- tx_bytes = nbytes;
- ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
- /*
- * If this is not a full block write, but we are
- * extending the file past EOF and this data starts
- * block-aligned, use assign_arcbuf(). Otherwise,
- * write via dmu_write().
- */
- if (tx_bytes < max_blksz && (!write_eof ||
- aiov->iov_base != abuf->b_data)) {
- ASSERT(xuio);
- dmu_write(zfsvfs->z_os, zp->z_id, woff,
- aiov->iov_len, aiov->iov_base, tx);
- dmu_return_arcbuf(abuf);
- xuio_stat_wbuf_copied();
- } else {
- ASSERT(xuio || tx_bytes == max_blksz);
- dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl), woff,
- abuf, tx);
- }
- ASSERT(tx_bytes <= uio->uio_resid);
- uioskip(uio, tx_bytes);
- }
- if (tx_bytes && vn_has_cached_data(vp)) {
- update_pages(vp, woff, tx_bytes, zfsvfs->z_os,
- zp->z_id, uio->uio_segflg, tx);
- }
-
- /*
- * If we made no progress, we're done. If we made even
- * partial progress, update the znode and ZIL accordingly.
- */
- if (tx_bytes == 0) {
- (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
- (void *)&zp->z_size, sizeof (uint64_t), tx);
- dmu_tx_commit(tx);
- ASSERT(error != 0);
- break;
- }
-
- /*
- * Clear Set-UID/Set-GID bits on successful write if not
- * privileged and at least one of the execute bits is set.
- *
- * It would be nice to do this after all writes have
- * been done, but that would still expose the ISUID/ISGID
- * to another app after the partial write is committed.
- *
- * Note: we don't call zfs_fuid_map_id() here because
- * user 0 is not an ephemeral uid.
- */
- mutex_enter(&zp->z_acl_lock);
- if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
- (S_IXUSR >> 6))) != 0 &&
- (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
- secpolicy_vnode_setid_retain(vp, cr,
- (zp->z_mode & S_ISUID) != 0 && zp->z_uid == 0) != 0) {
- uint64_t newmode;
- zp->z_mode &= ~(S_ISUID | S_ISGID);
- newmode = zp->z_mode;
- (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
- (void *)&newmode, sizeof (uint64_t), tx);
- }
- mutex_exit(&zp->z_acl_lock);
-
- zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
-
- /*
- * Update the file size (zp_size) if it has changed;
- * account for possible concurrent updates.
- */
- while ((end_size = zp->z_size) < uio->uio_loffset) {
- (void) atomic_cas_64(&zp->z_size, end_size,
- uio->uio_loffset);
- ASSERT(error == 0 || error == EFAULT);
- }
- /*
- * If we are replaying and eof is non zero then force
- * the file size to the specified eof. Note, there's no
- * concurrency during replay.
- */
- if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
- zp->z_size = zfsvfs->z_replay_eof;
-
- if (error == 0)
- error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
- else
- (void) sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
-
- zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes,
- ioflag, NULL, NULL);
- dmu_tx_commit(tx);
-
- if (error != 0)
- break;
- ASSERT(tx_bytes == nbytes);
- n -= nbytes;
-
- }
-
- zfs_rangelock_exit(lr);
-
- /*
- * If we're in replay mode, or we made no progress, return error.
- * Otherwise, it's at least a partial write, so it's successful.
- */
- if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
- ZFS_EXIT(zfsvfs);
- return (error);
- }
-
- /*
- * EFAULT means that at least one page of the source buffer was not
- * available. VFS will re-try remaining I/O upon this error.
- */
- if (error == EFAULT) {
- ZFS_EXIT(zfsvfs);
- return (error);
- }
-
- if (ioflag & (FSYNC | FDSYNC) ||
- zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, zp->z_id);
-
- nwritten = start_resid - uio->uio_resid;
- dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
-
- ZFS_EXIT(zfsvfs);
- return (0);
-}
-
int
zfs_write_simple(znode_t *zp, const void *data, size_t len,
loff_t pos, size_t *presid)
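
The removed zfs_read()/zfs_write() now live in the common module/zfs/zfs_vnops.c. One detail worth keeping from the deleted write path: each chunk is clamped so it never crosses a block boundary, nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz)). A worked example of that arithmetic with a hypothetical 128 KiB recordsize:

    #include <stdint.h>
    #include <stdio.h>

    #define P2PHASE(x, align)   ((x) & ((align) - 1))  /* offset in block */
    #define MIN(a, b)           ((a) < (b) ? (a) : (b))

    int
    main(void)
    {
        uint64_t max_blksz = 128 * 1024;    /* hypothetical z_max_blksz */
        uint64_t woff = 130 * 1024;         /* 2 KiB into block 1 */
        uint64_t n = 200 * 1024;            /* bytes left to write */
        uint64_t nbytes;

        /* First chunk stops at the next block boundary: 126 KiB. */
        nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
        printf("first chunk:  %ju KiB\n", (uintmax_t)(nbytes / 1024));

        /* Every later chunk is then block-aligned: 74 KiB remains. */
        woff += nbytes;
        n -= nbytes;
        nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
        printf("second chunk: %ju KiB\n", (uintmax_t)(nbytes / 1024));
        return (0);
    }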
@@ -1249,184 +670,13 @@ zfs_write_simple(znode_t *zp, const void *data, size_t len,
return (error);
}
-static void
-zfs_get_done(zgd_t *zgd, int error)
+void
+zfs_zrele_async(znode_t *zp)
{
- znode_t *zp = zgd->zgd_private;
- objset_t *os = zp->z_zfsvfs->z_os;
-
- if (zgd->zgd_db)
- dmu_buf_rele(zgd->zgd_db, zgd);
+ vnode_t *vp = ZTOV(zp);
+ objset_t *os = ITOZSB(vp)->z_os;
- zfs_rangelock_exit(zgd->zgd_lr);
-
- /*
- * Release the vnode asynchronously as we currently have the
- * txg stopped from syncing.
- */
- VN_RELE_ASYNC(ZTOV(zp), dsl_pool_zrele_taskq(dmu_objset_pool(os)));
-
- kmem_free(zgd, sizeof (zgd_t));
-}
-
-#ifdef ZFS_DEBUG
-static int zil_fault_io = 0;
-#endif
-
-/*
- * Get data to generate a TX_WRITE intent log record.
- */
-int
-zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
-{
- zfsvfs_t *zfsvfs = arg;
- objset_t *os = zfsvfs->z_os;
- znode_t *zp;
- uint64_t object = lr->lr_foid;
- uint64_t offset = lr->lr_offset;
- uint64_t size = lr->lr_length;
- dmu_buf_t *db;
- zgd_t *zgd;
- int error = 0;
-
- ASSERT3P(lwb, !=, NULL);
- ASSERT3P(zio, !=, NULL);
- ASSERT3U(size, !=, 0);
-
- /*
- * Nothing to do if the file has been removed
- */
- if (zfs_zget(zfsvfs, object, &zp) != 0)
- return (SET_ERROR(ENOENT));
- if (zp->z_unlinked) {
- /*
- * Release the vnode asynchronously as we currently have the
- * txg stopped from syncing.
- */
- VN_RELE_ASYNC(ZTOV(zp),
- dsl_pool_zrele_taskq(dmu_objset_pool(os)));
- return (SET_ERROR(ENOENT));
- }
-
- zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
- zgd->zgd_lwb = lwb;
- zgd->zgd_private = zp;
-
- /*
- * Write records come in two flavors: immediate and indirect.
- * For small writes it's cheaper to store the data with the
- * log record (immediate); for large writes it's cheaper to
- * sync the data and get a pointer to it (indirect) so that
- * we don't have to write the data twice.
- */
- if (buf != NULL) { /* immediate write */
- zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock, offset,
- size, RL_READER);
- /* test for truncation needs to be done while range locked */
- if (offset >= zp->z_size) {
- error = SET_ERROR(ENOENT);
- } else {
- error = dmu_read(os, object, offset, size, buf,
- DMU_READ_NO_PREFETCH);
- }
- ASSERT(error == 0 || error == ENOENT);
- } else { /* indirect write */
- /*
- * Have to lock the whole block to ensure when it's
- * written out and its checksum is being calculated
- * that no one can change the data. We need to re-check
- * blocksize after we get the lock in case it's changed!
- */
- for (;;) {
- uint64_t blkoff;
- size = zp->z_blksz;
- blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
- offset -= blkoff;
- zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
- offset, size, RL_READER);
- if (zp->z_blksz == size)
- break;
- offset += blkoff;
- zfs_rangelock_exit(zgd->zgd_lr);
- }
- /* test for truncation needs to be done while range locked */
- if (lr->lr_offset >= zp->z_size)
- error = SET_ERROR(ENOENT);
-#ifdef ZFS_DEBUG
- if (zil_fault_io) {
- error = SET_ERROR(EIO);
- zil_fault_io = 0;
- }
-#endif
- if (error == 0)
- error = dmu_buf_hold(os, object, offset, zgd, &db,
- DMU_READ_NO_PREFETCH);
-
- if (error == 0) {
- blkptr_t *bp = &lr->lr_blkptr;
-
- zgd->zgd_db = db;
- zgd->zgd_bp = bp;
-
- ASSERT(db->db_offset == offset);
- ASSERT(db->db_size == size);
-
- error = dmu_sync(zio, lr->lr_common.lrc_txg,
- zfs_get_done, zgd);
- ASSERT(error || lr->lr_length <= size);
-
- /*
- * On success, we need to wait for the write I/O
- * initiated by dmu_sync() to complete before we can
- * release this dbuf. We will finish everything up
- * in the zfs_get_done() callback.
- */
- if (error == 0)
- return (0);
-
- if (error == EALREADY) {
- lr->lr_common.lrc_txtype = TX_WRITE2;
- /*
- * TX_WRITE2 relies on the data previously
- * written by the TX_WRITE that caused
- * EALREADY. We zero out the BP because
- * it is the old, currently-on-disk BP,
- * so there's no need to zio_flush() its
- * vdevs (flushing would needlessly hurt
- * performance, and doesn't work on
- * indirect vdevs).
- */
- zgd->zgd_bp = NULL;
- BP_ZERO(bp);
- error = 0;
- }
- }
- }
-
- zfs_get_done(zgd, error);
-
- return (error);
-}
-
-/*ARGSUSED*/
-static int
-zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr,
- caller_context_t *ct)
-{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- int error;
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- if (flag & V_ACE_MASK)
- error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
- else
- error = zfs_zaccess_rwx(zp, mode, flag, cr);
-
- ZFS_EXIT(zfsvfs);
- return (error);
+ VN_RELE_ASYNC(vp, dsl_pool_zrele_taskq(dmu_objset_pool(os)));
}
static int
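
zfs_get_data() and its immediate-versus-indirect commentary moved out of this file as part of the same unification: small log writes embed the data in the intent-log record, large ones sync the data in place and log a block pointer. A toy classifier to make the distinction concrete; the cutoff here is illustrative only, since the real policy involves zfs_immediate_write_sz, zil_slog_bulk, and the dataset's logbias:

    #include <stdio.h>

    /* Illustrative only: the real cutover is not a fixed constant. */
    static const char *
    wr_flavor(size_t len, size_t immediate_max)
    {
        return (len <= immediate_max ?
            "immediate (data embedded in the log record)" :
            "indirect (data synced in place, record stores a pointer)");
    }

    int
    main(void)
    {
        size_t cutoff = 32 * 1024;  /* hypothetical threshold */

        printf("4 KiB write: %s\n", wr_flavor(4096, cutoff));
        printf("1 MiB write: %s\n", wr_flavor(1 << 20, cutoff));
        return (0);
    }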
@@ -2708,27 +1958,6 @@ update:
return (error);
}
-ulong_t zfs_fsync_sync_cnt = 4;
-
-static int
-zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
-{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
-
- (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
-
- if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
- zil_commit(zfsvfs->z_log, zp->z_id);
- ZFS_EXIT(zfsvfs);
- }
- tsd_set(zfs_fsyncer_key, NULL);
- return (0);
-}
-
-
/*
* Get the requested file attributes and place them in the provided
* vattr structure.
@@ -3905,7 +3134,7 @@ zfs_rename_check(znode_t *szp, znode_t *sdzp, znode_t *tdzp)
return (error);
}
-#if __FreeBSD_version < 1300110
+#if __FreeBSD_version < 1300124
static void
cache_vop_rename(struct vnode *fdvp, struct vnode *fvp, struct vnode *tdvp,
struct vnode *tvp, struct componentname *fcnp, struct componentname *tcnp)
@@ -4793,45 +4022,6 @@ zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
}
}
-/*ARGSUSED*/
-static int
-zfs_getsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
- caller_context_t *ct)
-{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- int error;
- boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
- error = zfs_getacl(zp, vsecp, skipaclchk, cr);
- ZFS_EXIT(zfsvfs);
-
- return (error);
-}
-
-/*ARGSUSED*/
-int
-zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
-{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- int error;
- boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
- zilog_t *zilog = zfsvfs->z_log;
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- error = zfs_setacl(zp, vsecp, skipaclchk, cr);
-
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
-
- ZFS_EXIT(zfsvfs);
- return (error);
-}
-
static int
zfs_getpages(struct vnode *vp, vm_page_t *ma, int count, int *rbehind,
int *rahead)
@@ -5225,7 +4415,7 @@ static int
zfs_freebsd_read(struct vop_read_args *ap)
{
- return (zfs_read(ap->a_vp, ap->a_uio, ioflags(ap->a_ioflag),
+ return (zfs_read(VTOZ(ap->a_vp), ap->a_uio, ioflags(ap->a_ioflag),
ap->a_cred));
}
@@ -5242,7 +4432,7 @@ static int
zfs_freebsd_write(struct vop_write_args *ap)
{
- return (zfs_write(ap->a_vp, ap->a_uio, ioflags(ap->a_ioflag),
+ return (zfs_write(VTOZ(ap->a_vp), ap->a_uio, ioflags(ap->a_ioflag),
ap->a_cred));
}
@@ -5301,7 +4491,7 @@ zfs_freebsd_access(struct vop_access_args *ap)
*/
accmode = ap->a_accmode & (VREAD|VWRITE|VEXEC|VAPPEND);
if (accmode != 0)
- error = zfs_access(ap->a_vp, accmode, 0, ap->a_cred, NULL);
+ error = zfs_access(zp, accmode, 0, ap->a_cred);
/*
* VADMIN has to be handled by vaccess().
@@ -5512,7 +4702,7 @@ zfs_freebsd_fsync(struct vop_fsync_args *ap)
{
vop_stdfsync(ap);
- return (zfs_fsync(ap->a_vp, 0, ap->a_td->td_ucred, NULL));
+ return (zfs_fsync(VTOZ(ap->a_vp), 0, ap->a_td->td_ucred));
}
#ifndef _SYS_SYSPROTO_H_
@@ -5825,7 +5015,11 @@ zfs_freebsd_inactive(struct vop_inactive_args *ap)
{
vnode_t *vp = ap->a_vp;
+#if __FreeBSD_version >= 1300123
zfs_inactive(vp, curthread->td_ucred, NULL);
+#else
+ zfs_inactive(vp, ap->a_td->td_ucred, NULL);
+#endif
return (0);
}
@@ -6377,7 +5571,8 @@ zfs_freebsd_getacl(struct vop_getacl_args *ap)
return (EINVAL);
vsecattr.vsa_mask = VSA_ACE | VSA_ACECNT;
- if ((error = zfs_getsecattr(ap->a_vp, &vsecattr, 0, ap->a_cred, NULL)))
+ if ((error = zfs_getsecattr(VTOZ(ap->a_vp),
+ &vsecattr, 0, ap->a_cred)))
return (error);
error = acl_from_aces(ap->a_aclp, vsecattr.vsa_aclentp,
@@ -6510,7 +5705,13 @@ zfs_vptocnp(struct vop_vptocnp_args *ap)
error = vget(covered_vp, LK_SHARED | LK_VNHELD, curthread);
#endif
if (error == 0) {
- error = VOP_VPTOCNP(covered_vp, ap->a_vpp, ap->a_buf, ap->a_buflen);
+#if __FreeBSD_version >= 1300123
+ error = VOP_VPTOCNP(covered_vp, ap->a_vpp, ap->a_buf,
+ ap->a_buflen);
+#else
+ error = VOP_VPTOCNP(covered_vp, ap->a_vpp, ap->a_cred,
+ ap->a_buf, ap->a_buflen);
+#endif
vput(covered_vp);
}
vn_lock(vp, ltype | LK_RETRY);
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode.c
index 40baa0b80928..6a21623c5f67 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode.c
@@ -149,7 +149,6 @@ zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
zp->z_acl_cached = NULL;
zp->z_vnode = NULL;
- zp->z_moved = 0;
return (0);
}
@@ -278,7 +277,6 @@ zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
sharezp = zfs_znode_alloc_kmem(KM_SLEEP);
ASSERT(!POINTER_IS_VALID(sharezp->z_zfsvfs));
- sharezp->z_moved = 0;
sharezp->z_unlinked = 0;
sharezp->z_atime_dirty = 0;
sharezp->z_zfsvfs = zfsvfs;
@@ -437,7 +435,6 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
vp->v_data = zp;
ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
- zp->z_moved = 0;
zp->z_sa_hdl = NULL;
zp->z_unlinked = 0;
@@ -1692,7 +1689,6 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
rootzp = zfs_znode_alloc_kmem(KM_SLEEP);
ASSERT(!POINTER_IS_VALID(rootzp->z_zfsvfs));
- rootzp->z_moved = 0;
rootzp->z_unlinked = 0;
rootzp->z_atime_dirty = 0;
rootzp->z_is_sa = USE_SA(version, os);
@@ -2015,6 +2011,20 @@ zfs_obj_to_stats(objset_t *osp, uint64_t obj, zfs_stat_t *sb,
return (error);
}
+
+void
+zfs_inode_update(znode_t *zp)
+{
+ vm_object_t object;
+
+ if ((object = ZTOV(zp)->v_object) == NULL ||
+ zp->z_size == object->un_pager.vnp.vnp_size)
+ return;
+
+ vnode_pager_setsize(ZTOV(zp), zp->z_size);
+}
+
+
#ifdef _KERNEL
int
zfs_znode_parent_and_name(znode_t *zp, znode_t **dzpp, char *buf)
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c
index fb88bc325d3c..fd2beee7bdd2 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c
@@ -1071,6 +1071,16 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
bcopy(raw_portable_mac, portable_mac, ZIO_OBJSET_MAC_LEN);
/*
+ * This is necessary here as we check next whether
+ * OBJSET_FLAG_USERACCOUNTING_COMPLETE or
+ * OBJSET_FLAG_USEROBJACCOUNTING are set in order to
+ * decide if the local_mac should be zeroed out.
+ */
+ intval = osp->os_flags;
+ if (should_bswap)
+ intval = BSWAP_64(intval);
+
+ /*
* The local MAC protects the user, group and project accounting.
* If these objects are not present, the local MAC is zeroed out.
*/
@@ -1081,7 +1091,10 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
(datalen >= OBJSET_PHYS_SIZE_V2 &&
osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
osp->os_groupused_dnode.dn_type == DMU_OT_NONE) ||
- (datalen <= OBJSET_PHYS_SIZE_V1)) {
+ (datalen <= OBJSET_PHYS_SIZE_V1) ||
+ (((intval & OBJSET_FLAG_USERACCOUNTING_COMPLETE) == 0 ||
+ (intval & OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE) == 0) &&
+ key->zk_version > 0)) {
bzero(local_mac, ZIO_OBJSET_MAC_LEN);
return (0);
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
index 092eb34eaa47..6c44e3681709 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
@@ -116,7 +116,6 @@ enum zvol_geom_state {
};
struct zvol_state_os {
- int zso_volmode;
#define zso_dev _zso_state._zso_dev
#define zso_geom _zso_state._zso_geom
union {
@@ -134,6 +133,7 @@ struct zvol_state_os {
enum zvol_geom_state zsg_state;
} _zso_geom;
} _zso_state;
+ int zso_dying;
};
static uint32_t zvol_minors;
@@ -209,7 +209,7 @@ zvol_geom_open(struct g_provider *pp, int flag, int count)
{
zvol_state_t *zv;
int err = 0;
- boolean_t drop_suspend = B_TRUE;
+ boolean_t drop_suspend = B_FALSE;
boolean_t drop_namespace = B_FALSE;
if (!zpool_on_zvol && tsd_get(zfs_geom_probe_vdev_key) != NULL) {
@@ -228,16 +228,15 @@ retry:
rw_enter(&zvol_state_lock, ZVOL_RW_READER);
zv = pp->private;
if (zv == NULL) {
- if (drop_namespace)
- mutex_exit(&spa_namespace_lock);
rw_exit(&zvol_state_lock);
- return (SET_ERROR(ENXIO));
+ err = SET_ERROR(ENXIO);
+ goto out_locked;
}
if (zv->zv_open_count == 0 && !mutex_owned(&spa_namespace_lock)) {
/*
* We need to guarantee that the namespace lock is held
- * to avoid spurious failures in zvol_first_open
+ * to avoid spurious failures in zvol_first_open.
*/
drop_namespace = B_TRUE;
if (!mutex_tryenter(&spa_namespace_lock)) {
@@ -247,8 +246,12 @@ retry:
}
}
mutex_enter(&zv->zv_state_lock);
-
- ASSERT(zv->zv_zso->zso_volmode == ZFS_VOLMODE_GEOM);
+ if (zv->zv_zso->zso_dying) {
+ rw_exit(&zvol_state_lock);
+ err = SET_ERROR(ENXIO);
+ goto out_zv_locked;
+ }
+ ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
/*
* make sure zvol is not suspended during first open
@@ -256,6 +259,7 @@ retry:
* ordering - zv_suspend_lock before zv_state_lock
*/
if (zv->zv_open_count == 0) {
+ drop_suspend = B_TRUE;
if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
@@ -266,8 +270,6 @@ retry:
drop_suspend = B_FALSE;
}
}
- } else {
- drop_suspend = B_FALSE;
}
rw_exit(&zvol_state_lock);
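
This hunk is one instance of the file's recurring lock dance: zv_suspend_lock must be acquired before zv_state_lock, but the code already holds the state lock, so it try-locks and only drops/retakes on contention, re-checking state afterwards. A pthread analogue of the shape, simplified to plain mutexes:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t suspend_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Caller holds state_lock; the rule is suspend_lock before state_lock. */
    static void
    take_suspend_in_order(void)
    {
        if (pthread_mutex_trylock(&suspend_lock) != 0) {
            /*
             * Contended: back out so the documented order is respected,
             * then retake both.  State may have changed meanwhile, which
             * is why the real code re-checks zv_open_count afterwards.
             */
            pthread_mutex_unlock(&state_lock);
            pthread_mutex_lock(&suspend_lock);
            pthread_mutex_lock(&state_lock);
        }
    }

    int
    main(void)
    {
        pthread_mutex_lock(&state_lock);
        take_suspend_in_order();
        printf("both locks held, in order\n");
        pthread_mutex_unlock(&state_lock);
        pthread_mutex_unlock(&suspend_lock);
        return (0);
    }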
@@ -277,7 +279,7 @@ retry:
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
err = zvol_first_open(zv, !(flag & FWRITE));
if (err)
- goto out_mutex;
+ goto out_zv_locked;
pp->mediasize = zv->zv_volsize;
pp->stripeoffset = 0;
pp->stripesize = zv->zv_volblocksize;
@@ -289,41 +291,37 @@ retry:
*/
if ((flag & FWRITE) && ((zv->zv_flags & ZVOL_RDONLY) ||
dmu_objset_incompatible_encryption_version(zv->zv_objset))) {
- err = EROFS;
- goto out_open_count;
+ err = SET_ERROR(EROFS);
+ goto out_opened;
}
if (zv->zv_flags & ZVOL_EXCL) {
- err = EBUSY;
- goto out_open_count;
+ err = SET_ERROR(EBUSY);
+ goto out_opened;
}
#ifdef FEXCL
if (flag & FEXCL) {
if (zv->zv_open_count != 0) {
- err = EBUSY;
- goto out_open_count;
+ err = SET_ERROR(EBUSY);
+ goto out_opened;
}
zv->zv_flags |= ZVOL_EXCL;
}
#endif
zv->zv_open_count += count;
- if (drop_namespace)
- mutex_exit(&spa_namespace_lock);
- mutex_exit(&zv->zv_state_lock);
- if (drop_suspend)
- rw_exit(&zv->zv_suspend_lock);
- return (0);
-
-out_open_count:
- if (zv->zv_open_count == 0)
+out_opened:
+ if (zv->zv_open_count == 0) {
zvol_last_close(zv);
-out_mutex:
+ wakeup(zv);
+ }
+out_zv_locked:
+ mutex_exit(&zv->zv_state_lock);
+out_locked:
if (drop_namespace)
mutex_exit(&spa_namespace_lock);
- mutex_exit(&zv->zv_state_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
- return (SET_ERROR(err));
+ return (err);
}
/*ARGSUSED*/
@@ -332,6 +330,7 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
{
zvol_state_t *zv;
boolean_t drop_suspend = B_TRUE;
+ int new_open_count;
rw_enter(&zvol_state_lock, ZVOL_RW_READER);
zv = pp->private;
@@ -342,30 +341,32 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
mutex_enter(&zv->zv_state_lock);
if (zv->zv_flags & ZVOL_EXCL) {
- ASSERT(zv->zv_open_count == 1);
+ ASSERT3U(zv->zv_open_count, ==, 1);
zv->zv_flags &= ~ZVOL_EXCL;
}
- ASSERT(zv->zv_zso->zso_volmode == ZFS_VOLMODE_GEOM);
+ ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
/*
* If the open count is zero, this is a spurious close.
* That indicates a bug in the kernel / DDI framework.
*/
- ASSERT(zv->zv_open_count > 0);
+ ASSERT3U(zv->zv_open_count, >, 0);
/*
* make sure zvol is not suspended during last close
* (hold zv_suspend_lock) and respect proper lock acquisition
* ordering - zv_suspend_lock before zv_state_lock
*/
- if ((zv->zv_open_count - count) == 0) {
+ new_open_count = zv->zv_open_count - count;
+ if (new_open_count == 0) {
if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
/* check to see if zv_suspend_lock is needed */
- if (zv->zv_open_count != 1) {
+ new_open_count = zv->zv_open_count - count;
+ if (new_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
}
@@ -380,11 +381,11 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
/*
* You may get multiple opens, but only one close.
*/
- zv->zv_open_count -= count;
-
+ zv->zv_open_count = new_open_count;
if (zv->zv_open_count == 0) {
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
zvol_last_close(zv);
+ wakeup(zv);
}
mutex_exit(&zv->zv_state_lock);
@@ -400,7 +401,7 @@ zvol_geom_run(zvol_state_t *zv)
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
- ASSERT(zv->zv_zso->zso_volmode == ZFS_VOLMODE_GEOM);
+ ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
g_error_provider(pp, 0);
@@ -414,7 +415,7 @@ zvol_geom_destroy(zvol_state_t *zv)
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
- ASSERT(zv->zv_zso->zso_volmode == ZFS_VOLMODE_GEOM);
+ ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
g_topology_assert();
@@ -422,10 +423,25 @@ zvol_geom_destroy(zvol_state_t *zv)
VERIFY(zsg->zsg_state == ZVOL_GEOM_RUNNING);
mutex_exit(&zv->zv_state_lock);
zsg->zsg_provider = NULL;
- pp->private = NULL;
g_wither_geom(pp->geom, ENXIO);
}
+void
+zvol_wait_close(zvol_state_t *zv)
+{
+
+ if (zv->zv_volmode != ZFS_VOLMODE_GEOM)
+ return;
+ mutex_enter(&zv->zv_state_lock);
+ zv->zv_zso->zso_dying = B_TRUE;
+
+ if (zv->zv_open_count)
+ msleep(zv, &zv->zv_state_lock,
+ PRIBIO, "zvol:dying", 10*hz);
+ mutex_exit(&zv->zv_state_lock);
+}
+
+
static int
zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
{
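
The new zvol_wait_close() above marks the volume dying and msleep()s on the zv pointer until a matching wakeup(zv), added in the close paths, fires or the 10*hz timeout expires. msleep/wakeup is FreeBSD's kernel sleep channel; a condition-variable analogue of that pairing:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t closed_cv = PTHREAD_COND_INITIALIZER;
    static int open_count = 1;
    static bool dying = false;

    /* The wakeup(zv) side: last close signals the waiter. */
    static void *
    closer(void *arg)
    {
        (void) arg;
        pthread_mutex_lock(&state_lock);
        open_count = 0;
        pthread_cond_broadcast(&closed_cv);
        pthread_mutex_unlock(&state_lock);
        return (NULL);
    }

    /* The msleep() side: flag dying, then wait for the count to drain. */
    static void
    wait_close(void)
    {
        pthread_mutex_lock(&state_lock);
        dying = true;   /* new opens are refused from here on */
        while (open_count != 0)
            pthread_cond_wait(&closed_cv, &state_lock);
        /* (zvol_wait_close() additionally bounds the wait with 10*hz.) */
        pthread_mutex_unlock(&state_lock);
    }

    int
    main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, closer, NULL);
        wait_close();
        pthread_join(t, NULL);
        printf("volume quiesced (dying=%d, opens=%d)\n", dying, open_count);
        return (0);
    }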
@@ -483,7 +499,7 @@ zvol_geom_worker(void *arg)
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct bio *bp;
- ASSERT(zv->zv_zso->zso_volmode == ZFS_VOLMODE_GEOM);
+ ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
thread_lock(curthread);
sched_prio(curthread, PRIBIO);
@@ -512,9 +528,13 @@ static void
zvol_geom_bio_start(struct bio *bp)
{
zvol_state_t *zv = bp->bio_to->private;
- struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
+ struct zvol_state_geom *zsg;
boolean_t first;
+ if (zv == NULL) {
+ g_io_deliver(bp, ENXIO);
+ return;
+ }
if (bp->bio_cmd == BIO_GETATTR) {
if (zvol_geom_bio_getattr(bp))
g_io_deliver(bp, EOPNOTSUPP);
@@ -522,6 +542,7 @@ zvol_geom_bio_start(struct bio *bp)
}
if (!THREAD_CAN_SLEEP()) {
+ zsg = &zv->zv_zso->zso_geom;
mtx_lock(&zsg->zsg_queue_mtx);
first = (bioq_first(&zsg->zsg_queue) == NULL);
bioq_insert_tail(&zsg->zsg_queue, bp);
@@ -540,7 +561,7 @@ zvol_geom_bio_getattr(struct bio *bp)
zvol_state_t *zv;
zv = bp->bio_to->private;
- ASSERT(zv != NULL);
+ ASSERT3P(zv, !=, NULL);
spa_t *spa = dmu_objset_spa(zv->zv_objset);
uint64_t refd, avail, usedobjs, availobjs;
@@ -613,7 +634,7 @@ zvol_geom_bio_strategy(struct bio *bp)
goto sync;
break;
default:
- error = EOPNOTSUPP;
+ error = SET_ERROR(EOPNOTSUPP);
goto resume;
}
@@ -621,7 +642,7 @@ zvol_geom_bio_strategy(struct bio *bp)
volsize = zv->zv_volsize;
os = zv->zv_objset;
- ASSERT(os != NULL);
+ ASSERT3P(os, !=, NULL);
addr = bp->bio_data;
resid = bp->bio_length;
@@ -688,7 +709,7 @@ unlock:
bp->bio_completed = bp->bio_length - resid;
if (bp->bio_completed < bp->bio_length && off > volsize)
- error = EINVAL;
+ error = SET_ERROR(EINVAL);
switch (bp->bio_cmd) {
case BIO_FLUSH:
@@ -825,18 +846,33 @@ zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
zvol_state_t *zv;
struct zvol_state_dev *zsd;
int err = 0;
- boolean_t drop_suspend = B_TRUE;
+ boolean_t drop_suspend = B_FALSE;
+ boolean_t drop_namespace = B_FALSE;
+retry:
rw_enter(&zvol_state_lock, ZVOL_RW_READER);
zv = dev->si_drv2;
if (zv == NULL) {
rw_exit(&zvol_state_lock);
- return (SET_ERROR(ENXIO));
+ err = SET_ERROR(ENXIO);
+ goto out_locked;
}
+ if (zv->zv_open_count == 0 && !mutex_owned(&spa_namespace_lock)) {
+ /*
+ * We need to guarantee that the namespace lock is held
+ * to avoid spurious failures in zvol_first_open.
+ */
+ drop_namespace = B_TRUE;
+ if (!mutex_tryenter(&spa_namespace_lock)) {
+ rw_exit(&zvol_state_lock);
+ mutex_enter(&spa_namespace_lock);
+ goto retry;
+ }
+ }
mutex_enter(&zv->zv_state_lock);
- ASSERT(zv->zv_zso->zso_volmode == ZFS_VOLMODE_DEV);
+ ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_DEV);
/*
* make sure zvol is not suspended during first open
@@ -844,6 +880,7 @@ zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
* ordering - zv_suspend_lock before zv_state_lock
*/
if (zv->zv_open_count == 0) {
+ drop_suspend = B_TRUE;
if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
@@ -854,8 +891,6 @@ zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
drop_suspend = B_FALSE;
}
}
- } else {
- drop_suspend = B_FALSE;
}
rw_exit(&zvol_state_lock);
@@ -865,21 +900,21 @@ zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
err = zvol_first_open(zv, !(flags & FWRITE));
if (err)
- goto out_locked;
+ goto out_zv_locked;
}
if ((flags & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
- err = EROFS;
+ err = SET_ERROR(EROFS);
goto out_opened;
}
if (zv->zv_flags & ZVOL_EXCL) {
- err = EBUSY;
+ err = SET_ERROR(EBUSY);
goto out_opened;
}
#ifdef FEXCL
if (flags & FEXCL) {
if (zv->zv_open_count != 0) {
- err = EBUSY;
+ err = SET_ERROR(EBUSY);
goto out_opened;
}
zv->zv_flags |= ZVOL_EXCL;
@@ -894,20 +929,19 @@ zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
(zv->zv_flags & ZVOL_WRITTEN_TO) != 0)
zil_async_to_sync(zv->zv_zilog, ZVOL_OBJ);
}
-
- mutex_exit(&zv->zv_state_lock);
- if (drop_suspend)
- rw_exit(&zv->zv_suspend_lock);
- return (0);
-
out_opened:
- if (zv->zv_open_count == 0)
+ if (zv->zv_open_count == 0) {
zvol_last_close(zv);
-out_locked:
+ wakeup(zv);
+ }
+out_zv_locked:
mutex_exit(&zv->zv_state_lock);
+out_locked:
+ if (drop_namespace)
+ mutex_exit(&spa_namespace_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
- return (SET_ERROR(err));
+ return (err);
}
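
The retry: loop in this function is the usual trylock-and-restart idiom for taking a lock out of its documented order: spa_namespace_lock must come before the zvol locks, but the open path only discovers it needs the namespace lock after it has looked the zvol up. A self-contained pthreads sketch of the idiom; the lock names are stand-ins, not the ZFS primitives:

#include <pthread.h>

static pthread_mutex_t ns_lock = PTHREAD_MUTEX_INITIALIZER;	 /* outer */
static pthread_rwlock_t state_lock = PTHREAD_RWLOCK_INITIALIZER; /* inner */

static void
open_in_order(void)
{
	int have_ns = 0;
retry:
	pthread_rwlock_rdlock(&state_lock);
	if (!have_ns) {
		if (pthread_mutex_trylock(&ns_lock) == 0) {
			have_ns = 1;	/* fast path: no order inversion */
		} else {
			/*
			 * Blocking here would invert the lock order, so
			 * drop the inner lock, block on the outer one,
			 * then restart and re-validate from the top.
			 */
			pthread_rwlock_unlock(&state_lock);
			pthread_mutex_lock(&ns_lock);
			have_ns = 1;
			goto retry;
		}
	}
	/* ... both locks held in the required order ... */
	pthread_rwlock_unlock(&state_lock);
	pthread_mutex_unlock(&ns_lock);
}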
static int
@@ -926,17 +960,17 @@ zvol_cdev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
mutex_enter(&zv->zv_state_lock);
if (zv->zv_flags & ZVOL_EXCL) {
- ASSERT(zv->zv_open_count == 1);
+ ASSERT3U(zv->zv_open_count, ==, 1);
zv->zv_flags &= ~ZVOL_EXCL;
}
- ASSERT(zv->zv_zso->zso_volmode == ZFS_VOLMODE_DEV);
+ ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_DEV);
/*
* If the open count is zero, this is a spurious close.
* That indicates a bug in the kernel / DDI framework.
*/
- ASSERT(zv->zv_open_count > 0);
+ ASSERT3U(zv->zv_open_count, >, 0);
/*
* make sure zvol is not suspended during last close
* (hold zv_suspend_lock) and respect proper lock acquisition
@@ -972,6 +1006,7 @@ zvol_cdev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
if (zv->zv_open_count == 0) {
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
zvol_last_close(zv);
+ wakeup(zv);
}
mutex_exit(&zv->zv_state_lock);
@@ -1022,7 +1057,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
length <= 0) {
printf("%s: offset=%jd length=%jd\n", __func__, offset,
length);
- error = EINVAL;
+ error = SET_ERROR(EINVAL);
break;
}
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
@@ -1076,7 +1111,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
refd = metaslab_class_get_alloc(spa_normal_class(spa));
arg->value.off = refd / DEV_BSIZE;
} else
- error = ENOIOCTL;
+ error = SET_ERROR(ENOIOCTL);
break;
}
case FIOSEEKHOLE:
@@ -1092,7 +1127,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
break;
}
default:
- error = ENOIOCTL;
+ error = SET_ERROR(ENOIOCTL);
}
return (error);
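
The recurring error = SET_ERROR(...) conversions throughout these hunks do not change behavior: SET_ERROR() evaluates to its argument, but first records where the errno was raised so the error can be traced back to its origin. A reduced userspace sketch of the idiom; set_error_hook() is made up here, the real macro lives in the SPL headers:

#include <stdio.h>

static inline int
set_error_hook(const char *file, int line, int err)
{
	/* In the kernel this feeds a tracepoint, not stderr. */
	fprintf(stderr, "errno %d first set at %s:%d\n", err, file, line);
	return (err);
}

#define	SET_ERROR(err)	set_error_hook(__FILE__, __LINE__, (err))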
@@ -1144,14 +1179,14 @@ zvol_rename_minor(zvol_state_t *zv, const char *newname)
hlist_del(&zv->zv_hlink);
hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));
- if (zv->zv_zso->zso_volmode == ZFS_VOLMODE_GEOM) {
+ if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
struct g_geom *gp;
g_topology_lock();
gp = pp->geom;
- ASSERT(gp != NULL);
+ ASSERT3P(gp, !=, NULL);
zsg->zsg_provider = NULL;
g_wither_provider(pp, ENXIO);
@@ -1164,7 +1199,7 @@ zvol_rename_minor(zvol_state_t *zv, const char *newname)
zsg->zsg_provider = pp;
g_error_provider(pp, 0);
g_topology_unlock();
- } else if (zv->zv_zso->zso_volmode == ZFS_VOLMODE_DEV) {
+ } else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
struct cdev *dev;
struct make_dev_args args;
@@ -1206,26 +1241,30 @@ zvol_free(zvol_state_t *zv)
{
ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
- ASSERT(zv->zv_open_count == 0);
+ ASSERT0(zv->zv_open_count);
ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);
rw_destroy(&zv->zv_suspend_lock);
zfs_rangelock_fini(&zv->zv_rangelock);
- if (zv->zv_zso->zso_volmode == ZFS_VOLMODE_GEOM) {
+ if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
+ struct g_provider *pp __maybe_unused = zsg->zsg_provider;
+
+ ASSERT3P(pp->private, ==, NULL);
g_topology_lock();
zvol_geom_destroy(zv);
g_topology_unlock();
mtx_destroy(&zsg->zsg_queue_mtx);
- } else if (zv->zv_zso->zso_volmode == ZFS_VOLMODE_DEV) {
+ } else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
struct cdev *dev = zsd->zsd_cdev;
- if (dev != NULL)
- destroy_dev(dev);
+ ASSERT3P(dev->si_drv2, ==, NULL);
+
+ destroy_dev(dev);
}
mutex_destroy(&zv->zv_state_lock);
@@ -1249,7 +1288,6 @@ zvol_create_minor_impl(const char *name)
int error;
ZFS_LOG(1, "Creating ZVOL %s...", name);
-
hash = zvol_name_hash(name);
if ((zv = zvol_find_by_name_hash(name, hash, RW_NONE)) != NULL) {
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -1258,10 +1296,11 @@ zvol_create_minor_impl(const char *name)
}
DROP_GIANT();
- /* lie and say we're read-only */
- error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, B_TRUE, FTAG, &os);
+
doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
+ /* lie and say we're read-only */
+ error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, B_TRUE, FTAG, &os);
if (error)
goto out_doi;
@@ -1275,8 +1314,10 @@ zvol_create_minor_impl(const char *name)
error = dsl_prop_get_integer(name,
zfs_prop_to_name(ZFS_PROP_VOLMODE), &volmode, NULL);
- if (error != 0 || volmode == ZFS_VOLMODE_DEFAULT)
+ if (error || volmode == ZFS_VOLMODE_DEFAULT)
volmode = zvol_volmode;
+ error = 0;
+
/*
* zvol_alloc equivalent ...
*/
@@ -1284,8 +1325,8 @@ zvol_create_minor_impl(const char *name)
zv->zv_hash = hash;
mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);
zv->zv_zso = kmem_zalloc(sizeof (struct zvol_state_os), KM_SLEEP);
- zv->zv_zso->zso_volmode = volmode;
- if (zv->zv_zso->zso_volmode == ZFS_VOLMODE_GEOM) {
+ zv->zv_volmode = volmode;
+ if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp;
struct g_geom *gp;
@@ -1298,7 +1339,6 @@ zvol_create_minor_impl(const char *name)
gp->start = zvol_geom_bio_start;
gp->access = zvol_geom_access;
pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
- /* TODO: NULL check? */
pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
pp->sectorsize = DEV_BSIZE;
pp->mediasize = 0;
@@ -1306,7 +1346,7 @@ zvol_create_minor_impl(const char *name)
zsg->zsg_provider = pp;
bioq_init(&zsg->zsg_queue);
- } else if (zv->zv_zso->zso_volmode == ZFS_VOLMODE_DEV) {
+ } else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
struct cdev *dev;
struct make_dev_args args;
@@ -1320,12 +1360,12 @@ zvol_create_minor_impl(const char *name)
args.mda_mode = 0640;
args.mda_si_drv2 = zv;
error = make_dev_s(&args, &dev, "%s/%s", ZVOL_DRIVER, name);
- if (error != 0) {
- mutex_destroy(&zv->zv_state_lock);
+ if (error) {
kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
+ mutex_destroy(&zv->zv_state_lock);
kmem_free(zv, sizeof (*zv));
dmu_objset_disown(os, B_TRUE, FTAG);
- goto out_giant;
+ goto out_doi;
}
	dev->si_iosize_max = MAXPHYS;
zsd->zsd_cdev = dev;
@@ -1350,15 +1390,14 @@ zvol_create_minor_impl(const char *name)
ASSERT3P(zv->zv_kstat.dk_kstats, ==, NULL);
dataset_kstats_create(&zv->zv_kstat, zv->zv_objset);
- /* XXX do prefetch */
+ /* TODO: prefetch for geom tasting */
zv->zv_objset = NULL;
out_dmu_objset_disown:
dmu_objset_disown(os, B_TRUE, FTAG);
- if (zv->zv_zso->zso_volmode == ZFS_VOLMODE_GEOM) {
- if (error == 0)
- zvol_geom_run(zv);
+ if (error == 0 && volmode == ZFS_VOLMODE_GEOM) {
+ zvol_geom_run(zv);
g_topology_unlock();
}
out_doi:
@@ -1368,9 +1407,8 @@ out_doi:
zvol_insert(zv);
zvol_minors++;
rw_exit(&zvol_state_lock);
+ ZFS_LOG(1, "ZVOL %s created.", name);
}
- ZFS_LOG(1, "ZVOL %s created.", name);
-out_giant:
PICKUP_GIANT();
return (error);
}
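
Besides funneling the make_dev_s() failure path through out_doi instead of a separate out_giant label, the cleanup above now unwinds strictly in the reverse of construction order: free the per-OS state, destroy the mutex embedded in the still-allocated zvol_state_t, then free it. The general shape, as a standalone sketch with invented names:

#include <stdlib.h>
#include <pthread.h>

struct vol {
	pthread_mutex_t	state_lock;
	void		*os_state;
};

static struct vol *
vol_create(int fail_dev)
{
	struct vol *v = calloc(1, sizeof (*v));

	if (v == NULL)
		return (NULL);
	pthread_mutex_init(&v->state_lock, NULL);	/* step 1 */
	v->os_state = calloc(1, 64);			/* step 2 */
	if (v->os_state == NULL)
		goto fail_mtx;
	if (fail_dev)		/* step 3, e.g. device creation, fails */
		goto fail_os;
	return (v);
fail_os:
	free(v->os_state);			/* undo step 2 */
fail_mtx:
	pthread_mutex_destroy(&v->state_lock);	/* undo step 1 */
	free(v);
	return (NULL);
}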
@@ -1379,11 +1417,11 @@ static void
zvol_clear_private(zvol_state_t *zv)
{
ASSERT(RW_LOCK_HELD(&zvol_state_lock));
- if (zv->zv_zso->zso_volmode == ZFS_VOLMODE_GEOM) {
+ if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
- if (pp == NULL) /* XXX when? */
+ if (pp->private == NULL) /* already cleared */
return;
mtx_lock(&zsg->zsg_queue_mtx);
@@ -1391,11 +1429,15 @@ zvol_clear_private(zvol_state_t *zv)
pp->private = NULL;
wakeup_one(&zsg->zsg_queue);
while (zsg->zsg_state != ZVOL_GEOM_RUNNING)
- msleep(&zsg->zsg_state,
- &zsg->zsg_queue_mtx,
+ msleep(&zsg->zsg_state, &zsg->zsg_queue_mtx,
0, "zvol:w", 0);
mtx_unlock(&zsg->zsg_queue_mtx);
ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
+ } else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
+ struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
+ struct cdev *dev = zsd->zsd_cdev;
+
+ dev->si_drv2 = NULL;
}
}
@@ -1403,15 +1445,17 @@ static int
zvol_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
zv->zv_volsize = volsize;
- if (zv->zv_zso->zso_volmode == ZFS_VOLMODE_GEOM) {
+ if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
- if (pp == NULL) /* XXX when? */
- return (0);
-
g_topology_lock();
+ if (pp->private == NULL) {
+ g_topology_unlock();
+ return (SET_ERROR(ENXIO));
+ }
+
/*
* Do not invoke resize event when initial size was zero.
* ZVOL initializes the size on first open, this is not
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-taskq.c b/sys/contrib/openzfs/module/os/linux/spl/spl-taskq.c
index fafadffc751c..e8d89bfeabe5 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-taskq.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-taskq.c
@@ -28,6 +28,9 @@
#include <sys/kmem.h>
#include <sys/tsd.h>
#include <sys/trace_spl.h>
+#ifdef HAVE_CPU_HOTPLUG
+#include <linux/cpuhotplug.h>
+#endif
int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
@@ -35,7 +38,7 @@ MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");
int spl_taskq_thread_dynamic = 1;
-module_param(spl_taskq_thread_dynamic, int, 0644);
+module_param(spl_taskq_thread_dynamic, int, 0444);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");
int spl_taskq_thread_priority = 1;
@@ -59,6 +62,11 @@ EXPORT_SYMBOL(system_delay_taskq);
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);
+#ifdef HAVE_CPU_HOTPLUG
+/* Multi-callback id for cpu hotplugging. */
+static int spl_taskq_cpuhp_state;
+#endif
+
/* List of all taskqs */
LIST_HEAD(tq_list);
struct rw_semaphore tq_list_sem;
@@ -1024,13 +1032,14 @@ taskq_thread_create(taskq_t *tq)
}
taskq_t *
-taskq_create(const char *name, int nthreads, pri_t pri,
+taskq_create(const char *name, int threads_arg, pri_t pri,
int minalloc, int maxalloc, uint_t flags)
{
taskq_t *tq;
taskq_thread_t *tqt;
int count = 0, rc = 0, i;
unsigned long irqflags;
+ int nthreads = threads_arg;
ASSERT(name != NULL);
ASSERT(minalloc >= 0);
@@ -1041,15 +1050,27 @@ taskq_create(const char *name, int nthreads, pri_t pri,
if (flags & TASKQ_THREADS_CPU_PCT) {
ASSERT(nthreads <= 100);
ASSERT(nthreads >= 0);
- nthreads = MIN(nthreads, 100);
+ nthreads = MIN(threads_arg, 100);
nthreads = MAX(nthreads, 0);
		nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
}
tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
if (tq == NULL)
return (NULL);
+ tq->tq_hp_support = B_FALSE;
+#ifdef HAVE_CPU_HOTPLUG
+ if (flags & TASKQ_THREADS_CPU_PCT) {
+ tq->tq_hp_support = B_TRUE;
+ if (cpuhp_state_add_instance_nocalls(spl_taskq_cpuhp_state,
+ &tq->tq_hp_cb_node) != 0) {
+ kmem_free(tq, sizeof (*tq));
+ return (NULL);
+ }
+ }
+#endif
+
spin_lock_init(&tq->tq_lock);
INIT_LIST_HEAD(&tq->tq_thread_list);
INIT_LIST_HEAD(&tq->tq_active_list);
@@ -1058,6 +1079,7 @@ taskq_create(const char *name, int nthreads, pri_t pri,
tq->tq_nthreads = 0;
tq->tq_nspawn = 0;
tq->tq_maxthreads = nthreads;
+ tq->tq_cpu_pct = threads_arg;
tq->tq_pri = pri;
tq->tq_minalloc = minalloc;
tq->tq_maxalloc = maxalloc;
@@ -1131,6 +1153,12 @@ taskq_destroy(taskq_t *tq)
tq->tq_flags &= ~TASKQ_ACTIVE;
spin_unlock_irqrestore(&tq->tq_lock, flags);
+#ifdef HAVE_CPU_HOTPLUG
+ if (tq->tq_hp_support) {
+ VERIFY0(cpuhp_state_remove_instance_nocalls(
+ spl_taskq_cpuhp_state, &tq->tq_hp_cb_node));
+ }
+#endif
/*
* When TASKQ_ACTIVE is clear new tasks may not be added nor may
* new worker threads be spawned for dynamic taskq.
@@ -1198,7 +1226,6 @@ taskq_destroy(taskq_t *tq)
}
EXPORT_SYMBOL(taskq_destroy);
-
static unsigned int spl_taskq_kick = 0;
/*
@@ -1255,12 +1282,96 @@ module_param_call(spl_taskq_kick, param_set_taskq_kick, param_get_uint,
MODULE_PARM_DESC(spl_taskq_kick,
"Write nonzero to kick stuck taskqs to spawn more threads");
+#ifdef HAVE_CPU_HOTPLUG
+/*
+ * This callback will be called exactly once for each core that comes online,
+ * for each dynamic taskq. We attempt to expand taskqs that have
+ * TASKQ_THREADS_CPU_PCT set. We need to redo the percentage calculation every
+ * time, to correctly determine whether or not to add a thread.
+ */
+static int
+spl_taskq_expand(unsigned int cpu, struct hlist_node *node)
+{
+ taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
+ unsigned long flags;
+ int err = 0;
+
+ ASSERT(tq);
+ spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
+
+ if (!(tq->tq_flags & TASKQ_ACTIVE))
+ goto out;
+
+ ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
+ int nthreads = MIN(tq->tq_cpu_pct, 100);
+ nthreads = MAX(((num_online_cpus() + 1) * nthreads) / 100, 1);
+ tq->tq_maxthreads = nthreads;
+
+ if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
+ tq->tq_maxthreads > tq->tq_nthreads) {
+ ASSERT3U(tq->tq_maxthreads, ==, tq->tq_nthreads + 1);
+ taskq_thread_t *tqt = taskq_thread_create(tq);
+ if (tqt == NULL)
+ err = -1;
+ }
+
+out:
+ spin_unlock_irqrestore(&tq->tq_lock, flags);
+ return (err);
+}
+
+/*
+ * While we don't support offlining CPUs, it is possible that CPUs will fail
+ * to online successfully. We do need to be able to handle this case
+ * gracefully.
+ */
+static int
+spl_taskq_prepare_down(unsigned int cpu, struct hlist_node *node)
+{
+ taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
+ unsigned long flags;
+
+ ASSERT(tq);
+ spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
+
+ if (!(tq->tq_flags & TASKQ_ACTIVE))
+ goto out;
+
+ ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
+ int nthreads = MIN(tq->tq_cpu_pct, 100);
+ nthreads = MAX(((num_online_cpus()) * nthreads) / 100, 1);
+ tq->tq_maxthreads = nthreads;
+
+ if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
+ tq->tq_maxthreads < tq->tq_nthreads) {
+ ASSERT3U(tq->tq_maxthreads, ==, tq->tq_nthreads - 1);
+ taskq_thread_t *tqt = list_entry(tq->tq_thread_list.next,
+ taskq_thread_t, tqt_thread_list);
+ struct task_struct *thread = tqt->tqt_thread;
+ spin_unlock_irqrestore(&tq->tq_lock, flags);
+
+ kthread_stop(thread);
+
+ return (0);
+ }
+
+out:
+ spin_unlock_irqrestore(&tq->tq_lock, flags);
+ return (0);
+}
+#endif
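
Both callbacks recompute tq_maxthreads from the stored percentage instead of adjusting it incrementally, which is why taskq_create() now remembers threads_arg in tq_cpu_pct: spl_taskq_expand() sizes against num_online_cpus() + 1 (counting the CPU that is coming up), spl_taskq_prepare_down() against the surviving set. The arithmetic, pulled out into a runnable sketch:

#include <stdio.h>

#define	MIN(a, b)	((a) < (b) ? (a) : (b))
#define	MAX(a, b)	((a) > (b) ? (a) : (b))

/* Same computation as the callbacks above, minus the locking. */
static int
pct_maxthreads(int online_cpus, int cpu_pct)
{
	int nthreads = MIN(cpu_pct, 100);

	return (MAX((online_cpus * nthreads) / 100, 1));
}

int
main(void)
{
	/* A 75% taskq as CPUs 1..8 come online, one at a time. */
	for (int cpus = 1; cpus <= 8; cpus++)
		printf("%d cpus -> %d threads\n",
		    cpus, pct_maxthreads(cpus, 75));
	return (0);
}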
+
int
spl_taskq_init(void)
{
init_rwsem(&tq_list_sem);
tsd_create(&taskq_tsd, NULL);
+#ifdef HAVE_CPU_HOTPLUG
+ spl_taskq_cpuhp_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "fs/spl_taskq:online", spl_taskq_expand, spl_taskq_prepare_down);
+#endif
+
system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
if (system_taskq == NULL)
@@ -1269,6 +1380,9 @@ spl_taskq_init(void)
system_delay_taskq = taskq_create("spl_delay_taskq", MAX(boot_ncpus, 4),
maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
if (system_delay_taskq == NULL) {
+#ifdef HAVE_CPU_HOTPLUG
+ cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
+#endif
taskq_destroy(system_taskq);
return (1);
}
@@ -1276,6 +1390,9 @@ spl_taskq_init(void)
dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
if (dynamic_taskq == NULL) {
+#ifdef HAVE_CPU_HOTPLUG
+ cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
+#endif
taskq_destroy(system_taskq);
taskq_destroy(system_delay_taskq);
return (1);
@@ -1304,4 +1421,9 @@ spl_taskq_fini(void)
system_taskq = NULL;
tsd_destroy(&taskq_tsd);
+
+#ifdef HAVE_CPU_HOTPLUG
+ cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
+ spl_taskq_cpuhp_state = 0;
+#endif
}
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/Makefile.in b/sys/contrib/openzfs/module/os/linux/zfs/Makefile.in
index 87414d6eacc5..75bec52c94e2 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/Makefile.in
+++ b/sys/contrib/openzfs/module/os/linux/zfs/Makefile.in
@@ -23,8 +23,9 @@ $(MODULE)-objs += ../os/linux/zfs/zfs_dir.o
$(MODULE)-objs += ../os/linux/zfs/zfs_file_os.o
$(MODULE)-objs += ../os/linux/zfs/zfs_ioctl_os.o
$(MODULE)-objs += ../os/linux/zfs/zfs_sysfs.o
+$(MODULE)-objs += ../os/linux/zfs/zfs_uio.o
$(MODULE)-objs += ../os/linux/zfs/zfs_vfsops.o
-$(MODULE)-objs += ../os/linux/zfs/zfs_vnops.o
+$(MODULE)-objs += ../os/linux/zfs/zfs_vnops_os.o
$(MODULE)-objs += ../os/linux/zfs/zfs_znode.o
$(MODULE)-objs += ../os/linux/zfs/zio_crypt.o
$(MODULE)-objs += ../os/linux/zfs/zpl_ctldir.o
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c b/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c
index c2281449ed12..0abac228447f 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c
@@ -178,7 +178,7 @@ static struct page *abd_zero_page = NULL;
static kmem_cache_t *abd_cache = NULL;
static kstat_t *abd_ksp;
-static size_t
+static uint_t
abd_chunkcnt_for_bytes(size_t size)
{
return (P2ROUNDUP(size, PAGESIZE) / PAGESIZE);
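
The only change here is the narrower uint_t return type; the body is a plain round-up to page granularity. The P2ROUNDUP() bit trick can be checked standalone (PAGESIZE fixed at 4 KiB for the example):

#include <stdio.h>
#include <stddef.h>

#define	PAGESIZE		4096UL
#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))

static unsigned int
chunkcnt_for_bytes(size_t size)
{
	return (P2ROUNDUP(size, PAGESIZE) / PAGESIZE);
}

int
main(void)
{
	/* Prints "1 1 2": one chunk up to a full page, two just past it. */
	printf("%u %u %u\n", chunkcnt_for_bytes(1),
	    chunkcnt_for_bytes(PAGESIZE), chunkcnt_for_bytes(PAGESIZE + 1));
	return (0);
}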
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/arc_os.c b/sys/contrib/openzfs/module/os/linux/zfs/arc_os.c
index 792c75d46ffe..83d4a3d8496c 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/arc_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/arc_os.c
@@ -48,6 +48,8 @@
#include <sys/vmsystm.h>
#include <sys/zpl.h>
#include <linux/page_compat.h>
+#include <linux/notifier.h>
+#include <linux/memory.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
@@ -73,6 +75,9 @@
*/
int zfs_arc_shrinker_limit = 10000;
+#ifdef CONFIG_MEMORY_HOTPLUG
+static struct notifier_block arc_hotplug_callback_mem_nb;
+#endif
/*
* Return a default max arc size based on the amount of physical memory.
@@ -278,18 +283,9 @@ arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
return (0);
}
-void
-arc_lowmem_init(void)
+static void
+arc_set_sys_free(uint64_t allmem)
{
- uint64_t allmem = arc_all_memory();
-
- /*
- * Register a shrinker to support synchronous (direct) memory
- * reclaim from the arc. This is done to prevent kswapd from
- * swapping out pages when it is preferable to shrink the arc.
- */
- spl_register_shrinker(&arc_shrinker);
-
/*
* The ARC tries to keep at least this much memory available for the
* system. This gives the ARC time to shrink in response to memory
@@ -343,6 +339,20 @@ arc_lowmem_init(void)
}
void
+arc_lowmem_init(void)
+{
+ uint64_t allmem = arc_all_memory();
+
+ /*
+ * Register a shrinker to support synchronous (direct) memory
+ * reclaim from the arc. This is done to prevent kswapd from
+ * swapping out pages when it is preferable to shrink the arc.
+ */
+ spl_register_shrinker(&arc_shrinker);
+ arc_set_sys_free(allmem);
+}
+
+void
arc_lowmem_fini(void)
{
spl_unregister_shrinker(&arc_shrinker);
@@ -375,6 +385,52 @@ param_set_arc_int(const char *buf, zfs_kernel_param_t *kp)
return (0);
}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+/* ARGSUSED */
+static int
+arc_hotplug_callback(struct notifier_block *self, unsigned long action,
+ void *arg)
+{
+ uint64_t allmem = arc_all_memory();
+ if (action != MEM_ONLINE)
+ return (NOTIFY_OK);
+
+ arc_set_limits(allmem);
+
+#ifdef __LP64__
+ if (zfs_dirty_data_max_max == 0)
+ zfs_dirty_data_max_max = MIN(4ULL * 1024 * 1024 * 1024,
+ allmem * zfs_dirty_data_max_max_percent / 100);
+#else
+ if (zfs_dirty_data_max_max == 0)
+ zfs_dirty_data_max_max = MIN(1ULL * 1024 * 1024 * 1024,
+ allmem * zfs_dirty_data_max_max_percent / 100);
+#endif
+
+ arc_set_sys_free(allmem);
+ return (NOTIFY_OK);
+}
+#endif
+
+void
+arc_register_hotplug(void)
+{
+#ifdef CONFIG_MEMORY_HOTPLUG
+ arc_hotplug_callback_mem_nb.notifier_call = arc_hotplug_callback;
+ /* There is no significance to the value 100 */
+ arc_hotplug_callback_mem_nb.priority = 100;
+ register_memory_notifier(&arc_hotplug_callback_mem_nb);
+#endif
+}
+
+void
+arc_unregister_hotplug(void)
+{
+#ifdef CONFIG_MEMORY_HOTPLUG
+ unregister_memory_notifier(&arc_hotplug_callback_mem_nb);
+#endif
+}
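
Splitting arc_set_sys_free() out of arc_lowmem_init() lets the notifier rerun the sizing when a memory bank comes online: the callback recomputes the ARC limits, the dirty-data ceiling, and arc_sys_free against the enlarged arc_all_memory(). The __LP64__ ceiling arithmetic in a runnable sketch (zfs_dirty_data_max_max_percent defaults to 25):

#include <stdio.h>
#include <stdint.h>

#define	MIN(a, b)	((a) < (b) ? (a) : (b))

static uint64_t
dirty_max_max(uint64_t allmem, uint64_t pct)
{
	/* The __LP64__ branch above: MIN(4 GiB, allmem * pct / 100). */
	return (MIN(4ULL * 1024 * 1024 * 1024, allmem * pct / 100));
}

int
main(void)
{
	uint64_t gib = 1024ULL * 1024 * 1024;

	/* The ceiling tracks hotplugged memory until the 4 GiB clamp. */
	for (uint64_t mem = 4; mem <= 32; mem *= 2)
		printf("%2llu GiB -> %4llu MiB\n", (unsigned long long)mem,
		    (unsigned long long)(dirty_max_max(mem * gib, 25) >> 20));
	return (0);
}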
#else /* _KERNEL */
int64_t
arc_available_memory(void)
@@ -405,6 +461,16 @@ arc_free_memory(void)
{
return (spa_get_random(arc_all_memory() * 20 / 100));
}
+
+void
+arc_register_hotplug(void)
+{
+}
+
+void
+arc_unregister_hotplug(void)
+{
+}
#endif /* _KERNEL */
/*
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/policy.c b/sys/contrib/openzfs/module/os/linux/zfs/policy.c
index 5267d67eea82..8780d7f6c70a 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/policy.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/policy.c
@@ -204,7 +204,8 @@ secpolicy_vnode_setdac(const cred_t *cr, uid_t owner)
* Enforced in the Linux VFS.
*/
int
-secpolicy_vnode_setid_retain(const cred_t *cr, boolean_t issuidroot)
+secpolicy_vnode_setid_retain(struct znode *zp __maybe_unused, const cred_t *cr,
+ boolean_t issuidroot)
{
return (priv_policy_user(cr, CAP_FSETID, EPERM));
}
@@ -271,7 +272,7 @@ void
secpolicy_setid_clear(vattr_t *vap, cred_t *cr)
{
if ((vap->va_mode & (S_ISUID | S_ISGID)) != 0 &&
- secpolicy_vnode_setid_retain(cr,
+ secpolicy_vnode_setid_retain(NULL, cr,
(vap->va_mode & S_ISUID) != 0 &&
(vap->va_mask & AT_UID) != 0 && vap->va_uid == 0) != 0) {
vap->va_mask |= AT_MODE;
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c b/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c
index a54961c76870..4bd27d1b516f 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c
@@ -94,6 +94,14 @@ bdev_capacity(struct block_device *bdev)
return (i_size_read(bdev->bd_inode));
}
+#if !defined(HAVE_BDEV_WHOLE)
+static inline struct block_device *
+bdev_whole(struct block_device *bdev)
+{
+ return (bdev->bd_contains);
+}
+#endif
+
/*
* Returns the maximum expansion capacity of the block device (in bytes).
*
@@ -118,7 +126,7 @@ bdev_max_capacity(struct block_device *bdev, uint64_t wholedisk)
uint64_t psize;
int64_t available;
- if (wholedisk && bdev->bd_part != NULL && bdev != bdev->bd_contains) {
+ if (wholedisk && bdev != bdev_whole(bdev)) {
/*
* When reporting maximum expansion capacity for a wholedisk
* deduct any capacity which is expected to be lost due to
@@ -132,7 +140,7 @@ bdev_max_capacity(struct block_device *bdev, uint64_t wholedisk)
* "reserved" EFI partition: in such cases return the device
* usable capacity.
*/
- available = i_size_read(bdev->bd_contains->bd_inode) -
+ available = i_size_read(bdev_whole(bdev)->bd_inode) -
((EFI_MIN_RESV_SIZE + NEW_START_BLOCK +
PARTITION_END_ALIGNMENT) << SECTOR_BITS);
psize = MAX(available, bdev_capacity(bdev));
@@ -192,8 +200,8 @@ vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
vd->vd_bdev = NULL;
if (bdev) {
- if (v->vdev_expanding && bdev != bdev->bd_contains) {
- bdevname(bdev->bd_contains, disk_name + 5);
+ if (v->vdev_expanding && bdev != bdev_whole(bdev)) {
+ bdevname(bdev_whole(bdev), disk_name + 5);
/*
* If userland has BLKPG_RESIZE_PARTITION,
* then it should have updated the partition
@@ -468,7 +476,11 @@ vdev_blkg_tryget(struct blkcg_gq *blkg)
this_cpu_inc(*count);
rc = true;
} else {
+#ifdef ZFS_PERCPU_REF_COUNT_IN_DATA
+ rc = atomic_long_inc_not_zero(&ref->data->count);
+#else
rc = atomic_long_inc_not_zero(&ref->count);
+#endif
}
rcu_read_unlock_sched();
@@ -787,7 +799,7 @@ vdev_disk_io_done(zio_t *zio)
vdev_t *v = zio->io_vd;
vdev_disk_t *vd = v->vdev_tsd;
- if (check_disk_change(vd->vd_bdev)) {
+ if (zfs_check_media_change(vd->vd_bdev)) {
invalidate_bdev(vd->vd_bdev);
v->vdev_remove_wanted = B_TRUE;
spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
@@ -822,9 +834,13 @@ vdev_disk_rele(vdev_t *vd)
}
vdev_ops_t vdev_disk_ops = {
+ .vdev_op_init = NULL,
+ .vdev_op_fini = NULL,
.vdev_op_open = vdev_disk_open,
.vdev_op_close = vdev_disk_close,
.vdev_op_asize = vdev_default_asize,
+ .vdev_op_min_asize = vdev_default_min_asize,
+ .vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_disk_io_start,
.vdev_op_io_done = vdev_disk_io_done,
.vdev_op_state_change = NULL,
@@ -833,6 +849,11 @@ vdev_ops_t vdev_disk_ops = {
.vdev_op_rele = vdev_disk_rele,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
+ .vdev_op_rebuild_asize = NULL,
+ .vdev_op_metaslab_init = NULL,
+ .vdev_op_config_generate = NULL,
+ .vdev_op_nparity = NULL,
+ .vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_DISK, /* name of this vdev type */
.vdev_op_leaf = B_TRUE /* leaf vdev */
};
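
The vdev_ops_t tables in this merge grow slots for the dRAID hooks (vdev_op_init/fini, min_asize, rebuild_asize, nparity, ndisks, ...); plain leaf vdevs fill them with NULL and callers guard optional hooks before use. The pattern reduced to a checkable sketch, with types and names invented for illustration:

#include <stdio.h>
#include <stddef.h>

typedef struct toy_ops {
	int		(*op_open)(void);
	void		(*op_fini)(void);	/* optional; may be NULL */
	const char	*op_type;
} toy_ops_t;

static int
toy_disk_open(void)
{
	return (0);
}

static const toy_ops_t toy_disk_ops = {
	.op_open = toy_disk_open,
	.op_fini = NULL,		/* unused for this vdev type */
	.op_type = "disk",
};

int
main(void)
{
	if (toy_disk_ops.op_open() == 0)
		printf("%s opened\n", toy_disk_ops.op_type);
	if (toy_disk_ops.op_fini != NULL)	/* guard optional hooks */
		toy_disk_ops.op_fini();
	return (0);
}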
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/vdev_file.c b/sys/contrib/openzfs/module/os/linux/zfs/vdev_file.c
index 423ce858144c..bf8a13ae6154 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/vdev_file.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/vdev_file.c
@@ -305,9 +305,13 @@ vdev_file_io_done(zio_t *zio)
}
vdev_ops_t vdev_file_ops = {
+ .vdev_op_init = NULL,
+ .vdev_op_fini = NULL,
.vdev_op_open = vdev_file_open,
.vdev_op_close = vdev_file_close,
.vdev_op_asize = vdev_default_asize,
+ .vdev_op_min_asize = vdev_default_min_asize,
+ .vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_file_io_start,
.vdev_op_io_done = vdev_file_io_done,
.vdev_op_state_change = NULL,
@@ -316,6 +320,11 @@ vdev_ops_t vdev_file_ops = {
.vdev_op_rele = vdev_file_rele,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
+ .vdev_op_rebuild_asize = NULL,
+ .vdev_op_metaslab_init = NULL,
+ .vdev_op_config_generate = NULL,
+ .vdev_op_nparity = NULL,
+ .vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_FILE, /* name of this vdev type */
.vdev_op_leaf = B_TRUE /* leaf vdev */
};
@@ -341,9 +350,13 @@ vdev_file_fini(void)
#ifndef _KERNEL
vdev_ops_t vdev_disk_ops = {
+ .vdev_op_init = NULL,
+ .vdev_op_fini = NULL,
.vdev_op_open = vdev_file_open,
.vdev_op_close = vdev_file_close,
.vdev_op_asize = vdev_default_asize,
+ .vdev_op_min_asize = vdev_default_min_asize,
+ .vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_file_io_start,
.vdev_op_io_done = vdev_file_io_done,
.vdev_op_state_change = NULL,
@@ -352,6 +365,11 @@ vdev_ops_t vdev_disk_ops = {
.vdev_op_rele = vdev_file_rele,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
+ .vdev_op_rebuild_asize = NULL,
+ .vdev_op_metaslab_init = NULL,
+ .vdev_op_config_generate = NULL,
+ .vdev_op_nparity = NULL,
+ .vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_DISK, /* name of this vdev type */
.vdev_op_leaf = B_TRUE /* leaf vdev */
};
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c
index c13a9771235d..a1668e46e4f9 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c
@@ -467,7 +467,6 @@ zfsctl_inode_alloc(zfsvfs_t *zfsvfs, uint64_t id,
zp->z_unlinked = B_FALSE;
zp->z_atime_dirty = B_FALSE;
zp->z_zn_prefetch = B_FALSE;
- zp->z_moved = B_FALSE;
zp->z_is_sa = B_FALSE;
zp->z_is_mapped = B_FALSE;
zp->z_is_ctldir = B_TRUE;
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
index 36bbd5d0829b..165c1218ae79 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
@@ -294,7 +294,7 @@ zfs_sync(struct super_block *sb, int wait, cred_t *cr)
} else {
/*
* Sync all ZFS filesystems. This is what happens when you
- * run sync(1M). Unlike other filesystems, ZFS honors the
+ * run sync(1). Unlike other filesystems, ZFS honors the
* request by waiting for all pools to commit all dirty data.
*/
spa_sync_allpools();
@@ -1451,7 +1451,7 @@ int
zfs_domount(struct super_block *sb, zfs_mnt_t *zm, int silent)
{
const char *osname = zm->mnt_osname;
- struct inode *root_inode;
+ struct inode *root_inode = NULL;
uint64_t recordsize;
int error = 0;
zfsvfs_t *zfsvfs = NULL;
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops.c
index b668c7dff013..3be387a30e5c 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops.c
@@ -240,78 +240,6 @@ zfs_close(struct inode *ip, int flag, cred_t *cr)
return (0);
}
-#if defined(SEEK_HOLE) && defined(SEEK_DATA)
-/*
- * Lseek support for finding holes (cmd == SEEK_HOLE) and
- * data (cmd == SEEK_DATA). "off" is an in/out parameter.
- */
-static int
-zfs_holey_common(struct inode *ip, int cmd, loff_t *off)
-{
- znode_t *zp = ITOZ(ip);
- uint64_t noff = (uint64_t)*off; /* new offset */
- uint64_t file_sz;
- int error;
- boolean_t hole;
-
- file_sz = zp->z_size;
- if (noff >= file_sz) {
- return (SET_ERROR(ENXIO));
- }
-
- if (cmd == SEEK_HOLE)
- hole = B_TRUE;
- else
- hole = B_FALSE;
-
- error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);
-
- if (error == ESRCH)
- return (SET_ERROR(ENXIO));
-
- /* file was dirty, so fall back to using generic logic */
- if (error == EBUSY) {
- if (hole)
- *off = file_sz;
-
- return (0);
- }
-
- /*
- * We could find a hole that begins after the logical end-of-file,
- * because dmu_offset_next() only works on whole blocks. If the
- * EOF falls mid-block, then indicate that the "virtual hole"
- * at the end of the file begins at the logical EOF, rather than
- * at the end of the last block.
- */
- if (noff > file_sz) {
- ASSERT(hole);
- noff = file_sz;
- }
-
- if (noff < *off)
- return (error);
- *off = noff;
- return (error);
-}
-
-int
-zfs_holey(struct inode *ip, int cmd, loff_t *off)
-{
- znode_t *zp = ITOZ(ip);
- zfsvfs_t *zfsvfs = ITOZSB(ip);
- int error;
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- error = zfs_holey_common(ip, cmd, off);
-
- ZFS_EXIT(zfsvfs);
- return (error);
-}
-#endif /* SEEK_HOLE && SEEK_DATA */
-
#if defined(_KERNEL)
/*
* When a file is memory mapped, we must keep the IO data synchronized
@@ -320,10 +248,10 @@ zfs_holey(struct inode *ip, int cmd, loff_t *off)
* On Write: If we find a memory mapped page, we write to *both*
* the page and the dmu buffer.
*/
-static void
-update_pages(struct inode *ip, int64_t start, int len,
- objset_t *os, uint64_t oid)
+void
+update_pages(znode_t *zp, int64_t start, int len, objset_t *os)
{
+ struct inode *ip = ZTOI(zp);
struct address_space *mp = ip->i_mapping;
struct page *pp;
uint64_t nbytes;
@@ -340,8 +268,8 @@ update_pages(struct inode *ip, int64_t start, int len,
flush_dcache_page(pp);
pb = kmap(pp);
- (void) dmu_read(os, oid, start+off, nbytes, pb+off,
- DMU_READ_PREFETCH);
+ (void) dmu_read(os, zp->z_id, start + off, nbytes,
+ pb + off, DMU_READ_PREFETCH);
kunmap(pp);
if (mapping_writably_mapped(mp))
@@ -369,12 +297,12 @@ update_pages(struct inode *ip, int64_t start, int len,
* NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
* the file is memory mapped.
*/
-static int
-mappedread(struct inode *ip, int nbytes, uio_t *uio)
+int
+mappedread(znode_t *zp, int nbytes, uio_t *uio)
{
+ struct inode *ip = ZTOI(zp);
struct address_space *mp = ip->i_mapping;
struct page *pp;
- znode_t *zp = ITOZ(ip);
int64_t start, off;
uint64_t bytes;
int len = nbytes;
@@ -414,575 +342,9 @@ mappedread(struct inode *ip, int nbytes, uio_t *uio)
}
#endif /* _KERNEL */
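
update_pages() and mappedread() lose their static linkage here because the generic zfs_read()/zfs_write() they serve moved to platform-independent code (the large removal below). The invariant they preserve is the one in the comment above: a write over an mmap'ed range must land in both the page cache and the DMU copy. Reduced to a standalone sketch with toy types:

#include <string.h>

/* Toy model: a "DMU" copy and a page-cache copy of the same range. */
struct mapped_file {
	char	dmu[4096];
	char	page[4096];
	int	mapped;		/* is this range memory mapped? */
};

static void
write_range(struct mapped_file *f, size_t off, const char *src, size_t len)
{
	memcpy(f->dmu + off, src, len);		/* always update the DMU */
	if (f->mapped)
		memcpy(f->page + off, src, len);	/* and the page */
}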
-unsigned long zfs_read_chunk_size = 1024 * 1024; /* Tunable */
unsigned long zfs_delete_blocks = DMU_MAX_DELETEBLKCNT;
/*
- * Read bytes from specified file into supplied buffer.
- *
- * IN: ip - inode of file to be read from.
- * uio - structure supplying read location, range info,
- * and return buffer.
- * ioflag - O_SYNC flags; used to provide FRSYNC semantics.
- * O_DIRECT flag; used to bypass page cache.
- * cr - credentials of caller.
- *
- * OUT: uio - updated offset and range, buffer filled.
- *
- * RETURN: 0 on success, error code on failure.
- *
- * Side Effects:
- * inode - atime updated if byte count > 0
- */
-/* ARGSUSED */
-int
-zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
-{
- int error = 0;
- boolean_t frsync = B_FALSE;
-
- znode_t *zp = ITOZ(ip);
- zfsvfs_t *zfsvfs = ITOZSB(ip);
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- if (zp->z_pflags & ZFS_AV_QUARANTINED) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EACCES));
- }
-
- /*
- * Validate file offset
- */
- if (uio->uio_loffset < (offset_t)0) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EINVAL));
- }
-
- /*
- * Fasttrack empty reads
- */
- if (uio->uio_resid == 0) {
- ZFS_EXIT(zfsvfs);
- return (0);
- }
-
-#ifdef FRSYNC
- /*
- * If we're in FRSYNC mode, sync out this znode before reading it.
- * Only do this for non-snapshots.
- *
- * Some platforms do not support FRSYNC and instead map it
- * to O_SYNC, which results in unnecessary calls to zil_commit. We
- * only honor FRSYNC requests on platforms which support it.
- */
- frsync = !!(ioflag & FRSYNC);
-#endif
- if (zfsvfs->z_log &&
- (frsync || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
- zil_commit(zfsvfs->z_log, zp->z_id);
-
- /*
- * Lock the range against changes.
- */
- zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
- uio->uio_loffset, uio->uio_resid, RL_READER);
-
- /*
- * If we are reading past end-of-file we can skip
- * to the end; but we might still need to set atime.
- */
- if (uio->uio_loffset >= zp->z_size) {
- error = 0;
- goto out;
- }
-
- ASSERT(uio->uio_loffset < zp->z_size);
- ssize_t n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
- ssize_t start_resid = n;
-
-#ifdef HAVE_UIO_ZEROCOPY
- xuio_t *xuio = NULL;
- if ((uio->uio_extflg == UIO_XUIO) &&
- (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
- int nblk;
- int blksz = zp->z_blksz;
- uint64_t offset = uio->uio_loffset;
-
- xuio = (xuio_t *)uio;
- if ((ISP2(blksz))) {
- nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
- blksz)) / blksz;
- } else {
- ASSERT(offset + n <= blksz);
- nblk = 1;
- }
- (void) dmu_xuio_init(xuio, nblk);
-
- if (vn_has_cached_data(ip)) {
- /*
- * For simplicity, we always allocate a full buffer
- * even if we only expect to read a portion of a block.
- */
- while (--nblk >= 0) {
- (void) dmu_xuio_add(xuio,
- dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
- blksz), 0, blksz);
- }
- }
- }
-#endif /* HAVE_UIO_ZEROCOPY */
-
- while (n > 0) {
- ssize_t nbytes = MIN(n, zfs_read_chunk_size -
- P2PHASE(uio->uio_loffset, zfs_read_chunk_size));
-
- if (zp->z_is_mapped && !(ioflag & O_DIRECT)) {
- error = mappedread(ip, nbytes, uio);
- } else {
- error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
- uio, nbytes);
- }
-
- if (error) {
- /* convert checksum errors into IO errors */
- if (error == ECKSUM)
- error = SET_ERROR(EIO);
- break;
- }
-
- n -= nbytes;
- }
-
- int64_t nread = start_resid - n;
- dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
- task_io_account_read(nread);
-out:
- zfs_rangelock_exit(lr);
-
- ZFS_EXIT(zfsvfs);
- return (error);
-}
-
-/*
- * Write the bytes to a file.
- *
- * IN: ip - inode of file to be written to.
- * uio - structure supplying write location, range info,
- * and data buffer.
- * ioflag - O_APPEND flag set if in append mode.
- * O_DIRECT flag; used to bypass page cache.
- * cr - credentials of caller.
- *
- * OUT: uio - updated offset and range.
- *
- * RETURN: 0 if success
- * error code if failure
- *
- * Timestamps:
- * ip - ctime|mtime updated if byte count > 0
- */
-
-/* ARGSUSED */
-int
-zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
-{
- int error = 0;
- ssize_t start_resid = uio->uio_resid;
-
- /*
- * Fasttrack empty write
- */
- ssize_t n = start_resid;
- if (n == 0)
- return (0);
-
- rlim64_t limit = uio->uio_limit;
- if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
- limit = MAXOFFSET_T;
-
- znode_t *zp = ITOZ(ip);
- zfsvfs_t *zfsvfs = ZTOZSB(zp);
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- sa_bulk_attr_t bulk[4];
- int count = 0;
- uint64_t mtime[2], ctime[2];
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
- &zp->z_size, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
- &zp->z_pflags, 8);
-
- /*
- * Callers might not be able to detect properly that we are read-only,
- * so check it explicitly here.
- */
- if (zfs_is_readonly(zfsvfs)) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EROFS));
- }
-
- /*
- * If immutable or not appending then return EPERM
- */
- if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
- ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & O_APPEND) &&
- (uio->uio_loffset < zp->z_size))) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EPERM));
- }
-
- /*
- * Validate file offset
- */
- offset_t woff = ioflag & O_APPEND ? zp->z_size : uio->uio_loffset;
- if (woff < 0) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EINVAL));
- }
-
- int max_blksz = zfsvfs->z_max_blksz;
- xuio_t *xuio = NULL;
-
- /*
- * Pre-fault the pages to ensure slow (eg NFS) pages
- * don't hold up txg.
- * Skip this if uio contains loaned arc_buf.
- */
-#ifdef HAVE_UIO_ZEROCOPY
- if ((uio->uio_extflg == UIO_XUIO) &&
- (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
- xuio = (xuio_t *)uio;
- else
-#endif
- if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EFAULT));
- }
-
- /*
- * If in append mode, set the io offset pointer to eof.
- */
- zfs_locked_range_t *lr;
- if (ioflag & O_APPEND) {
- /*
- * Obtain an appending range lock to guarantee file append
- * semantics. We reset the write offset once we have the lock.
- */
- lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
- woff = lr->lr_offset;
- if (lr->lr_length == UINT64_MAX) {
- /*
- * We overlocked the file because this write will cause
- * the file block size to increase.
- * Note that zp_size cannot change with this lock held.
- */
- woff = zp->z_size;
- }
- uio->uio_loffset = woff;
- } else {
- /*
- * Note that if the file block size will change as a result of
- * this write, then this range lock will lock the entire file
- * so that we can re-write the block safely.
- */
- lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
- }
-
- if (woff >= limit) {
- zfs_rangelock_exit(lr);
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EFBIG));
- }
-
- if ((woff + n) > limit || woff > (limit - n))
- n = limit - woff;
-
- /* Will this write extend the file length? */
- int write_eof = (woff + n > zp->z_size);
-
- uint64_t end_size = MAX(zp->z_size, woff + n);
- zilog_t *zilog = zfsvfs->z_log;
-#ifdef HAVE_UIO_ZEROCOPY
- int i_iov = 0;
- const iovec_t *iovp = uio->uio_iov;
- int iovcnt __maybe_unused = uio->uio_iovcnt;
-#endif
-
-
- /*
- * Write the file in reasonable size chunks. Each chunk is written
- * in a separate transaction; this keeps the intent log records small
- * and allows us to do more fine-grained space accounting.
- */
- while (n > 0) {
- woff = uio->uio_loffset;
-
- if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT,
- KUID_TO_SUID(ip->i_uid)) ||
- zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT,
- KGID_TO_SGID(ip->i_gid)) ||
- (zp->z_projid != ZFS_DEFAULT_PROJID &&
- zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
- zp->z_projid))) {
- error = SET_ERROR(EDQUOT);
- break;
- }
-
- arc_buf_t *abuf = NULL;
- const iovec_t *aiov = NULL;
- if (xuio) {
-#ifdef HAVE_UIO_ZEROCOPY
- ASSERT(i_iov < iovcnt);
- ASSERT3U(uio->uio_segflg, !=, UIO_BVEC);
- aiov = &iovp[i_iov];
- abuf = dmu_xuio_arcbuf(xuio, i_iov);
- dmu_xuio_clear(xuio, i_iov);
- ASSERT((aiov->iov_base == abuf->b_data) ||
- ((char *)aiov->iov_base - (char *)abuf->b_data +
- aiov->iov_len == arc_buf_size(abuf)));
- i_iov++;
-#endif
- } else if (n >= max_blksz && woff >= zp->z_size &&
- P2PHASE(woff, max_blksz) == 0 &&
- zp->z_blksz == max_blksz) {
- /*
- * This write covers a full block. "Borrow" a buffer
- * from the dmu so that we can fill it before we enter
- * a transaction. This avoids the possibility of
- * holding up the transaction if the data copy hangs
- * up on a pagefault (e.g., from an NFS server mapping).
- */
- size_t cbytes;
-
- abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
- max_blksz);
- ASSERT(abuf != NULL);
- ASSERT(arc_buf_size(abuf) == max_blksz);
- if ((error = uiocopy(abuf->b_data, max_blksz,
- UIO_WRITE, uio, &cbytes))) {
- dmu_return_arcbuf(abuf);
- break;
- }
- ASSERT(cbytes == max_blksz);
- }
-
- /*
- * Start a transaction.
- */
- dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
- dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
- dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
- DB_DNODE_ENTER(db);
- dmu_tx_hold_write_by_dnode(tx, DB_DNODE(db), woff,
- MIN(n, max_blksz));
- DB_DNODE_EXIT(db);
- zfs_sa_upgrade_txholds(tx, zp);
- error = dmu_tx_assign(tx, TXG_WAIT);
- if (error) {
- dmu_tx_abort(tx);
- if (abuf != NULL)
- dmu_return_arcbuf(abuf);
- break;
- }
-
- /*
- * If rangelock_enter() over-locked we grow the blocksize
- * and then reduce the lock range. This will only happen
- * on the first iteration since rangelock_reduce() will
- * shrink down lr_length to the appropriate size.
- */
- if (lr->lr_length == UINT64_MAX) {
- uint64_t new_blksz;
-
- if (zp->z_blksz > max_blksz) {
- /*
- * File's blocksize is already larger than the
- * "recordsize" property. Only let it grow to
- * the next power of 2.
- */
- ASSERT(!ISP2(zp->z_blksz));
- new_blksz = MIN(end_size,
- 1 << highbit64(zp->z_blksz));
- } else {
- new_blksz = MIN(end_size, max_blksz);
- }
- zfs_grow_blocksize(zp, new_blksz, tx);
- zfs_rangelock_reduce(lr, woff, n);
- }
-
- /*
- * XXX - should we really limit each write to z_max_blksz?
- * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
- */
- ssize_t nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
-
- ssize_t tx_bytes;
- if (abuf == NULL) {
- tx_bytes = uio->uio_resid;
- uio->uio_fault_disable = B_TRUE;
- error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
- uio, nbytes, tx);
- uio->uio_fault_disable = B_FALSE;
- if (error == EFAULT) {
- dmu_tx_commit(tx);
- /*
- * Account for partial writes before
- * continuing the loop.
- * Update needs to occur before the next
- * uio_prefaultpages, or prefaultpages may
- * error, and we may break the loop early.
- */
- if (tx_bytes != uio->uio_resid)
- n -= tx_bytes - uio->uio_resid;
- if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
- break;
- }
- continue;
- } else if (error != 0) {
- dmu_tx_commit(tx);
- break;
- }
- tx_bytes -= uio->uio_resid;
- } else {
- tx_bytes = nbytes;
- ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
- /*
- * If this is not a full block write, but we are
- * extending the file past EOF and this data starts
- * block-aligned, use assign_arcbuf(). Otherwise,
- * write via dmu_write().
- */
- if (tx_bytes < max_blksz && (!write_eof ||
- aiov->iov_base != abuf->b_data)) {
- ASSERT(xuio);
- dmu_write(zfsvfs->z_os, zp->z_id, woff,
- /* cppcheck-suppress nullPointer */
- aiov->iov_len, aiov->iov_base, tx);
- dmu_return_arcbuf(abuf);
- xuio_stat_wbuf_copied();
- } else {
- ASSERT(xuio || tx_bytes == max_blksz);
- error = dmu_assign_arcbuf_by_dbuf(
- sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
- if (error != 0) {
- dmu_return_arcbuf(abuf);
- dmu_tx_commit(tx);
- break;
- }
- }
- ASSERT(tx_bytes <= uio->uio_resid);
- uioskip(uio, tx_bytes);
- }
- if (tx_bytes && zp->z_is_mapped && !(ioflag & O_DIRECT)) {
- update_pages(ip, woff,
- tx_bytes, zfsvfs->z_os, zp->z_id);
- }
-
- /*
- * If we made no progress, we're done. If we made even
- * partial progress, update the znode and ZIL accordingly.
- */
- if (tx_bytes == 0) {
- (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
- (void *)&zp->z_size, sizeof (uint64_t), tx);
- dmu_tx_commit(tx);
- ASSERT(error != 0);
- break;
- }
-
- /*
- * Clear Set-UID/Set-GID bits on successful write if not
- * privileged and at least one of the execute bits is set.
- *
- * It would be nice to do this after all writes have
- * been done, but that would still expose the ISUID/ISGID
- * to another app after the partial write is committed.
- *
- * Note: we don't call zfs_fuid_map_id() here because
- * user 0 is not an ephemeral uid.
- */
- mutex_enter(&zp->z_acl_lock);
- uint32_t uid = KUID_TO_SUID(ip->i_uid);
- if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
- (S_IXUSR >> 6))) != 0 &&
- (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
- secpolicy_vnode_setid_retain(cr,
- ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
- uint64_t newmode;
- zp->z_mode &= ~(S_ISUID | S_ISGID);
- ip->i_mode = newmode = zp->z_mode;
- (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
- (void *)&newmode, sizeof (uint64_t), tx);
- }
- mutex_exit(&zp->z_acl_lock);
-
- zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
-
- /*
- * Update the file size (zp_size) if it has changed;
- * account for possible concurrent updates.
- */
- while ((end_size = zp->z_size) < uio->uio_loffset) {
- (void) atomic_cas_64(&zp->z_size, end_size,
- uio->uio_loffset);
- ASSERT(error == 0);
- }
- /*
- * If we are replaying and eof is non zero then force
- * the file size to the specified eof. Note, there's no
- * concurrency during replay.
- */
- if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
- zp->z_size = zfsvfs->z_replay_eof;
-
- error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
-
- zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
- NULL, NULL);
- dmu_tx_commit(tx);
-
- if (error != 0)
- break;
- ASSERT(tx_bytes == nbytes);
- n -= nbytes;
-
- if (!xuio && n > 0) {
- if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
- error = EFAULT;
- break;
- }
- }
- }
-
- zfs_inode_update(zp);
- zfs_rangelock_exit(lr);
-
- /*
- * If we're in replay mode, or we made no progress, return error.
- * Otherwise, it's at least a partial write, so it's successful.
- */
- if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
- ZFS_EXIT(zfsvfs);
- return (error);
- }
-
- if (ioflag & (O_SYNC | O_DSYNC) ||
- zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, zp->z_id);
-
- int64_t nwritten = start_resid - uio->uio_resid;
- dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
- task_io_account_write(nwritten);
-
- ZFS_EXIT(zfsvfs);
- return (0);
-}
-
-/*
* Write the bytes to a file.
*
* IN: zp - znode of file to be written to
@@ -993,37 +355,40 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
* OUT: resid - remaining bytes to write
*
* RETURN: 0 if success
- * positive error code if failure
+ * positive error code if failure. EIO is returned
+ * for a short write when residp isn't provided.
*
* Timestamps:
* zp - ctime|mtime updated if byte count > 0
*/
int
zfs_write_simple(znode_t *zp, const void *data, size_t len,
- loff_t pos, size_t *resid)
+ loff_t pos, size_t *residp)
{
- ssize_t written;
- int error = 0;
+ fstrans_cookie_t cookie;
+ int error;
- written = zpl_write_common(ZTOI(zp), data, len, &pos,
- UIO_SYSSPACE, 0, kcred);
- if (written < 0) {
- error = -written;
- } else if (resid == NULL) {
- if (written < len)
- error = SET_ERROR(EIO); /* short write */
- } else {
- *resid = len - written;
+ struct iovec iov;
+ iov.iov_base = (void *)data;
+ iov.iov_len = len;
+
+ uio_t uio;
+ uio_iovec_init(&uio, &iov, 1, pos, UIO_SYSSPACE, len, 0);
+
+ cookie = spl_fstrans_mark();
+ error = zfs_write(zp, &uio, 0, kcred);
+ spl_fstrans_unmark(cookie);
+
+ if (error == 0) {
+ if (residp != NULL)
+ *residp = uio_resid(&uio);
+ else if (uio_resid(&uio) != 0)
+ error = SET_ERROR(EIO);
}
+
return (error);
}
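
The rewritten zfs_write_simple() wraps the now-common zfs_write() and derives its result from the residual byte count: with residp the caller is told how much was left unwritten, without it any short write is promoted to EIO. The result handling, isolated as a sketch:

#include <errno.h>
#include <stddef.h>

/* Mirrors the residp contract implemented above. */
static int
finish_simple_write(int error, size_t resid, size_t *residp)
{
	if (error != 0)
		return (error);
	if (residp != NULL) {
		*residp = resid;	/* caller handles any short write */
		return (0);
	}
	return (resid != 0 ? EIO : 0);	/* short write becomes EIO */
}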
-/*
- * Drop a reference on the passed inode asynchronously. This ensures
- * that the caller will never drop the last reference on an inode in
- * the current context. Doing so while holding open a tx could result
- * in a deadlock if iput_final() re-enters the filesystem code.
- */
void
zfs_zrele_async(znode_t *zp)
{
@@ -1040,179 +405,6 @@ zfs_zrele_async(znode_t *zp)
zrele(zp);
}
-/* ARGSUSED */
-static void
-zfs_get_done(zgd_t *zgd, int error)
-{
- znode_t *zp = zgd->zgd_private;
-
- if (zgd->zgd_db)
- dmu_buf_rele(zgd->zgd_db, zgd);
-
- zfs_rangelock_exit(zgd->zgd_lr);
-
- /*
- * Release the vnode asynchronously as we currently have the
- * txg stopped from syncing.
- */
- zfs_zrele_async(zp);
-
- kmem_free(zgd, sizeof (zgd_t));
-}
-
-#ifdef ZFS_DEBUG
-static int zil_fault_io = 0;
-#endif
-
-/*
- * Get data to generate a TX_WRITE intent log record.
- */
-int
-zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
-{
- zfsvfs_t *zfsvfs = arg;
- objset_t *os = zfsvfs->z_os;
- znode_t *zp;
- uint64_t object = lr->lr_foid;
- uint64_t offset = lr->lr_offset;
- uint64_t size = lr->lr_length;
- dmu_buf_t *db;
- zgd_t *zgd;
- int error = 0;
-
- ASSERT3P(lwb, !=, NULL);
- ASSERT3P(zio, !=, NULL);
- ASSERT3U(size, !=, 0);
-
- /*
- * Nothing to do if the file has been removed
- */
- if (zfs_zget(zfsvfs, object, &zp) != 0)
- return (SET_ERROR(ENOENT));
- if (zp->z_unlinked) {
- /*
- * Release the vnode asynchronously as we currently have the
- * txg stopped from syncing.
- */
- zfs_zrele_async(zp);
- return (SET_ERROR(ENOENT));
- }
-
- zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
- zgd->zgd_lwb = lwb;
- zgd->zgd_private = zp;
-
- /*
- * Write records come in two flavors: immediate and indirect.
- * For small writes it's cheaper to store the data with the
- * log record (immediate); for large writes it's cheaper to
- * sync the data and get a pointer to it (indirect) so that
- * we don't have to write the data twice.
- */
- if (buf != NULL) { /* immediate write */
- zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
- offset, size, RL_READER);
- /* test for truncation needs to be done while range locked */
- if (offset >= zp->z_size) {
- error = SET_ERROR(ENOENT);
- } else {
- error = dmu_read(os, object, offset, size, buf,
- DMU_READ_NO_PREFETCH);
- }
- ASSERT(error == 0 || error == ENOENT);
- } else { /* indirect write */
- /*
- * Have to lock the whole block to ensure when it's
- * written out and its checksum is being calculated
- * that no one can change the data. We need to re-check
- * blocksize after we get the lock in case it's changed!
- */
- for (;;) {
- uint64_t blkoff;
- size = zp->z_blksz;
- blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
- offset -= blkoff;
- zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
- offset, size, RL_READER);
- if (zp->z_blksz == size)
- break;
- offset += blkoff;
- zfs_rangelock_exit(zgd->zgd_lr);
- }
- /* test for truncation needs to be done while range locked */
- if (lr->lr_offset >= zp->z_size)
- error = SET_ERROR(ENOENT);
-#ifdef ZFS_DEBUG
- if (zil_fault_io) {
- error = SET_ERROR(EIO);
- zil_fault_io = 0;
- }
-#endif
- if (error == 0)
- error = dmu_buf_hold(os, object, offset, zgd, &db,
- DMU_READ_NO_PREFETCH);
-
- if (error == 0) {
- blkptr_t *bp = &lr->lr_blkptr;
-
- zgd->zgd_db = db;
- zgd->zgd_bp = bp;
-
- ASSERT(db->db_offset == offset);
- ASSERT(db->db_size == size);
-
- error = dmu_sync(zio, lr->lr_common.lrc_txg,
- zfs_get_done, zgd);
- ASSERT(error || lr->lr_length <= size);
-
- /*
- * On success, we need to wait for the write I/O
- * initiated by dmu_sync() to complete before we can
- * release this dbuf. We will finish everything up
- * in the zfs_get_done() callback.
- */
- if (error == 0)
- return (0);
-
- if (error == EALREADY) {
- lr->lr_common.lrc_txtype = TX_WRITE2;
- /*
- * TX_WRITE2 relies on the data previously
- * written by the TX_WRITE that caused
- * EALREADY. We zero out the BP because
- * it is the old, currently-on-disk BP.
- */
- zgd->zgd_bp = NULL;
- BP_ZERO(bp);
- error = 0;
- }
- }
- }
-
- zfs_get_done(zgd, error);
-
- return (error);
-}
-
-/*ARGSUSED*/
-int
-zfs_access(struct inode *ip, int mode, int flag, cred_t *cr)
-{
- znode_t *zp = ITOZ(ip);
- zfsvfs_t *zfsvfs = ITOZSB(ip);
- int error;
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- if (flag & V_ACE_MASK)
- error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
- else
- error = zfs_zaccess_rwx(zp, mode, flag, cr);
-
- ZFS_EXIT(zfsvfs);
- return (error);
-}
/*
* Lookup an entry in a directory, or an extended attribute directory.
@@ -2440,26 +1632,6 @@ out:
return (error);
}
-ulong_t zfs_fsync_sync_cnt = 4;
-
-int
-zfs_fsync(znode_t *zp, int syncflag, cred_t *cr)
-{
- zfsvfs_t *zfsvfs = ZTOZSB(zp);
-
- (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
-
- if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
- zil_commit(zfsvfs->z_log, zp->z_id);
- ZFS_EXIT(zfsvfs);
- }
- tsd_set(zfs_fsyncer_key, NULL);
-
- return (0);
-}
-
/*
* Get the basic file attributes and place them in the provided kstat
* structure. The inode is assumed to be the authoritative source
@@ -4796,207 +3968,9 @@ zfs_fid(struct inode *ip, fid_t *fidp)
return (0);
}
-/*ARGSUSED*/
-int
-zfs_getsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
-{
- znode_t *zp = ITOZ(ip);
- zfsvfs_t *zfsvfs = ITOZSB(ip);
- int error;
- boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
- error = zfs_getacl(zp, vsecp, skipaclchk, cr);
- ZFS_EXIT(zfsvfs);
-
- return (error);
-}
-
-/*ARGSUSED*/
-int
-zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
-{
- zfsvfs_t *zfsvfs = ZTOZSB(zp);
- int error;
- boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
- zilog_t *zilog = zfsvfs->z_log;
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- error = zfs_setacl(zp, vsecp, skipaclchk, cr);
-
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
-
- ZFS_EXIT(zfsvfs);
- return (error);
-}
-
-#ifdef HAVE_UIO_ZEROCOPY
-/*
- * The smallest read we may consider to loan out an arcbuf.
- * This must be a power of 2.
- */
-int zcr_blksz_min = (1 << 10); /* 1K */
-/*
- * If set to less than the file block size, allow loaning out of an
- * arcbuf for a partial block read. This must be a power of 2.
- */
-int zcr_blksz_max = (1 << 17); /* 128K */
-
-/*ARGSUSED*/
-static int
-zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr)
-{
- znode_t *zp = ITOZ(ip);
- zfsvfs_t *zfsvfs = ITOZSB(ip);
- int max_blksz = zfsvfs->z_max_blksz;
- uio_t *uio = &xuio->xu_uio;
- ssize_t size = uio->uio_resid;
- offset_t offset = uio->uio_loffset;
- int blksz;
- int fullblk, i;
- arc_buf_t *abuf;
- ssize_t maxsize;
- int preamble, postamble;
-
- if (xuio->xu_type != UIOTYPE_ZEROCOPY)
- return (SET_ERROR(EINVAL));
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
- switch (ioflag) {
- case UIO_WRITE:
- /*
- * Loan out an arc_buf for write if write size is bigger than
- * max_blksz, and the file's block size is also max_blksz.
- */
- blksz = max_blksz;
- if (size < blksz || zp->z_blksz != blksz) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EINVAL));
- }
- /*
- * Caller requests buffers for write before knowing where the
- * write offset might be (e.g. NFS TCP write).
- */
- if (offset == -1) {
- preamble = 0;
- } else {
- preamble = P2PHASE(offset, blksz);
- if (preamble) {
- preamble = blksz - preamble;
- size -= preamble;
- }
- }
-
- postamble = P2PHASE(size, blksz);
- size -= postamble;
-
- fullblk = size / blksz;
- (void) dmu_xuio_init(xuio,
- (preamble != 0) + fullblk + (postamble != 0));
-
- /*
- * Have to fix iov base/len for partial buffers. They
- * currently represent full arc_buf's.
- */
- if (preamble) {
- /* data begins in the middle of the arc_buf */
- abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
- blksz);
- ASSERT(abuf);
- (void) dmu_xuio_add(xuio, abuf,
- blksz - preamble, preamble);
- }
-
- for (i = 0; i < fullblk; i++) {
- abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
- blksz);
- ASSERT(abuf);
- (void) dmu_xuio_add(xuio, abuf, 0, blksz);
- }
-
- if (postamble) {
- /* data ends in the middle of the arc_buf */
- abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
- blksz);
- ASSERT(abuf);
- (void) dmu_xuio_add(xuio, abuf, 0, postamble);
- }
- break;
- case UIO_READ:
- /*
- * Loan out an arc_buf for read if the read size is larger than
- * the current file block size. Block alignment is not
- * considered. Partial arc_buf will be loaned out for read.
- */
- blksz = zp->z_blksz;
- if (blksz < zcr_blksz_min)
- blksz = zcr_blksz_min;
- if (blksz > zcr_blksz_max)
- blksz = zcr_blksz_max;
- /* avoid potential complexity of dealing with it */
- if (blksz > max_blksz) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EINVAL));
- }
-
- maxsize = zp->z_size - uio->uio_loffset;
- if (size > maxsize)
- size = maxsize;
-
- if (size < blksz) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EINVAL));
- }
- break;
- default:
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EINVAL));
- }
-
- uio->uio_extflg = UIO_XUIO;
- XUIO_XUZC_RW(xuio) = ioflag;
- ZFS_EXIT(zfsvfs);
- return (0);
-}
-
-/*ARGSUSED*/
-static int
-zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr)
-{
- int i;
- arc_buf_t *abuf;
- int ioflag = XUIO_XUZC_RW(xuio);
-
- ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);
-
- i = dmu_xuio_cnt(xuio);
- while (i-- > 0) {
- abuf = dmu_xuio_arcbuf(xuio, i);
- /*
- * if abuf == NULL, it must be a write buffer
- * that has been returned in zfs_write().
- */
- if (abuf)
- dmu_return_arcbuf(abuf);
- ASSERT(abuf || ioflag == UIO_WRITE);
- }
-
- dmu_xuio_fini(xuio);
- return (0);
-}
-#endif /* HAVE_UIO_ZEROCOPY */
-
#if defined(_KERNEL)
EXPORT_SYMBOL(zfs_open);
EXPORT_SYMBOL(zfs_close);
-EXPORT_SYMBOL(zfs_read);
-EXPORT_SYMBOL(zfs_write);
-EXPORT_SYMBOL(zfs_access);
EXPORT_SYMBOL(zfs_lookup);
EXPORT_SYMBOL(zfs_create);
EXPORT_SYMBOL(zfs_tmpfile);
@@ -5004,7 +3978,6 @@ EXPORT_SYMBOL(zfs_remove);
EXPORT_SYMBOL(zfs_mkdir);
EXPORT_SYMBOL(zfs_rmdir);
EXPORT_SYMBOL(zfs_readdir);
-EXPORT_SYMBOL(zfs_fsync);
EXPORT_SYMBOL(zfs_getattr_fast);
EXPORT_SYMBOL(zfs_setattr);
EXPORT_SYMBOL(zfs_rename);
@@ -5014,8 +3987,6 @@ EXPORT_SYMBOL(zfs_link);
EXPORT_SYMBOL(zfs_inactive);
EXPORT_SYMBOL(zfs_space);
EXPORT_SYMBOL(zfs_fid);
-EXPORT_SYMBOL(zfs_getsecattr);
-EXPORT_SYMBOL(zfs_setsecattr);
EXPORT_SYMBOL(zfs_getpage);
EXPORT_SYMBOL(zfs_putpage);
EXPORT_SYMBOL(zfs_dirty_inode);
@@ -5024,8 +3995,6 @@ EXPORT_SYMBOL(zfs_map);
/* BEGIN CSTYLED */
module_param(zfs_delete_blocks, ulong, 0644);
MODULE_PARM_DESC(zfs_delete_blocks, "Delete files larger than N blocks async");
-module_param(zfs_read_chunk_size, ulong, 0644);
-MODULE_PARM_DESC(zfs_read_chunk_size, "Bytes to read per chunk");
/* END CSTYLED */
#endif
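
The zero-copy path removed above carved each write into an unaligned preamble, a run of full blocks, and an unaligned postamble using P2PHASE(). A minimal userspace sketch of that alignment math, assuming the usual SPL definition of P2PHASE() for power-of-two alignments (the example offset and size are made up):

    #include <stdio.h>

    /* P2PHASE() from the SPL headers: x modulo a power-of-two alignment. */
    #define	P2PHASE(x, align)	((x) & ((align) - 1))

    int
    main(void)
    {
    	long long offset = 5000;	/* illustrative write offset */
    	long long size = 500000;	/* illustrative write size */
    	long long blksz = 131072;	/* 128K file block size */

    	/* Unaligned head: bytes needed to reach the next block boundary. */
    	long long preamble = P2PHASE(offset, blksz);
    	if (preamble != 0) {
    		preamble = blksz - preamble;
    		size -= preamble;
    	}

    	/* Unaligned tail: bytes left over after the last full block. */
    	long long postamble = P2PHASE(size, blksz);
    	size -= postamble;

    	printf("preamble=%lld fullblk=%lld postamble=%lld\n",
    	    preamble, size / blksz, postamble);
    	return (0);
    }
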
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c
index a542c662cb15..b33594488ee0 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c
@@ -134,7 +134,6 @@ zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
zp->z_acl_cached = NULL;
zp->z_xattr_cached = NULL;
zp->z_xattr_parent = 0;
- zp->z_moved = B_FALSE;
return (0);
}
@@ -505,6 +504,7 @@ zfs_inode_update(znode_t *zp)
dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &blksize, &i_blocks);
spin_lock(&ip->i_lock);
+ ip->i_mode = zp->z_mode;
ip->i_blocks = i_blocks;
i_size_write(ip, zp->z_size);
spin_unlock(&ip->i_lock);
@@ -546,7 +546,6 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
ASSERT3P(zp->z_xattr_cached, ==, NULL);
zp->z_unlinked = B_FALSE;
zp->z_atime_dirty = B_FALSE;
- zp->z_moved = B_FALSE;
zp->z_is_mapped = B_FALSE;
zp->z_is_ctldir = B_FALSE;
zp->z_is_stale = B_FALSE;
@@ -619,7 +618,6 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
mutex_enter(&zfsvfs->z_znodes_lock);
list_insert_tail(&zfsvfs->z_all_znodes, zp);
zfsvfs->z_nr_znodes++;
- membar_producer();
mutex_exit(&zfsvfs->z_znodes_lock);
unlock_new_inode(ip);
@@ -1901,7 +1899,6 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
rootzp = kmem_cache_alloc(znode_cache, KM_SLEEP);
rootzp->z_unlinked = B_FALSE;
rootzp->z_atime_dirty = B_FALSE;
- rootzp->z_moved = B_FALSE;
rootzp->z_is_sa = USE_SA(version, os);
rootzp->z_pflags = 0;
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zio_crypt.c b/sys/contrib/openzfs/module/os/linux/zfs/zio_crypt.c
index 96dabe55a138..8106359e1c77 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zio_crypt.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zio_crypt.c
@@ -1198,6 +1198,16 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
bcopy(raw_portable_mac, portable_mac, ZIO_OBJSET_MAC_LEN);
/*
+ * This is necessary here as we check next whether
+ * OBJSET_FLAG_USERACCOUNTING_COMPLETE or
+ * OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE is set in order to
+ * decide if the local_mac should be zeroed out.
+ */
+ intval = osp->os_flags;
+ if (should_bswap)
+ intval = BSWAP_64(intval);
+
+ /*
* The local MAC protects the user, group and project accounting.
* If these objects are not present, the local MAC is zeroed out.
*/
@@ -1208,7 +1218,10 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
(datalen >= OBJSET_PHYS_SIZE_V2 &&
osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
osp->os_groupused_dnode.dn_type == DMU_OT_NONE) ||
- (datalen <= OBJSET_PHYS_SIZE_V1)) {
+ (datalen <= OBJSET_PHYS_SIZE_V1) ||
+ (((intval & OBJSET_FLAG_USERACCOUNTING_COMPLETE) == 0 ||
+ (intval & OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE) == 0) &&
+ key->zk_version > 0)) {
bzero(local_mac, ZIO_OBJSET_MAC_LEN);
return (0);
}
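
The added hunk reads osp->os_flags and byte-swaps it before the flag test, because an objset written by a machine of the opposite endianness stores the flags byte-swapped, and a bit test on the raw value would inspect the wrong bits. A standalone illustration of why the normalization matters (BSWAP64() and the flag bit are local stand-ins for the ZFS macros; __builtin_bswap64 is the GCC/Clang builtin):

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-ins for the ZFS BSWAP_64() macro and a flag bit. */
    #define	BSWAP64(x)	__builtin_bswap64(x)
    #define	FLAG_USERACCOUNTING_COMPLETE	(1ULL << 0)

    int
    main(void)
    {
    	/* Flags as written by a machine of the opposite endianness. */
    	uint64_t intval = BSWAP64(FLAG_USERACCOUNTING_COMPLETE);
    	int should_bswap = 1;

    	/* Testing the raw value inspects the wrong bits... */
    	printf("raw test:        %d\n",
    	    (intval & FLAG_USERACCOUNTING_COMPLETE) != 0);

    	/* ...so normalize to native endianness before testing. */
    	if (should_bswap)
    		intval = BSWAP64(intval);
    	printf("normalized test: %d\n",
    	    (intval & FLAG_USERACCOUNTING_COMPLETE) != 0);
    	return (0);
    }
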
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c
index fa4500f6f8d1..e6420f19ed87 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c
@@ -55,7 +55,7 @@ zpl_root_iterate(struct file *filp, zpl_dir_context_t *ctx)
zfsvfs_t *zfsvfs = ITOZSB(file_inode(filp));
int error = 0;
- ZFS_ENTER(zfsvfs);
+ ZPL_ENTER(zfsvfs);
if (!zpl_dir_emit_dots(filp, ctx))
goto out;
@@ -76,7 +76,7 @@ zpl_root_iterate(struct file *filp, zpl_dir_context_t *ctx)
ctx->pos++;
}
out:
- ZFS_EXIT(zfsvfs);
+ ZPL_EXIT(zfsvfs);
return (error);
}
@@ -242,13 +242,14 @@ zpl_snapdir_iterate(struct file *filp, zpl_dir_context_t *ctx)
uint64_t id, pos;
int error = 0;
- ZFS_ENTER(zfsvfs);
+ ZPL_ENTER(zfsvfs);
cookie = spl_fstrans_mark();
if (!zpl_dir_emit_dots(filp, ctx))
goto out;
- pos = ctx->pos;
+	/* Restart the position at 0 if "." and ".." have already been emitted */
+ pos = (ctx->pos == 2 ? 0 : ctx->pos);
while (error == 0) {
dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG);
error = -dmu_snapshot_list_next(zfsvfs->z_os, MAXNAMELEN,
@@ -265,7 +266,7 @@ zpl_snapdir_iterate(struct file *filp, zpl_dir_context_t *ctx)
}
out:
spl_fstrans_unmark(cookie);
- ZFS_EXIT(zfsvfs);
+ ZPL_EXIT(zfsvfs);
if (error == -ENOENT)
return (0);
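
The restart works because zpl_dir_emit_dots() advances ctx->pos through 0 and 1 while emitting "." and "..", leaving it at 2, whereas dmu_snapshot_list_next() expects its cookie to begin at 0. A userspace sketch of the same offset bookkeeping (the iterator and the snapshot table are illustrative):

    #include <stdio.h>

    static const char *snaps[] = { "snap1", "snap2", "snap3" };

    /* Illustrative analogue of dmu_snapshot_list_next(): 0-based cookie. */
    static int
    snapshot_list_next(unsigned long long *cookie, const char **name)
    {
    	if (*cookie >= sizeof (snaps) / sizeof (snaps[0]))
    		return (-1);	/* no more snapshots */
    	*name = snaps[(*cookie)++];
    	return (0);
    }

    int
    main(void)
    {
    	unsigned long long ctx_pos = 2;	/* after "." and ".." were emitted */
    	const char *name;

    	/* Start the snapshot cookie at 0 if only dots were emitted so far. */
    	unsigned long long pos = (ctx_pos == 2 ? 0 : ctx_pos);

    	while (snapshot_list_next(&pos, &name) == 0)
    		printf("emit %s at pos %llu\n", name, pos);
    	return (0);
    }
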
@@ -368,13 +369,13 @@ zpl_snapdir_getattr_impl(const struct path *path, struct kstat *stat,
struct inode *ip = path->dentry->d_inode;
zfsvfs_t *zfsvfs = ITOZSB(ip);
- ZFS_ENTER(zfsvfs);
+ ZPL_ENTER(zfsvfs);
generic_fillattr(ip, stat);
stat->nlink = stat->size = 2;
stat->ctime = stat->mtime = dmu_objset_snap_cmtime(zfsvfs->z_os);
stat->atime = current_time(ip);
- ZFS_EXIT(zfsvfs);
+ ZPL_EXIT(zfsvfs);
return (0);
}
@@ -452,7 +453,7 @@ zpl_shares_iterate(struct file *filp, zpl_dir_context_t *ctx)
znode_t *dzp;
int error = 0;
- ZFS_ENTER(zfsvfs);
+ ZPL_ENTER(zfsvfs);
cookie = spl_fstrans_mark();
if (zfsvfs->z_shares_dir == 0) {
@@ -471,7 +472,7 @@ zpl_shares_iterate(struct file *filp, zpl_dir_context_t *ctx)
iput(ZTOI(dzp));
out:
spl_fstrans_unmark(cookie);
- ZFS_EXIT(zfsvfs);
+ ZPL_EXIT(zfsvfs);
ASSERT3S(error, <=, 0);
return (error);
@@ -502,13 +503,13 @@ zpl_shares_getattr_impl(const struct path *path, struct kstat *stat,
znode_t *dzp;
int error;
- ZFS_ENTER(zfsvfs);
+ ZPL_ENTER(zfsvfs);
if (zfsvfs->z_shares_dir == 0) {
generic_fillattr(path->dentry->d_inode, stat);
stat->nlink = stat->size = 2;
stat->atime = current_time(ip);
- ZFS_EXIT(zfsvfs);
+ ZPL_EXIT(zfsvfs);
return (0);
}
@@ -518,7 +519,7 @@ zpl_shares_getattr_impl(const struct path *path, struct kstat *stat,
iput(ZTOI(dzp));
}
- ZFS_EXIT(zfsvfs);
+ ZPL_EXIT(zfsvfs);
ASSERT3S(error, <=, 0);
return (error);
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c
index 51e189a87272..9e08c94e2147 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c
@@ -212,244 +212,221 @@ zfs_io_flags(struct kiocb *kiocb)
return (flags);
}
-static ssize_t
-zpl_read_common_iovec(struct inode *ip, const struct iovec *iovp, size_t count,
- unsigned long nr_segs, loff_t *ppos, uio_seg_t segment, int flags,
- cred_t *cr, size_t skip)
+/*
+ * If relatime is enabled, call file_accessed() only if
+ * zfs_relatime_need_update() is true. This is needed because datasets with
+ * an inherited "relatime" property aren't necessarily mounted with the
+ * MNT_RELATIME flag (e.g. after `zfs set relatime=...`), which is what
+ * the VFS relatime test in relatime_need_update() is based on.
+ */
+static inline void
+zpl_file_accessed(struct file *filp)
{
- ssize_t read;
- uio_t uio = { { 0 }, 0 };
- int error;
- fstrans_cookie_t cookie;
-
- uio.uio_iov = iovp;
- uio.uio_iovcnt = nr_segs;
- uio.uio_loffset = *ppos;
- uio.uio_segflg = segment;
- uio.uio_limit = MAXOFFSET_T;
- uio.uio_resid = count;
- uio.uio_skip = skip;
-
- cookie = spl_fstrans_mark();
- error = -zfs_read(ip, &uio, flags, cr);
- spl_fstrans_unmark(cookie);
- if (error < 0)
- return (error);
-
- read = count - uio.uio_resid;
- *ppos += read;
+ struct inode *ip = filp->f_mapping->host;
- return (read);
+ if (!IS_NOATIME(ip) && ITOZSB(ip)->z_relatime) {
+ if (zfs_relatime_need_update(ip))
+ file_accessed(filp);
+ } else {
+ file_accessed(filp);
+ }
}
-inline ssize_t
-zpl_read_common(struct inode *ip, const char *buf, size_t len, loff_t *ppos,
- uio_seg_t segment, int flags, cred_t *cr)
-{
- struct iovec iov;
-
- iov.iov_base = (void *)buf;
- iov.iov_len = len;
+#if defined(HAVE_VFS_RW_ITERATE)
- return (zpl_read_common_iovec(ip, &iov, len, 1, ppos, segment,
- flags, cr, 0));
+/*
+ * When HAVE_VFS_IOV_ITER is defined, the iov_iter structure supports
+ * iovecs, kvecs, bvecs and pipes, and all the interfaces required to
+ * manipulate the iov_iter are available. In that case the full iov_iter
+ * can be attached to the uio and handled correctly in the lower layers.
+ * Otherwise, for older kernels, extract the iovec and pass it instead.
+ */
+static void
+zpl_uio_init(uio_t *uio, struct kiocb *kiocb, struct iov_iter *to,
+ loff_t pos, ssize_t count, size_t skip)
+{
+#if defined(HAVE_VFS_IOV_ITER)
+ uio_iov_iter_init(uio, to, pos, count, skip);
+#else
+ uio_iovec_init(uio, to->iov, to->nr_segs, pos,
+ to->type & ITER_KVEC ? UIO_SYSSPACE : UIO_USERSPACE,
+ count, skip);
+#endif
}
static ssize_t
-zpl_iter_read_common(struct kiocb *kiocb, const struct iovec *iovp,
- unsigned long nr_segs, size_t count, uio_seg_t seg, size_t skip)
+zpl_iter_read(struct kiocb *kiocb, struct iov_iter *to)
{
cred_t *cr = CRED();
+ fstrans_cookie_t cookie;
struct file *filp = kiocb->ki_filp;
- struct inode *ip = filp->f_mapping->host;
- zfsvfs_t *zfsvfs = ZTOZSB(ITOZ(ip));
- ssize_t read;
- unsigned int f_flags = filp->f_flags;
+ ssize_t count = iov_iter_count(to);
+ uio_t uio;
+
+ zpl_uio_init(&uio, kiocb, to, kiocb->ki_pos, count, 0);
- f_flags |= zfs_io_flags(kiocb);
crhold(cr);
- read = zpl_read_common_iovec(filp->f_mapping->host, iovp, count,
- nr_segs, &kiocb->ki_pos, seg, f_flags, cr, skip);
+ cookie = spl_fstrans_mark();
+
+ int error = -zfs_read(ITOZ(filp->f_mapping->host), &uio,
+ filp->f_flags | zfs_io_flags(kiocb), cr);
+
+ spl_fstrans_unmark(cookie);
crfree(cr);
- /*
- * If relatime is enabled, call file_accessed() only if
- * zfs_relatime_need_update() is true. This is needed since datasets
- * with inherited "relatime" property aren't necessarily mounted with
- * MNT_RELATIME flag (e.g. after `zfs set relatime=...`), which is what
- * relatime test in VFS by relatime_need_update() is based on.
- */
- if (!IS_NOATIME(ip) && zfsvfs->z_relatime) {
- if (zfs_relatime_need_update(ip))
- file_accessed(filp);
- } else {
- file_accessed(filp);
- }
+ if (error < 0)
+ return (error);
+
+ ssize_t read = count - uio.uio_resid;
+ kiocb->ki_pos += read;
+
+ zpl_file_accessed(filp);
return (read);
}
-#if defined(HAVE_VFS_RW_ITERATE)
-static ssize_t
-zpl_iter_read(struct kiocb *kiocb, struct iov_iter *to)
+static inline ssize_t
+zpl_generic_write_checks(struct kiocb *kiocb, struct iov_iter *from,
+ size_t *countp)
{
- ssize_t ret;
- uio_seg_t seg = UIO_USERSPACE;
- if (to->type & ITER_KVEC)
- seg = UIO_SYSSPACE;
- if (to->type & ITER_BVEC)
- seg = UIO_BVEC;
- ret = zpl_iter_read_common(kiocb, to->iov, to->nr_segs,
- iov_iter_count(to), seg, to->iov_offset);
- if (ret > 0)
- iov_iter_advance(to, ret);
- return (ret);
-}
+#ifdef HAVE_GENERIC_WRITE_CHECKS_KIOCB
+ ssize_t ret = generic_write_checks(kiocb, from);
+ if (ret <= 0)
+ return (ret);
+
+ *countp = ret;
#else
-static ssize_t
-zpl_aio_read(struct kiocb *kiocb, const struct iovec *iovp,
- unsigned long nr_segs, loff_t pos)
-{
- ssize_t ret;
- size_t count;
+ struct file *file = kiocb->ki_filp;
+ struct address_space *mapping = file->f_mapping;
+ struct inode *ip = mapping->host;
+ int isblk = S_ISBLK(ip->i_mode);
- ret = generic_segment_checks(iovp, &nr_segs, &count, VERIFY_WRITE);
+ *countp = iov_iter_count(from);
+ ssize_t ret = generic_write_checks(file, &kiocb->ki_pos, countp, isblk);
if (ret)
return (ret);
+#endif
- return (zpl_iter_read_common(kiocb, iovp, nr_segs, count,
- UIO_USERSPACE, 0));
+ return (0);
}
-#endif /* HAVE_VFS_RW_ITERATE */
static ssize_t
-zpl_write_common_iovec(struct inode *ip, const struct iovec *iovp, size_t count,
- unsigned long nr_segs, loff_t *ppos, uio_seg_t segment, int flags,
- cred_t *cr, size_t skip)
+zpl_iter_write(struct kiocb *kiocb, struct iov_iter *from)
{
- ssize_t wrote;
- uio_t uio = { { 0 }, 0 };
- int error;
+ cred_t *cr = CRED();
fstrans_cookie_t cookie;
+ struct file *filp = kiocb->ki_filp;
+ struct inode *ip = filp->f_mapping->host;
+ uio_t uio;
+ size_t count = 0;
+ ssize_t ret;
- if (flags & O_APPEND)
- *ppos = i_size_read(ip);
+ ret = zpl_generic_write_checks(kiocb, from, &count);
+ if (ret)
+ return (ret);
- uio.uio_iov = iovp;
- uio.uio_iovcnt = nr_segs;
- uio.uio_loffset = *ppos;
- uio.uio_segflg = segment;
- uio.uio_limit = MAXOFFSET_T;
- uio.uio_resid = count;
- uio.uio_skip = skip;
+ zpl_uio_init(&uio, kiocb, from, kiocb->ki_pos, count, from->iov_offset);
+ crhold(cr);
cookie = spl_fstrans_mark();
- error = -zfs_write(ip, &uio, flags, cr);
+
+ int error = -zfs_write(ITOZ(ip), &uio,
+ filp->f_flags | zfs_io_flags(kiocb), cr);
+
spl_fstrans_unmark(cookie);
+ crfree(cr);
+
if (error < 0)
return (error);
- wrote = count - uio.uio_resid;
- *ppos += wrote;
+ ssize_t wrote = count - uio.uio_resid;
+ kiocb->ki_pos += wrote;
+
+ if (wrote > 0)
+ iov_iter_advance(from, wrote);
return (wrote);
}
-inline ssize_t
-zpl_write_common(struct inode *ip, const char *buf, size_t len, loff_t *ppos,
- uio_seg_t segment, int flags, cred_t *cr)
-{
- struct iovec iov;
-
- iov.iov_base = (void *)buf;
- iov.iov_len = len;
-
- return (zpl_write_common_iovec(ip, &iov, len, 1, ppos, segment,
- flags, cr, 0));
-}
+#else /* !HAVE_VFS_RW_ITERATE */
static ssize_t
-zpl_iter_write_common(struct kiocb *kiocb, const struct iovec *iovp,
- unsigned long nr_segs, size_t count, uio_seg_t seg, size_t skip)
+zpl_aio_read(struct kiocb *kiocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
{
cred_t *cr = CRED();
+ fstrans_cookie_t cookie;
struct file *filp = kiocb->ki_filp;
- ssize_t wrote;
- unsigned int f_flags = filp->f_flags;
-
- f_flags |= zfs_io_flags(kiocb);
- crhold(cr);
- wrote = zpl_write_common_iovec(filp->f_mapping->host, iovp, count,
- nr_segs, &kiocb->ki_pos, seg, f_flags, cr, skip);
- crfree(cr);
-
- return (wrote);
-}
-
-#if defined(HAVE_VFS_RW_ITERATE)
-static ssize_t
-zpl_iter_write(struct kiocb *kiocb, struct iov_iter *from)
-{
size_t count;
ssize_t ret;
- uio_seg_t seg = UIO_USERSPACE;
-#ifndef HAVE_GENERIC_WRITE_CHECKS_KIOCB
- struct file *file = kiocb->ki_filp;
- struct address_space *mapping = file->f_mapping;
- struct inode *ip = mapping->host;
- int isblk = S_ISBLK(ip->i_mode);
-
- count = iov_iter_count(from);
- ret = generic_write_checks(file, &kiocb->ki_pos, &count, isblk);
+ ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
if (ret)
return (ret);
-#else
- /*
- * XXX - ideally this check should be in the same lock region with
- * write operations, so that there's no TOCTTOU race when doing
- * append and someone else grow the file.
- */
- ret = generic_write_checks(kiocb, from);
- if (ret <= 0)
- return (ret);
- count = ret;
-#endif
- if (from->type & ITER_KVEC)
- seg = UIO_SYSSPACE;
- if (from->type & ITER_BVEC)
- seg = UIO_BVEC;
+ uio_t uio;
+ uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE,
+ count, 0);
- ret = zpl_iter_write_common(kiocb, from->iov, from->nr_segs,
- count, seg, from->iov_offset);
- if (ret > 0)
- iov_iter_advance(from, ret);
+ crhold(cr);
+ cookie = spl_fstrans_mark();
+
+ int error = -zfs_read(ITOZ(filp->f_mapping->host), &uio,
+ filp->f_flags | zfs_io_flags(kiocb), cr);
+
+ spl_fstrans_unmark(cookie);
+ crfree(cr);
+
+ if (error < 0)
+ return (error);
- return (ret);
+ ssize_t read = count - uio.uio_resid;
+ kiocb->ki_pos += read;
+
+ zpl_file_accessed(filp);
+
+ return (read);
}
-#else
+
static ssize_t
-zpl_aio_write(struct kiocb *kiocb, const struct iovec *iovp,
+zpl_aio_write(struct kiocb *kiocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
- struct file *file = kiocb->ki_filp;
- struct address_space *mapping = file->f_mapping;
- struct inode *ip = mapping->host;
- int isblk = S_ISBLK(ip->i_mode);
+ cred_t *cr = CRED();
+ fstrans_cookie_t cookie;
+ struct file *filp = kiocb->ki_filp;
+ struct inode *ip = filp->f_mapping->host;
size_t count;
ssize_t ret;
- ret = generic_segment_checks(iovp, &nr_segs, &count, VERIFY_READ);
+ ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
if (ret)
return (ret);
- ret = generic_write_checks(file, &pos, &count, isblk);
+ ret = generic_write_checks(filp, &pos, &count, S_ISBLK(ip->i_mode));
if (ret)
return (ret);
- return (zpl_iter_write_common(kiocb, iovp, nr_segs, count,
- UIO_USERSPACE, 0));
+ uio_t uio;
+ uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE,
+ count, 0);
+
+ crhold(cr);
+ cookie = spl_fstrans_mark();
+
+ int error = -zfs_write(ITOZ(ip), &uio,
+ filp->f_flags | zfs_io_flags(kiocb), cr);
+
+ spl_fstrans_unmark(cookie);
+ crfree(cr);
+
+ if (error < 0)
+ return (error);
+
+ ssize_t wrote = count - uio.uio_resid;
+ kiocb->ki_pos += wrote;
+
+ return (wrote);
}
#endif /* HAVE_VFS_RW_ITERATE */
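
Every path above shares one convention: zfs_read() and zfs_write() consume from the uio and leave the untransferred byte count in uio_resid, so the ZPL derives both the return value and the new kiocb position from count - resid. A minimal userspace analogue of that residual accounting (read_into() is a made-up stand-in for zfs_read()):

    #include <stdio.h>

    struct fake_uio {
    	size_t resid;	/* bytes not yet transferred */
    };

    /* Made-up stand-in for zfs_read(): transfers what it can, leaves resid. */
    static int
    read_into(struct fake_uio *uio, size_t available)
    {
    	size_t n = uio->resid < available ? uio->resid : available;

    	uio->resid -= n;
    	return (0);
    }

    int
    main(void)
    {
    	long long ki_pos = 1000;
    	size_t count = 4096;
    	struct fake_uio uio = { .resid = count };

    	if (read_into(&uio, 1500) == 0) {
    		/* Short read: only 1500 of the 4096 bytes were available. */
    		size_t transferred = count - uio.resid;

    		ki_pos += transferred;
    		printf("read %zu bytes, new pos %lld\n", transferred, ki_pos);
    	}
    	return (0);
    }
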
@@ -486,14 +463,27 @@ zpl_direct_IO(int rw, struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
#error "Unknown direct IO interface"
#endif
-#else
+#else /* HAVE_VFS_RW_ITERATE */
#if defined(HAVE_VFS_DIRECT_IO_IOVEC)
static ssize_t
-zpl_direct_IO(int rw, struct kiocb *kiocb, const struct iovec *iovp,
+zpl_direct_IO(int rw, struct kiocb *kiocb, const struct iovec *iov,
loff_t pos, unsigned long nr_segs)
{
if (rw == WRITE)
+ return (zpl_aio_write(kiocb, iov, nr_segs, pos));
+ else
+ return (zpl_aio_read(kiocb, iov, nr_segs, pos));
+}
+#elif defined(HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET)
+static ssize_t
+zpl_direct_IO(int rw, struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
+{
+ const struct iovec *iovp = iov_iter_iovec(iter);
+ unsigned long nr_segs = iter->nr_segs;
+
+ ASSERT3S(pos, ==, kiocb->ki_pos);
+ if (rw == WRITE)
return (zpl_aio_write(kiocb, iovp, nr_segs, pos));
else
return (zpl_aio_read(kiocb, iovp, nr_segs, pos));
@@ -517,7 +507,7 @@ zpl_llseek(struct file *filp, loff_t offset, int whence)
spl_inode_lock_shared(ip);
cookie = spl_fstrans_mark();
- error = -zfs_holey(ip, whence, &offset);
+ error = -zfs_holey(ITOZ(ip), whence, &offset);
spl_fstrans_unmark(cookie);
if (error == 0)
error = lseek_execute(filp, ip, offset, maxbytes);
@@ -603,10 +593,6 @@ zpl_mmap(struct file *filp, struct vm_area_struct *vma)
* Populate a page with data for the Linux page cache. This function is
* only used to support mmap(2). There will be an identical copy of the
* data in the ARC which is kept up to date via .write() and .writepage().
- *
- * Current this function relies on zpl_read_common() and the O_DIRECT
- * flag to read in a page. This works but the more correct way is to
- * update zfs_fillpage() to be Linux friendly and use that interface.
*/
static int
zpl_readpage(struct file *filp, struct page *pp)
@@ -675,10 +661,10 @@ zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
enum writeback_sync_modes sync_mode;
int result;
- ZFS_ENTER(zfsvfs);
+ ZPL_ENTER(zfsvfs);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
wbc->sync_mode = WB_SYNC_ALL;
- ZFS_EXIT(zfsvfs);
+ ZPL_EXIT(zfsvfs);
sync_mode = wbc->sync_mode;
/*
@@ -691,11 +677,11 @@ zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
wbc->sync_mode = WB_SYNC_NONE;
result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
if (sync_mode != wbc->sync_mode) {
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ ZPL_ENTER(zfsvfs);
+ ZPL_VERIFY_ZP(zp);
if (zfsvfs->z_log != NULL)
zil_commit(zfsvfs->z_log, zp->z_id);
- ZFS_EXIT(zfsvfs);
+ ZPL_EXIT(zfsvfs);
/*
* We need to call write_cache_pages() again (we can't just
@@ -1037,6 +1023,10 @@ const struct file_operations zpl_file_operations = {
#endif
.read_iter = zpl_iter_read,
.write_iter = zpl_iter_write,
+#ifdef HAVE_VFS_IOV_ITER
+ .splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
+#endif
#else
.read = do_sync_read,
.write = do_sync_write,
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_inode.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_inode.c
index f3b97a22074c..f336fbb1272b 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_inode.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_inode.c
@@ -490,19 +490,17 @@ zpl_get_link_common(struct dentry *dentry, struct inode *ip, char **link)
{
fstrans_cookie_t cookie;
cred_t *cr = CRED();
- struct iovec iov;
- uio_t uio = { { 0 }, 0 };
int error;
crhold(cr);
*link = NULL;
+
+ struct iovec iov;
iov.iov_len = MAXPATHLEN;
iov.iov_base = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
- uio.uio_iov = &iov;
- uio.uio_iovcnt = 1;
- uio.uio_segflg = UIO_SYSSPACE;
- uio.uio_resid = (MAXPATHLEN - 1);
+ uio_t uio;
+ uio_iovec_init(&uio, &iov, 1, 0, UIO_SYSSPACE, MAXPATHLEN - 1, 0);
cookie = spl_fstrans_mark();
error = -zfs_readlink(ip, &uio, cr);
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c
index 9db8bda4cc66..c2fd3fee1401 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c
@@ -185,14 +185,27 @@ zpl_remount_fs(struct super_block *sb, int *flags, char *data)
static int
__zpl_show_devname(struct seq_file *seq, zfsvfs_t *zfsvfs)
{
- char *fsname;
+ ZPL_ENTER(zfsvfs);
- ZFS_ENTER(zfsvfs);
- fsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
+ char *fsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
dmu_objset_name(zfsvfs->z_os, fsname);
- seq_puts(seq, fsname);
+
+ for (int i = 0; fsname[i] != 0; i++) {
+ /*
+ * Spaces in the dataset name must be converted to their
+ * octal escape sequence for getmntent(3) to correctly
+ * parse then fsname portion of /proc/self/mounts.
+ */
+ if (fsname[i] == ' ') {
+ seq_puts(seq, "\\040");
+ } else {
+ seq_putc(seq, fsname[i]);
+ }
+ }
+
kmem_free(fsname, ZFS_MAX_DATASET_NAME_LEN);
- ZFS_EXIT(zfsvfs);
+
+ ZPL_EXIT(zfsvfs);
return (0);
}
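
getmntent(3) splits each /proc/self/mounts line on whitespace, so a literal space in a dataset name would truncate the fsname field; emitting the four-character sequence \040 instead lets getmntent(3) decode it back to a space. A self-contained sketch of the same escaping (the dataset name is made up):

    #include <stdio.h>

    /* Escape spaces as the octal sequence "\040", as getmntent(3) expects. */
    static void
    show_devname(const char *fsname, FILE *out)
    {
    	for (int i = 0; fsname[i] != '\0'; i++) {
    		if (fsname[i] == ' ')
    			fputs("\\040", out);
    		else
    			fputc(fsname[i], out);
    	}
    	fputc('\n', out);
    }

    int
    main(void)
    {
    	/* A dataset name with an embedded space (illustrative). */
    	show_devname("tank/my data", stdout);	/* prints tank/my\040data */
    	return (0);
    }
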
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c
index 9b5fd0fd397b..1ec3dae2bb81 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c
@@ -274,10 +274,10 @@ static int
zpl_xattr_get_dir(struct inode *ip, const char *name, void *value,
size_t size, cred_t *cr)
{
+ fstrans_cookie_t cookie;
struct inode *xip = NULL;
znode_t *dxzp = NULL;
znode_t *xzp = NULL;
- loff_t pos = 0;
int error;
/* Lookup the xattr directory */
@@ -302,7 +302,19 @@ zpl_xattr_get_dir(struct inode *ip, const char *name, void *value,
goto out;
}
- error = zpl_read_common(xip, value, size, &pos, UIO_SYSSPACE, 0, cr);
+ struct iovec iov;
+ iov.iov_base = (void *)value;
+ iov.iov_len = size;
+
+ uio_t uio;
+ uio_iovec_init(&uio, &iov, 1, 0, UIO_SYSSPACE, size, 0);
+
+ cookie = spl_fstrans_mark();
+ error = -zfs_read(ITOZ(xip), &uio, 0, cr);
+ spl_fstrans_unmark(cookie);
+
+ if (error == 0)
+ error = size - uio_resid(&uio);
out:
if (xzp)
zrele(xzp);
@@ -441,7 +453,6 @@ zpl_xattr_set_dir(struct inode *ip, const char *name, const void *value,
znode_t *dxzp = NULL;
znode_t *xzp = NULL;
vattr_t *vap = NULL;
- ssize_t wrote;
int lookup_flags, error;
const int xattr_mode = S_IFREG | 0644;
loff_t pos = 0;
@@ -496,13 +507,8 @@ zpl_xattr_set_dir(struct inode *ip, const char *name, const void *value,
if (error)
goto out;
- wrote = zpl_write_common(ZTOI(xzp), value, size, &pos,
- UIO_SYSSPACE, 0, cr);
- if (wrote < 0)
- error = wrote;
-
+ error = -zfs_write_simple(xzp, value, size, pos, NULL);
out:
-
if (error == 0) {
ip->i_ctime = current_time(ip);
zfs_mark_inode_dirty(ip);
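
The get side above wraps a single kernel buffer in a one-segment uio and derives the byte count from the residual, while on the set side zfs_write_simple() plays the same single-buffer role for writes. A userspace analogue of wrapping a flat buffer in one iovec, with readv(2) standing in for zfs_read() (the file path is illustrative):

    #include <stdio.h>
    #include <sys/uio.h>
    #include <fcntl.h>
    #include <unistd.h>

    int
    main(void)
    {
    	char value[64];
    	struct iovec iov;

    	/* One segment covering the whole destination buffer. */
    	iov.iov_base = value;
    	iov.iov_len = sizeof (value);

    	int fd = open("/etc/hostname", O_RDONLY);
    	if (fd == -1)
    		return (1);

    	/* readv(2) stands in for zfs_read() on the single-segment uio. */
    	ssize_t n = readv(fd, &iov, 1);
    	if (n > 0)
    		printf("read %zd bytes\n", n);
    	close(fd);
    	return (0);
    }
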
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c b/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
index 218e1101edf8..cdc2076702af 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
@@ -66,49 +66,33 @@ typedef struct zv_request {
* Given a path, return TRUE if path is a ZVOL.
*/
static boolean_t
-zvol_is_zvol_impl(const char *device)
+zvol_is_zvol_impl(const char *path)
{
- struct block_device *bdev;
- unsigned int major;
+ dev_t dev = 0;
- bdev = vdev_lookup_bdev(device);
- if (IS_ERR(bdev))
+ if (vdev_lookup_bdev(path, &dev) != 0)
return (B_FALSE);
- major = MAJOR(bdev->bd_dev);
- bdput(bdev);
-
- if (major == zvol_major)
+ if (MAJOR(dev) == zvol_major)
return (B_TRUE);
return (B_FALSE);
}
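
With the updated vdev_lookup_bdev() filling in a dev_t, the zvol test reduces to comparing the node's major number against zvol_major. A userspace sketch of extracting a device's major and minor numbers (the /dev/zd0 path is illustrative):

    #include <stdio.h>
    #include <sys/stat.h>
    #include <sys/sysmacros.h>	/* major(), minor() */

    int
    main(void)
    {
    	struct stat st;

    	/* Path is illustrative; any block device node works. */
    	if (stat("/dev/zd0", &st) != 0) {
    		perror("stat");
    		return (1);
    	}

    	/* A zvol is recognized purely by its driver's major number. */
    	printf("major=%u minor=%u\n", major(st.st_rdev), minor(st.st_rdev));
    	return (0);
    }
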
static void
-uio_from_bio(uio_t *uio, struct bio *bio)
-{
- uio->uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
- uio->uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
- uio->uio_loffset = BIO_BI_SECTOR(bio) << 9;
- uio->uio_segflg = UIO_BVEC;
- uio->uio_limit = MAXOFFSET_T;
- uio->uio_resid = BIO_BI_SIZE(bio);
- uio->uio_skip = BIO_BI_SKIP(bio);
-}
-
-static void
zvol_write(void *arg)
{
- int error = 0;
-
zv_request_t *zvr = arg;
struct bio *bio = zvr->bio;
- uio_t uio = { { 0 }, 0 };
- uio_from_bio(&uio, bio);
+ int error = 0;
+ uio_t uio;
+
+ uio_bvec_init(&uio, bio);
zvol_state_t *zv = zvr->zv;
- ASSERT(zv && zv->zv_open_count > 0);
- ASSERT(zv->zv_zilog != NULL);
+ ASSERT3P(zv, !=, NULL);
+ ASSERT3U(zv->zv_open_count, >, 0);
+ ASSERT3P(zv->zv_zilog, !=, NULL);
/* bio marked as FLUSH need to flush before write */
if (bio_is_flush(bio))
@@ -122,10 +106,14 @@ zvol_write(void *arg)
return;
}
+ struct request_queue *q = zv->zv_zso->zvo_queue;
+ struct gendisk *disk = zv->zv_zso->zvo_disk;
ssize_t start_resid = uio.uio_resid;
- unsigned long start_jif = jiffies;
- blk_generic_start_io_acct(zv->zv_zso->zvo_queue, WRITE,
- bio_sectors(bio), &zv->zv_zso->zvo_disk->part0);
+ unsigned long start_time;
+
+ boolean_t acct = blk_queue_io_stat(q);
+ if (acct)
+ start_time = blk_generic_start_io_acct(q, disk, WRITE, bio);
boolean_t sync =
bio_is_fua(bio) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
@@ -169,8 +157,10 @@ zvol_write(void *arg)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
- blk_generic_end_io_acct(zv->zv_zso->zvo_queue,
- WRITE, &zv->zv_zso->zvo_disk->part0, start_jif);
+
+ if (acct)
+ blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);
+
BIO_END_IO(bio, -error);
kmem_free(zvr, sizeof (zv_request_t));
}
@@ -187,14 +177,18 @@ zvol_discard(void *arg)
boolean_t sync;
int error = 0;
dmu_tx_t *tx;
- unsigned long start_jif;
- ASSERT(zv && zv->zv_open_count > 0);
- ASSERT(zv->zv_zilog != NULL);
+ ASSERT3P(zv, !=, NULL);
+ ASSERT3U(zv->zv_open_count, >, 0);
+ ASSERT3P(zv->zv_zilog, !=, NULL);
+
+ struct request_queue *q = zv->zv_zso->zvo_queue;
+ struct gendisk *disk = zv->zv_zso->zvo_disk;
+ unsigned long start_time;
- start_jif = jiffies;
- blk_generic_start_io_acct(zv->zv_zso->zvo_queue, WRITE,
- bio_sectors(bio), &zv->zv_zso->zvo_disk->part0);
+ boolean_t acct = blk_queue_io_stat(q);
+ if (acct)
+ start_time = blk_generic_start_io_acct(q, disk, WRITE, bio);
sync = bio_is_fua(bio) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
@@ -239,8 +233,10 @@ zvol_discard(void *arg)
unlock:
rw_exit(&zv->zv_suspend_lock);
- blk_generic_end_io_acct(zv->zv_zso->zvo_queue, WRITE,
- &zv->zv_zso->zvo_disk->part0, start_jif);
+
+ if (acct)
+ blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);
+
BIO_END_IO(bio, -error);
kmem_free(zvr, sizeof (zv_request_t));
}
@@ -248,20 +244,25 @@ unlock:
static void
zvol_read(void *arg)
{
- int error = 0;
-
zv_request_t *zvr = arg;
struct bio *bio = zvr->bio;
- uio_t uio = { { 0 }, 0 };
- uio_from_bio(&uio, bio);
+ int error = 0;
+ uio_t uio;
+
+ uio_bvec_init(&uio, bio);
zvol_state_t *zv = zvr->zv;
- ASSERT(zv && zv->zv_open_count > 0);
+ ASSERT3P(zv, !=, NULL);
+ ASSERT3U(zv->zv_open_count, >, 0);
+ struct request_queue *q = zv->zv_zso->zvo_queue;
+ struct gendisk *disk = zv->zv_zso->zvo_disk;
ssize_t start_resid = uio.uio_resid;
- unsigned long start_jif = jiffies;
- blk_generic_start_io_acct(zv->zv_zso->zvo_queue, READ, bio_sectors(bio),
- &zv->zv_zso->zvo_disk->part0);
+ unsigned long start_time;
+
+ boolean_t acct = blk_queue_io_stat(q);
+ if (acct)
+ start_time = blk_generic_start_io_acct(q, disk, READ, bio);
zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
uio.uio_loffset, uio.uio_resid, RL_READER);
@@ -289,8 +290,10 @@ zvol_read(void *arg)
task_io_account_read(nread);
rw_exit(&zv->zv_suspend_lock);
- blk_generic_end_io_acct(zv->zv_zso->zvo_queue, READ,
- &zv->zv_zso->zvo_disk->part0, start_jif);
+
+ if (acct)
+ blk_generic_end_io_acct(q, disk, READ, bio, start_time);
+
BIO_END_IO(bio, -error);
kmem_free(zvr, sizeof (zv_request_t));
}
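
zvol_write(), zvol_discard() and zvol_read() now share one accounting pattern: query blk_queue_io_stat() once, take a start timestamp only when statistics are enabled, and hand that timestamp to blk_generic_end_io_acct() on completion, so disabled stats cost nothing. A userspace analogue of the conditional start/stop timing (the flag and the work function are made up):

    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static int io_stats_enabled = 1;	/* stands in for blk_queue_io_stat() */

    static void
    do_io(void)
    {
    	usleep(10000);	/* stand-in for the actual read or write */
    }

    int
    main(void)
    {
    	struct timespec start, end;
    	int acct = io_stats_enabled;

    	/* Only pay for the timestamp when accounting is on. */
    	if (acct)
    		clock_gettime(CLOCK_MONOTONIC, &start);

    	do_io();

    	if (acct) {
    		clock_gettime(CLOCK_MONOTONIC, &end);
    		long us = (end.tv_sec - start.tv_sec) * 1000000 +
    		    (end.tv_nsec - start.tv_nsec) / 1000;
    		printf("io took %ld us\n", us);
    	}
    	return (0);
    }
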
@@ -482,9 +485,9 @@ zvol_open(struct block_device *bdev, fmode_t flag)
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
- ASSERT(zv->zv_open_count != 0 || RW_READ_HELD(&zv->zv_suspend_lock));
if (zv->zv_open_count == 0) {
+ ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
error = -zvol_first_open(zv, !(flag & FMODE_WRITE));
if (error)
goto out_mutex;
@@ -501,7 +504,7 @@ zvol_open(struct block_device *bdev, fmode_t flag)
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
- check_disk_change(bdev);
+ zfs_check_media_change(bdev);
return (0);
@@ -530,7 +533,7 @@ zvol_release(struct gendisk *disk, fmode_t mode)
zv = disk->private_data;
mutex_enter(&zv->zv_state_lock);
- ASSERT(zv->zv_open_count > 0);
+ ASSERT3U(zv->zv_open_count, >, 0);
/*
* make sure zvol is not suspended during last close
* (hold zv_suspend_lock) and respect proper lock acquisition
@@ -553,11 +556,12 @@ zvol_release(struct gendisk *disk, fmode_t mode)
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
- ASSERT(zv->zv_open_count != 1 || RW_READ_HELD(&zv->zv_suspend_lock));
zv->zv_open_count--;
- if (zv->zv_open_count == 0)
+ if (zv->zv_open_count == 0) {
+ ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
zvol_last_close(zv);
+ }
mutex_exit(&zv->zv_state_lock);
@@ -652,8 +656,15 @@ zvol_revalidate_disk(struct gendisk *disk)
static int
zvol_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
+ struct gendisk *disk = zv->zv_zso->zvo_disk;
- revalidate_disk(zv->zv_zso->zvo_disk);
+#if defined(HAVE_REVALIDATE_DISK_SIZE)
+ revalidate_disk_size(disk, zvol_revalidate_disk(disk) == 0);
+#elif defined(HAVE_REVALIDATE_DISK)
+ revalidate_disk(disk);
+#else
+ zvol_revalidate_disk(disk);
+#endif
return (0);
}
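
The revalidate hunk is the usual OpenZFS compat ladder: configure-time HAVE_* macros select the newest kernel interface available and fall back in order. A self-contained sketch of the pattern (the HAVE_* definition here is hand-written, not real configure output, and the three functions are stubs):

    #include <stdio.h>

    /* Pretend configure detected only the middle interface. */
    #define	HAVE_REVALIDATE_DISK	1

    void revalidate_disk_size(void)	{ printf("newest interface\n"); }
    void revalidate_disk(void)	{ printf("older interface\n"); }
    void revalidate_fallback(void)	{ printf("oldest fallback\n"); }

    static void
    update_volsize(void)
    {
    #if defined(HAVE_REVALIDATE_DISK_SIZE)
    	revalidate_disk_size();		/* newest kernels */
    #elif defined(HAVE_REVALIDATE_DISK)
    	revalidate_disk();		/* older kernels */
    #else
    	revalidate_fallback();		/* oldest supported kernels */
    #endif
    }

    int
    main(void)
    {
    	update_volsize();
    	return (0);
    }
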
@@ -697,46 +708,6 @@ zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return (0);
}
-/*
- * Find a zvol_state_t given the full major+minor dev_t. If found,
- * return with zv_state_lock taken, otherwise, return (NULL) without
- * taking zv_state_lock.
- */
-static zvol_state_t *
-zvol_find_by_dev(dev_t dev)
-{
- zvol_state_t *zv;
-
- rw_enter(&zvol_state_lock, RW_READER);
- for (zv = list_head(&zvol_state_list); zv != NULL;
- zv = list_next(&zvol_state_list, zv)) {
- mutex_enter(&zv->zv_state_lock);
- if (zv->zv_zso->zvo_dev == dev) {
- rw_exit(&zvol_state_lock);
- return (zv);
- }
- mutex_exit(&zv->zv_state_lock);
- }
- rw_exit(&zvol_state_lock);
-
- return (NULL);
-}
-
-static struct kobject *
-zvol_probe(dev_t dev, int *part, void *arg)
-{
- zvol_state_t *zv;
- struct kobject *kobj;
-
- zv = zvol_find_by_dev(dev);
- kobj = zv ? get_disk_and_module(zv->zv_zso->zvo_disk) : NULL;
- ASSERT(zv == NULL || MUTEX_HELD(&zv->zv_state_lock));
- if (zv)
- mutex_exit(&zv->zv_state_lock);
-
- return (kobj);
-}
-
static struct block_device_operations zvol_ops = {
.open = zvol_open,
.release = zvol_release,
@@ -774,6 +745,7 @@ zvol_alloc(dev_t dev, const char *name)
zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
zso = kmem_zalloc(sizeof (struct zvol_state_os), KM_SLEEP);
zv->zv_zso = zso;
+ zv->zv_volmode = volmode;
list_link_init(&zv->zv_next);
mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);
@@ -859,8 +831,8 @@ zvol_free(zvol_state_t *zv)
ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
- ASSERT(zv->zv_open_count == 0);
- ASSERT(zv->zv_zso->zvo_disk->private_data == NULL);
+ ASSERT0(zv->zv_open_count);
+ ASSERT3P(zv->zv_zso->zvo_disk->private_data, ==, NULL);
rw_destroy(&zv->zv_suspend_lock);
zfs_rangelock_fini(&zv->zv_rangelock);
@@ -879,6 +851,11 @@ zvol_free(zvol_state_t *zv)
kmem_free(zv, sizeof (zvol_state_t));
}
+void
+zvol_wait_close(zvol_state_t *zv)
+{
+}
+
/*
* Create a block device minor node and setup the linkage between it
* and the specified volume. Once this function returns the block
@@ -1083,9 +1060,6 @@ zvol_init(void)
return (-ENOMEM);
}
zvol_init_impl();
- blk_register_region(MKDEV(zvol_major, 0), 1UL << MINORBITS,
- THIS_MODULE, zvol_probe, NULL, NULL);
-
ida_init(&zvol_ida);
zvol_register_ops(&zvol_linux_ops);
return (0);
@@ -1095,7 +1069,6 @@ void
zvol_fini(void)
{
zvol_fini_impl();
- blk_unregister_region(MKDEV(zvol_major, 0), 1UL << MINORBITS);
unregister_blkdev(zvol_major, ZVOL_DRIVER);
taskq_destroy(zvol_taskq);
ida_destroy(&zvol_ida);