Diffstat (limited to 'sys/contrib/openzfs/module/os/freebsd/zfs')
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c          43
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/arc_os.c          10
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c       12
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/vdev_file.c       70
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c       36
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c      6
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_onexit_os.c   70
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c       3
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops.c      877
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode.c       18
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c       15
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c        248
12 files changed, 326 insertions, 1082 deletions
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c
index a7bda509bf54..0a323e8856a3 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c
@@ -106,13 +106,13 @@ abd_free_chunk(void *c)
kmem_cache_free(abd_chunk_cache, c);
}
-static size_t
+static uint_t
abd_chunkcnt_for_bytes(size_t size)
{
return (P2ROUNDUP(size, zfs_abd_chunk_size) / zfs_abd_chunk_size);
}
-static inline size_t
+static inline uint_t
abd_scatter_chunkcnt(abd_t *abd)
{
ASSERT(!abd_is_linear(abd));
@@ -129,7 +129,7 @@ abd_size_alloc_linear(size_t size)
void
abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
{
- size_t n = abd_scatter_chunkcnt(abd);
+ uint_t n = abd_scatter_chunkcnt(abd);
ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
int waste = n * zfs_abd_chunk_size - abd->abd_size;
if (op == ABDSTAT_INCR) {
@@ -161,25 +161,28 @@ abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
void
abd_verify_scatter(abd_t *abd)
{
+ uint_t i, n;
+
/*
* There are no scatter linear pages in FreeBSD, so it is an
* error if the ABD has been marked as a linear page.
*/
- VERIFY(!abd_is_linear_page(abd));
+ ASSERT(!abd_is_linear_page(abd));
ASSERT3U(ABD_SCATTER(abd).abd_offset, <,
zfs_abd_chunk_size);
- size_t n = abd_scatter_chunkcnt(abd);
- for (int i = 0; i < n; i++) {
- ASSERT3P(
- ABD_SCATTER(abd).abd_chunks[i], !=, NULL);
+ n = abd_scatter_chunkcnt(abd);
+ for (i = 0; i < n; i++) {
+ ASSERT3P(ABD_SCATTER(abd).abd_chunks[i], !=, NULL);
}
}
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
- size_t n = abd_chunkcnt_for_bytes(size);
- for (int i = 0; i < n; i++) {
+ uint_t i, n;
+
+ n = abd_chunkcnt_for_bytes(size);
+ for (i = 0; i < n; i++) {
void *c = kmem_cache_alloc(abd_chunk_cache, KM_PUSHPAGE);
ASSERT3P(c, !=, NULL);
ABD_SCATTER(abd).abd_chunks[i] = c;
@@ -190,8 +193,10 @@ abd_alloc_chunks(abd_t *abd, size_t size)
void
abd_free_chunks(abd_t *abd)
{
- size_t n = abd_scatter_chunkcnt(abd);
- for (int i = 0; i < n; i++) {
+ uint_t i, n;
+
+ n = abd_scatter_chunkcnt(abd);
+ for (i = 0; i < n; i++) {
abd_free_chunk(ABD_SCATTER(abd).abd_chunks[i]);
}
}
@@ -199,7 +204,7 @@ abd_free_chunks(abd_t *abd)
abd_t *
abd_alloc_struct(size_t size)
{
- size_t chunkcnt = abd_chunkcnt_for_bytes(size);
+ uint_t chunkcnt = abd_chunkcnt_for_bytes(size);
/*
* In the event we are allocating a gang ABD, the size passed in
* will be 0. We must make sure to set abd_size to the size of an
@@ -221,9 +226,9 @@ abd_alloc_struct(size_t size)
void
abd_free_struct(abd_t *abd)
{
- size_t chunkcnt = abd_is_linear(abd) || abd_is_gang(abd) ? 0 :
+ uint_t chunkcnt = abd_is_linear(abd) || abd_is_gang(abd) ? 0 :
abd_scatter_chunkcnt(abd);
- int size = MAX(sizeof (abd_t),
+ ssize_t size = MAX(sizeof (abd_t),
offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]));
mutex_destroy(&abd->abd_mtx);
ASSERT(!list_link_active(&abd->abd_gang_link));
@@ -238,7 +243,9 @@ abd_free_struct(abd_t *abd)
static void
abd_alloc_zero_scatter(void)
{
- size_t n = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
+ uint_t i, n;
+
+ n = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
abd_zero_buf = kmem_zalloc(zfs_abd_chunk_size, KM_SLEEP);
abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
@@ -251,7 +258,7 @@ abd_alloc_zero_scatter(void)
ABD_SCATTER(abd_zero_scatter).abd_chunk_size =
zfs_abd_chunk_size;
- for (int i = 0; i < n; i++) {
+ for (i = 0; i < n; i++) {
ABD_SCATTER(abd_zero_scatter).abd_chunks[i] =
abd_zero_buf;
}
@@ -356,7 +363,7 @@ abd_get_offset_scatter(abd_t *sabd, size_t off)
ASSERT3U(off, <=, sabd->abd_size);
size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;
- size_t chunkcnt = abd_scatter_chunkcnt(sabd) -
+ uint_t chunkcnt = abd_scatter_chunkcnt(sabd) -
(new_offset / zfs_abd_chunk_size);
abd = abd_alloc_scatter_offset_chunkcnt(chunkcnt);
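
The uint_t conversions above are cosmetic (chunk counts fit comfortably in 32 bits); the underlying arithmetic is plain ceiling division, with P2ROUNDUP rounding the request up to the next chunk boundary before dividing. A minimal userland sketch of the same computation, assuming a fixed 4 KB chunk size in place of the real zfs_abd_chunk_size tunable:

    #include <stdio.h>
    #include <stddef.h>

    /* Assumed 4 KB chunk size; ZFS uses the zfs_abd_chunk_size tunable. */
    #define CHUNK_SIZE ((size_t)4096)

    /* P2ROUNDUP(x, align) for power-of-two align, as in the ZFS headers. */
    #define P2ROUNDUP(x, align) (-(-(x) & -(align)))

    static unsigned int
    chunkcnt_for_bytes(size_t size)
    {
        return (P2ROUNDUP(size, CHUNK_SIZE) / CHUNK_SIZE);
    }

    int
    main(void)
    {
        /* 0 -> 0 chunks, 1 -> 1, 4096 -> 1, 4097 -> 2. */
        printf("%u %u %u %u\n", chunkcnt_for_bytes(0),
            chunkcnt_for_bytes(1), chunkcnt_for_bytes(4096),
            chunkcnt_for_bytes(4097));
        return (0);
    }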
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/arc_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/arc_os.c
index 94df750035a4..4fc7468bfa47 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/arc_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/arc_os.c
@@ -243,3 +243,13 @@ arc_lowmem_fini(void)
if (arc_event_lowmem != NULL)
EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem);
}
+
+void
+arc_register_hotplug(void)
+{
+}
+
+void
+arc_unregister_hotplug(void)
+{
+}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
index 1b37ce0d7f6b..647c1463ba14 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
@@ -114,6 +114,7 @@ SYSCTL_NODE(_vfs_zfs, OID_AUTO, spa, CTLFLAG_RW, 0, "ZFS space allocation");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, trim, CTLFLAG_RW, 0, "ZFS TRIM");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, txg, CTLFLAG_RW, 0, "ZFS transaction group");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vdev, CTLFLAG_RW, 0, "ZFS VDEV");
+SYSCTL_NODE(_vfs_zfs, OID_AUTO, vnops, CTLFLAG_RW, 0, "ZFS VNOPS");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zevent, CTLFLAG_RW, 0, "ZFS event");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zil, CTLFLAG_RW, 0, "ZFS ZIL");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zio, CTLFLAG_RW, 0, "ZFS ZIO");
@@ -228,15 +229,14 @@ SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD,
static int
sysctl_vfs_zfs_arc_no_grow_shift(SYSCTL_HANDLER_ARGS)
{
- uint32_t val;
- int err;
+ int err, val;
val = arc_no_grow_shift;
- err = sysctl_handle_32(oidp, &val, 0, req);
+ err = sysctl_handle_int(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
- if (val >= arc_shrink_shift)
+ if (val < 0 || val >= arc_shrink_shift)
return (EINVAL);
arc_no_grow_shift = val;
@@ -244,8 +244,8 @@ sysctl_vfs_zfs_arc_no_grow_shift(SYSCTL_HANDLER_ARGS)
}
SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_no_grow_shift,
- CTLTYPE_U32 | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 0, sizeof (uint32_t),
- sysctl_vfs_zfs_arc_no_grow_shift, "U",
+ CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, NULL, sizeof (int),
+ sysctl_vfs_zfs_arc_no_grow_shift, "I",
"log2(fraction of ARC which must be free to allow growing)");
int
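
The arc_no_grow_shift handler now follows the standard FreeBSD shape for a validated read-modify-write sysctl: copy the current value into a local, let sysctl_handle_int() perform the userland I/O, and commit only after validation. Switching from uint32_t/sysctl_handle_32 keeps the handler's type in sync with the int backing variable and makes the explicit val < 0 rejection possible. A hedged sketch of that shape with placeholder names, not the actual handler:

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/kernel.h>
    #include <sys/sysctl.h>

    static int my_shift;            /* hypothetical backing variable */
    static int my_shift_limit = 16; /* hypothetical upper bound */

    static int
    sysctl_my_shift(SYSCTL_HANDLER_ARGS)
    {
        int err, val;

        val = my_shift;                          /* export current value */
        err = sysctl_handle_int(oidp, &val, 0, req);
        if (err != 0 || req->newptr == NULL)     /* error, or a read */
            return (err);
        if (val < 0 || val >= my_shift_limit)    /* validate before commit */
            return (EINVAL);
        my_shift = val;
        return (0);
    }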
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_file.c b/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_file.c
index cf762c5fd61c..825bd706e0c0 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_file.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_file.c
@@ -292,19 +292,28 @@ vdev_file_io_done(zio_t *zio)
}
vdev_ops_t vdev_file_ops = {
- vdev_file_open,
- vdev_file_close,
- vdev_default_asize,
- vdev_file_io_start,
- vdev_file_io_done,
- NULL,
- NULL,
- vdev_file_hold,
- vdev_file_rele,
- NULL,
- vdev_default_xlate,
- VDEV_TYPE_FILE, /* name of this vdev type */
- B_TRUE /* leaf vdev */
+ .vdev_op_init = NULL,
+ .vdev_op_fini = NULL,
+ .vdev_op_open = vdev_file_open,
+ .vdev_op_close = vdev_file_close,
+ .vdev_op_asize = vdev_default_asize,
+ .vdev_op_min_asize = vdev_default_min_asize,
+ .vdev_op_min_alloc = NULL,
+ .vdev_op_io_start = vdev_file_io_start,
+ .vdev_op_io_done = vdev_file_io_done,
+ .vdev_op_state_change = NULL,
+ .vdev_op_need_resilver = NULL,
+ .vdev_op_hold = vdev_file_hold,
+ .vdev_op_rele = vdev_file_rele,
+ .vdev_op_remap = NULL,
+ .vdev_op_xlate = vdev_default_xlate,
+ .vdev_op_rebuild_asize = NULL,
+ .vdev_op_metaslab_init = NULL,
+ .vdev_op_config_generate = NULL,
+ .vdev_op_nparity = NULL,
+ .vdev_op_ndisks = NULL,
+ .vdev_op_type = VDEV_TYPE_FILE, /* name of this vdev type */
+ .vdev_op_leaf = B_TRUE /* leaf vdev */
};
/*
@@ -313,19 +322,28 @@ vdev_ops_t vdev_file_ops = {
#ifndef _KERNEL
vdev_ops_t vdev_disk_ops = {
- vdev_file_open,
- vdev_file_close,
- vdev_default_asize,
- vdev_file_io_start,
- vdev_file_io_done,
- NULL,
- NULL,
- vdev_file_hold,
- vdev_file_rele,
- NULL,
- vdev_default_xlate,
- VDEV_TYPE_DISK, /* name of this vdev type */
- B_TRUE /* leaf vdev */
+ .vdev_op_init = NULL,
+ .vdev_op_fini = NULL,
+ .vdev_op_open = vdev_file_open,
+ .vdev_op_close = vdev_file_close,
+ .vdev_op_asize = vdev_default_asize,
+ .vdev_op_min_asize = vdev_default_min_asize,
+ .vdev_op_min_alloc = NULL,
+ .vdev_op_io_start = vdev_file_io_start,
+ .vdev_op_io_done = vdev_file_io_done,
+ .vdev_op_state_change = NULL,
+ .vdev_op_need_resilver = NULL,
+ .vdev_op_hold = vdev_file_hold,
+ .vdev_op_rele = vdev_file_rele,
+ .vdev_op_remap = NULL,
+ .vdev_op_xlate = vdev_default_xlate,
+ .vdev_op_rebuild_asize = NULL,
+ .vdev_op_metaslab_init = NULL,
+ .vdev_op_config_generate = NULL,
+ .vdev_op_nparity = NULL,
+ .vdev_op_ndisks = NULL,
+ .vdev_op_type = VDEV_TYPE_DISK, /* name of this vdev type */
+ .vdev_op_leaf = B_TRUE /* leaf vdev */
};
#endif
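
Both tables above switch from positional to designated initializers, which is what lets the new vdev_ops_t members (vdev_op_init, vdev_op_rebuild_asize, and friends) be added without every consumer enumerating fields in order: named members cannot silently shift into the wrong slot, and unnamed ones are zero-initialized. A toy illustration of the hazard, with a hypothetical struct rather than the real vdev_ops_t:

    struct ops {
        int  (*op_open)(void);
        int  (*op_probe)(void);  /* imagine this member was added later */
        void (*op_close)(void);
    };

    static int  my_open(void)  { return (0); }
    static void my_close(void) { }

    /*
     * Positional form, written before op_probe existed: my_close would
     * now land in op_probe's slot, typically drawing nothing more than
     * an incompatible-pointer warning while silently changing behavior.
     */
    /* struct ops broken = { my_open, my_close }; */

    /* Designated form: order-independent; op_probe is implicitly NULL. */
    struct ops safe = {
        .op_open  = my_open,
        .op_close = my_close,
    };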
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c b/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c
index f042eff7cd2e..c9e8e21982cf 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c
@@ -1141,7 +1141,6 @@ sendreq:
break;
case ZIO_TYPE_IOCTL:
bp->bio_cmd = BIO_FLUSH;
- bp->bio_flags |= BIO_ORDERED;
bp->bio_data = NULL;
bp->bio_offset = cp->provider->mediasize;
bp->bio_length = 0;
@@ -1190,17 +1189,26 @@ vdev_geom_rele(vdev_t *vd)
}
vdev_ops_t vdev_disk_ops = {
- vdev_geom_open,
- vdev_geom_close,
- vdev_default_asize,
- vdev_geom_io_start,
- vdev_geom_io_done,
- NULL,
- NULL,
- vdev_geom_hold,
- vdev_geom_rele,
- NULL,
- vdev_default_xlate,
- VDEV_TYPE_DISK, /* name of this vdev type */
- B_TRUE /* leaf vdev */
+ .vdev_op_init = NULL,
+ .vdev_op_fini = NULL,
+ .vdev_op_open = vdev_geom_open,
+ .vdev_op_close = vdev_geom_close,
+ .vdev_op_asize = vdev_default_asize,
+ .vdev_op_min_asize = vdev_default_min_asize,
+ .vdev_op_min_alloc = NULL,
+ .vdev_op_io_start = vdev_geom_io_start,
+ .vdev_op_io_done = vdev_geom_io_done,
+ .vdev_op_state_change = NULL,
+ .vdev_op_need_resilver = NULL,
+ .vdev_op_hold = vdev_geom_hold,
+ .vdev_op_rele = vdev_geom_rele,
+ .vdev_op_remap = NULL,
+ .vdev_op_xlate = vdev_default_xlate,
+ .vdev_op_rebuild_asize = NULL,
+ .vdev_op_metaslab_init = NULL,
+ .vdev_op_config_generate = NULL,
+ .vdev_op_nparity = NULL,
+ .vdev_op_ndisks = NULL,
+ .vdev_op_type = VDEV_TYPE_DISK, /* name of this vdev type */
+ .vdev_op_leaf = B_TRUE /* leaf vdev */
};
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c
index d7786d5136a2..8fb259f4ba76 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c
@@ -158,7 +158,8 @@ zfs_file_read_impl(zfs_file_t *fp, void *buf, size_t count, loff_t *offp,
rc = fo_read(fp, &auio, td->td_ucred, FOF_OFFSET, td);
if (rc)
return (SET_ERROR(rc));
- *resid = auio.uio_resid;
+ if (resid)
+ *resid = auio.uio_resid;
*offp += count - auio.uio_resid;
return (SET_ERROR(0));
}
@@ -296,7 +297,8 @@ zfs_file_unlink(const char *fnamep)
rc = kern_funlinkat(curthread, AT_FDCWD, fnamep, FD_NONE, seg, 0, 0);
#else
#ifdef AT_BENEATH
- rc = kern_unlinkat(curthread, AT_FDCWD, fnamep, seg, 0, 0);
+ rc = kern_unlinkat(curthread, AT_FDCWD, __DECONST(char *, fnamep),
+ seg, 0, 0);
#else
rc = kern_unlinkat(curthread, AT_FDCWD, __DECONST(char *, fnamep),
seg, 0);
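
The zfs_file_read_impl() change is the usual guard for an optional out-parameter: some callers pass NULL for resid because they do not care about the residual count, so the callee must test before storing. A trivial sketch of the convention (hypothetical helper, not the ZFS function):

    #include <stddef.h>
    #include <string.h>

    /* Copies up to len bytes; resid may be NULL when the caller is
       uninterested in how many bytes were left uncopied. */
    static int
    copy_some(char *dst, size_t len, const char *src, size_t *resid)
    {
        size_t n = strlen(src);
        size_t copied = (n < len) ? n : len;

        memcpy(dst, src, copied);
        if (resid != NULL)          /* guard the optional out-parameter */
            *resid = len - copied;
        return (0);
    }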
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_onexit_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_onexit_os.c
index 8b22f2fdc3b3..e69de29bb2d1 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_onexit_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_onexit_os.c
@@ -1,70 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2013 by Delphix. All rights reserved.
- */
-
-#include <sys/types.h>
-#include <sys/param.h>
-#include <sys/errno.h>
-#include <sys/kmem.h>
-#include <sys/sunddi.h>
-#include <sys/zfs_ioctl.h>
-#include <sys/zfs_onexit.h>
-
-static int
-zfs_onexit_minor_to_state(minor_t minor, zfs_onexit_t **zo)
-{
- *zo = zfsdev_get_state(minor, ZST_ONEXIT);
- if (*zo == NULL)
- return (SET_ERROR(EBADF));
-
- return (0);
-}
-
-int
-zfs_onexit_fd_hold(int fd, minor_t *minorp)
-{
- file_t *fp, *tmpfp;
- zfs_onexit_t *zo;
- void *data;
- int error;
-
- if ((error = zfs_file_get(fd, &fp)))
- return (error);
-
- tmpfp = curthread->td_fpop;
- curthread->td_fpop = fp;
- error = devfs_get_cdevpriv(&data);
- if (error == 0)
- *minorp = (minor_t)(uintptr_t)data;
- curthread->td_fpop = tmpfp;
- if (error != 0)
- return (SET_ERROR(EBADF));
- return (zfs_onexit_minor_to_state(*minorp, &zo));
-}
-
-void
-zfs_onexit_fd_rele(int fd)
-{
- zfs_file_put(fd);
-}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
index 54ebfa7532dd..7bc6b83d0272 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
@@ -42,6 +42,7 @@
#include <sys/mount.h>
#include <sys/cmn_err.h>
#include <sys/zfs_znode.h>
+#include <sys/zfs_vnops.h>
#include <sys/zfs_dir.h>
#include <sys/zil.h>
#include <sys/fs/zfs.h>
@@ -433,7 +434,7 @@ zfs_sync(vfs_t *vfsp, int waitfor)
} else {
/*
* Sync all ZFS filesystems. This is what happens when you
- * run sync(1M). Unlike other filesystems, ZFS honors the
+ * run sync(8). Unlike other filesystems, ZFS honors the
* request by waiting for all pools to commit all dirty data.
*/
spa_sync_allpools();
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops.c
index 3c3285f93389..2e8eadb5e16e 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops.c
@@ -29,6 +29,7 @@
/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
+
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
@@ -270,69 +271,13 @@ zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr)
return (0);
}
-/*
- * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and
- * data (cmd == _FIO_SEEK_DATA). "off" is an in/out parameter.
- */
-static int
-zfs_holey(vnode_t *vp, ulong_t cmd, offset_t *off)
-{
- znode_t *zp = VTOZ(vp);
- uint64_t noff = (uint64_t)*off; /* new offset */
- uint64_t file_sz;
- int error;
- boolean_t hole;
-
- file_sz = zp->z_size;
- if (noff >= file_sz) {
- return (SET_ERROR(ENXIO));
- }
-
- if (cmd == _FIO_SEEK_HOLE)
- hole = B_TRUE;
- else
- hole = B_FALSE;
-
- error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff);
-
- if (error == ESRCH)
- return (SET_ERROR(ENXIO));
-
- /* file was dirty, so fall back to using generic logic */
- if (error == EBUSY) {
- if (hole)
- *off = file_sz;
-
- return (0);
- }
-
- /*
- * We could find a hole that begins after the logical end-of-file,
- * because dmu_offset_next() only works on whole blocks. If the
- * EOF falls mid-block, then indicate that the "virtual hole"
- * at the end of the file begins at the logical EOF, rather than
- * at the end of the last block.
- */
- if (noff > file_sz) {
- ASSERT(hole);
- noff = file_sz;
- }
-
- if (noff < *off)
- return (error);
- *off = noff;
- return (error);
-}
-
/* ARGSUSED */
static int
zfs_ioctl(vnode_t *vp, ulong_t com, intptr_t data, int flag, cred_t *cred,
int *rvalp)
{
- offset_t off;
+ loff_t off;
int error;
- zfsvfs_t *zfsvfs;
- znode_t *zp;
switch (com) {
case _FIOFFS:
@@ -350,18 +295,12 @@ zfs_ioctl(vnode_t *vp, ulong_t com, intptr_t data, int flag, cred_t *cred,
return (0);
}
- case _FIO_SEEK_DATA:
- case _FIO_SEEK_HOLE:
+ case F_SEEK_DATA:
+ case F_SEEK_HOLE:
{
off = *(offset_t *)data;
- zp = VTOZ(vp);
- zfsvfs = zp->z_zfsvfs;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
/* offset parameter is in/out */
- error = zfs_holey(vp, com, &off);
- ZFS_EXIT(zfsvfs);
+ error = zfs_holey(VTOZ(vp), com, &off);
if (error)
return (error);
*(offset_t *)data = off;
@@ -525,16 +464,15 @@ page_unhold(vm_page_t pp)
* On Write: If we find a memory mapped page, we write to *both*
* the page and the dmu buffer.
*/
-static void
-update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
- int segflg, dmu_tx_t *tx)
+void
+update_pages(znode_t *zp, int64_t start, int len, objset_t *os)
{
vm_object_t obj;
struct sf_buf *sf;
+ vnode_t *vp = ZTOV(zp);
caddr_t va;
int off;
- ASSERT(segflg != UIO_NOCOPY);
ASSERT(vp->v_mount != NULL);
obj = vp->v_object;
ASSERT(obj != NULL);
@@ -552,8 +490,8 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
zfs_vmobject_wunlock_12(obj);
va = zfs_map_page(pp, &sf);
- (void) dmu_read(os, oid, start+off, nbytes,
- va+off, DMU_READ_PREFETCH);
+ (void) dmu_read(os, zp->z_id, start + off, nbytes,
+ va + off, DMU_READ_PREFETCH);
zfs_unmap_page(sf);
zfs_vmobject_wlock_12(obj);
@@ -579,10 +517,10 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
* map them into contiguous KVA region and populate them
* in one single dmu_read() call.
*/
-static int
-mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
+int
+mappedread_sf(znode_t *zp, int nbytes, uio_t *uio)
{
- znode_t *zp = VTOZ(vp);
+ vnode_t *vp = ZTOV(zp);
objset_t *os = zp->z_zfsvfs->z_os;
struct sf_buf *sf;
vm_object_t obj;
@@ -664,10 +602,10 @@ mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
* NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
* the file is memory mapped.
*/
-static int
-mappedread(vnode_t *vp, int nbytes, uio_t *uio)
+int
+mappedread(znode_t *zp, int nbytes, uio_t *uio)
{
- znode_t *zp = VTOZ(vp);
+ vnode_t *vp = ZTOV(zp);
vm_object_t obj;
int64_t start;
int len = nbytes;
@@ -710,523 +648,6 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
return (error);
}
-offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */
-
-/*
- * Read bytes from specified file into supplied buffer.
- *
- * IN: vp - vnode of file to be read from.
- * uio - structure supplying read location, range info,
- * and return buffer.
- * ioflag - SYNC flags; used to provide FRSYNC semantics.
- * cr - credentials of caller.
- * ct - caller context
- *
- * OUT: uio - updated offset and range, buffer filled.
- *
- * RETURN: 0 on success, error code on failure.
- *
- * Side Effects:
- * vp - atime updated if byte count > 0
- */
-/* ARGSUSED */
-static int
-zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr)
-{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- ssize_t n, nbytes, start_resid;
- int error = 0;
- int64_t nread;
- zfs_locked_range_t *lr;
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- /* We don't copy out anything useful for directories. */
- if (vp->v_type == VDIR) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EISDIR));
- }
-
- if (zp->z_pflags & ZFS_AV_QUARANTINED) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EACCES));
- }
-
- /*
- * Validate file offset
- */
- if (uio->uio_loffset < (offset_t)0) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EINVAL));
- }
-
- /*
- * Fasttrack empty reads
- */
- if (uio->uio_resid == 0) {
- ZFS_EXIT(zfsvfs);
- return (0);
- }
-
- /*
- * If we're in FRSYNC mode, sync out this znode before reading it.
- */
- if (zfsvfs->z_log &&
- (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
- zil_commit(zfsvfs->z_log, zp->z_id);
-
- /*
- * Lock the range against changes.
- */
- lr = zfs_rangelock_enter(&zp->z_rangelock, uio->uio_loffset,
- uio->uio_resid, RL_READER);
-
- /*
- * If we are reading past end-of-file we can skip
- * to the end; but we might still need to set atime.
- */
- if (uio->uio_loffset >= zp->z_size) {
- error = 0;
- goto out;
- }
-
- ASSERT(uio->uio_loffset < zp->z_size);
- n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
- start_resid = n;
-
- while (n > 0) {
- nbytes = MIN(n, zfs_read_chunk_size -
- P2PHASE(uio->uio_loffset, zfs_read_chunk_size));
-
- if (uio->uio_segflg == UIO_NOCOPY)
- error = mappedread_sf(vp, nbytes, uio);
- else if (vn_has_cached_data(vp)) {
- error = mappedread(vp, nbytes, uio);
- } else {
- error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
- uio, nbytes);
- }
- if (error) {
- /* convert checksum errors into IO errors */
- if (error == ECKSUM)
- error = SET_ERROR(EIO);
- break;
- }
-
- n -= nbytes;
- }
-
- nread = start_resid - n;
- dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
-
-out:
- zfs_rangelock_exit(lr);
-
- ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
- ZFS_EXIT(zfsvfs);
- return (error);
-}
-
-/*
- * Write the bytes to a file.
- *
- * IN: vp - vnode of file to be written to.
- * uio - structure supplying write location, range info,
- * and data buffer.
- * ioflag - FAPPEND, FSYNC, and/or FDSYNC. FAPPEND is
- * set if in append mode.
- * cr - credentials of caller.
- * ct - caller context (NFS/CIFS fem monitor only)
- *
- * OUT: uio - updated offset and range.
- *
- * RETURN: 0 on success, error code on failure.
- *
- * Timestamps:
- * vp - ctime|mtime updated if byte count > 0
- */
-
-/* ARGSUSED */
-static int
-zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr)
-{
- znode_t *zp = VTOZ(vp);
- rlim64_t limit = MAXOFFSET_T;
- ssize_t start_resid = uio->uio_resid;
- ssize_t tx_bytes;
- uint64_t end_size;
- dmu_buf_impl_t *db;
- dmu_tx_t *tx;
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- zilog_t *zilog;
- offset_t woff;
- ssize_t n, nbytes;
- zfs_locked_range_t *lr;
- int max_blksz = zfsvfs->z_max_blksz;
- int error = 0;
- arc_buf_t *abuf;
- iovec_t *aiov = NULL;
- xuio_t *xuio = NULL;
- int i_iov = 0;
- int iovcnt __unused = uio->uio_iovcnt;
- iovec_t *iovp = uio->uio_iov;
- int write_eof;
- int count = 0;
- sa_bulk_attr_t bulk[4];
- uint64_t mtime[2], ctime[2];
- uint64_t uid, gid, projid;
- int64_t nwritten;
-
- /*
- * Fasttrack empty write
- */
- n = start_resid;
- if (n == 0)
- return (0);
-
- if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
- limit = MAXOFFSET_T;
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
- &zp->z_size, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
- &zp->z_pflags, 8);
-
- /*
- * Callers might not be able to detect properly that we are read-only,
- * so check it explicitly here.
- */
- if (zfs_is_readonly(zfsvfs)) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EROFS));
- }
-
- /*
- * If immutable or not appending then return EPERM.
- * Intentionally allow ZFS_READONLY through here.
- * See zfs_zaccess_common()
- */
- if ((zp->z_pflags & ZFS_IMMUTABLE) ||
- ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
- (uio->uio_loffset < zp->z_size))) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EPERM));
- }
-
- zilog = zfsvfs->z_log;
-
- /*
- * Validate file offset
- */
- woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
- if (woff < 0) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EINVAL));
- }
-
- /*
- * If in append mode, set the io offset pointer to eof.
- */
- if (ioflag & FAPPEND) {
- /*
- * Obtain an appending range lock to guarantee file append
- * semantics. We reset the write offset once we have the lock.
- */
- lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
- woff = lr->lr_offset;
- if (lr->lr_length == UINT64_MAX) {
- /*
- * We overlocked the file because this write will cause
- * the file block size to increase.
- * Note that zp_size cannot change with this lock held.
- */
- woff = zp->z_size;
- }
- uio->uio_loffset = woff;
- } else {
- /*
- * Note that if the file block size will change as a result of
- * this write, then this range lock will lock the entire file
- * so that we can re-write the block safely.
- */
- lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
- }
-
- if (vn_rlimit_fsize(vp, uio, uio->uio_td)) {
- zfs_rangelock_exit(lr);
- ZFS_EXIT(zfsvfs);
- return (EFBIG);
- }
-
- if (woff >= limit) {
- zfs_rangelock_exit(lr);
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EFBIG));
- }
-
- if ((woff + n) > limit || woff > (limit - n))
- n = limit - woff;
-
- /* Will this write extend the file length? */
- write_eof = (woff + n > zp->z_size);
-
- end_size = MAX(zp->z_size, woff + n);
-
- uid = zp->z_uid;
- gid = zp->z_gid;
- projid = zp->z_projid;
-
- /*
- * Write the file in reasonable size chunks. Each chunk is written
- * in a separate transaction; this keeps the intent log records small
- * and allows us to do more fine-grained space accounting.
- */
- while (n > 0) {
- woff = uio->uio_loffset;
-
- if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, uid) ||
- zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, gid) ||
- (projid != ZFS_DEFAULT_PROJID &&
- zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
- projid))) {
- error = SET_ERROR(EDQUOT);
- break;
- }
-
- abuf = NULL;
- if (xuio) {
- ASSERT(i_iov < iovcnt);
- aiov = &iovp[i_iov];
- abuf = dmu_xuio_arcbuf(xuio, i_iov);
- dmu_xuio_clear(xuio, i_iov);
- DTRACE_PROBE3(zfs_cp_write, int, i_iov,
- iovec_t *, aiov, arc_buf_t *, abuf);
- ASSERT((aiov->iov_base == abuf->b_data) ||
- ((char *)aiov->iov_base - (char *)abuf->b_data +
- aiov->iov_len == arc_buf_size(abuf)));
- i_iov++;
- } else if (n >= max_blksz &&
- woff >= zp->z_size &&
- P2PHASE(woff, max_blksz) == 0 &&
- zp->z_blksz == max_blksz) {
- /*
- * This write covers a full block. "Borrow" a buffer
- * from the dmu so that we can fill it before we enter
- * a transaction. This avoids the possibility of
- * holding up the transaction if the data copy hangs
- * up on a pagefault (e.g., from an NFS server mapping).
- */
- size_t cbytes;
-
- abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
- max_blksz);
- ASSERT(abuf != NULL);
- ASSERT(arc_buf_size(abuf) == max_blksz);
- if ((error = uiocopy(abuf->b_data, max_blksz,
- UIO_WRITE, uio, &cbytes))) {
- dmu_return_arcbuf(abuf);
- break;
- }
- ASSERT(cbytes == max_blksz);
- }
-
- /*
- * Start a transaction.
- */
- tx = dmu_tx_create(zfsvfs->z_os);
- dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
- db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
- DB_DNODE_ENTER(db);
- dmu_tx_hold_write_by_dnode(tx, DB_DNODE(db), woff,
- MIN(n, max_blksz));
- DB_DNODE_EXIT(db);
- zfs_sa_upgrade_txholds(tx, zp);
- error = dmu_tx_assign(tx, TXG_WAIT);
- if (error) {
- dmu_tx_abort(tx);
- if (abuf != NULL)
- dmu_return_arcbuf(abuf);
- break;
- }
-
- /*
- * If zfs_range_lock() over-locked we grow the blocksize
- * and then reduce the lock range. This will only happen
- * on the first iteration since zfs_range_reduce() will
- * shrink down r_len to the appropriate size.
- */
- if (lr->lr_length == UINT64_MAX) {
- uint64_t new_blksz;
-
- if (zp->z_blksz > max_blksz) {
- /*
- * File's blocksize is already larger than the
- * "recordsize" property. Only let it grow to
- * the next power of 2.
- */
- ASSERT(!ISP2(zp->z_blksz));
- new_blksz = MIN(end_size,
- 1 << highbit64(zp->z_blksz));
- } else {
- new_blksz = MIN(end_size, max_blksz);
- }
- zfs_grow_blocksize(zp, new_blksz, tx);
- zfs_rangelock_reduce(lr, woff, n);
- }
-
- /*
- * XXX - should we really limit each write to z_max_blksz?
- * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
- */
- nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
-
- if (woff + nbytes > zp->z_size)
- vnode_pager_setsize(vp, woff + nbytes);
-
- if (abuf == NULL) {
- tx_bytes = uio->uio_resid;
- error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
- uio, nbytes, tx);
- tx_bytes -= uio->uio_resid;
- } else {
- tx_bytes = nbytes;
- ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
- /*
- * If this is not a full block write, but we are
- * extending the file past EOF and this data starts
- * block-aligned, use assign_arcbuf(). Otherwise,
- * write via dmu_write().
- */
- if (tx_bytes < max_blksz && (!write_eof ||
- aiov->iov_base != abuf->b_data)) {
- ASSERT(xuio);
- dmu_write(zfsvfs->z_os, zp->z_id, woff,
- aiov->iov_len, aiov->iov_base, tx);
- dmu_return_arcbuf(abuf);
- xuio_stat_wbuf_copied();
- } else {
- ASSERT(xuio || tx_bytes == max_blksz);
- dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl), woff,
- abuf, tx);
- }
- ASSERT(tx_bytes <= uio->uio_resid);
- uioskip(uio, tx_bytes);
- }
- if (tx_bytes && vn_has_cached_data(vp)) {
- update_pages(vp, woff, tx_bytes, zfsvfs->z_os,
- zp->z_id, uio->uio_segflg, tx);
- }
-
- /*
- * If we made no progress, we're done. If we made even
- * partial progress, update the znode and ZIL accordingly.
- */
- if (tx_bytes == 0) {
- (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
- (void *)&zp->z_size, sizeof (uint64_t), tx);
- dmu_tx_commit(tx);
- ASSERT(error != 0);
- break;
- }
-
- /*
- * Clear Set-UID/Set-GID bits on successful write if not
- * privileged and at least one of the execute bits is set.
- *
- * It would be nice to to this after all writes have
- * been done, but that would still expose the ISUID/ISGID
- * to another app after the partial write is committed.
- *
- * Note: we don't call zfs_fuid_map_id() here because
- * user 0 is not an ephemeral uid.
- */
- mutex_enter(&zp->z_acl_lock);
- if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
- (S_IXUSR >> 6))) != 0 &&
- (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
- secpolicy_vnode_setid_retain(vp, cr,
- (zp->z_mode & S_ISUID) != 0 && zp->z_uid == 0) != 0) {
- uint64_t newmode;
- zp->z_mode &= ~(S_ISUID | S_ISGID);
- newmode = zp->z_mode;
- (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
- (void *)&newmode, sizeof (uint64_t), tx);
- }
- mutex_exit(&zp->z_acl_lock);
-
- zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
-
- /*
- * Update the file size (zp_size) if it has changed;
- * account for possible concurrent updates.
- */
- while ((end_size = zp->z_size) < uio->uio_loffset) {
- (void) atomic_cas_64(&zp->z_size, end_size,
- uio->uio_loffset);
- ASSERT(error == 0 || error == EFAULT);
- }
- /*
- * If we are replaying and eof is non zero then force
- * the file size to the specified eof. Note, there's no
- * concurrency during replay.
- */
- if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
- zp->z_size = zfsvfs->z_replay_eof;
-
- if (error == 0)
- error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
- else
- (void) sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
-
- zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes,
- ioflag, NULL, NULL);
- dmu_tx_commit(tx);
-
- if (error != 0)
- break;
- ASSERT(tx_bytes == nbytes);
- n -= nbytes;
-
- }
-
- zfs_rangelock_exit(lr);
-
- /*
- * If we're in replay mode, or we made no progress, return error.
- * Otherwise, it's at least a partial write, so it's successful.
- */
- if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
- ZFS_EXIT(zfsvfs);
- return (error);
- }
-
- /*
- * EFAULT means that at least one page of the source buffer was not
- * available. VFS will re-try remaining I/O upon this error.
- */
- if (error == EFAULT) {
- ZFS_EXIT(zfsvfs);
- return (error);
- }
-
- if (ioflag & (FSYNC | FDSYNC) ||
- zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, zp->z_id);
-
- nwritten = start_resid - uio->uio_resid;
- dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
-
- ZFS_EXIT(zfsvfs);
- return (0);
-}
-
int
zfs_write_simple(znode_t *zp, const void *data, size_t len,
loff_t pos, size_t *presid)
@@ -1249,184 +670,13 @@ zfs_write_simple(znode_t *zp, const void *data, size_t len,
return (error);
}
-static void
-zfs_get_done(zgd_t *zgd, int error)
+void
+zfs_zrele_async(znode_t *zp)
{
- znode_t *zp = zgd->zgd_private;
- objset_t *os = zp->z_zfsvfs->z_os;
-
- if (zgd->zgd_db)
- dmu_buf_rele(zgd->zgd_db, zgd);
+ vnode_t *vp = ZTOV(zp);
+ objset_t *os = ITOZSB(vp)->z_os;
- zfs_rangelock_exit(zgd->zgd_lr);
-
- /*
- * Release the vnode asynchronously as we currently have the
- * txg stopped from syncing.
- */
- VN_RELE_ASYNC(ZTOV(zp), dsl_pool_zrele_taskq(dmu_objset_pool(os)));
-
- kmem_free(zgd, sizeof (zgd_t));
-}
-
-#ifdef ZFS_DEBUG
-static int zil_fault_io = 0;
-#endif
-
-/*
- * Get data to generate a TX_WRITE intent log record.
- */
-int
-zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
-{
- zfsvfs_t *zfsvfs = arg;
- objset_t *os = zfsvfs->z_os;
- znode_t *zp;
- uint64_t object = lr->lr_foid;
- uint64_t offset = lr->lr_offset;
- uint64_t size = lr->lr_length;
- dmu_buf_t *db;
- zgd_t *zgd;
- int error = 0;
-
- ASSERT3P(lwb, !=, NULL);
- ASSERT3P(zio, !=, NULL);
- ASSERT3U(size, !=, 0);
-
- /*
- * Nothing to do if the file has been removed
- */
- if (zfs_zget(zfsvfs, object, &zp) != 0)
- return (SET_ERROR(ENOENT));
- if (zp->z_unlinked) {
- /*
- * Release the vnode asynchronously as we currently have the
- * txg stopped from syncing.
- */
- VN_RELE_ASYNC(ZTOV(zp),
- dsl_pool_zrele_taskq(dmu_objset_pool(os)));
- return (SET_ERROR(ENOENT));
- }
-
- zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
- zgd->zgd_lwb = lwb;
- zgd->zgd_private = zp;
-
- /*
- * Write records come in two flavors: immediate and indirect.
- * For small writes it's cheaper to store the data with the
- * log record (immediate); for large writes it's cheaper to
- * sync the data and get a pointer to it (indirect) so that
- * we don't have to write the data twice.
- */
- if (buf != NULL) { /* immediate write */
- zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock, offset,
- size, RL_READER);
- /* test for truncation needs to be done while range locked */
- if (offset >= zp->z_size) {
- error = SET_ERROR(ENOENT);
- } else {
- error = dmu_read(os, object, offset, size, buf,
- DMU_READ_NO_PREFETCH);
- }
- ASSERT(error == 0 || error == ENOENT);
- } else { /* indirect write */
- /*
- * Have to lock the whole block to ensure when it's
- * written out and its checksum is being calculated
- * that no one can change the data. We need to re-check
- * blocksize after we get the lock in case it's changed!
- */
- for (;;) {
- uint64_t blkoff;
- size = zp->z_blksz;
- blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
- offset -= blkoff;
- zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
- offset, size, RL_READER);
- if (zp->z_blksz == size)
- break;
- offset += blkoff;
- zfs_rangelock_exit(zgd->zgd_lr);
- }
- /* test for truncation needs to be done while range locked */
- if (lr->lr_offset >= zp->z_size)
- error = SET_ERROR(ENOENT);
-#ifdef ZFS_DEBUG
- if (zil_fault_io) {
- error = SET_ERROR(EIO);
- zil_fault_io = 0;
- }
-#endif
- if (error == 0)
- error = dmu_buf_hold(os, object, offset, zgd, &db,
- DMU_READ_NO_PREFETCH);
-
- if (error == 0) {
- blkptr_t *bp = &lr->lr_blkptr;
-
- zgd->zgd_db = db;
- zgd->zgd_bp = bp;
-
- ASSERT(db->db_offset == offset);
- ASSERT(db->db_size == size);
-
- error = dmu_sync(zio, lr->lr_common.lrc_txg,
- zfs_get_done, zgd);
- ASSERT(error || lr->lr_length <= size);
-
- /*
- * On success, we need to wait for the write I/O
- * initiated by dmu_sync() to complete before we can
- * release this dbuf. We will finish everything up
- * in the zfs_get_done() callback.
- */
- if (error == 0)
- return (0);
-
- if (error == EALREADY) {
- lr->lr_common.lrc_txtype = TX_WRITE2;
- /*
- * TX_WRITE2 relies on the data previously
- * written by the TX_WRITE that caused
- * EALREADY. We zero out the BP because
- * it is the old, currently-on-disk BP,
- * so there's no need to zio_flush() its
- * vdevs (flushing would needlesly hurt
- * performance, and doesn't work on
- * indirect vdevs).
- */
- zgd->zgd_bp = NULL;
- BP_ZERO(bp);
- error = 0;
- }
- }
- }
-
- zfs_get_done(zgd, error);
-
- return (error);
-}
-
-/*ARGSUSED*/
-static int
-zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr,
- caller_context_t *ct)
-{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- int error;
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- if (flag & V_ACE_MASK)
- error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
- else
- error = zfs_zaccess_rwx(zp, mode, flag, cr);
-
- ZFS_EXIT(zfsvfs);
- return (error);
+ VN_RELE_ASYNC(vp, dsl_pool_zrele_taskq(dmu_objset_pool(os)));
}
static int
@@ -2708,27 +1958,6 @@ update:
return (error);
}
-ulong_t zfs_fsync_sync_cnt = 4;
-
-static int
-zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
-{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
-
- (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
-
- if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
- zil_commit(zfsvfs->z_log, zp->z_id);
- ZFS_EXIT(zfsvfs);
- }
- tsd_set(zfs_fsyncer_key, NULL);
- return (0);
-}
-
-
/*
* Get the requested file attributes and place them in the provided
* vattr structure.
@@ -3905,7 +3134,7 @@ zfs_rename_check(znode_t *szp, znode_t *sdzp, znode_t *tdzp)
return (error);
}
-#if __FreeBSD_version < 1300110
+#if __FreeBSD_version < 1300124
static void
cache_vop_rename(struct vnode *fdvp, struct vnode *fvp, struct vnode *tdvp,
struct vnode *tvp, struct componentname *fcnp, struct componentname *tcnp)
@@ -4793,45 +4022,6 @@ zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
}
}
-/*ARGSUSED*/
-static int
-zfs_getsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
- caller_context_t *ct)
-{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- int error;
- boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
- error = zfs_getacl(zp, vsecp, skipaclchk, cr);
- ZFS_EXIT(zfsvfs);
-
- return (error);
-}
-
-/*ARGSUSED*/
-int
-zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
-{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- int error;
- boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
- zilog_t *zilog = zfsvfs->z_log;
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- error = zfs_setacl(zp, vsecp, skipaclchk, cr);
-
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
-
- ZFS_EXIT(zfsvfs);
- return (error);
-}
-
static int
zfs_getpages(struct vnode *vp, vm_page_t *ma, int count, int *rbehind,
int *rahead)
@@ -5225,7 +4415,7 @@ static int
zfs_freebsd_read(struct vop_read_args *ap)
{
- return (zfs_read(ap->a_vp, ap->a_uio, ioflags(ap->a_ioflag),
+ return (zfs_read(VTOZ(ap->a_vp), ap->a_uio, ioflags(ap->a_ioflag),
ap->a_cred));
}
@@ -5242,7 +4432,7 @@ static int
zfs_freebsd_write(struct vop_write_args *ap)
{
- return (zfs_write(ap->a_vp, ap->a_uio, ioflags(ap->a_ioflag),
+ return (zfs_write(VTOZ(ap->a_vp), ap->a_uio, ioflags(ap->a_ioflag),
ap->a_cred));
}
@@ -5301,7 +4491,7 @@ zfs_freebsd_access(struct vop_access_args *ap)
*/
accmode = ap->a_accmode & (VREAD|VWRITE|VEXEC|VAPPEND);
if (accmode != 0)
- error = zfs_access(ap->a_vp, accmode, 0, ap->a_cred, NULL);
+ error = zfs_access(zp, accmode, 0, ap->a_cred);
/*
* VADMIN has to be handled by vaccess().
@@ -5512,7 +4702,7 @@ zfs_freebsd_fsync(struct vop_fsync_args *ap)
{
vop_stdfsync(ap);
- return (zfs_fsync(ap->a_vp, 0, ap->a_td->td_ucred, NULL));
+ return (zfs_fsync(VTOZ(ap->a_vp), 0, ap->a_td->td_ucred));
}
#ifndef _SYS_SYSPROTO_H_
@@ -5825,7 +5015,11 @@ zfs_freebsd_inactive(struct vop_inactive_args *ap)
{
vnode_t *vp = ap->a_vp;
+#if __FreeBSD_version >= 1300123
zfs_inactive(vp, curthread->td_ucred, NULL);
+#else
+ zfs_inactive(vp, ap->a_td->td_ucred, NULL);
+#endif
return (0);
}
@@ -6377,7 +5571,8 @@ zfs_freebsd_getacl(struct vop_getacl_args *ap)
return (EINVAL);
vsecattr.vsa_mask = VSA_ACE | VSA_ACECNT;
- if ((error = zfs_getsecattr(ap->a_vp, &vsecattr, 0, ap->a_cred, NULL)))
+ if ((error = zfs_getsecattr(VTOZ(ap->a_vp),
+ &vsecattr, 0, ap->a_cred)))
return (error);
error = acl_from_aces(ap->a_aclp, vsecattr.vsa_aclentp,
@@ -6510,7 +5705,13 @@ zfs_vptocnp(struct vop_vptocnp_args *ap)
error = vget(covered_vp, LK_SHARED | LK_VNHELD, curthread);
#endif
if (error == 0) {
- error = VOP_VPTOCNP(covered_vp, ap->a_vpp, ap->a_buf, ap->a_buflen);
+#if __FreeBSD_version >= 1300123
+ error = VOP_VPTOCNP(covered_vp, ap->a_vpp, ap->a_buf,
+ ap->a_buflen);
+#else
+ error = VOP_VPTOCNP(covered_vp, ap->a_vpp, ap->a_cred,
+ ap->a_buf, ap->a_buflen);
+#endif
vput(covered_vp);
}
vn_lock(vp, ltype | LK_RETRY);
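
The bulk of the zfs_vnops.c deletions are moves rather than removals: zfs_read(), zfs_write(), zfs_get_data(), zfs_fsync(), zfs_holey(), and the ACL entry points now live in common code and are still called from the FreeBSD glue with znode_t arguments. The hole-seeking subtlety survives the move: dmu_offset_next() works on whole blocks, so a reported hole can begin past the logical EOF and must be clamped back to it. A reduced sketch of that clamp, with simplified signatures rather than the shared implementation:

    #include <stdint.h>
    #include <errno.h>

    typedef int boolean_t;

    /* Stand-in for dmu_offset_next(): locates the next hole/data
       boundary at block granularity, filling *noff on success. */
    extern int offset_next(uint64_t obj, boolean_t hole, uint64_t *noff);

    static int
    holey(uint64_t obj, uint64_t file_sz, boolean_t hole, uint64_t *off)
    {
        uint64_t noff = *off;
        int error;

        if (noff >= file_sz)
            return (ENXIO);         /* seeking at or past EOF */
        error = offset_next(obj, hole, &noff);
        if (error == ESRCH)
            return (ENXIO);         /* no further hole/data found */
        if (error != 0)
            return (error);
        /*
         * A block-granular search can report a hole beyond the
         * logical EOF; the "virtual hole" really starts at EOF.
         */
        if (noff > file_sz)
            noff = file_sz;
        *off = noff;
        return (0);
    }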
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode.c
index 40baa0b80928..6a21623c5f67 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode.c
@@ -149,7 +149,6 @@ zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
zp->z_acl_cached = NULL;
zp->z_vnode = NULL;
- zp->z_moved = 0;
return (0);
}
@@ -278,7 +277,6 @@ zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
sharezp = zfs_znode_alloc_kmem(KM_SLEEP);
ASSERT(!POINTER_IS_VALID(sharezp->z_zfsvfs));
- sharezp->z_moved = 0;
sharezp->z_unlinked = 0;
sharezp->z_atime_dirty = 0;
sharezp->z_zfsvfs = zfsvfs;
@@ -437,7 +435,6 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
vp->v_data = zp;
ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
- zp->z_moved = 0;
zp->z_sa_hdl = NULL;
zp->z_unlinked = 0;
@@ -1692,7 +1689,6 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
rootzp = zfs_znode_alloc_kmem(KM_SLEEP);
ASSERT(!POINTER_IS_VALID(rootzp->z_zfsvfs));
- rootzp->z_moved = 0;
rootzp->z_unlinked = 0;
rootzp->z_atime_dirty = 0;
rootzp->z_is_sa = USE_SA(version, os);
@@ -2015,6 +2011,20 @@ zfs_obj_to_stats(objset_t *osp, uint64_t obj, zfs_stat_t *sb,
return (error);
}
+
+void
+zfs_inode_update(znode_t *zp)
+{
+ vm_object_t object;
+
+ if ((object = ZTOV(zp)->v_object) == NULL ||
+ zp->z_size == object->un_pager.vnp.vnp_size)
+ return;
+
+ vnode_pager_setsize(ZTOV(zp), zp->z_size);
+}
+
+
#ifdef _KERNEL
int
zfs_znode_parent_and_name(znode_t *zp, znode_t **dzpp, char *buf)
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c
index fb88bc325d3c..fd2beee7bdd2 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c
@@ -1071,6 +1071,16 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
bcopy(raw_portable_mac, portable_mac, ZIO_OBJSET_MAC_LEN);
/*
+ * This is necessary here as we check next whether
+ * OBJSET_FLAG_USERACCOUNTING_COMPLETE or
+ * OBJSET_FLAG_USEROBJACCOUNTING are set in order to
+ * decide if the local_mac should be zeroed out.
+ */
+ intval = osp->os_flags;
+ if (should_bswap)
+ intval = BSWAP_64(intval);
+
+ /*
* The local MAC protects the user, group and project accounting.
* If these objects are not present, the local MAC is zeroed out.
*/
@@ -1081,7 +1091,10 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
(datalen >= OBJSET_PHYS_SIZE_V2 &&
osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
osp->os_groupused_dnode.dn_type == DMU_OT_NONE) ||
- (datalen <= OBJSET_PHYS_SIZE_V1)) {
+ (datalen <= OBJSET_PHYS_SIZE_V1) ||
+ (((intval & OBJSET_FLAG_USERACCOUNTING_COMPLETE) == 0 ||
+ (intval & OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE) == 0) &&
+ key->zk_version > 0)) {
bzero(local_mac, ZIO_OBJSET_MAC_LEN);
return (0);
}
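
The zio_crypt.c hunks load os_flags (byte-swapping when the objset was written by an opposite-endian system) before the local-MAC decision, then extend the zero-out condition: when either user-accounting flag is clear and the key is version 1 or later, the local MAC is zeroed rather than computed. The endianness step follows the usual on-disk-data pattern; a small sketch with a hypothetical flag bit:

    #include <stdint.h>

    #define FLAG_ACCOUNTING_COMPLETE (1ULL << 0)  /* hypothetical bit */

    static int
    accounting_complete(uint64_t raw_flags, int should_bswap)
    {
        uint64_t flags = raw_flags;

        /* Bits read from an opposite-endian writer are meaningless
           until the whole word is swapped back. */
        if (should_bswap)
            flags = __builtin_bswap64(flags);
        return ((flags & FLAG_ACCOUNTING_COMPLETE) != 0);
    }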
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
index 092eb34eaa47..6c44e3681709 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
@@ -116,7 +116,6 @@ enum zvol_geom_state {
};
struct zvol_state_os {
- int zso_volmode;
#define zso_dev _zso_state._zso_dev
#define zso_geom _zso_state._zso_geom
union {
@@ -134,6 +133,7 @@ struct zvol_state_os {
enum zvol_geom_state zsg_state;
} _zso_geom;
} _zso_state;
+ int zso_dying;
};
static uint32_t zvol_minors;
@@ -209,7 +209,7 @@ zvol_geom_open(struct g_provider *pp, int flag, int count)
{
zvol_state_t *zv;
int err = 0;
- boolean_t drop_suspend = B_TRUE;
+ boolean_t drop_suspend = B_FALSE;
boolean_t drop_namespace = B_FALSE;
if (!zpool_on_zvol && tsd_get(zfs_geom_probe_vdev_key) != NULL) {
@@ -228,16 +228,15 @@ retry:
rw_enter(&zvol_state_lock, ZVOL_RW_READER);
zv = pp->private;
if (zv == NULL) {
- if (drop_namespace)
- mutex_exit(&spa_namespace_lock);
rw_exit(&zvol_state_lock);
- return (SET_ERROR(ENXIO));
+ err = SET_ERROR(ENXIO);
+ goto out_locked;
}
if (zv->zv_open_count == 0 && !mutex_owned(&spa_namespace_lock)) {
/*
* We need to guarantee that the namespace lock is held
- * to avoid spurious failures in zvol_first_open
+ * to avoid spurious failures in zvol_first_open.
*/
drop_namespace = B_TRUE;
if (!mutex_tryenter(&spa_namespace_lock)) {
@@ -247,8 +246,12 @@ retry:
}
}
mutex_enter(&zv->zv_state_lock);
-
- ASSERT(zv->zv_zso->zso_volmode == ZFS_VOLMODE_GEOM);
+ if (zv->zv_zso->zso_dying) {
+ rw_exit(&zvol_state_lock);
+ err = SET_ERROR(ENXIO);
+ goto out_zv_locked;
+ }
+ ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
/*
* make sure zvol is not suspended during first open
@@ -256,6 +259,7 @@ retry:
* ordering - zv_suspend_lock before zv_state_lock
*/
if (zv->zv_open_count == 0) {
+ drop_suspend = B_TRUE;
if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
@@ -266,8 +270,6 @@ retry:
drop_suspend = B_FALSE;
}
}
- } else {
- drop_suspend = B_FALSE;
}
rw_exit(&zvol_state_lock);
@@ -277,7 +279,7 @@ retry:
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
err = zvol_first_open(zv, !(flag & FWRITE));
if (err)
- goto out_mutex;
+ goto out_zv_locked;
pp->mediasize = zv->zv_volsize;
pp->stripeoffset = 0;
pp->stripesize = zv->zv_volblocksize;
@@ -289,41 +291,37 @@ retry:
*/
if ((flag & FWRITE) && ((zv->zv_flags & ZVOL_RDONLY) ||
dmu_objset_incompatible_encryption_version(zv->zv_objset))) {
- err = EROFS;
- goto out_open_count;
+ err = SET_ERROR(EROFS);
+ goto out_opened;
}
if (zv->zv_flags & ZVOL_EXCL) {
- err = EBUSY;
- goto out_open_count;
+ err = SET_ERROR(EBUSY);
+ goto out_opened;
}
#ifdef FEXCL
if (flag & FEXCL) {
if (zv->zv_open_count != 0) {
- err = EBUSY;
- goto out_open_count;
+ err = SET_ERROR(EBUSY);
+ goto out_opened;
}
zv->zv_flags |= ZVOL_EXCL;
}
#endif
zv->zv_open_count += count;
- if (drop_namespace)
- mutex_exit(&spa_namespace_lock);
- mutex_exit(&zv->zv_state_lock);
- if (drop_suspend)
- rw_exit(&zv->zv_suspend_lock);
- return (0);
-
-out_open_count:
- if (zv->zv_open_count == 0)
+out_opened:
+ if (zv->zv_open_count == 0) {
zvol_last_close(zv);
-out_mutex:
+ wakeup(zv);
+ }
+out_zv_locked:
+ mutex_exit(&zv->zv_state_lock);
+out_locked:
if (drop_namespace)
mutex_exit(&spa_namespace_lock);
- mutex_exit(&zv->zv_state_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
- return (SET_ERROR(err));
+ return (err);
}
/*ARGSUSED*/
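
zvol_geom_open() above (and zvol_cdev_open() below) lean on one locking idiom worth noting: the documented order is zv_suspend_lock before zv_state_lock, yet the open path only discovers it needs zv_suspend_lock while already holding zv_state_lock. The resolution is try-lock first; on failure, drop the inner lock, block on the outer one, re-take the inner lock, and re-check whether the outer lock is still needed. A generic sketch of the idiom using pthread stand-ins for the kernel primitives:

    #include <pthread.h>
    #include <stdbool.h>

    /* Convention: 'outer' must be acquired before 'inner'. */
    static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;
    static int open_count;          /* guarded by inner */

    /* Called with inner held and open_count == 0; returns true with
       both locks held, or false with only inner held if the outer
       lock proved unnecessary after re-checking. */
    static bool
    take_outer_respecting_order(void)
    {
        if (pthread_mutex_trylock(&outer) == 0)
            return (true);          /* fast path: no inversion */
        /*
         * Inversion: we hold inner but must block on outer.  Drop
         * inner, take outer, re-take inner, then re-validate the
         * condition that made outer necessary, since it may have
         * changed while neither lock was held.
         */
        pthread_mutex_unlock(&inner);
        pthread_mutex_lock(&outer);
        pthread_mutex_lock(&inner);
        if (open_count != 0) {      /* no longer the first open */
            pthread_mutex_unlock(&outer);
            return (false);
        }
        return (true);
    }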
@@ -332,6 +330,7 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
{
zvol_state_t *zv;
boolean_t drop_suspend = B_TRUE;
+ int new_open_count;
rw_enter(&zvol_state_lock, ZVOL_RW_READER);
zv = pp->private;
@@ -342,30 +341,32 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
mutex_enter(&zv->zv_state_lock);
if (zv->zv_flags & ZVOL_EXCL) {
- ASSERT(zv->zv_open_count == 1);
+ ASSERT3U(zv->zv_open_count, ==, 1);
zv->zv_flags &= ~ZVOL_EXCL;
}
- ASSERT(zv->zv_zso->zso_volmode == ZFS_VOLMODE_GEOM);
+ ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
/*
* If the open count is zero, this is a spurious close.
* That indicates a bug in the kernel / DDI framework.
*/
- ASSERT(zv->zv_open_count > 0);
+ ASSERT3U(zv->zv_open_count, >, 0);
/*
* make sure zvol is not suspended during last close
* (hold zv_suspend_lock) and respect proper lock acquisition
* ordering - zv_suspend_lock before zv_state_lock
*/
- if ((zv->zv_open_count - count) == 0) {
+ new_open_count = zv->zv_open_count - count;
+ if (new_open_count == 0) {
if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
/* check to see if zv_suspend_lock is needed */
- if (zv->zv_open_count != 1) {
+ new_open_count = zv->zv_open_count - count;
+ if (new_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
}
@@ -380,11 +381,11 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
/*
* You may get multiple opens, but only one close.
*/
- zv->zv_open_count -= count;
-
+ zv->zv_open_count = new_open_count;
if (zv->zv_open_count == 0) {
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
zvol_last_close(zv);
+ wakeup(zv);
}
mutex_exit(&zv->zv_state_lock);
@@ -400,7 +401,7 @@ zvol_geom_run(zvol_state_t *zv)
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
- ASSERT(zv->zv_zso->zso_volmode == ZFS_VOLMODE_GEOM);
+ ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
g_error_provider(pp, 0);
@@ -414,7 +415,7 @@ zvol_geom_destroy(zvol_state_t *zv)
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
- ASSERT(zv->zv_zso->zso_volmode == ZFS_VOLMODE_GEOM);
+ ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
g_topology_assert();
@@ -422,10 +423,25 @@ zvol_geom_destroy(zvol_state_t *zv)
VERIFY(zsg->zsg_state == ZVOL_GEOM_RUNNING);
mutex_exit(&zv->zv_state_lock);
zsg->zsg_provider = NULL;
- pp->private = NULL;
g_wither_geom(pp->geom, ENXIO);
}
+void
+zvol_wait_close(zvol_state_t *zv)
+{
+
+ if (zv->zv_volmode != ZFS_VOLMODE_GEOM)
+ return;
+ mutex_enter(&zv->zv_state_lock);
+ zv->zv_zso->zso_dying = B_TRUE;
+
+ if (zv->zv_open_count)
+ msleep(zv, &zv->zv_state_lock,
+ PRIBIO, "zvol:dying", 10*hz);
+ mutex_exit(&zv->zv_state_lock);
+}
+
+
static int
zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
{
@@ -483,7 +499,7 @@ zvol_geom_worker(void *arg)
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct bio *bp;
- ASSERT(zv->zv_zso->zso_volmode == ZFS_VOLMODE_GEOM);
+ ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
thread_lock(curthread);
sched_prio(curthread, PRIBIO);
@@ -512,9 +528,13 @@ static void
zvol_geom_bio_start(struct bio *bp)
{
zvol_state_t *zv = bp->bio_to->private;
- struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
+ struct zvol_state_geom *zsg;
boolean_t first;
+ if (zv == NULL) {
+ g_io_deliver(bp, ENXIO);
+ return;
+ }
if (bp->bio_cmd == BIO_GETATTR) {
if (zvol_geom_bio_getattr(bp))
g_io_deliver(bp, EOPNOTSUPP);
@@ -522,6 +542,7 @@ zvol_geom_bio_start(struct bio *bp)
}
if (!THREAD_CAN_SLEEP()) {
+ zsg = &zv->zv_zso->zso_geom;
mtx_lock(&zsg->zsg_queue_mtx);
first = (bioq_first(&zsg->zsg_queue) == NULL);
bioq_insert_tail(&zsg->zsg_queue, bp);
@@ -540,7 +561,7 @@ zvol_geom_bio_getattr(struct bio *bp)
zvol_state_t *zv;
zv = bp->bio_to->private;
- ASSERT(zv != NULL);
+ ASSERT3P(zv, !=, NULL);
spa_t *spa = dmu_objset_spa(zv->zv_objset);
uint64_t refd, avail, usedobjs, availobjs;
@@ -613,7 +634,7 @@ zvol_geom_bio_strategy(struct bio *bp)
goto sync;
break;
default:
- error = EOPNOTSUPP;
+ error = SET_ERROR(EOPNOTSUPP);
goto resume;
}
@@ -621,7 +642,7 @@ zvol_geom_bio_strategy(struct bio *bp)
volsize = zv->zv_volsize;
os = zv->zv_objset;
- ASSERT(os != NULL);
+ ASSERT3P(os, !=, NULL);
addr = bp->bio_data;
resid = bp->bio_length;
@@ -688,7 +709,7 @@ unlock:
bp->bio_completed = bp->bio_length - resid;
if (bp->bio_completed < bp->bio_length && off > volsize)
- error = EINVAL;
+ error = SET_ERROR(EINVAL);
switch (bp->bio_cmd) {
case BIO_FLUSH:
@@ -825,18 +846,33 @@ zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
zvol_state_t *zv;
struct zvol_state_dev *zsd;
int err = 0;
- boolean_t drop_suspend = B_TRUE;
+ boolean_t drop_suspend = B_FALSE;
+ boolean_t drop_namespace = B_FALSE;
+retry:
rw_enter(&zvol_state_lock, ZVOL_RW_READER);
zv = dev->si_drv2;
if (zv == NULL) {
rw_exit(&zvol_state_lock);
- return (SET_ERROR(ENXIO));
+ err = SET_ERROR(ENXIO);
+ goto out_locked;
}
+ if (zv->zv_open_count == 0 && !mutex_owned(&spa_namespace_lock)) {
+ /*
+ * We need to guarantee that the namespace lock is held
+ * to avoid spurious failures in zvol_first_open.
+ */
+ drop_namespace = B_TRUE;
+ if (!mutex_tryenter(&spa_namespace_lock)) {
+ rw_exit(&zvol_state_lock);
+ mutex_enter(&spa_namespace_lock);
+ goto retry;
+ }
+ }
mutex_enter(&zv->zv_state_lock);
- ASSERT(zv->zv_zso->zso_volmode == ZFS_VOLMODE_DEV);
+ ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_DEV);
/*
* make sure zvol is not suspended during first open
@@ -844,6 +880,7 @@ zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
* ordering - zv_suspend_lock before zv_state_lock
*/
if (zv->zv_open_count == 0) {
+ drop_suspend = B_TRUE;
if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
@@ -854,8 +891,6 @@ zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
drop_suspend = B_FALSE;
}
}
- } else {
- drop_suspend = B_FALSE;
}
rw_exit(&zvol_state_lock);
@@ -865,21 +900,21 @@ zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
err = zvol_first_open(zv, !(flags & FWRITE));
if (err)
- goto out_locked;
+ goto out_zv_locked;
}
if ((flags & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
- err = EROFS;
+ err = SET_ERROR(EROFS);
goto out_opened;
}
if (zv->zv_flags & ZVOL_EXCL) {
- err = EBUSY;
+ err = SET_ERROR(EBUSY);
goto out_opened;
}
#ifdef FEXCL
if (flags & FEXCL) {
if (zv->zv_open_count != 0) {
- err = EBUSY;
+ err = SET_ERROR(EBUSY);
goto out_opened;
}
zv->zv_flags |= ZVOL_EXCL;
@@ -894,20 +929,19 @@ zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
(zv->zv_flags & ZVOL_WRITTEN_TO) != 0)
zil_async_to_sync(zv->zv_zilog, ZVOL_OBJ);
}
-
- mutex_exit(&zv->zv_state_lock);
- if (drop_suspend)
- rw_exit(&zv->zv_suspend_lock);
- return (0);
-
out_opened:
- if (zv->zv_open_count == 0)
+ if (zv->zv_open_count == 0) {
zvol_last_close(zv);
-out_locked:
+ wakeup(zv);
+ }
+out_zv_locked:
mutex_exit(&zv->zv_state_lock);
+out_locked:
+ if (drop_namespace)
+ mutex_exit(&spa_namespace_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
- return (SET_ERROR(err));
+ return (err);
}
static int
@@ -926,17 +960,17 @@ zvol_cdev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
mutex_enter(&zv->zv_state_lock);
if (zv->zv_flags & ZVOL_EXCL) {
- ASSERT(zv->zv_open_count == 1);
+ ASSERT3U(zv->zv_open_count, ==, 1);
zv->zv_flags &= ~ZVOL_EXCL;
}
- ASSERT(zv->zv_zso->zso_volmode == ZFS_VOLMODE_DEV);
+ ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_DEV);
/*
* If the open count is zero, this is a spurious close.
* That indicates a bug in the kernel / DDI framework.
*/
- ASSERT(zv->zv_open_count > 0);
+ ASSERT3U(zv->zv_open_count, >, 0);
/*
* make sure zvol is not suspended during last close
* (hold zv_suspend_lock) and respect proper lock acquisition
@@ -972,6 +1006,7 @@ zvol_cdev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
if (zv->zv_open_count == 0) {
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
zvol_last_close(zv);
+ wakeup(zv);
}
mutex_exit(&zv->zv_state_lock);
@@ -1022,7 +1057,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
length <= 0) {
printf("%s: offset=%jd length=%jd\n", __func__, offset,
length);
- error = EINVAL;
+ error = SET_ERROR(EINVAL);
break;
}
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
@@ -1076,7 +1111,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
refd = metaslab_class_get_alloc(spa_normal_class(spa));
arg->value.off = refd / DEV_BSIZE;
} else
- error = ENOIOCTL;
+ error = SET_ERROR(ENOIOCTL);
break;
}
case FIOSEEKHOLE:
@@ -1092,7 +1127,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
break;
}
default:
- error = ENOIOCTL;
+ error = SET_ERROR(ENOIOCTL);
}
return (error);
@@ -1144,14 +1179,14 @@ zvol_rename_minor(zvol_state_t *zv, const char *newname)
hlist_del(&zv->zv_hlink);
hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));
- if (zv->zv_zso->zso_volmode == ZFS_VOLMODE_GEOM) {
+ if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
struct g_geom *gp;
g_topology_lock();
gp = pp->geom;
- ASSERT(gp != NULL);
+ ASSERT3P(gp, !=, NULL);
zsg->zsg_provider = NULL;
g_wither_provider(pp, ENXIO);
@@ -1164,7 +1199,7 @@ zvol_rename_minor(zvol_state_t *zv, const char *newname)
zsg->zsg_provider = pp;
g_error_provider(pp, 0);
g_topology_unlock();
- } else if (zv->zv_zso->zso_volmode == ZFS_VOLMODE_DEV) {
+ } else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
struct cdev *dev;
struct make_dev_args args;
@@ -1206,26 +1241,30 @@ zvol_free(zvol_state_t *zv)
{
ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
- ASSERT(zv->zv_open_count == 0);
+ ASSERT0(zv->zv_open_count);
ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);
rw_destroy(&zv->zv_suspend_lock);
zfs_rangelock_fini(&zv->zv_rangelock);
- if (zv->zv_zso->zso_volmode == ZFS_VOLMODE_GEOM) {
+ if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
+ struct g_provider *pp __maybe_unused = zsg->zsg_provider;
+
+ ASSERT3P(pp->private, ==, NULL);
g_topology_lock();
zvol_geom_destroy(zv);
g_topology_unlock();
mtx_destroy(&zsg->zsg_queue_mtx);
- } else if (zv->zv_zso->zso_volmode == ZFS_VOLMODE_DEV) {
+ } else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
struct cdev *dev = zsd->zsd_cdev;
- if (dev != NULL)
- destroy_dev(dev);
+ ASSERT3P(dev->si_drv2, ==, NULL);
+
+ destroy_dev(dev);
}
mutex_destroy(&zv->zv_state_lock);
@@ -1249,7 +1288,6 @@ zvol_create_minor_impl(const char *name)
int error;
ZFS_LOG(1, "Creating ZVOL %s...", name);
-
hash = zvol_name_hash(name);
if ((zv = zvol_find_by_name_hash(name, hash, RW_NONE)) != NULL) {
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -1258,10 +1296,11 @@ zvol_create_minor_impl(const char *name)
}
DROP_GIANT();
- /* lie and say we're read-only */
- error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, B_TRUE, FTAG, &os);
+
doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
+ /* lie and say we're read-only */
+ error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, B_TRUE, FTAG, &os);
if (error)
goto out_doi;
@@ -1275,8 +1314,10 @@ zvol_create_minor_impl(const char *name)
error = dsl_prop_get_integer(name,
zfs_prop_to_name(ZFS_PROP_VOLMODE), &volmode, NULL);
- if (error != 0 || volmode == ZFS_VOLMODE_DEFAULT)
+ if (error || volmode == ZFS_VOLMODE_DEFAULT)
volmode = zvol_volmode;
+ error = 0;
+
/*
* zvol_alloc equivalent ...
*/
@@ -1284,8 +1325,8 @@ zvol_create_minor_impl(const char *name)
zv->zv_hash = hash;
mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);
zv->zv_zso = kmem_zalloc(sizeof (struct zvol_state_os), KM_SLEEP);
- zv->zv_zso->zso_volmode = volmode;
- if (zv->zv_zso->zso_volmode == ZFS_VOLMODE_GEOM) {
+ zv->zv_volmode = volmode;
+ if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp;
struct g_geom *gp;
@@ -1298,7 +1339,6 @@ zvol_create_minor_impl(const char *name)
gp->start = zvol_geom_bio_start;
gp->access = zvol_geom_access;
pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
- /* TODO: NULL check? */
pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
pp->sectorsize = DEV_BSIZE;
pp->mediasize = 0;
@@ -1306,7 +1346,7 @@ zvol_create_minor_impl(const char *name)
zsg->zsg_provider = pp;
bioq_init(&zsg->zsg_queue);
- } else if (zv->zv_zso->zso_volmode == ZFS_VOLMODE_DEV) {
+ } else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
struct cdev *dev;
struct make_dev_args args;
@@ -1320,12 +1360,12 @@ zvol_create_minor_impl(const char *name)
args.mda_mode = 0640;
args.mda_si_drv2 = zv;
error = make_dev_s(&args, &dev, "%s/%s", ZVOL_DRIVER, name);
- if (error != 0) {
- mutex_destroy(&zv->zv_state_lock);
+ if (error) {
kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
+ mutex_destroy(&zv->zv_state_lock);
kmem_free(zv, sizeof (*zv));
dmu_objset_disown(os, B_TRUE, FTAG);
- goto out_giant;
+ goto out_doi;
}
dev->si_iosize_max = maxphys;
zsd->zsd_cdev = dev;
@@ -1350,15 +1390,14 @@ zvol_create_minor_impl(const char *name)
ASSERT3P(zv->zv_kstat.dk_kstats, ==, NULL);
dataset_kstats_create(&zv->zv_kstat, zv->zv_objset);
- /* XXX do prefetch */
+ /* TODO: prefetch for geom tasting */
zv->zv_objset = NULL;
out_dmu_objset_disown:
dmu_objset_disown(os, B_TRUE, FTAG);
- if (zv->zv_zso->zso_volmode == ZFS_VOLMODE_GEOM) {
- if (error == 0)
- zvol_geom_run(zv);
+ if (error == 0 && volmode == ZFS_VOLMODE_GEOM) {
+ zvol_geom_run(zv);
g_topology_unlock();
}
out_doi:
@@ -1368,9 +1407,8 @@ out_doi:
zvol_insert(zv);
zvol_minors++;
rw_exit(&zvol_state_lock);
+ ZFS_LOG(1, "ZVOL %s created.", name);
}
- ZFS_LOG(1, "ZVOL %s created.", name);
-out_giant:
PICKUP_GIANT();
return (error);
}
@@ -1379,11 +1417,11 @@ static void
zvol_clear_private(zvol_state_t *zv)
{
ASSERT(RW_LOCK_HELD(&zvol_state_lock));
- if (zv->zv_zso->zso_volmode == ZFS_VOLMODE_GEOM) {
+ if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
- if (pp == NULL) /* XXX when? */
+ if (pp->private == NULL) /* already cleared */
return;
mtx_lock(&zsg->zsg_queue_mtx);
@@ -1391,11 +1429,15 @@ zvol_clear_private(zvol_state_t *zv)
pp->private = NULL;
wakeup_one(&zsg->zsg_queue);
while (zsg->zsg_state != ZVOL_GEOM_RUNNING)
- msleep(&zsg->zsg_state,
- &zsg->zsg_queue_mtx,
+ msleep(&zsg->zsg_state, &zsg->zsg_queue_mtx,
0, "zvol:w", 0);
mtx_unlock(&zsg->zsg_queue_mtx);
ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
+ } else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
+ struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
+ struct cdev *dev = zsd->zsd_cdev;
+
+ dev->si_drv2 = NULL;
}
}
@@ -1403,15 +1445,17 @@ static int
zvol_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
zv->zv_volsize = volsize;
- if (zv->zv_zso->zso_volmode == ZFS_VOLMODE_GEOM) {
+ if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
- if (pp == NULL) /* XXX when? */
- return (0);
-
g_topology_lock();
+ if (pp->private == NULL) {
+ g_topology_unlock();
+ return (SET_ERROR(ENXIO));
+ }
+
/*
* Do not invoke resize event when initial size was zero.
* ZVOL initializes the size on first open, this is not