Diffstat (limited to 'sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c')
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c  61
1 file changed, 31 insertions, 30 deletions
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
index 7d50de506169..ebcc0ad92e0f 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
@@ -655,7 +655,6 @@ zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
ssize_t n, nbytes;
int error = 0;
- rl_t *rl;
xuio_t *xuio = NULL;
ZFS_ENTER(zfsvfs);
@@ -703,7 +702,8 @@ zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
/*
* Lock the range against changes.
*/
- rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER);
+ locked_range_t *lr = rangelock_enter(&zp->z_rangelock,
+ uio->uio_loffset, uio->uio_resid, RL_READER);
/*
* If we are reading past end-of-file we can skip
@@ -773,7 +773,7 @@ zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
n -= nbytes;
}
out:
- zfs_range_unlock(rl);
+ rangelock_exit(lr);
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
ZFS_EXIT(zfsvfs);
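
For context, the new interface pairs rangelock_enter() with rangelock_exit() and takes the znode's z_rangelock field rather than the znode itself, as the old zfs_range_lock() did. A minimal sketch of the reader-side pattern used by zfs_read() follows; the helper name and the elided copy loop are illustrative, not part of this commit:

	/*
	 * Take a read lock over the range being copied out, do the
	 * copy, then drop the lock.  rangelock_enter() blocks until
	 * no conflicting writer holds an overlapping range.
	 */
	static int
	read_range_locked(znode_t *zp, uio_t *uio)
	{
		locked_range_t *lr = rangelock_enter(&zp->z_rangelock,
		    uio->uio_loffset, uio->uio_resid, RL_READER);

		/* ... dmu read / uiomove loop, as in zfs_read() ... */

		rangelock_exit(lr);
		return (0);
	}
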
@@ -813,7 +813,6 @@ zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
zilog_t *zilog;
offset_t woff;
ssize_t n, nbytes;
- rl_t *rl;
int max_blksz = zfsvfs->z_max_blksz;
int error = 0;
arc_buf_t *abuf;
@@ -881,7 +880,7 @@ zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
}
/*
- * Check for mandatory locks before calling zfs_range_lock()
+ * Check for mandatory locks before calling rangelock_enter()
* in order to prevent a deadlock with locks set via fcntl().
*/
if (MANDMODE((mode_t)zp->z_mode) &&
@@ -906,14 +905,15 @@ zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
/*
* If in append mode, set the io offset pointer to eof.
*/
+ locked_range_t *lr;
if (ioflag & FAPPEND) {
/*
* Obtain an appending range lock to guarantee file append
* semantics. We reset the write offset once we have the lock.
*/
- rl = zfs_range_lock(zp, 0, n, RL_APPEND);
- woff = rl->r_off;
- if (rl->r_len == UINT64_MAX) {
+ lr = rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
+ woff = lr->lr_offset;
+ if (lr->lr_length == UINT64_MAX) {
/*
* We overlocked the file because this write will cause
* the file block size to increase.
@@ -928,17 +928,17 @@ zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
* this write, then this range lock will lock the entire file
* so that we can re-write the block safely.
*/
- rl = zfs_range_lock(zp, woff, n, RL_WRITER);
+ lr = rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
}
if (vn_rlimit_fsize(vp, uio, uio->uio_td)) {
- zfs_range_unlock(rl);
+ rangelock_exit(lr);
ZFS_EXIT(zfsvfs);
return (EFBIG);
}
if (woff >= limit) {
- zfs_range_unlock(rl);
+ rangelock_exit(lr);
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EFBIG));
}
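
The append path relies on RL_APPEND semantics: the caller passes an offset of zero, the lock code substitutes the current end of file, and the actual offset comes back in lr_offset. A length of UINT64_MAX signals that the entire file was locked because the write may grow the block size. A condensed sketch (the helper name is hypothetical):

	static locked_range_t *
	append_lock(znode_t *zp, ssize_t n, offset_t *woffp)
	{
		locked_range_t *lr;

		/* The offset argument is ignored for RL_APPEND. */
		lr = rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
		*woffp = lr->lr_offset;		/* actual EOF offset */
		return (lr);
	}
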
@@ -1019,12 +1019,12 @@ zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
}
/*
- * If zfs_range_lock() over-locked we grow the blocksize
+ * If rangelock_enter() over-locked we grow the blocksize
* and then reduce the lock range. This will only happen
- * on the first iteration since zfs_range_reduce() will
- * shrink down r_len to the appropriate size.
+ * on the first iteration since rangelock_reduce() will
+ * shrink down lr_length to the appropriate size.
*/
- if (rl->r_len == UINT64_MAX) {
+ if (lr->lr_length == UINT64_MAX) {
uint64_t new_blksz;
if (zp->z_blksz > max_blksz) {
@@ -1040,7 +1040,7 @@ zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
new_blksz = MIN(end_size, max_blksz);
}
zfs_grow_blocksize(zp, new_blksz, tx);
- zfs_range_reduce(rl, woff, n);
+ rangelock_reduce(lr, woff, n);
}
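
In other words, only the first pass through the write loop can hold a whole-file lock; once the block size has been grown under that lock, rangelock_reduce() narrows it to just the range being written so that writers to other ranges can proceed. Condensed, assuming new_blksz has already been clamped to max_blksz as above:

	if (lr->lr_length == UINT64_MAX) {
		zfs_grow_blocksize(zp, new_blksz, tx);
		/* Shrink the whole-file lock down to [woff, woff + n). */
		rangelock_reduce(lr, woff, n);
	}
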
/*
@@ -1166,7 +1166,7 @@ zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
#endif
}
- zfs_range_unlock(rl);
+ rangelock_exit(lr);
/*
* If we're in replay mode, or we made no progress, return error.
@@ -1206,7 +1206,7 @@ zfs_get_done(zgd_t *zgd, int error)
if (zgd->zgd_db)
dmu_buf_rele(zgd->zgd_db, zgd);
- zfs_range_unlock(zgd->zgd_rl);
+ rangelock_exit(zgd->zgd_lr);
/*
* Release the vnode asynchronously as we currently have the
@@ -1268,7 +1268,8 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
* we don't have to write the data twice.
*/
if (buf != NULL) { /* immediate write */
- zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER);
+ zgd->zgd_lr = rangelock_enter(&zp->z_rangelock,
+ offset, size, RL_READER);
/* test for truncation needs to be done while range locked */
if (offset >= zp->z_size) {
error = SET_ERROR(ENOENT);
@@ -1289,12 +1290,12 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
size = zp->z_blksz;
blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
offset -= blkoff;
- zgd->zgd_rl = zfs_range_lock(zp, offset, size,
- RL_READER);
+ zgd->zgd_lr = rangelock_enter(&zp->z_rangelock,
+ offset, size, RL_READER);
if (zp->z_blksz == size)
break;
offset += blkoff;
- zfs_range_unlock(zgd->zgd_rl);
+ rangelock_exit(zgd->zgd_lr);
}
/* test for truncation needs to be done while range locked */
if (lr->lr_offset >= zp->z_size)
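
The indirect-write path cannot know the stable block size until it holds the lock, so it locks the block-aligned range, re-checks z_blksz, and retries if the size changed in the window. A condensed sketch of that loop, with the offset arithmetic folded into the call rather than mutating offset as the code above does:

	for (;;) {
		uint64_t size = zp->z_blksz;
		uint64_t blkoff = ISP2(size) ?
		    P2PHASE(offset, size) : offset;

		zgd->zgd_lr = rangelock_enter(&zp->z_rangelock,
		    offset - blkoff, size, RL_READER);
		if (zp->z_blksz == size)
			break;	/* block size is stable while locked */
		rangelock_exit(zgd->zgd_lr);
	}
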
@@ -4484,7 +4485,7 @@ zfs_getpages(struct vnode *vp, vm_page_t *ma, int count, int *rbehind,
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
objset_t *os = zp->z_zfsvfs->z_os;
- rl_t *rl;
+ locked_range_t *lr;
vm_object_t object;
off_t start, end, obj_size;
uint_t blksz;
@@ -4503,11 +4504,11 @@ zfs_getpages(struct vnode *vp, vm_page_t *ma, int count, int *rbehind,
*/
for (;;) {
blksz = zp->z_blksz;
- rl = zfs_range_lock(zp, rounddown(start, blksz),
+ lr = rangelock_enter(&zp->z_rangelock, rounddown(start, blksz),
roundup(end, blksz) - rounddown(start, blksz), RL_READER);
if (blksz == zp->z_blksz)
break;
- zfs_range_unlock(rl);
+ rangelock_exit(lr);
}
object = ma[0]->object;
@@ -4515,7 +4516,7 @@ zfs_getpages(struct vnode *vp, vm_page_t *ma, int count, int *rbehind,
obj_size = object->un_pager.vnp.vnp_size;
zfs_vmobject_wunlock(object);
if (IDX_TO_OFF(ma[count - 1]->pindex) >= obj_size) {
- zfs_range_unlock(rl);
+ rangelock_exit(lr);
ZFS_EXIT(zfsvfs);
return (zfs_vm_pagerret_bad);
}
@@ -4543,7 +4544,7 @@ zfs_getpages(struct vnode *vp, vm_page_t *ma, int count, int *rbehind,
error = dmu_read_pages(os, zp->z_id, ma, count, &pgsin_b, &pgsin_a,
MIN(end, obj_size) - (end - PAGE_SIZE));
- zfs_range_unlock(rl);
+ rangelock_exit(lr);
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
ZFS_EXIT(zfsvfs);
@@ -4580,7 +4581,7 @@ zfs_putpages(struct vnode *vp, vm_page_t *ma, size_t len, int flags,
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- rl_t *rl;
+ locked_range_t *lr;
dmu_tx_t *tx;
struct sf_buf *sf;
vm_object_t object;
@@ -4613,7 +4614,7 @@ zfs_putpages(struct vnode *vp, vm_page_t *ma, size_t len, int flags,
blksz = zp->z_blksz;
lo_off = rounddown(off, blksz);
lo_len = roundup(len + (off - lo_off), blksz);
- rl = zfs_range_lock(zp, lo_off, lo_len, RL_WRITER);
+ lr = rangelock_enter(&zp->z_rangelock, lo_off, lo_len, RL_WRITER);
zfs_vmobject_wlock(object);
if (len + off > object->un_pager.vnp.vnp_size) {
@@ -4707,7 +4708,7 @@ zfs_putpages(struct vnode *vp, vm_page_t *ma, size_t len, int flags,
dmu_tx_commit(tx);
out:
- zfs_range_unlock(rl);
+ rangelock_exit(lr);
if ((flags & (zfs_vm_pagerput_sync | zfs_vm_pagerput_inval)) != 0 ||
zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zfsvfs->z_log, zp->z_id);
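
On the write-back side, zfs_putpages() widens the lock to whole block boundaries so a concurrent reader never observes a partially updated block while dirty pages are pushed out; the lock is held across the DMU transaction and dropped at the out: label, after which the ZIL is committed if the pageout was synchronous. In outline (the elided transaction body is illustrative):

	uint64_t lo_off = rounddown(off, blksz);
	uint64_t lo_len = roundup(len + (off - lo_off), blksz);
	locked_range_t *lr = rangelock_enter(&zp->z_rangelock,
	    lo_off, lo_len, RL_WRITER);

	/* ... dmu_tx_create() / dmu_write() / dmu_tx_commit() ... */

	rangelock_exit(lr);
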