diff --git a/cmd/ztest.c b/cmd/ztest.c
index f77a37c21545..a9fd2acad018 100644
--- a/cmd/ztest.c
+++ b/cmd/ztest.c
@@ -1736,23 +1736,19 @@ ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
 	ztest_rll_unlock(rll);
 }
 
-static rl_t *
-ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
+static void
+ztest_range_lock(ztest_ds_t *zd, rl_t *rl, uint64_t object, uint64_t offset,
     uint64_t size, rl_type_t type)
 {
 	uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
 	rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)];
-	rl_t *rl;
 
-	rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
 	rl->rl_object = object;
 	rl->rl_offset = offset;
 	rl->rl_size = size;
 	rl->rl_lock = rll;
 
 	ztest_rll_lock(rll, type);
-
-	return (rl);
 }
 
 static void
@@ -1761,8 +1757,6 @@ ztest_range_unlock(rl_t *rl)
 	rll_t *rll = rl->rl_lock;
 
 	ztest_rll_unlock(rll);
-
-	umem_free(rl, sizeof (*rl));
 }
 
 static void
@@ -2190,7 +2184,7 @@ ztest_replay_write(void *arg1, void *arg2, boolean_t byteswap)
 	dmu_tx_t *tx;
 	dmu_buf_t *db;
 	arc_buf_t *abuf = NULL;
-	rl_t *rl;
+	rl_t rl;
 
 	if (byteswap)
 		byteswap_uint64_array(lr, sizeof (*lr));
@@ -2214,7 +2208,7 @@ ztest_replay_write(void *arg1, void *arg2, boolean_t byteswap)
 		bt = NULL;
 
 	ztest_object_lock(zd, lr->lr_foid, ZTRL_READER);
-	rl = ztest_range_lock(zd, lr->lr_foid, offset, length, ZTRL_WRITER);
+	ztest_range_lock(zd, &rl, lr->lr_foid, offset, length, ZTRL_WRITER);
 
 	VERIFY0(dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
@@ -2239,7 +2233,7 @@ ztest_replay_write(void *arg1, void *arg2, boolean_t byteswap)
 		if (abuf != NULL)
 			dmu_return_arcbuf(abuf);
 		dmu_buf_rele(db, FTAG);
-		ztest_range_unlock(rl);
+		ztest_range_unlock(&rl);
 		ztest_object_unlock(zd, lr->lr_foid);
 		return (ENOSPC);
 	}
@@ -2298,7 +2292,7 @@ ztest_replay_write(void *arg1, void *arg2, boolean_t byteswap)
 
 	dmu_tx_commit(tx);
 
-	ztest_range_unlock(rl);
+	ztest_range_unlock(&rl);
 	ztest_object_unlock(zd, lr->lr_foid);
 
 	return (0);
@@ -2312,13 +2306,13 @@ ztest_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
 	objset_t *os = zd->zd_os;
 	dmu_tx_t *tx;
 	uint64_t txg;
-	rl_t *rl;
+	rl_t rl;
 
 	if (byteswap)
 		byteswap_uint64_array(lr, sizeof (*lr));
 
 	ztest_object_lock(zd, lr->lr_foid, ZTRL_READER);
-	rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length,
+	ztest_range_lock(zd, &rl, lr->lr_foid, lr->lr_offset, lr->lr_length,
 	    ZTRL_WRITER);
 
 	tx = dmu_tx_create(os);
@@ -2327,7 +2321,7 @@ ztest_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
 
 	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
 	if (txg == 0) {
-		ztest_range_unlock(rl);
+		ztest_range_unlock(&rl);
 		ztest_object_unlock(zd, lr->lr_foid);
 		return (ENOSPC);
 	}
@@ -2339,7 +2333,7 @@ ztest_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
 
 	dmu_tx_commit(tx);
 
-	ztest_range_unlock(rl);
+	ztest_range_unlock(&rl);
 	ztest_object_unlock(zd, lr->lr_foid);
 
 	return (0);
@@ -2455,12 +2449,12 @@ ztest_get_done(zgd_t *zgd, int error)
 {
 	(void) error;
 	ztest_ds_t *zd = zgd->zgd_private;
-	uint64_t object = ((rl_t *)zgd->zgd_lr)->rl_object;
+	uint64_t object = ((rl_t *)&zgd->zgd_lr)->rl_object;
 
 	if (zgd->zgd_db)
 		dmu_buf_rele(zgd->zgd_db, zgd);
 
-	ztest_range_unlock((rl_t *)zgd->zgd_lr);
+	ztest_range_unlock((rl_t *)&zgd->zgd_lr);
 	ztest_object_unlock(zd, object);
 
 	umem_free(zgd, sizeof (*zgd));
@@ -2510,7 +2504,7 @@ ztest_get_data(void *arg, uint64_t arg2, lr_write_t *lr, char *buf,
 	zgd->zgd_private = zd;
 
 	if (buf != NULL) {	/* immediate write */
-		zgd->zgd_lr = (struct zfs_locked_range *)ztest_range_lock(zd,
+		ztest_range_lock(zd, (rl_t *)&zgd->zgd_lr,
 		    object, offset, size, ZTRL_READER);
 
 		error = dmu_read(os, object, offset, size, buf,
@@ -2526,7 +2520,7 @@ ztest_get_data(void *arg, uint64_t arg2, lr_write_t *lr, char *buf,
 			offset = 0;
 		}
 
-		zgd->zgd_lr = (struct zfs_locked_range *)ztest_range_lock(zd,
+		ztest_range_lock(zd, (rl_t *)&zgd->zgd_lr,
 		    object, offset, size, ZTRL_READER);
 
 		error = dmu_buf_hold_noread(os, object, offset, zgd, &db);
@@ -2772,12 +2766,12 @@ ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
 	objset_t *os = zd->zd_os;
 	dmu_tx_t *tx;
 	uint64_t txg;
-	rl_t *rl;
+	rl_t rl;
 
 	txg_wait_synced(dmu_objset_pool(os), 0);
 
 	ztest_object_lock(zd, object, ZTRL_READER);
-	rl = ztest_range_lock(zd, object, offset, size, ZTRL_WRITER);
+	ztest_range_lock(zd, &rl, object, offset, size, ZTRL_WRITER);
 
 	tx = dmu_tx_create(os);
@@ -2793,7 +2787,7 @@ ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
 		(void) dmu_free_long_range(os, object, offset, size);
 	}
 
-	ztest_range_unlock(rl);
+	ztest_range_unlock(&rl);
 	ztest_object_unlock(zd, object);
 }
diff --git a/include/sys/dmu.h b/include/sys/dmu.h
index b5fed64da4ad..df020d87f032 100644
--- a/include/sys/dmu.h
+++ b/include/sys/dmu.h
@@ -49,6 +49,7 @@
 #include
 #include
 #include
+#include <sys/zfs_rlock.h>
 #include
 #include
@@ -1069,8 +1070,8 @@ typedef struct zgd {
 	struct lwb *zgd_lwb;
 	struct blkptr *zgd_bp;
 	dmu_buf_t *zgd_db;
-	struct zfs_locked_range *zgd_lr;
 	void *zgd_private;
+	zfs_locked_range_t zgd_lr;
 } zgd_t;
 
 typedef void dmu_sync_cb_t(zgd_t *arg, int error);
diff --git a/include/sys/zfs_rlock.h b/include/sys/zfs_rlock.h
index 5e5d6d68d6c5..85c99a4d73b7 100644
--- a/include/sys/zfs_rlock.h
+++ b/include/sys/zfs_rlock.h
@@ -69,9 +69,9 @@ typedef struct zfs_locked_range {
 void zfs_rangelock_init(zfs_rangelock_t *, zfs_rangelock_cb_t *, void *);
 void zfs_rangelock_fini(zfs_rangelock_t *);
 
-zfs_locked_range_t *zfs_rangelock_enter(zfs_rangelock_t *,
+void zfs_rangelock_enter(zfs_rangelock_t *, zfs_locked_range_t *,
     uint64_t, uint64_t, zfs_rangelock_type_t);
-zfs_locked_range_t *zfs_rangelock_tryenter(zfs_rangelock_t *,
+boolean_t zfs_rangelock_tryenter(zfs_rangelock_t *, zfs_locked_range_t *,
     uint64_t, uint64_t, zfs_rangelock_type_t);
 void zfs_rangelock_exit(zfs_locked_range_t *);
 void zfs_rangelock_reduce(zfs_locked_range_t *, uint64_t, uint64_t);
diff --git a/module/os/freebsd/zfs/zfs_vnops_os.c b/module/os/freebsd/zfs/zfs_vnops_os.c
index b9b332434bd2..2b86e3e0df29 100644
--- a/module/os/freebsd/zfs/zfs_vnops_os.c
+++ b/module/os/freebsd/zfs/zfs_vnops_os.c
@@ -4055,7 +4055,8 @@ zfs_getpages(struct vnode *vp, vm_page_t *ma, int count, int *rbehind,
 {
 	znode_t *zp = VTOZ(vp);
 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
-	zfs_locked_range_t *lr;
+	zfs_locked_range_t lr;
+	boolean_t res;
 	vm_object_t object;
 	off_t start, end, obj_size;
 	uint_t blksz;
@@ -4074,10 +4075,10 @@ zfs_getpages(struct vnode *vp, vm_page_t *ma, int count, int *rbehind,
 	 */
 	for (;;) {
 		blksz = zp->z_blksz;
-		lr = zfs_rangelock_tryenter(&zp->z_rangelock,
+		res = zfs_rangelock_tryenter(&zp->z_rangelock, &lr,
 		    rounddown(start, blksz),
 		    roundup(end, blksz) - rounddown(start, blksz), RL_READER);
-		if (lr == NULL) {
+		if (res == B_FALSE) {
 			if (rahead != NULL) {
 				*rahead = 0;
 				rahead = NULL;
 			}
@@ -4090,7 +4091,7 @@ zfs_getpages(struct vnode *vp, vm_page_t *ma, int count, int *rbehind,
 		}
 		if (blksz == zp->z_blksz)
 			break;
-		zfs_rangelock_exit(lr);
+		zfs_rangelock_exit(&lr);
 	}
 
 	object = ma[0]->object;
@@ -4098,8 +4099,8 @@ zfs_getpages(struct vnode *vp, vm_page_t *ma, int count, int *rbehind,
 	obj_size = object->un_pager.vnp.vnp_size;
 	zfs_vmobject_wunlock(object);
 	if (IDX_TO_OFF(ma[count - 1]->pindex) >= obj_size) {
-		if (lr != NULL)
-			zfs_rangelock_exit(lr);
+		if (res == B_TRUE)
+			zfs_rangelock_exit(&lr);
 		zfs_exit(zfsvfs, FTAG);
 		return (zfs_vm_pagerret_bad);
 	}
@@ -4127,8 +4128,8 @@ zfs_getpages(struct vnode *vp, vm_page_t *ma, int count, int *rbehind,
 	error = dmu_read_pages(zfsvfs->z_os, zp->z_id, ma, count, &pgsin_b,
 	    &pgsin_a, MIN(end, obj_size) - (end - PAGE_SIZE));
-	if (lr != NULL)
-		zfs_rangelock_exit(lr);
+	if (res == B_TRUE)
+		zfs_rangelock_exit(&lr);
 	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
 	dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, count*PAGE_SIZE);
@@ -4171,7 +4172,7 @@ zfs_putpages(struct vnode *vp, vm_page_t *ma, size_t len, int flags,
 {
 	znode_t *zp = VTOZ(vp);
 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
-	zfs_locked_range_t *lr;
+	zfs_locked_range_t lr;
 	dmu_tx_t *tx;
 	struct sf_buf *sf;
 	vm_object_t object;
@@ -4203,7 +4204,7 @@ zfs_putpages(struct vnode *vp, vm_page_t *ma, size_t len, int flags,
 	blksz = zp->z_blksz;
 	lo_off = rounddown(off, blksz);
 	lo_len = roundup(len + (off - lo_off), blksz);
-	lr = zfs_rangelock_enter(&zp->z_rangelock, lo_off, lo_len, RL_WRITER);
+	zfs_rangelock_enter(&zp->z_rangelock, &lr, lo_off, lo_len, RL_WRITER);
 
 	zfs_vmobject_wlock(object);
 	if (len + off > object->un_pager.vnp.vnp_size) {
@@ -4309,7 +4310,7 @@ zfs_putpages(struct vnode *vp, vm_page_t *ma, size_t len, int flags,
 	dmu_tx_commit(tx);
 
 out:
-	zfs_rangelock_exit(lr);
+	zfs_rangelock_exit(&lr);
 	if (commit)
 		zil_commit(zfsvfs->z_log, zp->z_id);
diff --git a/module/os/freebsd/zfs/zfs_znode.c b/module/os/freebsd/zfs/zfs_znode.c
index 0eea2a849416..0ca93e4e7bc6 100644
--- a/module/os/freebsd/zfs/zfs_znode.c
+++ b/module/os/freebsd/zfs/zfs_znode.c
@@ -1393,20 +1393,21 @@ zfs_extend(znode_t *zp, uint64_t end)
 {
 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
 	dmu_tx_t *tx;
-	zfs_locked_range_t *lr;
+	zfs_locked_range_t lr;
 	uint64_t newblksz;
 	int error;
 
 	/*
 	 * We will change zp_size, lock the whole file.
 	 */
-	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
+	zfs_rangelock_enter(&zp->z_rangelock, &lr, 0, UINT64_MAX,
+	    RL_WRITER);
 
 	/*
 	 * Nothing to do if file already at desired length.
 	 */
 	if (end <= zp->z_size) {
-		zfs_rangelock_exit(lr);
+		zfs_rangelock_exit(&lr);
 		return (0);
 	}
 	tx = dmu_tx_create(zfsvfs->z_os);
@@ -1436,7 +1437,7 @@ zfs_extend(znode_t *zp, uint64_t end)
 	error = dmu_tx_assign(tx, TXG_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
-		zfs_rangelock_exit(lr);
+		zfs_rangelock_exit(&lr);
 		return (error);
 	}
@@ -1450,7 +1451,7 @@ zfs_extend(znode_t *zp, uint64_t end)
 
 	vnode_pager_setsize(ZTOV(zp), end);
 
-	zfs_rangelock_exit(lr);
+	zfs_rangelock_exit(&lr);
 
 	dmu_tx_commit(tx);
 
@@ -1470,19 +1471,19 @@ static int
 zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
 {
 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
-	zfs_locked_range_t *lr;
+	zfs_locked_range_t lr;
 	int error;
 
 	/*
 	 * Lock the range being freed.
 	 */
-	lr = zfs_rangelock_enter(&zp->z_rangelock, off, len, RL_WRITER);
+	zfs_rangelock_enter(&zp->z_rangelock, &lr, off, len, RL_WRITER);
 
 	/*
 	 * Nothing to do if file already at desired length.
 	 */
 	if (off >= zp->z_size) {
-		zfs_rangelock_exit(lr);
+		zfs_rangelock_exit(&lr);
 		return (0);
 	}
@@ -1504,7 +1505,7 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
 #endif
 	}
 
-	zfs_rangelock_exit(lr);
+	zfs_rangelock_exit(&lr);
 
 	return (error);
 }
@@ -1523,7 +1524,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
 	vnode_t *vp = ZTOV(zp);
 	dmu_tx_t *tx;
-	zfs_locked_range_t *lr;
+	zfs_locked_range_t lr;
 	int error;
 	sa_bulk_attr_t bulk[2];
 	int count = 0;
@@ -1531,20 +1532,21 @@ zfs_trunc(znode_t *zp, uint64_t end)
 	/*
 	 * We will change zp_size, lock the whole file.
 	 */
-	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
+	zfs_rangelock_enter(&zp->z_rangelock, &lr, 0, UINT64_MAX,
+	    RL_WRITER);
 
 	/*
 	 * Nothing to do if file already at desired length.
 	 */
 	if (end >= zp->z_size) {
-		zfs_rangelock_exit(lr);
+		zfs_rangelock_exit(&lr);
 		return (0);
 	}
 
 	error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end,
 	    DMU_OBJECT_END);
 	if (error) {
-		zfs_rangelock_exit(lr);
+		zfs_rangelock_exit(&lr);
 		return (error);
 	}
 	tx = dmu_tx_create(zfsvfs->z_os);
@@ -1554,7 +1556,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
 	error = dmu_tx_assign(tx, TXG_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
-		zfs_rangelock_exit(lr);
+		zfs_rangelock_exit(&lr);
 		return (error);
 	}
@@ -1579,7 +1581,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
 	 */
 	vnode_pager_setsize(vp, end);
 
-	zfs_rangelock_exit(lr);
+	zfs_rangelock_exit(&lr);
 
 	return (0);
 }
diff --git a/module/os/freebsd/zfs/zvol_os.c b/module/os/freebsd/zfs/zvol_os.c
index 712ff1b837d7..84a3f5c06bbe 100644
--- a/module/os/freebsd/zfs/zvol_os.c
+++ b/module/os/freebsd/zfs/zvol_os.c
@@ -664,7 +664,7 @@ zvol_geom_bio_strategy(struct bio *bp)
 	size_t resid;
 	char *addr;
 	objset_t *os;
-	zfs_locked_range_t *lr;
+	zfs_locked_range_t lr;
 	int error = 0;
 	boolean_t doread = B_FALSE;
 	boolean_t is_dumpified;
@@ -724,7 +724,7 @@ zvol_geom_bio_strategy(struct bio *bp)
 	 * There must be no buffer changes when doing a dmu_sync() because
 	 * we can't change the data whilst calculating the checksum.
 	 */
-	lr = zfs_rangelock_enter(&zv->zv_rangelock, off, resid,
+	zfs_rangelock_enter(&zv->zv_rangelock, &lr, off, resid,
 	    doread ? RL_READER : RL_WRITER);
 
 	if (bp->bio_cmd == BIO_DELETE) {
@@ -769,7 +769,7 @@ zvol_geom_bio_strategy(struct bio *bp)
 			resid -= size;
 	}
 unlock:
-	zfs_rangelock_exit(lr);
+	zfs_rangelock_exit(&lr);
 
 	bp->bio_completed = bp->bio_length - resid;
 	if (bp->bio_completed < bp->bio_length && off > volsize)
@@ -814,7 +814,7 @@ zvol_cdev_read(struct cdev *dev, struct uio *uio_s, int ioflag)
 {
 	zvol_state_t *zv;
 	uint64_t volsize;
-	zfs_locked_range_t *lr;
+	zfs_locked_range_t lr;
 	int error = 0;
 	zfs_uio_t uio;
 
@@ -833,7 +833,7 @@ zvol_cdev_read(struct cdev *dev, struct uio *uio_s, int ioflag)
 	rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
 	ssize_t start_resid = zfs_uio_resid(&uio);
-	lr = zfs_rangelock_enter(&zv->zv_rangelock, zfs_uio_offset(&uio),
+	zfs_rangelock_enter(&zv->zv_rangelock, &lr, zfs_uio_offset(&uio),
 	    zfs_uio_resid(&uio), RL_READER);
 	while (zfs_uio_resid(&uio) > 0 && zfs_uio_offset(&uio) < volsize) {
 		uint64_t bytes = MIN(zfs_uio_resid(&uio), DMU_MAX_ACCESS >> 1);
@@ -850,7 +850,7 @@ zvol_cdev_read(struct cdev *dev, struct uio *uio_s, int ioflag)
 			break;
 		}
 	}
-	zfs_rangelock_exit(lr);
+	zfs_rangelock_exit(&lr);
 	int64_t nread = start_resid - zfs_uio_resid(&uio);
 	dataset_kstats_update_read_kstats(&zv->zv_kstat, nread);
 	rw_exit(&zv->zv_suspend_lock);
@@ -863,7 +863,7 @@ zvol_cdev_write(struct cdev *dev, struct uio *uio_s, int ioflag)
 {
 	zvol_state_t *zv;
 	uint64_t volsize;
-	zfs_locked_range_t *lr;
+	zfs_locked_range_t lr;
 	int error = 0;
 	boolean_t commit;
 	zfs_uio_t uio;
@@ -885,7 +885,7 @@ zvol_cdev_write(struct cdev *dev, struct uio *uio_s, int ioflag)
 	rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
 	zvol_ensure_zilog(zv);
-	lr = zfs_rangelock_enter(&zv->zv_rangelock, zfs_uio_offset(&uio),
+	zfs_rangelock_enter(&zv->zv_rangelock, &lr, zfs_uio_offset(&uio),
 	    zfs_uio_resid(&uio), RL_WRITER);
 	while (zfs_uio_resid(&uio) > 0 && zfs_uio_offset(&uio) < volsize) {
 		uint64_t bytes = MIN(zfs_uio_resid(&uio), DMU_MAX_ACCESS >> 1);
@@ -909,7 +909,7 @@ zvol_cdev_write(struct cdev *dev, struct uio *uio_s, int ioflag)
 		if (error)
 			break;
 	}
-	zfs_rangelock_exit(lr);
+	zfs_rangelock_exit(&lr);
 	int64_t nwritten = start_resid - zfs_uio_resid(&uio);
 	dataset_kstats_update_write_kstats(&zv->zv_kstat, nwritten);
 	if (commit)
@@ -1100,7 +1100,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
     int fflag, struct thread *td)
 {
 	zvol_state_t *zv;
-	zfs_locked_range_t *lr;
+	zfs_locked_range_t lr;
 	off_t offset, length;
 	int error;
 	boolean_t sync;
@@ -1140,7 +1140,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
 		}
 		rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
 		zvol_ensure_zilog(zv);
-		lr = zfs_rangelock_enter(&zv->zv_rangelock, offset, length,
+		zfs_rangelock_enter(&zv->zv_rangelock, &lr, offset, length,
 		    RL_WRITER);
 		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
 		error = dmu_tx_assign(tx, TXG_WAIT);
@@ -1154,7 +1154,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
 		}
 			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
 			    offset, length);
 		}
-		zfs_rangelock_exit(lr);
+		zfs_rangelock_exit(&lr);
 		if (sync)
 			zil_commit(zv->zv_zilog, ZVOL_OBJ);
 		rw_exit(&zv->zv_suspend_lock);
@@ -1200,10 +1200,10 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
 		hole = (cmd == FIOSEEKHOLE);
 		noff = *off;
-		lr = zfs_rangelock_enter(&zv->zv_rangelock, 0, UINT64_MAX,
+		zfs_rangelock_enter(&zv->zv_rangelock, &lr, 0, UINT64_MAX,
 		    RL_READER);
 		error = dmu_offset_next(zv->zv_objset, ZVOL_OBJ, hole, &noff);
-		zfs_rangelock_exit(lr);
+		zfs_rangelock_exit(&lr);
 		*off = noff;
 		break;
 	}
diff --git a/module/os/linux/zfs/zfs_vnops_os.c b/module/os/linux/zfs/zfs_vnops_os.c
index 1cecad9f7755..bb8e1c654e4c 100644
--- a/module/os/linux/zfs/zfs_vnops_os.c
+++ b/module/os/linux/zfs/zfs_vnops_os.c
@@ -3732,14 +3732,14 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc,
 	redirty_page_for_writepage(wbc, pp);
 	unlock_page(pp);
 
-	zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
-	    pgoff, pglen, RL_WRITER);
+	zfs_locked_range_t lr;
+	zfs_rangelock_enter(&zp->z_rangelock, &lr, pgoff, pglen, RL_WRITER);
 	lock_page(pp);
 
 	/* Page mapping changed or it was no longer dirty, we're done */
 	if (unlikely((mapping != pp->mapping) || !PageDirty(pp))) {
 		unlock_page(pp);
-		zfs_rangelock_exit(lr);
+		zfs_rangelock_exit(&lr);
 		zfs_exit(zfsvfs, FTAG);
 		return (0);
 	}
@@ -3747,7 +3747,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc,
 	/* Another process started write block if required */
 	if (PageWriteback(pp)) {
 		unlock_page(pp);
-		zfs_rangelock_exit(lr);
+		zfs_rangelock_exit(&lr);
 
 		if (wbc->sync_mode != WB_SYNC_NONE) {
 			/*
@@ -3775,7 +3775,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc,
 	/* Clear the dirty flag the required locks are held */
 	if (!clear_page_dirty_for_io(pp)) {
 		unlock_page(pp);
-		zfs_rangelock_exit(lr);
+		zfs_rangelock_exit(&lr);
 		zfs_exit(zfsvfs, FTAG);
 		return (0);
 	}
@@ -3807,7 +3807,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc,
 		end_page_writeback(pp);
 		if (!for_sync)
 			atomic_dec_32(&zp->z_async_writes_cnt);
-		zfs_rangelock_exit(lr);
+		zfs_rangelock_exit(&lr);
 		zfs_exit(zfsvfs, FTAG);
 		return (err);
 	}
@@ -3858,7 +3858,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc,
 
 	dmu_tx_commit(tx);
 
-	zfs_rangelock_exit(lr);
+	zfs_rangelock_exit(&lr);
 
 	if (commit)
 		zil_commit(zfsvfs->z_log, zp->z_id);
diff --git a/module/os/linux/zfs/zfs_znode.c b/module/os/linux/zfs/zfs_znode.c
index b99df188c64b..b6edde334092 100644
--- a/module/os/linux/zfs/zfs_znode.c
+++ b/module/os/linux/zfs/zfs_znode.c
@@ -1519,20 +1519,20 @@ zfs_extend(znode_t *zp, uint64_t end)
 {
 	zfsvfs_t *zfsvfs = ZTOZSB(zp);
 	dmu_tx_t *tx;
-	zfs_locked_range_t *lr;
+	zfs_locked_range_t lr;
 	uint64_t newblksz;
 	int error;
 
 	/*
 	 * We will change zp_size, lock the whole file.
 	 */
-	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
+	zfs_rangelock_enter(&zp->z_rangelock, &lr, 0, UINT64_MAX, RL_WRITER);
 
 	/*
 	 * Nothing to do if file already at desired length.
 	 */
 	if (end <= zp->z_size) {
-		zfs_rangelock_exit(lr);
+		zfs_rangelock_exit(&lr);
 		return (0);
 	}
 	tx = dmu_tx_create(zfsvfs->z_os);
@@ -1562,7 +1562,7 @@ zfs_extend(znode_t *zp, uint64_t end)
 	error = dmu_tx_assign(tx, TXG_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
-		zfs_rangelock_exit(lr);
+		zfs_rangelock_exit(&lr);
 		return (error);
 	}
@@ -1574,7 +1574,7 @@ zfs_extend(znode_t *zp, uint64_t end)
 	VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
 	    &zp->z_size, sizeof (zp->z_size), tx));
 
-	zfs_rangelock_exit(lr);
+	zfs_rangelock_exit(&lr);
 
 	dmu_tx_commit(tx);
 
@@ -1637,19 +1637,19 @@ static int
 zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
 {
 	zfsvfs_t *zfsvfs = ZTOZSB(zp);
-	zfs_locked_range_t *lr;
+	zfs_locked_range_t lr;
 	int error;
 
 	/*
 	 * Lock the range being freed.
 	 */
-	lr = zfs_rangelock_enter(&zp->z_rangelock, off, len, RL_WRITER);
+	zfs_rangelock_enter(&zp->z_rangelock, &lr, off, len, RL_WRITER);
 
 	/*
 	 * Nothing to do if file already at desired length.
 	 */
 	if (off >= zp->z_size) {
-		zfs_rangelock_exit(lr);
+		zfs_rangelock_exit(&lr);
 		return (0);
 	}
@@ -1699,7 +1699,7 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
 			    page_len);
 		}
 	}
-	zfs_rangelock_exit(lr);
+	zfs_rangelock_exit(&lr);
 
 	return (error);
 }
@@ -1717,7 +1717,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
 {
 	zfsvfs_t *zfsvfs = ZTOZSB(zp);
 	dmu_tx_t *tx;
-	zfs_locked_range_t *lr;
+	zfs_locked_range_t lr;
 	int error;
 	sa_bulk_attr_t bulk[2];
 	int count = 0;
@@ -1725,20 +1725,20 @@ zfs_trunc(znode_t *zp, uint64_t end)
 	/*
 	 * We will change zp_size, lock the whole file.
 	 */
-	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
+	zfs_rangelock_enter(&zp->z_rangelock, &lr, 0, UINT64_MAX, RL_WRITER);
 
 	/*
 	 * Nothing to do if file already at desired length.
 	 */
 	if (end >= zp->z_size) {
-		zfs_rangelock_exit(lr);
+		zfs_rangelock_exit(&lr);
 		return (0);
 	}
 
 	error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end,
 	    DMU_OBJECT_END);
 	if (error) {
-		zfs_rangelock_exit(lr);
+		zfs_rangelock_exit(&lr);
 		return (error);
 	}
 	tx = dmu_tx_create(zfsvfs->z_os);
@@ -1748,7 +1748,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
 	error = dmu_tx_assign(tx, TXG_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
-		zfs_rangelock_exit(lr);
+		zfs_rangelock_exit(&lr);
 		return (error);
 	}
@@ -1764,7 +1764,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
 	VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);
 
 	dmu_tx_commit(tx);
-	zfs_rangelock_exit(lr);
+	zfs_rangelock_exit(&lr);
 
 	return (0);
 }
diff --git a/module/os/linux/zfs/zvol_os.c b/module/os/linux/zfs/zvol_os.c
index 3e020e532263..1ef5940b014e 100644
--- a/module/os/linux/zfs/zvol_os.c
+++ b/module/os/linux/zfs/zvol_os.c
@@ -285,8 +285,9 @@ zvol_write(zv_request_t *zvr)
 	boolean_t sync = io_is_fua(bio, rq) ||
 	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
 
-	zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
-	    uio.uio_loffset, uio.uio_resid, RL_WRITER);
+	zfs_locked_range_t lr;
+	zfs_rangelock_enter(&zv->zv_rangelock, &lr, uio.uio_loffset,
+	    uio.uio_resid, RL_WRITER);
 
 	uint64_t volsize = zv->zv_volsize;
 	while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
@@ -314,7 +315,7 @@ zvol_write(zv_request_t *zvr)
 		if (error)
 			break;
 	}
-	zfs_rangelock_exit(lr);
+	zfs_rangelock_exit(&lr);
 
 	int64_t nwritten = start_resid - uio.uio_resid;
 	dataset_kstats_update_write_kstats(&zv->zv_kstat, nwritten);
@@ -391,8 +392,8 @@ zvol_discard(zv_request_t *zvr)
 	if (start >= end)
 		goto unlock;
 
-	zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
-	    start, size, RL_WRITER);
+	zfs_locked_range_t lr;
+	zfs_rangelock_enter(&zv->zv_rangelock, &lr, start, size, RL_WRITER);
 
 	tx = dmu_tx_create(zv->zv_objset);
 	dmu_tx_mark_netfree(tx);
@@ -405,7 +406,7 @@ zvol_discard(zv_request_t *zvr)
 		error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, start,
 		    size);
 	}
-	zfs_rangelock_exit(lr);
+	zfs_rangelock_exit(&lr);
 
 	if (error == 0 && sync)
 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
@@ -463,8 +464,8 @@ zvol_read(zv_request_t *zvr)
 		    bio);
 	}
 
-	zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
-	    uio.uio_loffset, uio.uio_resid, RL_READER);
+	zfs_locked_range_t lr;
+	zfs_rangelock_enter(&zv->zv_rangelock, &lr, uio.uio_loffset,
+	    uio.uio_resid, RL_READER);
 
 	uint64_t volsize = zv->zv_volsize;
 
@@ -483,7 +485,7 @@ zvol_read(zv_request_t *zvr)
 			break;
 		}
 	}
-	zfs_rangelock_exit(lr);
+	zfs_rangelock_exit(&lr);
 
 	int64_t nread = start_resid - uio.uio_resid;
 	dataset_kstats_update_read_kstats(&zv->zv_kstat, nread);
diff --git a/module/zfs/vdev_raidz.c b/module/zfs/vdev_raidz.c
index 15c8b8ca6016..de8b50f20813 100644
--- a/module/zfs/vdev_raidz.c
+++ b/module/zfs/vdev_raidz.c
@@ -2520,7 +2520,8 @@ vdev_raidz_io_start(zio_t *zio)
 	uint64_t logical_width = vdev_raidz_get_logical_width(vdrz,
 	    BP_GET_BIRTH(zio->io_bp));
 	if (logical_width != vdrz->vd_physical_width) {
-		zfs_locked_range_t *lr = NULL;
+		zfs_locked_range_t *lr = kmem_alloc(
+		    sizeof (zfs_locked_range_t), KM_SLEEP);
 		uint64_t synced_offset = UINT64_MAX;
 		uint64_t next_offset = UINT64_MAX;
 		boolean_t use_scratch = B_FALSE;
@@ -2539,7 +2540,7 @@ vdev_raidz_io_start(zio_t *zio)
 		if (vdrz->vn_vre.vre_state == DSS_SCANNING) {
 			ASSERT3P(vd->vdev_spa->spa_raidz_expand, ==,
 			    &vdrz->vn_vre);
-			lr = zfs_rangelock_enter(&vdrz->vn_vre.vre_rangelock,
+			zfs_rangelock_enter(&vdrz->vn_vre.vre_rangelock, lr,
 			    zio->io_offset, zio->io_size, RL_READER);
 
 			use_scratch =
 			    (RRSS_GET_STATE(&vd->vdev_spa->spa_ubsync) ==
@@ -3540,6 +3541,7 @@ vdev_raidz_io_done(zio_t *zio)
 		}
 		if (rm->rm_lr != NULL) {
 			zfs_rangelock_exit(rm->rm_lr);
+			kmem_free(rm->rm_lr, sizeof (zfs_locked_range_t));
 			rm->rm_lr = NULL;
 		}
 	}
@@ -3685,9 +3687,9 @@ raidz_reflow_sync(void *arg, dmu_tx_t *tx)
 	VERIFY3U(vre->vre_failed_offset, >=, old_offset);
 	mutex_exit(&vre->vre_lock);
 
-	zfs_locked_range_t *lr = zfs_rangelock_enter(&vre->vre_rangelock,
-	    old_offset, new_offset - old_offset,
-	    RL_WRITER);
+	zfs_locked_range_t lr;
+	zfs_rangelock_enter(&vre->vre_rangelock, &lr, old_offset,
+	    new_offset - old_offset, RL_WRITER);
 
 	/*
 	 * Update the uberblock that will be written when this txg completes.
@@ -3695,7 +3697,7 @@ raidz_reflow_sync(void *arg, dmu_tx_t *tx)
 	RAIDZ_REFLOW_SET(&spa->spa_uberblock,
 	    RRSS_SCRATCH_INVALID_SYNCED_REFLOW, new_offset);
 	vre->vre_offset_pertxg[txgoff] = 0;
-	zfs_rangelock_exit(lr);
+	zfs_rangelock_exit(&lr);
 
 	mutex_enter(&vre->vre_lock);
 	vre->vre_bytes_copied += vre->vre_bytes_copied_pertxg[txgoff];
@@ -3787,7 +3789,7 @@ raidz_reflow_complete_sync(void *arg, dmu_tx_t *tx)
  */
 typedef struct raidz_reflow_arg {
 	vdev_raidz_expand_t *rra_vre;
-	zfs_locked_range_t *rra_lr;
+	zfs_locked_range_t rra_lr;
 	uint64_t rra_txg;
 } raidz_reflow_arg_t;
 
@@ -3806,11 +3808,11 @@ raidz_reflow_write_done(zio_t *zio)
 	if (zio->io_error != 0) {
 		/* Force a reflow pause on errors */
 		vre->vre_failed_offset =
-		    MIN(vre->vre_failed_offset, rra->rra_lr->lr_offset);
+		    MIN(vre->vre_failed_offset, rra->rra_lr.lr_offset);
 	}
 	ASSERT3U(vre->vre_outstanding_bytes, >=, zio->io_size);
 	vre->vre_outstanding_bytes -= zio->io_size;
-	if (rra->rra_lr->lr_offset + rra->rra_lr->lr_length <
+	if (rra->rra_lr.lr_offset + rra->rra_lr.lr_length <
 	    vre->vre_failed_offset) {
 		vre->vre_bytes_copied_pertxg[rra->rra_txg & TXG_MASK] +=
 		    zio->io_size;
@@ -3818,7 +3820,7 @@ raidz_reflow_write_done(zio_t *zio)
 	cv_signal(&vre->vre_cv);
 	mutex_exit(&vre->vre_lock);
 
-	zfs_rangelock_exit(rra->rra_lr);
+	zfs_rangelock_exit(&rra->rra_lr);
 	kmem_free(rra, sizeof (*rra));
 
 	spa_config_exit(zio->io_spa, SCL_STATE, zio->io_spa);
@@ -3844,8 +3846,8 @@ raidz_reflow_read_done(zio_t *zio)
 	if (zio->io_error != 0 || !vdev_dtl_empty(zio->io_vd, DTL_MISSING)) {
 		zfs_dbgmsg("reflow read failed off=%llu size=%llu txg=%llu "
 		    "err=%u partial_dtl_empty=%u missing_dtl_empty=%u",
-		    (long long)rra->rra_lr->lr_offset,
-		    (long long)rra->rra_lr->lr_length,
+		    (long long)rra->rra_lr.lr_offset,
+		    (long long)rra->rra_lr.lr_length,
 		    (long long)rra->rra_txg,
 		    zio->io_error,
 		    vdev_dtl_empty(zio->io_vd, DTL_PARTIAL),
@@ -3853,7 +3855,7 @@ raidz_reflow_read_done(zio_t *zio)
 		mutex_enter(&vre->vre_lock);
 		/* Force a reflow pause on errors */
 		vre->vre_failed_offset =
-		    MIN(vre->vre_failed_offset, rra->rra_lr->lr_offset);
+		    MIN(vre->vre_failed_offset, rra->rra_lr.lr_offset);
 		mutex_exit(&vre->vre_lock);
 	}
 
@@ -3942,8 +3944,8 @@ raidz_reflow_impl(vdev_t *vd, vdev_raidz_expand_t *vre, range_tree_t *rt,
 	raidz_reflow_arg_t *rra = kmem_zalloc(sizeof (*rra), KM_SLEEP);
 	rra->rra_vre = vre;
-	rra->rra_lr = zfs_rangelock_enter(&vre->vre_rangelock,
-	    offset, length, RL_WRITER);
+	zfs_rangelock_enter(&vre->vre_rangelock, &rra->rra_lr, offset, length,
+	    RL_WRITER);
 	rra->rra_txg = dmu_tx_get_txg(tx);
 
 	raidz_reflow_record_progress(vre, offset + length, tx);
@@ -3962,17 +3964,17 @@ raidz_reflow_impl(vdev_t *vd, vdev_raidz_expand_t *vre, range_tree_t *rt,
 	if (vdev_raidz_expand_child_replacing(vd)) {
 		zfs_dbgmsg("replacing vdev encountered, reflow paused at "
 		    "offset=%llu txg=%llu",
-		    (long long)rra->rra_lr->lr_offset,
+		    (long long)rra->rra_lr.lr_offset,
 		    (long long)rra->rra_txg);
 		mutex_enter(&vre->vre_lock);
 		vre->vre_failed_offset =
-		    MIN(vre->vre_failed_offset, rra->rra_lr->lr_offset);
+		    MIN(vre->vre_failed_offset, rra->rra_lr.lr_offset);
 		cv_signal(&vre->vre_cv);
 		mutex_exit(&vre->vre_lock);
 
 		/* drop everything we acquired */
-		zfs_rangelock_exit(rra->rra_lr);
+		zfs_rangelock_exit(&rra->rra_lr);
 		kmem_free(rra, sizeof (*rra));
 		spa_config_exit(spa, SCL_STATE, spa);
 		return (B_TRUE);
@@ -4055,8 +4057,9 @@ raidz_reflow_scratch_sync(void *arg, dmu_tx_t *tx)
 	VERIFY3U(write_size, <=, VDEV_BOOT_SIZE);
 	VERIFY3U(write_size, <=, read_size);
 
-	zfs_locked_range_t *lr = zfs_rangelock_enter(&vre->vre_rangelock,
-	    0, logical_size, RL_WRITER);
+	zfs_locked_range_t lr;
+	zfs_rangelock_enter(&vre->vre_rangelock, &lr, 0, logical_size,
+	    RL_WRITER);
 
 	abd_t **abds = kmem_alloc(raidvd->vdev_children * sizeof (abd_t *),
 	    KM_SLEEP);
@@ -4118,7 +4121,7 @@ raidz_reflow_scratch_sync(void *arg, dmu_tx_t *tx)
 		for (int i = 0; i < raidvd->vdev_children; i++)
 			abd_free(abds[i]);
 		kmem_free(abds, raidvd->vdev_children * sizeof (abd_t *));
-		zfs_rangelock_exit(lr);
+		zfs_rangelock_exit(&lr);
 		spa_config_exit(spa, SCL_STATE, FTAG);
 		return;
 	}
@@ -4270,7 +4273,7 @@ raidz_reflow_scratch_sync(void *arg, dmu_tx_t *tx)
 	 * Update progress.
 	 */
 	vre->vre_offset = logical_size;
-	zfs_rangelock_exit(lr);
+	zfs_rangelock_exit(&lr);
 	spa_config_exit(spa, SCL_STATE, FTAG);
 
 	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
diff --git a/module/zfs/zfs_rlock.c b/module/zfs/zfs_rlock.c
index f42661df82e4..3c1a85dad1c5 100644
--- a/module/zfs/zfs_rlock.c
+++ b/module/zfs/zfs_rlock.c
@@ -478,25 +478,23 @@ zfs_rangelock_enter_reader(zfs_rangelock_t *rl, zfs_locked_range_t *new,
  * entire file is locked as RL_WRITER), or NULL if nonblock is true and the
  * lock could not be acquired immediately.
  */
-static zfs_locked_range_t *
-zfs_rangelock_enter_impl(zfs_rangelock_t *rl, uint64_t off, uint64_t len,
-    zfs_rangelock_type_t type, boolean_t nonblock)
+static boolean_t
+zfs_rangelock_enter_impl(zfs_rangelock_t *rl, zfs_locked_range_t *lr,
+    uint64_t off, uint64_t len, zfs_rangelock_type_t type, boolean_t nonblock)
 {
-	zfs_locked_range_t *new;
-
+	boolean_t success = B_TRUE;
 	ASSERT(type == RL_READER || type == RL_WRITER || type == RL_APPEND);
 
-	new = kmem_alloc(sizeof (zfs_locked_range_t), KM_SLEEP);
-	new->lr_rangelock = rl;
-	new->lr_offset = off;
+	lr->lr_rangelock = rl;
+	lr->lr_offset = off;
 	if (len + off < off)	/* overflow */
 		len = UINT64_MAX - off;
-	new->lr_length = len;
-	new->lr_count = 1; /* assume it's going to be in the tree */
-	new->lr_type = type;
-	new->lr_proxy = B_FALSE;
-	new->lr_write_wanted = B_FALSE;
-	new->lr_read_wanted = B_FALSE;
+	lr->lr_length = len;
+	lr->lr_count = 1; /* assume it's going to be in the tree */
+	lr->lr_type = type;
+	lr->lr_proxy = B_FALSE;
+	lr->lr_write_wanted = B_FALSE;
+	lr->lr_read_wanted = B_FALSE;
 
 	mutex_enter(&rl->rl_lock);
 	if (type == RL_READER) {
@@ -504,31 +502,29 @@ zfs_rangelock_enter_impl(zfs_rangelock_t *rl, uint64_t off, uint64_t len,
 		 * First check for the usual case of no locks
 		 */
 		if (avl_numnodes(&rl->rl_tree) == 0) {
-			avl_add(&rl->rl_tree, new);
-		} else if (!zfs_rangelock_enter_reader(rl, new, nonblock)) {
-			kmem_free(new, sizeof (*new));
-			new = NULL;
+			avl_add(&rl->rl_tree, lr);
+		} else if (!zfs_rangelock_enter_reader(rl, lr, nonblock)) {
+			success = B_FALSE;
 		}
-	} else if (!zfs_rangelock_enter_writer(rl, new, nonblock)) {
-		kmem_free(new, sizeof (*new));
-		new = NULL;
+	} else if (!zfs_rangelock_enter_writer(rl, lr, nonblock)) {
+		success = B_FALSE;
 	}
 	mutex_exit(&rl->rl_lock);
-	return (new);
+	return (success);
 }
 
-zfs_locked_range_t *
-zfs_rangelock_enter(zfs_rangelock_t *rl, uint64_t off, uint64_t len,
-    zfs_rangelock_type_t type)
+void
+zfs_rangelock_enter(zfs_rangelock_t *rl, zfs_locked_range_t *lr, uint64_t off,
+    uint64_t len, zfs_rangelock_type_t type)
 {
-	return (zfs_rangelock_enter_impl(rl, off, len, type, B_FALSE));
+	(void) zfs_rangelock_enter_impl(rl, lr, off, len, type, B_FALSE);
 }
 
-zfs_locked_range_t *
-zfs_rangelock_tryenter(zfs_rangelock_t *rl, uint64_t off, uint64_t len,
-    zfs_rangelock_type_t type)
+boolean_t
+zfs_rangelock_tryenter(zfs_rangelock_t *rl, zfs_locked_range_t *lr,
+    uint64_t off, uint64_t len, zfs_rangelock_type_t type)
 {
-	return (zfs_rangelock_enter_impl(rl, off, len, type, B_TRUE));
+	return (zfs_rangelock_enter_impl(rl, lr, off, len, type, B_TRUE));
 }
 
 /*
@@ -542,8 +538,6 @@ zfs_rangelock_free(zfs_locked_range_t *lr)
 	if (lr->lr_read_wanted)
 		cv_destroy(&lr->lr_read_cv);
-
-	kmem_free(lr, sizeof (zfs_locked_range_t));
 }
 
 /*
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index f3db953eab46..4167d46bf225 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -105,7 +105,7 @@ zfs_fsync(znode_t *zp, int syncflag, cred_t *cr)
 static int
 zfs_holey_common(znode_t *zp, ulong_t cmd, loff_t *off)
 {
-	zfs_locked_range_t *lr;
+	zfs_locked_range_t lr;
 	uint64_t noff = (uint64_t)*off; /* new offset */
 	uint64_t file_sz;
 	int error;
@@ -125,9 +125,9 @@ zfs_holey_common(znode_t *zp, ulong_t cmd, loff_t *off)
 	if (zn_has_cached_data(zp, 0, file_sz - 1))
 		zn_flush_cached_data(zp, B_TRUE);
 
-	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_READER);
+	zfs_rangelock_enter(&zp->z_rangelock, &lr, 0, UINT64_MAX, RL_READER);
 	error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);
-	zfs_rangelock_exit(lr);
+	zfs_rangelock_exit(&lr);
 
 	if (error == ESRCH)
 		return (SET_ERROR(ENXIO));
@@ -275,7 +275,8 @@ zfs_read(struct znode *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
 	/*
 	 * Lock the range against changes.
 	 */
-	zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
+	zfs_locked_range_t lr;
+	zfs_rangelock_enter(&zp->z_rangelock, &lr,
 	    zfs_uio_offset(uio), zfs_uio_resid(uio), RL_READER);
 
 	/*
@@ -336,7 +337,7 @@ zfs_read(struct znode *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
 	dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
 	task_io_account_read(nread);
 out:
-	zfs_rangelock_exit(lr);
+	zfs_rangelock_exit(&lr);
 
 	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
 	zfs_exit(zfsvfs, FTAG);
@@ -487,15 +488,15 @@ zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
 	/*
 	 * If in append mode, set the io offset pointer to eof.
 	 */
-	zfs_locked_range_t *lr;
+	zfs_locked_range_t lr;
 	if (ioflag & O_APPEND) {
 		/*
 		 * Obtain an appending range lock to guarantee file append
 		 * semantics. We reset the write offset once we have the lock.
 		 */
-		lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
-		woff = lr->lr_offset;
-		if (lr->lr_length == UINT64_MAX) {
+		zfs_rangelock_enter(&zp->z_rangelock, &lr, 0, n, RL_APPEND);
+		woff = lr.lr_offset;
+		if (lr.lr_length == UINT64_MAX) {
 			/*
 			 * We overlocked the file because this write will cause
 			 * the file block size to increase.
@@ -510,11 +511,11 @@ zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
 		 * this write, then this range lock will lock the entire file
 		 * so that we can re-write the block safely.
 		 */
-		lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
+		zfs_rangelock_enter(&zp->z_rangelock, &lr, woff, n, RL_WRITER);
 	}
 
 	if (zn_rlimit_fsize_uio(zp, uio)) {
-		zfs_rangelock_exit(lr);
+		zfs_rangelock_exit(&lr);
 		zfs_exit(zfsvfs, FTAG);
 		return (SET_ERROR(EFBIG));
 	}
@@ -522,7 +523,7 @@ zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
 
 	const rlim64_t limit = MAXOFFSET_T;
 	if (woff >= limit) {
-		zfs_rangelock_exit(lr);
+		zfs_rangelock_exit(&lr);
 		zfs_exit(zfsvfs, FTAG);
 		return (SET_ERROR(EFBIG));
 	}
@@ -557,7 +558,7 @@ zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
 		}
 
 		uint64_t blksz;
-		if (lr->lr_length == UINT64_MAX && zp->z_size <= zp->z_blksz) {
+		if (lr.lr_length == UINT64_MAX && zp->z_size <= zp->z_blksz) {
 			if (zp->z_blksz > zfsvfs->z_max_blksz &&
 			    !ISP2(zp->z_blksz)) {
 				/*
@@ -639,9 +640,9 @@ zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
 		 * on the first iteration since rangelock_reduce() will
 		 * shrink down lr_length to the appropriate size.
 		 */
-		if (lr->lr_length == UINT64_MAX) {
+		if (lr.lr_length == UINT64_MAX) {
 			zfs_grow_blocksize(zp, blksz, tx);
-			zfs_rangelock_reduce(lr, woff, n);
+			zfs_rangelock_reduce(&lr, woff, n);
 		}
 
 		ssize_t tx_bytes;
@@ -768,7 +769,7 @@ zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
 	}
 
 	zfs_znode_update_vfs(zp);
-	zfs_rangelock_exit(lr);
+	zfs_rangelock_exit(&lr);
 
 	/*
 	 * If we're in replay mode, or we made no progress, or the
@@ -890,7 +891,7 @@ zfs_get_data(void *arg, uint64_t gen, lr_write_t *lr, char *buf,
 	 * we don't have to write the data twice.
 	 */
 	if (buf != NULL) {	/* immediate write */
-		zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
+		zfs_rangelock_enter(&zp->z_rangelock, &zgd->zgd_lr,
 		    offset, size, RL_READER);
 		/* test for truncation needs to be done while range locked */
 		if (offset >= zp->z_size) {
@@ -913,12 +914,12 @@ zfs_get_data(void *arg, uint64_t gen, lr_write_t *lr, char *buf,
 			size = zp->z_blksz;
 			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
 			offset -= blkoff;
-			zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
+			zfs_rangelock_enter(&zp->z_rangelock, &zgd->zgd_lr,
 			    offset, size, RL_READER);
 			if (zp->z_blksz == size)
 				break;
 			offset += blkoff;
-			zfs_rangelock_exit(zgd->zgd_lr);
+			zfs_rangelock_exit(&zgd->zgd_lr);
 		}
 		/* test for truncation needs to be done while range locked */
 		if (lr->lr_offset >= zp->z_size)
@@ -985,7 +986,7 @@ zfs_get_done(zgd_t *zgd, int error)
 	if (zgd->zgd_db)
 		dmu_buf_rele(zgd->zgd_db, zgd);
 
-	zfs_rangelock_exit(zgd->zgd_lr);
+	zfs_rangelock_exit(&zgd->zgd_lr);
 
 	/*
 	 * Release the vnode asynchronously as we currently have the
@@ -1052,7 +1053,7 @@ zfs_clone_range(znode_t *inzp, uint64_t *inoffp, znode_t *outzp,
 {
 	zfsvfs_t *inzfsvfs, *outzfsvfs;
 	objset_t *inos, *outos;
-	zfs_locked_range_t *inlr, *outlr;
+	zfs_locked_range_t inlr, outlr;
 	dmu_buf_impl_t *db;
 	dmu_tx_t *tx;
 	zilog_t *zilog;
@@ -1195,14 +1196,14 @@ zfs_clone_range(znode_t *inzp, uint64_t *inoffp, znode_t *outzp,
 	 * Maintain predictable lock order.
 	 */
 	if (inzp < outzp || (inzp == outzp && inoff < outoff)) {
-		inlr = zfs_rangelock_enter(&inzp->z_rangelock, inoff, len,
+		zfs_rangelock_enter(&inzp->z_rangelock, &inlr, inoff, len,
 		    RL_READER);
-		outlr = zfs_rangelock_enter(&outzp->z_rangelock, outoff, len,
+		zfs_rangelock_enter(&outzp->z_rangelock, &outlr, outoff, len,
 		    RL_WRITER);
 	} else {
-		outlr = zfs_rangelock_enter(&outzp->z_rangelock, outoff, len,
+		zfs_rangelock_enter(&outzp->z_rangelock, &outlr, outoff, len,
 		    RL_WRITER);
-		inlr = zfs_rangelock_enter(&inzp->z_rangelock, inoff, len,
+		zfs_rangelock_enter(&inzp->z_rangelock, &inlr, inoff, len,
 		    RL_READER);
 	}
 
@@ -1220,7 +1221,7 @@ zfs_clone_range(znode_t *inzp, uint64_t *inoffp, znode_t *outzp,
 		goto unlock;
 	}
 	if (inblksz != outzp->z_blksz && (outzp->z_size > outzp->z_blksz ||
-	    outlr->lr_length != UINT64_MAX)) {
+	    outlr.lr_length != UINT64_MAX)) {
 		error = SET_ERROR(EINVAL);
 		goto unlock;
 	}
@@ -1352,7 +1353,7 @@ zfs_clone_range(znode_t *inzp, uint64_t *inoffp, znode_t *outzp,
 		 * on the first iteration since zfs_rangelock_reduce() will
 		 * shrink down lr_length to the appropriate size.
 		 */
-		if (outlr->lr_length == UINT64_MAX) {
+		if (outlr.lr_length == UINT64_MAX) {
 			zfs_grow_blocksize(outzp, inblksz, tx);
 
 			/*
@@ -1369,7 +1370,7 @@ zfs_clone_range(znode_t *inzp, uint64_t *inoffp, znode_t *outzp,
 			 * Round range lock up to the block boundary, so we
 			 * prevent appends until we are done.
 			 */
-			zfs_rangelock_reduce(outlr, outoff,
+			zfs_rangelock_reduce(&outlr, outoff,
 			    ((len - 1) / inblksz + 1) * inblksz);
 		}
 
@@ -1423,8 +1424,8 @@ zfs_clone_range(znode_t *inzp, uint64_t *inoffp, znode_t *outzp,
 	zfs_znode_update_vfs(outzp);
 
 unlock:
-	zfs_rangelock_exit(outlr);
-	zfs_rangelock_exit(inlr);
+	zfs_rangelock_exit(&outlr);
+	zfs_rangelock_exit(&inlr);
 
 	if (done > 0) {
 		/*
diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index 5b6a3f5cb410..5ee524047131 100644
--- a/module/zfs/zvol.c
+++ b/module/zfs/zvol.c
@@ -654,7 +654,7 @@ zvol_get_done(zgd_t *zgd, int error)
 	if (zgd->zgd_db)
 		dmu_buf_rele(zgd->zgd_db, zgd);
 
-	zfs_rangelock_exit(zgd->zgd_lr);
+	zfs_rangelock_exit(&zgd->zgd_lr);
 
 	kmem_free(zgd, sizeof (zgd_t));
 }
@@ -687,7 +687,7 @@ zvol_get_data(void *arg, uint64_t arg2, lr_write_t *lr, char *buf,
 	 * we don't have to write the data twice.
 	 */
 	if (buf != NULL) { /* immediate write */
-		zgd->zgd_lr = zfs_rangelock_enter(&zv->zv_rangelock, offset,
+		zfs_rangelock_enter(&zv->zv_rangelock, &zgd->zgd_lr, offset,
 		    size, RL_READER);
 		error = dmu_read_by_dnode(zv->zv_dn, offset, size, buf,
 		    DMU_READ_NO_PREFETCH);
@@ -701,7 +701,7 @@ zvol_get_data(void *arg, uint64_t arg2, lr_write_t *lr, char *buf,
 		 */
 		size = zv->zv_volblocksize;
 		offset = P2ALIGN_TYPED(offset, size, uint64_t);
-		zgd->zgd_lr = zfs_rangelock_enter(&zv->zv_rangelock, offset,
+		zfs_rangelock_enter(&zv->zv_rangelock, &zgd->zgd_lr, offset,
 		    size, RL_READER);
 		error = dmu_buf_hold_noread_by_dnode(zv->zv_dn, offset, zgd, &db);