Skip to content

Commit

Permalink
loop: avoid EAGAIN, if offset or block_size are changed
Browse files Browse the repository at this point in the history
Previously, there was a bug where a user could see a stale buffer cache (e.g., 512B)
attached in the 4KB-sized page cache, when the block size was changed from
512B to 4KB. That was fixed by:
commit 5db470e ("loop: drop caches if offset or block_size are changed")

However, there were some regression reports saying that the fix makes the ioctl
return EAGAIN too easily. So, this patch removes the previously added EAGAIN
condition (nrpages != 0).

Instead, it changes the flow like this:
- sync_blockdev()
- blk_mq_freeze_queue()
 : change the loop configuration
- blk_mq_unfreeze_queue()
- sync_blockdev()
- invalidate_bdev()

After invalidating the buffer cache, we must see the full valid 4KB page.

An additional concern, raised by Bart, is that we can lose some data when
changing lo_offset. To handle that case, this patch uses the following flow:
- sync_blockdev()
- blk_set_queue_dying
- blk_mq_freeze_queue()
 : change the loop configuration
- blk_mq_unfreeze_queue()
- blk_queue_flag_clear(QUEUE_FLAG_DYING);
- sync_blockdev()
- invalidate_bdev()

Report: https://bugs.chromium.org/p/chromium/issues/detail?id=938958#c38

Cc: <[email protected]>
Cc: Jens Axboe <[email protected]>
Cc: [email protected]
Cc: Bart Van Assche <[email protected]>
Fixes: 5db470e ("loop: drop caches if offset or block_size are changed")
Reported-by: Gwendal Grignou <[email protected]>
Reported-by: grygorii tertychnyi <[email protected]>
Reviewed-by: Bart Van Assche <[email protected]>
Signed-off-by: Jaegeuk Kim <[email protected]>
  • Loading branch information
Jaegeuk Kim committed May 30, 2020
1 parent 15dd141 commit e1f972f
Showing 1 changed file with 27 additions and 29 deletions.
56 changes: 27 additions & 29 deletions drivers/block/loop.c
Original file line number Diff line number Diff line change
Expand Up @@ -1247,6 +1247,8 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
kuid_t uid = current_uid();
struct block_device *bdev;
bool partscan = false;
bool drop_request = false;
bool drop_cache = false;

err = mutex_lock_killable(&loop_ctl_mutex);
if (err)
Expand All @@ -1266,14 +1268,21 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
goto out_unlock;
}

if (lo->lo_offset != info->lo_offset)
drop_request = true;
if (lo->lo_offset != info->lo_offset ||
lo->lo_sizelimit != info->lo_sizelimit) {
sync_blockdev(lo->lo_device);
kill_bdev(lo->lo_device);
}
lo->lo_sizelimit != info->lo_sizelimit)
drop_cache = true;

/* I/O need to be drained during transfer transition */
blk_mq_freeze_queue(lo->lo_queue);
sync_blockdev(lo->lo_device);

if (drop_request) {
blk_set_queue_dying(lo->lo_queue);
blk_mq_freeze_queue_wait(lo->lo_queue);
} else {
/* I/O need to be drained during transfer transition */
blk_mq_freeze_queue(lo->lo_queue);
}

err = loop_release_xfer(lo);
if (err)
Expand All @@ -1300,14 +1309,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)

if (lo->lo_offset != info->lo_offset ||
lo->lo_sizelimit != info->lo_sizelimit) {
/* kill_bdev should have truncated all the pages */
if (lo->lo_device->bd_inode->i_mapping->nrpages) {
err = -EAGAIN;
pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
__func__, lo->lo_number, lo->lo_file_name,
lo->lo_device->bd_inode->i_mapping->nrpages);
goto out_unfreeze;
}
if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
err = -EFBIG;
goto out_unfreeze;
Expand Down Expand Up @@ -1344,6 +1345,8 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)

out_unfreeze:
blk_mq_unfreeze_queue(lo->lo_queue);
if (drop_request)
blk_queue_flag_clear(QUEUE_FLAG_DYING, lo->lo_queue);

if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) &&
!(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
Expand All @@ -1352,6 +1355,12 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
bdev = lo->lo_device;
partscan = true;
}

/* truncate stale pages cached by previous operations */
if (!err && drop_cache) {
sync_blockdev(lo->lo_device);
invalidate_bdev(lo->lo_device);
}
out_unlock:
mutex_unlock(&loop_ctl_mutex);
if (partscan)
Expand Down Expand Up @@ -1533,8 +1542,6 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg)

static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
{
int err = 0;

if (lo->lo_state != Lo_bound)
return -ENXIO;

Expand All @@ -1545,27 +1552,18 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
return 0;

sync_blockdev(lo->lo_device);
kill_bdev(lo->lo_device);

blk_mq_freeze_queue(lo->lo_queue);

/* kill_bdev should have truncated all the pages */
if (lo->lo_device->bd_inode->i_mapping->nrpages) {
err = -EAGAIN;
pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
__func__, lo->lo_number, lo->lo_file_name,
lo->lo_device->bd_inode->i_mapping->nrpages);
goto out_unfreeze;
}

blk_queue_logical_block_size(lo->lo_queue, arg);
blk_queue_physical_block_size(lo->lo_queue, arg);
blk_queue_io_min(lo->lo_queue, arg);
loop_update_dio(lo);
out_unfreeze:
blk_mq_unfreeze_queue(lo->lo_queue);

return err;
/* truncate stale pages cached by previous operations */
sync_blockdev(lo->lo_device);
invalidate_bdev(lo->lo_device);
return 0;
}

static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
Expand Down

0 comments on commit e1f972f

Please sign in to comment.