f2fs: Optimize f2fs_truncate_data_blocks_range()
f2fs_invalidate_blocks() can now process a run of consecutive
blocks in a single call, so f2fs_truncate_data_blocks_range() is
optimized to make use of this new capability of
f2fs_invalidate_blocks().

Signed-off-by: Yi Sun <[email protected]>
Signed-off-by: Jaegeuk Kim <[email protected]>
Yi Sun authored and Jaegeuk Kim committed Jan 8, 2025
1 parent b51dca0 commit 5254cce
Showing 1 changed file with 68 additions and 4 deletions.
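The core of the change is a run-coalescing loop: instead of invalidating one block per iteration, f2fs_truncate_data_blocks_range() now accumulates a range [blkaddr_start, blkaddr_end] of consecutive block addresses and issues a single f2fs_invalidate_blocks(sbi, start, len) call per range. The stand-alone user-space sketch below only illustrates that coalescing pattern; it is not kernel code. invalidate_range() is a hypothetical stand-in for the ranged f2fs_invalidate_blocks() call, the address array is made-up example data, and is_consecutive() mirrors the patch's check_curr_block_is_consecutive() test.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t block_t;
#define NULL_ADDR	((block_t)0)	/* mirrors f2fs's "no block" marker */

/* Hypothetical stand-in for f2fs_invalidate_blocks(sbi, start, len). */
static void invalidate_range(block_t start, unsigned int len)
{
	printf("invalidate %u block(s) starting at %u\n", len, (unsigned int)start);
}

/* Same merge test as the patch: equal or immediately following. */
static bool is_consecutive(block_t curr, block_t end)
{
	return curr - end == 1 || curr == end;
}

int main(void)
{
	/* Made-up block addresses, including a hole (NULL_ADDR). */
	block_t addrs[] = { 100, 101, 102, NULL_ADDR, 200, 201, 300 };
	size_t n = sizeof(addrs) / sizeof(addrs[0]);
	block_t start = 0, end = 0;
	bool have_range = false;

	for (size_t i = 0; i < n; i++) {
		block_t blk = addrs[i];

		if (blk == NULL_ADDR) {
			/* Hole: flush whatever range has been accumulated. */
			if (have_range)
				invalidate_range(start, end - start + 1);
			have_range = false;
			continue;
		}

		if (have_range && is_consecutive(blk, end)) {
			/* Extend the current range and defer the call. */
			end = blk;
			continue;
		}

		/* Not mergeable: flush the old range and start a new one. */
		if (have_range)
			invalidate_range(start, end - start + 1);
		start = end = blk;
		have_range = true;
	}

	/* Flush the final accumulated range, if any. */
	if (have_range)
		invalidate_range(start, end - start + 1);

	return 0;
}

Like the patch's helper, is_consecutive() also treats an identical address as mergeable, so a repeated address does not force an extra invalidation call.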
fs/f2fs/file.c
@@ -612,6 +612,15 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
return finish_preallocate_blocks(inode);
}

/*
 * Two blocks can be merged into one invalidation range when the current
 * block address immediately follows, or equals, the end of the range
 * being accumulated.
 */
static bool check_curr_block_is_consecutive(struct f2fs_sb_info *sbi,
					block_t curr, block_t end)
{
	return curr - end == 1 || curr == end;
}

void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
@@ -621,8 +630,27 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
int cluster_index = 0, valid_blocks = 0;
int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
	/*
	 * Temporarily record the block range being accumulated.
	 * When the current @blkaddr and @blkaddr_end can be processed
	 * together, update the value of @blkaddr_end.
	 * When the current @blkaddr is found not to be consecutive with
	 * @blkaddr_end, the accumulated consecutive range
	 * [blkaddr_start, blkaddr_end] must be processed.
	 */
block_t blkaddr_start, blkaddr_end;
	/*
	 * @blkaddr_start and @blkaddr_end may hold NULL_ADDR or other
	 * invalid block addresses, so @last_valid records whether a
	 * valid consecutive range is currently being accumulated and
	 * thus avoids invalidating such invalid blocks.
	 */
bool last_valid = false;
	/* Whether the last @blkaddr needs to be invalidated separately. */
bool last_one = true;
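	/*
	 * Illustrative example: for the block addresses 100 101 102 200,
	 * the loop below ends up calling f2fs_invalidate_blocks(sbi, 100, 3)
	 * and f2fs_invalidate_blocks(sbi, 200, 1) instead of four
	 * single-block invalidations.
	 */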

addr = get_dnode_addr(dn->inode, dn->node_page) + ofs;
blkaddr_start = blkaddr_end = le32_to_cpu(*addr);

/* Assumption: truncation starts with cluster */
for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
@@ -638,24 +666,60 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
}

		if (blkaddr == NULL_ADDR)
			goto next;

f2fs_set_data_blkaddr(dn, NULL_ADDR);

		if (__is_valid_data_blkaddr(blkaddr)) {
			if (time_to_inject(sbi, FAULT_BLKADDR_CONSISTENCE))
				goto next;
			if (!f2fs_is_valid_blkaddr_raw(sbi, blkaddr,
						DATA_GENERIC_ENHANCE))
				goto next;
			if (compressed_cluster)
				valid_blocks++;
		}

if (check_curr_block_is_consecutive(sbi, blkaddr, blkaddr_end)) {
			/*
			 * The current block @blkaddr is consecutive with
			 * @blkaddr_end, so update @blkaddr_end and defer
			 * f2fs_invalidate_blocks() until a @blkaddr that
			 * cannot be merged into the range is encountered.
			 */
blkaddr_end = blkaddr;
if (count == 1)
last_one = false;
else
goto skip_invalid;
}

f2fs_invalidate_blocks(sbi, blkaddr_start,
blkaddr_end - blkaddr_start + 1);
blkaddr_start = blkaddr_end = blkaddr;

if (count == 1 && last_one)
f2fs_invalidate_blocks(sbi, blkaddr, 1);

skip_invalid:
last_valid = true;

if (!released || blkaddr != COMPRESS_ADDR)
nr_free++;

continue;

next:
/* If consecutive blocks have been recorded, we need to process them. */
if (last_valid == true)
f2fs_invalidate_blocks(sbi, blkaddr_start,
blkaddr_end - blkaddr_start + 1);

blkaddr_start = blkaddr_end = le32_to_cpu(*(addr + 1));
last_valid = false;

}

if (compressed_cluster)
