From 0e6895ba00b7be45f3ab0d2107dda3ef1245f5b4 Mon Sep 17 00:00:00 2001 From: Ritesh Harjani Date: Fri, 4 Sep 2020 14:46:53 +0530 Subject: ext4: implement swap_activate aops using iomap After moving ext4's bmap to the iomap interface, swapon on files created using fallocate (which creates unwritten extents) fails. This is because the iomap_bmap interface returns 0 for unwritten extents, so generic_swapfile_activate treats them as holes and bails out with the kernel message below: [340.915835] swapon: swapfile has holes To fix this, implement the ->swap_activate aop in ext4 using ext4_iomap_report_ops. Since we only need to report the list of extents, ext4_iomap_report_ops is sufficient. Cc: stable@kernel.org Reported-by: Yuxuan Shui Fixes: ac58e4fb03f ("ext4: move ext4 bmap to use iomap infrastructure") Signed-off-by: Ritesh Harjani Link: https://lore.kernel.org/r/20200904091653.1014334-1-riteshh@linux.ibm.com Signed-off-by: Theodore Ts'o --- fs/ext4/inode.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index bf596467c234..771ed8b1fadb 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3601,6 +3601,13 @@ static int ext4_set_page_dirty(struct page *page) return __set_page_dirty_buffers(page); } +static int ext4_iomap_swap_activate(struct swap_info_struct *sis, + struct file *file, sector_t *span) +{ + return iomap_swapfile_activate(sis, file, span, + &ext4_iomap_report_ops); +} + static const struct address_space_operations ext4_aops = { .readpage = ext4_readpage, .readahead = ext4_readahead, @@ -3616,6 +3623,7 @@ static const struct address_space_operations ext4_aops = { .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, + .swap_activate = ext4_iomap_swap_activate, }; static const struct address_space_operations ext4_journalled_aops = { @@ -3632,6 +3640,7 @@ static const struct address_space_operations ext4_journalled_aops = { .direct_IO = noop_direct_IO, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, + .swap_activate = ext4_iomap_swap_activate, }; static const struct address_space_operations ext4_da_aops = { @@ -3649,6 +3658,7 @@ static const struct address_space_operations ext4_da_aops = { .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, + .swap_activate = ext4_iomap_swap_activate, }; static const struct address_space_operations ext4_dax_aops = { @@ -3657,6 +3667,7 @@ static const struct address_space_operations ext4_dax_aops = { .set_page_dirty = noop_set_page_dirty, .bmap = ext4_bmap, .invalidatepage = noop_invalidatepage, + .swap_activate = ext4_iomap_swap_activate, }; void ext4_set_aops(struct inode *inode) -- cgit v1.2.3 From 70022da804f0f3f152115688885608c39182082e Mon Sep 17 00:00:00 2001 From: Ye Bin Date: Wed, 16 Sep 2020 19:38:59 +0800 Subject: ext4: fix dead loop in ext4_mb_new_blocks While testing disk offline/online with fsstress running, we found that the fsstress process stays stuck in the running state. kworker/u32:3-262 [004] ...1 140.787471: ext4_mb_discard_preallocations: dev 8,32 needed 114 ....
kworker/u32:3-262 [004] ...1 140.787471: ext4_mb_discard_preallocations: dev 8,32 needed 114 ext4_mb_new_blocks repeat: ext4_mb_discard_preallocations_should_retry(sb, ac, &seq) freed = ext4_mb_discard_preallocations ext4_mb_discard_group_preallocations this_cpu_inc(discard_pa_seq); ---> freed == 0 seq_retry = ext4_get_discard_pa_seq_sum for_each_possible_cpu(__cpu) __seq += per_cpu(discard_pa_seq, __cpu); if (seq_retry != *seq) { *seq = seq_retry; ret = true; } As we can see, seq_retry is the sum of discard_pa_seq over every CPU. Even if ext4_mb_discard_group_preallocations returns zero (nothing was freed), discard_pa_seq on this CPU may still have been incremented by one, so the condition "seq_retry != *seq" is always met and the retry loop never terminates. Ritesh Harjani suggested that ext4_mb_discard_group_preallocations only increment discard_pa_seq when there is actually some PA to free. Fixes: 07b5b8e1ac40 ("ext4: mballoc: introduce pcpu seqcnt for freeing PA to improve ENOSPC handling") Signed-off-by: Ye Bin Reviewed-by: Jan Kara Reviewed-by: Ritesh Harjani Link: https://lore.kernel.org/r/20200916113859.1556397-3-yebin10@huawei.com Signed-off-by: Theodore Ts'o --- fs/ext4/mballoc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 132c118d12e1..ff47347012f4 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4189,7 +4189,6 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, INIT_LIST_HEAD(&list); repeat: ext4_lock_group(sb, group); - this_cpu_inc(discard_pa_seq); list_for_each_entry_safe(pa, tmp, &grp->bb_prealloc_list, pa_group_list) { spin_lock(&pa->pa_lock); @@ -4206,6 +4205,9 @@ repeat: /* seems this one can be freed ... */ ext4_mb_mark_pa_deleted(sb, pa); + if (!free) + this_cpu_inc(discard_pa_seq); + /* we can trust pa_free ... */ free += pa->pa_free; -- cgit v1.2.3 From 5b3dc19dda6691e8ab574e8eede1aef6f02a4f1c Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Thu, 24 Sep 2020 17:09:59 +0200 Subject: ext4: discard preallocations before releasing group lock ext4_mb_discard_group_preallocations() can release the group lock while preallocations are still accumulated on its local list. Thus, although discard_pa_seq was incremented and concurrent allocating processes will retry their allocations, a premature ENOSPC error can be returned because the blocks used by those preallocations are not yet available for reuse. Make sure we always free the locally accumulated preallocations before releasing the group lock.
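A minimal sketch of the ordering this change enforces, in kernel-style C (illustrative only, not the actual mballoc code; pa_is_busy() and release_pa_blocks() are hypothetical stand-ins for the real busy check and freeing logic): preallocations collected on the local list are released while the group lock is still held, and only then is the lock dropped for a possible retry, so concurrent allocators that retry after the discard_pa_seq bump can actually find the freed blocks.

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct pa {
	struct list_head pa_group_list;	/* linked under the group lock */
	struct list_head pa_tmp_list;	/* local list, private to this call */
	int free_blocks;
};

/* Hypothetical stand-ins for the real checks and block freeing. */
static bool pa_is_busy(struct pa *pa)		{ return false; }
static void release_pa_blocks(struct pa *pa)	{ }

static int discard_group_preallocations(spinlock_t *group_lock,
					struct list_head *group_list,
					int needed)
{
	struct pa *pa, *tmp;
	LIST_HEAD(local);
	int free, free_total = 0;
	int busy;

repeat:
	free = 0;
	busy = 0;
	spin_lock(group_lock);

	/* Step 1: move discardable PAs onto a local list. */
	list_for_each_entry_safe(pa, tmp, group_list, pa_group_list) {
		if (pa_is_busy(pa)) {
			busy = 1;
			continue;
		}
		list_del(&pa->pa_group_list);
		list_add(&pa->pa_tmp_list, &local);
		free += pa->free_blocks;
	}

	/* Step 2: free them while the group lock is still held. */
	list_for_each_entry_safe(pa, tmp, &local, pa_tmp_list) {
		list_del(&pa->pa_tmp_list);
		release_pa_blocks(pa);
	}
	free_total += free;

	/* Step 3: only now drop the lock, and retry if busy PAs remain. */
	if (free_total < needed && busy) {
		spin_unlock(group_lock);
		cond_resched();
		goto repeat;
	}
	spin_unlock(group_lock);
	return free_total;
}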
Fixes: 07b5b8e1ac40 ("ext4: mballoc: introduce pcpu seqcnt for freeing PA to improve ENOSPC handling") Signed-off-by: Jan Kara Link: https://lore.kernel.org/r/20200924150959.4335-1-jack@suse.cz Signed-off-by: Theodore Ts'o --- fs/ext4/mballoc.c | 33 +++++++++++++-------------------- 1 file changed, 13 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index ff47347012f4..a8d99f676fb1 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4160,7 +4160,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, struct ext4_buddy e4b; int err; int busy = 0; - int free = 0; + int free, free_total = 0; mb_debug(sb, "discard preallocation for group %u\n", group); if (list_empty(&grp->bb_prealloc_list)) @@ -4188,6 +4188,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, INIT_LIST_HEAD(&list); repeat: + free = 0; ext4_lock_group(sb, group); list_for_each_entry_safe(pa, tmp, &grp->bb_prealloc_list, pa_group_list) { @@ -4217,22 +4218,6 @@ repeat: list_add(&pa->u.pa_tmp_list, &list); } - /* if we still need more blocks and some PAs were used, try again */ - if (free < needed && busy) { - busy = 0; - ext4_unlock_group(sb, group); - cond_resched(); - goto repeat; - } - - /* found anything to free? */ - if (list_empty(&list)) { - BUG_ON(free != 0); - mb_debug(sb, "Someone else may have freed PA for this group %u\n", - group); - goto out; - } - /* now free all selected PAs */ list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { @@ -4250,14 +4235,22 @@ repeat: call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); } -out: + free_total += free; + + /* if we still need more blocks and some PAs were used, try again */ + if (free_total < needed && busy) { + ext4_unlock_group(sb, group); + cond_resched(); + busy = 0; + goto repeat; + } ext4_unlock_group(sb, group); ext4_mb_unload_buddy(&e4b); put_bh(bitmap_bh); out_dbg: mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n", - free, group, grp->bb_free); - return free; + free_total, group, grp->bb_free); + return free_total; } /* -- cgit v1.2.3 From cb8d53d2c97369029cc638c9274ac7be0a316c75 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 22 Sep 2020 09:24:56 -0700 Subject: ext4: fix leaking sysfs kobject after failed mount ext4_unregister_sysfs() only deletes the kobject. The reference to it needs to be put separately, like ext4_put_super() does. This addresses the syzbot report "memory leak in kobject_set_name_vargs (3)" (https://syzkaller.appspot.com/bug?extid=9f864abad79fae7c17e1). Reported-by: syzbot+9f864abad79fae7c17e1@syzkaller.appspotmail.com Fixes: 72ba74508b28 ("ext4: release sysfs kobject when failing to enable quotas on mount") Cc: stable@vger.kernel.org Signed-off-by: Eric Biggers Link: https://lore.kernel.org/r/20200922162456.93657-1-ebiggers@kernel.org Reviewed-by: Jan Kara Signed-off-by: Theodore Ts'o --- fs/ext4/super.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index ea425b49b345..41953b86ffe3 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4872,6 +4872,7 @@ cantfind_ext4: failed_mount8: ext4_unregister_sysfs(sb); + kobject_put(&sbi->s_kobj); failed_mount7: ext4_unregister_li_request(sb); failed_mount6: -- cgit v1.2.3 From 766ef1e101cddc8f5e722f1c0147e9c0499a2d43 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 3 Aug 2020 17:02:11 -0600 Subject: ext4: flag as supporting buffered async reads ext4 uses generic_file_read_iter(), which already supports this. 
Cc: Theodore Ts'o Signed-off-by: Jens Axboe Link: https://lore.kernel.org/r/fb90cc2d-b12c-738f-21a4-dd7a8ae0556a@kernel.dk Signed-off-by: Theodore Ts'o --- fs/ext4/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 7d61069531d3..02ffbd29d6b0 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -844,7 +844,7 @@ static int ext4_file_open(struct inode *inode, struct file *filp) return ret; } - filp->f_mode |= FMODE_NOWAIT; + filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC; return dquot_file_open(inode, filp); } -- cgit v1.2.3 From b483bb77194b4ec462fb23ad3fd6a6b36a635340 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 4 Aug 2020 19:48:50 -0700 Subject: ext4: delete duplicated words + other fixes Delete repeated words in fs/ext4/. {the, this, of, we, after} Also change spelling of "xttr" in inline.c to "xattr" in 2 places. Signed-off-by: Randy Dunlap Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/20200805024850.12129-1-rdunlap@infradead.org Signed-off-by: Theodore Ts'o --- fs/ext4/extents.c | 2 +- fs/ext4/indirect.c | 2 +- fs/ext4/inline.c | 2 +- fs/ext4/inode.c | 2 +- fs/ext4/mballoc.c | 4 ++-- 5 files changed, 6 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index a0481582187a..740e83cffb10 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -4023,7 +4023,7 @@ static int get_implied_cluster_alloc(struct super_block *sb, * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) * - * return > 0, number of of blocks already mapped/allocated + * return > 0, number of blocks already mapped/allocated * if create == 0 and these are pre-allocated blocks * buffer head is unmapped * otherwise blocks are mapped diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index 80c9f33800be..a56ce3873e6d 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -1033,7 +1033,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode, brelse(bh); /* - * Everything below this this pointer has been + * Everything below this pointer has been * released. Now let this top-of-subtree go. * * We want the freeing of this indirect block to be diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 75c97bca0815..caa51473207d 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -354,7 +354,7 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode, if (error) goto out; - /* Update the xttr entry. */ + /* Update the xattr entry. */ i.value = value; i.value_len = len; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 771ed8b1fadb..f1ee0229ac12 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -2785,7 +2785,7 @@ retry: * ext4_journal_stop() can wait for transaction commit * to finish which may depend on writeback of pages to * complete or on page lock to be released. In that - * case, we have to wait until after after we have + * case, we have to wait until after we have * submitted all the IO, released page locks we hold, * and dropped io_end reference (for extent conversion * to be able to complete) before stopping the handle. diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index a8d99f676fb1..c59fd49a7567 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -124,7 +124,7 @@ * /sys/fs/ext4//mb_group_prealloc. The value is represented in * terms of number of blocks. 
If we have mounted the file system with -O * stripe= option the group prealloc request is normalized to the - * the smallest multiple of the stripe value (sbi->s_stripe) which is + * smallest multiple of the stripe value (sbi->s_stripe) which is * greater than the default mb_group_prealloc. * * The regular allocator (using the buddy cache) supports a few tunables. @@ -2019,7 +2019,7 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, /* * IF we have corrupt bitmap, we won't find any * free blocks even though group info says we - * we have free blocks + * have free blocks */ ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, "%d free clusters as per " -- cgit v1.2.3 From 81e8c3c50352f4eede634a89e018e3b3907c2d44 Mon Sep 17 00:00:00 2001 From: Petr Malat Date: Tue, 25 Aug 2020 17:00:16 +0200 Subject: ext4: do not interpret high bytes if 64bit feature is disabled Fields s_free_blocks_count_hi, s_r_blocks_count_hi and s_blocks_count_hi are not valid if EXT4_FEATURE_INCOMPAT_64BIT is not enabled and should be treated as zeroes. Signed-off-by: Petr Malat Link: https://lore.kernel.org/r/20200825150016.3363-1-oss@malat.biz Signed-off-by: Theodore Ts'o --- fs/ext4/ext4.h | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 523e00d7b392..eafb92fe7735 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -3012,22 +3012,24 @@ static inline int ext4_has_group_desc_csum(struct super_block *sb) return ext4_has_feature_gdt_csum(sb) || ext4_has_metadata_csum(sb); } +#define ext4_read_incompat_64bit_val(es, name) \ + (((es)->s_feature_incompat & cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT) \ + ? (ext4_fsblk_t)le32_to_cpu(es->name##_hi) << 32 : 0) | \ + le32_to_cpu(es->name##_lo)) + static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es) { - return ((ext4_fsblk_t)le32_to_cpu(es->s_blocks_count_hi) << 32) | - le32_to_cpu(es->s_blocks_count_lo); + return ext4_read_incompat_64bit_val(es, s_blocks_count); } static inline ext4_fsblk_t ext4_r_blocks_count(struct ext4_super_block *es) { - return ((ext4_fsblk_t)le32_to_cpu(es->s_r_blocks_count_hi) << 32) | - le32_to_cpu(es->s_r_blocks_count_lo); + return ext4_read_incompat_64bit_val(es, s_r_blocks_count); } static inline ext4_fsblk_t ext4_free_blocks_count(struct ext4_super_block *es) { - return ((ext4_fsblk_t)le32_to_cpu(es->s_free_blocks_count_hi) << 32) | - le32_to_cpu(es->s_free_blocks_count_lo); + return ext4_read_incompat_64bit_val(es, s_free_blocks_count); } static inline void ext4_blocks_count_set(struct ext4_super_block *es, -- cgit v1.2.3 From 15ed2851b0f42d0dc42d7172f1eaa0bea57ff807 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Wed, 26 Aug 2020 16:31:16 +0300 Subject: ext4: remove unused argument from ext4_(inc|dec)_count The 'handle' argument is not used for anything so simply remove it. Signed-off-by: Nikolay Borisov Reviewed-by: Ritesh Harjani Link: https://lore.kernel.org/r/20200826133116.11592-1-nborisov@suse.com Signed-off-by: Theodore Ts'o --- fs/ext4/namei.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 153a9fbe1dd0..701ef9fa21c3 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -2554,7 +2554,7 @@ out: * for checking S_ISDIR(inode) (since the INODE_INDEX feature will not be set * on regular files) and to avoid creating huge/slow non-HTREE directories. 
*/ -static void ext4_inc_count(handle_t *handle, struct inode *inode) +static void ext4_inc_count(struct inode *inode) { inc_nlink(inode); if (is_dx(inode) && @@ -2566,7 +2566,7 @@ static void ext4_inc_count(handle_t *handle, struct inode *inode) * If a directory had nlink == 1, then we should let it be 1. This indicates * directory has >EXT4_LINK_MAX subdirs. */ -static void ext4_dec_count(handle_t *handle, struct inode *inode) +static void ext4_dec_count(struct inode *inode) { if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2) drop_nlink(inode); @@ -2825,7 +2825,7 @@ out_clear_inode: iput(inode); goto out_retry; } - ext4_inc_count(handle, dir); + ext4_inc_count(dir); ext4_update_dx_flag(dir); err = ext4_mark_inode_dirty(handle, dir); if (err) @@ -3163,7 +3163,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry) retval = ext4_mark_inode_dirty(handle, inode); if (retval) goto end_rmdir; - ext4_dec_count(handle, dir); + ext4_dec_count(dir); ext4_update_dx_flag(dir); retval = ext4_mark_inode_dirty(handle, dir); @@ -3434,7 +3434,7 @@ retry: ext4_handle_sync(handle); inode->i_ctime = current_time(inode); - ext4_inc_count(handle, inode); + ext4_inc_count(inode); ihold(inode); err = ext4_add_entry(handle, dentry, inode); @@ -3631,9 +3631,9 @@ static void ext4_update_dir_count(handle_t *handle, struct ext4_renament *ent) { if (ent->dir_nlink_delta) { if (ent->dir_nlink_delta == -1) - ext4_dec_count(handle, ent->dir); + ext4_dec_count(ent->dir); else - ext4_inc_count(handle, ent->dir); + ext4_inc_count(ent->dir); ext4_mark_inode_dirty(handle, ent->dir); } } @@ -3845,7 +3845,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, } if (new.inode) { - ext4_dec_count(handle, new.inode); + ext4_dec_count(new.inode); new.inode->i_ctime = current_time(new.inode); } old.dir->i_ctime = old.dir->i_mtime = current_time(old.dir); @@ -3855,14 +3855,14 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, if (retval) goto end_rename; - ext4_dec_count(handle, old.dir); + ext4_dec_count(old.dir); if (new.inode) { /* checked ext4_empty_dir above, can't have another * parent, ext4_dec_count() won't work for many-linked * dirs */ clear_nlink(new.inode); } else { - ext4_inc_count(handle, new.dir); + ext4_inc_count(new.dir); ext4_update_dx_flag(new.dir); retval = ext4_mark_inode_dirty(handle, new.dir); if (unlikely(retval)) -- cgit v1.2.3 From aa2f77920b743c44e02e2dc8474bbf8bd30007a2 Mon Sep 17 00:00:00 2001 From: Xiao Yang Date: Fri, 28 Aug 2020 16:43:30 +0800 Subject: ext4: disallow modifying DAX inode flag if inline_data has been set inline_data is mutually exclusive to DAX so enabling both of them triggers the following issue: ------------------------------------------ # mkfs.ext4 -F -O inline_data /dev/pmem1 ... 
# mount /dev/pmem1 /mnt # echo 'test' >/mnt/file # lsattr -l /mnt/file /mnt/file Inline_Data # xfs_io -c "chattr +x" /mnt/file # xfs_io -c "lsattr -v" /mnt/file [dax] /mnt/file # umount /mnt # mount /dev/pmem1 /mnt # cat /mnt/file cat: /mnt/file: Numerical result out of range ------------------------------------------ Fixes: b383a73f2b83 ("fs/ext4: Introduce DAX inode flag") Signed-off-by: Xiao Yang Reviewed-by: Jan Kara Reviewed-by: Ira Weiny Reviewed-by: Andreas Dilger Link: https://lore.kernel.org/r/20200828084330.15776-1-yangx.jy@cn.fujitsu.com Signed-off-by: Theodore Ts'o --- fs/ext4/ext4.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index eafb92fe7735..d2a4b67319d4 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -492,7 +492,7 @@ struct flex_groups { /* Flags which are mutually exclusive to DAX */ #define EXT4_DAX_MUT_EXCL (EXT4_VERITY_FL | EXT4_ENCRYPT_FL |\ - EXT4_JOURNAL_DATA_FL) + EXT4_JOURNAL_DATA_FL | EXT4_INLINE_DATA_FL) /* Mask out flags that are inappropriate for the given type of inode. */ static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags) -- cgit v1.2.3 From c9e87161cc621cbdcfc472fa0b2d81c63780c8f5 Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Sat, 29 Aug 2020 10:54:02 +0800 Subject: ext4: fix error handling code in add_new_gdb When ext4_journal_get_write_access() fails, we should terminate the execution flow and release n_group_desc, iloc.bh, dind and gdb_bh. Cc: stable@kernel.org Signed-off-by: Dinghao Liu Reviewed-by: Andreas Dilger Link: https://lore.kernel.org/r/20200829025403.3139-1-dinghao.liu@zju.edu.cn Signed-off-by: Theodore Ts'o --- fs/ext4/resize.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index a50b51270ea9..71bf600e5b42 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -843,8 +843,10 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, BUFFER_TRACE(dind, "get_write_access"); err = ext4_journal_get_write_access(handle, dind); - if (unlikely(err)) + if (unlikely(err)) { ext4_std_error(sb, err); + goto errout; + } /* ext4_reserve_inode_write() gets a reference on the iloc */ err = ext4_reserve_inode_write(handle, inode, &iloc); -- cgit v1.2.3 From acaa532687cdc3a03757defafece9c27aa667546 Mon Sep 17 00:00:00 2001 From: Constantine Sapuntzakis Date: Mon, 14 Sep 2020 10:10:14 -0600 Subject: ext4: fix superblock checksum calculation race The race condition could cause the persisted superblock checksum to not match the contents of the superblock, causing the superblock to be considered corrupt. An example of the race follows. A first thread is interrupted in the middle of a checksum calculation. Then, another thread changes the superblock, calculates a new checksum, and sets it. Then, the first thread resumes and sets the checksum based on the older superblock. To fix, serialize the superblock checksum calculation using the buffer header lock. While a spinlock is sufficient, the buffer header is already there and there is precedent for locking it (e.g. in ext4_commit_super). Tested the patch by booting up a kernel with the patch, creating a filesystem and some files (including some orphans), and then unmounting and remounting the file system. 
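To make the race concrete, here is a minimal user-space illustration (ordinary pthreads C, not the ext4 implementation; the structure and checksum are made up): without serialization, a thread that began its checksum calculation earlier can store a stale result over a newer one, which is exactly what taking the buffer lock around the calculate-and-store step prevents.

#include <pthread.h>
#include <stdint.h>

struct sb_image {
	uint64_t fields[16];	/* stands in for the superblock contents */
	uint32_t checksum;
};

static pthread_mutex_t sb_lock = PTHREAD_MUTEX_INITIALIZER;

/* Toy checksum, just to have something to compute. */
static uint32_t csum(const struct sb_image *sb)
{
	uint32_t c = 0;

	for (int i = 0; i < 16; i++)
		c ^= (uint32_t)(sb->fields[i] >> 32) ^ (uint32_t)sb->fields[i];
	return c;
}

/* Racy: two callers can interleave between csum() and the store. */
static void set_checksum_racy(struct sb_image *sb)
{
	sb->checksum = csum(sb);
}

/* Serialized: mirrors the lock_buffer()/unlock_buffer() pairing added by this patch. */
static void set_checksum_locked(struct sb_image *sb)
{
	pthread_mutex_lock(&sb_lock);
	sb->checksum = csum(sb);
	pthread_mutex_unlock(&sb_lock);
}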
Cc: stable@kernel.org Signed-off-by: Constantine Sapuntzakis Reviewed-by: Jan Kara Suggested-by: Jan Kara Link: https://lore.kernel.org/r/20200914161014.22275-1-costa@purestorage.com Signed-off-by: Theodore Ts'o --- fs/ext4/super.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'fs') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 41953b86ffe3..593eb123b3c7 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -201,7 +201,18 @@ void ext4_superblock_csum_set(struct super_block *sb) if (!ext4_has_metadata_csum(sb)) return; + /* + * Locking the superblock prevents the scenario + * where: + * 1) a first thread pauses during checksum calculation. + * 2) a second thread updates the superblock, recalculates + * the checksum, and updates s_checksum + * 3) the first thread resumes and finishes its checksum calculation + * and updates s_checksum with a potentially stale or torn value. + */ + lock_buffer(EXT4_SB(sb)->s_sbh); es->s_checksum = ext4_superblock_csum(sb, es); + unlock_buffer(EXT4_SB(sb)->s_sbh); } ext4_fsblk_t ext4_block_bitmap(struct super_block *sb, -- cgit v1.2.3 From 7eb90a2d6a4817d73165a2a1addd5ead2fcb74b1 Mon Sep 17 00:00:00 2001 From: Tian Tao Date: Fri, 18 Sep 2020 10:46:05 +0800 Subject: ext4: remove unused including Remove including that don't need it. Signed-off-by: Tian Tao Link: https://lore.kernel.org/r/1600397165-42873-1-git-send-email-tiantao6@hisilicon.com Signed-off-by: Theodore Ts'o --- fs/ext4/ext4.h | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index d2a4b67319d4..627600c25a15 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From 9faac62d40131521973192e46a82d5066bb42c09 Mon Sep 17 00:00:00 2001 From: Ritesh Harjani Date: Fri, 18 Sep 2020 10:36:35 +0530 Subject: ext4: optimize file overwrites In case if the file already has underlying blocks/extents allocated then we don't need to start a journal txn and can directly return the underlying mapping. Currently ext4_iomap_begin() is used by both DAX & DIO path. We can check if the write request is an overwrite & then directly return the mapping information. This could give a significant perf boost for multi-threaded writes specially random overwrites. On PPC64 VM with simulated pmem(DAX) device, ~10x perf improvement could be seen in random writes (overwrite). Also bcoz this optimizes away the spinlock contention during jbd2 slab cache allocation (jbd2_journal_handle). On x86 VM, ~2x perf improvement was observed. Reported-by: Dan Williams Reviewed-by: Jan Kara Signed-off-by: Ritesh Harjani Link: https://lore.kernel.org/r/88e795d8a4d5cd22165c7ebe857ba91d68d8813e.1600401668.git.riteshh@linux.ibm.com Signed-off-by: Theodore Ts'o --- fs/ext4/inode.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index f1ee0229ac12..f81f45f5db73 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3436,14 +3436,26 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length, map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits, EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1; - if (flags & IOMAP_WRITE) + if (flags & IOMAP_WRITE) { + /* + * We check here if the blocks are already allocated, then we + * don't need to start a journal txn and we can directly return + * the mapping information. 
This could boost performance + * especially in multi-threaded overwrite requests. + */ + if (offset + length <= i_size_read(inode)) { + ret = ext4_map_blocks(NULL, inode, &map, 0); + if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED)) + goto out; + } ret = ext4_iomap_alloc(inode, &map, flags); - else + } else { ret = ext4_map_blocks(NULL, inode, &map, 0); + } if (ret < 0) return ret; - +out: ext4_set_iomap(inode, iomap, &map, offset, length); return 0; -- cgit v1.2.3 From 2be7d717cafc893d4487cbc7f018d1174537fa2e Mon Sep 17 00:00:00 2001 From: Zhang Qilong Date: Mon, 21 Sep 2020 20:47:38 +0800 Subject: ext4: add trace exit in exception path. Missing trace exit in exception path of ext4_sync_file and ext4_ind_map_blocks. Signed-off-by: Zhang Qilong Link: https://lore.kernel.org/r/20200921124738.23352-1-zhangqilong3@huawei.com Signed-off-by: Theodore Ts'o --- fs/ext4/fsync.c | 2 +- fs/ext4/indirect.c | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index 1d668c8f131f..6476994d9861 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c @@ -150,7 +150,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) ret = file_write_and_wait_range(file, start, end); if (ret) - return ret; + goto out; /* * data=writeback,ordered: diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index a56ce3873e6d..23e504a40cd7 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -593,7 +593,8 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, if (ext4_has_feature_bigalloc(inode->i_sb)) { EXT4_ERROR_INODE(inode, "Can't allocate blocks for " "non-extent mapped inodes with bigalloc"); - return -EFSCORRUPTED; + err = -EFSCORRUPTED; + goto out; } /* Set up for the direct block allocation */ -- cgit v1.2.3 From ee7ed3aa0f08621dbf897d2a98dc6f2c7e7d0335 Mon Sep 17 00:00:00 2001 From: Chunguang Xu Date: Thu, 24 Sep 2020 11:03:42 +0800 Subject: ext4: rename journal_dev to s_journal_dev inside ext4_sb_info Rename journal_dev to s_journal_dev inside ext4_sb_info, keep the naming rules consistent with other variables, which is convenient for code reading and writing. 
Signed-off-by: Chunguang Xu Reviewed-by: Andreas Dilger Reviewed-by: Ritesh Harjani Link: https://lore.kernel.org/r/1600916623-544-1-git-send-email-brookxu@tencent.com Signed-off-by: Theodore Ts'o --- fs/ext4/ext4.h | 2 +- fs/ext4/fsmap.c | 8 ++++---- fs/ext4/super.c | 14 +++++++------- 3 files changed, 12 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 627600c25a15..0c547fce7ff9 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1480,7 +1480,7 @@ struct ext4_sb_info { unsigned long s_commit_interval; u32 s_max_batch_time; u32 s_min_batch_time; - struct block_device *journal_bdev; + struct block_device *s_journal_bdev; #ifdef CONFIG_QUOTA /* Names of quota files with journalled quota */ char __rcu *s_qf_names[EXT4_MAXQUOTAS]; diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c index dbccf46f1770..005c0ae95a0e 100644 --- a/fs/ext4/fsmap.c +++ b/fs/ext4/fsmap.c @@ -571,8 +571,8 @@ static bool ext4_getfsmap_is_valid_device(struct super_block *sb, if (fm->fmr_device == 0 || fm->fmr_device == UINT_MAX || fm->fmr_device == new_encode_dev(sb->s_bdev->bd_dev)) return true; - if (EXT4_SB(sb)->journal_bdev && - fm->fmr_device == new_encode_dev(EXT4_SB(sb)->journal_bdev->bd_dev)) + if (EXT4_SB(sb)->s_journal_bdev && + fm->fmr_device == new_encode_dev(EXT4_SB(sb)->s_journal_bdev->bd_dev)) return true; return false; } @@ -642,9 +642,9 @@ int ext4_getfsmap(struct super_block *sb, struct ext4_fsmap_head *head, memset(handlers, 0, sizeof(handlers)); handlers[0].gfd_dev = new_encode_dev(sb->s_bdev->bd_dev); handlers[0].gfd_fn = ext4_getfsmap_datadev; - if (EXT4_SB(sb)->journal_bdev) { + if (EXT4_SB(sb)->s_journal_bdev) { handlers[1].gfd_dev = new_encode_dev( - EXT4_SB(sb)->journal_bdev->bd_dev); + EXT4_SB(sb)->s_journal_bdev->bd_dev); handlers[1].gfd_fn = ext4_getfsmap_logdev; } diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 593eb123b3c7..b2f654f8c3a3 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -950,10 +950,10 @@ static void ext4_blkdev_put(struct block_device *bdev) static void ext4_blkdev_remove(struct ext4_sb_info *sbi) { struct block_device *bdev; - bdev = sbi->journal_bdev; + bdev = sbi->s_journal_bdev; if (bdev) { ext4_blkdev_put(bdev); - sbi->journal_bdev = NULL; + sbi->s_journal_bdev = NULL; } } @@ -1084,14 +1084,14 @@ static void ext4_put_super(struct super_block *sb) sync_blockdev(sb->s_bdev); invalidate_bdev(sb->s_bdev); - if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) { + if (sbi->s_journal_bdev && sbi->s_journal_bdev != sb->s_bdev) { /* * Invalidate the journal device's buffers. We don't want them * floating about in memory - the physical journal device may * hotswapped, and it breaks the `ro-after' testing code. 
*/ - sync_blockdev(sbi->journal_bdev); - invalidate_bdev(sbi->journal_bdev); + sync_blockdev(sbi->s_journal_bdev); + invalidate_bdev(sbi->s_journal_bdev); ext4_blkdev_remove(sbi); } @@ -3765,7 +3765,7 @@ int ext4_calculate_overhead(struct super_block *sb) * Add the internal journal blocks whether the journal has been * loaded or not */ - if (sbi->s_journal && !sbi->journal_bdev) + if (sbi->s_journal && !sbi->s_journal_bdev) overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen); else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) { /* j_inum for internal journal is non-zero */ @@ -5126,7 +5126,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb, be32_to_cpu(journal->j_superblock->s_nr_users)); goto out_journal; } - EXT4_SB(sb)->journal_bdev = bdev; + EXT4_SB(sb)->s_journal_bdev = bdev; ext4_init_journal_params(sb, journal); return journal; -- cgit v1.2.3 From dd0db94f305c9f5dd44d0ecc7bf29944d4867074 Mon Sep 17 00:00:00 2001 From: Chunguang Xu Date: Thu, 24 Sep 2020 11:03:43 +0800 Subject: ext4: rename system_blks to s_system_blks inside ext4_sb_info Rename system_blks to s_system_blks inside ext4_sb_info, keep the naming rules consistent with other variables, which is convenient for code reading and writing. Signed-off-by: Chunguang Xu Reviewed-by: Andreas Dilger Reviewed-by: Ritesh Harjani Link: https://lore.kernel.org/r/1600916623-544-2-git-send-email-brookxu@tencent.com Signed-off-by: Theodore Ts'o --- fs/ext4/block_validity.c | 10 +++++----- fs/ext4/ext4.h | 2 +- fs/ext4/super.c | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c index c54ba52f2dd4..8e6ca23ed172 100644 --- a/fs/ext4/block_validity.c +++ b/fs/ext4/block_validity.c @@ -131,7 +131,7 @@ static void debug_print_tree(struct ext4_sb_info *sbi) printk(KERN_INFO "System zones: "); rcu_read_lock(); - system_blks = rcu_dereference(sbi->system_blks); + system_blks = rcu_dereference(sbi->s_system_blks); node = rb_first(&system_blks->root); while (node) { entry = rb_entry(node, struct ext4_system_zone, node); @@ -261,7 +261,7 @@ int ext4_setup_system_zone(struct super_block *sb) * with ext4_data_block_valid() accessing the rbtree at the same * time. */ - rcu_assign_pointer(sbi->system_blks, system_blks); + rcu_assign_pointer(sbi->s_system_blks, system_blks); if (test_opt(sb, DEBUG)) debug_print_tree(sbi); @@ -286,9 +286,9 @@ void ext4_release_system_zone(struct super_block *sb) { struct ext4_system_blocks *system_blks; - system_blks = rcu_dereference_protected(EXT4_SB(sb)->system_blks, + system_blks = rcu_dereference_protected(EXT4_SB(sb)->s_system_blks, lockdep_is_held(&sb->s_umount)); - rcu_assign_pointer(EXT4_SB(sb)->system_blks, NULL); + rcu_assign_pointer(EXT4_SB(sb)->s_system_blks, NULL); if (system_blks) call_rcu(&system_blks->rcu, ext4_destroy_system_zone); @@ -319,7 +319,7 @@ int ext4_inode_block_valid(struct inode *inode, ext4_fsblk_t start_blk, * mount option. 
*/ rcu_read_lock(); - system_blks = rcu_dereference(sbi->system_blks); + system_blks = rcu_dereference(sbi->s_system_blks); if (system_blks == NULL) goto out_rcu; diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 0c547fce7ff9..11077696031d 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1487,7 +1487,7 @@ struct ext4_sb_info { int s_jquota_fmt; /* Format of quota to use */ #endif unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */ - struct ext4_system_blocks __rcu *system_blks; + struct ext4_system_blocks __rcu *s_system_blks; #ifdef EXTENTS_STATS /* ext4 extents stats */ diff --git a/fs/ext4/super.c b/fs/ext4/super.c index b2f654f8c3a3..d1ea1c7fc8c4 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -5772,7 +5772,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) * Releasing of existing data is done when we are sure remount will * succeed. */ - if (test_opt(sb, BLOCK_VALIDITY) && !sbi->system_blks) { + if (test_opt(sb, BLOCK_VALIDITY) && !sbi->s_system_blks) { err = ext4_setup_system_zone(sb); if (err) goto restore_opts; @@ -5798,7 +5798,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) } } #endif - if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks) + if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks) ext4_release_system_zone(sb); /* @@ -5821,7 +5821,7 @@ restore_opts: sbi->s_commit_interval = old_opts.s_commit_interval; sbi->s_min_batch_time = old_opts.s_min_batch_time; sbi->s_max_batch_time = old_opts.s_max_batch_time; - if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks) + if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks) ext4_release_system_zone(sb); #ifdef CONFIG_QUOTA sbi->s_jquota_fmt = old_opts.s_jquota_fmt; -- cgit v1.2.3 From 9704a322ea67fdb05fc66cf431fdd01c2424bbd9 Mon Sep 17 00:00:00 2001 From: Zhang Xiaoxu Date: Sun, 27 Sep 2020 22:05:56 -0400 Subject: ext4: fix bdev write error check failed when mount fs with ro Consider a situation when a filesystem was uncleanly shutdown and the orphan list is not empty and a read-only mount is attempted. The orphan list cleanup during mount will fail with: ext4_check_bdev_write_error:193: comm mount: Error while async write back metadata This happens because sbi->s_bdev_wb_err is not initialized when mounting the filesystem in read only mode and so ext4_check_bdev_write_error() falsely triggers. Initialize sbi->s_bdev_wb_err unconditionally to avoid this problem. Fixes: bc71726c7257 ("ext4: abort the filesystem if failed to async write metadata buffer") Cc: stable@kernel.org Signed-off-by: Zhang Xiaoxu Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/20200928020556.710971-1-zhangxiaoxu5@huawei.com Signed-off-by: Theodore Ts'o --- fs/ext4/super.c | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index d1ea1c7fc8c4..61af903e7b32 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4825,9 +4825,8 @@ no_journal: * used to detect the metadata async write error. 
*/ spin_lock_init(&sbi->s_bdev_wb_lock); - if (!sb_rdonly(sb)) - errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err, - &sbi->s_bdev_wb_err); + errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err, + &sbi->s_bdev_wb_err); sb->s_bdev->bd_super = sb; EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS; ext4_orphan_cleanup(sb, es); @@ -5719,14 +5718,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) goto restore_opts; } - /* - * Update the original bdev mapping's wb_err value - * which could be used to detect the metadata async - * write error. - */ - errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err, - &sbi->s_bdev_wb_err); - /* * Mounting a RDONLY partition read-write, so reread * and store the current valid flag. (It may have -- cgit v1.2.3 From 9d1f9b27704009a0032c9a70f8a44cfb331971b5 Mon Sep 17 00:00:00 2001 From: Chunguang Xu Date: Mon, 28 Sep 2020 19:36:34 +0800 Subject: ext4: delete invalid comments near mb_buddy_adjust_border The comment near mb_buddy_adjust_border seems meaningless, just clear it. Signed-off-by: Chunguang Xu Link: https://lore.kernel.org/r/1601292995-32205-1-git-send-email-brookxu@tencent.com Signed-off-by: Theodore Ts'o --- fs/ext4/mballoc.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'fs') diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index c59fd49a7567..4846f885357e 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -1394,9 +1394,6 @@ void ext4_set_bits(void *bm, int cur, int len) } } -/* - * _________________________________________________________________ */ - static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side) { if (mb_test_bit(*bit + side, bitmap)) { -- cgit v1.2.3 From addd752cff27774258f593c68d283725398689c4 Mon Sep 17 00:00:00 2001 From: Chunguang Xu Date: Mon, 28 Sep 2020 19:36:35 +0800 Subject: ext4: make mb_check_counter per group Make bb_check_counter per group, so each group has the same chance to be checked, which can expose errors more easily. Signed-off-by: Chunguang Xu Link: https://lore.kernel.org/r/1601292995-32205-2-git-send-email-brookxu@tencent.com Signed-off-by: Theodore Ts'o --- fs/ext4/ext4.h | 3 +++ fs/ext4/mballoc.c | 7 ++----- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 11077696031d..3e31371795cb 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -3154,6 +3154,9 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset, struct ext4_group_info { unsigned long bb_state; +#ifdef AGGRESSIVE_CHECK + unsigned long bb_check_counter; +#endif struct rb_root bb_free_root; ext4_grpblk_t bb_first_free; /* first free block */ ext4_grpblk_t bb_free; /* total free blocks */ diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 4846f885357e..74a48d6ff9cc 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -619,11 +619,8 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file, void *buddy; void *buddy2; - { - static int mb_check_counter; - if (mb_check_counter++ % 100 != 0) - return 0; - } + if (e4b->bd_info->bb_check_counter++ % 10) + return 0; while (order > 1) { buddy = mb_find_buddy(e4b, order, &max); -- cgit v1.2.3 From af8c53c8bc087459b1aadd4c94805d8272358d79 Mon Sep 17 00:00:00 2001 From: "Darrick J. 
Wong" Date: Thu, 1 Oct 2020 15:21:48 -0700 Subject: ext4: limit entries returned when counting fsmap records If userspace asked fsmap to try to count the number of entries, we cannot return more than UINT_MAX entries because fmh_entries is u32. Therefore, stop counting if we hit this limit or else we will waste time to return truncated results. Fixes: 0c9ec4beecac ("ext4: support GETFSMAP ioctls") Signed-off-by: Darrick J. Wong Link: https://lore.kernel.org/r/20201001222148.GA49520@magnolia Signed-off-by: Theodore Ts'o --- fs/ext4/fsmap.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c index 005c0ae95a0e..b232c2767534 100644 --- a/fs/ext4/fsmap.c +++ b/fs/ext4/fsmap.c @@ -108,6 +108,9 @@ static int ext4_getfsmap_helper(struct super_block *sb, /* Are we just counting mappings? */ if (info->gfi_head->fmh_count == 0) { + if (info->gfi_head->fmh_entries == UINT_MAX) + return EXT4_QUERY_RANGE_ABORT; + if (rec_fsblk > info->gfi_next_fsblk) info->gfi_head->fmh_entries++; -- cgit v1.2.3 From d9befedaafcf3a111428baa7c45b02923eab2d87 Mon Sep 17 00:00:00 2001 From: "zhangyi (F)" Date: Thu, 24 Sep 2020 15:33:31 +0800 Subject: ext4: clear buffer verified flag if read meta block from disk The metadata buffer is no longer trusted after we read it from disk again because it is not uptodate for some reasons (e.g. failed to write back). Otherwise we may get below memory corruption problem in ext4_ext_split()->memset() if we read stale data from the newly allocated extent block on disk which has been failed to async write out but miss verify again since the verified bit has already been set on the buffer. [ 29.774674] BUG: unable to handle kernel paging request at ffff88841949d000 ... [ 29.783317] Oops: 0002 [#2] SMP [ 29.784219] R10: 00000000000f4240 R11: 0000000000002e28 R12: ffff88842fa1c800 [ 29.784627] CPU: 1 PID: 126 Comm: kworker/u4:3 Tainted: G D W [ 29.785546] R13: ffffffff9cddcc20 R14: ffffffff9cddd420 R15: ffff88842fa1c2f8 [ 29.786679] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996),BIOS ?-20190727_0738364 [ 29.787588] FS: 0000000000000000(0000) GS:ffff88842fa00000(0000) knlGS:0000000000000000 [ 29.789288] Workqueue: writeback wb_workfn [ 29.790319] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 29.790321] (flush-8:0) [ 29.790844] CR2: 0000000000000008 CR3: 00000004234f2000 CR4: 00000000000006f0 [ 29.791924] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 29.792839] RIP: 0010:__memset+0x24/0x30 [ 29.793739] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 29.794256] Code: 90 90 90 90 90 90 0f 1f 44 00 00 49 89 f9 48 89 d1 83 e2 07 48 c1 e9 033 [ 29.795161] Kernel panic - not syncing: Fatal exception in interrupt ... [ 29.808149] Call Trace: [ 29.808475] ext4_ext_insert_extent+0x102e/0x1be0 [ 29.809085] ext4_ext_map_blocks+0xa89/0x1bb0 [ 29.809652] ext4_map_blocks+0x290/0x8a0 [ 29.809085] ext4_ext_map_blocks+0xa89/0x1bb0 [ 29.809652] ext4_map_blocks+0x290/0x8a0 [ 29.810161] ext4_writepages+0xc85/0x17c0 ... Fix this by clearing buffer's verified bit if we read meta block from disk again. 
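The rule being enforced can be summarized with a small self-contained sketch (plain C, not the actual buffer-layer code; read_from_disk() is a hypothetical stand-in for submit_bh() plus the wait): a "verified" flag only describes contents that have already been checked, so any path that refills the buffer from disk must clear it first and leave re-verification to the caller.

#include <stdbool.h>

#define EIO 5

struct meta_buf {
	unsigned char data[4096];
	bool uptodate;		/* contents match the on-disk block */
	bool verified;		/* contents passed checksum/sanity checks */
};

/* Hypothetical stand-in for submit_bh() + wait_on_buffer(). */
static int read_from_disk(struct meta_buf *b)
{
	(void)b;
	return 0;
}

static int read_meta_buf(struct meta_buf *b)
{
	if (b->uptodate)
		return 0;

	/*
	 * The block is about to be re-read (e.g. after a failed
	 * write-back), so any earlier verification no longer applies
	 * to what we load next.
	 */
	b->verified = false;

	if (read_from_disk(b))
		return -EIO;
	b->uptodate = true;

	return 0;	/* the caller re-verifies and sets b->verified */
}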
Signed-off-by: zhangyi (F) Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20200924073337.861472-2-yi.zhang@huawei.com Signed-off-by: Theodore Ts'o --- fs/ext4/balloc.c | 1 + fs/ext4/extents.c | 1 + fs/ext4/ialloc.c | 1 + fs/ext4/inode.c | 5 ++++- fs/ext4/super.c | 1 + 5 files changed, 8 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 48c3df47748d..8e7e9715cde9 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -494,6 +494,7 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group, * submit the buffer_head for reading */ set_buffer_new(bh); + clear_buffer_verified(bh); trace_ext4_read_block_bitmap_load(sb, block_group, ignore_locked); bh->b_end_io = ext4_end_bitmap_read; get_bh(bh); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 740e83cffb10..40dbbfff1ba2 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -501,6 +501,7 @@ __read_extent_tree_block(const char *function, unsigned int line, if (!bh_uptodate_or_lock(bh)) { trace_ext4_ext_load_extent(inode, pblk, _RET_IP_); + clear_buffer_verified(bh); err = bh_submit_read(bh); if (err < 0) goto errout; diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index df25d38d6539..20cda952c621 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -188,6 +188,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) /* * submit the buffer_head for reading */ + clear_buffer_verified(bh); trace_ext4_load_inode_bitmap(sb, block_group); bh->b_end_io = ext4_end_bitmap_read; get_bh(bh); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index f81f45f5db73..eae44660a4b5 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -884,6 +884,7 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode, return bh; if (!bh || ext4_buffer_uptodate(bh)) return bh; + clear_buffer_verified(bh); ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh); wait_on_buffer(bh); if (buffer_uptodate(bh)) @@ -909,9 +910,11 @@ int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count, for (i = 0; i < bh_count; i++) /* Note that NULL bhs[i] is valid because of holes. */ - if (bhs[i] && !ext4_buffer_uptodate(bhs[i])) + if (bhs[i] && !ext4_buffer_uptodate(bhs[i])) { + clear_buffer_verified(bhs[i]); ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bhs[i]); + } if (!wait) return 0; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 61af903e7b32..6937ab4dfa67 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -156,6 +156,7 @@ ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags) return ERR_PTR(-ENOMEM); if (ext4_buffer_uptodate(bh)) return bh; + clear_buffer_verified(bh); ll_rw_block(REQ_OP_READ, REQ_META | op_flags, 1, &bh); wait_on_buffer(bh); if (buffer_uptodate(bh)) -- cgit v1.2.3 From fa491b14cd9586ad703606ef0155cd43459d2b32 Mon Sep 17 00:00:00 2001 From: "zhangyi (F)" Date: Thu, 24 Sep 2020 15:33:32 +0800 Subject: ext4: introduce new metadata buffer read helpers The previous patch add clear_buffer_verified() before we read metadata block from disk again, but it's rather easy to miss clearing of this bit because currently we read metadata buffer through different open codes (e.g. ll_rw_block(), bh_submit_read() and invoke submit_bh() directly). So, it's time to add common helpers to unify in all the places reading metadata buffers instead. This patch add 3 helpers: - ext4_read_bh_nowait(): async read metadata buffer if it's actually not uptodate, clear buffer_verified bit before read from disk. 
- ext4_read_bh(): sync version of read metadata buffer, it will wait until the read operation return and check the return status. - ext4_read_bh_lock(): try to lock the buffer before read buffer, it will skip reading if the buffer is already locked. After this patch, we need to use these helpers in all the places reading metadata buffer instead of different open codes. Signed-off-by: zhangyi (F) Suggested-by: Jan Kara Link: https://lore.kernel.org/r/20200924073337.861472-3-yi.zhang@huawei.com Signed-off-by: Theodore Ts'o --- fs/ext4/ext4.h | 5 +++++ fs/ext4/super.c | 62 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 3e31371795cb..199afbf27510 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -2823,6 +2823,11 @@ extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count); /* super.c */ extern struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags); +extern void ext4_read_bh_nowait(struct buffer_head *bh, int op_flags, + bh_end_io_t *end_io); +extern int ext4_read_bh(struct buffer_head *bh, int op_flags, + bh_end_io_t *end_io); +extern int ext4_read_bh_lock(struct buffer_head *bh, int op_flags, bool wait); extern int ext4_seq_options_show(struct seq_file *seq, void *offset); extern int ext4_calculate_overhead(struct super_block *sb); extern void ext4_superblock_csum_set(struct super_block *sb); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 6937ab4dfa67..4cfa95abe9dc 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -141,6 +141,68 @@ MODULE_ALIAS_FS("ext3"); MODULE_ALIAS("ext3"); #define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type) + +static inline void __ext4_read_bh(struct buffer_head *bh, int op_flags, + bh_end_io_t *end_io) +{ + /* + * buffer's verified bit is no longer valid after reading from + * disk again due to write out error, clear it to make sure we + * recheck the buffer contents. + */ + clear_buffer_verified(bh); + + bh->b_end_io = end_io ? end_io : end_buffer_read_sync; + get_bh(bh); + submit_bh(REQ_OP_READ, op_flags, bh); +} + +void ext4_read_bh_nowait(struct buffer_head *bh, int op_flags, + bh_end_io_t *end_io) +{ + BUG_ON(!buffer_locked(bh)); + + if (ext4_buffer_uptodate(bh)) { + unlock_buffer(bh); + return; + } + __ext4_read_bh(bh, op_flags, end_io); +} + +int ext4_read_bh(struct buffer_head *bh, int op_flags, bh_end_io_t *end_io) +{ + BUG_ON(!buffer_locked(bh)); + + if (ext4_buffer_uptodate(bh)) { + unlock_buffer(bh); + return 0; + } + + __ext4_read_bh(bh, op_flags, end_io); + + wait_on_buffer(bh); + if (buffer_uptodate(bh)) + return 0; + return -EIO; +} + +int ext4_read_bh_lock(struct buffer_head *bh, int op_flags, bool wait) +{ + if (trylock_buffer(bh)) { + if (wait) + return ext4_read_bh(bh, op_flags, NULL); + ext4_read_bh_nowait(bh, op_flags, NULL); + return 0; + } + if (wait) { + wait_on_buffer(bh); + if (buffer_uptodate(bh)) + return 0; + return -EIO; + } + return 0; +} + /* * This works like sb_bread() except it uses ERR_PTR for error * returns. Currently with sb_bread it's impossible to distinguish -- cgit v1.2.3 From 2d069c0889ef0decda7af6ecbdc63b680b767749 Mon Sep 17 00:00:00 2001 From: "zhangyi (F)" Date: Thu, 24 Sep 2020 15:33:33 +0800 Subject: ext4: use common helpers in all places reading metadata buffers Revome all open codes that read metadata buffers, switch to use ext4_read_bh_*() common helpers. 
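As a readability aid, this is roughly what a synchronous caller looks like once converted to the new helpers (a sketch mirroring how the follow-up patch reworks ext4_sb_bread(), not an additional change): the open-coded ll_rw_block() plus wait_on_buffer() sequence collapses into one ext4_read_bh_lock() call that also takes care of the verified bit.

static struct buffer_head *read_one_meta_block(struct super_block *sb,
					       sector_t block, int op_flags)
{
	struct buffer_head *bh = sb_getblk(sb, block);
	int ret;

	if (bh == NULL)
		return ERR_PTR(-ENOMEM);
	if (ext4_buffer_uptodate(bh))
		return bh;

	/*
	 * Lock the buffer and, if it is still not uptodate, clear the
	 * verified bit, submit the read and (wait == true) wait for it.
	 */
	ret = ext4_read_bh_lock(bh, REQ_META | op_flags, true);
	if (ret) {
		put_bh(bh);
		return ERR_PTR(ret);
	}
	return bh;
}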
Signed-off-by: zhangyi (F) Suggested-by: Jan Kara Link: https://lore.kernel.org/r/20200924073337.861472-4-yi.zhang@huawei.com Signed-off-by: Theodore Ts'o --- fs/ext4/balloc.c | 8 +++----- fs/ext4/extents.c | 3 +-- fs/ext4/ialloc.c | 6 +----- fs/ext4/indirect.c | 2 +- fs/ext4/inode.c | 35 ++++++++++++++--------------------- fs/ext4/mmp.c | 10 +++------- fs/ext4/move_extent.c | 2 +- fs/ext4/resize.c | 2 +- fs/ext4/super.c | 22 +++++++++++----------- 9 files changed, 36 insertions(+), 54 deletions(-) (limited to 'fs') diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 8e7e9715cde9..dea738ba2acd 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -494,12 +494,10 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group, * submit the buffer_head for reading */ set_buffer_new(bh); - clear_buffer_verified(bh); trace_ext4_read_block_bitmap_load(sb, block_group, ignore_locked); - bh->b_end_io = ext4_end_bitmap_read; - get_bh(bh); - submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO | - (ignore_locked ? REQ_RAHEAD : 0), bh); + ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO | + (ignore_locked ? REQ_RAHEAD : 0), + ext4_end_bitmap_read); return bh; verify: err = ext4_validate_block_bitmap(sb, desc, block_group, bh); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 40dbbfff1ba2..2baf0debd2b7 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -501,8 +501,7 @@ __read_extent_tree_block(const char *function, unsigned int line, if (!bh_uptodate_or_lock(bh)) { trace_ext4_ext_load_extent(inode, pblk, _RET_IP_); - clear_buffer_verified(bh); - err = bh_submit_read(bh); + err = ext4_read_bh(bh, 0, NULL); if (err < 0) goto errout; } diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 20cda952c621..33c0fc0197ce 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -188,12 +188,8 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) /* * submit the buffer_head for reading */ - clear_buffer_verified(bh); trace_ext4_load_inode_bitmap(sb, block_group); - bh->b_end_io = ext4_end_bitmap_read; - get_bh(bh); - submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh); - wait_on_buffer(bh); + ext4_read_bh(bh, REQ_META | REQ_PRIO, ext4_end_bitmap_read); ext4_simulate_fail_bh(sb, bh, EXT4_SIM_IBITMAP_EIO); if (!buffer_uptodate(bh)) { put_bh(bh); diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index 23e504a40cd7..bb4e999116d0 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -163,7 +163,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth, } if (!bh_uptodate_or_lock(bh)) { - if (bh_submit_read(bh) < 0) { + if (ext4_read_bh(bh, 0, NULL) < 0) { put_bh(bh); goto failure; } diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index eae44660a4b5..36d381622677 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -878,19 +878,20 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode, ext4_lblk_t block, int map_flags) { struct buffer_head *bh; + int ret; bh = ext4_getblk(handle, inode, block, map_flags); if (IS_ERR(bh)) return bh; if (!bh || ext4_buffer_uptodate(bh)) return bh; - clear_buffer_verified(bh); - ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh); - wait_on_buffer(bh); - if (buffer_uptodate(bh)) - return bh; - put_bh(bh); - return ERR_PTR(-EIO); + + ret = ext4_read_bh_lock(bh, REQ_META | REQ_PRIO, true); + if (ret) { + put_bh(bh); + return ERR_PTR(ret); + } + return bh; } /* Read a contiguous batch of blocks. 
*/ @@ -910,11 +911,8 @@ int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count, for (i = 0; i < bh_count; i++) /* Note that NULL bhs[i] is valid because of holes. */ - if (bhs[i] && !ext4_buffer_uptodate(bhs[i])) { - clear_buffer_verified(bhs[i]); - ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, - &bhs[i]); - } + if (bhs[i] && !ext4_buffer_uptodate(bhs[i])) + ext4_read_bh_lock(bhs[i], REQ_META | REQ_PRIO, false); if (!wait) return 0; @@ -1084,7 +1082,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh) && (block_start < from || block_end > to)) { - ll_rw_block(REQ_OP_READ, 0, 1, &bh); + ext4_read_bh_lock(bh, 0, false); wait[nr_wait++] = bh; } } @@ -3756,11 +3754,8 @@ static int __ext4_block_zero_page_range(handle_t *handle, set_buffer_uptodate(bh); if (!buffer_uptodate(bh)) { - err = -EIO; - ll_rw_block(REQ_OP_READ, 0, 1, &bh); - wait_on_buffer(bh); - /* Uhhuh. Read error. Complain and punt. */ - if (!buffer_uptodate(bh)) + err = ext4_read_bh_lock(bh, 0, true); + if (err) goto unlock; if (fscrypt_inode_uses_fs_layer_crypto(inode)) { /* We expect the key to be set. */ @@ -4404,9 +4399,7 @@ make_io: * Read the block from disk. */ trace_ext4_load_inode(inode); - get_bh(bh); - bh->b_end_io = end_buffer_read_sync; - submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh); + ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL); blk_finish_plug(&plug); wait_on_buffer(bh); if (!buffer_uptodate(bh)) { diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c index d34cb8c46655..795c3ff2907c 100644 --- a/fs/ext4/mmp.c +++ b/fs/ext4/mmp.c @@ -85,15 +85,11 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh, } } - get_bh(*bh); lock_buffer(*bh); - (*bh)->b_end_io = end_buffer_read_sync; - submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, *bh); - wait_on_buffer(*bh); - if (!buffer_uptodate(*bh)) { - ret = -EIO; + ret = ext4_read_bh(*bh, REQ_META | REQ_PRIO, NULL); + if (ret) goto warn_exit; - } + mmp = (struct mmp_struct *)((*bh)->b_data); if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC) { ret = -EFSCORRUPTED; diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 0d601b822875..64a579734f93 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -215,7 +215,7 @@ mext_page_mkuptodate(struct page *page, unsigned from, unsigned to) for (i = 0; i < nr; i++) { bh = arr[i]; if (!bh_uptodate_or_lock(bh)) { - err = bh_submit_read(bh); + err = ext4_read_bh(bh, 0, NULL); if (err) return err; } diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index 71bf600e5b42..828d8581d1c7 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -1245,7 +1245,7 @@ static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block) if (unlikely(!bh)) return NULL; if (!bh_uptodate_or_lock(bh)) { - if (bh_submit_read(bh) < 0) { + if (ext4_read_bh(bh, 0, NULL) < 0) { brelse(bh); return NULL; } diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 4cfa95abe9dc..77492cc12807 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -212,19 +212,21 @@ int ext4_read_bh_lock(struct buffer_head *bh, int op_flags, bool wait) struct buffer_head * ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags) { - struct buffer_head *bh = sb_getblk(sb, block); + struct buffer_head *bh; + int ret; + bh = sb_getblk(sb, block); if (bh == NULL) return ERR_PTR(-ENOMEM); if (ext4_buffer_uptodate(bh)) return bh; - clear_buffer_verified(bh); - ll_rw_block(REQ_OP_READ, REQ_META | op_flags, 1, &bh); - 
wait_on_buffer(bh); - if (buffer_uptodate(bh)) - return bh; - put_bh(bh); - return ERR_PTR(-EIO); + + ret = ext4_read_bh_lock(bh, REQ_META | op_flags, true); + if (ret) { + put_bh(bh); + return ERR_PTR(ret); + } + return bh; } static int ext4_verify_csum_type(struct super_block *sb, @@ -5176,9 +5178,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb, goto out_bdev; } journal->j_private = sb; - ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer); - wait_on_buffer(journal->j_sb_buffer); - if (!buffer_uptodate(journal->j_sb_buffer)) { + if (ext4_read_bh_lock(journal->j_sb_buffer, REQ_META | REQ_PRIO, true)) { ext4_msg(sb, KERN_ERR, "I/O error on journal device"); goto out_journal; } -- cgit v1.2.3 From 60c776e50bc52a9334be8a49415479d5cb6ca2e2 Mon Sep 17 00:00:00 2001 From: "zhangyi (F)" Date: Thu, 24 Sep 2020 15:33:34 +0800 Subject: ext4: use ext4_buffer_uptodate() in __ext4_get_inode_loc() We have already introduced ext4_buffer_uptodate() to re-set the uptodate bit on buffer which has been failed to write out to disk. Just remove the redundant codes and switch to use ext4_buffer_uptodate() in __ext4_get_inode_loc(). Signed-off-by: zhangyi (F) Link: https://lore.kernel.org/r/20200924073337.861472-5-yi.zhang@huawei.com Signed-off-by: Theodore Ts'o --- fs/ext4/inode.c | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 36d381622677..39c02dc79a72 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4310,16 +4310,7 @@ static int __ext4_get_inode_loc(struct inode *inode, if (!buffer_uptodate(bh)) { lock_buffer(bh); - /* - * If the buffer has the write error flag, we have failed - * to write out another inode in the same block. In this - * case, we don't have to read the block because we may - * read the old inode data successfully. - */ - if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) - set_buffer_uptodate(bh); - - if (buffer_uptodate(bh)) { + if (ext4_buffer_uptodate(bh)) { /* someone brought it uptodate while we waited */ unlock_buffer(bh); goto has_buffer; -- cgit v1.2.3 From 5df1d4123d53261d9d71c7d237d0f165add7ce72 Mon Sep 17 00:00:00 2001 From: "zhangyi (F)" Date: Thu, 24 Sep 2020 15:33:35 +0800 Subject: ext4: introduce ext4_sb_breadahead_unmovable() to replace sb_breadahead_unmovable() If we readahead inode tables in __ext4_get_inode_loc(), it may bypass buffer_write_io_error() check, so introduce ext4_sb_breadahead_unmovable() to handle this special case. This patch also replace sb_breadahead_unmovable() in ext4_fill_super() for the sake of unification. 
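For illustration only (not part of this patch), a minimal caller sketch, assuming "sb", "start" and "end" already exist in the caller; it mirrors the inode-table readahead loop in __ext4_get_inode_loc() below. The helper is a best-effort hint, so it returns void and errors are deliberately ignored:

	/*
	 * Kick off non-blocking readahead for a range of metadata blocks.
	 * ext4_read_bh_lock(..., false) inside the helper is assumed to
	 * skip buffers that are already uptodate or busy, so this loop
	 * never waits for I/O.
	 */
	ext4_fsblk_t b;

	for (b = start; b <= end; b++)
		ext4_sb_breadahead_unmovable(sb, b);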
Signed-off-by: zhangyi (F) Link: https://lore.kernel.org/r/20200924073337.861472-6-yi.zhang@huawei.com Signed-off-by: Theodore Ts'o --- fs/ext4/ext4.h | 1 + fs/ext4/inode.c | 2 +- fs/ext4/super.c | 12 +++++++++++- 3 files changed, 13 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 199afbf27510..caef3aab1588 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -2828,6 +2828,7 @@ extern void ext4_read_bh_nowait(struct buffer_head *bh, int op_flags, extern int ext4_read_bh(struct buffer_head *bh, int op_flags, bh_end_io_t *end_io); extern int ext4_read_bh_lock(struct buffer_head *bh, int op_flags, bool wait); +extern void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block); extern int ext4_seq_options_show(struct seq_file *seq, void *offset); extern int ext4_calculate_overhead(struct super_block *sb); extern void ext4_superblock_csum_set(struct super_block *sb); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 39c02dc79a72..3630b4900e69 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4381,7 +4381,7 @@ make_io: if (end > table) end = table; while (b <= end) - sb_breadahead_unmovable(sb, b++); + ext4_sb_breadahead_unmovable(sb, b++); } /* diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 77492cc12807..4a0887e36d45 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -229,6 +229,16 @@ ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags) return bh; } +void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block) +{ + struct buffer_head *bh = sb_getblk_gfp(sb, block, 0); + + if (likely(bh)) { + ext4_read_bh_lock(bh, REQ_RAHEAD, false); + brelse(bh); + } +} + static int ext4_verify_csum_type(struct super_block *sb, struct ext4_super_block *es) { @@ -4556,7 +4566,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) /* Pre-read the descriptors into the buffer cache */ for (i = 0; i < db_count; i++) { block = descriptor_loc(sb, logical_sb_block, i); - sb_breadahead_unmovable(sb, block); + ext4_sb_breadahead_unmovable(sb, block); } for (i = 0; i < db_count; i++) { -- cgit v1.2.3 From 0a846f496db1d3996341e140364aa58563d1ebe1 Mon Sep 17 00:00:00 2001 From: "zhangyi (F)" Date: Thu, 24 Sep 2020 15:33:36 +0800 Subject: ext4: use ext4_sb_bread() instead of sb_bread() We have already remove open codes that invoke helpers provide by fs/buffer.c in all places reading metadata buffers. This patch switch to use ext4_sb_bread() to replace all sb_bread() helpers, which is ext4_read_bh() helper back end. Signed-off-by: zhangyi (F) Link: https://lore.kernel.org/r/20200924073337.861472-7-yi.zhang@huawei.com Signed-off-by: Theodore Ts'o --- fs/ext4/indirect.c | 6 +++--- fs/ext4/resize.c | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index bb4e999116d0..05efa682bc2f 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -1013,14 +1013,14 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode, } /* Go read the buffer for the next level down */ - bh = sb_bread(inode->i_sb, nr); + bh = ext4_sb_bread(inode->i_sb, nr, 0); /* * A read failure? Report error and clear slot * (should be rare). 
*/ - if (!bh) { - ext4_error_inode_block(inode, nr, EIO, + if (IS_ERR(bh)) { + ext4_error_inode_block(inode, nr, -PTR_ERR(bh), "Read failure"); continue; } diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index 828d8581d1c7..928700d57eb6 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -1808,8 +1808,8 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es, o_blocks_count + add, add); /* See if the device is actually as big as what was requested */ - bh = sb_bread(sb, o_blocks_count + add - 1); - if (!bh) { + bh = ext4_sb_bread(sb, o_blocks_count + add - 1, 0); + if (IS_ERR(bh)) { ext4_warning(sb, "can't read last block, resize aborted"); return -ENOSPC; } @@ -1934,8 +1934,8 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count) int meta_bg; /* See if the device is actually as big as what was requested */ - bh = sb_bread(sb, n_blocks_count - 1); - if (!bh) { + bh = ext4_sb_bread(sb, n_blocks_count - 1, 0); + if (IS_ERR(bh)) { ext4_warning(sb, "can't read last block, resize aborted"); return -ENOSPC; } -- cgit v1.2.3 From 8394a6abf3aeaa02e30fd33c248adab9fbe6fd6c Mon Sep 17 00:00:00 2001 From: "zhangyi (F)" Date: Thu, 24 Sep 2020 15:33:37 +0800 Subject: ext4: introduce ext4_sb_bread_unmovable() to replace sb_bread_unmovable() Now we only use sb_bread_unmovable() to read superblock and descriptor block at mount time, so there is no opportunity that we need to clear buffer verified bit and also handle buffer write_io error bit. But for the sake of unification, let's introduce ext4_sb_bread_unmovable() to replace all sb_bread_unmovable(). After this patch, we stop using read helpers in fs/buffer.c. Signed-off-by: zhangyi (F) Link: https://lore.kernel.org/r/20200924073337.861472-8-yi.zhang@huawei.com Signed-off-by: Theodore Ts'o --- fs/ext4/ext4.h | 2 ++ fs/ext4/super.c | 38 +++++++++++++++++++++++++++++--------- 2 files changed, 31 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index caef3aab1588..1879531a119f 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -2823,6 +2823,8 @@ extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count); /* super.c */ extern struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags); +extern struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb, + sector_t block); extern void ext4_read_bh_nowait(struct buffer_head *bh, int op_flags, bh_end_io_t *end_io); extern int ext4_read_bh(struct buffer_head *bh, int op_flags, diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 4a0887e36d45..0ee673e8e261 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -204,18 +204,19 @@ int ext4_read_bh_lock(struct buffer_head *bh, int op_flags, bool wait) } /* - * This works like sb_bread() except it uses ERR_PTR for error + * This works like __bread_gfp() except it uses ERR_PTR for error * returns. Currently with sb_bread it's impossible to distinguish * between ENOMEM and EIO situations (since both result in a NULL * return. 
*/ -struct buffer_head * -ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags) +static struct buffer_head *__ext4_sb_bread_gfp(struct super_block *sb, + sector_t block, int op_flags, + gfp_t gfp) { struct buffer_head *bh; int ret; - bh = sb_getblk(sb, block); + bh = sb_getblk_gfp(sb, block, gfp); if (bh == NULL) return ERR_PTR(-ENOMEM); if (ext4_buffer_uptodate(bh)) @@ -229,6 +230,18 @@ ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags) return bh; } +struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block, + int op_flags) +{ + return __ext4_sb_bread_gfp(sb, block, op_flags, __GFP_MOVABLE); +} + +struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb, + sector_t block) +{ + return __ext4_sb_bread_gfp(sb, block, 0, 0); +} + void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block) { struct buffer_head *bh = sb_getblk_gfp(sb, block, 0); @@ -3954,8 +3967,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) logical_sb_block = sb_block; } - if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) { + bh = ext4_sb_bread_unmovable(sb, logical_sb_block); + if (IS_ERR(bh)) { ext4_msg(sb, KERN_ERR, "unable to read superblock"); + ret = PTR_ERR(bh); + bh = NULL; goto out_fail; } /* @@ -4351,10 +4367,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) brelse(bh); logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE; offset = do_div(logical_sb_block, blocksize); - bh = sb_bread_unmovable(sb, logical_sb_block); - if (!bh) { + bh = ext4_sb_bread_unmovable(sb, logical_sb_block); + if (IS_ERR(bh)) { ext4_msg(sb, KERN_ERR, "Can't read superblock on 2nd try"); + ret = PTR_ERR(bh); + bh = NULL; goto failed_mount; } es = (struct ext4_super_block *)(bh->b_data + offset); @@ -4573,11 +4591,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) struct buffer_head *bh; block = descriptor_loc(sb, logical_sb_block, i); - bh = sb_bread_unmovable(sb, block); - if (!bh) { + bh = ext4_sb_bread_unmovable(sb, block); + if (IS_ERR(bh)) { ext4_msg(sb, KERN_ERR, "can't read group descriptor %d", i); db_count = i; + ret = PTR_ERR(bh); + bh = NULL; goto failed_mount2; } rcu_read_lock(); -- cgit v1.2.3 From aa3c0c61f62d682259e3e66cdc01846290f9cd6c Mon Sep 17 00:00:00 2001 From: Mauricio Faria de Oliveira Date: Mon, 5 Oct 2020 21:48:38 -0300 Subject: jbd2: introduce/export functions jbd2_journal_submit|finish_inode_data_buffers() Export functions that implement the current behavior done for an inode in journal_submit|finish_inode_data_buffers(). No functional change. Signed-off-by: Mauricio Faria de Oliveira Suggested-by: Jan Kara Reviewed-by: Jan Kara Reviewed-by: Andreas Dilger Link: https://lore.kernel.org/r/20201006004841.600488-2-mfo@canonical.com Signed-off-by: Theodore Ts'o --- fs/jbd2/commit.c | 36 ++++++++++++++++-------------------- fs/jbd2/journal.c | 2 ++ include/linux/jbd2.h | 4 ++++ 3 files changed, 22 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 6d2da8ad0e6f..f79b86b4241f 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -187,19 +187,17 @@ static int journal_wait_on_commit_record(journal_t *journal, * use writepages() because with delayed allocation we may be doing * block allocation in writepages(). 
*/ -static int journal_submit_inode_data_buffers(struct address_space *mapping, - loff_t dirty_start, loff_t dirty_end) +int jbd2_journal_submit_inode_data_buffers(struct jbd2_inode *jinode) { - int ret; + struct address_space *mapping = jinode->i_vfs_inode->i_mapping; struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = mapping->nrpages * 2, - .range_start = dirty_start, - .range_end = dirty_end, + .range_start = jinode->i_dirty_start, + .range_end = jinode->i_dirty_end, }; - ret = generic_writepages(mapping, &wbc); - return ret; + return generic_writepages(mapping, &wbc); } /* @@ -215,16 +213,11 @@ static int journal_submit_data_buffers(journal_t *journal, { struct jbd2_inode *jinode; int err, ret = 0; - struct address_space *mapping; spin_lock(&journal->j_list_lock); list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) { - loff_t dirty_start = jinode->i_dirty_start; - loff_t dirty_end = jinode->i_dirty_end; - if (!(jinode->i_flags & JI_WRITE_DATA)) continue; - mapping = jinode->i_vfs_inode->i_mapping; jinode->i_flags |= JI_COMMIT_RUNNING; spin_unlock(&journal->j_list_lock); /* @@ -234,8 +227,7 @@ static int journal_submit_data_buffers(journal_t *journal, * only allocated blocks here. */ trace_jbd2_submit_inode_data(jinode->i_vfs_inode); - err = journal_submit_inode_data_buffers(mapping, dirty_start, - dirty_end); + err = jbd2_journal_submit_inode_data_buffers(jinode); if (!ret) ret = err; spin_lock(&journal->j_list_lock); @@ -248,6 +240,15 @@ static int journal_submit_data_buffers(journal_t *journal, return ret; } +int jbd2_journal_finish_inode_data_buffers(struct jbd2_inode *jinode) +{ + struct address_space *mapping = jinode->i_vfs_inode->i_mapping; + + return filemap_fdatawait_range_keep_errors(mapping, + jinode->i_dirty_start, + jinode->i_dirty_end); +} + /* * Wait for data submitted for writeout, refile inodes to proper * transaction if needed. 
@@ -262,16 +263,11 @@ static int journal_finish_inode_data_buffers(journal_t *journal, /* For locking, see the comment in journal_submit_data_buffers() */ spin_lock(&journal->j_list_lock); list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) { - loff_t dirty_start = jinode->i_dirty_start; - loff_t dirty_end = jinode->i_dirty_end; - if (!(jinode->i_flags & JI_WAIT_DATA)) continue; jinode->i_flags |= JI_COMMIT_RUNNING; spin_unlock(&journal->j_list_lock); - err = filemap_fdatawait_range_keep_errors( - jinode->i_vfs_inode->i_mapping, dirty_start, - dirty_end); + err = jbd2_journal_finish_inode_data_buffers(jinode); if (!ret) ret = err; spin_lock(&journal->j_list_lock); diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 17fdc482f554..c0600405e7a2 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -91,6 +91,8 @@ EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers); EXPORT_SYMBOL(jbd2_journal_force_commit); EXPORT_SYMBOL(jbd2_journal_inode_ranged_write); EXPORT_SYMBOL(jbd2_journal_inode_ranged_wait); +EXPORT_SYMBOL(jbd2_journal_submit_inode_data_buffers); +EXPORT_SYMBOL(jbd2_journal_finish_inode_data_buffers); EXPORT_SYMBOL(jbd2_journal_init_jbd_inode); EXPORT_SYMBOL(jbd2_journal_release_jbd_inode); EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate); diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index a1ef05412acf..8b7b06066bc2 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1421,6 +1421,10 @@ extern int jbd2_journal_inode_ranged_write(handle_t *handle, extern int jbd2_journal_inode_ranged_wait(handle_t *handle, struct jbd2_inode *inode, loff_t start_byte, loff_t length); +extern int jbd2_journal_submit_inode_data_buffers( + struct jbd2_inode *jinode); +extern int jbd2_journal_finish_inode_data_buffers( + struct jbd2_inode *jinode); extern int jbd2_journal_begin_ordered_truncate(journal_t *journal, struct jbd2_inode *inode, loff_t new_size); extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode); -- cgit v1.2.3 From 342af94ec6c02aa478fe2adcd41b950e154b03ba Mon Sep 17 00:00:00 2001 From: Mauricio Faria de Oliveira Date: Mon, 5 Oct 2020 21:48:39 -0300 Subject: jbd2, ext4, ocfs2: introduce/use journal callbacks j_submit|finish_inode_data_buffers() Introduce journal callbacks to allow different behaviors for an inode in journal_submit|finish_inode_data_buffers(). The existing users of the current behavior (ext4, ocfs2) are adapted to use the previously exported functions that implement the current behavior. Users are callers of jbd2_journal_inode_ranged_write|wait(), which adds the inode to the transaction's inode list with the JI_WRITE|WAIT_DATA flags. Only ext4 and ocfs2 in-tree. Both CONFIG_EXT4_FS and CONFIG_OCSFS2_FS select CONFIG_JBD2, which builds fs/jbd2/commit.c and journal.c that define and export the functions, so we can call directly in ext4/ocfs2. 
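For illustration only (not part of this patch), the wiring a jbd2 user is expected to do once its journal is loaded, condensed from the ext4 and ocfs2 hunks below; the exported helpers are the defaults, and a later patch may point these at filesystem-specific implementations:

	/* e.g. in the filesystem's fill_super / journal init path */
	journal->j_submit_inode_data_buffers =
		jbd2_journal_submit_inode_data_buffers;
	journal->j_finish_inode_data_buffers =
		jbd2_journal_finish_inode_data_buffers;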
Signed-off-by: Mauricio Faria de Oliveira Suggested-by: Jan Kara Reviewed-by: Jan Kara Reviewed-by: Andreas Dilger Link: https://lore.kernel.org/r/20201006004841.600488-3-mfo@canonical.com Signed-off-by: Theodore Ts'o --- fs/ext4/super.c | 4 ++++ fs/jbd2/commit.c | 30 ++++++++++++++++++------------ fs/ocfs2/journal.c | 4 ++++ include/linux/jbd2.h | 25 ++++++++++++++++++++++++- 4 files changed, 50 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 0ee673e8e261..a3e57f554f1b 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4752,6 +4752,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); sbi->s_journal->j_commit_callback = ext4_journal_commit_callback; + sbi->s_journal->j_submit_inode_data_buffers = + jbd2_journal_submit_inode_data_buffers; + sbi->s_journal->j_finish_inode_data_buffers = + jbd2_journal_finish_inode_data_buffers; no_journal: if (!test_opt(sb, NO_MBCACHE)) { diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index f79b86b4241f..6252b4c50666 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -197,6 +197,12 @@ int jbd2_journal_submit_inode_data_buffers(struct jbd2_inode *jinode) .range_end = jinode->i_dirty_end, }; + /* + * submit the inode data buffers. We use writepage + * instead of writepages. Because writepages can do + * block allocation with delalloc. We need to write + * only allocated blocks here. + */ return generic_writepages(mapping, &wbc); } @@ -220,16 +226,13 @@ static int journal_submit_data_buffers(journal_t *journal, continue; jinode->i_flags |= JI_COMMIT_RUNNING; spin_unlock(&journal->j_list_lock); - /* - * submit the inode data buffers. We use writepage - * instead of writepages. Because writepages can do - * block allocation with delalloc. We need to write - * only allocated blocks here. - */ + /* submit the inode data buffers. */ trace_jbd2_submit_inode_data(jinode->i_vfs_inode); - err = jbd2_journal_submit_inode_data_buffers(jinode); - if (!ret) - ret = err; + if (journal->j_submit_inode_data_buffers) { + err = journal->j_submit_inode_data_buffers(jinode); + if (!ret) + ret = err; + } spin_lock(&journal->j_list_lock); J_ASSERT(jinode->i_transaction == commit_transaction); jinode->i_flags &= ~JI_COMMIT_RUNNING; @@ -267,9 +270,12 @@ static int journal_finish_inode_data_buffers(journal_t *journal, continue; jinode->i_flags |= JI_COMMIT_RUNNING; spin_unlock(&journal->j_list_lock); - err = jbd2_journal_finish_inode_data_buffers(jinode); - if (!ret) - ret = err; + /* wait for the inode data buffers writeout. 
*/ + if (journal->j_finish_inode_data_buffers) { + err = journal->j_finish_inode_data_buffers(jinode); + if (!ret) + ret = err; + } spin_lock(&journal->j_list_lock); jinode->i_flags &= ~JI_COMMIT_RUNNING; smp_mb(); diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index b425f0b01dce..b9a9d69dde7e 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -883,6 +883,10 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty) OCFS2_JOURNAL_DIRTY_FL); journal->j_journal = j_journal; + journal->j_journal->j_submit_inode_data_buffers = + jbd2_journal_submit_inode_data_buffers; + journal->j_journal->j_finish_inode_data_buffers = + jbd2_journal_finish_inode_data_buffers; journal->j_inode = inode; journal->j_bh = bh; diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 8b7b06066bc2..04afa6dcd60d 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -629,7 +629,9 @@ struct transaction_s struct journal_head *t_shadow_list; /* - * List of inodes whose data we've modified in data=ordered mode. + * List of inodes associated with the transaction; e.g., ext4 uses + * this to track inodes in data=ordered and data=journal mode that + * need special handling on transaction commit; also used by ocfs2. * [j_list_lock] */ struct list_head t_inode_list; @@ -1111,6 +1113,27 @@ struct journal_s void (*j_commit_callback)(journal_t *, transaction_t *); + /** + * @j_submit_inode_data_buffers: + * + * This function is called for all inodes associated with the + * committing transaction marked with JI_WRITE_DATA flag + * before we start to write out the transaction to the journal. + */ + int (*j_submit_inode_data_buffers) + (struct jbd2_inode *); + + /** + * @j_finish_inode_data_buffers: + * + * This function is called for all inodes associated with the + * committing transaction marked with JI_WAIT_DATA flag + * after we have written the transaction to the journal + * but before we write out the commit block. + */ + int (*j_finish_inode_data_buffers) + (struct jbd2_inode *); + /* * Journal statistics */ -- cgit v1.2.3 From 64a9f1449950c774743420cf374047043e32fde4 Mon Sep 17 00:00:00 2001 From: Mauricio Faria de Oliveira Date: Mon, 5 Oct 2020 21:48:40 -0300 Subject: ext4: data=journal: fixes for ext4_page_mkwrite() These are two fixes for data journalling required by the next patch, discovered while testing it. First, the optimization to return early if all buffers are mapped is not appropriate for the next patch: The inode _must_ be added to the transaction's list in data=journal mode (so to write-protect pages on commit) thus we cannot return early there. Second, once that optimization to reduce transactions was disabled for data=journal mode, more transactions happened, and occasionally hit this warning message: 'JBD2: Spotted dirty metadata buffer'. Reason is, block_page_mkwrite() will set_buffer_dirty() before do_journal_get_write_access() that is there to prevent it. This issue was masked by the optimization. So, on data=journal use __block_write_begin() instead. This also requires page locking and len recalculation. (see block_page_mkwrite() for implementation details.) Finally, as Jan noted there is little sharing between data=journal and other modes in ext4_page_mkwrite(). However, a prototype of ext4_journalled_page_mkwrite() showed there still would be lots of duplicated lines (tens of) that didn't seem worth it. Thus this patch ends up with an ugly goto to skip all non-data journalling code (to avoid long indentations, but that can be changed..) 
in the beginning, and just a conditional in the transaction section. Well, we skip a common part to data journalling which is the page truncated check, but we do it again after ext4_journal_start() when we re-acquire the page lock (so not to acquire the page lock twice needlessly for data journalling.) Signed-off-by: Mauricio Faria de Oliveira Suggested-by: Jan Kara Reviewed-by: Jan Kara Reviewed-by: Andreas Dilger Link: https://lore.kernel.org/r/20201006004841.600488-4-mfo@canonical.com Signed-off-by: Theodore Ts'o --- fs/ext4/inode.c | 51 ++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 44 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 3630b4900e69..804fd14fe5e0 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -5987,9 +5987,17 @@ vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf) if (err) goto out_ret; + /* + * On data journalling we skip straight to the transaction handle: + * there's no delalloc; page truncated will be checked later; the + * early return w/ all buffers mapped (calculates size/len) can't + * be used; and there's no dioread_nolock, so only ext4_get_block. + */ + if (ext4_should_journal_data(inode)) + goto retry_alloc; + /* Delalloc case is easy... */ if (test_opt(inode->i_sb, DELALLOC) && - !ext4_should_journal_data(inode) && !ext4_nonda_switch(inode->i_sb)) { do { err = block_page_mkwrite(vma, vmf, @@ -6015,6 +6023,9 @@ vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf) /* * Return if we have all the buffers mapped. This avoids the need to do * journal_start/journal_stop which can block and take a long time + * + * This cannot be done for data journalling, as we have to add the + * inode to the transaction's list to writeprotect pages on commit. */ if (page_has_buffers(page)) { if (!ext4_walk_page_buffers(NULL, page_buffers(page), @@ -6039,16 +6050,42 @@ retry_alloc: ret = VM_FAULT_SIGBUS; goto out; } - err = block_page_mkwrite(vma, vmf, get_block); - if (!err && ext4_should_journal_data(inode)) { - if (ext4_walk_page_buffers(handle, page_buffers(page), 0, - PAGE_SIZE, NULL, do_journal_get_write_access)) { + /* + * Data journalling can't use block_page_mkwrite() because it + * will set_buffer_dirty() before do_journal_get_write_access() + * thus might hit warning messages for dirty metadata buffers. + */ + if (!ext4_should_journal_data(inode)) { + err = block_page_mkwrite(vma, vmf, get_block); + } else { + lock_page(page); + size = i_size_read(inode); + /* Page got truncated from under us? 
*/ + if (page->mapping != mapping || page_offset(page) > size) { unlock_page(page); - ret = VM_FAULT_SIGBUS; + ret = VM_FAULT_NOPAGE; ext4_journal_stop(handle); goto out; } - ext4_set_inode_state(inode, EXT4_STATE_JDATA); + + if (page->index == size >> PAGE_SHIFT) + len = size & ~PAGE_MASK; + else + len = PAGE_SIZE; + + err = __block_write_begin(page, 0, len, ext4_get_block); + if (!err) { + if (ext4_walk_page_buffers(handle, page_buffers(page), + 0, len, NULL, do_journal_get_write_access)) { + unlock_page(page); + ret = VM_FAULT_SIGBUS; + ext4_journal_stop(handle); + goto out; + } + ext4_set_inode_state(inode, EXT4_STATE_JDATA); + } else { + unlock_page(page); + } } ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) -- cgit v1.2.3 From afb585a97f81899e39c14658789f02259d8c306a Mon Sep 17 00:00:00 2001 From: Mauricio Faria de Oliveira Date: Mon, 5 Oct 2020 21:48:41 -0300 Subject: ext4: data=journal: write-protect pages on j_submit_inode_data_buffers() This implements journal callbacks j_submit|finish_inode_data_buffers() with different behavior for data=journal: to write-protect pages under commit, preventing changes to buffers writeably mapped to userspace. If a buffer's content changes between commit's checksum calculation and write-out to disk, it can cause journal recovery/mount failures upon a kernel crash or power loss. [ 27.334874] EXT4-fs: Warning: mounting with data=journal disables delayed allocation, dioread_nolock, and O_DIRECT support! [ 27.339492] JBD2: Invalid checksum recovering data block 8705 in log [ 27.342716] JBD2: recovery failed [ 27.343316] EXT4-fs (loop0): error loading journal mount: /ext4: can't read superblock on /dev/loop0. In j_submit_inode_data_buffers() we write-protect the inode's pages with write_cache_pages() and redirty w/ writepage callback if needed. In j_finish_inode_data_buffers() there is nothing do to. And in order to use the callbacks, inodes are added to the inode list in transaction in __ext4_journalled_writepage() and ext4_page_mkwrite(). In ext4_page_mkwrite() we must make sure that the buffers are attached to the transaction as jbddirty with write_end_fn(), as already done in __ext4_journalled_writepage(). Signed-off-by: Mauricio Faria de Oliveira Reported-by: Dann Frazier Reported-by: kernel test robot # wbc.nr_to_write Suggested-by: Jan Kara Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/20201006004841.600488-5-mfo@canonical.com Signed-off-by: Theodore Ts'o --- fs/ext4/inode.c | 25 +++++++++++------ fs/ext4/super.c | 87 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 101 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 804fd14fe5e0..2ac294fc2247 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1911,6 +1911,9 @@ static int __ext4_journalled_writepage(struct page *page, err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, write_end_fn); } + if (ret == 0) + ret = err; + err = ext4_jbd2_inode_add_write(handle, inode, 0, len); if (ret == 0) ret = err; EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; @@ -6062,10 +6065,8 @@ retry_alloc: size = i_size_read(inode); /* Page got truncated from under us? 
*/ if (page->mapping != mapping || page_offset(page) > size) { - unlock_page(page); ret = VM_FAULT_NOPAGE; - ext4_journal_stop(handle); - goto out; + goto out_error; } if (page->index == size >> PAGE_SHIFT) @@ -6075,13 +6076,15 @@ retry_alloc: err = __block_write_begin(page, 0, len, ext4_get_block); if (!err) { + ret = VM_FAULT_SIGBUS; if (ext4_walk_page_buffers(handle, page_buffers(page), - 0, len, NULL, do_journal_get_write_access)) { - unlock_page(page); - ret = VM_FAULT_SIGBUS; - ext4_journal_stop(handle); - goto out; - } + 0, len, NULL, do_journal_get_write_access)) + goto out_error; + if (ext4_walk_page_buffers(handle, page_buffers(page), + 0, len, NULL, write_end_fn)) + goto out_error; + if (ext4_jbd2_inode_add_write(handle, inode, 0, len)) + goto out_error; ext4_set_inode_state(inode, EXT4_STATE_JDATA); } else { unlock_page(page); @@ -6096,6 +6099,10 @@ out: up_read(&EXT4_I(inode)->i_mmap_sem); sb_end_pagefault(inode->i_sb); return ret; +out_error: + unlock_page(page); + ext4_journal_stop(handle); + goto out; } vm_fault_t ext4_filemap_fault(struct vm_fault *vmf) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index a3e57f554f1b..901c1c938276 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -571,6 +571,89 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn) spin_unlock(&sbi->s_md_lock); } +/* + * This writepage callback for write_cache_pages() + * takes care of a few cases after page cleaning. + * + * write_cache_pages() already checks for dirty pages + * and calls clear_page_dirty_for_io(), which we want, + * to write protect the pages. + * + * However, we may have to redirty a page (see below.) + */ +static int ext4_journalled_writepage_callback(struct page *page, + struct writeback_control *wbc, + void *data) +{ + transaction_t *transaction = (transaction_t *) data; + struct buffer_head *bh, *head; + struct journal_head *jh; + + bh = head = page_buffers(page); + do { + /* + * We have to redirty a page in these cases: + * 1) If buffer is dirty, it means the page was dirty because it + * contains a buffer that needs checkpointing. So the dirty bit + * needs to be preserved so that checkpointing writes the buffer + * properly. + * 2) If buffer is not part of the committing transaction + * (we may have just accidentally come across this buffer because + * inode range tracking is not exact) or if the currently running + * transaction already contains this buffer as well, dirty bit + * needs to be preserved so that the buffer gets writeprotected + * properly on running transaction's commit. 
+ */ + jh = bh2jh(bh); + if (buffer_dirty(bh) || + (jh && (jh->b_transaction != transaction || + jh->b_next_transaction))) { + redirty_page_for_writepage(wbc, page); + goto out; + } + } while ((bh = bh->b_this_page) != head); + +out: + return AOP_WRITEPAGE_ACTIVATE; +} + +static int ext4_journalled_submit_inode_data_buffers(struct jbd2_inode *jinode) +{ + struct address_space *mapping = jinode->i_vfs_inode->i_mapping; + struct writeback_control wbc = { + .sync_mode = WB_SYNC_ALL, + .nr_to_write = LONG_MAX, + .range_start = jinode->i_dirty_start, + .range_end = jinode->i_dirty_end, + }; + + return write_cache_pages(mapping, &wbc, + ext4_journalled_writepage_callback, + jinode->i_transaction); +} + +static int ext4_journal_submit_inode_data_buffers(struct jbd2_inode *jinode) +{ + int ret; + + if (ext4_should_journal_data(jinode->i_vfs_inode)) + ret = ext4_journalled_submit_inode_data_buffers(jinode); + else + ret = jbd2_journal_submit_inode_data_buffers(jinode); + + return ret; +} + +static int ext4_journal_finish_inode_data_buffers(struct jbd2_inode *jinode) +{ + int ret = 0; + + if (!ext4_should_journal_data(jinode->i_vfs_inode)) + ret = jbd2_journal_finish_inode_data_buffers(jinode); + + return ret; +} + static bool system_going_down(void) { return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF @@ -4753,9 +4836,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) sbi->s_journal->j_commit_callback = ext4_journal_commit_callback; sbi->s_journal->j_submit_inode_data_buffers = - jbd2_journal_submit_inode_data_buffers; + ext4_journal_submit_inode_data_buffers; sbi->s_journal->j_finish_inode_data_buffers = - jbd2_journal_finish_inode_data_buffers; + ext4_journal_finish_inode_data_buffers; no_journal: if (!test_opt(sb, NO_MBCACHE)) { -- cgit v1.2.3 From d1e18b8824dd50cff255e6cecf515ea598eaf9f0 Mon Sep 17 00:00:00 2001 From: Ritesh Harjani Date: Thu, 8 Oct 2020 20:32:48 +0530 Subject: ext4: fix bs < ps issue reported with dioread_nolock mount opt left shifting m_lblk by blkbits was causing value overflow and hence it was not able to convert unwritten to written extent. So, make sure we typecast it to loff_t before do left shift operation. Also in func ext4_convert_unwritten_io_end_vec(), make sure to initialize ret variable to avoid accidentally returning an uninitialized ret. This patch fixes the issue reported in ext4 for bs < ps with dioread_nolock mount option. 
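For illustration only (not part of this patch), why the cast matters, assuming ext4_lblk_t is the usual 32-bit type: the shift is then performed in 32-bit arithmetic and can wrap before the result is widened to loff_t.

	io_end_vec->offset = mpd->map.m_lblk << blkbits;          /* 32-bit shift, may wrap */
	io_end_vec->offset = (loff_t)mpd->map.m_lblk << blkbits;  /* widen first, 64-bit shift */

For example, with a 1 KiB block size (blkbits == 10) every logical block at or above 1 << 22, i.e. file offsets of 4 GiB and up, wraps in the uncast version.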
Fixes: c8cc88163f40df39e50c ("ext4: Add support for blocksize < pagesize in dioread_nolock") Cc: stable@kernel.org Reported-by: Aneesh Kumar K.V Signed-off-by: Ritesh Harjani Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/af902b5db99e8b73980c795d84ad7bb417487e76.1602168865.git.riteshh@linux.ibm.com Signed-off-by: Theodore Ts'o --- fs/ext4/extents.c | 2 +- fs/ext4/inode.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 2baf0debd2b7..e46f3381ba4c 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -4769,7 +4769,7 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end) { - int ret, err = 0; + int ret = 0, err = 0; struct ext4_io_end_vec *io_end_vec; /* diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 2ac294fc2247..09096fe6170e 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -2258,7 +2258,7 @@ static int mpage_process_page(struct mpage_da_data *mpd, struct page *page, err = PTR_ERR(io_end_vec); goto out; } - io_end_vec->offset = mpd->map.m_lblk << blkbits; + io_end_vec->offset = (loff_t)mpd->map.m_lblk << blkbits; } *map_bh = true; goto out; -- cgit v1.2.3 From d3e7d20befd9d07db2955015a3f294c0a0a771d3 Mon Sep 17 00:00:00 2001 From: Kaixu Xia Date: Sat, 10 Oct 2020 16:10:16 +0800 Subject: ext4: use the normal helper to get the actual inode Here we use the READ_ONCE to fix race conditions in ->d_compare() and ->d_hash() when they are called in RCU-walk mode, seems we can use the normal helper d_inode_rcu() to get the actual inode. Signed-off-by: Kaixu Xia Reviewed-by: Lukas Czerner Link: https://lore.kernel.org/r/1602317416-1260-1-git-send-email-kaixuxia@tencent.com Signed-off-by: Theodore Ts'o --- fs/ext4/dir.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index 1d82336b1cd4..3bf6cb8e55f6 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -674,7 +674,7 @@ static int ext4_d_compare(const struct dentry *dentry, unsigned int len, { struct qstr qstr = {.name = str, .len = len }; const struct dentry *parent = READ_ONCE(dentry->d_parent); - const struct inode *inode = READ_ONCE(parent->d_inode); + const struct inode *inode = d_inode_rcu(parent); char strbuf[DNAME_INLINE_LEN]; if (!inode || !IS_CASEFOLDED(inode) || @@ -706,7 +706,7 @@ static int ext4_d_hash(const struct dentry *dentry, struct qstr *str) { const struct ext4_sb_info *sbi = EXT4_SB(dentry->d_sb); const struct unicode_map *um = sbi->s_encoding; - const struct inode *inode = READ_ONCE(dentry->d_inode); + const struct inode *inode = d_inode_rcu(dentry); unsigned char *norm; int len, ret = 0; -- cgit v1.2.3 From fc750a3b44bdccb9fb96d6abbc48a9b8e480ce7b Mon Sep 17 00:00:00 2001 From: changfengnan Date: Mon, 12 Oct 2020 18:49:00 +0200 Subject: jbd2: avoid transaction reuse after reformatting When ext4 is formatted with lazy_journal_init=1 and transactions from the previous filesystem are still on disk, it is possible that they are considered during a recovery after a crash. Because the checksum seed has changed, the CRC check will fail, and the journal recovery fails with checksum error although the journal is otherwise perfectly valid. Fix the problem by checking commit block time stamps to determine whether the data in the journal block is just stale or whether it is indeed corrupt. 
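For illustration only (not part of this patch), the PASS_SCAN decision condensed from do_one_pass() below: a failed descriptor/commit/revoke checksum is no longer immediately fatal while scanning; the commit block's timestamp decides between stale and corrupt.

	/* Checksum failed during PASS_SCAN: compare this commit block's
	 * timestamp with the newest one seen so far in this journal.
	 */
	if (commit_time < last_trans_commit_time) {
		/* Older stamp: leftover from the previously formatted
		 * filesystem, end recovery cleanly here.
		 */
		err = 0;
		goto done;
	}
	/* Same or newer stamp: this really is corruption. */
	err = -EFSBADCRC;
	goto failed;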
Reported-by: kernel test robot Reviewed-by: Andreas Dilger Signed-off-by: Fengnan Chang Signed-off-by: Jan Kara Link: https://lore.kernel.org/r/20201012164900.20197-1-jack@suse.cz Signed-off-by: Theodore Ts'o --- fs/jbd2/recovery.c | 78 +++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 66 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index faa97d748474..fb134c7a12c8 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c @@ -428,6 +428,8 @@ static int do_one_pass(journal_t *journal, __u32 crc32_sum = ~0; /* Transactional Checksums */ int descr_csum_size = 0; int block_error = 0; + bool need_check_commit_time = false; + __u64 last_trans_commit_time = 0, commit_time; /* * First thing is to establish what we expect to find in the log @@ -520,12 +522,21 @@ static int do_one_pass(journal_t *journal, if (descr_csum_size > 0 && !jbd2_descriptor_block_csum_verify(journal, bh->b_data)) { - printk(KERN_ERR "JBD2: Invalid checksum " - "recovering block %lu in log\n", - next_log_block); - err = -EFSBADCRC; - brelse(bh); - goto failed; + /* + * PASS_SCAN can see stale blocks due to lazy + * journal init. Don't error out on those yet. + */ + if (pass != PASS_SCAN) { + pr_err("JBD2: Invalid checksum recovering block %lu in log\n", + next_log_block); + err = -EFSBADCRC; + brelse(bh); + goto failed; + } + need_check_commit_time = true; + jbd_debug(1, + "invalid descriptor block found in %lu\n", + next_log_block); } /* If it is a valid descriptor block, replay it @@ -535,6 +546,7 @@ static int do_one_pass(journal_t *journal, if (pass != PASS_REPLAY) { if (pass == PASS_SCAN && jbd2_has_feature_checksum(journal) && + !need_check_commit_time && !info->end_transaction) { if (calc_chksums(journal, bh, &next_log_block, @@ -683,11 +695,41 @@ static int do_one_pass(journal_t *journal, * mentioned conditions. Hence assume * "Interrupted Commit".) */ + commit_time = be64_to_cpu( + ((struct commit_header *)bh->b_data)->h_commit_sec); + /* + * If need_check_commit_time is set, it means we are in + * PASS_SCAN and csum verify failed before. If + * commit_time is increasing, it's the same journal, + * otherwise it is stale journal block, just end this + * recovery. + */ + if (need_check_commit_time) { + if (commit_time >= last_trans_commit_time) { + pr_err("JBD2: Invalid checksum found in transaction %u\n", + next_commit_ID); + err = -EFSBADCRC; + brelse(bh); + goto failed; + } + ignore_crc_mismatch: + /* + * It likely does not belong to same journal, + * just end this recovery with success. + */ + jbd_debug(1, "JBD2: Invalid checksum ignored in transaction %u, likely stale data\n", + next_commit_ID); + err = 0; + brelse(bh); + goto done; + } - /* Found an expected commit block: if checksums - * are present verify them in PASS_SCAN; else not + /* + * Found an expected commit block: if checksums + * are present, verify them in PASS_SCAN; else not * much to do other than move on to the next sequence - * number. */ + * number. 
+ */ if (pass == PASS_SCAN && jbd2_has_feature_checksum(journal)) { struct commit_header *cbh = @@ -719,6 +761,8 @@ static int do_one_pass(journal_t *journal, !jbd2_commit_block_csum_verify(journal, bh->b_data)) { chksum_error: + if (commit_time < last_trans_commit_time) + goto ignore_crc_mismatch; info->end_transaction = next_commit_ID; if (!jbd2_has_feature_async_commit(journal)) { @@ -728,11 +772,24 @@ static int do_one_pass(journal_t *journal, break; } } + if (pass == PASS_SCAN) + last_trans_commit_time = commit_time; brelse(bh); next_commit_ID++; continue; case JBD2_REVOKE_BLOCK: + /* + * Check revoke block crc in pass_scan, if csum verify + * failed, check commit block time later. + */ + if (pass == PASS_SCAN && + !jbd2_descriptor_block_csum_verify(journal, + bh->b_data)) { + jbd_debug(1, "JBD2: invalid revoke block found in %lu\n", + next_log_block); + need_check_commit_time = true; + } /* If we aren't in the REVOKE pass, then we can * just skip over this block. */ if (pass != PASS_REVOKE) { @@ -800,9 +857,6 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh, offset = sizeof(jbd2_journal_revoke_header_t); rcount = be32_to_cpu(header->r_count); - if (!jbd2_descriptor_block_csum_verify(journal, header)) - return -EFSBADCRC; - if (jbd2_journal_has_csum_v2or3(journal)) csum_size = sizeof(struct jbd2_journal_block_tail); if (rcount > journal->j_blocksize - csum_size) -- cgit v1.2.3 From e0770e91424f694b461141cbc99adf6b23006b60 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Thu, 15 Oct 2020 13:03:30 +0200 Subject: ext4: Detect already used quota file early When we try to use file already used as a quota file again (for the same or different quota type), strange things can happen. At the very least lockdep annotations may be wrong but also inode flags may be wrongly set / reset. When the file is used for two quota types at once we can even corrupt the file and likely crash the kernel. Catch all these cases by checking whether passed file is already used as quota file and bail early in that case. This fixes occasional generic/219 failure due to lockdep complaint. Reviewed-by: Andreas Dilger Reported-by: Ritesh Harjani Signed-off-by: Jan Kara Link: https://lore.kernel.org/r/20201015110330.28716-1-jack@suse.cz Signed-off-by: Theodore Ts'o --- fs/ext4/super.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'fs') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 901c1c938276..f997fc95cc31 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -6225,6 +6225,11 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id, /* Quotafile not on the same filesystem? */ if (path->dentry->d_sb != sb) return -EXDEV; + + /* Quota already enabled for this file? */ + if (IS_NOQUOTA(d_inode(path->dentry))) + return -EBUSY; + /* Journaling quota? */ if (EXT4_SB(sb)->s_qf_names[type]) { /* Quotafile not in fs root? */ -- cgit v1.2.3 From 995a3ed67fc8c0e3301a770016fb66f1bbf15ec8 Mon Sep 17 00:00:00 2001 From: Harshad Shirwadkar Date: Thu, 15 Oct 2020 13:37:54 -0700 Subject: ext4: add fast_commit feature and handling for extended mount options We are running out of mount option bits. Add handling for using s_mount_opt2. Add ext4 and jbd2 fast commit feature flag and also add ability to turn off the fast commit feature in Ext4. 
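For illustration only (not part of this patch): with the new MOPT_2 flag the option bit lands in s_mount_opt2 rather than s_mount_opt, so the existing *_opt2 helpers are used around it, as in these two fragments condensed from the hunks below.

	/* at mount time, in ext4_fill_super(): default on when the
	 * on-disk feature is present
	 */
	if (ext4_has_feature_fast_commit(sb))
		set_opt2(sb, JOURNAL_FAST_COMMIT);

	/* at runtime, when printing mount options: */
	if (test_opt2(sb, JOURNAL_FAST_COMMIT))
		SEQ_OPTS_PUTS("fast_commit");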
Signed-off-by: Harshad Shirwadkar Link: https://lore.kernel.org/r/20201015203802.3597742-3-harshadshirwadkar@gmail.com Signed-off-by: Theodore Ts'o --- fs/ext4/ext4.h | 4 ++++ fs/ext4/super.c | 27 ++++++++++++++++++++++----- include/linux/jbd2.h | 5 ++++- 3 files changed, 30 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 1879531a119f..02d7dc378505 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1213,6 +1213,8 @@ struct ext4_inode_info { #define EXT4_MOUNT2_EXPLICIT_JOURNAL_CHECKSUM 0x00000008 /* User explicitly specified journal checksum */ +#define EXT4_MOUNT2_JOURNAL_FAST_COMMIT 0x00000010 /* Journal fast commit */ + #define clear_opt(sb, opt) EXT4_SB(sb)->s_mount_opt &= \ ~EXT4_MOUNT_##opt #define set_opt(sb, opt) EXT4_SB(sb)->s_mount_opt |= \ @@ -1813,6 +1815,7 @@ static inline bool ext4_verity_in_progress(struct inode *inode) #define EXT4_FEATURE_COMPAT_RESIZE_INODE 0x0010 #define EXT4_FEATURE_COMPAT_DIR_INDEX 0x0020 #define EXT4_FEATURE_COMPAT_SPARSE_SUPER2 0x0200 +#define EXT4_FEATURE_COMPAT_FAST_COMMIT 0x0400 #define EXT4_FEATURE_COMPAT_STABLE_INODES 0x0800 #define EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001 @@ -1915,6 +1918,7 @@ EXT4_FEATURE_COMPAT_FUNCS(xattr, EXT_ATTR) EXT4_FEATURE_COMPAT_FUNCS(resize_inode, RESIZE_INODE) EXT4_FEATURE_COMPAT_FUNCS(dir_index, DIR_INDEX) EXT4_FEATURE_COMPAT_FUNCS(sparse_super2, SPARSE_SUPER2) +EXT4_FEATURE_COMPAT_FUNCS(fast_commit, FAST_COMMIT) EXT4_FEATURE_COMPAT_FUNCS(stable_inodes, STABLE_INODES) EXT4_FEATURE_RO_COMPAT_FUNCS(sparse_super, SPARSE_SUPER) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index f997fc95cc31..66423c598b70 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1709,7 +1709,7 @@ enum { Opt_dioread_nolock, Opt_dioread_lock, Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable, Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache, - Opt_prefetch_block_bitmaps, + Opt_prefetch_block_bitmaps, Opt_no_fc, }; static const match_table_t tokens = { @@ -1796,6 +1796,7 @@ static const match_table_t tokens = { {Opt_init_itable, "init_itable=%u"}, {Opt_init_itable, "init_itable"}, {Opt_noinit_itable, "noinit_itable"}, + {Opt_no_fc, "no_fc"}, {Opt_max_dir_size_kb, "max_dir_size_kb=%u"}, {Opt_test_dummy_encryption, "test_dummy_encryption=%s"}, {Opt_test_dummy_encryption, "test_dummy_encryption"}, @@ -1922,6 +1923,7 @@ static int clear_qf_name(struct super_block *sb, int qtype) #define MOPT_EXT4_ONLY (MOPT_NO_EXT2 | MOPT_NO_EXT3) #define MOPT_STRING 0x0400 #define MOPT_SKIP 0x0800 +#define MOPT_2 0x1000 static const struct mount_opts { int token; @@ -2022,6 +2024,8 @@ static const struct mount_opts { {Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET}, {Opt_prefetch_block_bitmaps, EXT4_MOUNT_PREFETCH_BLOCK_BITMAPS, MOPT_SET}, + {Opt_no_fc, EXT4_MOUNT2_JOURNAL_FAST_COMMIT, + MOPT_CLEAR | MOPT_2 | MOPT_EXT4_ONLY}, {Opt_err, 0, 0} }; @@ -2398,10 +2402,17 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, WARN_ON(1); return -1; } - if (arg != 0) - sbi->s_mount_opt |= m->mount_opt; - else - sbi->s_mount_opt &= ~m->mount_opt; + if (m->flags & MOPT_2) { + if (arg != 0) + sbi->s_mount_opt2 |= m->mount_opt; + else + sbi->s_mount_opt2 &= ~m->mount_opt; + } else { + if (arg != 0) + sbi->s_mount_opt |= m->mount_opt; + else + sbi->s_mount_opt &= ~m->mount_opt; + } } return 1; } @@ -2618,6 +2629,9 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, SEQ_OPTS_PUTS("dax=inode"); } + if (test_opt2(sb, JOURNAL_FAST_COMMIT)) + 
SEQ_OPTS_PUTS("fast_commit"); + ext4_show_quota_options(seq, sb); return 0; } @@ -4121,6 +4135,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) #ifdef CONFIG_EXT4_FS_POSIX_ACL set_opt(sb, POSIX_ACL); #endif + if (ext4_has_feature_fast_commit(sb)) + set_opt2(sb, JOURNAL_FAST_COMMIT); /* don't forget to enable journal_csum when metadata_csum is enabled. */ if (ext4_has_metadata_csum(sb)) set_opt(sb, JOURNAL_CHECKSUM); @@ -4777,6 +4793,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM; clear_opt(sb, JOURNAL_CHECKSUM); clear_opt(sb, DATA_FLAGS); + clear_opt2(sb, JOURNAL_FAST_COMMIT); sbi->s_journal = NULL; needs_recovery = 0; goto no_journal; diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 04afa6dcd60d..0685cc95e501 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -289,6 +289,7 @@ typedef struct journal_superblock_s #define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004 #define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008 #define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010 +#define JBD2_FEATURE_INCOMPAT_FAST_COMMIT 0x00000020 /* See "journal feature predicate functions" below */ @@ -299,7 +300,8 @@ typedef struct journal_superblock_s JBD2_FEATURE_INCOMPAT_64BIT | \ JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \ JBD2_FEATURE_INCOMPAT_CSUM_V2 | \ - JBD2_FEATURE_INCOMPAT_CSUM_V3) + JBD2_FEATURE_INCOMPAT_CSUM_V3 | \ + JBD2_FEATURE_INCOMPAT_FAST_COMMIT) #ifdef __KERNEL__ @@ -1263,6 +1265,7 @@ JBD2_FEATURE_INCOMPAT_FUNCS(64bit, 64BIT) JBD2_FEATURE_INCOMPAT_FUNCS(async_commit, ASYNC_COMMIT) JBD2_FEATURE_INCOMPAT_FUNCS(csum2, CSUM_V2) JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3) +JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT) /* * Journal flag definitions -- cgit v1.2.3 From 6866d7b3f2bb4f011041ba54c98b1584497fe2fd Mon Sep 17 00:00:00 2001 From: Harshad Shirwadkar Date: Thu, 15 Oct 2020 13:37:55 -0700 Subject: ext4 / jbd2: add fast commit initialization This patch adds fast commit area trackers in the journal_t structure. These are initialized via the jbd2_fc_init() routine that this patch adds. This patch also adds ext4/fast_commit.c and ext4/fast_commit.h files for fast commit code that will be added in subsequent patches in this series. 
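For illustration only (not part of this patch), a simplified picture of the journal area once jbd2_fc_init() has reserved j_fc_wbufsize blocks (EXT4_NUM_FC_BLKS, i.e. 256, when requested by ext4) and journal_reset() has run; the exact boundary arithmetic is in the hunks below.

	/*
	 *   j_first .............. j_last | j_fc_first ......... j_fc_last
	 *   <--  regular commit area  --> | <--  fast commit area      -->
	 *                                        (j_fc_wbufsize blocks)
	 */

If the buffer array allocation in jbd2_fc_init() fails, ext4_fc_init() simply warns and clears the fast commit feature, so the journal falls back to the regular layout.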
Reported-by: kernel test robot Signed-off-by: Harshad Shirwadkar Link: https://lore.kernel.org/r/20201015203802.3597742-4-harshadshirwadkar@gmail.com Signed-off-by: Theodore Ts'o --- fs/ext4/Makefile | 2 +- fs/ext4/ext4.h | 4 ++++ fs/ext4/fast_commit.c | 20 +++++++++++++++++++ fs/ext4/fast_commit.h | 9 +++++++++ fs/ext4/super.c | 1 + fs/jbd2/journal.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++----- include/linux/jbd2.h | 39 +++++++++++++++++++++++++++++++++++++ 7 files changed, 122 insertions(+), 6 deletions(-) create mode 100644 fs/ext4/fast_commit.c create mode 100644 fs/ext4/fast_commit.h (limited to 'fs') diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile index 2e42f47a7f98..49e7af6cc93f 100644 --- a/fs/ext4/Makefile +++ b/fs/ext4/Makefile @@ -10,7 +10,7 @@ ext4-y := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \ indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \ mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \ super.o symlink.o sysfs.o xattr.o xattr_hurd.o xattr_trusted.o \ - xattr_user.o + xattr_user.o fast_commit.o ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 02d7dc378505..2c412d32db0f 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -963,6 +963,7 @@ do { \ #endif /* defined(__KERNEL__) || defined(__linux__) */ #include "extents_status.h" +#include "fast_commit.h" /* * Lock subclasses for i_data_sem in the ext4_inode_info structure. @@ -2678,6 +2679,9 @@ extern int ext4_init_inode_table(struct super_block *sb, ext4_group_t group, int barrier); extern void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate); +/* fast_commit.c */ + +void ext4_fc_init(struct super_block *sb, journal_t *journal); /* mballoc.c */ extern const struct seq_operations ext4_mb_seq_groups_ops; extern long ext4_mb_stats; diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c new file mode 100644 index 000000000000..0dad8bdb1253 --- /dev/null +++ b/fs/ext4/fast_commit.c @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * fs/ext4/fast_commit.c + * + * Written by Harshad Shirwadkar + * + * Ext4 fast commits routines. 
+ */ +#include "ext4_jbd2.h" + +void ext4_fc_init(struct super_block *sb, journal_t *journal) +{ + if (!test_opt2(sb, JOURNAL_FAST_COMMIT)) + return; + if (jbd2_fc_init(journal, EXT4_NUM_FC_BLKS)) { + pr_warn("Error while enabling fast commits, turning off."); + ext4_clear_feature_fast_commit(sb); + } +} diff --git a/fs/ext4/fast_commit.h b/fs/ext4/fast_commit.h new file mode 100644 index 000000000000..8362bf5e6e00 --- /dev/null +++ b/fs/ext4/fast_commit.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __FAST_COMMIT_H__ +#define __FAST_COMMIT_H__ + +/* Number of blocks in journal area to allocate for fast commits */ +#define EXT4_NUM_FC_BLKS 256 + +#endif /* __FAST_COMMIT_H__ */ diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 66423c598b70..41da649ccaea 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -5170,6 +5170,7 @@ static void ext4_init_journal_params(struct super_block *sb, journal_t *journal) journal->j_commit_interval = sbi->s_commit_interval; journal->j_min_batch_time = sbi->s_min_batch_time; journal->j_max_batch_time = sbi->s_max_batch_time; + ext4_fc_init(sb, journal); write_lock(&journal->j_state_lock); if (test_opt(sb, BARRIER)) diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index c0600405e7a2..4497bfbac527 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -1181,6 +1181,14 @@ static journal_t *journal_init_common(struct block_device *bdev, if (!journal->j_wbuf) goto err_cleanup; + if (journal->j_fc_wbufsize > 0) { + journal->j_fc_wbuf = kmalloc_array(journal->j_fc_wbufsize, + sizeof(struct buffer_head *), + GFP_KERNEL); + if (!journal->j_fc_wbuf) + goto err_cleanup; + } + bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize); if (!bh) { pr_err("%s: Cannot get buffer for journal superblock\n", @@ -1194,11 +1202,23 @@ static journal_t *journal_init_common(struct block_device *bdev, err_cleanup: kfree(journal->j_wbuf); + kfree(journal->j_fc_wbuf); jbd2_journal_destroy_revoke(journal); kfree(journal); return NULL; } +int jbd2_fc_init(journal_t *journal, int num_fc_blks) +{ + journal->j_fc_wbufsize = num_fc_blks; + journal->j_fc_wbuf = kmalloc_array(journal->j_fc_wbufsize, + sizeof(struct buffer_head *), GFP_KERNEL); + if (!journal->j_fc_wbuf) + return -ENOMEM; + return 0; +} +EXPORT_SYMBOL(jbd2_fc_init); + /* jbd2_journal_init_dev and jbd2_journal_init_inode: * * Create a journal structure assigned some fixed set of disk blocks to @@ -1316,11 +1336,20 @@ static int journal_reset(journal_t *journal) } journal->j_first = first; - journal->j_last = last; - journal->j_head = first; - journal->j_tail = first; - journal->j_free = last - first; + if (jbd2_has_feature_fast_commit(journal) && + journal->j_fc_wbufsize > 0) { + journal->j_fc_last = last; + journal->j_last = last - journal->j_fc_wbufsize; + journal->j_fc_first = journal->j_last + 1; + journal->j_fc_off = 0; + } else { + journal->j_last = last; + } + + journal->j_head = journal->j_first; + journal->j_tail = journal->j_first; + journal->j_free = journal->j_last - journal->j_first; journal->j_tail_sequence = journal->j_transaction_sequence; journal->j_commit_sequence = journal->j_transaction_sequence - 1; @@ -1665,9 +1694,18 @@ static int load_superblock(journal_t *journal) journal->j_tail_sequence = be32_to_cpu(sb->s_sequence); journal->j_tail = be32_to_cpu(sb->s_start); journal->j_first = be32_to_cpu(sb->s_first); - journal->j_last = be32_to_cpu(sb->s_maxlen); journal->j_errno = be32_to_cpu(sb->s_errno); + if (jbd2_has_feature_fast_commit(journal) && + 
journal->j_fc_wbufsize > 0) { + journal->j_fc_last = be32_to_cpu(sb->s_maxlen); + journal->j_last = journal->j_fc_last - journal->j_fc_wbufsize; + journal->j_fc_first = journal->j_last + 1; + journal->j_fc_off = 0; + } else { + journal->j_last = be32_to_cpu(sb->s_maxlen); + } + return 0; } @@ -1728,6 +1766,9 @@ int jbd2_journal_load(journal_t *journal) */ journal->j_flags &= ~JBD2_ABORT; + if (journal->j_fc_wbufsize > 0) + jbd2_journal_set_features(journal, 0, 0, + JBD2_FEATURE_INCOMPAT_FAST_COMMIT); /* OK, we've finished with the dynamic journal bits: * reinitialise the dynamic contents of the superblock in memory * and reset them on disk. */ @@ -1811,6 +1852,8 @@ int jbd2_journal_destroy(journal_t *journal) jbd2_journal_destroy_revoke(journal); if (journal->j_chksum_driver) crypto_free_shash(journal->j_chksum_driver); + if (journal->j_fc_wbufsize > 0) + kfree(journal->j_fc_wbuf); kfree(journal->j_wbuf); kfree(journal); diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 0685cc95e501..008629b4d615 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -918,6 +918,30 @@ struct journal_s */ unsigned long j_last; + /** + * @j_fc_first: + * + * The block number of the first fast commit block in the journal + * [j_state_lock]. + */ + unsigned long j_fc_first; + + /** + * @j_fc_off: + * + * Number of fast commit blocks currently allocated. + * [j_state_lock]. + */ + unsigned long j_fc_off; + + /** + * @j_fc_last: + * + * The block number one beyond the last fast commit block in the journal + * [j_state_lock]. + */ + unsigned long j_fc_last; + /** * @j_dev: Device where we store the journal. */ @@ -1068,6 +1092,12 @@ struct journal_s */ struct buffer_head **j_wbuf; + /** + * @j_fc_wbuf: Array of fast commit bhs for + * jbd2_journal_commit_transaction. + */ + struct buffer_head **j_fc_wbuf; + /** * @j_wbufsize: * @@ -1075,6 +1105,13 @@ struct journal_s */ int j_wbufsize; + /** + * @j_fc_wbufsize: + * + * Size of @j_fc_wbuf array. + */ + int j_fc_wbufsize; + /** * @j_last_sync_writer: * @@ -1535,6 +1572,8 @@ void __jbd2_log_wait_for_space(journal_t *journal); extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *); extern int jbd2_cleanup_journal_tail(journal_t *); +/* Fast commit related APIs */ +int jbd2_fc_init(journal_t *journal, int num_fc_blks); /* * is_journal_abort * -- cgit v1.2.3 From ff780b91efe901b8eecd8114785abae5341820ad Mon Sep 17 00:00:00 2001 From: Harshad Shirwadkar Date: Thu, 15 Oct 2020 13:37:56 -0700 Subject: jbd2: add fast commit machinery This functions adds necessary APIs needed in JBD2 layer for fast commits. Signed-off-by: Harshad Shirwadkar Link: https://lore.kernel.org/r/20201015203802.3597742-5-harshadshirwadkar@gmail.com Signed-off-by: Theodore Ts'o --- fs/ext4/fast_commit.c | 8 +++ fs/jbd2/commit.c | 44 ++++++++++++ fs/jbd2/journal.c | 190 +++++++++++++++++++++++++++++++++++++++++++++++++- include/linux/jbd2.h | 27 +++++++ 4 files changed, 268 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c index 0dad8bdb1253..f2d11b4c6b62 100644 --- a/fs/ext4/fast_commit.c +++ b/fs/ext4/fast_commit.c @@ -8,11 +8,19 @@ * Ext4 fast commits routines. */ #include "ext4_jbd2.h" +/* + * Fast commit cleanup routine. This is called after every fast commit and + * full commit. full is true if we are called after a full commit. 
+ */ +static void ext4_fc_cleanup(journal_t *journal, int full) +{ +} void ext4_fc_init(struct super_block *sb, journal_t *journal) { if (!test_opt2(sb, JOURNAL_FAST_COMMIT)) return; + journal->j_fc_cleanup_callback = ext4_fc_cleanup; if (jbd2_fc_init(journal, EXT4_NUM_FC_BLKS)) { pr_warn("Error while enabling fast commits, turning off."); ext4_clear_feature_fast_commit(sb); diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 6252b4c50666..fa688e163a80 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -206,6 +206,30 @@ int jbd2_journal_submit_inode_data_buffers(struct jbd2_inode *jinode) return generic_writepages(mapping, &wbc); } +/* Send all the data buffers related to an inode */ +int jbd2_submit_inode_data(struct jbd2_inode *jinode) +{ + + if (!jinode || !(jinode->i_flags & JI_WRITE_DATA)) + return 0; + + trace_jbd2_submit_inode_data(jinode->i_vfs_inode); + return jbd2_journal_submit_inode_data_buffers(jinode); + +} +EXPORT_SYMBOL(jbd2_submit_inode_data); + +int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode) +{ + if (!jinode || !(jinode->i_flags & JI_WAIT_DATA) || + !jinode->i_vfs_inode || !jinode->i_vfs_inode->i_mapping) + return 0; + return filemap_fdatawait_range_keep_errors( + jinode->i_vfs_inode->i_mapping, jinode->i_dirty_start, + jinode->i_dirty_end); +} +EXPORT_SYMBOL(jbd2_wait_inode_data); + /* * Submit all the data buffers of inode associated with the transaction to * disk. @@ -415,6 +439,20 @@ void jbd2_journal_commit_transaction(journal_t *journal) J_ASSERT(journal->j_running_transaction != NULL); J_ASSERT(journal->j_committing_transaction == NULL); + write_lock(&journal->j_state_lock); + journal->j_flags |= JBD2_FULL_COMMIT_ONGOING; + while (journal->j_flags & JBD2_FAST_COMMIT_ONGOING) { + DEFINE_WAIT(wait); + + prepare_to_wait(&journal->j_fc_wait, &wait, + TASK_UNINTERRUPTIBLE); + write_unlock(&journal->j_state_lock); + schedule(); + write_lock(&journal->j_state_lock); + finish_wait(&journal->j_fc_wait, &wait); + } + write_unlock(&journal->j_state_lock); + commit_transaction = journal->j_running_transaction; trace_jbd2_start_commit(journal, commit_transaction); @@ -422,6 +460,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) commit_transaction->t_tid); write_lock(&journal->j_state_lock); + journal->j_fc_off = 0; J_ASSERT(commit_transaction->t_state == T_RUNNING); commit_transaction->t_state = T_LOCKED; @@ -1121,12 +1160,16 @@ restart_loop: if (journal->j_commit_callback) journal->j_commit_callback(journal, commit_transaction); + if (journal->j_fc_cleanup_callback) + journal->j_fc_cleanup_callback(journal, 1); trace_jbd2_end_commit(journal, commit_transaction); jbd_debug(1, "JBD2: commit %d complete, head %d\n", journal->j_commit_sequence, journal->j_tail_sequence); write_lock(&journal->j_state_lock); + journal->j_flags &= ~JBD2_FULL_COMMIT_ONGOING; + journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING; spin_lock(&journal->j_list_lock); commit_transaction->t_state = T_FINISHED; /* Check if the transaction can be dropped now that we are finished */ @@ -1138,6 +1181,7 @@ restart_loop: spin_unlock(&journal->j_list_lock); write_unlock(&journal->j_state_lock); wake_up(&journal->j_wait_done_commit); + wake_up(&journal->j_fc_wait); /* * Calculate overall stats diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 4497bfbac527..0c7c42bd530f 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -159,7 +159,9 @@ static void commit_timeout(struct timer_list *t) * * 1) COMMIT: Every so often we need to commit the current state of the * 
filesystem to disk. The journal thread is responsible for writing - * all of the metadata buffers to disk. + * all of the metadata buffers to disk. If a fast commit is ongoing, + * the journal thread waits until it is done and then continues from + * there. * * 2) CHECKPOINT: We cannot reuse a used section of the log file until all * of the data in that part of the log has been rewritten elsewhere on @@ -716,6 +718,75 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid) return err; } +/* + * Start a fast commit. If there's an ongoing fast or full commit, wait for + * it to complete. Returns 0 if a new fast commit was started. Returns -EALREADY + * if a fast commit is not needed, either because there is already a commit + * in progress or because this tid has already been committed. Returns -EINVAL if no jbd2 + * commit has yet been performed. + */ +int jbd2_fc_begin_commit(journal_t *journal, tid_t tid) +{ + /* + * Fast commits only allowed if at least one full commit has + * been processed. + */ + if (!journal->j_stats.ts_tid) + return -EINVAL; + + if (tid <= journal->j_commit_sequence) + return -EALREADY; + + write_lock(&journal->j_state_lock); + if (journal->j_flags & JBD2_FULL_COMMIT_ONGOING || + (journal->j_flags & JBD2_FAST_COMMIT_ONGOING)) { + DEFINE_WAIT(wait); + + prepare_to_wait(&journal->j_fc_wait, &wait, + TASK_UNINTERRUPTIBLE); + write_unlock(&journal->j_state_lock); + schedule(); + finish_wait(&journal->j_fc_wait, &wait); + return -EALREADY; + } + journal->j_flags |= JBD2_FAST_COMMIT_ONGOING; + write_unlock(&journal->j_state_lock); + + return 0; +} +EXPORT_SYMBOL(jbd2_fc_begin_commit); + +/* + * Stop a fast commit. If fallback is set, this function starts commit of + * TID tid before any other fast commit can start. + */ +static int __jbd2_fc_end_commit(journal_t *journal, tid_t tid, bool fallback) +{ + if (journal->j_fc_cleanup_callback) + journal->j_fc_cleanup_callback(journal, 0); + write_lock(&journal->j_state_lock); + journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING; + if (fallback) + journal->j_flags |= JBD2_FULL_COMMIT_ONGOING; + write_unlock(&journal->j_state_lock); + wake_up(&journal->j_fc_wait); + if (fallback) + return jbd2_complete_transaction(journal, tid); + return 0; +} + +int jbd2_fc_end_commit(journal_t *journal) +{ + return __jbd2_fc_end_commit(journal, 0, 0); +} +EXPORT_SYMBOL(jbd2_fc_end_commit); + +int jbd2_fc_end_commit_fallback(journal_t *journal, tid_t tid) +{ + return __jbd2_fc_end_commit(journal, tid, 1); +} +EXPORT_SYMBOL(jbd2_fc_end_commit_fallback); + /* Return 1 when transaction with given tid has already committed.
*/ int jbd2_transaction_committed(journal_t *journal, tid_t tid) { @@ -784,6 +855,110 @@ int jbd2_journal_next_log_block(journal_t *journal, unsigned long long *retp) return jbd2_journal_bmap(journal, blocknr, retp); } +/* Map one fast commit buffer for use by the file system */ +int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out) +{ + unsigned long long pblock; + unsigned long blocknr; + int ret = 0; + struct buffer_head *bh; + int fc_off; + + *bh_out = NULL; + write_lock(&journal->j_state_lock); + + if (journal->j_fc_off + journal->j_fc_first < journal->j_fc_last) { + fc_off = journal->j_fc_off; + blocknr = journal->j_fc_first + fc_off; + journal->j_fc_off++; + } else { + ret = -EINVAL; + } + write_unlock(&journal->j_state_lock); + + if (ret) + return ret; + + ret = jbd2_journal_bmap(journal, blocknr, &pblock); + if (ret) + return ret; + + bh = __getblk(journal->j_dev, pblock, journal->j_blocksize); + if (!bh) + return -ENOMEM; + + lock_buffer(bh); + + clear_buffer_uptodate(bh); + set_buffer_dirty(bh); + unlock_buffer(bh); + journal->j_fc_wbuf[fc_off] = bh; + + *bh_out = bh; + + return 0; +} +EXPORT_SYMBOL(jbd2_fc_get_buf); + +/* + * Wait on fast commit buffers that were allocated by jbd2_fc_get_buf + * for completion. + */ +int jbd2_fc_wait_bufs(journal_t *journal, int num_blks) +{ + struct buffer_head *bh; + int i, j_fc_off; + + read_lock(&journal->j_state_lock); + j_fc_off = journal->j_fc_off; + read_unlock(&journal->j_state_lock); + + /* + * Wait in reverse order to minimize chances of us being woken up before + * all IOs have completed + */ + for (i = j_fc_off - 1; i >= j_fc_off - num_blks; i--) { + bh = journal->j_fc_wbuf[i]; + wait_on_buffer(bh); + put_bh(bh); + journal->j_fc_wbuf[i] = NULL; + if (unlikely(!buffer_uptodate(bh))) + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL(jbd2_fc_wait_bufs); + +/* + * Release fast commit buffers that were allocated by jbd2_fc_get_buf + * and are still held in the j_fc_wbuf array. + */ +int jbd2_fc_release_bufs(journal_t *journal) +{ + struct buffer_head *bh; + int i, j_fc_off; + + read_lock(&journal->j_state_lock); + j_fc_off = journal->j_fc_off; + read_unlock(&journal->j_state_lock); + + /* + * Release in reverse allocation order; a NULL entry means the + * remaining slots were never handed out. + */ + for (i = j_fc_off - 1; i >= 0; i--) { + bh = journal->j_fc_wbuf[i]; + if (!bh) + break; + put_bh(bh); + journal->j_fc_wbuf[i] = NULL; + } + + return 0; +} +EXPORT_SYMBOL(jbd2_fc_release_bufs); + /* * Conversion of logical to physical block numbers for the journal * @@ -1142,6 +1317,7 @@ static journal_t *journal_init_common(struct block_device *bdev, init_waitqueue_head(&journal->j_wait_commit); init_waitqueue_head(&journal->j_wait_updates); init_waitqueue_head(&journal->j_wait_reserved); + init_waitqueue_head(&journal->j_fc_wait); mutex_init(&journal->j_abort_mutex); mutex_init(&journal->j_barrier); mutex_init(&journal->j_checkpoint_mutex); @@ -1495,6 +1671,7 @@ out: static void jbd2_mark_journal_empty(journal_t *journal, int write_op) { journal_superblock_t *sb = journal->j_superblock; + bool had_fast_commit = false; BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex)); lock_buffer(journal->j_sb_buffer); @@ -1508,9 +1685,20 @@ static void jbd2_mark_journal_empty(journal_t *journal, int write_op) sb->s_sequence = cpu_to_be32(journal->j_tail_sequence); sb->s_start = cpu_to_be32(0); + if (jbd2_has_feature_fast_commit(journal)) { + /* + * When the journal is clean, there is no need to keep the fast commit + * flag set and make the file system incompatible with older kernels.
+ */ + jbd2_clear_feature_fast_commit(journal); + had_fast_commit = true; + } jbd2_write_superblock(journal, write_op); + if (had_fast_commit) + jbd2_set_feature_fast_commit(journal); + /* Log is no longer empty */ write_lock(&journal->j_state_lock); journal->j_flags |= JBD2_FLUSHED; diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 008629b4d615..a009d9b9c620 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -861,6 +861,13 @@ struct journal_s */ wait_queue_head_t j_wait_reserved; + /** + * @j_fc_wait: + * + * Wait queue to wait for completion of async fast commits. + */ + wait_queue_head_t j_fc_wait; + /** * @j_checkpoint_mutex: * @@ -1232,6 +1239,15 @@ struct journal_s */ struct lockdep_map j_trans_commit_map; #endif + + /** + * @j_fc_cleanup_callback: + * + * Clean-up after fast commit or full commit. JBD2 calls this function + * after every commit operation. + */ + void (*j_fc_cleanup_callback)(struct journal_s *journal, int); + }; #define jbd2_might_wait_for_commit(j) \ @@ -1316,6 +1332,8 @@ JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT) #define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file * data write error in ordered * mode */ +#define JBD2_FAST_COMMIT_ONGOING 0x100 /* Fast commit is ongoing */ +#define JBD2_FULL_COMMIT_ONGOING 0x200 /* Full commit is ongoing */ /* * Function declarations for the journaling transaction and buffer @@ -1574,6 +1592,15 @@ extern int jbd2_cleanup_journal_tail(journal_t *); /* Fast commit related APIs */ int jbd2_fc_init(journal_t *journal, int num_fc_blks); +int jbd2_fc_begin_commit(journal_t *journal, tid_t tid); +int jbd2_fc_end_commit(journal_t *journal); +int jbd2_fc_end_commit_fallback(journal_t *journal, tid_t tid); +int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out); +int jbd2_submit_inode_data(struct jbd2_inode *jinode); +int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode); +int jbd2_fc_wait_bufs(journal_t *journal, int num_blks); +int jbd2_fc_release_bufs(journal_t *journal); + /* * is_journal_abort * -- cgit v1.2.3 From aa75f4d3daaeb1389b9cce9d6b84401eaf228d4e Mon Sep 17 00:00:00 2001 From: Harshad Shirwadkar Date: Thu, 15 Oct 2020 13:37:57 -0700 Subject: ext4: main fast-commit commit path This patch adds main fast commit commit path handlers. The overall patch can be divided into two inter-related parts: (A) Metadata updates tracking This part consists of helper functions to track changes that need to be committed during a commit operation. These updates are maintained by Ext4 in different in-memory queues. Following are the APIs and their short description that are implemented in this patch: - ext4_fc_track_link/unlink/creat() - Track unlink. link and creat operations - ext4_fc_track_range() - Track changed logical block offsets inodes - ext4_fc_track_inode() - Track inodes - ext4_fc_mark_ineligible() - Mark file system fast commit ineligible() - ext4_fc_start_update() / ext4_fc_stop_update() / ext4_fc_start_ineligible() / ext4_fc_stop_ineligible() These functions are useful for co-ordinating inode updates with commits. (B) Main commit Path This part consists of functions to convert updates tracked in in-memory data structures into on-disk commits. Function ext4_fc_commit() is the main entry point to commit path. 
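
To make the intended usage concrete (this is an illustrative sketch, not code from this
patch; apart from the ext4_fc_* APIs named above, the helper and variable names are made
up), a metadata update on an inode is bracketed by ext4_fc_start_update() /
ext4_fc_stop_update(), records the logical blocks it touched with ext4_fc_track_range(),
and a later fsync() drives ext4_fc_commit():

	/* hypothetical caller, for illustration only */
	ext4_fc_start_update(inode);		/* block a racing fast commit */
	err = some_block_allocation(inode, lblk, len);	/* made-up helper */
	if (!err)
		ext4_fc_track_range(inode, lblk, lblk + len - 1);
	ext4_fc_stop_update(inode);		/* let a waiting fast commit proceed */

	/* later, from the fsync path: */
	err = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
			     EXT4_I(inode)->i_sync_tid);

Operations that fast commits cannot yet describe (extended attributes, for example) call
ext4_fc_mark_ineligible() instead, which makes the next commit fall back to a full jbd2
commit.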
Reported-by: kernel test robot Signed-off-by: Harshad Shirwadkar Link: https://lore.kernel.org/r/20201015203802.3597742-6-harshadshirwadkar@gmail.com Signed-off-by: Theodore Ts'o --- fs/ext4/acl.c | 2 + fs/ext4/ext4.h | 70 +++ fs/ext4/extents.c | 48 +- fs/ext4/fast_commit.c | 1183 +++++++++++++++++++++++++++++++++++++++++++ fs/ext4/fast_commit.h | 110 ++++ fs/ext4/file.c | 10 +- fs/ext4/fsync.c | 2 +- fs/ext4/inode.c | 52 +- fs/ext4/ioctl.c | 16 +- fs/ext4/namei.c | 37 +- fs/ext4/super.c | 31 ++ fs/ext4/xattr.c | 3 + include/trace/events/ext4.h | 172 +++++++ 13 files changed, 1707 insertions(+), 29 deletions(-) (limited to 'fs') diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c index 76f634d185f1..68aaed48315f 100644 --- a/fs/ext4/acl.c +++ b/fs/ext4/acl.c @@ -242,6 +242,7 @@ retry: handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits); if (IS_ERR(handle)) return PTR_ERR(handle); + ext4_fc_start_update(inode); if ((type == ACL_TYPE_ACCESS) && acl) { error = posix_acl_update_mode(inode, &mode, &acl); @@ -259,6 +260,7 @@ retry: } out_stop: ext4_journal_stop(handle); + ext4_fc_stop_update(inode); if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry; return error; diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 2c412d32db0f..003e898df595 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1021,6 +1021,31 @@ struct ext4_inode_info { struct list_head i_orphan; /* unlinked but open inodes */ + /* Fast commit related info */ + + struct list_head i_fc_list; /* + * inodes that need fast commit + * protected by sbi->s_fc_lock. + */ + + /* Fast commit subtid when this inode was committed */ + unsigned int i_fc_committed_subtid; + + /* Start of lblk range that needs to be committed in this fast commit */ + ext4_lblk_t i_fc_lblk_start; + + /* End of lblk range that needs to be committed in this fast commit */ + ext4_lblk_t i_fc_lblk_len; + + /* Number of ongoing updates on this inode */ + atomic_t i_fc_updates; + + /* Fast commit wait queue for this inode */ + wait_queue_head_t i_fc_wait; + + /* Protect concurrent accesses on i_fc_lblk_start, i_fc_lblk_len */ + struct mutex i_fc_lock; + /* * i_disksize keeps track of what the inode size is ON DISK, not * in memory. During truncate, i_size is set to the new size by @@ -1141,6 +1166,10 @@ struct ext4_inode_info { #define EXT4_VALID_FS 0x0001 /* Unmounted cleanly */ #define EXT4_ERROR_FS 0x0002 /* Errors detected */ #define EXT4_ORPHAN_FS 0x0004 /* Orphans being recovered */ +#define EXT4_FC_INELIGIBLE 0x0008 /* Fast commit ineligible */ +#define EXT4_FC_COMMITTING 0x0010 /* File system underoing a fast + * commit. + */ /* * Misc. filesystem flags @@ -1613,6 +1642,30 @@ struct ext4_sb_info { /* Record the errseq of the backing block device */ errseq_t s_bdev_wb_err; spinlock_t s_bdev_wb_lock; + + /* Ext4 fast commit stuff */ + atomic_t s_fc_subtid; + atomic_t s_fc_ineligible_updates; + /* + * After commit starts, the main queue gets locked, and the further + * updates get added in the staging queue. + */ +#define FC_Q_MAIN 0 +#define FC_Q_STAGING 1 + struct list_head s_fc_q[2]; /* Inodes staged for fast commit + * that have data changes in them. + */ + struct list_head s_fc_dentry_q[2]; /* directory entry updates */ + unsigned int s_fc_bytes; + /* + * Main fast commit lock. This lock protects accesses to the + * following fields: + * ei->i_fc_list, s_fc_dentry_q, s_fc_q, s_fc_bytes, s_fc_bh. 
+ */ + spinlock_t s_fc_lock; + struct buffer_head *s_fc_bh; + struct ext4_fc_stats s_fc_stats; + u64 s_fc_avg_commit_time; }; static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb) @@ -1723,6 +1776,7 @@ enum { EXT4_STATE_EXT_PRECACHED, /* extents have been precached */ EXT4_STATE_LUSTRE_EA_INODE, /* Lustre-style ea_inode */ EXT4_STATE_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */ + EXT4_STATE_FC_COMMITTING, /* Fast commit ongoing */ }; #define EXT4_INODE_BIT_FNS(name, field, offset) \ @@ -2682,6 +2736,22 @@ extern void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate); /* fast_commit.c */ void ext4_fc_init(struct super_block *sb, journal_t *journal); +void ext4_fc_init_inode(struct inode *inode); +void ext4_fc_track_range(struct inode *inode, ext4_lblk_t start, + ext4_lblk_t end); +void ext4_fc_track_unlink(struct inode *inode, struct dentry *dentry); +void ext4_fc_track_link(struct inode *inode, struct dentry *dentry); +void ext4_fc_track_create(struct inode *inode, struct dentry *dentry); +void ext4_fc_track_inode(struct inode *inode); +void ext4_fc_mark_ineligible(struct super_block *sb, int reason); +void ext4_fc_start_ineligible(struct super_block *sb, int reason); +void ext4_fc_stop_ineligible(struct super_block *sb); +void ext4_fc_start_update(struct inode *inode); +void ext4_fc_stop_update(struct inode *inode); +void ext4_fc_del(struct inode *inode); +int ext4_fc_commit(journal_t *journal, tid_t commit_tid); +int __init ext4_fc_init_dentry_cache(void); + /* mballoc.c */ extern const struct seq_operations ext4_mb_seq_groups_ops; extern long ext4_mb_stats; diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index e46f3381ba4c..a2bb87d75500 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -3723,6 +3723,7 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle, err = ext4_ext_dirty(handle, inode, path + path->p_depth); out: ext4_ext_show_leaf(inode, path); + ext4_fc_track_range(inode, ee_block, ee_block + ee_len - 1); return err; } @@ -3794,6 +3795,7 @@ convert_initialized_extent(handle_t *handle, struct inode *inode, if (*allocated > map->m_len) *allocated = map->m_len; map->m_len = *allocated; + ext4_fc_track_range(inode, ee_block, ee_block + ee_len - 1); return 0; } @@ -4327,7 +4329,7 @@ got_allocated_blocks: map->m_len = ar.len; allocated = map->m_len; ext4_ext_show_leaf(inode, path); - + ext4_fc_track_range(inode, map->m_lblk, map->m_lblk + map->m_len - 1); out: ext4_ext_drop_refs(path); kfree(path); @@ -4600,7 +4602,8 @@ static long ext4_zero_range(struct file *file, loff_t offset, ret = ext4_mark_inode_dirty(handle, inode); if (unlikely(ret)) goto out_handle; - + ext4_fc_track_range(inode, offset >> inode->i_sb->s_blocksize_bits, + (offset + len - 1) >> inode->i_sb->s_blocksize_bits); /* Zero out partial block at the edges of the range */ ret = ext4_zero_partial_blocks(handle, inode, offset, len); if (ret >= 0) @@ -4648,23 +4651,34 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)) return -EOPNOTSUPP; + ext4_fc_track_range(inode, offset >> blkbits, + (offset + len - 1) >> blkbits); - if (mode & FALLOC_FL_PUNCH_HOLE) - return ext4_punch_hole(inode, offset, len); + ext4_fc_start_update(inode); + + if (mode & FALLOC_FL_PUNCH_HOLE) { + ret = ext4_punch_hole(inode, offset, len); + goto exit; + } ret = ext4_convert_inline_data(inode); if (ret) - return ret; + goto exit; - if (mode & FALLOC_FL_COLLAPSE_RANGE) - return 
ext4_collapse_range(inode, offset, len); - - if (mode & FALLOC_FL_INSERT_RANGE) - return ext4_insert_range(inode, offset, len); + if (mode & FALLOC_FL_COLLAPSE_RANGE) { + ret = ext4_collapse_range(inode, offset, len); + goto exit; + } - if (mode & FALLOC_FL_ZERO_RANGE) - return ext4_zero_range(file, offset, len, mode); + if (mode & FALLOC_FL_INSERT_RANGE) { + ret = ext4_insert_range(inode, offset, len); + goto exit; + } + if (mode & FALLOC_FL_ZERO_RANGE) { + ret = ext4_zero_range(file, offset, len, mode); + goto exit; + } trace_ext4_fallocate_enter(inode, offset, len, mode); lblk = offset >> blkbits; @@ -4698,12 +4712,14 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) goto out; if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) { - ret = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal, - EXT4_I(inode)->i_sync_tid); + ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal, + EXT4_I(inode)->i_sync_tid); } out: inode_unlock(inode); trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); +exit: + ext4_fc_stop_update(inode); return ret; } @@ -5291,6 +5307,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) ret = PTR_ERR(handle); goto out_mmap; } + ext4_fc_start_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE); down_write(&EXT4_I(inode)->i_data_sem); ext4_discard_preallocations(inode, 0); @@ -5329,6 +5346,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) out_stop: ext4_journal_stop(handle); + ext4_fc_stop_ineligible(sb); out_mmap: up_write(&EXT4_I(inode)->i_mmap_sem); out_mutex: @@ -5429,6 +5447,7 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) ret = PTR_ERR(handle); goto out_mmap; } + ext4_fc_start_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE); /* Expand file to avoid data loss if there is error while shifting */ inode->i_size += len; @@ -5503,6 +5522,7 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) out_stop: ext4_journal_stop(handle); + ext4_fc_stop_ineligible(sb); out_mmap: up_write(&EXT4_I(inode)->i_mmap_sem); out_mutex: diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c index f2d11b4c6b62..79e947c43198 100644 --- a/fs/ext4/fast_commit.c +++ b/fs/ext4/fast_commit.c @@ -7,13 +7,1185 @@ * * Ext4 fast commits routines. */ +#include "ext4.h" #include "ext4_jbd2.h" +#include "ext4_extents.h" +#include "mballoc.h" + +/* + * Ext4 Fast Commits + * ----------------- + * + * Ext4 fast commits implement fine grained journalling for Ext4. + * + * Fast commits are organized as a log of tag-length-value (TLV) structs. (See + * struct ext4_fc_tl). Each TLV contains some delta that is replayed TLV by + * TLV during the recovery phase. For the scenarios for which we currently + * don't have replay code, fast commit falls back to full commits. + * Fast commits record delta in one of the following three categories. + * + * (A) Directory entry updates: + * + * - EXT4_FC_TAG_UNLINK - records directory entry unlink + * - EXT4_FC_TAG_LINK - records directory entry link + * - EXT4_FC_TAG_CREAT - records inode and directory entry creation + * + * (B) File specific data range updates: + * + * - EXT4_FC_TAG_ADD_RANGE - records addition of new blocks to an inode + * - EXT4_FC_TAG_DEL_RANGE - records deletion of blocks from an inode + * + * (C) Inode metadata (mtime / ctime etc): + * + * - EXT4_FC_TAG_INODE - record the inode that should be replayed + * during recovery. 
Note that iblocks field is + * not replayed and instead derived during + * replay. + * Commit Operation + * ---------------- + * With fast commits, we maintain all the directory entry operations in the + * order in which they are issued in an in-memory queue. This queue is flushed + * to disk during the commit operation. We also maintain a list of inodes + * that need to be committed during a fast commit in another in-memory queue of + * inodes. During the commit operation, we commit in the following order: + * + * [1] Lock inodes for any further data updates by setting COMMITTING state + * [2] Submit data buffers of all the inodes + * [3] Wait for [2] to complete + * [4] Commit all the directory entry updates in the fast commit space + * [5] Commit all the changed inode structures + * [6] Write tail tag (this tag ensures the atomicity, please read the following + * section for more details). + * [7] Wait for [4], [5] and [6] to complete. + * + * All the inode updates must call ext4_fc_start_update() before starting an + * update. If such an ongoing update is present, fast commit waits for it to + * complete. The completion of such an update is marked by + * ext4_fc_stop_update(). + * + * Fast Commit Ineligibility + * ------------------------- + * Not all operations are supported by fast commits today (e.g. extended + * attributes). Fast commit ineligibility is marked by calling one of the + * two following functions: + * + * - ext4_fc_mark_ineligible(): This makes the next fast commit operation fall + * back to a full commit. This is useful in case of transient errors. + * + * - ext4_fc_start_ineligible() and ext4_fc_stop_ineligible() - These make all + * the fast commits happening between ext4_fc_start_ineligible() and + * ext4_fc_stop_ineligible(), and one fast commit after the call to + * ext4_fc_stop_ineligible(), fall back to full commits. It is important that + * one more fast commit after the stop call falls back to a full commit, so + * that it is guaranteed that the fast-commit-ineligible operation contained + * within ext4_fc_start_ineligible() and ext4_fc_stop_ineligible() is + * followed by at least 1 full commit. + * + * Atomicity of commits + * -------------------- + * In order to guarantee atomicity during the commit operation, fast commit + * uses the "EXT4_FC_TAG_TAIL" tag that marks a fast commit as complete. The tail + * tag contains the CRC of the contents and the TID of the transaction after which + * this fast commit should be applied. Recovery code replays fast commit + * logs only if there's at least 1 valid tail present. For every fast commit + * operation, there is 1 tail. This means we may end up with multiple tails + * in the fast commit space. Here's an example: + * + * - Create a new file A and remove existing file B + * - fsync() + * - Append contents to file A + * - Truncate file A + * - fsync() + * + * The fast commit space at the end of the above operations would look like this: + * [HEAD] [CREAT A] [UNLINK B] [TAIL] [ADD_RANGE A] [DEL_RANGE A] [TAIL] + * |<--- Fast Commit 1 --->|<--- Fast Commit 2 ---->| + * + * Replay code should thus check for all the valid tails in the FC area. + * + * TODOs + * ----- + * 1) Make fast commit atomic updates more fine grained. Today, a fast commit + * eligible update must be protected within ext4_fc_start_update() and + * ext4_fc_stop_update(). These routines are called from much higher-level + * routines. This can be made more fine grained by combining with + * ext4_journal_start().
+ * + * 2) Same as above for ext4_fc_start_ineligible() and ext4_fc_stop_ineligible() + * + * 3) Handle more ineligible cases. + */ + +#include <trace/events/ext4.h> +static struct kmem_cache *ext4_fc_dentry_cachep; + +static void ext4_end_buffer_io_sync(struct buffer_head *bh, int uptodate) +{ + BUFFER_TRACE(bh, ""); + if (uptodate) { + ext4_debug("%s: Block %lld up-to-date", + __func__, bh->b_blocknr); + set_buffer_uptodate(bh); + } else { + ext4_debug("%s: Block %lld not up-to-date", + __func__, bh->b_blocknr); + clear_buffer_uptodate(bh); + } + + unlock_buffer(bh); +} + +static inline void ext4_fc_reset_inode(struct inode *inode) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + + ei->i_fc_lblk_start = 0; + ei->i_fc_lblk_len = 0; +} + +void ext4_fc_init_inode(struct inode *inode) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + + ext4_fc_reset_inode(inode); + ext4_clear_inode_state(inode, EXT4_STATE_FC_COMMITTING); + INIT_LIST_HEAD(&ei->i_fc_list); + init_waitqueue_head(&ei->i_fc_wait); + atomic_set(&ei->i_fc_updates, 0); + ei->i_fc_committed_subtid = 0; +} + +/* + * Inform Ext4's fast commit machinery about the start of an inode update + * + * This function is called by high-level VFS callbacks before + * performing any inode update. This function blocks if there's an ongoing + * fast commit on the inode in question. + */ +void ext4_fc_start_update(struct inode *inode) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + + if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT)) + return; + +restart: + spin_lock(&EXT4_SB(inode->i_sb)->s_fc_lock); + if (list_empty(&ei->i_fc_list)) + goto out; + + if (ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING)) { + wait_queue_head_t *wq; +#if (BITS_PER_LONG < 64) + DEFINE_WAIT_BIT(wait, &ei->i_state_flags, + EXT4_STATE_FC_COMMITTING); + wq = bit_waitqueue(&ei->i_state_flags, + EXT4_STATE_FC_COMMITTING); +#else + DEFINE_WAIT_BIT(wait, &ei->i_flags, + EXT4_STATE_FC_COMMITTING); + wq = bit_waitqueue(&ei->i_flags, + EXT4_STATE_FC_COMMITTING); +#endif + prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); + spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock); + schedule(); + finish_wait(wq, &wait.wq_entry); + goto restart; + } +out: + atomic_inc(&ei->i_fc_updates); + spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock); +} + +/* + * Stop inode update and wake up waiting fast commits if any. + */ +void ext4_fc_stop_update(struct inode *inode) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + + if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT)) + return; + + if (atomic_dec_and_test(&ei->i_fc_updates)) + wake_up_all(&ei->i_fc_wait); +} + +/* + * Remove inode from fast commit list. If the inode is being committed, + * we wait until the inode commit is done.
+ */ +void ext4_fc_del(struct inode *inode) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + + if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT)) + return; + + + if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT)) + return; + +restart: + spin_lock(&EXT4_SB(inode->i_sb)->s_fc_lock); + if (list_empty(&ei->i_fc_list)) { + spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock); + return; + } + + if (ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING)) { + wait_queue_head_t *wq; +#if (BITS_PER_LONG < 64) + DEFINE_WAIT_BIT(wait, &ei->i_state_flags, + EXT4_STATE_FC_COMMITTING); + wq = bit_waitqueue(&ei->i_state_flags, + EXT4_STATE_FC_COMMITTING); +#else + DEFINE_WAIT_BIT(wait, &ei->i_flags, + EXT4_STATE_FC_COMMITTING); + wq = bit_waitqueue(&ei->i_flags, + EXT4_STATE_FC_COMMITTING); +#endif + prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); + spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock); + schedule(); + finish_wait(wq, &wait.wq_entry); + goto restart; + } + if (!list_empty(&ei->i_fc_list)) + list_del_init(&ei->i_fc_list); + spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock); +} + +/* + * Mark file system as fast commit ineligible. This means that next commit + * operation would result in a full jbd2 commit. + */ +void ext4_fc_mark_ineligible(struct super_block *sb, int reason) +{ + struct ext4_sb_info *sbi = EXT4_SB(sb); + + sbi->s_mount_state |= EXT4_FC_INELIGIBLE; + WARN_ON(reason >= EXT4_FC_REASON_MAX); + sbi->s_fc_stats.fc_ineligible_reason_count[reason]++; +} + +/* + * Start a fast commit ineligible update. Any commits that happen while + * such an operation is in progress fall back to full commits. + */ +void ext4_fc_start_ineligible(struct super_block *sb, int reason) +{ + struct ext4_sb_info *sbi = EXT4_SB(sb); + + WARN_ON(reason >= EXT4_FC_REASON_MAX); + sbi->s_fc_stats.fc_ineligible_reason_count[reason]++; + atomic_inc(&sbi->s_fc_ineligible_updates); +} + +/* + * Stop a fast commit ineligible update. We set EXT4_FC_INELIGIBLE flag here + * to ensure that after stopping the ineligible update, at least one full + * commit takes place. + */ +void ext4_fc_stop_ineligible(struct super_block *sb) +{ + EXT4_SB(sb)->s_mount_state |= EXT4_FC_INELIGIBLE; + atomic_dec(&EXT4_SB(sb)->s_fc_ineligible_updates); +} + +static inline int ext4_fc_is_ineligible(struct super_block *sb) +{ + return (EXT4_SB(sb)->s_mount_state & EXT4_FC_INELIGIBLE) || + atomic_read(&EXT4_SB(sb)->s_fc_ineligible_updates); +} + +/* + * Generic fast commit tracking function. If this is the first time this we are + * called after a full commit, we initialize fast commit fields and then call + * __fc_track_fn() with update = 0. If we have already been called after a full + * commit, we pass update = 1. Based on that, the track function can determine + * if it needs to track a field for the first time or if it needs to just + * update the previously tracked value. + * + * If enqueue is set, this function enqueues the inode in fast commit list. + */ +static int ext4_fc_track_template( + struct inode *inode, int (*__fc_track_fn)(struct inode *, void *, bool), + void *args, int enqueue) +{ + tid_t running_txn_tid; + bool update = false; + struct ext4_inode_info *ei = EXT4_I(inode); + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + int ret; + + if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT)) + return -EOPNOTSUPP; + + if (ext4_fc_is_ineligible(inode->i_sb)) + return -EINVAL; + + running_txn_tid = sbi->s_journal ? 
+ sbi->s_journal->j_commit_sequence + 1 : 0; + + mutex_lock(&ei->i_fc_lock); + if (running_txn_tid == ei->i_sync_tid) { + update = true; + } else { + ext4_fc_reset_inode(inode); + ei->i_sync_tid = running_txn_tid; + } + ret = __fc_track_fn(inode, args, update); + mutex_unlock(&ei->i_fc_lock); + + if (!enqueue) + return ret; + + spin_lock(&sbi->s_fc_lock); + if (list_empty(&EXT4_I(inode)->i_fc_list)) + list_add_tail(&EXT4_I(inode)->i_fc_list, + (sbi->s_mount_state & EXT4_FC_COMMITTING) ? + &sbi->s_fc_q[FC_Q_STAGING] : + &sbi->s_fc_q[FC_Q_MAIN]); + spin_unlock(&sbi->s_fc_lock); + + return ret; +} + +struct __track_dentry_update_args { + struct dentry *dentry; + int op; +}; + +/* __track_fn for directory entry updates. Called with ei->i_fc_lock. */ +static int __track_dentry_update(struct inode *inode, void *arg, bool update) +{ + struct ext4_fc_dentry_update *node; + struct ext4_inode_info *ei = EXT4_I(inode); + struct __track_dentry_update_args *dentry_update = + (struct __track_dentry_update_args *)arg; + struct dentry *dentry = dentry_update->dentry; + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + + mutex_unlock(&ei->i_fc_lock); + node = kmem_cache_alloc(ext4_fc_dentry_cachep, GFP_NOFS); + if (!node) { + ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_MEM); + mutex_lock(&ei->i_fc_lock); + return -ENOMEM; + } + + node->fcd_op = dentry_update->op; + node->fcd_parent = dentry->d_parent->d_inode->i_ino; + node->fcd_ino = inode->i_ino; + if (dentry->d_name.len > DNAME_INLINE_LEN) { + node->fcd_name.name = kmalloc(dentry->d_name.len, GFP_NOFS); + if (!node->fcd_name.name) { + kmem_cache_free(ext4_fc_dentry_cachep, node); + ext4_fc_mark_ineligible(inode->i_sb, + EXT4_FC_REASON_MEM); + mutex_lock(&ei->i_fc_lock); + return -ENOMEM; + } + memcpy((u8 *)node->fcd_name.name, dentry->d_name.name, + dentry->d_name.len); + } else { + memcpy(node->fcd_iname, dentry->d_name.name, + dentry->d_name.len); + node->fcd_name.name = node->fcd_iname; + } + node->fcd_name.len = dentry->d_name.len; + + spin_lock(&sbi->s_fc_lock); + if (sbi->s_mount_state & EXT4_FC_COMMITTING) + list_add_tail(&node->fcd_list, + &sbi->s_fc_dentry_q[FC_Q_STAGING]); + else + list_add_tail(&node->fcd_list, &sbi->s_fc_dentry_q[FC_Q_MAIN]); + spin_unlock(&sbi->s_fc_lock); + mutex_lock(&ei->i_fc_lock); + + return 0; +} + +void ext4_fc_track_unlink(struct inode *inode, struct dentry *dentry) +{ + struct __track_dentry_update_args args; + int ret; + + args.dentry = dentry; + args.op = EXT4_FC_TAG_UNLINK; + + ret = ext4_fc_track_template(inode, __track_dentry_update, + (void *)&args, 0); + trace_ext4_fc_track_unlink(inode, dentry, ret); +} + +void ext4_fc_track_link(struct inode *inode, struct dentry *dentry) +{ + struct __track_dentry_update_args args; + int ret; + + args.dentry = dentry; + args.op = EXT4_FC_TAG_LINK; + + ret = ext4_fc_track_template(inode, __track_dentry_update, + (void *)&args, 0); + trace_ext4_fc_track_link(inode, dentry, ret); +} + +void ext4_fc_track_create(struct inode *inode, struct dentry *dentry) +{ + struct __track_dentry_update_args args; + int ret; + + args.dentry = dentry; + args.op = EXT4_FC_TAG_CREAT; + + ret = ext4_fc_track_template(inode, __track_dentry_update, + (void *)&args, 0); + trace_ext4_fc_track_create(inode, dentry, ret); +} + +/* __track_fn for inode tracking */ +static int __track_inode(struct inode *inode, void *arg, bool update) +{ + if (update) + return -EEXIST; + + EXT4_I(inode)->i_fc_lblk_len = 0; + + return 0; +} + +void ext4_fc_track_inode(struct inode *inode) +{ + int ret; + + if 
(S_ISDIR(inode->i_mode)) + return; + + ret = ext4_fc_track_template(inode, __track_inode, NULL, 1); + trace_ext4_fc_track_inode(inode, ret); +} + +struct __track_range_args { + ext4_lblk_t start, end; +}; + +/* __track_fn for tracking data updates */ +static int __track_range(struct inode *inode, void *arg, bool update) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + ext4_lblk_t oldstart; + struct __track_range_args *__arg = + (struct __track_range_args *)arg; + + if (inode->i_ino < EXT4_FIRST_INO(inode->i_sb)) { + ext4_debug("Special inode %ld being modified\n", inode->i_ino); + return -ECANCELED; + } + + oldstart = ei->i_fc_lblk_start; + + if (update && ei->i_fc_lblk_len > 0) { + ei->i_fc_lblk_start = min(ei->i_fc_lblk_start, __arg->start); + ei->i_fc_lblk_len = + max(oldstart + ei->i_fc_lblk_len - 1, __arg->end) - + ei->i_fc_lblk_start + 1; + } else { + ei->i_fc_lblk_start = __arg->start; + ei->i_fc_lblk_len = __arg->end - __arg->start + 1; + } + + return 0; +} + +void ext4_fc_track_range(struct inode *inode, ext4_lblk_t start, + ext4_lblk_t end) +{ + struct __track_range_args args; + int ret; + + if (S_ISDIR(inode->i_mode)) + return; + + args.start = start; + args.end = end; + + ret = ext4_fc_track_template(inode, __track_range, &args, 1); + + trace_ext4_fc_track_range(inode, start, end, ret); +} + +static void ext4_fc_submit_bh(struct super_block *sb) +{ + int write_flags = REQ_SYNC; + struct buffer_head *bh = EXT4_SB(sb)->s_fc_bh; + + if (test_opt(sb, BARRIER)) + write_flags |= REQ_FUA | REQ_PREFLUSH; + lock_buffer(bh); + clear_buffer_dirty(bh); + set_buffer_uptodate(bh); + bh->b_end_io = ext4_end_buffer_io_sync; + submit_bh(REQ_OP_WRITE, write_flags, bh); + EXT4_SB(sb)->s_fc_bh = NULL; +} + +/* Ext4 commit path routines */ + +/* memzero and update CRC */ +static void *ext4_fc_memzero(struct super_block *sb, void *dst, int len, + u32 *crc) +{ + void *ret; + + ret = memset(dst, 0, len); + if (crc) + *crc = ext4_chksum(EXT4_SB(sb), *crc, dst, len); + return ret; +} + +/* + * Allocate len bytes on a fast commit buffer. + * + * During the commit time this function is used to manage fast commit + * block space. We don't split a fast commit log onto different + * blocks. So this function makes sure that if there's not enough space + * on the current block, the remaining space in the current block is + * marked as unused by adding EXT4_FC_TAG_PAD tag. In that case, + * new block is from jbd2 and CRC is updated to reflect the padding + * we added. + */ +static u8 *ext4_fc_reserve_space(struct super_block *sb, int len, u32 *crc) +{ + struct ext4_fc_tl *tl; + struct ext4_sb_info *sbi = EXT4_SB(sb); + struct buffer_head *bh; + int bsize = sbi->s_journal->j_blocksize; + int ret, off = sbi->s_fc_bytes % bsize; + int pad_len; + + /* + * After allocating len, we should have space at least for a 0 byte + * padding. + */ + if (len + sizeof(struct ext4_fc_tl) > bsize) + return NULL; + + if (bsize - off - 1 > len + sizeof(struct ext4_fc_tl)) { + /* + * Only allocate from current buffer if we have enough space for + * this request AND we have space to add a zero byte padding. 
+ */ + if (!sbi->s_fc_bh) { + ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh); + if (ret) + return NULL; + sbi->s_fc_bh = bh; + } + sbi->s_fc_bytes += len; + return sbi->s_fc_bh->b_data + off; + } + /* Need to add PAD tag */ + tl = (struct ext4_fc_tl *)(sbi->s_fc_bh->b_data + off); + tl->fc_tag = cpu_to_le16(EXT4_FC_TAG_PAD); + pad_len = bsize - off - 1 - sizeof(struct ext4_fc_tl); + tl->fc_len = cpu_to_le16(pad_len); + if (crc) + *crc = ext4_chksum(sbi, *crc, tl, sizeof(*tl)); + if (pad_len > 0) + ext4_fc_memzero(sb, tl + 1, pad_len, crc); + ext4_fc_submit_bh(sb); + + ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh); + if (ret) + return NULL; + sbi->s_fc_bh = bh; + sbi->s_fc_bytes = (sbi->s_fc_bytes / bsize + 1) * bsize + len; + return sbi->s_fc_bh->b_data; +} + +/* memcpy to fc reserved space and update CRC */ +static void *ext4_fc_memcpy(struct super_block *sb, void *dst, const void *src, + int len, u32 *crc) +{ + if (crc) + *crc = ext4_chksum(EXT4_SB(sb), *crc, src, len); + return memcpy(dst, src, len); +} + +/* + * Complete a fast commit by writing tail tag. + * + * Writing tail tag marks the end of a fast commit. In order to guarantee + * atomicity, after writing tail tag, even if there's space remaining + * in the block, next commit shouldn't use it. That's why tail tag + * has the length as that of the remaining space on the block. + */ +static int ext4_fc_write_tail(struct super_block *sb, u32 crc) +{ + struct ext4_sb_info *sbi = EXT4_SB(sb); + struct ext4_fc_tl tl; + struct ext4_fc_tail tail; + int off, bsize = sbi->s_journal->j_blocksize; + u8 *dst; + + /* + * ext4_fc_reserve_space takes care of allocating an extra block if + * there's no enough space on this block for accommodating this tail. + */ + dst = ext4_fc_reserve_space(sb, sizeof(tl) + sizeof(tail), &crc); + if (!dst) + return -ENOSPC; + + off = sbi->s_fc_bytes % bsize; + + tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_TAIL); + tl.fc_len = cpu_to_le16(bsize - off - 1 + sizeof(struct ext4_fc_tail)); + sbi->s_fc_bytes = round_up(sbi->s_fc_bytes, bsize); + + ext4_fc_memcpy(sb, dst, &tl, sizeof(tl), &crc); + dst += sizeof(tl); + tail.fc_tid = cpu_to_le32(sbi->s_journal->j_running_transaction->t_tid); + ext4_fc_memcpy(sb, dst, &tail.fc_tid, sizeof(tail.fc_tid), &crc); + dst += sizeof(tail.fc_tid); + tail.fc_crc = cpu_to_le32(crc); + ext4_fc_memcpy(sb, dst, &tail.fc_crc, sizeof(tail.fc_crc), NULL); + + ext4_fc_submit_bh(sb); + + return 0; +} + +/* + * Adds tag, length, value and updates CRC. Returns true if tlv was added. + * Returns false if there's not enough space. + */ +static bool ext4_fc_add_tlv(struct super_block *sb, u16 tag, u16 len, u8 *val, + u32 *crc) +{ + struct ext4_fc_tl tl; + u8 *dst; + + dst = ext4_fc_reserve_space(sb, sizeof(tl) + len, crc); + if (!dst) + return false; + + tl.fc_tag = cpu_to_le16(tag); + tl.fc_len = cpu_to_le16(len); + + ext4_fc_memcpy(sb, dst, &tl, sizeof(tl), crc); + ext4_fc_memcpy(sb, dst + sizeof(tl), val, len, crc); + + return true; +} + +/* Same as above, but adds dentry tlv. 
*/ +static bool ext4_fc_add_dentry_tlv(struct super_block *sb, u16 tag, + int parent_ino, int ino, int dlen, + const unsigned char *dname, + u32 *crc) +{ + struct ext4_fc_dentry_info fcd; + struct ext4_fc_tl tl; + u8 *dst = ext4_fc_reserve_space(sb, sizeof(tl) + sizeof(fcd) + dlen, + crc); + + if (!dst) + return false; + + fcd.fc_parent_ino = cpu_to_le32(parent_ino); + fcd.fc_ino = cpu_to_le32(ino); + tl.fc_tag = cpu_to_le16(tag); + tl.fc_len = cpu_to_le16(sizeof(fcd) + dlen); + ext4_fc_memcpy(sb, dst, &tl, sizeof(tl), crc); + dst += sizeof(tl); + ext4_fc_memcpy(sb, dst, &fcd, sizeof(fcd), crc); + dst += sizeof(fcd); + ext4_fc_memcpy(sb, dst, dname, dlen, crc); + dst += dlen; + + return true; +} + +/* + * Writes inode in the fast commit space under TLV with tag @tag. + * Returns 0 on success, error on failure. + */ +static int ext4_fc_write_inode(struct inode *inode, u32 *crc) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + int inode_len = EXT4_GOOD_OLD_INODE_SIZE; + int ret; + struct ext4_iloc iloc; + struct ext4_fc_inode fc_inode; + struct ext4_fc_tl tl; + u8 *dst; + + ret = ext4_get_inode_loc(inode, &iloc); + if (ret) + return ret; + + if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) + inode_len += ei->i_extra_isize; + + fc_inode.fc_ino = cpu_to_le32(inode->i_ino); + tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_INODE); + tl.fc_len = cpu_to_le16(inode_len + sizeof(fc_inode.fc_ino)); + + dst = ext4_fc_reserve_space(inode->i_sb, + sizeof(tl) + inode_len + sizeof(fc_inode.fc_ino), crc); + if (!dst) + return -ECANCELED; + + if (!ext4_fc_memcpy(inode->i_sb, dst, &tl, sizeof(tl), crc)) + return -ECANCELED; + dst += sizeof(tl); + if (!ext4_fc_memcpy(inode->i_sb, dst, &fc_inode, sizeof(fc_inode), crc)) + return -ECANCELED; + dst += sizeof(fc_inode); + if (!ext4_fc_memcpy(inode->i_sb, dst, (u8 *)ext4_raw_inode(&iloc), + inode_len, crc)) + return -ECANCELED; + + return 0; +} + +/* + * Writes updated data ranges for the inode in question. Updates CRC. + * Returns 0 on success, error otherwise. 
+ */ +static int ext4_fc_write_inode_data(struct inode *inode, u32 *crc) +{ + ext4_lblk_t old_blk_size, cur_lblk_off, new_blk_size; + struct ext4_inode_info *ei = EXT4_I(inode); + struct ext4_map_blocks map; + struct ext4_fc_add_range fc_ext; + struct ext4_fc_del_range lrange; + struct ext4_extent *ex; + int ret; + + mutex_lock(&ei->i_fc_lock); + if (ei->i_fc_lblk_len == 0) { + mutex_unlock(&ei->i_fc_lock); + return 0; + } + old_blk_size = ei->i_fc_lblk_start; + new_blk_size = ei->i_fc_lblk_start + ei->i_fc_lblk_len - 1; + ei->i_fc_lblk_len = 0; + mutex_unlock(&ei->i_fc_lock); + + cur_lblk_off = old_blk_size; + jbd_debug(1, "%s: will try writing %d to %d for inode %ld\n", + __func__, cur_lblk_off, new_blk_size, inode->i_ino); + + while (cur_lblk_off <= new_blk_size) { + map.m_lblk = cur_lblk_off; + map.m_len = new_blk_size - cur_lblk_off + 1; + ret = ext4_map_blocks(NULL, inode, &map, 0); + if (ret < 0) + return -ECANCELED; + + if (map.m_len == 0) { + cur_lblk_off++; + continue; + } + + if (ret == 0) { + lrange.fc_ino = cpu_to_le32(inode->i_ino); + lrange.fc_lblk = cpu_to_le32(map.m_lblk); + lrange.fc_len = cpu_to_le32(map.m_len); + if (!ext4_fc_add_tlv(inode->i_sb, EXT4_FC_TAG_DEL_RANGE, + sizeof(lrange), (u8 *)&lrange, crc)) + return -ENOSPC; + } else { + fc_ext.fc_ino = cpu_to_le32(inode->i_ino); + ex = (struct ext4_extent *)&fc_ext.fc_ex; + ex->ee_block = cpu_to_le32(map.m_lblk); + ex->ee_len = cpu_to_le16(map.m_len); + ext4_ext_store_pblock(ex, map.m_pblk); + if (map.m_flags & EXT4_MAP_UNWRITTEN) + ext4_ext_mark_unwritten(ex); + else + ext4_ext_mark_initialized(ex); + if (!ext4_fc_add_tlv(inode->i_sb, EXT4_FC_TAG_ADD_RANGE, + sizeof(fc_ext), (u8 *)&fc_ext, crc)) + return -ENOSPC; + } + + cur_lblk_off += map.m_len; + } + + return 0; +} + + +/* Submit data for all the fast commit inodes */ +static int ext4_fc_submit_inode_data_all(journal_t *journal) +{ + struct super_block *sb = (struct super_block *)(journal->j_private); + struct ext4_sb_info *sbi = EXT4_SB(sb); + struct ext4_inode_info *ei; + struct list_head *pos; + int ret = 0; + + spin_lock(&sbi->s_fc_lock); + sbi->s_mount_state |= EXT4_FC_COMMITTING; + list_for_each(pos, &sbi->s_fc_q[FC_Q_MAIN]) { + ei = list_entry(pos, struct ext4_inode_info, i_fc_list); + ext4_set_inode_state(&ei->vfs_inode, EXT4_STATE_FC_COMMITTING); + while (atomic_read(&ei->i_fc_updates)) { + DEFINE_WAIT(wait); + + prepare_to_wait(&ei->i_fc_wait, &wait, + TASK_UNINTERRUPTIBLE); + if (atomic_read(&ei->i_fc_updates)) { + spin_unlock(&sbi->s_fc_lock); + schedule(); + spin_lock(&sbi->s_fc_lock); + } + finish_wait(&ei->i_fc_wait, &wait); + } + spin_unlock(&sbi->s_fc_lock); + ret = jbd2_submit_inode_data(ei->jinode); + if (ret) + return ret; + spin_lock(&sbi->s_fc_lock); + } + spin_unlock(&sbi->s_fc_lock); + + return ret; +} + +/* Wait for completion of data for all the fast commit inodes */ +static int ext4_fc_wait_inode_data_all(journal_t *journal) +{ + struct super_block *sb = (struct super_block *)(journal->j_private); + struct ext4_sb_info *sbi = EXT4_SB(sb); + struct ext4_inode_info *pos, *n; + int ret = 0; + + spin_lock(&sbi->s_fc_lock); + list_for_each_entry_safe(pos, n, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) { + if (!ext4_test_inode_state(&pos->vfs_inode, + EXT4_STATE_FC_COMMITTING)) + continue; + spin_unlock(&sbi->s_fc_lock); + + ret = jbd2_wait_inode_data(journal, pos->jinode); + if (ret) + return ret; + spin_lock(&sbi->s_fc_lock); + } + spin_unlock(&sbi->s_fc_lock); + + return 0; +} + +/* Commit all the directory entry updates */ +static int 
ext4_fc_commit_dentry_updates(journal_t *journal, u32 *crc) +{ + struct super_block *sb = (struct super_block *)(journal->j_private); + struct ext4_sb_info *sbi = EXT4_SB(sb); + struct ext4_fc_dentry_update *fc_dentry; + struct inode *inode; + struct list_head *pos, *n, *fcd_pos, *fcd_n; + struct ext4_inode_info *ei; + int ret; + + if (list_empty(&sbi->s_fc_dentry_q[FC_Q_MAIN])) + return 0; + list_for_each_safe(fcd_pos, fcd_n, &sbi->s_fc_dentry_q[FC_Q_MAIN]) { + fc_dentry = list_entry(fcd_pos, struct ext4_fc_dentry_update, + fcd_list); + if (fc_dentry->fcd_op != EXT4_FC_TAG_CREAT) { + spin_unlock(&sbi->s_fc_lock); + if (!ext4_fc_add_dentry_tlv( + sb, fc_dentry->fcd_op, + fc_dentry->fcd_parent, fc_dentry->fcd_ino, + fc_dentry->fcd_name.len, + fc_dentry->fcd_name.name, crc)) { + ret = -ENOSPC; + goto lock_and_exit; + } + spin_lock(&sbi->s_fc_lock); + continue; + } + + inode = NULL; + list_for_each_safe(pos, n, &sbi->s_fc_q[FC_Q_MAIN]) { + ei = list_entry(pos, struct ext4_inode_info, i_fc_list); + if (ei->vfs_inode.i_ino == fc_dentry->fcd_ino) { + inode = &ei->vfs_inode; + break; + } + } + /* + * If we don't find inode in our list, then it was deleted, + * in which case, we don't need to record it's create tag. + */ + if (!inode) + continue; + spin_unlock(&sbi->s_fc_lock); + + /* + * We first write the inode and then the create dirent. This + * allows the recovery code to create an unnamed inode first + * and then link it to a directory entry. This allows us + * to use namei.c routines almost as is and simplifies + * the recovery code. + */ + ret = ext4_fc_write_inode(inode, crc); + if (ret) + goto lock_and_exit; + + ret = ext4_fc_write_inode_data(inode, crc); + if (ret) + goto lock_and_exit; + + if (!ext4_fc_add_dentry_tlv( + sb, fc_dentry->fcd_op, + fc_dentry->fcd_parent, fc_dentry->fcd_ino, + fc_dentry->fcd_name.len, + fc_dentry->fcd_name.name, crc)) { + spin_lock(&sbi->s_fc_lock); + ret = -ENOSPC; + goto lock_and_exit; + } + + spin_lock(&sbi->s_fc_lock); + } + return 0; +lock_and_exit: + spin_lock(&sbi->s_fc_lock); + return ret; +} + +static int ext4_fc_perform_commit(journal_t *journal) +{ + struct super_block *sb = (struct super_block *)(journal->j_private); + struct ext4_sb_info *sbi = EXT4_SB(sb); + struct ext4_inode_info *iter; + struct ext4_fc_head head; + struct list_head *pos; + struct inode *inode; + struct blk_plug plug; + int ret = 0; + u32 crc = 0; + + ret = ext4_fc_submit_inode_data_all(journal); + if (ret) + return ret; + + ret = ext4_fc_wait_inode_data_all(journal); + if (ret) + return ret; + + blk_start_plug(&plug); + if (sbi->s_fc_bytes == 0) { + /* + * Add a head tag only if this is the first fast commit + * in this TID. 
+ */ + head.fc_features = cpu_to_le32(EXT4_FC_SUPPORTED_FEATURES); + head.fc_tid = cpu_to_le32( + sbi->s_journal->j_running_transaction->t_tid); + if (!ext4_fc_add_tlv(sb, EXT4_FC_TAG_HEAD, sizeof(head), + (u8 *)&head, &crc)) + goto out; + } + + spin_lock(&sbi->s_fc_lock); + ret = ext4_fc_commit_dentry_updates(journal, &crc); + if (ret) { + spin_unlock(&sbi->s_fc_lock); + goto out; + } + + list_for_each(pos, &sbi->s_fc_q[FC_Q_MAIN]) { + iter = list_entry(pos, struct ext4_inode_info, i_fc_list); + inode = &iter->vfs_inode; + if (!ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING)) + continue; + + spin_unlock(&sbi->s_fc_lock); + ret = ext4_fc_write_inode_data(inode, &crc); + if (ret) + goto out; + ret = ext4_fc_write_inode(inode, &crc); + if (ret) + goto out; + spin_lock(&sbi->s_fc_lock); + EXT4_I(inode)->i_fc_committed_subtid = + atomic_read(&sbi->s_fc_subtid); + } + spin_unlock(&sbi->s_fc_lock); + + ret = ext4_fc_write_tail(sb, crc); + +out: + blk_finish_plug(&plug); + return ret; +} + +/* + * The main commit entry point. Performs a fast commit for transaction + * commit_tid if needed. If it's not possible to perform a fast commit + * due to various reasons, we fall back to full commit. Returns 0 + * on success, error otherwise. + */ +int ext4_fc_commit(journal_t *journal, tid_t commit_tid) +{ + struct super_block *sb = (struct super_block *)(journal->j_private); + struct ext4_sb_info *sbi = EXT4_SB(sb); + int nblks = 0, ret, bsize = journal->j_blocksize; + int subtid = atomic_read(&sbi->s_fc_subtid); + int reason = EXT4_FC_REASON_OK, fc_bufs_before = 0; + ktime_t start_time, commit_time; + + trace_ext4_fc_commit_start(sb); + + start_time = ktime_get(); + + if (!test_opt2(sb, JOURNAL_FAST_COMMIT) || + (ext4_fc_is_ineligible(sb))) { + reason = EXT4_FC_REASON_INELIGIBLE; + goto out; + } + +restart_fc: + ret = jbd2_fc_begin_commit(journal, commit_tid); + if (ret == -EALREADY) { + /* There was an ongoing commit, check if we need to restart */ + if (atomic_read(&sbi->s_fc_subtid) <= subtid && + commit_tid > journal->j_commit_sequence) + goto restart_fc; + reason = EXT4_FC_REASON_ALREADY_COMMITTED; + goto out; + } else if (ret) { + sbi->s_fc_stats.fc_ineligible_reason_count[EXT4_FC_COMMIT_FAILED]++; + reason = EXT4_FC_REASON_FC_START_FAILED; + goto out; + } + + fc_bufs_before = (sbi->s_fc_bytes + bsize - 1) / bsize; + ret = ext4_fc_perform_commit(journal); + if (ret < 0) { + sbi->s_fc_stats.fc_ineligible_reason_count[EXT4_FC_COMMIT_FAILED]++; + reason = EXT4_FC_REASON_FC_FAILED; + goto out; + } + nblks = (sbi->s_fc_bytes + bsize - 1) / bsize - fc_bufs_before; + ret = jbd2_fc_wait_bufs(journal, nblks); + if (ret < 0) { + sbi->s_fc_stats.fc_ineligible_reason_count[EXT4_FC_COMMIT_FAILED]++; + reason = EXT4_FC_REASON_FC_FAILED; + goto out; + } + atomic_inc(&sbi->s_fc_subtid); + jbd2_fc_end_commit(journal); +out: + /* Has any ineligible update happened since we started? */ + if (reason == EXT4_FC_REASON_OK && ext4_fc_is_ineligible(sb)) { + sbi->s_fc_stats.fc_ineligible_reason_count[EXT4_FC_COMMIT_FAILED]++; + reason = EXT4_FC_REASON_INELIGIBLE; + } + + spin_lock(&sbi->s_fc_lock); + if (reason != EXT4_FC_REASON_OK && + reason != EXT4_FC_REASON_ALREADY_COMMITTED) { + sbi->s_fc_stats.fc_ineligible_commits++; + } else { + sbi->s_fc_stats.fc_num_commits++; + sbi->s_fc_stats.fc_numblks += nblks; + } + spin_unlock(&sbi->s_fc_lock); + nblks = (reason == EXT4_FC_REASON_OK) ? 
nblks : 0; + trace_ext4_fc_commit_stop(sb, nblks, reason); + commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time)); + /* + * weight the commit time higher than the average time so we don't + * react too strongly to vast changes in the commit time + */ + if (likely(sbi->s_fc_avg_commit_time)) + sbi->s_fc_avg_commit_time = (commit_time + + sbi->s_fc_avg_commit_time * 3) / 4; + else + sbi->s_fc_avg_commit_time = commit_time; + jbd_debug(1, + "Fast commit ended with blks = %d, reason = %d, subtid - %d", + nblks, reason, subtid); + if (reason == EXT4_FC_REASON_FC_FAILED) + return jbd2_fc_end_commit_fallback(journal, commit_tid); + if (reason == EXT4_FC_REASON_FC_START_FAILED || + reason == EXT4_FC_REASON_INELIGIBLE) + return jbd2_complete_transaction(journal, commit_tid); + return 0; +} + /* * Fast commit cleanup routine. This is called after every fast commit and * full commit. full is true if we are called after a full commit. */ static void ext4_fc_cleanup(journal_t *journal, int full) { + struct super_block *sb = journal->j_private; + struct ext4_sb_info *sbi = EXT4_SB(sb); + struct ext4_inode_info *iter; + struct ext4_fc_dentry_update *fc_dentry; + struct list_head *pos, *n; + + if (full && sbi->s_fc_bh) + sbi->s_fc_bh = NULL; + + jbd2_fc_release_bufs(journal); + + spin_lock(&sbi->s_fc_lock); + list_for_each_safe(pos, n, &sbi->s_fc_q[FC_Q_MAIN]) { + iter = list_entry(pos, struct ext4_inode_info, i_fc_list); + list_del_init(&iter->i_fc_list); + ext4_clear_inode_state(&iter->vfs_inode, + EXT4_STATE_FC_COMMITTING); + ext4_fc_reset_inode(&iter->vfs_inode); + /* Make sure EXT4_STATE_FC_COMMITTING bit is clear */ + smp_mb(); +#if (BITS_PER_LONG < 64) + wake_up_bit(&iter->i_state_flags, EXT4_STATE_FC_COMMITTING); +#else + wake_up_bit(&iter->i_flags, EXT4_STATE_FC_COMMITTING); +#endif + } + + while (!list_empty(&sbi->s_fc_dentry_q[FC_Q_MAIN])) { + fc_dentry = list_first_entry(&sbi->s_fc_dentry_q[FC_Q_MAIN], + struct ext4_fc_dentry_update, + fcd_list); + list_del_init(&fc_dentry->fcd_list); + spin_unlock(&sbi->s_fc_lock); + + if (fc_dentry->fcd_name.name && + fc_dentry->fcd_name.len > DNAME_INLINE_LEN) + kfree(fc_dentry->fcd_name.name); + kmem_cache_free(ext4_fc_dentry_cachep, fc_dentry); + spin_lock(&sbi->s_fc_lock); + } + + list_splice_init(&sbi->s_fc_dentry_q[FC_Q_STAGING], + &sbi->s_fc_dentry_q[FC_Q_MAIN]); + list_splice_init(&sbi->s_fc_q[FC_Q_STAGING], + &sbi->s_fc_q[FC_Q_STAGING]); + + sbi->s_mount_state &= ~EXT4_FC_COMMITTING; + sbi->s_mount_state &= ~EXT4_FC_INELIGIBLE; + + if (full) + sbi->s_fc_bytes = 0; + spin_unlock(&sbi->s_fc_lock); + trace_ext4_fc_stats(sb); } void ext4_fc_init(struct super_block *sb, journal_t *journal) @@ -26,3 +1198,14 @@ void ext4_fc_init(struct super_block *sb, journal_t *journal) ext4_clear_feature_fast_commit(sb); } } + +int __init ext4_fc_init_dentry_cache(void) +{ + ext4_fc_dentry_cachep = KMEM_CACHE(ext4_fc_dentry_update, + SLAB_RECLAIM_ACCOUNT); + + if (ext4_fc_dentry_cachep == NULL) + return -ENOMEM; + + return 0; +} diff --git a/fs/ext4/fast_commit.h b/fs/ext4/fast_commit.h index 8362bf5e6e00..560bc9ca8c79 100644 --- a/fs/ext4/fast_commit.h +++ b/fs/ext4/fast_commit.h @@ -6,4 +6,114 @@ /* Number of blocks in journal area to allocate for fast commits */ #define EXT4_NUM_FC_BLKS 256 +/* Fast commit tags */ +#define EXT4_FC_TAG_ADD_RANGE 0x0001 +#define EXT4_FC_TAG_DEL_RANGE 0x0002 +#define EXT4_FC_TAG_CREAT 0x0003 +#define EXT4_FC_TAG_LINK 0x0004 +#define EXT4_FC_TAG_UNLINK 0x0005 +#define EXT4_FC_TAG_INODE 0x0006 +#define EXT4_FC_TAG_PAD 0x0007 
+#define EXT4_FC_TAG_TAIL 0x0008 +#define EXT4_FC_TAG_HEAD 0x0009 + +#define EXT4_FC_SUPPORTED_FEATURES 0x0 + +/* On disk fast commit tlv value structures */ + +/* Fast commit on disk tag length structure */ +struct ext4_fc_tl { + __le16 fc_tag; + __le16 fc_len; +}; + +/* Value structure for tag EXT4_FC_TAG_HEAD. */ +struct ext4_fc_head { + __le32 fc_features; + __le32 fc_tid; +}; + +/* Value structure for EXT4_FC_TAG_ADD_RANGE. */ +struct ext4_fc_add_range { + __le32 fc_ino; + __u8 fc_ex[12]; +}; + +/* Value structure for tag EXT4_FC_TAG_DEL_RANGE. */ +struct ext4_fc_del_range { + __le32 fc_ino; + __le32 fc_lblk; + __le32 fc_len; +}; + +/* + * This is the value structure for tags EXT4_FC_TAG_CREAT, EXT4_FC_TAG_LINK + * and EXT4_FC_TAG_UNLINK. + */ +struct ext4_fc_dentry_info { + __le32 fc_parent_ino; + __le32 fc_ino; + u8 fc_dname[0]; +}; + +/* Value structure for EXT4_FC_TAG_INODE and EXT4_FC_TAG_INODE_PARTIAL. */ +struct ext4_fc_inode { + __le32 fc_ino; + __u8 fc_raw_inode[0]; +}; + +/* Value structure for tag EXT4_FC_TAG_TAIL. */ +struct ext4_fc_tail { + __le32 fc_tid; + __le32 fc_crc; +}; + +/* + * In memory list of dentry updates that are performed on the file + * system used by fast commit code. + */ +struct ext4_fc_dentry_update { + int fcd_op; /* Type of update create / unlink / link */ + int fcd_parent; /* Parent inode number */ + int fcd_ino; /* Inode number */ + struct qstr fcd_name; /* Dirent name */ + unsigned char fcd_iname[DNAME_INLINE_LEN]; /* Dirent name string */ + struct list_head fcd_list; +}; + +/* + * Fast commit reason codes + */ +enum { + /* + * Commit status codes: + */ + EXT4_FC_REASON_OK = 0, + EXT4_FC_REASON_INELIGIBLE, + EXT4_FC_REASON_ALREADY_COMMITTED, + EXT4_FC_REASON_FC_START_FAILED, + EXT4_FC_REASON_FC_FAILED, + + /* + * Fast commit ineligiblity reasons: + */ + EXT4_FC_REASON_XATTR = 0, + EXT4_FC_REASON_CROSS_RENAME, + EXT4_FC_REASON_JOURNAL_FLAG_CHANGE, + EXT4_FC_REASON_MEM, + EXT4_FC_REASON_SWAP_BOOT, + EXT4_FC_REASON_RESIZE, + EXT4_FC_REASON_RENAME_DIR, + EXT4_FC_REASON_FALLOC_RANGE, + EXT4_FC_COMMIT_FAILED, + EXT4_FC_REASON_MAX +}; + +struct ext4_fc_stats { + unsigned int fc_ineligible_reason_count[EXT4_FC_REASON_MAX]; + unsigned long fc_num_commits; + unsigned long fc_ineligible_commits; + unsigned long fc_numblks; +}; + #endif /* __FAST_COMMIT_H__ */ diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 02ffbd29d6b0..d85412d12e3a 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -260,6 +260,7 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb, if (iocb->ki_flags & IOCB_NOWAIT) return -EOPNOTSUPP; + ext4_fc_start_update(inode); inode_lock(inode); ret = ext4_write_checks(iocb, from); if (ret <= 0) @@ -271,6 +272,7 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb, out: inode_unlock(inode); + ext4_fc_stop_update(inode); if (likely(ret > 0)) { iocb->ki_pos += ret; ret = generic_write_sync(iocb, ret); @@ -534,7 +536,9 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from) goto out; } + ext4_fc_start_update(inode); ret = ext4_orphan_add(handle, inode); + ext4_fc_stop_update(inode); if (ret) { ext4_journal_stop(handle); goto out; @@ -656,8 +660,8 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) #endif if (iocb->ki_flags & IOCB_DIRECT) return ext4_dio_write_iter(iocb, from); - - return ext4_buffered_write_iter(iocb, from); + else + return ext4_buffered_write_iter(iocb, from); } #ifdef CONFIG_FS_DAX @@ -757,6 +761,7 @@ static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma) if 
(!daxdev_mapping_supported(vma, dax_dev)) return -EOPNOTSUPP; + ext4_fc_start_update(inode); file_accessed(file); if (IS_DAX(file_inode(file))) { vma->vm_ops = &ext4_dax_vm_ops; @@ -764,6 +769,7 @@ static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma) } else { vma->vm_ops = &ext4_file_vm_ops; } + ext4_fc_stop_update(inode); return 0; } diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index 6476994d9861..81a545fd14a3 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c @@ -112,7 +112,7 @@ static int ext4_fsync_journal(struct inode *inode, bool datasync, !jbd2_trans_will_send_data_barrier(journal, commit_tid)) *needs_barrier = true; - return jbd2_complete_transaction(journal, commit_tid); + return ext4_fc_commit(journal, commit_tid); } /* diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 09096fe6170e..7da82b6fdb74 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -729,6 +729,8 @@ out_sem: if (ret) return ret; } + ext4_fc_track_range(inode, map->m_lblk, + map->m_lblk + map->m_len - 1); } if (retval < 0) @@ -3300,9 +3302,14 @@ static bool ext4_inode_datasync_dirty(struct inode *inode) { journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; - if (journal) - return !jbd2_transaction_committed(journal, - EXT4_I(inode)->i_datasync_tid); + if (journal) { + if (jbd2_transaction_committed(journal, + EXT4_I(inode)->i_datasync_tid)) + return true; + return atomic_read(&EXT4_SB(inode->i_sb)->s_fc_subtid) >= + EXT4_I(inode)->i_fc_committed_subtid; + } + /* Any metadata buffers to write? */ if (!list_empty(&inode->i_mapping->private_list)) return true; @@ -4097,6 +4104,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) up_write(&EXT4_I(inode)->i_data_sem); } + ext4_fc_track_range(inode, first_block, stop_block); if (IS_SYNC(inode)) ext4_handle_sync(handle); @@ -4716,6 +4724,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, for (block = 0; block < EXT4_N_BLOCKS; block++) ei->i_data[block] = raw_inode->i_block[block]; INIT_LIST_HEAD(&ei->i_orphan); + ext4_fc_init_inode(&ei->vfs_inode); /* * Set transaction id's of transactions that have to be committed @@ -5162,7 +5171,7 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync) return 0; - err = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal, + err = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal, EXT4_I(inode)->i_sync_tid); } else { struct ext4_iloc iloc; @@ -5291,6 +5300,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) if (error) return error; } + ext4_fc_start_update(inode); if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) || (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { handle_t *handle; @@ -5314,6 +5324,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) if (error) { ext4_journal_stop(handle); + ext4_fc_stop_update(inode); return error; } /* Update corresponding info in inode so that everything is in @@ -5336,11 +5347,15 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); - if (attr->ia_size > sbi->s_bitmap_maxbytes) + if (attr->ia_size > sbi->s_bitmap_maxbytes) { + ext4_fc_stop_update(inode); return -EFBIG; + } } - if (!S_ISREG(inode->i_mode)) + if (!S_ISREG(inode->i_mode)) { + ext4_fc_stop_update(inode); return -EINVAL; + } if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size) inode_inc_iversion(inode); @@ -5364,7 
+5379,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) rc = ext4_break_layouts(inode); if (rc) { up_write(&EXT4_I(inode)->i_mmap_sem); - return rc; + goto err_out; } if (attr->ia_size != inode->i_size) { @@ -5385,6 +5400,21 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) inode->i_mtime = current_time(inode); inode->i_ctime = inode->i_mtime; } + + if (shrink) + ext4_fc_track_range(inode, + (attr->ia_size > 0 ? attr->ia_size - 1 : 0) >> + inode->i_sb->s_blocksize_bits, + (oldsize > 0 ? oldsize - 1 : 0) >> + inode->i_sb->s_blocksize_bits); + else + ext4_fc_track_range( + inode, + (oldsize > 0 ? oldsize - 1 : oldsize) >> + inode->i_sb->s_blocksize_bits, + (attr->ia_size > 0 ? attr->ia_size - 1 : 0) >> + inode->i_sb->s_blocksize_bits); + down_write(&EXT4_I(inode)->i_data_sem); EXT4_I(inode)->i_disksize = attr->ia_size; rc = ext4_mark_inode_dirty(handle, inode); @@ -5443,9 +5473,11 @@ out_mmap_sem: rc = posix_acl_chmod(inode, inode->i_mode); err_out: - ext4_std_error(inode->i_sb, error); + if (error) + ext4_std_error(inode->i_sb, error); if (!error) error = rc; + ext4_fc_stop_update(inode); return error; } @@ -5627,6 +5659,8 @@ int ext4_mark_iloc_dirty(handle_t *handle, put_bh(iloc->bh); return -EIO; } + ext4_fc_track_inode(inode); + if (IS_I_VERSION(inode)) inode_inc_iversion(inode); @@ -5950,6 +5984,8 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val) if (IS_ERR(handle)) return PTR_ERR(handle); + ext4_fc_mark_ineligible(inode->i_sb, + EXT4_FC_REASON_JOURNAL_FLAG_CHANGE); err = ext4_mark_inode_dirty(handle, inode); ext4_handle_sync(handle); ext4_journal_stop(handle); diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 36eca3bc036a..d2f8f50deef6 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -165,6 +165,7 @@ static long swap_inode_boot_loader(struct super_block *sb, err = -EINVAL; goto err_out; } + ext4_fc_start_ineligible(sb, EXT4_FC_REASON_SWAP_BOOT); /* Protect extent tree against block allocations via delalloc */ ext4_double_down_write_data_sem(inode, inode_bl); @@ -247,6 +248,7 @@ revert: err_out1: ext4_journal_stop(handle); + ext4_fc_stop_ineligible(sb); ext4_double_up_write_data_sem(inode, inode_bl); err_out: @@ -807,7 +809,7 @@ static int ext4_ioctl_get_es_cache(struct file *filp, unsigned long arg) return error; } -long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +static long __ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(filp); struct super_block *sb = inode->i_sb; @@ -1074,6 +1076,7 @@ mext_out: err = ext4_resize_fs(sb, n_blocks_count); if (EXT4_SB(sb)->s_journal) { + ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_RESIZE); jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal); err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal); jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); @@ -1308,6 +1311,17 @@ out: } } +long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + long ret; + + ext4_fc_start_update(file_inode(filp)); + ret = __ext4_ioctl(filp, cmd, arg); + ext4_fc_stop_update(file_inode(filp)); + + return ret; +} + #ifdef CONFIG_COMPAT long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 701ef9fa21c3..fd7be1435f2d 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -2611,7 +2611,7 @@ static int ext4_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { handle_t *handle; - struct inode *inode; + struct inode *inode, *inode_save; int 
err, credits, retries = 0; err = dquot_initialize(dir); @@ -2629,7 +2629,11 @@ retry: inode->i_op = &ext4_file_inode_operations; inode->i_fop = &ext4_file_operations; ext4_set_aops(inode); + inode_save = inode; + ihold(inode_save); err = ext4_add_nondir(handle, dentry, &inode); + ext4_fc_track_create(inode_save, dentry); + iput(inode_save); } if (handle) ext4_journal_stop(handle); @@ -2644,7 +2648,7 @@ static int ext4_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { handle_t *handle; - struct inode *inode; + struct inode *inode, *inode_save; int err, credits, retries = 0; err = dquot_initialize(dir); @@ -2661,7 +2665,12 @@ retry: if (!IS_ERR(inode)) { init_special_inode(inode, inode->i_mode, rdev); inode->i_op = &ext4_special_inode_operations; + inode_save = inode; + ihold(inode_save); err = ext4_add_nondir(handle, dentry, &inode); + if (!err) + ext4_fc_track_create(inode_save, dentry); + iput(inode_save); } if (handle) ext4_journal_stop(handle); @@ -2825,7 +2834,9 @@ out_clear_inode: iput(inode); goto out_retry; } + ext4_fc_track_create(inode, dentry); ext4_inc_count(dir); + ext4_update_dx_flag(dir); err = ext4_mark_inode_dirty(handle, dir); if (err) @@ -3165,6 +3176,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry) goto end_rmdir; ext4_dec_count(dir); ext4_update_dx_flag(dir); + ext4_fc_track_unlink(inode, dentry); retval = ext4_mark_inode_dirty(handle, dir); #ifdef CONFIG_UNICODE @@ -3251,6 +3263,8 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) inode->i_ctime = current_time(inode); retval = ext4_mark_inode_dirty(handle, inode); + if (!retval) + ext4_fc_track_unlink(d_inode(dentry), dentry); #ifdef CONFIG_UNICODE /* VFS negative dentries are incompatible with Encoding and * Case-insensitiveness. Eventually we'll want avoid @@ -3872,6 +3886,22 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, retval = ext4_mark_inode_dirty(handle, old.dir); if (unlikely(retval)) goto end_rename; + + if (S_ISDIR(old.inode->i_mode)) { + /* + * We disable fast commits here that's because the + * replay code is not yet capable of changing dot dot + * dirents in directories. 
+ */ + ext4_fc_mark_ineligible(old.inode->i_sb, + EXT4_FC_REASON_RENAME_DIR); + } else { + if (new.inode) + ext4_fc_track_unlink(new.inode, new.dentry); + ext4_fc_track_link(old.inode, new.dentry); + ext4_fc_track_unlink(old.inode, old.dentry); + } + if (new.inode) { retval = ext4_mark_inode_dirty(handle, new.inode); if (unlikely(retval)) @@ -4015,7 +4045,8 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry, retval = ext4_mark_inode_dirty(handle, new.inode); if (unlikely(retval)) goto end_rename; - + ext4_fc_mark_ineligible(new.inode->i_sb, + EXT4_FC_REASON_CROSS_RENAME); if (old.dir_bh) { retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino); if (retval) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 41da649ccaea..10c4df26d257 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1331,6 +1331,8 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) ei->i_datasync_tid = 0; atomic_set(&ei->i_unwritten, 0); INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work); + ext4_fc_init_inode(&ei->vfs_inode); + mutex_init(&ei->i_fc_lock); return &ei->vfs_inode; } @@ -1348,6 +1350,10 @@ static int ext4_drop_inode(struct inode *inode) static void ext4_free_in_core_inode(struct inode *inode) { fscrypt_free_inode(inode); + if (!list_empty(&(EXT4_I(inode)->i_fc_list))) { + pr_warn("%s: inode %ld still in fc list", + __func__, inode->i_ino); + } kmem_cache_free(ext4_inode_cachep, EXT4_I(inode)); } @@ -1373,6 +1379,7 @@ static void init_once(void *foo) init_rwsem(&ei->i_data_sem); init_rwsem(&ei->i_mmap_sem); inode_init_once(&ei->vfs_inode); + ext4_fc_init_inode(&ei->vfs_inode); } static int __init init_inodecache(void) @@ -1401,6 +1408,7 @@ static void destroy_inodecache(void) void ext4_clear_inode(struct inode *inode) { + ext4_fc_del(inode); invalidate_inode_buffers(inode); clear_inode(inode); ext4_discard_preallocations(inode, 0); @@ -4744,6 +4752,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ mutex_init(&sbi->s_orphan_lock); + /* Initialize fast commit stuff */ + atomic_set(&sbi->s_fc_subtid, 0); + atomic_set(&sbi->s_fc_ineligible_updates, 0); + INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_MAIN]); + INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_STAGING]); + INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_MAIN]); + INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_STAGING]); + sbi->s_fc_bytes = 0; + sbi->s_mount_state &= ~EXT4_FC_INELIGIBLE; + sbi->s_mount_state &= ~EXT4_FC_COMMITTING; + spin_lock_init(&sbi->s_fc_lock); + memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats)); + sb->s_root = NULL; needs_recovery = (es->s_last_orphan != 0 || @@ -6515,6 +6536,10 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type, brelse(bh); out: if (inode->i_size < off + len) { + ext4_fc_track_range(inode, + (inode->i_size > 0 ? 
inode->i_size - 1 : 0) + >> inode->i_sb->s_blocksize_bits, + (off + len) >> inode->i_sb->s_blocksize_bits); i_size_write(inode, off + len); EXT4_I(inode)->i_disksize = inode->i_size; err2 = ext4_mark_inode_dirty(handle, inode); @@ -6643,6 +6668,11 @@ static int __init ext4_init_fs(void) err = init_inodecache(); if (err) goto out1; + + err = ext4_fc_init_dentry_cache(); + if (err) + goto out05; + register_as_ext3(); register_as_ext2(); err = register_filesystem(&ext4_fs_type); @@ -6653,6 +6683,7 @@ static int __init ext4_init_fs(void) out: unregister_as_ext2(); unregister_as_ext3(); +out05: destroy_inodecache(); out1: ext4_exit_mballoc(); diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index cba4b877c606..6127e94ea4f5 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -2419,6 +2419,7 @@ retry_inode: if (IS_SYNC(inode)) ext4_handle_sync(handle); } + ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR); cleanup: brelse(is.iloc.bh); @@ -2496,6 +2497,7 @@ retry: if (error == 0) error = error2; } + ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR); return error; } @@ -2928,6 +2930,7 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode, error); goto cleanup; } + ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR); } error = 0; cleanup: diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 4c8b99ec8606..521de3a82118 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -95,6 +95,16 @@ TRACE_DEFINE_ENUM(ES_REFERENCED_B); { FALLOC_FL_COLLAPSE_RANGE, "COLLAPSE_RANGE"}, \ { FALLOC_FL_ZERO_RANGE, "ZERO_RANGE"}) +#define show_fc_reason(reason) \ + __print_symbolic(reason, \ + { EXT4_FC_REASON_XATTR, "XATTR"}, \ + { EXT4_FC_REASON_CROSS_RENAME, "CROSS_RENAME"}, \ + { EXT4_FC_REASON_JOURNAL_FLAG_CHANGE, "JOURNAL_FLAG_CHANGE"}, \ + { EXT4_FC_REASON_MEM, "NO_MEM"}, \ + { EXT4_FC_REASON_SWAP_BOOT, "SWAP_BOOT"}, \ + { EXT4_FC_REASON_RESIZE, "RESIZE"}, \ + { EXT4_FC_REASON_RENAME_DIR, "RENAME_DIR"}, \ + { EXT4_FC_REASON_FALLOC_RANGE, "FALLOC_RANGE"}) TRACE_EVENT(ext4_other_inode_update_time, TP_PROTO(struct inode *inode, ino_t orig_ino), @@ -2791,6 +2801,168 @@ TRACE_EVENT(ext4_lazy_itable_init, MAJOR(__entry->dev), MINOR(__entry->dev), __entry->group) ); +TRACE_EVENT(ext4_fc_commit_start, + TP_PROTO(struct super_block *sb), + + TP_ARGS(sb), + + TP_STRUCT__entry( + __field(dev_t, dev) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + ), + + TP_printk("fast_commit started on dev %d,%d", + MAJOR(__entry->dev), MINOR(__entry->dev)) +); + +TRACE_EVENT(ext4_fc_commit_stop, + TP_PROTO(struct super_block *sb, int nblks, int reason), + + TP_ARGS(sb, nblks, reason), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(int, nblks) + __field(int, reason) + __field(int, num_fc) + __field(int, num_fc_ineligible) + __field(int, nblks_agg) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->nblks = nblks; + __entry->reason = reason; + __entry->num_fc = EXT4_SB(sb)->s_fc_stats.fc_num_commits; + __entry->num_fc_ineligible = + EXT4_SB(sb)->s_fc_stats.fc_ineligible_commits; + __entry->nblks_agg = EXT4_SB(sb)->s_fc_stats.fc_numblks; + ), + + TP_printk("fc on [%d,%d] nblks %d, reason %d, fc = %d, ineligible = %d, agg_nblks %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->nblks, __entry->reason, __entry->num_fc, + __entry->num_fc_ineligible, __entry->nblks_agg) +); + +#define FC_REASON_NAME_STAT(reason) \ + show_fc_reason(reason), \ + __entry->sbi->s_fc_stats.fc_ineligible_reason_count[reason] + 
+TRACE_EVENT(ext4_fc_stats, + TP_PROTO(struct super_block *sb), + + TP_ARGS(sb), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(struct ext4_sb_info *, sbi) + __field(int, count) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->sbi = EXT4_SB(sb); + ), + + TP_printk("dev %d:%d fc ineligible reasons:\n" + "%s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s,%d; " + "num_commits:%ld, ineligible: %ld, numblks: %ld", + MAJOR(__entry->dev), MINOR(__entry->dev), + FC_REASON_NAME_STAT(EXT4_FC_REASON_XATTR), + FC_REASON_NAME_STAT(EXT4_FC_REASON_CROSS_RENAME), + FC_REASON_NAME_STAT(EXT4_FC_REASON_JOURNAL_FLAG_CHANGE), + FC_REASON_NAME_STAT(EXT4_FC_REASON_MEM), + FC_REASON_NAME_STAT(EXT4_FC_REASON_SWAP_BOOT), + FC_REASON_NAME_STAT(EXT4_FC_REASON_RESIZE), + FC_REASON_NAME_STAT(EXT4_FC_REASON_RENAME_DIR), + FC_REASON_NAME_STAT(EXT4_FC_REASON_FALLOC_RANGE), + __entry->sbi->s_fc_stats.fc_num_commits, + __entry->sbi->s_fc_stats.fc_ineligible_commits, + __entry->sbi->s_fc_stats.fc_numblks) + +); + +#define DEFINE_TRACE_DENTRY_EVENT(__type) \ + TRACE_EVENT(ext4_fc_track_##__type, \ + TP_PROTO(struct inode *inode, struct dentry *dentry, int ret), \ + \ + TP_ARGS(inode, dentry, ret), \ + \ + TP_STRUCT__entry( \ + __field(dev_t, dev) \ + __field(int, ino) \ + __field(int, error) \ + ), \ + \ + TP_fast_assign( \ + __entry->dev = inode->i_sb->s_dev; \ + __entry->ino = inode->i_ino; \ + __entry->error = ret; \ + ), \ + \ + TP_printk("dev %d:%d, inode %d, error %d, fc_%s", \ + MAJOR(__entry->dev), MINOR(__entry->dev), \ + __entry->ino, __entry->error, \ + #__type) \ + ) + +DEFINE_TRACE_DENTRY_EVENT(create); +DEFINE_TRACE_DENTRY_EVENT(link); +DEFINE_TRACE_DENTRY_EVENT(unlink); + +TRACE_EVENT(ext4_fc_track_inode, + TP_PROTO(struct inode *inode, int ret), + + TP_ARGS(inode, ret), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(int, ino) + __field(int, error) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->error = ret; + ), + + TP_printk("dev %d:%d, inode %d, error %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, __entry->error) + ); + +TRACE_EVENT(ext4_fc_track_range, + TP_PROTO(struct inode *inode, long start, long end, int ret), + + TP_ARGS(inode, start, end, ret), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(int, ino) + __field(long, start) + __field(long, end) + __field(int, error) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->start = start; + __entry->end = end; + __entry->error = ret; + ), + + TP_printk("dev %d:%d, inode %d, error %d, start %ld, end %ld", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, __entry->error, __entry->start, + __entry->end) + ); + #endif /* _TRACE_EXT4_H */ /* This part must be outside protection */ -- cgit v1.2.3 From 5b849b5f96b47d82b5a432d8b91a8ad260e1de46 Mon Sep 17 00:00:00 2001 From: Harshad Shirwadkar Date: Thu, 15 Oct 2020 13:37:58 -0700 Subject: jbd2: fast commit recovery path This patch adds fast commit recovery support in JBD2. 
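For orientation (not part of the patch): the replay hook this commit introduces can be driven by a client filesystem roughly as in the minimal sketch below. It assumes only the j_fc_replay_callback pointer, enum passtype, and the JBD2_FC_REPLAY_* return codes added in the diff that follows; the myfs_* names are hypothetical placeholders, not real kernel symbols.

/* Hypothetical decoder: returns <0 on error, 0 to keep replaying, >0 once it
 * has seen the tail of the fast commit area. Not a real kernel function. */
static int myfs_fc_apply_block(struct buffer_head *bh, int off, tid_t tid);

static int myfs_fc_replay(journal_t *journal, struct buffer_head *bh,
			  enum passtype pass, int off, tid_t expected_tid)
{
	int err;

	/* In this sketch, changes are only applied during the replay pass. */
	if (pass != PASS_REPLAY)
		return JBD2_FC_REPLAY_CONTINUE;

	/* 'off' is the block's offset from j_fc_first, 'expected_tid' the
	 * transaction id being recovered (see fc_do_one_pass() below). */
	err = myfs_fc_apply_block(bh, off, expected_tid);
	if (err < 0)
		return err;			/* abort recovery */
	return err ? JBD2_FC_REPLAY_STOP : JBD2_FC_REPLAY_CONTINUE;
}

/* Registered once when the journal is set up, as ext4_fc_init() does below. */
static void myfs_fc_init(journal_t *journal)
{
	journal->j_fc_replay_callback = myfs_fc_replay;
}

The negative / JBD2_FC_REPLAY_STOP / JBD2_FC_REPLAY_CONTINUE convention mirrors how fc_do_one_pass() in the diff below interprets the callback's return value.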
Signed-off-by: Harshad Shirwadkar Link: https://lore.kernel.org/r/20201015203802.3597742-7-harshadshirwadkar@gmail.com Signed-off-by: Theodore Ts'o --- fs/ext4/fast_commit.c | 15 ++++++++++++++ fs/jbd2/recovery.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++---- include/linux/jbd2.h | 20 ++++++++++++++++++ 3 files changed, 88 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c index 79e947c43198..888d9d217d5b 100644 --- a/fs/ext4/fast_commit.c +++ b/fs/ext4/fast_commit.c @@ -1188,8 +1188,23 @@ static void ext4_fc_cleanup(journal_t *journal, int full) trace_ext4_fc_stats(sb); } +/* + * Main recovery path entry point. + */ +static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh, + enum passtype pass, int off, tid_t expected_tid) +{ + return 0; +} + void ext4_fc_init(struct super_block *sb, journal_t *journal) { + /* + * We set the replay callback even if fast commit is disabled, because + * we may still have fast commit blocks that need to be replayed even if + * fast commit has now been turned off. + */ + journal->j_fc_replay_callback = ext4_fc_replay; if (!test_opt2(sb, JOURNAL_FAST_COMMIT)) return; journal->j_fc_cleanup_callback = ext4_fc_cleanup; diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index fb134c7a12c8..eb2606133cd8 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c @@ -35,7 +35,6 @@ struct recovery_info int nr_revoke_hits; }; -enum passtype {PASS_SCAN, PASS_REVOKE, PASS_REPLAY}; static int do_one_pass(journal_t *journal, struct recovery_info *info, enum passtype pass); static int scan_revoke_records(journal_t *, struct buffer_head *, @@ -225,10 +224,51 @@ static int count_tags(journal_t *journal, struct buffer_head *bh) /* Make sure we wrap around the log correctly! */ #define wrap(journal, var) \ do { \ - if (var >= (journal)->j_last) \ - var -= ((journal)->j_last - (journal)->j_first); \ + unsigned long _wrap_last = \ + jbd2_has_feature_fast_commit(journal) ? \ + (journal)->j_fc_last : (journal)->j_last; \ + \ + if (var >= _wrap_last) \ + var -= (_wrap_last - (journal)->j_first); \ } while (0) +static int fc_do_one_pass(journal_t *journal, + struct recovery_info *info, enum passtype pass) +{ + unsigned int expected_commit_id = info->end_transaction; + unsigned long next_fc_block; + struct buffer_head *bh; + int err = 0; + + next_fc_block = journal->j_fc_first; + if (!journal->j_fc_replay_callback) + return 0; + + while (next_fc_block <= journal->j_fc_last) { + jbd_debug(3, "Fast commit replay: next block %ld", + next_fc_block); + err = jread(&bh, journal, next_fc_block); + if (err) { + jbd_debug(3, "Fast commit replay: read error"); + break; + } + + jbd_debug(3, "Processing fast commit blk with seq %d", + expected_commit_id); + err = journal->j_fc_replay_callback(journal, bh, pass, + next_fc_block - journal->j_fc_first, + expected_commit_id); + next_fc_block++; + if (err < 0 || err == JBD2_FC_REPLAY_STOP) + break; + err = 0; + } + + if (err) + jbd_debug(3, "Fast commit replay failed, err = %d\n", err); + + return err; +} + /** * jbd2_journal_recover - recovers a on-disk journal * @journal: the journal to recover @@ -472,7 +512,9 @@ static int do_one_pass(journal_t *journal, break; jbd_debug(2, "Scanning for sequence ID %u at %lu/%lu\n", - next_commit_ID, next_log_block, journal->j_last); + next_commit_ID, next_log_block, + jbd2_has_feature_fast_commit(journal) ?
+ journal->j_fc_last : journal->j_last); /* Skip over each chunk of the transaction looking * either the next descriptor block or the final commit @@ -834,6 +876,13 @@ static int do_one_pass(journal_t *journal, success = -EIO; } } + + if (jbd2_has_feature_fast_commit(journal) && pass != PASS_REVOKE) { + err = fc_do_one_pass(journal, info, pass); + if (err) + success = err; + } + if (block_error && success == 0) success = -EIO; return success; diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index a009d9b9c620..fb3d71ad6eea 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -751,6 +751,11 @@ jbd2_time_diff(unsigned long start, unsigned long end) #define JBD2_NR_BATCH 64 +enum passtype {PASS_SCAN, PASS_REVOKE, PASS_REPLAY}; + +#define JBD2_FC_REPLAY_STOP 0 +#define JBD2_FC_REPLAY_CONTINUE 1 + /** * struct journal_s - The journal_s type is the concrete type associated with * journal_t. @@ -1248,6 +1253,21 @@ struct journal_s */ void (*j_fc_cleanup_callback)(struct journal_s *journal, int); + /* + * @j_fc_replay_callback: + * + * File-system specific function that performs replay of a fast + * commit. JBD2 calls this function for each fast commit block found in + * the journal. This function should return JBD2_FC_REPLAY_CONTINUE + * to indicate that the block was processed correctly and more fast + * commit replay should continue. Return value of JBD2_FC_REPLAY_STOP + * indicates the end of replay (no more blocks remaining). A negative + * return value indicates error. + */ + int (*j_fc_replay_callback)(struct journal_s *journal, + struct buffer_head *bh, + enum passtype pass, int off, + tid_t expected_commit_id); }; #define jbd2_might_wait_for_commit(j) \ -- cgit v1.2.3 From 8016e29f4362e285f0f7e38fadc61a5b7bdfdfa2 Mon Sep 17 00:00:00 2001 From: Harshad Shirwadkar Date: Thu, 15 Oct 2020 13:37:59 -0700 Subject: ext4: fast commit recovery path This patch adds fast commit recovery path support for the ext4 file system. We add several helper functions that are similar in spirit to e2fsprogs journal recovery path handlers. Examples of such functions include a simple block allocator and an idempotent block bitmap update function. Using these routines and the fast commit log in the fast commit area, the recovery path (ext4_fc_replay()) performs fast commit log recovery.
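Before the diff, an illustrative sketch (not the patch's code) of the replay dispatch this commit implements: the fast commit area is a stream of tag-length-value entries (struct ext4_fc_tl), and replay walks them, handing each tag to one of the per-tag helpers added below. The real dispatcher is ext4_fc_replay(); fc_replay_one_block_sketch() and its exact loop are a simplified, hypothetical rendering.

/* Simplified, hypothetical walk over the TLVs of one fast commit block,
 * assuming the ext4_fc_tl layout and EXT4_FC_TAG_* values from
 * fs/ext4/fast_commit.h and the per-tag helpers added by this patch. */
static int fc_replay_one_block_sketch(struct super_block *sb, u8 *start,
				      int blocksize)
{
	struct ext4_fc_tl *tl;
	u8 *cur = start, *end = start + blocksize;
	int ret = 0;

	while (cur + sizeof(*tl) <= end) {
		tl = (struct ext4_fc_tl *)cur;
		switch (le16_to_cpu(tl->fc_tag)) {
		case EXT4_FC_TAG_ADD_RANGE:
			ret = ext4_fc_replay_add_range(sb, tl);
			break;
		case EXT4_FC_TAG_DEL_RANGE:
			ret = ext4_fc_replay_del_range(sb, tl);
			break;
		case EXT4_FC_TAG_CREAT:
			ret = ext4_fc_replay_create(sb, tl);
			break;
		case EXT4_FC_TAG_LINK:
			ret = ext4_fc_replay_link(sb, tl);
			break;
		case EXT4_FC_TAG_UNLINK:
			ret = ext4_fc_replay_unlink(sb, tl);
			break;
		case EXT4_FC_TAG_INODE:
			ret = ext4_fc_replay_inode(sb, tl);
			break;
		case EXT4_FC_TAG_TAIL:
			return 0;	/* end of this fast commit block */
		case EXT4_FC_TAG_PAD:
		case EXT4_FC_TAG_HEAD:
		default:
			break;		/* nothing to apply for control tags */
		}
		if (ret)
			return ret;
		/* advance to the next tag-length-value entry */
		cur += sizeof(*tl) + le16_to_cpu(tl->fc_len);
	}
	return 0;
}

The per-tag helpers used above (ext4_fc_replay_add_range(), ext4_fc_replay_create(), and so on) are the functions introduced in fs/ext4/fast_commit.c further down in this patch.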
Reported-by: kernel test robot Signed-off-by: Harshad Shirwadkar Link: https://lore.kernel.org/r/20201015203802.3597742-8-harshadshirwadkar@gmail.com Signed-off-by: Theodore Ts'o --- fs/ext4/balloc.c | 7 +- fs/ext4/ext4.h | 26 ++ fs/ext4/ext4_jbd2.c | 2 +- fs/ext4/extents.c | 261 +++++++++++++ fs/ext4/extents_status.c | 24 ++ fs/ext4/fast_commit.c | 897 +++++++++++++++++++++++++++++++++++++++++++- fs/ext4/fast_commit.h | 40 ++ fs/ext4/ialloc.c | 168 ++++++++- fs/ext4/inode.c | 89 +++-- fs/ext4/ioctl.c | 6 +- fs/ext4/mballoc.c | 206 +++++++++- fs/ext4/namei.c | 149 ++++---- fs/ext4/super.c | 21 ++ include/trace/events/ext4.h | 56 ++- 14 files changed, 1821 insertions(+), 131 deletions(-) (limited to 'fs') diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index dea738ba2acd..1d640b145637 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -368,7 +368,12 @@ static int ext4_validate_block_bitmap(struct super_block *sb, struct buffer_head *bh) { ext4_fsblk_t blk; - struct ext4_group_info *grp = ext4_get_group_info(sb, block_group); + struct ext4_group_info *grp; + + if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) + return 0; + + grp = ext4_get_group_info(sb, block_group); if (buffer_verified(bh)) return 0; diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 003e898df595..152500725acf 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1170,6 +1170,7 @@ struct ext4_inode_info { #define EXT4_FC_COMMITTING 0x0010 /* File system underoing a fast * commit. */ +#define EXT4_FC_REPLAY 0x0020 /* Fast commit replay ongoing */ /* * Misc. filesystem flags @@ -1666,6 +1667,10 @@ struct ext4_sb_info { struct buffer_head *s_fc_bh; struct ext4_fc_stats s_fc_stats; u64 s_fc_avg_commit_time; +#ifdef CONFIG_EXT4_DEBUG + int s_fc_debug_max_replay; +#endif + struct ext4_fc_replay_state s_fc_replay_state; }; static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb) @@ -2708,6 +2713,7 @@ extern int ext4fs_dirhash(const struct inode *dir, const char *name, int len, struct dx_hash_info *hinfo); /* ialloc.c */ +extern int ext4_mark_inode_used(struct super_block *sb, int ino); extern struct inode *__ext4_new_inode(handle_t *, struct inode *, umode_t, const struct qstr *qstr, __u32 goal, uid_t *owner, __u32 i_flags, @@ -2749,6 +2755,8 @@ void ext4_fc_stop_ineligible(struct super_block *sb); void ext4_fc_start_update(struct inode *inode); void ext4_fc_stop_update(struct inode *inode); void ext4_fc_del(struct inode *inode); +bool ext4_fc_replay_check_excluded(struct super_block *sb, ext4_fsblk_t block); +void ext4_fc_replay_cleanup(struct super_block *sb); int ext4_fc_commit(journal_t *journal, tid_t commit_tid); int __init ext4_fc_init_dentry_cache(void); @@ -2781,8 +2789,12 @@ extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, ext4_fsblk_t block, unsigned long count); extern int ext4_trim_fs(struct super_block *, struct fstrim_range *); extern void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid); +extern void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block, + int len, int state); /* inode.c */ +void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw, + struct ext4_inode_info *ei); int ext4_inode_is_fast_symlink(struct inode *inode); struct buffer_head *ext4_getblk(handle_t *, struct inode *, ext4_lblk_t, int); struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int); @@ -2829,6 +2841,8 @@ extern int ext4_sync_inode(handle_t *, struct inode *); extern void ext4_dirty_inode(struct inode *, int); extern int 
ext4_change_inode_journal_flag(struct inode *, int); extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *); +extern int ext4_get_fc_inode_loc(struct super_block *sb, unsigned long ino, + struct ext4_iloc *iloc); extern int ext4_inode_attach_jinode(struct inode *inode); extern int ext4_can_truncate(struct inode *inode); extern int ext4_truncate(struct inode *); @@ -2862,12 +2876,15 @@ extern int ext4_ind_remove_space(handle_t *handle, struct inode *inode, /* ioctl.c */ extern long ext4_ioctl(struct file *, unsigned int, unsigned long); extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long); +extern void ext4_reset_inode_seed(struct inode *inode); /* migrate.c */ extern int ext4_ext_migrate(struct inode *); extern int ext4_ind_migrate(struct inode *inode); /* namei.c */ +extern int ext4_init_new_dir(handle_t *handle, struct inode *dir, + struct inode *inode); extern int ext4_dirblock_csum_verify(struct inode *inode, struct buffer_head *bh); extern int ext4_orphan_add(handle_t *, struct inode *); @@ -3447,6 +3464,10 @@ extern int ext4_handle_dirty_dirblock(handle_t *handle, struct inode *inode, extern int ext4_ci_compare(const struct inode *parent, const struct qstr *fname, const struct qstr *entry, bool quick); +extern int __ext4_unlink(struct inode *dir, const struct qstr *d_name, + struct inode *inode); +extern int __ext4_link(struct inode *dir, struct inode *inode, + struct dentry *dentry); #define S_SHIFT 12 static const unsigned char ext4_type_by_mode[(S_IFMT >> S_SHIFT) + 1] = { @@ -3547,6 +3568,11 @@ extern int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu); extern int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode, int check_cred, int restart_cred, int revoke_cred); +extern void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end); +extern int ext4_ext_replay_set_iblocks(struct inode *inode); +extern int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start, + int len, int unwritten, ext4_fsblk_t pblk); +extern int ext4_ext_clear_bb(struct inode *inode); /* move_extent.c */ diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index 760b9ee49dc0..0fd0c42a4f7d 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c @@ -100,7 +100,7 @@ handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line, return ERR_PTR(err); journal = EXT4_SB(sb)->s_journal; - if (!journal) + if (!journal || (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)) return ext4_get_nojournal(); return jbd2__journal_start(journal, blocks, rsv_blocks, revoke_creds, GFP_NOFS, type, line); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index a2bb87d75500..559100f3e23c 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -5804,3 +5804,264 @@ out: return err ? err : mapped; } + +/* + * Updates physical block address and unwritten status of extent + * starting at lblk start and of len. If such an extent doesn't exist, + * this function splits the extent tree appropriately to create an + * extent like this. This function is called in the fast commit + * replay path. Returns 0 on success and error on failure. 
+ */ +int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start, + int len, int unwritten, ext4_fsblk_t pblk) +{ + struct ext4_ext_path *path = NULL, *ppath; + struct ext4_extent *ex; + int ret; + + path = ext4_find_extent(inode, start, NULL, 0); + if (!path) + return -EINVAL; + ex = path[path->p_depth].p_ext; + if (!ex) { + ret = -EFSCORRUPTED; + goto out; + } + + if (le32_to_cpu(ex->ee_block) != start || + ext4_ext_get_actual_len(ex) != len) { + /* We need to split this extent to match our extent first */ + ppath = path; + down_write(&EXT4_I(inode)->i_data_sem); + ret = ext4_force_split_extent_at(NULL, inode, &ppath, start, 1); + up_write(&EXT4_I(inode)->i_data_sem); + if (ret) + goto out; + kfree(path); + path = ext4_find_extent(inode, start, NULL, 0); + if (IS_ERR(path)) + return -1; + ppath = path; + ex = path[path->p_depth].p_ext; + WARN_ON(le32_to_cpu(ex->ee_block) != start); + if (ext4_ext_get_actual_len(ex) != len) { + down_write(&EXT4_I(inode)->i_data_sem); + ret = ext4_force_split_extent_at(NULL, inode, &ppath, + start + len, 1); + up_write(&EXT4_I(inode)->i_data_sem); + if (ret) + goto out; + kfree(path); + path = ext4_find_extent(inode, start, NULL, 0); + if (IS_ERR(path)) + return -EINVAL; + ex = path[path->p_depth].p_ext; + } + } + if (unwritten) + ext4_ext_mark_unwritten(ex); + else + ext4_ext_mark_initialized(ex); + ext4_ext_store_pblock(ex, pblk); + down_write(&EXT4_I(inode)->i_data_sem); + ret = ext4_ext_dirty(NULL, inode, &path[path->p_depth]); + up_write(&EXT4_I(inode)->i_data_sem); +out: + ext4_ext_drop_refs(path); + kfree(path); + ext4_mark_inode_dirty(NULL, inode); + return ret; +} + +/* Try to shrink the extent tree */ +void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end) +{ + struct ext4_ext_path *path = NULL; + struct ext4_extent *ex; + ext4_lblk_t old_cur, cur = 0; + + while (cur < end) { + path = ext4_find_extent(inode, cur, NULL, 0); + if (IS_ERR(path)) + return; + ex = path[path->p_depth].p_ext; + if (!ex) { + ext4_ext_drop_refs(path); + kfree(path); + ext4_mark_inode_dirty(NULL, inode); + return; + } + old_cur = cur; + cur = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); + if (cur <= old_cur) + cur = old_cur + 1; + ext4_ext_try_to_merge(NULL, inode, path, ex); + down_write(&EXT4_I(inode)->i_data_sem); + ext4_ext_dirty(NULL, inode, &path[path->p_depth]); + up_write(&EXT4_I(inode)->i_data_sem); + ext4_mark_inode_dirty(NULL, inode); + ext4_ext_drop_refs(path); + kfree(path); + } +} + +/* Check if *cur is a hole and if it is, skip it */ +static void skip_hole(struct inode *inode, ext4_lblk_t *cur) +{ + int ret; + struct ext4_map_blocks map; + + map.m_lblk = *cur; + map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur; + + ret = ext4_map_blocks(NULL, inode, &map, 0); + if (ret != 0) + return; + *cur = *cur + map.m_len; +} + +/* Count number of blocks used by this inode and update i_blocks */ +int ext4_ext_replay_set_iblocks(struct inode *inode) +{ + struct ext4_ext_path *path = NULL, *path2 = NULL; + struct ext4_extent *ex; + ext4_lblk_t cur = 0, end; + int numblks = 0, i, ret = 0; + ext4_fsblk_t cmp1, cmp2; + struct ext4_map_blocks map; + + /* Determin the size of the file first */ + path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, + EXT4_EX_NOCACHE); + if (IS_ERR(path)) + return PTR_ERR(path); + ex = path[path->p_depth].p_ext; + if (!ex) { + ext4_ext_drop_refs(path); + kfree(path); + goto out; + } + end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); + ext4_ext_drop_refs(path); + 
kfree(path); + + /* Count the number of data blocks */ + cur = 0; + while (cur < end) { + map.m_lblk = cur; + map.m_len = end - cur; + ret = ext4_map_blocks(NULL, inode, &map, 0); + if (ret < 0) + break; + if (ret > 0) + numblks += ret; + cur = cur + map.m_len; + } + + /* + * Count the number of extent tree blocks. We do it by looking up + * two successive extents and determining the difference between + * their paths. When path is different for 2 successive extents + * we compare the blocks in the path at each level and increment + * iblocks by total number of differences found. + */ + cur = 0; + skip_hole(inode, &cur); + path = ext4_find_extent(inode, cur, NULL, 0); + if (IS_ERR(path)) + goto out; + numblks += path->p_depth; + ext4_ext_drop_refs(path); + kfree(path); + while (cur < end) { + path = ext4_find_extent(inode, cur, NULL, 0); + if (IS_ERR(path)) + break; + ex = path[path->p_depth].p_ext; + if (!ex) { + ext4_ext_drop_refs(path); + kfree(path); + return 0; + } + cur = max(cur + 1, le32_to_cpu(ex->ee_block) + + ext4_ext_get_actual_len(ex)); + skip_hole(inode, &cur); + + path2 = ext4_find_extent(inode, cur, NULL, 0); + if (IS_ERR(path2)) { + ext4_ext_drop_refs(path); + kfree(path); + break; + } + ex = path2[path2->p_depth].p_ext; + for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) { + cmp1 = cmp2 = 0; + if (i <= path->p_depth) + cmp1 = path[i].p_bh ? + path[i].p_bh->b_blocknr : 0; + if (i <= path2->p_depth) + cmp2 = path2[i].p_bh ? + path2[i].p_bh->b_blocknr : 0; + if (cmp1 != cmp2 && cmp2 != 0) + numblks++; + } + ext4_ext_drop_refs(path); + ext4_ext_drop_refs(path2); + kfree(path); + kfree(path2); + } + +out: + inode->i_blocks = numblks << (inode->i_sb->s_blocksize_bits - 9); + ext4_mark_inode_dirty(NULL, inode); + return 0; +} + +int ext4_ext_clear_bb(struct inode *inode) +{ + struct ext4_ext_path *path = NULL; + struct ext4_extent *ex; + ext4_lblk_t cur = 0, end; + int j, ret = 0; + struct ext4_map_blocks map; + + /* Determin the size of the file first */ + path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, + EXT4_EX_NOCACHE); + if (IS_ERR(path)) + return PTR_ERR(path); + ex = path[path->p_depth].p_ext; + if (!ex) { + ext4_ext_drop_refs(path); + kfree(path); + return 0; + } + end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); + ext4_ext_drop_refs(path); + kfree(path); + + cur = 0; + while (cur < end) { + map.m_lblk = cur; + map.m_len = end - cur; + ret = ext4_map_blocks(NULL, inode, &map, 0); + if (ret < 0) + break; + if (ret > 0) { + path = ext4_find_extent(inode, map.m_lblk, NULL, 0); + if (!IS_ERR_OR_NULL(path)) { + for (j = 0; j < path->p_depth; j++) { + + ext4_mb_mark_bb(inode->i_sb, + path[j].p_block, 1, 0); + } + ext4_ext_drop_refs(path); + kfree(path); + } + ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0); + } + cur = cur + map.m_len; + } + + return 0; +} diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index e75171535375..0a729027322d 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -311,6 +311,9 @@ void ext4_es_find_extent_range(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t end, struct extent_status *es) { + if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) + return; + trace_ext4_es_find_extent_range_enter(inode, lblk); read_lock(&EXT4_I(inode)->i_es_lock); @@ -361,6 +364,9 @@ bool ext4_es_scan_range(struct inode *inode, { bool ret; + if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) + return false; + read_lock(&EXT4_I(inode)->i_es_lock); ret = __es_scan_range(inode, 
matching_fn, lblk, end); read_unlock(&EXT4_I(inode)->i_es_lock); @@ -404,6 +410,9 @@ bool ext4_es_scan_clu(struct inode *inode, { bool ret; + if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) + return false; + read_lock(&EXT4_I(inode)->i_es_lock); ret = __es_scan_clu(inode, matching_fn, lblk); read_unlock(&EXT4_I(inode)->i_es_lock); @@ -812,6 +821,9 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk, int err = 0; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) + return 0; + es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n", lblk, len, pblk, status, inode->i_ino); @@ -873,6 +885,9 @@ void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk, struct extent_status newes; ext4_lblk_t end = lblk + len - 1; + if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) + return; + newes.es_lblk = lblk; newes.es_len = len; ext4_es_store_pblock_status(&newes, pblk, status); @@ -908,6 +923,9 @@ int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, struct rb_node *node; int found = 0; + if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) + return 0; + trace_ext4_es_lookup_extent_enter(inode, lblk); es_debug("lookup extent in block %u\n", lblk); @@ -1419,6 +1437,9 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk, int err = 0; int reserved = 0; + if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) + return 0; + trace_ext4_es_remove_extent(inode, lblk, len); es_debug("remove [%u/%u) from extent status tree of inode %lu\n", lblk, len, inode->i_ino); @@ -1969,6 +1990,9 @@ int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk, struct extent_status newes; int err = 0; + if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) + return 0; + es_debug("add [%u/1) delayed to extent status tree of inode %lu\n", lblk, inode->i_ino); diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c index 888d9d217d5b..426c0ab8b70c 100644 --- a/fs/ext4/fast_commit.c +++ b/fs/ext4/fast_commit.c @@ -166,7 +166,8 @@ void ext4_fc_start_update(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); - if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT)) + if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT) || + (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)) return; restart: @@ -205,7 +206,8 @@ void ext4_fc_stop_update(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); - if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT)) + if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT) || + (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)) return; if (atomic_dec_and_test(&ei->i_fc_updates)) @@ -220,11 +222,8 @@ void ext4_fc_del(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); - if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT)) - return; - - - if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT)) + if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT) || + (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)) return; restart: @@ -266,6 +265,10 @@ void ext4_fc_mark_ineligible(struct super_block *sb, int reason) { struct ext4_sb_info *sbi = EXT4_SB(sb); + if (!test_opt2(sb, JOURNAL_FAST_COMMIT) || + (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)) + return; + sbi->s_mount_state |= EXT4_FC_INELIGIBLE; WARN_ON(reason >= EXT4_FC_REASON_MAX); sbi->s_fc_stats.fc_ineligible_reason_count[reason]++; @@ -279,6 +282,10 @@ void ext4_fc_start_ineligible(struct super_block *sb, int reason) { struct ext4_sb_info *sbi = EXT4_SB(sb); + if (!test_opt2(sb, 
JOURNAL_FAST_COMMIT) || + (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)) + return; + WARN_ON(reason >= EXT4_FC_REASON_MAX); sbi->s_fc_stats.fc_ineligible_reason_count[reason]++; atomic_inc(&sbi->s_fc_ineligible_updates); @@ -291,6 +298,10 @@ void ext4_fc_start_ineligible(struct super_block *sb, int reason) */ void ext4_fc_stop_ineligible(struct super_block *sb) { + if (!test_opt2(sb, JOURNAL_FAST_COMMIT) || + (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)) + return; + EXT4_SB(sb)->s_mount_state |= EXT4_FC_INELIGIBLE; atomic_dec(&EXT4_SB(sb)->s_fc_ineligible_updates); } @@ -321,7 +332,8 @@ static int ext4_fc_track_template( struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); int ret; - if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT)) + if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT) || + (sbi->s_mount_state & EXT4_FC_REPLAY)) return -EOPNOTSUPP; if (ext4_fc_is_ineligible(inode->i_sb)) @@ -1188,13 +1200,880 @@ static void ext4_fc_cleanup(journal_t *journal, int full) trace_ext4_fc_stats(sb); } +/* Ext4 Replay Path Routines */ + +/* Get length of a particular tlv */ +static inline int ext4_fc_tag_len(struct ext4_fc_tl *tl) +{ + return le16_to_cpu(tl->fc_len); +} + +/* Get a pointer to "value" of a tlv */ +static inline u8 *ext4_fc_tag_val(struct ext4_fc_tl *tl) +{ + return (u8 *)tl + sizeof(*tl); +} + +/* Helper struct for dentry replay routines */ +struct dentry_info_args { + int parent_ino, dname_len, ino, inode_len; + char *dname; +}; + +static inline void tl_to_darg(struct dentry_info_args *darg, + struct ext4_fc_tl *tl) +{ + struct ext4_fc_dentry_info *fcd; + + fcd = (struct ext4_fc_dentry_info *)ext4_fc_tag_val(tl); + + darg->parent_ino = le32_to_cpu(fcd->fc_parent_ino); + darg->ino = le32_to_cpu(fcd->fc_ino); + darg->dname = fcd->fc_dname; + darg->dname_len = ext4_fc_tag_len(tl) - + sizeof(struct ext4_fc_dentry_info); +} + +/* Unlink replay function */ +static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl) +{ + struct inode *inode, *old_parent; + struct qstr entry; + struct dentry_info_args darg; + int ret = 0; + + tl_to_darg(&darg, tl); + + trace_ext4_fc_replay(sb, EXT4_FC_TAG_UNLINK, darg.ino, + darg.parent_ino, darg.dname_len); + + entry.name = darg.dname; + entry.len = darg.dname_len; + inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL); + + if (IS_ERR_OR_NULL(inode)) { + jbd_debug(1, "Inode %d not found", darg.ino); + return 0; + } + + old_parent = ext4_iget(sb, darg.parent_ino, + EXT4_IGET_NORMAL); + if (IS_ERR_OR_NULL(old_parent)) { + jbd_debug(1, "Dir with inode %d not found", darg.parent_ino); + iput(inode); + return 0; + } + + ret = __ext4_unlink(old_parent, &entry, inode); + /* -ENOENT ok coz it might not exist anymore. 
*/ + if (ret == -ENOENT) + ret = 0; + iput(old_parent); + iput(inode); + return ret; +} + +static int ext4_fc_replay_link_internal(struct super_block *sb, + struct dentry_info_args *darg, + struct inode *inode) +{ + struct inode *dir = NULL; + struct dentry *dentry_dir = NULL, *dentry_inode = NULL; + struct qstr qstr_dname = QSTR_INIT(darg->dname, darg->dname_len); + int ret = 0; + + dir = ext4_iget(sb, darg->parent_ino, EXT4_IGET_NORMAL); + if (IS_ERR(dir)) { + jbd_debug(1, "Dir with inode %d not found.", darg->parent_ino); + dir = NULL; + goto out; + } + + dentry_dir = d_obtain_alias(dir); + if (IS_ERR(dentry_dir)) { + jbd_debug(1, "Failed to obtain dentry"); + dentry_dir = NULL; + goto out; + } + + dentry_inode = d_alloc(dentry_dir, &qstr_dname); + if (!dentry_inode) { + jbd_debug(1, "Inode dentry not created."); + ret = -ENOMEM; + goto out; + } + + ret = __ext4_link(dir, inode, dentry_inode); + /* + * It's possible that link already existed since data blocks + * for the dir in question got persisted before we crashed OR + * we replayed this tag and crashed before the entire replay + * could complete. + */ + if (ret && ret != -EEXIST) { + jbd_debug(1, "Failed to link\n"); + goto out; + } + + ret = 0; +out: + if (dentry_dir) { + d_drop(dentry_dir); + dput(dentry_dir); + } else if (dir) { + iput(dir); + } + if (dentry_inode) { + d_drop(dentry_inode); + dput(dentry_inode); + } + + return ret; +} + +/* Link replay function */ +static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl) +{ + struct inode *inode; + struct dentry_info_args darg; + int ret = 0; + + tl_to_darg(&darg, tl); + trace_ext4_fc_replay(sb, EXT4_FC_TAG_LINK, darg.ino, + darg.parent_ino, darg.dname_len); + + inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL); + if (IS_ERR_OR_NULL(inode)) { + jbd_debug(1, "Inode not found."); + return 0; + } + + ret = ext4_fc_replay_link_internal(sb, &darg, inode); + iput(inode); + return ret; +} + +/* + * Record all the modified inodes during replay. We use this later to setup + * block bitmaps correctly. 
+ */ +static int ext4_fc_record_modified_inode(struct super_block *sb, int ino) +{ + struct ext4_fc_replay_state *state; + int i; + + state = &EXT4_SB(sb)->s_fc_replay_state; + for (i = 0; i < state->fc_modified_inodes_used; i++) + if (state->fc_modified_inodes[i] == ino) + return 0; + if (state->fc_modified_inodes_used == state->fc_modified_inodes_size) { + state->fc_modified_inodes_size += + EXT4_FC_REPLAY_REALLOC_INCREMENT; + state->fc_modified_inodes = krealloc( + state->fc_modified_inodes, sizeof(int) * + state->fc_modified_inodes_size, + GFP_KERNEL); + if (!state->fc_modified_inodes) + return -ENOMEM; + } + state->fc_modified_inodes[state->fc_modified_inodes_used++] = ino; + return 0; +} + +/* + * Inode replay function + */ +static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl) +{ + struct ext4_fc_inode *fc_inode; + struct ext4_inode *raw_inode; + struct ext4_inode *raw_fc_inode; + struct inode *inode = NULL; + struct ext4_iloc iloc; + int inode_len, ino, ret, tag = le16_to_cpu(tl->fc_tag); + struct ext4_extent_header *eh; + + fc_inode = (struct ext4_fc_inode *)ext4_fc_tag_val(tl); + + ino = le32_to_cpu(fc_inode->fc_ino); + trace_ext4_fc_replay(sb, tag, ino, 0, 0); + + inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL); + if (!IS_ERR_OR_NULL(inode)) { + ext4_ext_clear_bb(inode); + iput(inode); + } + + ext4_fc_record_modified_inode(sb, ino); + + raw_fc_inode = (struct ext4_inode *)fc_inode->fc_raw_inode; + ret = ext4_get_fc_inode_loc(sb, ino, &iloc); + if (ret) + goto out; + + inode_len = ext4_fc_tag_len(tl) - sizeof(struct ext4_fc_inode); + raw_inode = ext4_raw_inode(&iloc); + + memcpy(raw_inode, raw_fc_inode, offsetof(struct ext4_inode, i_block)); + memcpy(&raw_inode->i_generation, &raw_fc_inode->i_generation, + inode_len - offsetof(struct ext4_inode, i_generation)); + if (le32_to_cpu(raw_inode->i_flags) & EXT4_EXTENTS_FL) { + eh = (struct ext4_extent_header *)(&raw_inode->i_block[0]); + if (eh->eh_magic != EXT4_EXT_MAGIC) { + memset(eh, 0, sizeof(*eh)); + eh->eh_magic = EXT4_EXT_MAGIC; + eh->eh_max = cpu_to_le16( + (sizeof(raw_inode->i_block) - + sizeof(struct ext4_extent_header)) + / sizeof(struct ext4_extent)); + } + } else if (le32_to_cpu(raw_inode->i_flags) & EXT4_INLINE_DATA_FL) { + memcpy(raw_inode->i_block, raw_fc_inode->i_block, + sizeof(raw_inode->i_block)); + } + + /* Immediately update the inode on disk. */ + ret = ext4_handle_dirty_metadata(NULL, NULL, iloc.bh); + if (ret) + goto out; + ret = sync_dirty_buffer(iloc.bh); + if (ret) + goto out; + ret = ext4_mark_inode_used(sb, ino); + if (ret) + goto out; + + /* Given that we just wrote the inode on disk, this SHOULD succeed. */ + inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL); + if (IS_ERR_OR_NULL(inode)) { + jbd_debug(1, "Inode not found."); + return -EFSCORRUPTED; + } + + /* + * Our allocator could have made different decisions than before + * crashing. This should be fixed but until then, we calculate + * the number of blocks the inode. + */ + ext4_ext_replay_set_iblocks(inode); + + inode->i_generation = le32_to_cpu(ext4_raw_inode(&iloc)->i_generation); + ext4_reset_inode_seed(inode); + + ext4_inode_csum_set(inode, ext4_raw_inode(&iloc), EXT4_I(inode)); + ret = ext4_handle_dirty_metadata(NULL, NULL, iloc.bh); + sync_dirty_buffer(iloc.bh); + brelse(iloc.bh); +out: + iput(inode); + if (!ret) + blkdev_issue_flush(sb->s_bdev, GFP_KERNEL); + + return 0; +} + +/* + * Dentry create replay function. + * + * EXT4_FC_TAG_CREAT is preceded by EXT4_FC_TAG_INODE_FULL. 
Which means, the + * inode for which we are trying to create a dentry here, should already have + * been replayed before we start here. + */ +static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl) +{ + int ret = 0; + struct inode *inode = NULL; + struct inode *dir = NULL; + struct dentry_info_args darg; + + tl_to_darg(&darg, tl); + + trace_ext4_fc_replay(sb, EXT4_FC_TAG_CREAT, darg.ino, + darg.parent_ino, darg.dname_len); + + /* This takes care of update group descriptor and other metadata */ + ret = ext4_mark_inode_used(sb, darg.ino); + if (ret) + goto out; + + inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL); + if (IS_ERR_OR_NULL(inode)) { + jbd_debug(1, "inode %d not found.", darg.ino); + inode = NULL; + ret = -EINVAL; + goto out; + } + + if (S_ISDIR(inode->i_mode)) { + /* + * If we are creating a directory, we need to make sure that the + * dot and dot dot dirents are setup properly. + */ + dir = ext4_iget(sb, darg.parent_ino, EXT4_IGET_NORMAL); + if (IS_ERR_OR_NULL(dir)) { + jbd_debug(1, "Dir %d not found.", darg.ino); + goto out; + } + ret = ext4_init_new_dir(NULL, dir, inode); + iput(dir); + if (ret) { + ret = 0; + goto out; + } + } + ret = ext4_fc_replay_link_internal(sb, &darg, inode); + if (ret) + goto out; + set_nlink(inode, 1); + ext4_mark_inode_dirty(NULL, inode); +out: + if (inode) + iput(inode); + return ret; +} + +/* + * Record physical disk regions which are in use as per fast commit area. Our + * simple replay phase allocator excludes these regions from allocation. + */ +static int ext4_fc_record_regions(struct super_block *sb, int ino, + ext4_lblk_t lblk, ext4_fsblk_t pblk, int len) +{ + struct ext4_fc_replay_state *state; + struct ext4_fc_alloc_region *region; + + state = &EXT4_SB(sb)->s_fc_replay_state; + if (state->fc_regions_used == state->fc_regions_size) { + state->fc_regions_size += + EXT4_FC_REPLAY_REALLOC_INCREMENT; + state->fc_regions = krealloc( + state->fc_regions, + state->fc_regions_size * + sizeof(struct ext4_fc_alloc_region), + GFP_KERNEL); + if (!state->fc_regions) + return -ENOMEM; + } + region = &state->fc_regions[state->fc_regions_used++]; + region->ino = ino; + region->lblk = lblk; + region->pblk = pblk; + region->len = len; + + return 0; +} + +/* Replay add range tag */ +static int ext4_fc_replay_add_range(struct super_block *sb, + struct ext4_fc_tl *tl) +{ + struct ext4_fc_add_range *fc_add_ex; + struct ext4_extent newex, *ex; + struct inode *inode; + ext4_lblk_t start, cur; + int remaining, len; + ext4_fsblk_t start_pblk; + struct ext4_map_blocks map; + struct ext4_ext_path *path = NULL; + int ret; + + fc_add_ex = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl); + ex = (struct ext4_extent *)&fc_add_ex->fc_ex; + + trace_ext4_fc_replay(sb, EXT4_FC_TAG_ADD_RANGE, + le32_to_cpu(fc_add_ex->fc_ino), le32_to_cpu(ex->ee_block), + ext4_ext_get_actual_len(ex)); + + inode = ext4_iget(sb, le32_to_cpu(fc_add_ex->fc_ino), + EXT4_IGET_NORMAL); + if (IS_ERR_OR_NULL(inode)) { + jbd_debug(1, "Inode not found."); + return 0; + } + + ret = ext4_fc_record_modified_inode(sb, inode->i_ino); + + start = le32_to_cpu(ex->ee_block); + start_pblk = ext4_ext_pblock(ex); + len = ext4_ext_get_actual_len(ex); + + cur = start; + remaining = len; + jbd_debug(1, "ADD_RANGE, lblk %d, pblk %lld, len %d, unwritten %d, inode %ld\n", + start, start_pblk, len, ext4_ext_is_unwritten(ex), + inode->i_ino); + + while (remaining > 0) { + map.m_lblk = cur; + map.m_len = remaining; + map.m_pblk = 0; + ret = ext4_map_blocks(NULL, inode, &map, 0); + + if (ret < 0) { + 
iput(inode); + return 0; + } + + if (ret == 0) { + /* Range is not mapped */ + path = ext4_find_extent(inode, cur, NULL, 0); + if (!path) + continue; + memset(&newex, 0, sizeof(newex)); + newex.ee_block = cpu_to_le32(cur); + ext4_ext_store_pblock( + &newex, start_pblk + cur - start); + newex.ee_len = cpu_to_le16(map.m_len); + if (ext4_ext_is_unwritten(ex)) + ext4_ext_mark_unwritten(&newex); + down_write(&EXT4_I(inode)->i_data_sem); + ret = ext4_ext_insert_extent( + NULL, inode, &path, &newex, 0); + up_write((&EXT4_I(inode)->i_data_sem)); + ext4_ext_drop_refs(path); + kfree(path); + if (ret) { + iput(inode); + return 0; + } + goto next; + } + + if (start_pblk + cur - start != map.m_pblk) { + /* + * Logical to physical mapping changed. This can happen + * if this range was removed and then reallocated to + * map to new physical blocks during a fast commit. + */ + ret = ext4_ext_replay_update_ex(inode, cur, map.m_len, + ext4_ext_is_unwritten(ex), + start_pblk + cur - start); + if (ret) { + iput(inode); + return 0; + } + /* + * Mark the old blocks as free since they aren't used + * anymore. We maintain an array of all the modified + * inodes. In case these blocks are still used at either + * a different logical range in the same inode or in + * some different inode, we will mark them as allocated + * at the end of the FC replay using our array of + * modified inodes. + */ + ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0); + goto next; + } + + /* Range is mapped and needs a state change */ + jbd_debug(1, "Converting from %d to %d %lld", + map.m_flags & EXT4_MAP_UNWRITTEN, + ext4_ext_is_unwritten(ex), map.m_pblk); + ret = ext4_ext_replay_update_ex(inode, cur, map.m_len, + ext4_ext_is_unwritten(ex), map.m_pblk); + if (ret) { + iput(inode); + return 0; + } + /* + * We may have split the extent tree while toggling the state. + * Try to shrink the extent tree now. 
+ */ + ext4_ext_replay_shrink_inode(inode, start + len); +next: + cur += map.m_len; + remaining -= map.m_len; + } + ext4_ext_replay_shrink_inode(inode, i_size_read(inode) >> + sb->s_blocksize_bits); + iput(inode); + return 0; +} + +/* Replay DEL_RANGE tag */ +static int +ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl) +{ + struct inode *inode; + struct ext4_fc_del_range *lrange; + struct ext4_map_blocks map; + ext4_lblk_t cur, remaining; + int ret; + + lrange = (struct ext4_fc_del_range *)ext4_fc_tag_val(tl); + cur = le32_to_cpu(lrange->fc_lblk); + remaining = le32_to_cpu(lrange->fc_len); + + trace_ext4_fc_replay(sb, EXT4_FC_TAG_DEL_RANGE, + le32_to_cpu(lrange->fc_ino), cur, remaining); + + inode = ext4_iget(sb, le32_to_cpu(lrange->fc_ino), EXT4_IGET_NORMAL); + if (IS_ERR_OR_NULL(inode)) { + jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange->fc_ino)); + return 0; + } + + ret = ext4_fc_record_modified_inode(sb, inode->i_ino); + + jbd_debug(1, "DEL_RANGE, inode %ld, lblk %d, len %d\n", + inode->i_ino, le32_to_cpu(lrange->fc_lblk), + le32_to_cpu(lrange->fc_len)); + while (remaining > 0) { + map.m_lblk = cur; + map.m_len = remaining; + + ret = ext4_map_blocks(NULL, inode, &map, 0); + if (ret < 0) { + iput(inode); + return 0; + } + if (ret > 0) { + remaining -= ret; + cur += ret; + ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0); + } else { + remaining -= map.m_len; + cur += map.m_len; + } + } + + ret = ext4_punch_hole(inode, + le32_to_cpu(lrange->fc_lblk) << sb->s_blocksize_bits, + le32_to_cpu(lrange->fc_len) << sb->s_blocksize_bits); + if (ret) + jbd_debug(1, "ext4_punch_hole returned %d", ret); + ext4_ext_replay_shrink_inode(inode, + i_size_read(inode) >> sb->s_blocksize_bits); + ext4_mark_inode_dirty(NULL, inode); + iput(inode); + + return 0; +} + +static inline const char *tag2str(u16 tag) +{ + switch (tag) { + case EXT4_FC_TAG_LINK: + return "TAG_ADD_ENTRY"; + case EXT4_FC_TAG_UNLINK: + return "TAG_DEL_ENTRY"; + case EXT4_FC_TAG_ADD_RANGE: + return "TAG_ADD_RANGE"; + case EXT4_FC_TAG_CREAT: + return "TAG_CREAT_DENTRY"; + case EXT4_FC_TAG_DEL_RANGE: + return "TAG_DEL_RANGE"; + case EXT4_FC_TAG_INODE: + return "TAG_INODE"; + case EXT4_FC_TAG_PAD: + return "TAG_PAD"; + case EXT4_FC_TAG_TAIL: + return "TAG_TAIL"; + case EXT4_FC_TAG_HEAD: + return "TAG_HEAD"; + default: + return "TAG_ERROR"; + } +} + +static void ext4_fc_set_bitmaps_and_counters(struct super_block *sb) +{ + struct ext4_fc_replay_state *state; + struct inode *inode; + struct ext4_ext_path *path = NULL; + struct ext4_map_blocks map; + int i, ret, j; + ext4_lblk_t cur, end; + + state = &EXT4_SB(sb)->s_fc_replay_state; + for (i = 0; i < state->fc_modified_inodes_used; i++) { + inode = ext4_iget(sb, state->fc_modified_inodes[i], + EXT4_IGET_NORMAL); + if (IS_ERR_OR_NULL(inode)) { + jbd_debug(1, "Inode %d not found.", + state->fc_modified_inodes[i]); + continue; + } + cur = 0; + end = EXT_MAX_BLOCKS; + while (cur < end) { + map.m_lblk = cur; + map.m_len = end - cur; + + ret = ext4_map_blocks(NULL, inode, &map, 0); + if (ret < 0) + break; + + if (ret > 0) { + path = ext4_find_extent(inode, map.m_lblk, NULL, 0); + if (!IS_ERR_OR_NULL(path)) { + for (j = 0; j < path->p_depth; j++) + ext4_mb_mark_bb(inode->i_sb, + path[j].p_block, 1, 1); + ext4_ext_drop_refs(path); + kfree(path); + } + cur += ret; + ext4_mb_mark_bb(inode->i_sb, map.m_pblk, + map.m_len, 1); + } else { + cur = cur + (map.m_len ? 
map.m_len : 1); + } + } + iput(inode); + } +} + +/* + * Check if block is in excluded regions for block allocation. The simple + * allocator that runs during replay phase is calls this function to see + * if it is okay to use a block. + */ +bool ext4_fc_replay_check_excluded(struct super_block *sb, ext4_fsblk_t blk) +{ + int i; + struct ext4_fc_replay_state *state; + + state = &EXT4_SB(sb)->s_fc_replay_state; + for (i = 0; i < state->fc_regions_valid; i++) { + if (state->fc_regions[i].ino == 0 || + state->fc_regions[i].len == 0) + continue; + if (blk >= state->fc_regions[i].pblk && + blk < state->fc_regions[i].pblk + state->fc_regions[i].len) + return true; + } + return false; +} + +/* Cleanup function called after replay */ +void ext4_fc_replay_cleanup(struct super_block *sb) +{ + struct ext4_sb_info *sbi = EXT4_SB(sb); + + sbi->s_mount_state &= ~EXT4_FC_REPLAY; + kfree(sbi->s_fc_replay_state.fc_regions); + kfree(sbi->s_fc_replay_state.fc_modified_inodes); +} + +/* + * Recovery Scan phase handler + * + * This function is called during the scan phase and is responsible + * for doing following things: + * - Make sure the fast commit area has valid tags for replay + * - Count number of tags that need to be replayed by the replay handler + * - Verify CRC + * - Create a list of excluded blocks for allocation during replay phase + * + * This function returns JBD2_FC_REPLAY_CONTINUE to indicate that SCAN is + * incomplete and JBD2 should send more blocks. It returns JBD2_FC_REPLAY_STOP + * to indicate that scan has finished and JBD2 can now start replay phase. + * It returns a negative error to indicate that there was an error. At the end + * of a successful scan phase, sbi->s_fc_replay_state.fc_replay_num_tags is set + * to indicate the number of tags that need to replayed during the replay phase. 
+ */ +static int ext4_fc_replay_scan(journal_t *journal, + struct buffer_head *bh, int off, + tid_t expected_tid) +{ + struct super_block *sb = journal->j_private; + struct ext4_sb_info *sbi = EXT4_SB(sb); + struct ext4_fc_replay_state *state; + int ret = JBD2_FC_REPLAY_CONTINUE; + struct ext4_fc_add_range *ext; + struct ext4_fc_tl *tl; + struct ext4_fc_tail *tail; + __u8 *start, *end; + struct ext4_fc_head *head; + struct ext4_extent *ex; + + state = &sbi->s_fc_replay_state; + + start = (u8 *)bh->b_data; + end = (__u8 *)bh->b_data + journal->j_blocksize - 1; + + if (state->fc_replay_expected_off == 0) { + state->fc_cur_tag = 0; + state->fc_replay_num_tags = 0; + state->fc_crc = 0; + state->fc_regions = NULL; + state->fc_regions_valid = state->fc_regions_used = + state->fc_regions_size = 0; + /* Check if we can stop early */ + if (le16_to_cpu(((struct ext4_fc_tl *)start)->fc_tag) + != EXT4_FC_TAG_HEAD) + return 0; + } + + if (off != state->fc_replay_expected_off) { + ret = -EFSCORRUPTED; + goto out_err; + } + + state->fc_replay_expected_off++; + fc_for_each_tl(start, end, tl) { + jbd_debug(3, "Scan phase, tag:%s, blk %lld\n", + tag2str(le16_to_cpu(tl->fc_tag)), bh->b_blocknr); + switch (le16_to_cpu(tl->fc_tag)) { + case EXT4_FC_TAG_ADD_RANGE: + ext = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl); + ex = (struct ext4_extent *)&ext->fc_ex; + ret = ext4_fc_record_regions(sb, + le32_to_cpu(ext->fc_ino), + le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex), + ext4_ext_get_actual_len(ex)); + if (ret < 0) + break; + ret = JBD2_FC_REPLAY_CONTINUE; + fallthrough; + case EXT4_FC_TAG_DEL_RANGE: + case EXT4_FC_TAG_LINK: + case EXT4_FC_TAG_UNLINK: + case EXT4_FC_TAG_CREAT: + case EXT4_FC_TAG_INODE: + case EXT4_FC_TAG_PAD: + state->fc_cur_tag++; + state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl, + sizeof(*tl) + ext4_fc_tag_len(tl)); + break; + case EXT4_FC_TAG_TAIL: + state->fc_cur_tag++; + tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl); + state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl, + sizeof(*tl) + + offsetof(struct ext4_fc_tail, + fc_crc)); + if (le32_to_cpu(tail->fc_tid) == expected_tid && + le32_to_cpu(tail->fc_crc) == state->fc_crc) { + state->fc_replay_num_tags = state->fc_cur_tag; + state->fc_regions_valid = + state->fc_regions_used; + } else { + ret = state->fc_replay_num_tags ? + JBD2_FC_REPLAY_STOP : -EFSBADCRC; + } + state->fc_crc = 0; + break; + case EXT4_FC_TAG_HEAD: + head = (struct ext4_fc_head *)ext4_fc_tag_val(tl); + if (le32_to_cpu(head->fc_features) & + ~EXT4_FC_SUPPORTED_FEATURES) { + ret = -EOPNOTSUPP; + break; + } + if (le32_to_cpu(head->fc_tid) != expected_tid) { + ret = JBD2_FC_REPLAY_STOP; + break; + } + state->fc_cur_tag++; + state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl, + sizeof(*tl) + ext4_fc_tag_len(tl)); + break; + default: + ret = state->fc_replay_num_tags ? + JBD2_FC_REPLAY_STOP : -ECANCELED; + } + if (ret < 0 || ret == JBD2_FC_REPLAY_STOP) + break; + } + +out_err: + trace_ext4_fc_replay_scan(sb, ret, off); + return ret; +} + /* * Main recovery path entry point. + * The meaning of return codes is similar as above. 
*/ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh, enum passtype pass, int off, tid_t expected_tid) { - return 0; + struct super_block *sb = journal->j_private; + struct ext4_sb_info *sbi = EXT4_SB(sb); + struct ext4_fc_tl *tl; + __u8 *start, *end; + int ret = JBD2_FC_REPLAY_CONTINUE; + struct ext4_fc_replay_state *state = &sbi->s_fc_replay_state; + struct ext4_fc_tail *tail; + + if (pass == PASS_SCAN) { + state->fc_current_pass = PASS_SCAN; + return ext4_fc_replay_scan(journal, bh, off, expected_tid); + } + + if (state->fc_current_pass != pass) { + state->fc_current_pass = pass; + sbi->s_mount_state |= EXT4_FC_REPLAY; + } + if (!sbi->s_fc_replay_state.fc_replay_num_tags) { + jbd_debug(1, "Replay stops\n"); + ext4_fc_set_bitmaps_and_counters(sb); + return 0; + } + +#ifdef CONFIG_EXT4_DEBUG + if (sbi->s_fc_debug_max_replay && off >= sbi->s_fc_debug_max_replay) { + pr_warn("Dropping fc block %d because max_replay set\n", off); + return JBD2_FC_REPLAY_STOP; + } +#endif + + start = (u8 *)bh->b_data; + end = (__u8 *)bh->b_data + journal->j_blocksize - 1; + + fc_for_each_tl(start, end, tl) { + if (state->fc_replay_num_tags == 0) { + ret = JBD2_FC_REPLAY_STOP; + ext4_fc_set_bitmaps_and_counters(sb); + break; + } + jbd_debug(3, "Replay phase, tag:%s\n", + tag2str(le16_to_cpu(tl->fc_tag))); + state->fc_replay_num_tags--; + switch (le16_to_cpu(tl->fc_tag)) { + case EXT4_FC_TAG_LINK: + ret = ext4_fc_replay_link(sb, tl); + break; + case EXT4_FC_TAG_UNLINK: + ret = ext4_fc_replay_unlink(sb, tl); + break; + case EXT4_FC_TAG_ADD_RANGE: + ret = ext4_fc_replay_add_range(sb, tl); + break; + case EXT4_FC_TAG_CREAT: + ret = ext4_fc_replay_create(sb, tl); + break; + case EXT4_FC_TAG_DEL_RANGE: + ret = ext4_fc_replay_del_range(sb, tl); + break; + case EXT4_FC_TAG_INODE: + ret = ext4_fc_replay_inode(sb, tl); + break; + case EXT4_FC_TAG_PAD: + trace_ext4_fc_replay(sb, EXT4_FC_TAG_PAD, 0, + ext4_fc_tag_len(tl), 0); + break; + case EXT4_FC_TAG_TAIL: + trace_ext4_fc_replay(sb, EXT4_FC_TAG_TAIL, 0, + ext4_fc_tag_len(tl), 0); + tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl); + WARN_ON(le32_to_cpu(tail->fc_tid) != expected_tid); + break; + case EXT4_FC_TAG_HEAD: + break; + default: + trace_ext4_fc_replay(sb, le16_to_cpu(tl->fc_tag), 0, + ext4_fc_tag_len(tl), 0); + ret = -ECANCELED; + break; + } + if (ret < 0) + break; + ret = JBD2_FC_REPLAY_CONTINUE; + } + return ret; } void ext4_fc_init(struct super_block *sb, journal_t *journal) diff --git a/fs/ext4/fast_commit.h b/fs/ext4/fast_commit.h index 560bc9ca8c79..06907d485989 100644 --- a/fs/ext4/fast_commit.h +++ b/fs/ext4/fast_commit.h @@ -116,4 +116,44 @@ struct ext4_fc_stats { unsigned long fc_numblks; }; +#define EXT4_FC_REPLAY_REALLOC_INCREMENT 4 + +/* + * Physical block regions added to different inodes due to fast commit + * recovery. These are set during the SCAN phase. During the replay phase, + * our allocator excludes these from its allocation. This ensures that + * we don't accidentally allocating a block that is going to be used by + * another inode. + */ +struct ext4_fc_alloc_region { + ext4_lblk_t lblk; + ext4_fsblk_t pblk; + int ino, len; +}; + +/* + * Fast commit replay state. 
+ */ +struct ext4_fc_replay_state { + int fc_replay_num_tags; + int fc_replay_expected_off; + int fc_current_pass; + int fc_cur_tag; + int fc_crc; + struct ext4_fc_alloc_region *fc_regions; + int fc_regions_size, fc_regions_used, fc_regions_valid; + int *fc_modified_inodes; + int fc_modified_inodes_used, fc_modified_inodes_size; +}; + +#define region_last(__region) (((__region)->lblk) + ((__region)->len) - 1) + +#define fc_for_each_tl(__start, __end, __tl) \ + for (tl = (struct ext4_fc_tl *)start; \ + (u8 *)tl < (u8 *)end; \ + tl = (struct ext4_fc_tl *)((u8 *)tl + \ + sizeof(struct ext4_fc_tl) + \ + + le16_to_cpu(tl->fc_len))) + + #endif /* __FAST_COMMIT_H__ */ diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 33c0fc0197ce..2400a8200435 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -82,7 +82,12 @@ static int ext4_validate_inode_bitmap(struct super_block *sb, struct buffer_head *bh) { ext4_fsblk_t blk; - struct ext4_group_info *grp = ext4_get_group_info(sb, block_group); + struct ext4_group_info *grp; + + if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) + return 0; + + grp = ext4_get_group_info(sb, block_group); if (buffer_verified(bh)) return 0; @@ -281,15 +286,17 @@ void ext4_free_inode(handle_t *handle, struct inode *inode) bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb); bitmap_bh = ext4_read_inode_bitmap(sb, block_group); /* Don't bother if the inode bitmap is corrupt. */ - grp = ext4_get_group_info(sb, block_group); if (IS_ERR(bitmap_bh)) { fatal = PTR_ERR(bitmap_bh); bitmap_bh = NULL; goto error_return; } - if (unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) { - fatal = -EFSCORRUPTED; - goto error_return; + if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) { + grp = ext4_get_group_info(sb, block_group); + if (unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) { + fatal = -EFSCORRUPTED; + goto error_return; + } } BUFFER_TRACE(bitmap_bh, "get_write_access"); @@ -739,6 +746,122 @@ not_found: return 1; } +int ext4_mark_inode_used(struct super_block *sb, int ino) +{ + unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count); + struct buffer_head *inode_bitmap_bh = NULL, *group_desc_bh = NULL; + struct ext4_group_desc *gdp; + ext4_group_t group; + int bit; + int err = -EFSCORRUPTED; + + if (ino < EXT4_FIRST_INO(sb) || ino > max_ino) + goto out; + + group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); + bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb); + inode_bitmap_bh = ext4_read_inode_bitmap(sb, group); + if (IS_ERR(inode_bitmap_bh)) + return PTR_ERR(inode_bitmap_bh); + + if (ext4_test_bit(bit, inode_bitmap_bh->b_data)) { + err = 0; + goto out; + } + + gdp = ext4_get_group_desc(sb, group, &group_desc_bh); + if (!gdp || !group_desc_bh) { + err = -EINVAL; + goto out; + } + + ext4_set_bit(bit, inode_bitmap_bh->b_data); + + BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata"); + err = ext4_handle_dirty_metadata(NULL, NULL, inode_bitmap_bh); + if (err) { + ext4_std_error(sb, err); + goto out; + } + err = sync_dirty_buffer(inode_bitmap_bh); + if (err) { + ext4_std_error(sb, err); + goto out; + } + + /* We may have to initialize the block bitmap if it isn't already */ + if (ext4_has_group_desc_csum(sb) && + gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { + struct buffer_head *block_bitmap_bh; + + block_bitmap_bh = ext4_read_block_bitmap(sb, group); + if (IS_ERR(block_bitmap_bh)) { + err = PTR_ERR(block_bitmap_bh); + goto out; + } + + BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap"); + err = ext4_handle_dirty_metadata(NULL, NULL, block_bitmap_bh); + 
sync_dirty_buffer(block_bitmap_bh); + + /* recheck and clear flag under lock if we still need to */ + ext4_lock_group(sb, group); + if (ext4_has_group_desc_csum(sb) && + (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { + gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); + ext4_free_group_clusters_set(sb, gdp, + ext4_free_clusters_after_init(sb, group, gdp)); + ext4_block_bitmap_csum_set(sb, group, gdp, + block_bitmap_bh); + ext4_group_desc_csum_set(sb, group, gdp); + } + ext4_unlock_group(sb, group); + brelse(block_bitmap_bh); + + if (err) { + ext4_std_error(sb, err); + goto out; + } + } + + /* Update the relevant bg descriptor fields */ + if (ext4_has_group_desc_csum(sb)) { + int free; + + ext4_lock_group(sb, group); /* while we modify the bg desc */ + free = EXT4_INODES_PER_GROUP(sb) - + ext4_itable_unused_count(sb, gdp); + if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) { + gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT); + free = 0; + } + + /* + * Check the relative inode number against the last used + * relative inode number in this group. if it is greater + * we need to update the bg_itable_unused count + */ + if (bit >= free) + ext4_itable_unused_set(sb, gdp, + (EXT4_INODES_PER_GROUP(sb) - bit - 1)); + } else { + ext4_lock_group(sb, group); + } + + ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1); + if (ext4_has_group_desc_csum(sb)) { + ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh, + EXT4_INODES_PER_GROUP(sb) / 8); + ext4_group_desc_csum_set(sb, group, gdp); + } + + ext4_unlock_group(sb, group); + err = ext4_handle_dirty_metadata(NULL, NULL, group_desc_bh); + sync_dirty_buffer(group_desc_bh); +out: + return err; +} + /* * There are two policies for allocating an inode. If the new inode is * a directory, then a forward search is made for a block group with both @@ -768,7 +891,7 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir, struct inode *ret; ext4_group_t i; ext4_group_t flex_group; - struct ext4_group_info *grp; + struct ext4_group_info *grp = NULL; int encrypt = 0; /* Cannot create files in a deleted directory */ @@ -906,15 +1029,21 @@ got_group: if (ext4_free_inodes_count(sb, gdp) == 0) goto next_group; - grp = ext4_get_group_info(sb, group); - /* Skip groups with already-known suspicious inode tables */ - if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) - goto next_group; + if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) { + grp = ext4_get_group_info(sb, group); + /* + * Skip groups with already-known suspicious inode + * tables + */ + if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) + goto next_group; + } brelse(inode_bitmap_bh); inode_bitmap_bh = ext4_read_inode_bitmap(sb, group); /* Skip groups with suspicious inode tables */ - if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp) || + if (((!(sbi->s_mount_state & EXT4_FC_REPLAY)) + && EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) || IS_ERR(inode_bitmap_bh)) { inode_bitmap_bh = NULL; goto next_group; @@ -933,7 +1062,7 @@ repeat_in_this_group: goto next_group; } - if (!handle) { + if ((!(sbi->s_mount_state & EXT4_FC_REPLAY)) && !handle) { BUG_ON(nblocks <= 0); handle = __ext4_journal_start_sb(dir->i_sb, line_no, handle_type, nblocks, 0, @@ -1037,9 +1166,15 @@ got: /* Update the relevant bg descriptor fields */ if (ext4_has_group_desc_csum(sb)) { int free; - struct ext4_group_info *grp = ext4_get_group_info(sb, group); - - down_read(&grp->alloc_sem); /* protect vs itable lazyinit */ + struct ext4_group_info *grp = NULL; + + if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) { + grp = ext4_get_group_info(sb, group); 
+ down_read(&grp->alloc_sem); /* + * protect vs itable + * lazyinit + */ + } ext4_lock_group(sb, group); /* while we modify the bg desc */ free = EXT4_INODES_PER_GROUP(sb) - ext4_itable_unused_count(sb, gdp); @@ -1055,7 +1190,8 @@ got: if (ino > free) ext4_itable_unused_set(sb, gdp, (EXT4_INODES_PER_GROUP(sb) - ino)); - up_read(&grp->alloc_sem); + if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) + up_read(&grp->alloc_sem); } else { ext4_lock_group(sb, group); } diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 7da82b6fdb74..43d6a07262d2 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -101,8 +101,8 @@ static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw, return provided == calculated; } -static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw, - struct ext4_inode_info *ei) +void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw, + struct ext4_inode_info *ei) { __u32 csum; @@ -514,7 +514,8 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, return -EFSCORRUPTED; /* Lookup extent status tree firstly */ - if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) { + if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) && + ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) { if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) { map->m_pblk = ext4_es_pblock(&es) + map->m_lblk - es.es_lblk; @@ -827,7 +828,8 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode, int create = map_flags & EXT4_GET_BLOCKS_CREATE; int err; - J_ASSERT(handle != NULL || create == 0); + J_ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) + || handle != NULL || create == 0); map.m_lblk = block; map.m_len = 1; @@ -843,7 +845,8 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode, return ERR_PTR(-ENOMEM); if (map.m_flags & EXT4_MAP_NEW) { J_ASSERT(create != 0); - J_ASSERT(handle != NULL); + J_ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) + || (handle != NULL)); /* * Now that we do not always journal data, we should @@ -4284,22 +4287,22 @@ out_trace: * data in memory that is needed to recreate the on-disk version of this * inode. 
*/ -static int __ext4_get_inode_loc(struct inode *inode, - struct ext4_iloc *iloc, int in_mem) +static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino, + struct ext4_iloc *iloc, int in_mem, + ext4_fsblk_t *ret_block) { struct ext4_group_desc *gdp; struct buffer_head *bh; - struct super_block *sb = inode->i_sb; ext4_fsblk_t block; struct blk_plug plug; int inodes_per_block, inode_offset; iloc->bh = NULL; - if (inode->i_ino < EXT4_ROOT_INO || - inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)) + if (ino < EXT4_ROOT_INO || + ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)) return -EFSCORRUPTED; - iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); + iloc->block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); if (!gdp) return -EIO; @@ -4308,7 +4311,7 @@ static int __ext4_get_inode_loc(struct inode *inode, * Figure out the offset within the block group inode table */ inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; - inode_offset = ((inode->i_ino - 1) % + inode_offset = ((ino - 1) % EXT4_INODES_PER_GROUP(sb)); block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); @@ -4400,14 +4403,14 @@ make_io: * has in-inode xattrs, or we don't have this inode in memory. * Read the block from disk. */ - trace_ext4_load_inode(inode); + trace_ext4_load_inode(sb, ino); ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL); blk_finish_plug(&plug); wait_on_buffer(bh); if (!buffer_uptodate(bh)) { simulate_eio: - ext4_error_inode_block(inode, block, EIO, - "unable to read itable block"); + if (ret_block) + *ret_block = block; brelse(bh); return -EIO; } @@ -4417,11 +4420,43 @@ has_buffer: return 0; } +static int __ext4_get_inode_loc_noinmem(struct inode *inode, + struct ext4_iloc *iloc) +{ + ext4_fsblk_t err_blk; + int ret; + + ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, iloc, 0, + &err_blk); + + if (ret == -EIO) + ext4_error_inode_block(inode, err_blk, EIO, + "unable to read itable block"); + + return ret; +} + int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) { + ext4_fsblk_t err_blk; + int ret; + /* We have all inode data except xattrs in memory here. 
*/ - return __ext4_get_inode_loc(inode, iloc, - !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); + ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, iloc, + !ext4_test_inode_state(inode, EXT4_STATE_XATTR), &err_blk); + + if (ret == -EIO) + ext4_error_inode_block(inode, err_blk, EIO, + "unable to read itable block"); + + return ret; +} + + +int ext4_get_fc_inode_loc(struct super_block *sb, unsigned long ino, + struct ext4_iloc *iloc) +{ + return __ext4_get_inode_loc(sb, ino, iloc, 0, NULL); } static bool ext4_should_enable_dax(struct inode *inode) @@ -4587,7 +4622,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, ei = EXT4_I(inode); iloc.bh = NULL; - ret = __ext4_get_inode_loc(inode, &iloc, 0); + ret = __ext4_get_inode_loc_noinmem(inode, &iloc); if (ret < 0) goto bad_inode; raw_inode = ext4_raw_inode(&iloc); @@ -4633,10 +4668,11 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, sizeof(gen)); } - if (!ext4_inode_csum_verify(inode, raw_inode, ei) || - ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) { - ext4_error_inode_err(inode, function, line, 0, EFSBADCRC, - "iget: checksum invalid"); + if ((!ext4_inode_csum_verify(inode, raw_inode, ei) || + ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) && + (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))) { + ext4_error_inode_err(inode, function, line, 0, + EFSBADCRC, "iget: checksum invalid"); ret = -EFSBADCRC; goto bad_inode; } @@ -4790,9 +4826,10 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, goto bad_inode; } else if (!ext4_has_inline_data(inode)) { /* validate the block references in the inode */ - if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || - (S_ISLNK(inode->i_mode) && - !ext4_inode_is_fast_symlink(inode))) { + if (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) && + (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || + (S_ISLNK(inode->i_mode) && + !ext4_inode_is_fast_symlink(inode)))) { if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) ret = ext4_ext_check_inode(inode); else @@ -5176,7 +5213,7 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) } else { struct ext4_iloc iloc; - err = __ext4_get_inode_loc(inode, &iloc, 0); + err = __ext4_get_inode_loc_noinmem(inode, &iloc); if (err) return err; /* diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index d2f8f50deef6..f0381876a7e5 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -86,7 +86,7 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2) i_size_write(inode2, isize); } -static void reset_inode_seed(struct inode *inode) +void ext4_reset_inode_seed(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); @@ -200,8 +200,8 @@ static long swap_inode_boot_loader(struct super_block *sb, inode->i_generation = prandom_u32(); inode_bl->i_generation = prandom_u32(); - reset_inode_seed(inode); - reset_inode_seed(inode_bl); + ext4_reset_inode_seed(inode); + ext4_reset_inode_seed(inode_bl); ext4_discard_preallocations(inode, 0); diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 74a48d6ff9cc..85abbfb98cbe 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -1502,14 +1502,16 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, blocknr = ext4_group_first_block_no(sb, e4b->bd_group); blocknr += EXT4_C2B(sbi, block); - ext4_grp_locked_error(sb, e4b->bd_group, - inode ? 
inode->i_ino : 0, - blocknr, - "freeing already freed block " - "(bit %u); block bitmap corrupt.", - block); - ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, + if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) { + ext4_grp_locked_error(sb, e4b->bd_group, + inode ? inode->i_ino : 0, + blocknr, + "freeing already freed block (bit %u); block bitmap corrupt.", + block); + ext4_mark_group_bitmap_corrupted( + sb, e4b->bd_group, EXT4_GROUP_INFO_BBITMAP_CORRUPT); + } mb_regenerate_buddy(e4b); goto done; } @@ -3296,6 +3298,84 @@ out_err: return err; } +/* + * Idempotent helper for Ext4 fast commit replay path to set the state of + * blocks in bitmaps and update counters. + */ +void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block, + int len, int state) +{ + struct buffer_head *bitmap_bh = NULL; + struct ext4_group_desc *gdp; + struct buffer_head *gdp_bh; + struct ext4_sb_info *sbi = EXT4_SB(sb); + ext4_group_t group; + ext4_grpblk_t blkoff; + int i, clen, err; + int already; + + clen = EXT4_B2C(sbi, len); + + ext4_get_group_no_and_offset(sb, block, &group, &blkoff); + bitmap_bh = ext4_read_block_bitmap(sb, group); + if (IS_ERR(bitmap_bh)) { + err = PTR_ERR(bitmap_bh); + bitmap_bh = NULL; + goto out_err; + } + + err = -EIO; + gdp = ext4_get_group_desc(sb, group, &gdp_bh); + if (!gdp) + goto out_err; + + ext4_lock_group(sb, group); + already = 0; + for (i = 0; i < clen; i++) + if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) == !state) + already++; + + if (state) + ext4_set_bits(bitmap_bh->b_data, blkoff, clen); + else + mb_test_and_clear_bits(bitmap_bh->b_data, blkoff, clen); + if (ext4_has_group_desc_csum(sb) && + (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { + gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); + ext4_free_group_clusters_set(sb, gdp, + ext4_free_clusters_after_init(sb, + group, gdp)); + } + if (state) + clen = ext4_free_group_clusters(sb, gdp) - clen + already; + else + clen = ext4_free_group_clusters(sb, gdp) + clen - already; + + ext4_free_group_clusters_set(sb, gdp, clen); + ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh); + ext4_group_desc_csum_set(sb, group, gdp); + + ext4_unlock_group(sb, group); + + if (sbi->s_log_groups_per_flex) { + ext4_group_t flex_group = ext4_flex_group(sbi, group); + + atomic64_sub(len, + &sbi_array_rcu_deref(sbi, s_flex_groups, + flex_group)->free_clusters); + } + + err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh); + if (err) + goto out_err; + sync_dirty_buffer(bitmap_bh); + err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh); + sync_dirty_buffer(gdp_bh); + +out_err: + brelse(bitmap_bh); +} + /* * here we normalize request for locality group * Group request are normalized to s_mb_group_prealloc, which goes to @@ -4272,6 +4352,9 @@ void ext4_discard_preallocations(struct inode *inode, unsigned int needed) return; } + if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) + return; + mb_debug(sb, "discard preallocation for inode %lu\n", inode->i_ino); trace_ext4_discard_preallocations(inode, @@ -4819,6 +4902,9 @@ out_dbg: return ret; } +static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle, + struct ext4_allocation_request *ar, int *errp); + /* * Main entry point into mballoc to allocate blocks * it tries to use preallocation first, then falls back @@ -4840,6 +4926,8 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, sbi = EXT4_SB(sb); trace_ext4_request_blocks(ar); + if (sbi->s_mount_state & EXT4_FC_REPLAY) + return ext4_mb_new_blocks_simple(handle, ar, errp); /* Allow to use superuser reservation for quota file 
*/ if (ext4_is_quota_file(ar->inode)) @@ -5067,6 +5155,102 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, return 0; } +/* + * Simple allocator for Ext4 fast commit replay path. It searches for blocks + * linearly starting at the goal block and also excludes the blocks which + * are going to be in use after fast commit replay. + */ +static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle, + struct ext4_allocation_request *ar, int *errp) +{ + struct buffer_head *bitmap_bh; + struct super_block *sb = ar->inode->i_sb; + ext4_group_t group; + ext4_grpblk_t blkoff; + int i; + ext4_fsblk_t goal, block; + struct ext4_super_block *es = EXT4_SB(sb)->s_es; + + goal = ar->goal; + if (goal < le32_to_cpu(es->s_first_data_block) || + goal >= ext4_blocks_count(es)) + goal = le32_to_cpu(es->s_first_data_block); + + ar->len = 0; + ext4_get_group_no_and_offset(sb, goal, &group, &blkoff); + for (; group < ext4_get_groups_count(sb); group++) { + bitmap_bh = ext4_read_block_bitmap(sb, group); + if (IS_ERR(bitmap_bh)) { + *errp = PTR_ERR(bitmap_bh); + pr_warn("Failed to read block bitmap\n"); + return 0; + } + + ext4_get_group_no_and_offset(sb, + max(ext4_group_first_block_no(sb, group), goal), + NULL, &blkoff); + i = mb_find_next_zero_bit(bitmap_bh->b_data, sb->s_blocksize, + blkoff); + brelse(bitmap_bh); + if (i >= sb->s_blocksize) + continue; + if (ext4_fc_replay_check_excluded(sb, + ext4_group_first_block_no(sb, group) + i)) + continue; + break; + } + + if (group >= ext4_get_groups_count(sb) && i >= sb->s_blocksize) + return 0; + + block = ext4_group_first_block_no(sb, group) + i; + ext4_mb_mark_bb(sb, block, 1, 1); + ar->len = 1; + + return block; +} + +static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block, + unsigned long count) +{ + struct buffer_head *bitmap_bh; + struct super_block *sb = inode->i_sb; + struct ext4_group_desc *gdp; + struct buffer_head *gdp_bh; + ext4_group_t group; + ext4_grpblk_t blkoff; + int already_freed = 0, err, i; + + ext4_get_group_no_and_offset(sb, block, &group, &blkoff); + bitmap_bh = ext4_read_block_bitmap(sb, group); + if (IS_ERR(bitmap_bh)) { + err = PTR_ERR(bitmap_bh); + pr_warn("Failed to read block bitmap\n"); + return; + } + gdp = ext4_get_group_desc(sb, group, &gdp_bh); + if (!gdp) + return; + + for (i = 0; i < count; i++) { + if (!mb_test_bit(blkoff + i, bitmap_bh->b_data)) + already_freed++; + } + mb_clear_bits(bitmap_bh->b_data, blkoff, count); + err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh); + if (err) + return; + ext4_free_group_clusters_set( + sb, gdp, ext4_free_group_clusters(sb, gdp) + + count - already_freed); + ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh); + ext4_group_desc_csum_set(sb, group, gdp); + ext4_handle_dirty_metadata(NULL, NULL, gdp_bh); + sync_dirty_buffer(bitmap_bh); + sync_dirty_buffer(gdp_bh); + brelse(bitmap_bh); +} + /** * ext4_free_blocks() -- Free given blocks and update quota * @handle: handle for this transaction @@ -5093,6 +5277,13 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, int err = 0; int ret; + sbi = EXT4_SB(sb); + + if (sbi->s_mount_state & EXT4_FC_REPLAY) { + ext4_free_blocks_simple(inode, block, count); + return; + } + might_sleep(); if (bh) { if (block) @@ -5101,7 +5292,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, block = bh->b_blocknr; } - sbi = EXT4_SB(sb); if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && !ext4_inode_block_valid(inode, block, count)) { ext4_error(sb, "Freeing blocks not in datazone - " diff --git 
a/fs/ext4/namei.c b/fs/ext4/namei.c index fd7be1435f2d..cde346074662 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -2749,7 +2749,7 @@ struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode, return ext4_next_entry(de, blocksize); } -static int ext4_init_new_dir(handle_t *handle, struct inode *dir, +int ext4_init_new_dir(handle_t *handle, struct inode *dir, struct inode *inode) { struct buffer_head *dir_block = NULL; @@ -3197,42 +3197,32 @@ end_rmdir: return retval; } -static int ext4_unlink(struct inode *dir, struct dentry *dentry) +int __ext4_unlink(struct inode *dir, const struct qstr *d_name, + struct inode *inode) { - int retval; - struct inode *inode; + int retval = -ENOENT; struct buffer_head *bh; struct ext4_dir_entry_2 *de; handle_t *handle = NULL; + int skip_remove_dentry = 0; - if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb)))) - return -EIO; - - trace_ext4_unlink_enter(dir, dentry); - /* Initialize quotas before so that eventual writes go - * in separate transaction */ - retval = dquot_initialize(dir); - if (retval) - goto out_trace; - retval = dquot_initialize(d_inode(dentry)); - if (retval) - goto out_trace; - - bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL); - if (IS_ERR(bh)) { - retval = PTR_ERR(bh); - goto out_trace; - } - if (!bh) { - retval = -ENOENT; - goto out_trace; - } + bh = ext4_find_entry(dir, d_name, &de, NULL); + if (IS_ERR(bh)) + return PTR_ERR(bh); - inode = d_inode(dentry); + if (!bh) + return -ENOENT; if (le32_to_cpu(de->inode) != inode->i_ino) { - retval = -EFSCORRUPTED; - goto out_bh; + /* + * It's okay if we find dont find dentry which matches + * the inode. That's because it might have gotten + * renamed to a different inode number + */ + if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) + skip_remove_dentry = 1; + else + goto out_bh; } handle = ext4_journal_start(dir, EXT4_HT_DIR, @@ -3245,17 +3235,21 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); - retval = ext4_delete_entry(handle, dir, de, bh); - if (retval) - goto out_handle; - dir->i_ctime = dir->i_mtime = current_time(dir); - ext4_update_dx_flag(dir); - retval = ext4_mark_inode_dirty(handle, dir); - if (retval) - goto out_handle; + if (!skip_remove_dentry) { + retval = ext4_delete_entry(handle, dir, de, bh); + if (retval) + goto out_handle; + dir->i_ctime = dir->i_mtime = current_time(dir); + ext4_update_dx_flag(dir); + retval = ext4_mark_inode_dirty(handle, dir); + if (retval) + goto out_handle; + } else { + retval = 0; + } if (inode->i_nlink == 0) ext4_warning_inode(inode, "Deleting file '%.*s' with no links", - dentry->d_name.len, dentry->d_name.name); + d_name->len, d_name->name); else drop_nlink(inode); if (!inode->i_nlink) @@ -3263,6 +3257,33 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) inode->i_ctime = current_time(inode); retval = ext4_mark_inode_dirty(handle, inode); +out_handle: + ext4_journal_stop(handle); +out_bh: + brelse(bh); + return retval; +} + +static int ext4_unlink(struct inode *dir, struct dentry *dentry) +{ + int retval; + + if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb)))) + return -EIO; + + trace_ext4_unlink_enter(dir, dentry); + /* + * Initialize quotas before so that eventual writes go + * in separate transaction + */ + retval = dquot_initialize(dir); + if (retval) + goto out_trace; + retval = dquot_initialize(d_inode(dentry)); + if (retval) + goto out_trace; + + retval = __ext4_unlink(dir, &dentry->d_name, d_inode(dentry)); if (!retval) 
ext4_fc_track_unlink(d_inode(dentry), dentry); #ifdef CONFIG_UNICODE @@ -3276,10 +3297,6 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) d_invalidate(dentry); #endif -out_handle: - ext4_journal_stop(handle); -out_bh: - brelse(bh); out_trace: trace_ext4_unlink_exit(dentry, retval); return retval; @@ -3360,7 +3377,8 @@ static int ext4_symlink(struct inode *dir, */ drop_nlink(inode); err = ext4_orphan_add(handle, inode); - ext4_journal_stop(handle); + if (handle) + ext4_journal_stop(handle); handle = NULL; if (err) goto err_drop_inode; @@ -3414,29 +3432,10 @@ out_free_encrypted_link: return err; } -static int ext4_link(struct dentry *old_dentry, - struct inode *dir, struct dentry *dentry) +int __ext4_link(struct inode *dir, struct inode *inode, struct dentry *dentry) { handle_t *handle; - struct inode *inode = d_inode(old_dentry); int err, retries = 0; - - if (inode->i_nlink >= EXT4_LINK_MAX) - return -EMLINK; - - err = fscrypt_prepare_link(old_dentry, dir, dentry); - if (err) - return err; - - if ((ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT)) && - (!projid_eq(EXT4_I(dir)->i_projid, - EXT4_I(old_dentry->d_inode)->i_projid))) - return -EXDEV; - - err = dquot_initialize(dir); - if (err) - return err; - retry: handle = ext4_journal_start(dir, EXT4_HT_DIR, (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + @@ -3453,6 +3452,7 @@ retry: err = ext4_add_entry(handle, dentry, inode); if (!err) { + ext4_fc_track_link(inode, dentry); err = ext4_mark_inode_dirty(handle, inode); /* this can happen only for tmpfile being * linked the first time @@ -3470,6 +3470,29 @@ retry: return err; } +static int ext4_link(struct dentry *old_dentry, + struct inode *dir, struct dentry *dentry) +{ + struct inode *inode = d_inode(old_dentry); + int err; + + if (inode->i_nlink >= EXT4_LINK_MAX) + return -EMLINK; + + err = fscrypt_prepare_link(old_dentry, dir, dentry); + if (err) + return err; + + if ((ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT)) && + (!projid_eq(EXT4_I(dir)->i_projid, + EXT4_I(old_dentry->d_inode)->i_projid))) + return -EXDEV; + + err = dquot_initialize(dir); + if (err) + return err; + return __ext4_link(dir, inode, dentry); +} /* * Try to find buffer head where contains the parent block. 
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 10c4df26d257..9fcf1e3dfe76 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1718,6 +1718,9 @@ enum { Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable, Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache, Opt_prefetch_block_bitmaps, Opt_no_fc, +#ifdef CONFIG_EXT4_DEBUG + Opt_fc_debug_max_replay +#endif }; static const match_table_t tokens = { @@ -1805,6 +1808,9 @@ static const match_table_t tokens = { {Opt_init_itable, "init_itable"}, {Opt_noinit_itable, "noinit_itable"}, {Opt_no_fc, "no_fc"}, +#ifdef CONFIG_EXT4_DEBUG + {Opt_fc_debug_max_replay, "fc_debug_max_replay=%u"}, +#endif {Opt_max_dir_size_kb, "max_dir_size_kb=%u"}, {Opt_test_dummy_encryption, "test_dummy_encryption=%s"}, {Opt_test_dummy_encryption, "test_dummy_encryption"}, @@ -2034,6 +2040,9 @@ static const struct mount_opts { MOPT_SET}, {Opt_no_fc, EXT4_MOUNT2_JOURNAL_FAST_COMMIT, MOPT_CLEAR | MOPT_2 | MOPT_EXT4_ONLY}, +#ifdef CONFIG_EXT4_DEBUG + {Opt_fc_debug_max_replay, 0, MOPT_GTE0}, +#endif {Opt_err, 0, 0} }; @@ -2242,6 +2251,10 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, sbi->s_li_wait_mult = arg; } else if (token == Opt_max_dir_size_kb) { sbi->s_max_dir_size_kb = arg; +#ifdef CONFIG_EXT4_DEBUG + } else if (token == Opt_fc_debug_max_replay) { + sbi->s_fc_debug_max_replay = arg; +#endif } else if (token == Opt_stripe) { sbi->s_stripe = arg; } else if (token == Opt_resuid) { @@ -4764,6 +4777,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) sbi->s_mount_state &= ~EXT4_FC_COMMITTING; spin_lock_init(&sbi->s_fc_lock); memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats)); + sbi->s_fc_replay_state.fc_regions = NULL; + sbi->s_fc_replay_state.fc_regions_size = 0; + sbi->s_fc_replay_state.fc_regions_used = 0; + sbi->s_fc_replay_state.fc_regions_valid = 0; + sbi->s_fc_replay_state.fc_modified_inodes = NULL; + sbi->s_fc_replay_state.fc_modified_inodes_size = 0; + sbi->s_fc_replay_state.fc_modified_inodes_used = 0; sb->s_root = NULL; @@ -4979,6 +4999,7 @@ no_journal: goto failed_mount4a; } } + ext4_fc_replay_cleanup(sb); ext4_ext_init(sb); err = ext4_mb_init(sb); diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 521de3a82118..b14314fcf732 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -1776,9 +1776,9 @@ TRACE_EVENT(ext4_ext_load_extent, ); TRACE_EVENT(ext4_load_inode, - TP_PROTO(struct inode *inode), + TP_PROTO(struct super_block *sb, unsigned long ino), - TP_ARGS(inode), + TP_ARGS(sb, ino), TP_STRUCT__entry( __field( dev_t, dev ) @@ -1786,8 +1786,8 @@ TRACE_EVENT(ext4_load_inode, ), TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->ino = inode->i_ino; + __entry->dev = sb->s_dev; + __entry->ino = ino; ), TP_printk("dev %d,%d ino %ld", @@ -2801,6 +2801,54 @@ TRACE_EVENT(ext4_lazy_itable_init, MAJOR(__entry->dev), MINOR(__entry->dev), __entry->group) ); +TRACE_EVENT(ext4_fc_replay_scan, + TP_PROTO(struct super_block *sb, int error, int off), + + TP_ARGS(sb, error, off), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(int, error) + __field(int, off) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->error = error; + __entry->off = off; + ), + + TP_printk("FC scan pass on dev %d,%d: error %d, off %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->error, __entry->off) +); + +TRACE_EVENT(ext4_fc_replay, + TP_PROTO(struct super_block *sb, int tag, int ino, int priv1, int priv2), + + TP_ARGS(sb, tag, ino, 
priv1, priv2), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(int, tag) + __field(int, ino) + __field(int, priv1) + __field(int, priv2) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->tag = tag; + __entry->ino = ino; + __entry->priv1 = priv1; + __entry->priv2 = priv2; + ), + + TP_printk("FC Replay %d,%d: tag %d, ino %d, data1 %d, data2 %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->tag, __entry->ino, __entry->priv1, __entry->priv2) +); + TRACE_EVENT(ext4_fc_commit_start, TP_PROTO(struct super_block *sb), -- cgit v1.2.3 From 0f0672ffb61aebac906cf0aa1607d1f07f266fb6 Mon Sep 17 00:00:00 2001 From: Harshad Shirwadkar Date: Thu, 15 Oct 2020 13:38:00 -0700 Subject: ext4: add a mount opt to forcefully turn fast commits on This is a debug only mount option that forcefully turns fast commits on at mount time. Signed-off-by: Harshad Shirwadkar Link: https://lore.kernel.org/r/20201015203802.3597742-9-harshadshirwadkar@gmail.com Signed-off-by: Theodore Ts'o --- fs/ext4/super.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 9fcf1e3dfe76..5308f0d5fb5a 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1719,8 +1719,9 @@ enum { Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache, Opt_prefetch_block_bitmaps, Opt_no_fc, #ifdef CONFIG_EXT4_DEBUG - Opt_fc_debug_max_replay + Opt_fc_debug_max_replay, #endif + Opt_fc_debug_force }; static const match_table_t tokens = { @@ -1808,6 +1809,7 @@ static const match_table_t tokens = { {Opt_init_itable, "init_itable"}, {Opt_noinit_itable, "noinit_itable"}, {Opt_no_fc, "no_fc"}, + {Opt_fc_debug_force, "fc_debug_force"}, #ifdef CONFIG_EXT4_DEBUG {Opt_fc_debug_max_replay, "fc_debug_max_replay=%u"}, #endif @@ -2040,6 +2042,8 @@ static const struct mount_opts { MOPT_SET}, {Opt_no_fc, EXT4_MOUNT2_JOURNAL_FAST_COMMIT, MOPT_CLEAR | MOPT_2 | MOPT_EXT4_ONLY}, + {Opt_fc_debug_force, EXT4_MOUNT2_JOURNAL_FAST_COMMIT, + MOPT_SET | MOPT_2 | MOPT_EXT4_ONLY}, #ifdef CONFIG_EXT4_DEBUG {Opt_fc_debug_max_replay, 0, MOPT_GTE0}, #endif -- cgit v1.2.3 From ce8c59d197c824789e1ade6f25d36037b4f0faeb Mon Sep 17 00:00:00 2001 From: Harshad Shirwadkar Date: Thu, 15 Oct 2020 13:38:01 -0700 Subject: ext4: add fast commit stats in procfs This commit adds a file in procfs that tracks fast commit related statistics. 
root@kvm-xfstests:/mnt# cat /proc/fs/ext4/vdc/fc_info fc stats: 7772 commits 15 ineligible 4083 numblks 2242us avg_commit_time Ineligible reasons: "Extended attributes changed": 0 "Cross rename": 0 "Journal flag changed": 0 "Insufficient memory": 0 "Swap boot": 0 "Resize": 0 "Dir renamed": 0 "Falloc range op": 0 "FC Commit Failed": 15 Signed-off-by: Harshad Shirwadkar Link: https://lore.kernel.org/r/20201015203802.3597742-10-harshadshirwadkar@gmail.com Signed-off-by: Theodore Ts'o --- fs/ext4/ext4.h | 2 +- fs/ext4/fast_commit.c | 34 ++++++++++++++++++++++++++++++++++ fs/ext4/sysfs.c | 2 ++ 3 files changed, 37 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 152500725acf..0906fd48424b 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -2740,7 +2740,7 @@ extern int ext4_init_inode_table(struct super_block *sb, extern void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate); /* fast_commit.c */ - +int ext4_fc_info_show(struct seq_file *seq, void *v); void ext4_fc_init(struct super_block *sb, journal_t *journal); void ext4_fc_init_inode(struct inode *inode); void ext4_fc_track_range(struct inode *inode, ext4_lblk_t start, diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c index 426c0ab8b70c..447c8d93f480 100644 --- a/fs/ext4/fast_commit.c +++ b/fs/ext4/fast_commit.c @@ -2093,6 +2093,40 @@ void ext4_fc_init(struct super_block *sb, journal_t *journal) } } +const char *fc_ineligible_reasons[] = { + "Extended attributes changed", + "Cross rename", + "Journal flag changed", + "Insufficient memory", + "Swap boot", + "Resize", + "Dir renamed", + "Falloc range op", + "FC Commit Failed" +}; + +int ext4_fc_info_show(struct seq_file *seq, void *v) +{ + struct ext4_sb_info *sbi = EXT4_SB((struct super_block *)seq->private); + struct ext4_fc_stats *stats = &sbi->s_fc_stats; + int i; + + if (v != SEQ_START_TOKEN) + return 0; + + seq_printf(seq, + "fc stats:\n%ld commits\n%ld ineligible\n%ld numblks\n%lluus avg_commit_time\n", + stats->fc_num_commits, stats->fc_ineligible_commits, + stats->fc_numblks, + div_u64(sbi->s_fc_avg_commit_time, 1000)); + seq_puts(seq, "Ineligible reasons:\n"); + for (i = 0; i < EXT4_FC_REASON_MAX; i++) + seq_printf(seq, "\"%s\":\t%d\n", fc_ineligible_reasons[i], + stats->fc_ineligible_reason_count[i]); + + return 0; +} + int __init ext4_fc_init_dentry_cache(void) { ext4_fc_dentry_cachep = KMEM_CACHE(ext4_fc_dentry_update, diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c index bfabb799fa45..5ff33d18996a 100644 --- a/fs/ext4/sysfs.c +++ b/fs/ext4/sysfs.c @@ -521,6 +521,8 @@ int ext4_register_sysfs(struct super_block *sb) proc_create_single_data("es_shrinker_info", S_IRUGO, sbi->s_proc, ext4_seq_es_shrinker_info_show, sb); + proc_create_single_data("fc_info", 0444, sbi->s_proc, + ext4_fc_info_show, sb); proc_create_seq_data("mb_groups", S_IRUGO, sbi->s_proc, &ext4_mb_seq_groups_ops, sb); } -- cgit v1.2.3 From 1322181170bb01bce3c228b82ae3d5c6b793164f Mon Sep 17 00:00:00 2001 From: Luo Meng Date: Tue, 20 Oct 2020 09:36:31 +0800 Subject: ext4: fix invalid inode checksum During the stability test, there are some errors: ext4_lookup:1590: inode #6967: comm fsstress: iget: checksum invalid. If the inode->i_iblocks too big and doesn't set huge file flag, checksum will not be recalculated when update the inode information to it's buffer. If other inode marks the buffer dirty, then the inconsistent inode will be flushed to disk. Fix this problem by checking i_blocks in advance. 
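The failure ordering is easier to see as a sketch. This is not the actual ext4_do_update_inode() body (locking, journalling and most of the copied fields are omitted; only the relative order of the calls matters), but it shows how the stale checksum arises:

	/* pre-fix ordering, simplified sketch */
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);	/* buffer already modified */
	/* ... uid, gid, timestamps, i_size copied the same way ... */

	err = ext4_inode_blocks_set(handle, raw_inode, ei);
	if (err)
		goto out_brelse;	/* i_blocks too big and the huge-file flag not
					 * set: we bail out before ext4_inode_csum_set(),
					 * leaving a half-updated raw inode with a stale
					 * checksum in the shared inode table buffer */

	/* ... */
	ext4_inode_csum_set(inode, raw_inode, ei);	/* only reached on success */

Moving ext4_inode_blocks_set() to the top of the update, as the patch below does, makes the failure happen before any field of the raw inode is touched, so the buffer holds either the old consistent inode or a fully updated, re-checksummed one.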
Cc: stable@kernel.org Signed-off-by: Luo Meng Reviewed-by: Darrick J. Wong Link: https://lore.kernel.org/r/20201020013631.3796673-1-luomeng12@huawei.com Signed-off-by: Theodore Ts'o --- fs/ext4/inode.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 43d6a07262d2..03c2253005f0 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -5030,6 +5030,12 @@ static int ext4_do_update_inode(handle_t *handle, if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); + err = ext4_inode_blocks_set(handle, raw_inode, ei); + if (err) { + spin_unlock(&ei->i_raw_lock); + goto out_brelse; + } + raw_inode->i_mode = cpu_to_le16(inode->i_mode); i_uid = i_uid_read(inode); i_gid = i_gid_read(inode); @@ -5063,11 +5069,6 @@ static int ext4_do_update_inode(handle_t *handle, EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); - err = ext4_inode_blocks_set(handle, raw_inode, ei); - if (err) { - spin_unlock(&ei->i_raw_lock); - goto out_brelse; - } raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) -- cgit v1.2.3
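As a usage note for the fc_info file introduced earlier in this series, here is a small userspace-only sketch that reads it and extracts the headline numbers. It is not part of the kernel patches; the path and line formats follow the sample output and the seq_printf() calls shown in that patch, and the default device name ("vdc") is only an example to be replaced with the block device the filesystem is mounted from.

/* build: cc -o fc_info_dump fc_info_dump.c ; run: ./fc_info_dump vdc */
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	const char *dev = argc > 1 ? argv[1] : "vdc";	/* e.g. "vdc", "sda1" */
	char path[128], line[256];
	long commits = -1, ineligible = -1, numblks = -1;
	long long avg_us = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/fs/ext4/%s/fc_info", dev);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* line formats: "%ld commits", "%ld ineligible", "%ld numblks",
		 * "%lluus avg_commit_time", as printed by ext4_fc_info_show() */
		if (strstr(line, " commits"))
			sscanf(line, "%ld", &commits);
		else if (strstr(line, " ineligible"))
			sscanf(line, "%ld", &ineligible);
		else if (strstr(line, " numblks"))
			sscanf(line, "%ld", &numblks);
		else if (strstr(line, "us avg_commit_time"))
			sscanf(line, "%lld", &avg_us);
	}
	fclose(f);

	printf("%s: %ld commits, %ld ineligible, %ld fc blocks, %lld us avg commit\n",
	       dev, commits, ineligible, numblks, avg_us);
	return 0;
}

The per-reason counters ("Ineligible reasons:" lines) can be parsed the same way if needed; they are plain "\"reason\":<tab>count" lines.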