Diffstat (limited to 'fs')
-rw-r--r--  fs/ext4/Makefile          |    2
-rw-r--r--  fs/ext4/acl.c             |    2
-rw-r--r--  fs/ext4/balloc.c          |   14
-rw-r--r--  fs/ext4/block_validity.c  |   10
-rw-r--r--  fs/ext4/dir.c             |    4
-rw-r--r--  fs/ext4/ext4.h            |  136
-rw-r--r--  fs/ext4/ext4_jbd2.c       |    2
-rw-r--r--  fs/ext4/extents.c         |  315
-rw-r--r--  fs/ext4/extents_status.c  |   24
-rw-r--r--  fs/ext4/fast_commit.c     | 2139
-rw-r--r--  fs/ext4/fast_commit.h     |  159
-rw-r--r--  fs/ext4/file.c            |   12
-rw-r--r--  fs/ext4/fsmap.c           |   11
-rw-r--r--  fs/ext4/fsync.c           |    4
-rw-r--r--  fs/ext4/ialloc.c          |  173
-rw-r--r--  fs/ext4/indirect.c        |   13
-rw-r--r--  fs/ext4/inline.c          |    2
-rw-r--r--  fs/ext4/inode.c           |  290
-rw-r--r--  fs/ext4/ioctl.c           |   22
-rw-r--r--  fs/ext4/mballoc.c         |  257
-rw-r--r--  fs/ext4/mmp.c             |   10
-rw-r--r--  fs/ext4/move_extent.c     |    2
-rw-r--r--  fs/ext4/namei.c           |  206
-rw-r--r--  fs/ext4/resize.c          |   14
-rw-r--r--  fs/ext4/super.c           |  352
-rw-r--r--  fs/ext4/sysfs.c           |    2
-rw-r--r--  fs/ext4/xattr.c           |    3
-rw-r--r--  fs/jbd2/commit.c          |  106
-rw-r--r--  fs/jbd2/journal.c         |  245
-rw-r--r--  fs/jbd2/recovery.c        |  135
-rw-r--r--  fs/ocfs2/journal.c        |    4
31 files changed, 4282 insertions, 388 deletions
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index 2e42f47a7f98..49e7af6cc93f 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -10,7 +10,7 @@ ext4-y := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \
mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \
super.o symlink.o sysfs.o xattr.o xattr_hurd.o xattr_trusted.o \
- xattr_user.o
+ xattr_user.o fast_commit.o
ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o
ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index 76f634d185f1..68aaed48315f 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -242,6 +242,7 @@ retry:
handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
if (IS_ERR(handle))
return PTR_ERR(handle);
+ ext4_fc_start_update(inode);
if ((type == ACL_TYPE_ACCESS) && acl) {
error = posix_acl_update_mode(inode, &mode, &acl);
@@ -259,6 +260,7 @@ retry:
}
out_stop:
ext4_journal_stop(handle);
+ ext4_fc_stop_update(inode);
if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
return error;
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 48c3df47748d..1d640b145637 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -368,7 +368,12 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
struct buffer_head *bh)
{
ext4_fsblk_t blk;
- struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);
+ struct ext4_group_info *grp;
+
+ if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
+ return 0;
+
+ grp = ext4_get_group_info(sb, block_group);
if (buffer_verified(bh))
return 0;
@@ -495,10 +500,9 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group,
*/
set_buffer_new(bh);
trace_ext4_read_block_bitmap_load(sb, block_group, ignore_locked);
- bh->b_end_io = ext4_end_bitmap_read;
- get_bh(bh);
- submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO |
- (ignore_locked ? REQ_RAHEAD : 0), bh);
+ ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO |
+ (ignore_locked ? REQ_RAHEAD : 0),
+ ext4_end_bitmap_read);
return bh;
verify:
err = ext4_validate_block_bitmap(sb, desc, block_group, bh);
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index c54ba52f2dd4..8e6ca23ed172 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -131,7 +131,7 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
printk(KERN_INFO "System zones: ");
rcu_read_lock();
- system_blks = rcu_dereference(sbi->system_blks);
+ system_blks = rcu_dereference(sbi->s_system_blks);
node = rb_first(&system_blks->root);
while (node) {
entry = rb_entry(node, struct ext4_system_zone, node);
@@ -261,7 +261,7 @@ int ext4_setup_system_zone(struct super_block *sb)
* with ext4_data_block_valid() accessing the rbtree at the same
* time.
*/
- rcu_assign_pointer(sbi->system_blks, system_blks);
+ rcu_assign_pointer(sbi->s_system_blks, system_blks);
if (test_opt(sb, DEBUG))
debug_print_tree(sbi);
@@ -286,9 +286,9 @@ void ext4_release_system_zone(struct super_block *sb)
{
struct ext4_system_blocks *system_blks;
- system_blks = rcu_dereference_protected(EXT4_SB(sb)->system_blks,
+ system_blks = rcu_dereference_protected(EXT4_SB(sb)->s_system_blks,
lockdep_is_held(&sb->s_umount));
- rcu_assign_pointer(EXT4_SB(sb)->system_blks, NULL);
+ rcu_assign_pointer(EXT4_SB(sb)->s_system_blks, NULL);
if (system_blks)
call_rcu(&system_blks->rcu, ext4_destroy_system_zone);
@@ -319,7 +319,7 @@ int ext4_inode_block_valid(struct inode *inode, ext4_fsblk_t start_blk,
* mount option.
*/
rcu_read_lock();
- system_blks = rcu_dereference(sbi->system_blks);
+ system_blks = rcu_dereference(sbi->s_system_blks);
if (system_blks == NULL)
goto out_rcu;
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index efe77cffc322..5b81f3b080ee 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -674,7 +674,7 @@ static int ext4_d_compare(const struct dentry *dentry, unsigned int len,
{
struct qstr qstr = {.name = str, .len = len };
const struct dentry *parent = READ_ONCE(dentry->d_parent);
- const struct inode *inode = READ_ONCE(parent->d_inode);
+ const struct inode *inode = d_inode_rcu(parent);
char strbuf[DNAME_INLINE_LEN];
if (!inode || !IS_CASEFOLDED(inode) ||
@@ -706,7 +706,7 @@ static int ext4_d_hash(const struct dentry *dentry, struct qstr *str)
{
const struct ext4_sb_info *sbi = EXT4_SB(dentry->d_sb);
const struct unicode_map *um = sbi->s_encoding;
- const struct inode *inode = READ_ONCE(dentry->d_inode);
+ const struct inode *inode = d_inode_rcu(dentry);
unsigned char *norm;
int len, ret = 0;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index f9a692c0a66c..254d1c26bea8 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -27,7 +27,6 @@
#include <linux/seqlock.h>
#include <linux/mutex.h>
#include <linux/timer.h>
-#include <linux/version.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
#include <linux/blockgroup_lock.h>
@@ -492,7 +491,7 @@ struct flex_groups {
/* Flags which are mutually exclusive to DAX */
#define EXT4_DAX_MUT_EXCL (EXT4_VERITY_FL | EXT4_ENCRYPT_FL |\
- EXT4_JOURNAL_DATA_FL)
+ EXT4_JOURNAL_DATA_FL | EXT4_INLINE_DATA_FL)
/* Mask out flags that are inappropriate for the given type of inode. */
static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
@@ -964,6 +963,7 @@ do { \
#endif /* defined(__KERNEL__) || defined(__linux__) */
#include "extents_status.h"
+#include "fast_commit.h"
/*
* Lock subclasses for i_data_sem in the ext4_inode_info structure.
@@ -1021,6 +1021,31 @@ struct ext4_inode_info {
struct list_head i_orphan; /* unlinked but open inodes */
+ /* Fast commit related info */
+
+ struct list_head i_fc_list; /*
+ * inodes that need fast commit
+ * protected by sbi->s_fc_lock.
+ */
+
+ /* Fast commit subtid when this inode was committed */
+ unsigned int i_fc_committed_subtid;
+
+ /* Start of lblk range that needs to be committed in this fast commit */
+ ext4_lblk_t i_fc_lblk_start;
+
+ /* End of lblk range that needs to be committed in this fast commit */
+ ext4_lblk_t i_fc_lblk_len;
+
+ /* Number of ongoing updates on this inode */
+ atomic_t i_fc_updates;
+
+ /* Fast commit wait queue for this inode */
+ wait_queue_head_t i_fc_wait;
+
+ /* Protect concurrent accesses on i_fc_lblk_start, i_fc_lblk_len */
+ struct mutex i_fc_lock;
+
/*
* i_disksize keeps track of what the inode size is ON DISK, not
* in memory. During truncate, i_size is set to the new size by
@@ -1141,6 +1166,11 @@ struct ext4_inode_info {
#define EXT4_VALID_FS 0x0001 /* Unmounted cleanly */
#define EXT4_ERROR_FS 0x0002 /* Errors detected */
#define EXT4_ORPHAN_FS 0x0004 /* Orphans being recovered */
+#define EXT4_FC_INELIGIBLE 0x0008 /* Fast commit ineligible */
+#define EXT4_FC_COMMITTING 0x0010 /* File system undergoing a fast
+ * commit.
+ */
+#define EXT4_FC_REPLAY 0x0020 /* Fast commit replay ongoing */
/*
* Misc. filesystem flags
@@ -1214,6 +1244,8 @@ struct ext4_inode_info {
#define EXT4_MOUNT2_EXPLICIT_JOURNAL_CHECKSUM 0x00000008 /* User explicitly
specified journal checksum */
+#define EXT4_MOUNT2_JOURNAL_FAST_COMMIT 0x00000010 /* Journal fast commit */
+
#define clear_opt(sb, opt) EXT4_SB(sb)->s_mount_opt &= \
~EXT4_MOUNT_##opt
#define set_opt(sb, opt) EXT4_SB(sb)->s_mount_opt |= \
@@ -1481,14 +1513,14 @@ struct ext4_sb_info {
unsigned long s_commit_interval;
u32 s_max_batch_time;
u32 s_min_batch_time;
- struct block_device *journal_bdev;
+ struct block_device *s_journal_bdev;
#ifdef CONFIG_QUOTA
/* Names of quota files with journalled quota */
char __rcu *s_qf_names[EXT4_MAXQUOTAS];
int s_jquota_fmt; /* Format of quota to use */
#endif
unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
- struct ext4_system_blocks __rcu *system_blks;
+ struct ext4_system_blocks __rcu *s_system_blks;
#ifdef EXTENTS_STATS
/* ext4 extents stats */
@@ -1611,6 +1643,34 @@ struct ext4_sb_info {
/* Record the errseq of the backing block device */
errseq_t s_bdev_wb_err;
spinlock_t s_bdev_wb_lock;
+
+ /* Ext4 fast commit stuff */
+ atomic_t s_fc_subtid;
+ atomic_t s_fc_ineligible_updates;
+ /*
+ * After commit starts, the main queue gets locked, and further
+ * updates get added to the staging queue.
+ */
+#define FC_Q_MAIN 0
+#define FC_Q_STAGING 1
+ struct list_head s_fc_q[2]; /* Inodes staged for fast commit
+ * that have data changes in them.
+ */
+ struct list_head s_fc_dentry_q[2]; /* directory entry updates */
+ unsigned int s_fc_bytes;
+ /*
+ * Main fast commit lock. This lock protects accesses to the
+ * following fields:
+ * ei->i_fc_list, s_fc_dentry_q, s_fc_q, s_fc_bytes, s_fc_bh.
+ */
+ spinlock_t s_fc_lock;
+ struct buffer_head *s_fc_bh;
+ struct ext4_fc_stats s_fc_stats;
+ u64 s_fc_avg_commit_time;
+#ifdef CONFIG_EXT4_DEBUG
+ int s_fc_debug_max_replay;
+#endif
+ struct ext4_fc_replay_state s_fc_replay_state;
};
static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1721,6 +1781,7 @@ enum {
EXT4_STATE_EXT_PRECACHED, /* extents have been precached */
EXT4_STATE_LUSTRE_EA_INODE, /* Lustre-style ea_inode */
EXT4_STATE_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */
+ EXT4_STATE_FC_COMMITTING, /* Fast commit ongoing */
};
#define EXT4_INODE_BIT_FNS(name, field, offset) \
@@ -1814,6 +1875,7 @@ static inline bool ext4_verity_in_progress(struct inode *inode)
#define EXT4_FEATURE_COMPAT_RESIZE_INODE 0x0010
#define EXT4_FEATURE_COMPAT_DIR_INDEX 0x0020
#define EXT4_FEATURE_COMPAT_SPARSE_SUPER2 0x0200
+#define EXT4_FEATURE_COMPAT_FAST_COMMIT 0x0400
#define EXT4_FEATURE_COMPAT_STABLE_INODES 0x0800
#define EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001
@@ -1916,6 +1978,7 @@ EXT4_FEATURE_COMPAT_FUNCS(xattr, EXT_ATTR)
EXT4_FEATURE_COMPAT_FUNCS(resize_inode, RESIZE_INODE)
EXT4_FEATURE_COMPAT_FUNCS(dir_index, DIR_INDEX)
EXT4_FEATURE_COMPAT_FUNCS(sparse_super2, SPARSE_SUPER2)
+EXT4_FEATURE_COMPAT_FUNCS(fast_commit, FAST_COMMIT)
EXT4_FEATURE_COMPAT_FUNCS(stable_inodes, STABLE_INODES)
EXT4_FEATURE_RO_COMPAT_FUNCS(sparse_super, SPARSE_SUPER)
@@ -2650,6 +2713,7 @@ extern int ext4fs_dirhash(const struct inode *dir, const char *name, int len,
struct dx_hash_info *hinfo);
/* ialloc.c */
+extern int ext4_mark_inode_used(struct super_block *sb, int ino);
extern struct inode *__ext4_new_inode(handle_t *, struct inode *, umode_t,
const struct qstr *qstr, __u32 goal,
uid_t *owner, __u32 i_flags,
@@ -2675,6 +2739,27 @@ extern int ext4_init_inode_table(struct super_block *sb,
ext4_group_t group, int barrier);
extern void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate);
+/* fast_commit.c */
+int ext4_fc_info_show(struct seq_file *seq, void *v);
+void ext4_fc_init(struct super_block *sb, journal_t *journal);
+void ext4_fc_init_inode(struct inode *inode);
+void ext4_fc_track_range(struct inode *inode, ext4_lblk_t start,
+ ext4_lblk_t end);
+void ext4_fc_track_unlink(struct inode *inode, struct dentry *dentry);
+void ext4_fc_track_link(struct inode *inode, struct dentry *dentry);
+void ext4_fc_track_create(struct inode *inode, struct dentry *dentry);
+void ext4_fc_track_inode(struct inode *inode);
+void ext4_fc_mark_ineligible(struct super_block *sb, int reason);
+void ext4_fc_start_ineligible(struct super_block *sb, int reason);
+void ext4_fc_stop_ineligible(struct super_block *sb);
+void ext4_fc_start_update(struct inode *inode);
+void ext4_fc_stop_update(struct inode *inode);
+void ext4_fc_del(struct inode *inode);
+bool ext4_fc_replay_check_excluded(struct super_block *sb, ext4_fsblk_t block);
+void ext4_fc_replay_cleanup(struct super_block *sb);
+int ext4_fc_commit(journal_t *journal, tid_t commit_tid);
+int __init ext4_fc_init_dentry_cache(void);
+
/* mballoc.c */
extern const struct seq_operations ext4_mb_seq_groups_ops;
extern long ext4_mb_stats;
@@ -2704,8 +2789,12 @@ extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
ext4_fsblk_t block, unsigned long count);
extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
extern void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid);
+extern void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
+ int len, int state);
/* inode.c */
+void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
+ struct ext4_inode_info *ei);
int ext4_inode_is_fast_symlink(struct inode *inode);
struct buffer_head *ext4_getblk(handle_t *, struct inode *, ext4_lblk_t, int);
struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int);
@@ -2752,6 +2841,8 @@ extern int ext4_sync_inode(handle_t *, struct inode *);
extern void ext4_dirty_inode(struct inode *, int);
extern int ext4_change_inode_journal_flag(struct inode *, int);
extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
+extern int ext4_get_fc_inode_loc(struct super_block *sb, unsigned long ino,
+ struct ext4_iloc *iloc);
extern int ext4_inode_attach_jinode(struct inode *inode);
extern int ext4_can_truncate(struct inode *inode);
extern int ext4_truncate(struct inode *);
@@ -2785,12 +2876,15 @@ extern int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
/* ioctl.c */
extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long);
+extern void ext4_reset_inode_seed(struct inode *inode);
/* migrate.c */
extern int ext4_ext_migrate(struct inode *);
extern int ext4_ind_migrate(struct inode *inode);
/* namei.c */
+extern int ext4_init_new_dir(handle_t *handle, struct inode *dir,
+ struct inode *inode);
extern int ext4_dirblock_csum_verify(struct inode *inode,
struct buffer_head *bh);
extern int ext4_orphan_add(handle_t *, struct inode *);
@@ -2824,6 +2918,14 @@ extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
/* super.c */
extern struct buffer_head *ext4_sb_bread(struct super_block *sb,
sector_t block, int op_flags);
+extern struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
+ sector_t block);
+extern void ext4_read_bh_nowait(struct buffer_head *bh, int op_flags,
+ bh_end_io_t *end_io);
+extern int ext4_read_bh(struct buffer_head *bh, int op_flags,
+ bh_end_io_t *end_io);
+extern int ext4_read_bh_lock(struct buffer_head *bh, int op_flags, bool wait);
+extern void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block);
extern int ext4_seq_options_show(struct seq_file *seq, void *offset);
extern int ext4_calculate_overhead(struct super_block *sb);
extern void ext4_superblock_csum_set(struct super_block *sb);
@@ -3012,22 +3114,24 @@ static inline int ext4_has_group_desc_csum(struct super_block *sb)
return ext4_has_feature_gdt_csum(sb) || ext4_has_metadata_csum(sb);
}
+#define ext4_read_incompat_64bit_val(es, name) \
+ (((es)->s_feature_incompat & cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT) \
+ ? (ext4_fsblk_t)le32_to_cpu(es->name##_hi) << 32 : 0) | \
+ le32_to_cpu(es->name##_lo))
+
static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es)
{
- return ((ext4_fsblk_t)le32_to_cpu(es->s_blocks_count_hi) << 32) |
- le32_to_cpu(es->s_blocks_count_lo);
+ return ext4_read_incompat_64bit_val(es, s_blocks_count);
}
static inline ext4_fsblk_t ext4_r_blocks_count(struct ext4_super_block *es)
{
- return ((ext4_fsblk_t)le32_to_cpu(es->s_r_blocks_count_hi) << 32) |
- le32_to_cpu(es->s_r_blocks_count_lo);
+ return ext4_read_incompat_64bit_val(es, s_r_blocks_count);
}
static inline ext4_fsblk_t ext4_free_blocks_count(struct ext4_super_block *es)
{
- return ((ext4_fsblk_t)le32_to_cpu(es->s_free_blocks_count_hi) << 32) |
- le32_to_cpu(es->s_free_blocks_count_lo);
+ return ext4_read_incompat_64bit_val(es, s_free_blocks_count);
}
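
[The new helper consolidates the three open-coded hi/lo reads removed above. With the 64-bit incompat feature set, ext4_read_incompat_64bit_val(es, s_blocks_count) expands to the same expression the removed lines used:

	((ext4_fsblk_t)le32_to_cpu(es->s_blocks_count_hi) << 32) |
		le32_to_cpu(es->s_blocks_count_lo)

and to just the low 32 bits when the feature is absent.]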
static inline void ext4_blocks_count_set(struct ext4_super_block *es,
@@ -3153,6 +3257,9 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
struct ext4_group_info {
unsigned long bb_state;
+#ifdef AGGRESSIVE_CHECK
+ unsigned long bb_check_counter;
+#endif
struct rb_root bb_free_root;
ext4_grpblk_t bb_first_free; /* first free block */
ext4_grpblk_t bb_free; /* total free blocks */
@@ -3357,6 +3464,10 @@ extern int ext4_handle_dirty_dirblock(handle_t *handle, struct inode *inode,
extern int ext4_ci_compare(const struct inode *parent,
const struct qstr *fname,
const struct qstr *entry, bool quick);
+extern int __ext4_unlink(struct inode *dir, const struct qstr *d_name,
+ struct inode *inode);
+extern int __ext4_link(struct inode *dir, struct inode *inode,
+ struct dentry *dentry);
#define S_SHIFT 12
static const unsigned char ext4_type_by_mode[(S_IFMT >> S_SHIFT) + 1] = {
@@ -3457,6 +3568,11 @@ extern int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu);
extern int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
int check_cred, int restart_cred,
int revoke_cred);
+extern void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end);
+extern int ext4_ext_replay_set_iblocks(struct inode *inode);
+extern int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
+ int len, int unwritten, ext4_fsblk_t pblk);
+extern int ext4_ext_clear_bb(struct inode *inode);
/* move_extent.c */
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 760b9ee49dc0..0fd0c42a4f7d 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -100,7 +100,7 @@ handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
return ERR_PTR(err);
journal = EXT4_SB(sb)->s_journal;
- if (!journal)
+ if (!journal || (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))
return ext4_get_nojournal();
return jbd2__journal_start(journal, blocks, rsv_blocks, revoke_creds,
GFP_NOFS, type, line);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index a0481582187a..559100f3e23c 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -501,7 +501,7 @@ __read_extent_tree_block(const char *function, unsigned int line,
if (!bh_uptodate_or_lock(bh)) {
trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
- err = bh_submit_read(bh);
+ err = ext4_read_bh(bh, 0, NULL);
if (err < 0)
goto errout;
}
@@ -3723,6 +3723,7 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
out:
ext4_ext_show_leaf(inode, path);
+ ext4_fc_track_range(inode, ee_block, ee_block + ee_len - 1);
return err;
}
@@ -3794,6 +3795,7 @@ convert_initialized_extent(handle_t *handle, struct inode *inode,
if (*allocated > map->m_len)
*allocated = map->m_len;
map->m_len = *allocated;
+ ext4_fc_track_range(inode, ee_block, ee_block + ee_len - 1);
return 0;
}
@@ -4023,7 +4025,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
* down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
* (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
*
- * return > 0, number of of blocks already mapped/allocated
+ * return > 0, number of blocks already mapped/allocated
* if create == 0 and these are pre-allocated blocks
* buffer head is unmapped
* otherwise blocks are mapped
@@ -4327,7 +4329,7 @@ got_allocated_blocks:
map->m_len = ar.len;
allocated = map->m_len;
ext4_ext_show_leaf(inode, path);
-
+ ext4_fc_track_range(inode, map->m_lblk, map->m_lblk + map->m_len - 1);
out:
ext4_ext_drop_refs(path);
kfree(path);
@@ -4600,7 +4602,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
ret = ext4_mark_inode_dirty(handle, inode);
if (unlikely(ret))
goto out_handle;
-
+ ext4_fc_track_range(inode, offset >> inode->i_sb->s_blocksize_bits,
+ (offset + len - 1) >> inode->i_sb->s_blocksize_bits);
/* Zero out partial block at the edges of the range */
ret = ext4_zero_partial_blocks(handle, inode, offset, len);
if (ret >= 0)
@@ -4648,23 +4651,34 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
FALLOC_FL_INSERT_RANGE))
return -EOPNOTSUPP;
+ ext4_fc_track_range(inode, offset >> blkbits,
+ (offset + len - 1) >> blkbits);
- if (mode & FALLOC_FL_PUNCH_HOLE)
- return ext4_punch_hole(inode, offset, len);
+ ext4_fc_start_update(inode);
+
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ ret = ext4_punch_hole(inode, offset, len);
+ goto exit;
+ }
ret = ext4_convert_inline_data(inode);
if (ret)
- return ret;
+ goto exit;
- if (mode & FALLOC_FL_COLLAPSE_RANGE)
- return ext4_collapse_range(inode, offset, len);
-
- if (mode & FALLOC_FL_INSERT_RANGE)
- return ext4_insert_range(inode, offset, len);
+ if (mode & FALLOC_FL_COLLAPSE_RANGE) {
+ ret = ext4_collapse_range(inode, offset, len);
+ goto exit;
+ }
- if (mode & FALLOC_FL_ZERO_RANGE)
- return ext4_zero_range(file, offset, len, mode);
+ if (mode & FALLOC_FL_INSERT_RANGE) {
+ ret = ext4_insert_range(inode, offset, len);
+ goto exit;
+ }
+ if (mode & FALLOC_FL_ZERO_RANGE) {
+ ret = ext4_zero_range(file, offset, len, mode);
+ goto exit;
+ }
trace_ext4_fallocate_enter(inode, offset, len, mode);
lblk = offset >> blkbits;
@@ -4698,12 +4712,14 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
goto out;
if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
- ret = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
- EXT4_I(inode)->i_sync_tid);
+ ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
+ EXT4_I(inode)->i_sync_tid);
}
out:
inode_unlock(inode);
trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
+exit:
+ ext4_fc_stop_update(inode);
return ret;
}
@@ -4769,7 +4785,7 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end)
{
- int ret, err = 0;
+ int ret = 0, err = 0;
struct ext4_io_end_vec *io_end_vec;
/*
@@ -5291,6 +5307,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
ret = PTR_ERR(handle);
goto out_mmap;
}
+ ext4_fc_start_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE);
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode, 0);
@@ -5329,6 +5346,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
out_stop:
ext4_journal_stop(handle);
+ ext4_fc_stop_ineligible(sb);
out_mmap:
up_write(&EXT4_I(inode)->i_mmap_sem);
out_mutex:
@@ -5429,6 +5447,7 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
ret = PTR_ERR(handle);
goto out_mmap;
}
+ ext4_fc_start_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE);
/* Expand file to avoid data loss if there is error while shifting */
inode->i_size += len;
@@ -5503,6 +5522,7 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
out_stop:
ext4_journal_stop(handle);
+ ext4_fc_stop_ineligible(sb);
out_mmap:
up_write(&EXT4_I(inode)->i_mmap_sem);
out_mutex:
@@ -5784,3 +5804,264 @@ out:
return err ? err : mapped;
}
+
+/*
+ * Updates the physical block address and unwritten status of the extent
+ * starting at lblk 'start' and of length 'len'. If such an extent doesn't
+ * exist, this function splits the extent tree appropriately to create
+ * one. This function is called in the fast commit replay path.
+ * Returns 0 on success and error on failure.
+ */
+int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
+ int len, int unwritten, ext4_fsblk_t pblk)
+{
+ struct ext4_ext_path *path = NULL, *ppath;
+ struct ext4_extent *ex;
+ int ret;
+
+ path = ext4_find_extent(inode, start, NULL, 0);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ ex = path[path->p_depth].p_ext;
+ if (!ex) {
+ ret = -EFSCORRUPTED;
+ goto out;
+ }
+
+ if (le32_to_cpu(ex->ee_block) != start ||
+ ext4_ext_get_actual_len(ex) != len) {
+ /* We need to split this extent to match our extent first */
+ ppath = path;
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ret = ext4_force_split_extent_at(NULL, inode, &ppath, start, 1);
+ up_write(&EXT4_I(inode)->i_data_sem);
+ if (ret)
+ goto out;
+ kfree(path);
+ path = ext4_find_extent(inode, start, NULL, 0);
+ if (IS_ERR(path))
+ return -EINVAL;
+ ppath = path;
+ ex = path[path->p_depth].p_ext;
+ WARN_ON(le32_to_cpu(ex->ee_block) != start);
+ if (ext4_ext_get_actual_len(ex) != len) {
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ret = ext4_force_split_extent_at(NULL, inode, &ppath,
+ start + len, 1);
+ up_write(&EXT4_I(inode)->i_data_sem);
+ if (ret)
+ goto out;
+ kfree(path);
+ path = ext4_find_extent(inode, start, NULL, 0);
+ if (IS_ERR(path))
+ return -EINVAL;
+ ex = path[path->p_depth].p_ext;
+ }
+ }
+ if (unwritten)
+ ext4_ext_mark_unwritten(ex);
+ else
+ ext4_ext_mark_initialized(ex);
+ ext4_ext_store_pblock(ex, pblk);
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ret = ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
+ up_write(&EXT4_I(inode)->i_data_sem);
+out:
+ ext4_ext_drop_refs(path);
+ kfree(path);
+ ext4_mark_inode_dirty(NULL, inode);
+ return ret;
+}
+
+/* Try to shrink the extent tree */
+void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
+{
+ struct ext4_ext_path *path = NULL;
+ struct ext4_extent *ex;
+ ext4_lblk_t old_cur, cur = 0;
+
+ while (cur < end) {
+ path = ext4_find_extent(inode, cur, NULL, 0);
+ if (IS_ERR(path))
+ return;
+ ex = path[path->p_depth].p_ext;
+ if (!ex) {
+ ext4_ext_drop_refs(path);
+ kfree(path);
+ ext4_mark_inode_dirty(NULL, inode);
+ return;
+ }
+ old_cur = cur;
+ cur = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
+ if (cur <= old_cur)
+ cur = old_cur + 1;
+ ext4_ext_try_to_merge(NULL, inode, path, ex);
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
+ up_write(&EXT4_I(inode)->i_data_sem);
+ ext4_mark_inode_dirty(NULL, inode);
+ ext4_ext_drop_refs(path);
+ kfree(path);
+ }
+}
+
+/* Check if *cur is a hole and if it is, skip it */
+static void skip_hole(struct inode *inode, ext4_lblk_t *cur)
+{
+ int ret;
+ struct ext4_map_blocks map;
+
+ map.m_lblk = *cur;
+ map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;
+
+ ret = ext4_map_blocks(NULL, inode, &map, 0);
+ if (ret != 0)
+ return;
+ *cur = *cur + map.m_len;
+}
+
+/* Count number of blocks used by this inode and update i_blocks */
+int ext4_ext_replay_set_iblocks(struct inode *inode)
+{
+ struct ext4_ext_path *path = NULL, *path2 = NULL;
+ struct ext4_extent *ex;
+ ext4_lblk_t cur = 0, end;
+ int numblks = 0, i, ret = 0;
+ ext4_fsblk_t cmp1, cmp2;
+ struct ext4_map_blocks map;
+
+ /* Determine the size of the file first */
+ path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
+ EXT4_EX_NOCACHE);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ ex = path[path->p_depth].p_ext;
+ if (!ex) {
+ ext4_ext_drop_refs(path);
+ kfree(path);
+ goto out;
+ }
+ end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
+ ext4_ext_drop_refs(path);
+ kfree(path);
+
+ /* Count the number of data blocks */
+ cur = 0;
+ while (cur < end) {
+ map.m_lblk = cur;
+ map.m_len = end - cur;
+ ret = ext4_map_blocks(NULL, inode, &map, 0);
+ if (ret < 0)
+ break;
+ if (ret > 0)
+ numblks += ret;
+ cur = cur + map.m_len;
+ }
+
+ /*
+ * Count the number of extent tree blocks. We do it by looking up
+ * two successive extents and determining the difference between
+ * their paths. When the paths of two successive extents differ,
+ * we compare the blocks in the paths at each level and increment
+ * iblocks by the total number of differences found.
+ */
+ cur = 0;
+ skip_hole(inode, &cur);
+ path = ext4_find_extent(inode, cur, NULL, 0);
+ if (IS_ERR(path))
+ goto out;
+ numblks += path->p_depth;
+ ext4_ext_drop_refs(path);
+ kfree(path);
+ while (cur < end) {
+ path = ext4_find_extent(inode, cur, NULL, 0);
+ if (IS_ERR(path))
+ break;
+ ex = path[path->p_depth].p_ext;
+ if (!ex) {
+ ext4_ext_drop_refs(path);
+ kfree(path);
+ return 0;
+ }
+ cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
+ ext4_ext_get_actual_len(ex));
+ skip_hole(inode, &cur);
+
+ path2 = ext4_find_extent(inode, cur, NULL, 0);
+ if (IS_ERR(path2)) {
+ ext4_ext_drop_refs(path);
+ kfree(path);
+ break;
+ }
+ ex = path2[path2->p_depth].p_ext;
+ for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) {
+ cmp1 = cmp2 = 0;
+ if (i <= path->p_depth)
+ cmp1 = path[i].p_bh ?
+ path[i].p_bh->b_blocknr : 0;
+ if (i <= path2->p_depth)
+ cmp2 = path2[i].p_bh ?
+ path2[i].p_bh->b_blocknr : 0;
+ if (cmp1 != cmp2 && cmp2 != 0)
+ numblks++;
+ }
+ ext4_ext_drop_refs(path);
+ ext4_ext_drop_refs(path2);
+ kfree(path);
+ kfree(path2);
+ }
+
+out:
+ inode->i_blocks = numblks << (inode->i_sb->s_blocksize_bits - 9);
+ ext4_mark_inode_dirty(NULL, inode);
+ return 0;
+}
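
[A hypothetical illustration of the path-comparison loop above: for a depth-2 tree, suppose two successive extents have paths [root, I1, L1] and [root, I1, L2]. Level 0 contributes nothing (the root lives in the inode, so p_bh is NULL and both sides compare as 0), level 1 matches (same index block I1), and level 2 differs, so numblks is incremented once for the new leaf block L2. Shared interior blocks are counted only once, via the path->p_depth seed taken from the first extent's path.]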
+
+int ext4_ext_clear_bb(struct inode *inode)
+{
+ struct ext4_ext_path *path = NULL;
+ struct ext4_extent *ex;
+ ext4_lblk_t cur = 0, end;
+ int j, ret = 0;
+ struct ext4_map_blocks map;
+
+ /* Determine the size of the file first */
+ path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
+ EXT4_EX_NOCACHE);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ ex = path[path->p_depth].p_ext;
+ if (!ex) {
+ ext4_ext_drop_refs(path);
+ kfree(path);
+ return 0;
+ }
+ end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
+ ext4_ext_drop_refs(path);
+ kfree(path);
+
+ cur = 0;
+ while (cur < end) {
+ map.m_lblk = cur;
+ map.m_len = end - cur;
+ ret = ext4_map_blocks(NULL, inode, &map, 0);
+ if (ret < 0)
+ break;
+ if (ret > 0) {
+ path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
+ if (!IS_ERR_OR_NULL(path)) {
+ for (j = 0; j < path->p_depth; j++) {
+
+ ext4_mb_mark_bb(inode->i_sb,
+ path[j].p_block, 1, 0);
+ }
+ ext4_ext_drop_refs(path);
+ kfree(path);
+ }
+ ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
+ }
+ cur = cur + map.m_len;
+ }
+
+ return 0;
+}
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index e75171535375..0a729027322d 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -311,6 +311,9 @@ void ext4_es_find_extent_range(struct inode *inode,
ext4_lblk_t lblk, ext4_lblk_t end,
struct extent_status *es)
{
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ return;
+
trace_ext4_es_find_extent_range_enter(inode, lblk);
read_lock(&EXT4_I(inode)->i_es_lock);
@@ -361,6 +364,9 @@ bool ext4_es_scan_range(struct inode *inode,
{
bool ret;
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ return false;
+
read_lock(&EXT4_I(inode)->i_es_lock);
ret = __es_scan_range(inode, matching_fn, lblk, end);
read_unlock(&EXT4_I(inode)->i_es_lock);
@@ -404,6 +410,9 @@ bool ext4_es_scan_clu(struct inode *inode,
{
bool ret;
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ return false;
+
read_lock(&EXT4_I(inode)->i_es_lock);
ret = __es_scan_clu(inode, matching_fn, lblk);
read_unlock(&EXT4_I(inode)->i_es_lock);
@@ -812,6 +821,9 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
int err = 0;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ return 0;
+
es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
lblk, len, pblk, status, inode->i_ino);
@@ -873,6 +885,9 @@ void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
struct extent_status newes;
ext4_lblk_t end = lblk + len - 1;
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ return;
+
newes.es_lblk = lblk;
newes.es_len = len;
ext4_es_store_pblock_status(&newes, pblk, status);
@@ -908,6 +923,9 @@ int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
struct rb_node *node;
int found = 0;
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ return 0;
+
trace_ext4_es_lookup_extent_enter(inode, lblk);
es_debug("lookup extent in block %u\n", lblk);
@@ -1419,6 +1437,9 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
int err = 0;
int reserved = 0;
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ return 0;
+
trace_ext4_es_remove_extent(inode, lblk, len);
es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
lblk, len, inode->i_ino);
@@ -1969,6 +1990,9 @@ int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
struct extent_status newes;
int err = 0;
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ return 0;
+
es_debug("add [%u/1) delayed to extent status tree of inode %lu\n",
lblk, inode->i_ino);
diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
new file mode 100644
index 000000000000..447c8d93f480
--- /dev/null
+++ b/fs/ext4/fast_commit.c
@@ -0,0 +1,2139 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * fs/ext4/fast_commit.c
+ *
+ * Written by Harshad Shirwadkar <harshadshirwadkar@gmail.com>
+ *
+ * Ext4 fast commit routines.
+ */
+#include "ext4.h"
+#include "ext4_jbd2.h"
+#include "ext4_extents.h"
+#include "mballoc.h"
+
+/*
+ * Ext4 Fast Commits
+ * -----------------
+ *
+ * Ext4 fast commits implement fine grained journalling for Ext4.
+ *
+ * Fast commits are organized as a log of tag-length-value (TLV) structs. (See
+ * struct ext4_fc_tl). Each TLV contains some delta that is replayed TLV by
+ * TLV during the recovery phase. For the scenarios for which we currently
+ * don't have replay code, fast commit falls back to full commits.
+ * Fast commits record deltas in one of the following three categories:
+ *
+ * (A) Directory entry updates:
+ *
+ * - EXT4_FC_TAG_UNLINK - records directory entry unlink
+ * - EXT4_FC_TAG_LINK - records directory entry link
+ * - EXT4_FC_TAG_CREAT - records inode and directory entry creation
+ *
+ * (B) File specific data range updates:
+ *
+ * - EXT4_FC_TAG_ADD_RANGE - records addition of new blocks to an inode
+ * - EXT4_FC_TAG_DEL_RANGE - records deletion of blocks from an inode
+ *
+ * (C) Inode metadata (mtime / ctime etc):
+ *
+ * - EXT4_FC_TAG_INODE - records the inode that should be replayed
+ * during recovery. Note that the iblocks field
+ * is not replayed and instead derived during
+ * replay.
+ *
+ * Commit Operation
+ * ----------------
+ * With fast commits, we maintain all the directory entry operations in the
+ * order in which they are issued in an in-memory queue. This queue is flushed
+ * to disk during the commit operation. We also maintain a list of inodes
+ * that need to be committed during a fast commit in a second in-memory
+ * queue. During the commit operation, we commit in the following order:
+ *
+ * [1] Lock inodes for any further data updates by setting COMMITTING state
+ * [2] Submit data buffers of all the inodes
+ * [3] Wait for [2] to complete
+ * [4] Commit all the directory entry updates in the fast commit space
+ * [5] Commit all the changed inode structures
+ * [6] Write tail tag (this tag ensures the atomicity, please read the following
+ * section for more details).
+ * [7] Wait for [4], [5] and [6] to complete.
+ *
+ * All the inode updates must call ext4_fc_start_update() before starting an
+ * update. If such an ongoing update is present, fast commit waits for it to
+ * complete. The completion of such an update is marked by
+ * ext4_fc_stop_update().
+ *
+ * Fast Commit Ineligibility
+ * -------------------------
+ * Not all operations are supported by fast commits today (e.g. extended
+ * attributes). Fast commit ineligibility is marked by calling one of the
+ * following two functions:
+ *
+ * - ext4_fc_mark_ineligible(): This makes the next fast commit operation
+ * fall back to a full commit. This is useful in case of transient errors.
+ *
+ * - ext4_fc_start_ineligible() and ext4_fc_stop_ineligible() - These make
+ * all the fast commits that happen between the two calls, plus the first
+ * fast commit after the call to ext4_fc_stop_ineligible(), fall back to
+ * full commits. Forcing one more full commit after the stop call
+ * guarantees that the ineligible operation bracketed by
+ * ext4_fc_start_ineligible() and ext4_fc_stop_ineligible() is followed
+ * by at least one full commit.
+ *
+ * Atomicity of commits
+ * --------------------
+ * In order to guarantee atomicity during the commit operation, fast commit
+ * uses the "EXT4_FC_TAG_TAIL" tag, which marks a fast commit as complete.
+ * The tail tag contains the CRC of the contents and the TID of the
+ * transaction after which this fast commit should be applied. Recovery code
+ * replays fast commit logs only if there's at least 1 valid tail present.
+ * For every fast commit operation, there is 1 tail. This means we may end
+ * up with multiple tails in the fast commit space. Here's an example:
+ *
+ * - Create a new file A and remove existing file B
+ * - fsync()
+ * - Append contents to file A
+ * - Truncate file A
+ * - fsync()
+ *
+ * The fast commit space at the end of above operations would look like this:
+ * [HEAD] [CREAT A] [UNLINK B] [TAIL] [ADD_RANGE A] [DEL_RANGE A] [TAIL]
+ * |<--- Fast Commit 1 --->|<--- Fast Commit 2 ---->|
+ *
+ * Replay code should thus check for all the valid tails in the FC area.
+ *
+ * TODOs
+ * -----
+ * 1) Make fast commit atomic updates more fine grained. Today, a fast commit
+ * eligible update must be protected within ext4_fc_start_update() and
+ * ext4_fc_stop_update(). These routines are called from much
+ * higher-level functions; this can be made more fine grained by
+ * combining them with ext4_journal_start().
+ *
+ * 2) Same as above for ext4_fc_start_ineligible() and
+ * ext4_fc_stop_ineligible().
+ *
+ * 3) Handle more ineligible cases.
+ */
+ */
+
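
[To make the TLV layout described in the comment above concrete, here is a minimal replay-side sketch. It is not part of this patch: it assumes the two-__le16 ext4_fc_tl header implied by the cpu_to_le16() stores later in this file, skips checksum verification, and fc_walk_block is a hypothetical name.

	struct ext4_fc_tl {
		__le16 fc_tag;	/* an EXT4_FC_TAG_* value */
		__le16 fc_len;	/* length of the value bytes that follow */
	};

	/* Walk the TLV entries in one fast commit block (sketch only). */
	static void fc_walk_block(u8 *start, int blocksize)
	{
		u8 *cur = start, *end = start + blocksize;
		struct ext4_fc_tl *tl;

		while (cur + sizeof(*tl) <= end) {
			tl = (struct ext4_fc_tl *)cur;
			/* the value bytes immediately follow the header */
			cur += sizeof(*tl) + le16_to_cpu(tl->fc_len);
			if (le16_to_cpu(tl->fc_tag) == EXT4_FC_TAG_TAIL)
				break;	/* a tail closes this fast commit */
		}
	}

PAD tags absorb end-of-block slack and a tail's length spans the rest of its block, so a walk like this never straddles a commit boundary inside a block.]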
+#include <trace/events/ext4.h>
+static struct kmem_cache *ext4_fc_dentry_cachep;
+
+static void ext4_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
+{
+ BUFFER_TRACE(bh, "");
+ if (uptodate) {
+ ext4_debug("%s: Block %lld up-to-date",
+ __func__, bh->b_blocknr);
+ set_buffer_uptodate(bh);
+ } else {
+ ext4_debug("%s: Block %lld not up-to-date",
+ __func__, bh->b_blocknr);
+ clear_buffer_uptodate(bh);
+ }
+
+ unlock_buffer(bh);
+}
+
+static inline void ext4_fc_reset_inode(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ ei->i_fc_lblk_start = 0;
+ ei->i_fc_lblk_len = 0;
+}
+
+void ext4_fc_init_inode(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ ext4_fc_reset_inode(inode);
+ ext4_clear_inode_state(inode, EXT4_STATE_FC_COMMITTING);
+ INIT_LIST_HEAD(&ei->i_fc_list);
+ init_waitqueue_head(&ei->i_fc_wait);
+ atomic_set(&ei->i_fc_updates, 0);
+ ei->i_fc_committed_subtid = 0;
+}
+
+/*
+ * Inform Ext4's fast commit subsystem about the start of an inode update
+ *
+ * This function is called by high level VFS callbacks before performing
+ * any inode update. This function blocks if there's an ongoing
+ * fast commit on the inode in question.
+ */
+void ext4_fc_start_update(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT) ||
+ (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY))
+ return;
+
+restart:
+ spin_lock(&EXT4_SB(inode->i_sb)->s_fc_lock);
+ if (list_empty(&ei->i_fc_list))
+ goto out;
+
+ if (ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING)) {
+ wait_queue_head_t *wq;
+#if (BITS_PER_LONG < 64)
+ DEFINE_WAIT_BIT(wait, &ei->i_state_flags,
+ EXT4_STATE_FC_COMMITTING);
+ wq = bit_waitqueue(&ei->i_state_flags,
+ EXT4_STATE_FC_COMMITTING);
+#else
+ DEFINE_WAIT_BIT(wait, &ei->i_flags,
+ EXT4_STATE_FC_COMMITTING);
+ wq = bit_waitqueue(&ei->i_flags,
+ EXT4_STATE_FC_COMMITTING);
+#endif
+ prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
+ spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock);
+ schedule();
+ finish_wait(wq, &wait.wq_entry);
+ goto restart;
+ }
+out:
+ atomic_inc(&ei->i_fc_updates);
+ spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock);
+}
+
+/*
+ * Stop inode update and wake up waiting fast commits if any.
+ */
+void ext4_fc_stop_update(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT) ||
+ (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY))
+ return;
+
+ if (atomic_dec_and_test(&ei->i_fc_updates))
+ wake_up_all(&ei->i_fc_wait);
+}
+
+/*
+ * Remove inode from fast commit list. If the inode is being committed
+ * we wait until inode commit is done.
+ */
+void ext4_fc_del(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT) ||
+ (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY))
+ return;
+
+restart:
+ spin_lock(&EXT4_SB(inode->i_sb)->s_fc_lock);
+ if (list_empty(&ei->i_fc_list)) {
+ spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock);
+ return;
+ }
+
+ if (ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING)) {
+ wait_queue_head_t *wq;
+#if (BITS_PER_LONG < 64)
+ DEFINE_WAIT_BIT(wait, &ei->i_state_flags,
+ EXT4_STATE_FC_COMMITTING);
+ wq = bit_waitqueue(&ei->i_state_flags,
+ EXT4_STATE_FC_COMMITTING);
+#else
+ DEFINE_WAIT_BIT(wait, &ei->i_flags,
+ EXT4_STATE_FC_COMMITTING);
+ wq = bit_waitqueue(&ei->i_flags,
+ EXT4_STATE_FC_COMMITTING);
+#endif
+ prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
+ spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock);
+ schedule();
+ finish_wait(wq, &wait.wq_entry);
+ goto restart;
+ }
+ if (!list_empty(&ei->i_fc_list))
+ list_del_init(&ei->i_fc_list);
+ spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock);
+}
+
+/*
+ * Mark file system as fast commit ineligible. This means that the next
+ * commit operation will result in a full jbd2 commit.
+ */
+void ext4_fc_mark_ineligible(struct super_block *sb, int reason)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+ if (!test_opt2(sb, JOURNAL_FAST_COMMIT) ||
+ (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))
+ return;
+
+ sbi->s_mount_state |= EXT4_FC_INELIGIBLE;
+ WARN_ON(reason >= EXT4_FC_REASON_MAX);
+ sbi->s_fc_stats.fc_ineligible_reason_count[reason]++;
+}
+
+/*
+ * Start a fast commit ineligible update. Any commits that happen while
+ * such an operation is in progress fall back to full commits.
+ */
+void ext4_fc_start_ineligible(struct super_block *sb, int reason)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+ if (!test_opt2(sb, JOURNAL_FAST_COMMIT) ||
+ (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))
+ return;
+
+ WARN_ON(reason >= EXT4_FC_REASON_MAX);
+ sbi->s_fc_stats.fc_ineligible_reason_count[reason]++;
+ atomic_inc(&sbi->s_fc_ineligible_updates);
+}
+
+/*
+ * Stop a fast commit ineligible update. We set EXT4_FC_INELIGIBLE flag here
+ * to ensure that after stopping the ineligible update, at least one full
+ * commit takes place.
+ */
+void ext4_fc_stop_ineligible(struct super_block *sb)
+{
+ if (!test_opt2(sb, JOURNAL_FAST_COMMIT) ||
+ (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))
+ return;
+
+ EXT4_SB(sb)->s_mount_state |= EXT4_FC_INELIGIBLE;
+ atomic_dec(&EXT4_SB(sb)->s_fc_ineligible_updates);
+}
+
+static inline int ext4_fc_is_ineligible(struct super_block *sb)
+{
+ return (EXT4_SB(sb)->s_mount_state & EXT4_FC_INELIGIBLE) ||
+ atomic_read(&EXT4_SB(sb)->s_fc_ineligible_updates);
+}
+
+/*
+ * Generic fast commit tracking function. If this is the first time we are
+ * called after a full commit, we initialize fast commit fields and then call
+ * __fc_track_fn() with update = 0. If we have already been called after a full
+ * commit, we pass update = 1. Based on that, the track function can determine
+ * if it needs to track a field for the first time or if it needs to just
+ * update the previously tracked value.
+ *
+ * If enqueue is set, this function enqueues the inode in fast commit list.
+ */
+static int ext4_fc_track_template(
+ struct inode *inode, int (*__fc_track_fn)(struct inode *, void *, bool),
+ void *args, int enqueue)
+{
+ tid_t running_txn_tid;
+ bool update = false;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ int ret;
+
+ if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT) ||
+ (sbi->s_mount_state & EXT4_FC_REPLAY))
+ return -EOPNOTSUPP;
+
+ if (ext4_fc_is_ineligible(inode->i_sb))
+ return -EINVAL;
+
+ running_txn_tid = sbi->s_journal ?
+ sbi->s_journal->j_commit_sequence + 1 : 0;
+
+ mutex_lock(&ei->i_fc_lock);
+ if (running_txn_tid == ei->i_sync_tid) {
+ update = true;
+ } else {
+ ext4_fc_reset_inode(inode);
+ ei->i_sync_tid = running_txn_tid;
+ }
+ ret = __fc_track_fn(inode, args, update);
+ mutex_unlock(&ei->i_fc_lock);
+
+ if (!enqueue)
+ return ret;
+
+ spin_lock(&sbi->s_fc_lock);
+ if (list_empty(&EXT4_I(inode)->i_fc_list))
+ list_add_tail(&EXT4_I(inode)->i_fc_list,
+ (sbi->s_mount_state & EXT4_FC_COMMITTING) ?
+ &sbi->s_fc_q[FC_Q_STAGING] :
+ &sbi->s_fc_q[FC_Q_MAIN]);
+ spin_unlock(&sbi->s_fc_lock);
+
+ return ret;
+}
+
+struct __track_dentry_update_args {
+ struct dentry *dentry;
+ int op;
+};
+
+/* __track_fn for directory entry updates. Called with ei->i_fc_lock. */
+static int __track_dentry_update(struct inode *inode, void *arg, bool update)
+{
+ struct ext4_fc_dentry_update *node;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct __track_dentry_update_args *dentry_update =
+ (struct __track_dentry_update_args *)arg;
+ struct dentry *dentry = dentry_update->dentry;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+
+ mutex_unlock(&ei->i_fc_lock);
+ node = kmem_cache_alloc(ext4_fc_dentry_cachep, GFP_NOFS);
+ if (!node) {
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_MEM);
+ mutex_lock(&ei->i_fc_lock);
+ return -ENOMEM;
+ }
+
+ node->fcd_op = dentry_update->op;
+ node->fcd_parent = dentry->d_parent->d_inode->i_ino;
+ node->fcd_ino = inode->i_ino;
+ if (dentry->d_name.len > DNAME_INLINE_LEN) {
+ node->fcd_name.name = kmalloc(dentry->d_name.len, GFP_NOFS);
+ if (!node->fcd_name.name) {
+ kmem_cache_free(ext4_fc_dentry_cachep, node);
+ ext4_fc_mark_ineligible(inode->i_sb,
+ EXT4_FC_REASON_MEM);
+ mutex_lock(&ei->i_fc_lock);
+ return -ENOMEM;
+ }
+ memcpy((u8 *)node->fcd_name.name, dentry->d_name.name,
+ dentry->d_name.len);
+ } else {
+ memcpy(node->fcd_iname, dentry->d_name.name,
+ dentry->d_name.len);
+ node->fcd_name.name = node->fcd_iname;
+ }
+ node->fcd_name.len = dentry->d_name.len;
+
+ spin_lock(&sbi->s_fc_lock);
+ if (sbi->s_mount_state & EXT4_FC_COMMITTING)
+ list_add_tail(&node->fcd_list,
+ &sbi->s_fc_dentry_q[FC_Q_STAGING]);
+ else
+ list_add_tail(&node->fcd_list, &sbi->s_fc_dentry_q[FC_Q_MAIN]);
+ spin_unlock(&sbi->s_fc_lock);
+ mutex_lock(&ei->i_fc_lock);
+
+ return 0;
+}
+
+void ext4_fc_track_unlink(struct inode *inode, struct dentry *dentry)
+{
+ struct __track_dentry_update_args args;
+ int ret;
+
+ args.dentry = dentry;
+ args.op = EXT4_FC_TAG_UNLINK;
+
+ ret = ext4_fc_track_template(inode, __track_dentry_update,
+ (void *)&args, 0);
+ trace_ext4_fc_track_unlink(inode, dentry, ret);
+}
+
+void ext4_fc_track_link(struct inode *inode, struct dentry *dentry)
+{
+ struct __track_dentry_update_args args;
+ int ret;
+
+ args.dentry = dentry;
+ args.op = EXT4_FC_TAG_LINK;
+
+ ret = ext4_fc_track_template(inode, __track_dentry_update,
+ (void *)&args, 0);
+ trace_ext4_fc_track_link(inode, dentry, ret);
+}
+
+void ext4_fc_track_create(struct inode *inode, struct dentry *dentry)
+{
+ struct __track_dentry_update_args args;
+ int ret;
+
+ args.dentry = dentry;
+ args.op = EXT4_FC_TAG_CREAT;
+
+ ret = ext4_fc_track_template(inode, __track_dentry_update,
+ (void *)&args, 0);
+ trace_ext4_fc_track_create(inode, dentry, ret);
+}
+
+/* __track_fn for inode tracking */
+static int __track_inode(struct inode *inode, void *arg, bool update)
+{
+ if (update)
+ return -EEXIST;
+
+ EXT4_I(inode)->i_fc_lblk_len = 0;
+
+ return 0;
+}
+
+void ext4_fc_track_inode(struct inode *inode)
+{
+ int ret;
+
+ if (S_ISDIR(inode->i_mode))
+ return;
+
+ ret = ext4_fc_track_template(inode, __track_inode, NULL, 1);
+ trace_ext4_fc_track_inode(inode, ret);
+}
+
+struct __track_range_args {
+ ext4_lblk_t start, end;
+};
+
+/* __track_fn for tracking data updates */
+static int __track_range(struct inode *inode, void *arg, bool update)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ ext4_lblk_t oldstart;
+ struct __track_range_args *__arg =
+ (struct __track_range_args *)arg;
+
+ if (inode->i_ino < EXT4_FIRST_INO(inode->i_sb)) {
+ ext4_debug("Special inode %ld being modified\n", inode->i_ino);
+ return -ECANCELED;
+ }
+
+ oldstart = ei->i_fc_lblk_start;
+
+ if (update && ei->i_fc_lblk_len > 0) {
+ ei->i_fc_lblk_start = min(ei->i_fc_lblk_start, __arg->start);
+ ei->i_fc_lblk_len =
+ max(oldstart + ei->i_fc_lblk_len - 1, __arg->end) -
+ ei->i_fc_lblk_start + 1;
+ } else {
+ ei->i_fc_lblk_start = __arg->start;
+ ei->i_fc_lblk_len = __arg->end - __arg->start + 1;
+ }
+
+ return 0;
+}
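
[A worked example of the merge arithmetic above (hypothetical numbers): if blocks 10-14 are already tracked (i_fc_lblk_start = 10, i_fc_lblk_len = 5) and a new update touches blocks 3-12, the tracked range becomes i_fc_lblk_start = min(10, 3) = 3 and i_fc_lblk_len = max(10 + 5 - 1, 12) - 3 + 1 = 14 - 3 + 1 = 12, i.e. blocks 3-14.]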
+
+void ext4_fc_track_range(struct inode *inode, ext4_lblk_t start,
+ ext4_lblk_t end)
+{
+ struct __track_range_args args;
+ int ret;
+
+ if (S_ISDIR(inode->i_mode))
+ return;
+
+ args.start = start;
+ args.end = end;
+
+ ret = ext4_fc_track_template(inode, __track_range, &args, 1);
+
+ trace_ext4_fc_track_range(inode, start, end, ret);
+}
+
+static void ext4_fc_submit_bh(struct super_block *sb)
+{
+ int write_flags = REQ_SYNC;
+ struct buffer_head *bh = EXT4_SB(sb)->s_fc_bh;
+
+ if (test_opt(sb, BARRIER))
+ write_flags |= REQ_FUA | REQ_PREFLUSH;
+ lock_buffer(bh);
+ clear_buffer_dirty(bh);
+ set_buffer_uptodate(bh);
+ bh->b_end_io = ext4_end_buffer_io_sync;
+ submit_bh(REQ_OP_WRITE, write_flags, bh);
+ EXT4_SB(sb)->s_fc_bh = NULL;
+}
+
+/* Ext4 commit path routines */
+
+/* memzero and update CRC */
+static void *ext4_fc_memzero(struct super_block *sb, void *dst, int len,
+ u32 *crc)
+{
+ void *ret;
+
+ ret = memset(dst, 0, len);
+ if (crc)
+ *crc = ext4_chksum(EXT4_SB(sb), *crc, dst, len);
+ return ret;
+}
+
+/*
+ * Allocate len bytes on a fast commit buffer.
+ *
+ * At commit time this function is used to manage fast commit
+ * block space. We never split a single TLV across blocks, so this
+ * function makes sure that if there's not enough space on the current
+ * block, the remaining space in the current block is marked as unused
+ * by adding an EXT4_FC_TAG_PAD tag. In that case, a new block is
+ * requested from jbd2 and the CRC is updated to reflect the padding
+ * we added.
+ */
+static u8 *ext4_fc_reserve_space(struct super_block *sb, int len, u32 *crc)
+{
+ struct ext4_fc_tl *tl;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct buffer_head *bh;
+ int bsize = sbi->s_journal->j_blocksize;
+ int ret, off = sbi->s_fc_bytes % bsize;
+ int pad_len;
+
+ /*
+ * After allocating len, we should have space at least for a 0 byte
+ * padding.
+ */
+ if (len + sizeof(struct ext4_fc_tl) > bsize)
+ return NULL;
+
+ if (bsize - off - 1 > len + sizeof(struct ext4_fc_tl)) {
+ /*
+ * Only allocate from current buffer if we have enough space for
+ * this request AND we have space to add a zero byte padding.
+ */
+ if (!sbi->s_fc_bh) {
+ ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh);
+ if (ret)
+ return NULL;
+ sbi->s_fc_bh = bh;
+ }
+ sbi->s_fc_bytes += len;
+ return sbi->s_fc_bh->b_data + off;
+ }
+ /* Need to add PAD tag */
+ tl = (struct ext4_fc_tl *)(sbi->s_fc_bh->b_data + off);
+ tl->fc_tag = cpu_to_le16(EXT4_FC_TAG_PAD);
+ pad_len = bsize - off - 1 - sizeof(struct ext4_fc_tl);
+ tl->fc_len = cpu_to_le16(pad_len);
+ if (crc)
+ *crc = ext4_chksum(sbi, *crc, tl, sizeof(*tl));
+ if (pad_len > 0)
+ ext4_fc_memzero(sb, tl + 1, pad_len, crc);
+ ext4_fc_submit_bh(sb);
+
+ ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh);
+ if (ret)
+ return NULL;
+ sbi->s_fc_bh = bh;
+ sbi->s_fc_bytes = (sbi->s_fc_bytes / bsize + 1) * bsize + len;
+ return sbi->s_fc_bh->b_data;
+}
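
[A worked example of the padding path above (hypothetical sizes): with a 4096-byte journal block, off = 4080 and a request for len = 32, the first test fails (4096 - 4080 - 1 = 15, which is not greater than 32 + 4), so a PAD tag with fc_len = 4096 - 4080 - 1 - 4 = 11 is written at offset 4080, the current buffer is submitted, and the 32 bytes are served from the start of a fresh jbd2 block, with s_fc_bytes rounded up to the next block boundary plus len.]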
+
+/* memcpy to fc reserved space and update CRC */
+static void *ext4_fc_memcpy(struct super_block *sb, void *dst, const void *src,
+ int len, u32 *crc)
+{
+ if (crc)
+ *crc = ext4_chksum(EXT4_SB(sb), *crc, src, len);
+ return memcpy(dst, src, len);
+}
+
+/*
+ * Complete a fast commit by writing tail tag.
+ *
+ * Writing the tail tag marks the end of a fast commit. In order to
+ * guarantee atomicity, after writing the tail tag, even if there's space
+ * remaining in the block, the next commit shouldn't use it. That's why
+ * the tail tag's length covers all of the remaining space in the block.
+ */
+static int ext4_fc_write_tail(struct super_block *sb, u32 crc)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_fc_tl tl;
+ struct ext4_fc_tail tail;
+ int off, bsize = sbi->s_journal->j_blocksize;
+ u8 *dst;
+
+ /*
+ * ext4_fc_reserve_space takes care of allocating an extra block if
+ * there's not enough space on this block to accommodate this tail.
+ */
+ dst = ext4_fc_reserve_space(sb, sizeof(tl) + sizeof(tail), &crc);
+ if (!dst)
+ return -ENOSPC;
+
+ off = sbi->s_fc_bytes % bsize;
+
+ tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_TAIL);
+ tl.fc_len = cpu_to_le16(bsize - off - 1 + sizeof(struct ext4_fc_tail));
+ sbi->s_fc_bytes = round_up(sbi->s_fc_bytes, bsize);
+
+ ext4_fc_memcpy(sb, dst, &tl, sizeof(tl), &crc);
+ dst += sizeof(tl);
+ tail.fc_tid = cpu_to_le32(sbi->s_journal->j_running_transaction->t_tid);
+ ext4_fc_memcpy(sb, dst, &tail.fc_tid, sizeof(tail.fc_tid), &crc);
+ dst += sizeof(tail.fc_tid);
+ tail.fc_crc = cpu_to_le32(crc);
+ ext4_fc_memcpy(sb, dst, &tail.fc_crc, sizeof(tail.fc_crc), NULL);
+
+ ext4_fc_submit_bh(sb);
+
+ return 0;
+}
+
+/*
+ * Adds tag, length, value and updates CRC. Returns true if tlv was added.
+ * Returns false if there's not enough space.
+ */
+static bool ext4_fc_add_tlv(struct super_block *sb, u16 tag, u16 len, u8 *val,
+ u32 *crc)
+{
+ struct ext4_fc_tl tl;
+ u8 *dst;
+
+ dst = ext4_fc_reserve_space(sb, sizeof(tl) + len, crc);
+ if (!dst)
+ return false;
+
+ tl.fc_tag = cpu_to_le16(tag);
+ tl.fc_len = cpu_to_le16(len);
+
+ ext4_fc_memcpy(sb, dst, &tl, sizeof(tl), crc);
+ ext4_fc_memcpy(sb, dst + sizeof(tl), val, len, crc);
+
+ return true;
+}
+
+/* Same as above, but adds dentry tlv. */
+static bool ext4_fc_add_dentry_tlv(struct super_block *sb, u16 tag,
+ int parent_ino, int ino, int dlen,
+ const unsigned char *dname,
+ u32 *crc)
+{
+ struct ext4_fc_dentry_info fcd;
+ struct ext4_fc_tl tl;
+ u8 *dst = ext4_fc_reserve_space(sb, sizeof(tl) + sizeof(fcd) + dlen,
+ crc);
+
+ if (!dst)
+ return false;
+
+ fcd.fc_parent_ino = cpu_to_le32(parent_ino);
+ fcd.fc_ino = cpu_to_le32(ino);
+ tl.fc_tag = cpu_to_le16(tag);
+ tl.fc_len = cpu_to_le16(sizeof(fcd) + dlen);
+ ext4_fc_memcpy(sb, dst, &tl, sizeof(tl), crc);
+ dst += sizeof(tl);
+ ext4_fc_memcpy(sb, dst, &fcd, sizeof(fcd), crc);
+ dst += sizeof(fcd);
+ ext4_fc_memcpy(sb, dst, dname, dlen, crc);
+ dst += dlen;
+
+ return true;
+}
+
+/*
+ * Writes the inode in the fast commit space under an EXT4_FC_TAG_INODE TLV.
+ * Returns 0 on success, error on failure.
+ */
+static int ext4_fc_write_inode(struct inode *inode, u32 *crc)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ int inode_len = EXT4_GOOD_OLD_INODE_SIZE;
+ int ret;
+ struct ext4_iloc iloc;
+ struct ext4_fc_inode fc_inode;
+ struct ext4_fc_tl tl;
+ u8 *dst;
+
+ ret = ext4_get_inode_loc(inode, &iloc);
+ if (ret)
+ return ret;
+
+ if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE)
+ inode_len += ei->i_extra_isize;
+
+ fc_inode.fc_ino = cpu_to_le32(inode->i_ino);
+ tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_INODE);
+ tl.fc_len = cpu_to_le16(inode_len + sizeof(fc_inode.fc_ino));
+
+ dst = ext4_fc_reserve_space(inode->i_sb,
+ sizeof(tl) + inode_len + sizeof(fc_inode.fc_ino), crc);
+ if (!dst)
+ return -ECANCELED;
+
+ if (!ext4_fc_memcpy(inode->i_sb, dst, &tl, sizeof(tl), crc))
+ return -ECANCELED;
+ dst += sizeof(tl);
+ if (!ext4_fc_memcpy(inode->i_sb, dst, &fc_inode, sizeof(fc_inode), crc))
+ return -ECANCELED;
+ dst += sizeof(fc_inode);
+ if (!ext4_fc_memcpy(inode->i_sb, dst, (u8 *)ext4_raw_inode(&iloc),
+ inode_len, crc))
+ return -ECANCELED;
+
+ return 0;
+}
+
+/*
+ * Writes updated data ranges for the inode in question. Updates CRC.
+ * Returns 0 on success, error otherwise.
+ */
+static int ext4_fc_write_inode_data(struct inode *inode, u32 *crc)
+{
+ ext4_lblk_t old_blk_size, cur_lblk_off, new_blk_size;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct ext4_map_blocks map;
+ struct ext4_fc_add_range fc_ext;
+ struct ext4_fc_del_range lrange;
+ struct ext4_extent *ex;
+ int ret;
+
+ mutex_lock(&ei->i_fc_lock);
+ if (ei->i_fc_lblk_len == 0) {
+ mutex_unlock(&ei->i_fc_lock);
+ return 0;
+ }
+ old_blk_size = ei->i_fc_lblk_start;
+ new_blk_size = ei->i_fc_lblk_start + ei->i_fc_lblk_len - 1;
+ ei->i_fc_lblk_len = 0;
+ mutex_unlock(&ei->i_fc_lock);
+
+ cur_lblk_off = old_blk_size;
+ jbd_debug(1, "%s: will try writing %d to %d for inode %ld\n",
+ __func__, cur_lblk_off, new_blk_size, inode->i_ino);
+
+ while (cur_lblk_off <= new_blk_size) {
+ map.m_lblk = cur_lblk_off;
+ map.m_len = new_blk_size - cur_lblk_off + 1;
+ ret = ext4_map_blocks(NULL, inode, &map, 0);
+ if (ret < 0)
+ return -ECANCELED;
+
+ if (map.m_len == 0) {
+ cur_lblk_off++;
+ continue;
+ }
+
+ if (ret == 0) {
+ lrange.fc_ino = cpu_to_le32(inode->i_ino);
+ lrange.fc_lblk = cpu_to_le32(map.m_lblk);
+ lrange.fc_len = cpu_to_le32(map.m_len);
+ if (!ext4_fc_add_tlv(inode->i_sb, EXT4_FC_TAG_DEL_RANGE,
+ sizeof(lrange), (u8 *)&lrange, crc))
+ return -ENOSPC;
+ } else {
+ fc_ext.fc_ino = cpu_to_le32(inode->i_ino);
+ ex = (struct ext4_extent *)&fc_ext.fc_ex;
+ ex->ee_block = cpu_to_le32(map.m_lblk);
+ ex->ee_len = cpu_to_le16(map.m_len);
+ ext4_ext_store_pblock(ex, map.m_pblk);
+ if (map.m_flags & EXT4_MAP_UNWRITTEN)
+ ext4_ext_mark_unwritten(ex);
+ else
+ ext4_ext_mark_initialized(ex);
+ if (!ext4_fc_add_tlv(inode->i_sb, EXT4_FC_TAG_ADD_RANGE,
+ sizeof(fc_ext), (u8 *)&fc_ext, crc))
+ return -ENOSPC;
+ }
+
+ cur_lblk_off += map.m_len;
+ }
+
+ return 0;
+}
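+
+/*
+ * Example with hypothetical block numbers: if the tracked range covers
+ * logical blocks 100..149 and only blocks 100..119 are currently mapped,
+ * the loop above emits one ADD_RANGE tlv for the mapped extent and one
+ * DEL_RANGE tlv for the hole at 120..149.
+ */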
+
+
+/* Submit data for all the fast commit inodes */
+static int ext4_fc_submit_inode_data_all(journal_t *journal)
+{
+ struct super_block *sb = (struct super_block *)(journal->j_private);
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_inode_info *ei;
+ struct list_head *pos;
+ int ret = 0;
+
+ spin_lock(&sbi->s_fc_lock);
+ sbi->s_mount_state |= EXT4_FC_COMMITTING;
+ list_for_each(pos, &sbi->s_fc_q[FC_Q_MAIN]) {
+ ei = list_entry(pos, struct ext4_inode_info, i_fc_list);
+ ext4_set_inode_state(&ei->vfs_inode, EXT4_STATE_FC_COMMITTING);
+ while (atomic_read(&ei->i_fc_updates)) {
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&ei->i_fc_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (atomic_read(&ei->i_fc_updates)) {
+ spin_unlock(&sbi->s_fc_lock);
+ schedule();
+ spin_lock(&sbi->s_fc_lock);
+ }
+ finish_wait(&ei->i_fc_wait, &wait);
+ }
+ spin_unlock(&sbi->s_fc_lock);
+ ret = jbd2_submit_inode_data(ei->jinode);
+ if (ret)
+ return ret;
+ spin_lock(&sbi->s_fc_lock);
+ }
+ spin_unlock(&sbi->s_fc_lock);
+
+ return ret;
+}
+
+/* Wait for completion of data for all the fast commit inodes */
+static int ext4_fc_wait_inode_data_all(journal_t *journal)
+{
+ struct super_block *sb = (struct super_block *)(journal->j_private);
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_inode_info *pos, *n;
+ int ret = 0;
+
+ spin_lock(&sbi->s_fc_lock);
+ list_for_each_entry_safe(pos, n, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
+ if (!ext4_test_inode_state(&pos->vfs_inode,
+ EXT4_STATE_FC_COMMITTING))
+ continue;
+ spin_unlock(&sbi->s_fc_lock);
+
+ ret = jbd2_wait_inode_data(journal, pos->jinode);
+ if (ret)
+ return ret;
+ spin_lock(&sbi->s_fc_lock);
+ }
+ spin_unlock(&sbi->s_fc_lock);
+
+ return 0;
+}
+
+/* Commit all the directory entry updates */
+static int ext4_fc_commit_dentry_updates(journal_t *journal, u32 *crc)
+{
+ struct super_block *sb = (struct super_block *)(journal->j_private);
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_fc_dentry_update *fc_dentry;
+ struct inode *inode;
+ struct list_head *pos, *n, *fcd_pos, *fcd_n;
+ struct ext4_inode_info *ei;
+ int ret;
+
+ if (list_empty(&sbi->s_fc_dentry_q[FC_Q_MAIN]))
+ return 0;
+ list_for_each_safe(fcd_pos, fcd_n, &sbi->s_fc_dentry_q[FC_Q_MAIN]) {
+ fc_dentry = list_entry(fcd_pos, struct ext4_fc_dentry_update,
+ fcd_list);
+ if (fc_dentry->fcd_op != EXT4_FC_TAG_CREAT) {
+ spin_unlock(&sbi->s_fc_lock);
+ if (!ext4_fc_add_dentry_tlv(
+ sb, fc_dentry->fcd_op,
+ fc_dentry->fcd_parent, fc_dentry->fcd_ino,
+ fc_dentry->fcd_name.len,
+ fc_dentry->fcd_name.name, crc)) {
+ ret = -ENOSPC;
+ goto lock_and_exit;
+ }
+ spin_lock(&sbi->s_fc_lock);
+ continue;
+ }
+
+ inode = NULL;
+ list_for_each_safe(pos, n, &sbi->s_fc_q[FC_Q_MAIN]) {
+ ei = list_entry(pos, struct ext4_inode_info, i_fc_list);
+ if (ei->vfs_inode.i_ino == fc_dentry->fcd_ino) {
+ inode = &ei->vfs_inode;
+ break;
+ }
+ }
+ /*
+		 * If we don't find the inode in our list, then it was deleted,
+		 * in which case we don't need to record its create tag.
+ */
+ if (!inode)
+ continue;
+ spin_unlock(&sbi->s_fc_lock);
+
+ /*
+ * We first write the inode and then the create dirent. This
+ * allows the recovery code to create an unnamed inode first
+		 * and then link it to a directory entry. That lets us use
+		 * the namei.c routines almost as is and simplifies the
+		 * recovery code.
+ */
+ ret = ext4_fc_write_inode(inode, crc);
+ if (ret)
+ goto lock_and_exit;
+
+ ret = ext4_fc_write_inode_data(inode, crc);
+ if (ret)
+ goto lock_and_exit;
+
+ if (!ext4_fc_add_dentry_tlv(
+ sb, fc_dentry->fcd_op,
+ fc_dentry->fcd_parent, fc_dentry->fcd_ino,
+ fc_dentry->fcd_name.len,
+ fc_dentry->fcd_name.name, crc)) {
+ spin_lock(&sbi->s_fc_lock);
+ ret = -ENOSPC;
+ goto lock_and_exit;
+ }
+
+ spin_lock(&sbi->s_fc_lock);
+ }
+ return 0;
+lock_and_exit:
+ spin_lock(&sbi->s_fc_lock);
+ return ret;
+}
+
+static int ext4_fc_perform_commit(journal_t *journal)
+{
+ struct super_block *sb = (struct super_block *)(journal->j_private);
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_inode_info *iter;
+ struct ext4_fc_head head;
+ struct list_head *pos;
+ struct inode *inode;
+ struct blk_plug plug;
+ int ret = 0;
+ u32 crc = 0;
+
+ ret = ext4_fc_submit_inode_data_all(journal);
+ if (ret)
+ return ret;
+
+ ret = ext4_fc_wait_inode_data_all(journal);
+ if (ret)
+ return ret;
+
+ blk_start_plug(&plug);
+ if (sbi->s_fc_bytes == 0) {
+ /*
+ * Add a head tag only if this is the first fast commit
+ * in this TID.
+ */
+ head.fc_features = cpu_to_le32(EXT4_FC_SUPPORTED_FEATURES);
+ head.fc_tid = cpu_to_le32(
+ sbi->s_journal->j_running_transaction->t_tid);
+ if (!ext4_fc_add_tlv(sb, EXT4_FC_TAG_HEAD, sizeof(head),
+ (u8 *)&head, &crc))
+ goto out;
+ }
+
+ spin_lock(&sbi->s_fc_lock);
+ ret = ext4_fc_commit_dentry_updates(journal, &crc);
+ if (ret) {
+ spin_unlock(&sbi->s_fc_lock);
+ goto out;
+ }
+
+ list_for_each(pos, &sbi->s_fc_q[FC_Q_MAIN]) {
+ iter = list_entry(pos, struct ext4_inode_info, i_fc_list);
+ inode = &iter->vfs_inode;
+ if (!ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING))
+ continue;
+
+ spin_unlock(&sbi->s_fc_lock);
+ ret = ext4_fc_write_inode_data(inode, &crc);
+ if (ret)
+ goto out;
+ ret = ext4_fc_write_inode(inode, &crc);
+ if (ret)
+ goto out;
+ spin_lock(&sbi->s_fc_lock);
+ EXT4_I(inode)->i_fc_committed_subtid =
+ atomic_read(&sbi->s_fc_subtid);
+ }
+ spin_unlock(&sbi->s_fc_lock);
+
+ ret = ext4_fc_write_tail(sb, crc);
+
+out:
+ blk_finish_plug(&plug);
+ return ret;
+}
+
+/*
+ * The main commit entry point. Performs a fast commit for transaction
+ * commit_tid if needed. If it's not possible to perform a fast commit
+ * due to various reasons, we fall back to full commit. Returns 0
+ * on success, error otherwise.
+ */
+int ext4_fc_commit(journal_t *journal, tid_t commit_tid)
+{
+ struct super_block *sb = (struct super_block *)(journal->j_private);
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int nblks = 0, ret, bsize = journal->j_blocksize;
+ int subtid = atomic_read(&sbi->s_fc_subtid);
+ int reason = EXT4_FC_REASON_OK, fc_bufs_before = 0;
+ ktime_t start_time, commit_time;
+
+ trace_ext4_fc_commit_start(sb);
+
+ start_time = ktime_get();
+
+ if (!test_opt2(sb, JOURNAL_FAST_COMMIT) ||
+ (ext4_fc_is_ineligible(sb))) {
+ reason = EXT4_FC_REASON_INELIGIBLE;
+ goto out;
+ }
+
+restart_fc:
+ ret = jbd2_fc_begin_commit(journal, commit_tid);
+ if (ret == -EALREADY) {
+ /* There was an ongoing commit, check if we need to restart */
+ if (atomic_read(&sbi->s_fc_subtid) <= subtid &&
+ commit_tid > journal->j_commit_sequence)
+ goto restart_fc;
+ reason = EXT4_FC_REASON_ALREADY_COMMITTED;
+ goto out;
+ } else if (ret) {
+ sbi->s_fc_stats.fc_ineligible_reason_count[EXT4_FC_COMMIT_FAILED]++;
+ reason = EXT4_FC_REASON_FC_START_FAILED;
+ goto out;
+ }
+
+ fc_bufs_before = (sbi->s_fc_bytes + bsize - 1) / bsize;
+ ret = ext4_fc_perform_commit(journal);
+ if (ret < 0) {
+ sbi->s_fc_stats.fc_ineligible_reason_count[EXT4_FC_COMMIT_FAILED]++;
+ reason = EXT4_FC_REASON_FC_FAILED;
+ goto out;
+ }
+ nblks = (sbi->s_fc_bytes + bsize - 1) / bsize - fc_bufs_before;
+ ret = jbd2_fc_wait_bufs(journal, nblks);
+ if (ret < 0) {
+ sbi->s_fc_stats.fc_ineligible_reason_count[EXT4_FC_COMMIT_FAILED]++;
+ reason = EXT4_FC_REASON_FC_FAILED;
+ goto out;
+ }
+ atomic_inc(&sbi->s_fc_subtid);
+ jbd2_fc_end_commit(journal);
+out:
+ /* Has any ineligible update happened since we started? */
+ if (reason == EXT4_FC_REASON_OK && ext4_fc_is_ineligible(sb)) {
+ sbi->s_fc_stats.fc_ineligible_reason_count[EXT4_FC_COMMIT_FAILED]++;
+ reason = EXT4_FC_REASON_INELIGIBLE;
+ }
+
+ spin_lock(&sbi->s_fc_lock);
+ if (reason != EXT4_FC_REASON_OK &&
+ reason != EXT4_FC_REASON_ALREADY_COMMITTED) {
+ sbi->s_fc_stats.fc_ineligible_commits++;
+ } else {
+ sbi->s_fc_stats.fc_num_commits++;
+ sbi->s_fc_stats.fc_numblks += nblks;
+ }
+ spin_unlock(&sbi->s_fc_lock);
+ nblks = (reason == EXT4_FC_REASON_OK) ? nblks : 0;
+ trace_ext4_fc_commit_stop(sb, nblks, reason);
+ commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
+ /*
+	 * Weight the existing average higher than the latest commit time so
+	 * we don't react too strongly to vast changes in the commit time.
+ */
+ if (likely(sbi->s_fc_avg_commit_time))
+ sbi->s_fc_avg_commit_time = (commit_time +
+ sbi->s_fc_avg_commit_time * 3) / 4;
+ else
+ sbi->s_fc_avg_commit_time = commit_time;
+ jbd_debug(1,
+		"Fast commit ended with blks = %d, reason = %d, subtid = %d",
+ nblks, reason, subtid);
+ if (reason == EXT4_FC_REASON_FC_FAILED)
+ return jbd2_fc_end_commit_fallback(journal, commit_tid);
+ if (reason == EXT4_FC_REASON_FC_START_FAILED ||
+ reason == EXT4_FC_REASON_INELIGIBLE)
+ return jbd2_complete_transaction(journal, commit_tid);
+ return 0;
+}
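+
+/*
+ * The average commit time maintained above is an exponentially weighted
+ * moving average with a weight of 1/4 on the newest sample:
+ * avg' = (t + 3 * avg) / 4. With hypothetical numbers, an average of
+ * 100us and a new 200us commit move the average to (200 + 300) / 4 =
+ * 125us instead of jumping straight to 150us.
+ */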
+
+/*
+ * Fast commit cleanup routine. This is called after every fast commit and
+ * full commit. full is true if we are called after a full commit.
+ */
+static void ext4_fc_cleanup(journal_t *journal, int full)
+{
+ struct super_block *sb = journal->j_private;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_inode_info *iter;
+ struct ext4_fc_dentry_update *fc_dentry;
+ struct list_head *pos, *n;
+
+ if (full && sbi->s_fc_bh)
+ sbi->s_fc_bh = NULL;
+
+ jbd2_fc_release_bufs(journal);
+
+ spin_lock(&sbi->s_fc_lock);
+ list_for_each_safe(pos, n, &sbi->s_fc_q[FC_Q_MAIN]) {
+ iter = list_entry(pos, struct ext4_inode_info, i_fc_list);
+ list_del_init(&iter->i_fc_list);
+ ext4_clear_inode_state(&iter->vfs_inode,
+ EXT4_STATE_FC_COMMITTING);
+ ext4_fc_reset_inode(&iter->vfs_inode);
+ /* Make sure EXT4_STATE_FC_COMMITTING bit is clear */
+ smp_mb();
+#if (BITS_PER_LONG < 64)
+ wake_up_bit(&iter->i_state_flags, EXT4_STATE_FC_COMMITTING);
+#else
+ wake_up_bit(&iter->i_flags, EXT4_STATE_FC_COMMITTING);
+#endif
+ }
+
+ while (!list_empty(&sbi->s_fc_dentry_q[FC_Q_MAIN])) {
+ fc_dentry = list_first_entry(&sbi->s_fc_dentry_q[FC_Q_MAIN],
+ struct ext4_fc_dentry_update,
+ fcd_list);
+ list_del_init(&fc_dentry->fcd_list);
+ spin_unlock(&sbi->s_fc_lock);
+
+ if (fc_dentry->fcd_name.name &&
+ fc_dentry->fcd_name.len > DNAME_INLINE_LEN)
+ kfree(fc_dentry->fcd_name.name);
+ kmem_cache_free(ext4_fc_dentry_cachep, fc_dentry);
+ spin_lock(&sbi->s_fc_lock);
+ }
+
+ list_splice_init(&sbi->s_fc_dentry_q[FC_Q_STAGING],
+ &sbi->s_fc_dentry_q[FC_Q_MAIN]);
+ list_splice_init(&sbi->s_fc_q[FC_Q_STAGING],
+				&sbi->s_fc_q[FC_Q_MAIN]);
+
+ sbi->s_mount_state &= ~EXT4_FC_COMMITTING;
+ sbi->s_mount_state &= ~EXT4_FC_INELIGIBLE;
+
+ if (full)
+ sbi->s_fc_bytes = 0;
+ spin_unlock(&sbi->s_fc_lock);
+ trace_ext4_fc_stats(sb);
+}
+
+/* Ext4 Replay Path Routines */
+
+/* Get length of a particular tlv */
+static inline int ext4_fc_tag_len(struct ext4_fc_tl *tl)
+{
+ return le16_to_cpu(tl->fc_len);
+}
+
+/* Get a pointer to "value" of a tlv */
+static inline u8 *ext4_fc_tag_val(struct ext4_fc_tl *tl)
+{
+ return (u8 *)tl + sizeof(*tl);
+}
+
+/* Helper struct for dentry replay routines */
+struct dentry_info_args {
+ int parent_ino, dname_len, ino, inode_len;
+ char *dname;
+};
+
+static inline void tl_to_darg(struct dentry_info_args *darg,
+ struct ext4_fc_tl *tl)
+{
+ struct ext4_fc_dentry_info *fcd;
+
+ fcd = (struct ext4_fc_dentry_info *)ext4_fc_tag_val(tl);
+
+ darg->parent_ino = le32_to_cpu(fcd->fc_parent_ino);
+ darg->ino = le32_to_cpu(fcd->fc_ino);
+ darg->dname = fcd->fc_dname;
+ darg->dname_len = ext4_fc_tag_len(tl) -
+ sizeof(struct ext4_fc_dentry_info);
+}
+
+/* Unlink replay function */
+static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl)
+{
+ struct inode *inode, *old_parent;
+ struct qstr entry;
+ struct dentry_info_args darg;
+ int ret = 0;
+
+ tl_to_darg(&darg, tl);
+
+ trace_ext4_fc_replay(sb, EXT4_FC_TAG_UNLINK, darg.ino,
+ darg.parent_ino, darg.dname_len);
+
+ entry.name = darg.dname;
+ entry.len = darg.dname_len;
+ inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
+
+ if (IS_ERR_OR_NULL(inode)) {
+ jbd_debug(1, "Inode %d not found", darg.ino);
+ return 0;
+ }
+
+ old_parent = ext4_iget(sb, darg.parent_ino,
+ EXT4_IGET_NORMAL);
+ if (IS_ERR_OR_NULL(old_parent)) {
+ jbd_debug(1, "Dir with inode %d not found", darg.parent_ino);
+ iput(inode);
+ return 0;
+ }
+
+ ret = __ext4_unlink(old_parent, &entry, inode);
+	/* -ENOENT is ok because the entry might not exist anymore. */
+ if (ret == -ENOENT)
+ ret = 0;
+ iput(old_parent);
+ iput(inode);
+ return ret;
+}
+
+static int ext4_fc_replay_link_internal(struct super_block *sb,
+ struct dentry_info_args *darg,
+ struct inode *inode)
+{
+ struct inode *dir = NULL;
+ struct dentry *dentry_dir = NULL, *dentry_inode = NULL;
+ struct qstr qstr_dname = QSTR_INIT(darg->dname, darg->dname_len);
+ int ret = 0;
+
+ dir = ext4_iget(sb, darg->parent_ino, EXT4_IGET_NORMAL);
+ if (IS_ERR(dir)) {
+ jbd_debug(1, "Dir with inode %d not found.", darg->parent_ino);
+ dir = NULL;
+ goto out;
+ }
+
+ dentry_dir = d_obtain_alias(dir);
+ if (IS_ERR(dentry_dir)) {
+ jbd_debug(1, "Failed to obtain dentry");
+ dentry_dir = NULL;
+ goto out;
+ }
+
+ dentry_inode = d_alloc(dentry_dir, &qstr_dname);
+ if (!dentry_inode) {
+ jbd_debug(1, "Inode dentry not created.");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = __ext4_link(dir, inode, dentry_inode);
+ /*
+ * It's possible that link already existed since data blocks
+ * for the dir in question got persisted before we crashed OR
+ * we replayed this tag and crashed before the entire replay
+ * could complete.
+ */
+ if (ret && ret != -EEXIST) {
+ jbd_debug(1, "Failed to link\n");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (dentry_dir) {
+ d_drop(dentry_dir);
+ dput(dentry_dir);
+ } else if (dir) {
+ iput(dir);
+ }
+ if (dentry_inode) {
+ d_drop(dentry_inode);
+ dput(dentry_inode);
+ }
+
+ return ret;
+}
+
+/* Link replay function */
+static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl)
+{
+ struct inode *inode;
+ struct dentry_info_args darg;
+ int ret = 0;
+
+ tl_to_darg(&darg, tl);
+ trace_ext4_fc_replay(sb, EXT4_FC_TAG_LINK, darg.ino,
+ darg.parent_ino, darg.dname_len);
+
+ inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
+ if (IS_ERR_OR_NULL(inode)) {
+ jbd_debug(1, "Inode not found.");
+ return 0;
+ }
+
+ ret = ext4_fc_replay_link_internal(sb, &darg, inode);
+ iput(inode);
+ return ret;
+}
+
+/*
+ * Record all the modified inodes during replay. We use this later to set
+ * up the block bitmaps correctly.
+ */
+static int ext4_fc_record_modified_inode(struct super_block *sb, int ino)
+{
+ struct ext4_fc_replay_state *state;
+ int i;
+
+ state = &EXT4_SB(sb)->s_fc_replay_state;
+ for (i = 0; i < state->fc_modified_inodes_used; i++)
+ if (state->fc_modified_inodes[i] == ino)
+ return 0;
+ if (state->fc_modified_inodes_used == state->fc_modified_inodes_size) {
+ state->fc_modified_inodes_size +=
+ EXT4_FC_REPLAY_REALLOC_INCREMENT;
+ state->fc_modified_inodes = krealloc(
+ state->fc_modified_inodes, sizeof(int) *
+ state->fc_modified_inodes_size,
+ GFP_KERNEL);
+ if (!state->fc_modified_inodes)
+ return -ENOMEM;
+ }
+ state->fc_modified_inodes[state->fc_modified_inodes_used++] = ino;
+ return 0;
+}
+
+/*
+ * Inode replay function
+ */
+static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
+{
+ struct ext4_fc_inode *fc_inode;
+ struct ext4_inode *raw_inode;
+ struct ext4_inode *raw_fc_inode;
+ struct inode *inode = NULL;
+ struct ext4_iloc iloc;
+ int inode_len, ino, ret, tag = le16_to_cpu(tl->fc_tag);
+ struct ext4_extent_header *eh;
+
+ fc_inode = (struct ext4_fc_inode *)ext4_fc_tag_val(tl);
+
+ ino = le32_to_cpu(fc_inode->fc_ino);
+ trace_ext4_fc_replay(sb, tag, ino, 0, 0);
+
+ inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
+ if (!IS_ERR_OR_NULL(inode)) {
+ ext4_ext_clear_bb(inode);
+ iput(inode);
+ }
+
+ ext4_fc_record_modified_inode(sb, ino);
+
+ raw_fc_inode = (struct ext4_inode *)fc_inode->fc_raw_inode;
+ ret = ext4_get_fc_inode_loc(sb, ino, &iloc);
+ if (ret)
+ goto out;
+
+ inode_len = ext4_fc_tag_len(tl) - sizeof(struct ext4_fc_inode);
+ raw_inode = ext4_raw_inode(&iloc);
+
+ memcpy(raw_inode, raw_fc_inode, offsetof(struct ext4_inode, i_block));
+ memcpy(&raw_inode->i_generation, &raw_fc_inode->i_generation,
+ inode_len - offsetof(struct ext4_inode, i_generation));
+ if (le32_to_cpu(raw_inode->i_flags) & EXT4_EXTENTS_FL) {
+ eh = (struct ext4_extent_header *)(&raw_inode->i_block[0]);
+ if (eh->eh_magic != EXT4_EXT_MAGIC) {
+ memset(eh, 0, sizeof(*eh));
+ eh->eh_magic = EXT4_EXT_MAGIC;
+ eh->eh_max = cpu_to_le16(
+ (sizeof(raw_inode->i_block) -
+ sizeof(struct ext4_extent_header))
+ / sizeof(struct ext4_extent));
+ }
+ } else if (le32_to_cpu(raw_inode->i_flags) & EXT4_INLINE_DATA_FL) {
+ memcpy(raw_inode->i_block, raw_fc_inode->i_block,
+ sizeof(raw_inode->i_block));
+ }
+
+ /* Immediately update the inode on disk. */
+ ret = ext4_handle_dirty_metadata(NULL, NULL, iloc.bh);
+ if (ret)
+ goto out;
+ ret = sync_dirty_buffer(iloc.bh);
+ if (ret)
+ goto out;
+ ret = ext4_mark_inode_used(sb, ino);
+ if (ret)
+ goto out;
+
+ /* Given that we just wrote the inode on disk, this SHOULD succeed. */
+ inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
+ if (IS_ERR_OR_NULL(inode)) {
+ jbd_debug(1, "Inode not found.");
+ return -EFSCORRUPTED;
+ }
+
+ /*
+ * Our allocator could have made different decisions than before
+	 * crashing. This should be fixed but until then, we recalculate
+	 * the number of blocks used by the inode.
+ */
+ ext4_ext_replay_set_iblocks(inode);
+
+ inode->i_generation = le32_to_cpu(ext4_raw_inode(&iloc)->i_generation);
+ ext4_reset_inode_seed(inode);
+
+ ext4_inode_csum_set(inode, ext4_raw_inode(&iloc), EXT4_I(inode));
+ ret = ext4_handle_dirty_metadata(NULL, NULL, iloc.bh);
+ sync_dirty_buffer(iloc.bh);
+ brelse(iloc.bh);
+out:
+ iput(inode);
+ if (!ret)
+ blkdev_issue_flush(sb->s_bdev, GFP_KERNEL);
+
+ return 0;
+}
+
+/*
+ * Dentry create replay function.
+ *
+ * EXT4_FC_TAG_CREAT is preceded by EXT4_FC_TAG_INODE, which means the
+ * inode for which we are trying to create a dentry here should already
+ * have been replayed before we get here.
+ */
+static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl)
+{
+ int ret = 0;
+ struct inode *inode = NULL;
+ struct inode *dir = NULL;
+ struct dentry_info_args darg;
+
+ tl_to_darg(&darg, tl);
+
+ trace_ext4_fc_replay(sb, EXT4_FC_TAG_CREAT, darg.ino,
+ darg.parent_ino, darg.dname_len);
+
+	/* This takes care of updating the group descriptor and other metadata */
+ ret = ext4_mark_inode_used(sb, darg.ino);
+ if (ret)
+ goto out;
+
+ inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
+ if (IS_ERR_OR_NULL(inode)) {
+ jbd_debug(1, "inode %d not found.", darg.ino);
+ inode = NULL;
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (S_ISDIR(inode->i_mode)) {
+ /*
+ * If we are creating a directory, we need to make sure that the
+		 * dot and dot-dot dirents are set up properly.
+ */
+ dir = ext4_iget(sb, darg.parent_ino, EXT4_IGET_NORMAL);
+ if (IS_ERR_OR_NULL(dir)) {
+			jbd_debug(1, "Dir %d not found.", darg.parent_ino);
+ goto out;
+ }
+ ret = ext4_init_new_dir(NULL, dir, inode);
+ iput(dir);
+ if (ret) {
+ ret = 0;
+ goto out;
+ }
+ }
+ ret = ext4_fc_replay_link_internal(sb, &darg, inode);
+ if (ret)
+ goto out;
+ set_nlink(inode, 1);
+ ext4_mark_inode_dirty(NULL, inode);
+out:
+ if (inode)
+ iput(inode);
+ return ret;
+}
+
+/*
+ * Record the physical disk regions that the fast commit area marks as in
+ * use. Our simple replay phase allocator excludes these regions from
+ * allocation.
+ */
+static int ext4_fc_record_regions(struct super_block *sb, int ino,
+ ext4_lblk_t lblk, ext4_fsblk_t pblk, int len)
+{
+ struct ext4_fc_replay_state *state;
+ struct ext4_fc_alloc_region *region;
+
+ state = &EXT4_SB(sb)->s_fc_replay_state;
+ if (state->fc_regions_used == state->fc_regions_size) {
+ state->fc_regions_size +=
+ EXT4_FC_REPLAY_REALLOC_INCREMENT;
+ state->fc_regions = krealloc(
+ state->fc_regions,
+ state->fc_regions_size *
+ sizeof(struct ext4_fc_alloc_region),
+ GFP_KERNEL);
+ if (!state->fc_regions)
+ return -ENOMEM;
+ }
+ region = &state->fc_regions[state->fc_regions_used++];
+ region->ino = ino;
+ region->lblk = lblk;
+ region->pblk = pblk;
+ region->len = len;
+
+ return 0;
+}
+
+/* Replay add range tag */
+static int ext4_fc_replay_add_range(struct super_block *sb,
+ struct ext4_fc_tl *tl)
+{
+ struct ext4_fc_add_range *fc_add_ex;
+ struct ext4_extent newex, *ex;
+ struct inode *inode;
+ ext4_lblk_t start, cur;
+ int remaining, len;
+ ext4_fsblk_t start_pblk;
+ struct ext4_map_blocks map;
+ struct ext4_ext_path *path = NULL;
+ int ret;
+
+ fc_add_ex = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl);
+ ex = (struct ext4_extent *)&fc_add_ex->fc_ex;
+
+ trace_ext4_fc_replay(sb, EXT4_FC_TAG_ADD_RANGE,
+ le32_to_cpu(fc_add_ex->fc_ino), le32_to_cpu(ex->ee_block),
+ ext4_ext_get_actual_len(ex));
+
+ inode = ext4_iget(sb, le32_to_cpu(fc_add_ex->fc_ino),
+ EXT4_IGET_NORMAL);
+ if (IS_ERR_OR_NULL(inode)) {
+ jbd_debug(1, "Inode not found.");
+ return 0;
+ }
+
+ ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
+
+ start = le32_to_cpu(ex->ee_block);
+ start_pblk = ext4_ext_pblock(ex);
+ len = ext4_ext_get_actual_len(ex);
+
+ cur = start;
+ remaining = len;
+ jbd_debug(1, "ADD_RANGE, lblk %d, pblk %lld, len %d, unwritten %d, inode %ld\n",
+ start, start_pblk, len, ext4_ext_is_unwritten(ex),
+ inode->i_ino);
+
+ while (remaining > 0) {
+ map.m_lblk = cur;
+ map.m_len = remaining;
+ map.m_pblk = 0;
+ ret = ext4_map_blocks(NULL, inode, &map, 0);
+
+ if (ret < 0) {
+ iput(inode);
+ return 0;
+ }
+
+ if (ret == 0) {
+ /* Range is not mapped */
+ path = ext4_find_extent(inode, cur, NULL, 0);
+			/* ext4_find_extent() returns ERR_PTR, never NULL */
+			if (IS_ERR(path)) {
+				iput(inode);
+				return 0;
+			}
+ memset(&newex, 0, sizeof(newex));
+ newex.ee_block = cpu_to_le32(cur);
+ ext4_ext_store_pblock(
+ &newex, start_pblk + cur - start);
+ newex.ee_len = cpu_to_le16(map.m_len);
+ if (ext4_ext_is_unwritten(ex))
+ ext4_ext_mark_unwritten(&newex);
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ret = ext4_ext_insert_extent(
+ NULL, inode, &path, &newex, 0);
+ up_write((&EXT4_I(inode)->i_data_sem));
+ ext4_ext_drop_refs(path);
+ kfree(path);
+ if (ret) {
+ iput(inode);
+ return 0;
+ }
+ goto next;
+ }
+
+ if (start_pblk + cur - start != map.m_pblk) {
+ /*
+ * Logical to physical mapping changed. This can happen
+ * if this range was removed and then reallocated to
+ * map to new physical blocks during a fast commit.
+ */
+ ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
+ ext4_ext_is_unwritten(ex),
+ start_pblk + cur - start);
+ if (ret) {
+ iput(inode);
+ return 0;
+ }
+ /*
+ * Mark the old blocks as free since they aren't used
+ * anymore. We maintain an array of all the modified
+ * inodes. In case these blocks are still used at either
+ * a different logical range in the same inode or in
+ * some different inode, we will mark them as allocated
+ * at the end of the FC replay using our array of
+ * modified inodes.
+ */
+ ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
+ goto next;
+ }
+
+ /* Range is mapped and needs a state change */
+ jbd_debug(1, "Converting from %d to %d %lld",
+ map.m_flags & EXT4_MAP_UNWRITTEN,
+ ext4_ext_is_unwritten(ex), map.m_pblk);
+ ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
+ ext4_ext_is_unwritten(ex), map.m_pblk);
+ if (ret) {
+ iput(inode);
+ return 0;
+ }
+ /*
+ * We may have split the extent tree while toggling the state.
+ * Try to shrink the extent tree now.
+ */
+ ext4_ext_replay_shrink_inode(inode, start + len);
+next:
+ cur += map.m_len;
+ remaining -= map.m_len;
+ }
+ ext4_ext_replay_shrink_inode(inode, i_size_read(inode) >>
+ sb->s_blocksize_bits);
+ iput(inode);
+ return 0;
+}
+
+/* Replay DEL_RANGE tag */
+static int
+ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl)
+{
+ struct inode *inode;
+ struct ext4_fc_del_range *lrange;
+ struct ext4_map_blocks map;
+ ext4_lblk_t cur, remaining;
+ int ret;
+
+ lrange = (struct ext4_fc_del_range *)ext4_fc_tag_val(tl);
+ cur = le32_to_cpu(lrange->fc_lblk);
+ remaining = le32_to_cpu(lrange->fc_len);
+
+ trace_ext4_fc_replay(sb, EXT4_FC_TAG_DEL_RANGE,
+ le32_to_cpu(lrange->fc_ino), cur, remaining);
+
+ inode = ext4_iget(sb, le32_to_cpu(lrange->fc_ino), EXT4_IGET_NORMAL);
+ if (IS_ERR_OR_NULL(inode)) {
+ jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange->fc_ino));
+ return 0;
+ }
+
+ ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
+
+ jbd_debug(1, "DEL_RANGE, inode %ld, lblk %d, len %d\n",
+ inode->i_ino, le32_to_cpu(lrange->fc_lblk),
+ le32_to_cpu(lrange->fc_len));
+ while (remaining > 0) {
+ map.m_lblk = cur;
+ map.m_len = remaining;
+
+ ret = ext4_map_blocks(NULL, inode, &map, 0);
+ if (ret < 0) {
+ iput(inode);
+ return 0;
+ }
+ if (ret > 0) {
+ remaining -= ret;
+ cur += ret;
+ ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
+ } else {
+ remaining -= map.m_len;
+ cur += map.m_len;
+ }
+ }
+
+ ret = ext4_punch_hole(inode,
+ le32_to_cpu(lrange->fc_lblk) << sb->s_blocksize_bits,
+ le32_to_cpu(lrange->fc_len) << sb->s_blocksize_bits);
+ if (ret)
+ jbd_debug(1, "ext4_punch_hole returned %d", ret);
+ ext4_ext_replay_shrink_inode(inode,
+ i_size_read(inode) >> sb->s_blocksize_bits);
+ ext4_mark_inode_dirty(NULL, inode);
+ iput(inode);
+
+ return 0;
+}
+
+static inline const char *tag2str(u16 tag)
+{
+ switch (tag) {
+ case EXT4_FC_TAG_LINK:
+ return "TAG_ADD_ENTRY";
+ case EXT4_FC_TAG_UNLINK:
+ return "TAG_DEL_ENTRY";
+ case EXT4_FC_TAG_ADD_RANGE:
+ return "TAG_ADD_RANGE";
+ case EXT4_FC_TAG_CREAT:
+ return "TAG_CREAT_DENTRY";
+ case EXT4_FC_TAG_DEL_RANGE:
+ return "TAG_DEL_RANGE";
+ case EXT4_FC_TAG_INODE:
+ return "TAG_INODE";
+ case EXT4_FC_TAG_PAD:
+ return "TAG_PAD";
+ case EXT4_FC_TAG_TAIL:
+ return "TAG_TAIL";
+ case EXT4_FC_TAG_HEAD:
+ return "TAG_HEAD";
+ default:
+ return "TAG_ERROR";
+ }
+}
+
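+/*
+ * Walk every inode modified during replay and mark all the blocks its
+ * extent tree references (both the interior index blocks of the tree and
+ * the data blocks of each extent) as in-use in the block bitmaps,
+ * updating the group counters along the way.
+ */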
+static void ext4_fc_set_bitmaps_and_counters(struct super_block *sb)
+{
+ struct ext4_fc_replay_state *state;
+ struct inode *inode;
+ struct ext4_ext_path *path = NULL;
+ struct ext4_map_blocks map;
+ int i, ret, j;
+ ext4_lblk_t cur, end;
+
+ state = &EXT4_SB(sb)->s_fc_replay_state;
+ for (i = 0; i < state->fc_modified_inodes_used; i++) {
+ inode = ext4_iget(sb, state->fc_modified_inodes[i],
+ EXT4_IGET_NORMAL);
+ if (IS_ERR_OR_NULL(inode)) {
+ jbd_debug(1, "Inode %d not found.",
+ state->fc_modified_inodes[i]);
+ continue;
+ }
+ cur = 0;
+ end = EXT_MAX_BLOCKS;
+ while (cur < end) {
+ map.m_lblk = cur;
+ map.m_len = end - cur;
+
+ ret = ext4_map_blocks(NULL, inode, &map, 0);
+ if (ret < 0)
+ break;
+
+ if (ret > 0) {
+ path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
+ if (!IS_ERR_OR_NULL(path)) {
+ for (j = 0; j < path->p_depth; j++)
+ ext4_mb_mark_bb(inode->i_sb,
+ path[j].p_block, 1, 1);
+ ext4_ext_drop_refs(path);
+ kfree(path);
+ }
+ cur += ret;
+ ext4_mb_mark_bb(inode->i_sb, map.m_pblk,
+ map.m_len, 1);
+ } else {
+ cur = cur + (map.m_len ? map.m_len : 1);
+ }
+ }
+ iput(inode);
+ }
+}
+
+/*
+ * Check if a block is in the excluded regions for block allocation. The
+ * simple allocator that runs during the replay phase calls this function
+ * if it is okay to use a block.
+ */
+bool ext4_fc_replay_check_excluded(struct super_block *sb, ext4_fsblk_t blk)
+{
+ int i;
+ struct ext4_fc_replay_state *state;
+
+ state = &EXT4_SB(sb)->s_fc_replay_state;
+ for (i = 0; i < state->fc_regions_valid; i++) {
+ if (state->fc_regions[i].ino == 0 ||
+ state->fc_regions[i].len == 0)
+ continue;
+ if (blk >= state->fc_regions[i].pblk &&
+ blk < state->fc_regions[i].pblk + state->fc_regions[i].len)
+ return true;
+ }
+ return false;
+}
+
+/* Cleanup function called after replay */
+void ext4_fc_replay_cleanup(struct super_block *sb)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+ sbi->s_mount_state &= ~EXT4_FC_REPLAY;
+ kfree(sbi->s_fc_replay_state.fc_regions);
+ kfree(sbi->s_fc_replay_state.fc_modified_inodes);
+}
+
+/*
+ * Recovery Scan phase handler
+ *
+ * This function is called during the scan phase and is responsible
+ * for doing the following things:
+ * - Make sure the fast commit area has valid tags for replay
+ * - Count number of tags that need to be replayed by the replay handler
+ * - Verify CRC
+ * - Create a list of excluded blocks for allocation during replay phase
+ *
+ * This function returns JBD2_FC_REPLAY_CONTINUE to indicate that SCAN is
+ * incomplete and JBD2 should send more blocks. It returns JBD2_FC_REPLAY_STOP
+ * to indicate that scan has finished and JBD2 can now start replay phase.
+ * It returns a negative error on failure. At the end of a successful scan
+ * phase, sbi->s_fc_replay_state.fc_replay_num_tags is set to indicate the
+ * number of tags that need to be replayed during the replay phase.
+ */
+static int ext4_fc_replay_scan(journal_t *journal,
+ struct buffer_head *bh, int off,
+ tid_t expected_tid)
+{
+ struct super_block *sb = journal->j_private;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_fc_replay_state *state;
+ int ret = JBD2_FC_REPLAY_CONTINUE;
+ struct ext4_fc_add_range *ext;
+ struct ext4_fc_tl *tl;
+ struct ext4_fc_tail *tail;
+ __u8 *start, *end;
+ struct ext4_fc_head *head;
+ struct ext4_extent *ex;
+
+ state = &sbi->s_fc_replay_state;
+
+ start = (u8 *)bh->b_data;
+ end = (__u8 *)bh->b_data + journal->j_blocksize - 1;
+
+ if (state->fc_replay_expected_off == 0) {
+ state->fc_cur_tag = 0;
+ state->fc_replay_num_tags = 0;
+ state->fc_crc = 0;
+ state->fc_regions = NULL;
+ state->fc_regions_valid = state->fc_regions_used =
+ state->fc_regions_size = 0;
+ /* Check if we can stop early */
+ if (le16_to_cpu(((struct ext4_fc_tl *)start)->fc_tag)
+ != EXT4_FC_TAG_HEAD)
+ return 0;
+ }
+
+ if (off != state->fc_replay_expected_off) {
+ ret = -EFSCORRUPTED;
+ goto out_err;
+ }
+
+ state->fc_replay_expected_off++;
+ fc_for_each_tl(start, end, tl) {
+ jbd_debug(3, "Scan phase, tag:%s, blk %lld\n",
+ tag2str(le16_to_cpu(tl->fc_tag)), bh->b_blocknr);
+ switch (le16_to_cpu(tl->fc_tag)) {
+ case EXT4_FC_TAG_ADD_RANGE:
+ ext = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl);
+ ex = (struct ext4_extent *)&ext->fc_ex;
+ ret = ext4_fc_record_regions(sb,
+ le32_to_cpu(ext->fc_ino),
+ le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex),
+ ext4_ext_get_actual_len(ex));
+ if (ret < 0)
+ break;
+ ret = JBD2_FC_REPLAY_CONTINUE;
+ fallthrough;
+ case EXT4_FC_TAG_DEL_RANGE:
+ case EXT4_FC_TAG_LINK:
+ case EXT4_FC_TAG_UNLINK:
+ case EXT4_FC_TAG_CREAT:
+ case EXT4_FC_TAG_INODE:
+ case EXT4_FC_TAG_PAD:
+ state->fc_cur_tag++;
+ state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
+ sizeof(*tl) + ext4_fc_tag_len(tl));
+ break;
+ case EXT4_FC_TAG_TAIL:
+ state->fc_cur_tag++;
+ tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl);
+ state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
+ sizeof(*tl) +
+ offsetof(struct ext4_fc_tail,
+ fc_crc));
+ if (le32_to_cpu(tail->fc_tid) == expected_tid &&
+ le32_to_cpu(tail->fc_crc) == state->fc_crc) {
+ state->fc_replay_num_tags = state->fc_cur_tag;
+ state->fc_regions_valid =
+ state->fc_regions_used;
+ } else {
+ ret = state->fc_replay_num_tags ?
+ JBD2_FC_REPLAY_STOP : -EFSBADCRC;
+ }
+ state->fc_crc = 0;
+ break;
+ case EXT4_FC_TAG_HEAD:
+ head = (struct ext4_fc_head *)ext4_fc_tag_val(tl);
+ if (le32_to_cpu(head->fc_features) &
+ ~EXT4_FC_SUPPORTED_FEATURES) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ if (le32_to_cpu(head->fc_tid) != expected_tid) {
+ ret = JBD2_FC_REPLAY_STOP;
+ break;
+ }
+ state->fc_cur_tag++;
+ state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
+ sizeof(*tl) + ext4_fc_tag_len(tl));
+ break;
+ default:
+ ret = state->fc_replay_num_tags ?
+ JBD2_FC_REPLAY_STOP : -ECANCELED;
+ }
+ if (ret < 0 || ret == JBD2_FC_REPLAY_STOP)
+ break;
+ }
+
+out_err:
+ trace_ext4_fc_replay_scan(sb, ret, off);
+ return ret;
+}
+
+/*
+ * Main recovery path entry point.
+ * The meaning of the return codes is the same as for ext4_fc_replay_scan().
+ */
+static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
+ enum passtype pass, int off, tid_t expected_tid)
+{
+ struct super_block *sb = journal->j_private;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_fc_tl *tl;
+ __u8 *start, *end;
+ int ret = JBD2_FC_REPLAY_CONTINUE;
+ struct ext4_fc_replay_state *state = &sbi->s_fc_replay_state;
+ struct ext4_fc_tail *tail;
+
+ if (pass == PASS_SCAN) {
+ state->fc_current_pass = PASS_SCAN;
+ return ext4_fc_replay_scan(journal, bh, off, expected_tid);
+ }
+
+ if (state->fc_current_pass != pass) {
+ state->fc_current_pass = pass;
+ sbi->s_mount_state |= EXT4_FC_REPLAY;
+ }
+ if (!sbi->s_fc_replay_state.fc_replay_num_tags) {
+ jbd_debug(1, "Replay stops\n");
+ ext4_fc_set_bitmaps_and_counters(sb);
+ return 0;
+ }
+
+#ifdef CONFIG_EXT4_DEBUG
+ if (sbi->s_fc_debug_max_replay && off >= sbi->s_fc_debug_max_replay) {
+ pr_warn("Dropping fc block %d because max_replay set\n", off);
+ return JBD2_FC_REPLAY_STOP;
+ }
+#endif
+
+ start = (u8 *)bh->b_data;
+ end = (__u8 *)bh->b_data + journal->j_blocksize - 1;
+
+ fc_for_each_tl(start, end, tl) {
+ if (state->fc_replay_num_tags == 0) {
+ ret = JBD2_FC_REPLAY_STOP;
+ ext4_fc_set_bitmaps_and_counters(sb);
+ break;
+ }
+ jbd_debug(3, "Replay phase, tag:%s\n",
+ tag2str(le16_to_cpu(tl->fc_tag)));
+ state->fc_replay_num_tags--;
+ switch (le16_to_cpu(tl->fc_tag)) {
+ case EXT4_FC_TAG_LINK:
+ ret = ext4_fc_replay_link(sb, tl);
+ break;
+ case EXT4_FC_TAG_UNLINK:
+ ret = ext4_fc_replay_unlink(sb, tl);
+ break;
+ case EXT4_FC_TAG_ADD_RANGE:
+ ret = ext4_fc_replay_add_range(sb, tl);
+ break;
+ case EXT4_FC_TAG_CREAT:
+ ret = ext4_fc_replay_create(sb, tl);
+ break;
+ case EXT4_FC_TAG_DEL_RANGE:
+ ret = ext4_fc_replay_del_range(sb, tl);
+ break;
+ case EXT4_FC_TAG_INODE:
+ ret = ext4_fc_replay_inode(sb, tl);
+ break;
+ case EXT4_FC_TAG_PAD:
+ trace_ext4_fc_replay(sb, EXT4_FC_TAG_PAD, 0,
+ ext4_fc_tag_len(tl), 0);
+ break;
+ case EXT4_FC_TAG_TAIL:
+ trace_ext4_fc_replay(sb, EXT4_FC_TAG_TAIL, 0,
+ ext4_fc_tag_len(tl), 0);
+ tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl);
+ WARN_ON(le32_to_cpu(tail->fc_tid) != expected_tid);
+ break;
+ case EXT4_FC_TAG_HEAD:
+ break;
+ default:
+ trace_ext4_fc_replay(sb, le16_to_cpu(tl->fc_tag), 0,
+ ext4_fc_tag_len(tl), 0);
+ ret = -ECANCELED;
+ break;
+ }
+ if (ret < 0)
+ break;
+ ret = JBD2_FC_REPLAY_CONTINUE;
+ }
+ return ret;
+}
+
+void ext4_fc_init(struct super_block *sb, journal_t *journal)
+{
+ /*
+	 * We set the replay callback even if fast commit is disabled because
+	 * we may still have fast commit blocks that need to be replayed even if
+ * fast commit has now been turned off.
+ */
+ journal->j_fc_replay_callback = ext4_fc_replay;
+ if (!test_opt2(sb, JOURNAL_FAST_COMMIT))
+ return;
+ journal->j_fc_cleanup_callback = ext4_fc_cleanup;
+ if (jbd2_fc_init(journal, EXT4_NUM_FC_BLKS)) {
+ pr_warn("Error while enabling fast commits, turning off.");
+ ext4_clear_feature_fast_commit(sb);
+ }
+}
+
+const char *fc_ineligible_reasons[] = {
+ "Extended attributes changed",
+ "Cross rename",
+ "Journal flag changed",
+ "Insufficient memory",
+ "Swap boot",
+ "Resize",
+ "Dir renamed",
+ "Falloc range op",
+ "FC Commit Failed"
+};
+
+int ext4_fc_info_show(struct seq_file *seq, void *v)
+{
+ struct ext4_sb_info *sbi = EXT4_SB((struct super_block *)seq->private);
+ struct ext4_fc_stats *stats = &sbi->s_fc_stats;
+ int i;
+
+ if (v != SEQ_START_TOKEN)
+ return 0;
+
+ seq_printf(seq,
+ "fc stats:\n%ld commits\n%ld ineligible\n%ld numblks\n%lluus avg_commit_time\n",
+ stats->fc_num_commits, stats->fc_ineligible_commits,
+ stats->fc_numblks,
+ div_u64(sbi->s_fc_avg_commit_time, 1000));
+ seq_puts(seq, "Ineligible reasons:\n");
+ for (i = 0; i < EXT4_FC_REASON_MAX; i++)
+ seq_printf(seq, "\"%s\":\t%d\n", fc_ineligible_reasons[i],
+ stats->fc_ineligible_reason_count[i]);
+
+ return 0;
+}
+
+int __init ext4_fc_init_dentry_cache(void)
+{
+ ext4_fc_dentry_cachep = KMEM_CACHE(ext4_fc_dentry_update,
+ SLAB_RECLAIM_ACCOUNT);
+
+ if (ext4_fc_dentry_cachep == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
diff --git a/fs/ext4/fast_commit.h b/fs/ext4/fast_commit.h
new file mode 100644
index 000000000000..06907d485989
--- /dev/null
+++ b/fs/ext4/fast_commit.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __FAST_COMMIT_H__
+#define __FAST_COMMIT_H__
+
+/* Number of blocks in journal area to allocate for fast commits */
+#define EXT4_NUM_FC_BLKS 256
+
+/* Fast commit tags */
+#define EXT4_FC_TAG_ADD_RANGE 0x0001
+#define EXT4_FC_TAG_DEL_RANGE 0x0002
+#define EXT4_FC_TAG_CREAT 0x0003
+#define EXT4_FC_TAG_LINK 0x0004
+#define EXT4_FC_TAG_UNLINK 0x0005
+#define EXT4_FC_TAG_INODE 0x0006
+#define EXT4_FC_TAG_PAD 0x0007
+#define EXT4_FC_TAG_TAIL 0x0008
+#define EXT4_FC_TAG_HEAD 0x0009
+
+#define EXT4_FC_SUPPORTED_FEATURES 0x0
+
+/* On disk fast commit tlv value structures */
+
+/* Fast commit on disk tag length structure */
+struct ext4_fc_tl {
+ __le16 fc_tag;
+ __le16 fc_len;
+};
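+
+/*
+ * Every fast commit record is a tlv: this header immediately followed by
+ * fc_len bytes of value. For example, an EXT4_FC_TAG_DEL_RANGE record
+ * occupies sizeof(struct ext4_fc_tl) + sizeof(struct ext4_fc_del_range)
+ * = 4 + 12 = 16 bytes on disk.
+ */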
+
+/* Value structure for tag EXT4_FC_TAG_HEAD. */
+struct ext4_fc_head {
+ __le32 fc_features;
+ __le32 fc_tid;
+};
+
+/* Value structure for EXT4_FC_TAG_ADD_RANGE. */
+struct ext4_fc_add_range {
+ __le32 fc_ino;
+ __u8 fc_ex[12];
+};
+
+/* Value structure for tag EXT4_FC_TAG_DEL_RANGE. */
+struct ext4_fc_del_range {
+ __le32 fc_ino;
+ __le32 fc_lblk;
+ __le32 fc_len;
+};
+
+/*
+ * This is the value structure for tags EXT4_FC_TAG_CREAT, EXT4_FC_TAG_LINK
+ * and EXT4_FC_TAG_UNLINK.
+ */
+struct ext4_fc_dentry_info {
+ __le32 fc_parent_ino;
+ __le32 fc_ino;
+ u8 fc_dname[0];
+};
+
+/* Value structure for tag EXT4_FC_TAG_INODE. */
+struct ext4_fc_inode {
+ __le32 fc_ino;
+ __u8 fc_raw_inode[0];
+};
+
+/* Value structure for tag EXT4_FC_TAG_TAIL. */
+struct ext4_fc_tail {
+ __le32 fc_tid;
+ __le32 fc_crc;
+};
+
+/*
+ * In-memory list of dentry updates performed on the file system, used by
+ * the fast commit code.
+ */
+struct ext4_fc_dentry_update {
+ int fcd_op; /* Type of update create / unlink / link */
+ int fcd_parent; /* Parent inode number */
+ int fcd_ino; /* Inode number */
+ struct qstr fcd_name; /* Dirent name */
+ unsigned char fcd_iname[DNAME_INLINE_LEN]; /* Dirent name string */
+ struct list_head fcd_list;
+};
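+
+/*
+ * fcd_name.name points at fcd_iname when the dirent name fits within
+ * DNAME_INLINE_LEN bytes; longer names are allocated separately and
+ * freed in ext4_fc_cleanup().
+ */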
+
+/*
+ * Fast commit reason codes
+ */
+enum {
+ /*
+ * Commit status codes:
+ */
+ EXT4_FC_REASON_OK = 0,
+ EXT4_FC_REASON_INELIGIBLE,
+ EXT4_FC_REASON_ALREADY_COMMITTED,
+ EXT4_FC_REASON_FC_START_FAILED,
+ EXT4_FC_REASON_FC_FAILED,
+
+ /*
+	 * Fast commit ineligibility reasons:
+ */
+ EXT4_FC_REASON_XATTR = 0,
+ EXT4_FC_REASON_CROSS_RENAME,
+ EXT4_FC_REASON_JOURNAL_FLAG_CHANGE,
+ EXT4_FC_REASON_MEM,
+ EXT4_FC_REASON_SWAP_BOOT,
+ EXT4_FC_REASON_RESIZE,
+ EXT4_FC_REASON_RENAME_DIR,
+ EXT4_FC_REASON_FALLOC_RANGE,
+ EXT4_FC_COMMIT_FAILED,
+ EXT4_FC_REASON_MAX
+};
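+
+/*
+ * The two groups above are independent namespaces: the commit status
+ * codes describe the outcome of ext4_fc_commit(), while the
+ * ineligibility reasons index fc_ineligible_reasons[] and
+ * ext4_fc_stats.fc_ineligible_reason_count[]; EXT4_FC_REASON_MAX bounds
+ * only the latter group.
+ */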
+
+struct ext4_fc_stats {
+ unsigned int fc_ineligible_reason_count[EXT4_FC_REASON_MAX];
+ unsigned long fc_num_commits;
+ unsigned long fc_ineligible_commits;
+ unsigned long fc_numblks;
+};
+
+#define EXT4_FC_REPLAY_REALLOC_INCREMENT 4
+
+/*
+ * Physical block regions added to different inodes due to fast commit
+ * recovery. These are set during the SCAN phase. During the replay phase,
+ * our allocator excludes these from its allocation. This ensures that
+ * we don't accidentally allocate a block that is going to be used by
+ * another inode.
+ */
+struct ext4_fc_alloc_region {
+ ext4_lblk_t lblk;
+ ext4_fsblk_t pblk;
+ int ino, len;
+};
+
+/*
+ * Fast commit replay state.
+ */
+struct ext4_fc_replay_state {
+ int fc_replay_num_tags;
+ int fc_replay_expected_off;
+ int fc_current_pass;
+ int fc_cur_tag;
+ int fc_crc;
+ struct ext4_fc_alloc_region *fc_regions;
+ int fc_regions_size, fc_regions_used, fc_regions_valid;
+ int *fc_modified_inodes;
+ int fc_modified_inodes_used, fc_modified_inodes_size;
+};
+
+#define region_last(__region) (((__region)->lblk) + ((__region)->len) - 1)
+
+#define fc_for_each_tl(__start, __end, __tl) \
+	for ((__tl) = (struct ext4_fc_tl *)(__start);		\
+	     (u8 *)(__tl) < (u8 *)(__end);			\
+	     (__tl) = (struct ext4_fc_tl *)((u8 *)(__tl) +	\
+			sizeof(struct ext4_fc_tl) +		\
+			le16_to_cpu((__tl)->fc_len)))
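+
+/*
+ * Usage sketch, mirroring the scan and replay loops in fast_commit.c:
+ *
+ *	struct ext4_fc_tl *tl;
+ *	u8 *start = (u8 *)bh->b_data;
+ *	u8 *end = start + journal->j_blocksize - 1;
+ *
+ *	fc_for_each_tl(start, end, tl) {
+ *		switch (le16_to_cpu(tl->fc_tag)) {
+ *		...
+ *		}
+ *	}
+ */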
+
+
+#endif /* __FAST_COMMIT_H__ */
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 7d61069531d3..d85412d12e3a 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -260,6 +260,7 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
if (iocb->ki_flags & IOCB_NOWAIT)
return -EOPNOTSUPP;
+ ext4_fc_start_update(inode);
inode_lock(inode);
ret = ext4_write_checks(iocb, from);
if (ret <= 0)
@@ -271,6 +272,7 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
out:
inode_unlock(inode);
+ ext4_fc_stop_update(inode);
if (likely(ret > 0)) {
iocb->ki_pos += ret;
ret = generic_write_sync(iocb, ret);
@@ -534,7 +536,9 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
goto out;
}
+ ext4_fc_start_update(inode);
ret = ext4_orphan_add(handle, inode);
+ ext4_fc_stop_update(inode);
if (ret) {
ext4_journal_stop(handle);
goto out;
@@ -656,8 +660,8 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
#endif
if (iocb->ki_flags & IOCB_DIRECT)
return ext4_dio_write_iter(iocb, from);
-
- return ext4_buffered_write_iter(iocb, from);
+ else
+ return ext4_buffered_write_iter(iocb, from);
}
#ifdef CONFIG_FS_DAX
@@ -757,6 +761,7 @@ static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
if (!daxdev_mapping_supported(vma, dax_dev))
return -EOPNOTSUPP;
+ ext4_fc_start_update(inode);
file_accessed(file);
if (IS_DAX(file_inode(file))) {
vma->vm_ops = &ext4_dax_vm_ops;
@@ -764,6 +769,7 @@ static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
} else {
vma->vm_ops = &ext4_file_vm_ops;
}
+ ext4_fc_stop_update(inode);
return 0;
}
@@ -844,7 +850,7 @@ static int ext4_file_open(struct inode *inode, struct file *filp)
return ret;
}
- filp->f_mode |= FMODE_NOWAIT;
+ filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
return dquot_file_open(inode, filp);
}
diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
index dbccf46f1770..b232c2767534 100644
--- a/fs/ext4/fsmap.c
+++ b/fs/ext4/fsmap.c
@@ -108,6 +108,9 @@ static int ext4_getfsmap_helper(struct super_block *sb,
/* Are we just counting mappings? */
if (info->gfi_head->fmh_count == 0) {
+ if (info->gfi_head->fmh_entries == UINT_MAX)
+ return EXT4_QUERY_RANGE_ABORT;
+
if (rec_fsblk > info->gfi_next_fsblk)
info->gfi_head->fmh_entries++;
@@ -571,8 +574,8 @@ static bool ext4_getfsmap_is_valid_device(struct super_block *sb,
if (fm->fmr_device == 0 || fm->fmr_device == UINT_MAX ||
fm->fmr_device == new_encode_dev(sb->s_bdev->bd_dev))
return true;
- if (EXT4_SB(sb)->journal_bdev &&
- fm->fmr_device == new_encode_dev(EXT4_SB(sb)->journal_bdev->bd_dev))
+ if (EXT4_SB(sb)->s_journal_bdev &&
+ fm->fmr_device == new_encode_dev(EXT4_SB(sb)->s_journal_bdev->bd_dev))
return true;
return false;
}
@@ -642,9 +645,9 @@ int ext4_getfsmap(struct super_block *sb, struct ext4_fsmap_head *head,
memset(handlers, 0, sizeof(handlers));
handlers[0].gfd_dev = new_encode_dev(sb->s_bdev->bd_dev);
handlers[0].gfd_fn = ext4_getfsmap_datadev;
- if (EXT4_SB(sb)->journal_bdev) {
+ if (EXT4_SB(sb)->s_journal_bdev) {
handlers[1].gfd_dev = new_encode_dev(
- EXT4_SB(sb)->journal_bdev->bd_dev);
+ EXT4_SB(sb)->s_journal_bdev->bd_dev);
handlers[1].gfd_fn = ext4_getfsmap_logdev;
}
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 1d668c8f131f..81a545fd14a3 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -112,7 +112,7 @@ static int ext4_fsync_journal(struct inode *inode, bool datasync,
!jbd2_trans_will_send_data_barrier(journal, commit_tid))
*needs_barrier = true;
- return jbd2_complete_transaction(journal, commit_tid);
+ return ext4_fc_commit(journal, commit_tid);
}
/*
@@ -150,7 +150,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
ret = file_write_and_wait_range(file, start, end);
if (ret)
- return ret;
+ goto out;
/*
* data=writeback,ordered:
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 698ca4a4db5f..b215c564bc31 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -82,7 +82,12 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
struct buffer_head *bh)
{
ext4_fsblk_t blk;
- struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);
+ struct ext4_group_info *grp;
+
+ if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
+ return 0;
+
+ grp = ext4_get_group_info(sb, block_group);
if (buffer_verified(bh))
return 0;
@@ -189,10 +194,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
* submit the buffer_head for reading
*/
trace_ext4_load_inode_bitmap(sb, block_group);
- bh->b_end_io = ext4_end_bitmap_read;
- get_bh(bh);
- submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
- wait_on_buffer(bh);
+ ext4_read_bh(bh, REQ_META | REQ_PRIO, ext4_end_bitmap_read);
ext4_simulate_fail_bh(sb, bh, EXT4_SIM_IBITMAP_EIO);
if (!buffer_uptodate(bh)) {
put_bh(bh);
@@ -284,15 +286,17 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
/* Don't bother if the inode bitmap is corrupt. */
- grp = ext4_get_group_info(sb, block_group);
if (IS_ERR(bitmap_bh)) {
fatal = PTR_ERR(bitmap_bh);
bitmap_bh = NULL;
goto error_return;
}
- if (unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) {
- fatal = -EFSCORRUPTED;
- goto error_return;
+ if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
+ grp = ext4_get_group_info(sb, block_group);
+ if (unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) {
+ fatal = -EFSCORRUPTED;
+ goto error_return;
+ }
}
BUFFER_TRACE(bitmap_bh, "get_write_access");
@@ -742,6 +746,122 @@ not_found:
return 1;
}
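+
+/*
+ * Mark inode number ino as used on disk: set its bit in the inode bitmap,
+ * initialize the block bitmap if necessary, and update the group
+ * descriptor fields and checksums. This runs without a journal handle
+ * (note the NULL handles passed to ext4_handle_dirty_metadata() below),
+ * as it is used by the fast commit replay path.
+ */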
+int ext4_mark_inode_used(struct super_block *sb, int ino)
+{
+ unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
+ struct buffer_head *inode_bitmap_bh = NULL, *group_desc_bh = NULL;
+ struct ext4_group_desc *gdp;
+ ext4_group_t group;
+ int bit;
+ int err = -EFSCORRUPTED;
+
+ if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
+ goto out;
+
+ group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
+ bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
+ inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
+ if (IS_ERR(inode_bitmap_bh))
+ return PTR_ERR(inode_bitmap_bh);
+
+ if (ext4_test_bit(bit, inode_bitmap_bh->b_data)) {
+ err = 0;
+ goto out;
+ }
+
+ gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
+ if (!gdp || !group_desc_bh) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ ext4_set_bit(bit, inode_bitmap_bh->b_data);
+
+ BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
+ err = ext4_handle_dirty_metadata(NULL, NULL, inode_bitmap_bh);
+ if (err) {
+ ext4_std_error(sb, err);
+ goto out;
+ }
+ err = sync_dirty_buffer(inode_bitmap_bh);
+ if (err) {
+ ext4_std_error(sb, err);
+ goto out;
+ }
+
+ /* We may have to initialize the block bitmap if it isn't already */
+ if (ext4_has_group_desc_csum(sb) &&
+ gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+ struct buffer_head *block_bitmap_bh;
+
+ block_bitmap_bh = ext4_read_block_bitmap(sb, group);
+ if (IS_ERR(block_bitmap_bh)) {
+ err = PTR_ERR(block_bitmap_bh);
+ goto out;
+ }
+
+ BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
+ err = ext4_handle_dirty_metadata(NULL, NULL, block_bitmap_bh);
+ sync_dirty_buffer(block_bitmap_bh);
+
+ /* recheck and clear flag under lock if we still need to */
+ ext4_lock_group(sb, group);
+ if (ext4_has_group_desc_csum(sb) &&
+ (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
+ gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
+ ext4_free_group_clusters_set(sb, gdp,
+ ext4_free_clusters_after_init(sb, group, gdp));
+ ext4_block_bitmap_csum_set(sb, group, gdp,
+ block_bitmap_bh);
+ ext4_group_desc_csum_set(sb, group, gdp);
+ }
+ ext4_unlock_group(sb, group);
+ brelse(block_bitmap_bh);
+
+ if (err) {
+ ext4_std_error(sb, err);
+ goto out;
+ }
+ }
+
+ /* Update the relevant bg descriptor fields */
+ if (ext4_has_group_desc_csum(sb)) {
+ int free;
+
+ ext4_lock_group(sb, group); /* while we modify the bg desc */
+ free = EXT4_INODES_PER_GROUP(sb) -
+ ext4_itable_unused_count(sb, gdp);
+ if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
+ gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
+ free = 0;
+ }
+
+ /*
+ * Check the relative inode number against the last used
+ * relative inode number in this group. if it is greater
+ * we need to update the bg_itable_unused count
+ */
+ if (bit >= free)
+ ext4_itable_unused_set(sb, gdp,
+ (EXT4_INODES_PER_GROUP(sb) - bit - 1));
+ } else {
+ ext4_lock_group(sb, group);
+ }
+
+ ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
+ if (ext4_has_group_desc_csum(sb)) {
+ ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
+ EXT4_INODES_PER_GROUP(sb) / 8);
+ ext4_group_desc_csum_set(sb, group, gdp);
+ }
+
+ ext4_unlock_group(sb, group);
+ err = ext4_handle_dirty_metadata(NULL, NULL, group_desc_bh);
+ sync_dirty_buffer(group_desc_bh);
+out:
+ return err;
+}
+
static int ext4_xattr_credits_for_new_inode(struct inode *dir, mode_t mode,
bool encrypt)
{
@@ -818,7 +938,7 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
struct inode *ret;
ext4_group_t i;
ext4_group_t flex_group;
- struct ext4_group_info *grp;
+ struct ext4_group_info *grp = NULL;
bool encrypt = false;
/* Cannot create files in a deleted directory */
@@ -918,15 +1038,21 @@ got_group:
if (ext4_free_inodes_count(sb, gdp) == 0)
goto next_group;
- grp = ext4_get_group_info(sb, group);
- /* Skip groups with already-known suspicious inode tables */
- if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
- goto next_group;
+ if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
+ grp = ext4_get_group_info(sb, group);
+ /*
+ * Skip groups with already-known suspicious inode
+ * tables
+ */
+ if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
+ goto next_group;
+ }
brelse(inode_bitmap_bh);
inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
/* Skip groups with suspicious inode tables */
- if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp) ||
+ if (((!(sbi->s_mount_state & EXT4_FC_REPLAY))
+ && EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) ||
IS_ERR(inode_bitmap_bh)) {
inode_bitmap_bh = NULL;
goto next_group;
@@ -945,7 +1071,7 @@ repeat_in_this_group:
goto next_group;
}
- if (!handle) {
+ if ((!(sbi->s_mount_state & EXT4_FC_REPLAY)) && !handle) {
BUG_ON(nblocks <= 0);
handle = __ext4_journal_start_sb(dir->i_sb, line_no,
handle_type, nblocks, 0,
@@ -1049,9 +1175,15 @@ got:
/* Update the relevant bg descriptor fields */
if (ext4_has_group_desc_csum(sb)) {
int free;
- struct ext4_group_info *grp = ext4_get_group_info(sb, group);
-
- down_read(&grp->alloc_sem); /* protect vs itable lazyinit */
+ struct ext4_group_info *grp = NULL;
+
+ if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
+ grp = ext4_get_group_info(sb, group);
+ down_read(&grp->alloc_sem); /*
+ * protect vs itable
+ * lazyinit
+ */
+ }
ext4_lock_group(sb, group); /* while we modify the bg desc */
free = EXT4_INODES_PER_GROUP(sb) -
ext4_itable_unused_count(sb, gdp);
@@ -1067,7 +1199,8 @@ got:
if (ino > free)
ext4_itable_unused_set(sb, gdp,
(EXT4_INODES_PER_GROUP(sb) - ino));
- up_read(&grp->alloc_sem);
+ if (!(sbi->s_mount_state & EXT4_FC_REPLAY))
+ up_read(&grp->alloc_sem);
} else {
ext4_lock_group(sb, group);
}
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 80c9f33800be..05efa682bc2f 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -163,7 +163,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
}
if (!bh_uptodate_or_lock(bh)) {
- if (bh_submit_read(bh) < 0) {
+ if (ext4_read_bh(bh, 0, NULL) < 0) {
put_bh(bh);
goto failure;
}
@@ -593,7 +593,8 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
if (ext4_has_feature_bigalloc(inode->i_sb)) {
EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
"non-extent mapped inodes with bigalloc");
- return -EFSCORRUPTED;
+ err = -EFSCORRUPTED;
+ goto out;
}
/* Set up for the direct block allocation */
@@ -1012,14 +1013,14 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
}
/* Go read the buffer for the next level down */
- bh = sb_bread(inode->i_sb, nr);
+ bh = ext4_sb_bread(inode->i_sb, nr, 0);
/*
* A read failure? Report error and clear slot
* (should be rare).
*/
- if (!bh) {
- ext4_error_inode_block(inode, nr, EIO,
+ if (IS_ERR(bh)) {
+ ext4_error_inode_block(inode, nr, -PTR_ERR(bh),
"Read failure");
continue;
}
@@ -1033,7 +1034,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
brelse(bh);
/*
- * Everything below this this pointer has been
+ * Everything below this pointer has been
* released. Now let this top-of-subtree go.
*
* We want the freeing of this indirect block to be
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 75c97bca0815..caa51473207d 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -354,7 +354,7 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
if (error)
goto out;
- /* Update the xttr entry. */
+ /* Update the xattr entry. */
i.value = value;
i.value_len = len;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index bf596467c234..03c2253005f0 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -101,8 +101,8 @@ static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
return provided == calculated;
}
-static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
- struct ext4_inode_info *ei)
+void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
+ struct ext4_inode_info *ei)
{
__u32 csum;
@@ -514,7 +514,8 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
return -EFSCORRUPTED;
/* Lookup extent status tree firstly */
- if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
+ if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) &&
+ ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
map->m_pblk = ext4_es_pblock(&es) +
map->m_lblk - es.es_lblk;
@@ -729,6 +730,8 @@ out_sem:
if (ret)
return ret;
}
+ ext4_fc_track_range(inode, map->m_lblk,
+ map->m_lblk + map->m_len - 1);
}
if (retval < 0)
@@ -825,7 +828,8 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
int create = map_flags & EXT4_GET_BLOCKS_CREATE;
int err;
- J_ASSERT(handle != NULL || create == 0);
+ J_ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ || handle != NULL || create == 0);
map.m_lblk = block;
map.m_len = 1;
@@ -841,7 +845,8 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
return ERR_PTR(-ENOMEM);
if (map.m_flags & EXT4_MAP_NEW) {
J_ASSERT(create != 0);
- J_ASSERT(handle != NULL);
+ J_ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ || (handle != NULL));
/*
* Now that we do not always journal data, we should
@@ -878,18 +883,20 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
ext4_lblk_t block, int map_flags)
{
struct buffer_head *bh;
+ int ret;
bh = ext4_getblk(handle, inode, block, map_flags);
if (IS_ERR(bh))
return bh;
if (!bh || ext4_buffer_uptodate(bh))
return bh;
- ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
- wait_on_buffer(bh);
- if (buffer_uptodate(bh))
- return bh;
- put_bh(bh);
- return ERR_PTR(-EIO);
+
+ ret = ext4_read_bh_lock(bh, REQ_META | REQ_PRIO, true);
+ if (ret) {
+ put_bh(bh);
+ return ERR_PTR(ret);
+ }
+ return bh;
}
/* Read a contiguous batch of blocks. */
@@ -910,8 +917,7 @@ int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
for (i = 0; i < bh_count; i++)
/* Note that NULL bhs[i] is valid because of holes. */
if (bhs[i] && !ext4_buffer_uptodate(bhs[i]))
- ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1,
- &bhs[i]);
+ ext4_read_bh_lock(bhs[i], REQ_META | REQ_PRIO, false);
if (!wait)
return 0;
@@ -1081,7 +1087,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
!buffer_unwritten(bh) &&
(block_start < from || block_end > to)) {
- ll_rw_block(REQ_OP_READ, 0, 1, &bh);
+ ext4_read_bh_lock(bh, 0, false);
wait[nr_wait++] = bh;
}
}
@@ -1912,6 +1918,9 @@ static int __ext4_journalled_writepage(struct page *page,
}
if (ret == 0)
ret = err;
+ err = ext4_jbd2_inode_add_write(handle, inode, 0, len);
+ if (ret == 0)
+ ret = err;
EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
err = ext4_journal_stop(handle);
if (!ret)
@@ -2254,7 +2263,7 @@ static int mpage_process_page(struct mpage_da_data *mpd, struct page *page,
err = PTR_ERR(io_end_vec);
goto out;
}
- io_end_vec->offset = mpd->map.m_lblk << blkbits;
+ io_end_vec->offset = (loff_t)mpd->map.m_lblk << blkbits;
}
*map_bh = true;
goto out;
@@ -2785,7 +2794,7 @@ retry:
* ext4_journal_stop() can wait for transaction commit
* to finish which may depend on writeback of pages to
* complete or on page lock to be released. In that
- * case, we have to wait until after after we have
+ * case, we have to wait until after we have
* submitted all the IO, released page locks we hold,
* and dropped io_end reference (for extent conversion
* to be able to complete) before stopping the handle.
@@ -3296,9 +3305,14 @@ static bool ext4_inode_datasync_dirty(struct inode *inode)
{
journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
- if (journal)
- return !jbd2_transaction_committed(journal,
- EXT4_I(inode)->i_datasync_tid);
+ if (journal) {
+ if (jbd2_transaction_committed(journal,
+ EXT4_I(inode)->i_datasync_tid))
+ return true;
+ return atomic_read(&EXT4_SB(inode->i_sb)->s_fc_subtid) >=
+ EXT4_I(inode)->i_fc_committed_subtid;
+ }
+
/* Any metadata buffers to write? */
if (!list_empty(&inode->i_mapping->private_list))
return true;
@@ -3436,14 +3450,26 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
- if (flags & IOMAP_WRITE)
+ if (flags & IOMAP_WRITE) {
+ /*
+	 * If the blocks are already allocated, we don't need to start a
+	 * journal txn and can directly return the mapping information.
+	 * This could boost performance
+ * especially in multi-threaded overwrite requests.
+ */
+ if (offset + length <= i_size_read(inode)) {
+ ret = ext4_map_blocks(NULL, inode, &map, 0);
+ if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
+ goto out;
+ }
ret = ext4_iomap_alloc(inode, &map, flags);
- else
+ } else {
ret = ext4_map_blocks(NULL, inode, &map, 0);
+ }
if (ret < 0)
return ret;
-
+out:
ext4_set_iomap(inode, iomap, &map, offset, length);
return 0;
@@ -3601,6 +3627,13 @@ static int ext4_set_page_dirty(struct page *page)
return __set_page_dirty_buffers(page);
}
+static int ext4_iomap_swap_activate(struct swap_info_struct *sis,
+ struct file *file, sector_t *span)
+{
+ return iomap_swapfile_activate(sis, file, span,
+ &ext4_iomap_report_ops);
+}
+
static const struct address_space_operations ext4_aops = {
.readpage = ext4_readpage,
.readahead = ext4_readahead,
@@ -3616,6 +3649,7 @@ static const struct address_space_operations ext4_aops = {
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
+ .swap_activate = ext4_iomap_swap_activate,
};
static const struct address_space_operations ext4_journalled_aops = {
@@ -3632,6 +3666,7 @@ static const struct address_space_operations ext4_journalled_aops = {
.direct_IO = noop_direct_IO,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
+ .swap_activate = ext4_iomap_swap_activate,
};
static const struct address_space_operations ext4_da_aops = {
@@ -3649,6 +3684,7 @@ static const struct address_space_operations ext4_da_aops = {
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
+ .swap_activate = ext4_iomap_swap_activate,
};
static const struct address_space_operations ext4_dax_aops = {
@@ -3657,6 +3693,7 @@ static const struct address_space_operations ext4_dax_aops = {
.set_page_dirty = noop_set_page_dirty,
.bmap = ext4_bmap,
.invalidatepage = noop_invalidatepage,
+ .swap_activate = ext4_iomap_swap_activate,
};
void ext4_set_aops(struct inode *inode)
@@ -3730,11 +3767,8 @@ static int __ext4_block_zero_page_range(handle_t *handle,
set_buffer_uptodate(bh);
if (!buffer_uptodate(bh)) {
- err = -EIO;
- ll_rw_block(REQ_OP_READ, 0, 1, &bh);
- wait_on_buffer(bh);
- /* Uhhuh. Read error. Complain and punt. */
- if (!buffer_uptodate(bh))
+ err = ext4_read_bh_lock(bh, 0, true);
+ if (err)
goto unlock;
if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
/* We expect the key to be set. */
@@ -4073,6 +4107,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
up_write(&EXT4_I(inode)->i_data_sem);
}
+ ext4_fc_track_range(inode, first_block, stop_block);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
@@ -4252,22 +4287,22 @@ out_trace:
* data in memory that is needed to recreate the on-disk version of this
* inode.
*/
-static int __ext4_get_inode_loc(struct inode *inode,
- struct ext4_iloc *iloc, int in_mem)
+static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
+ struct ext4_iloc *iloc, int in_mem,
+ ext4_fsblk_t *ret_block)
{
struct ext4_group_desc *gdp;
struct buffer_head *bh;
- struct super_block *sb = inode->i_sb;
ext4_fsblk_t block;
struct blk_plug plug;
int inodes_per_block, inode_offset;
iloc->bh = NULL;
- if (inode->i_ino < EXT4_ROOT_INO ||
- inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
+ if (ino < EXT4_ROOT_INO ||
+ ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
return -EFSCORRUPTED;
- iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
+ iloc->block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
if (!gdp)
return -EIO;
@@ -4276,7 +4311,7 @@ static int __ext4_get_inode_loc(struct inode *inode,
* Figure out the offset within the block group inode table
*/
inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
- inode_offset = ((inode->i_ino - 1) %
+ inode_offset = ((ino - 1) %
EXT4_INODES_PER_GROUP(sb));
block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
@@ -4289,16 +4324,7 @@ static int __ext4_get_inode_loc(struct inode *inode,
if (!buffer_uptodate(bh)) {
lock_buffer(bh);
- /*
- * If the buffer has the write error flag, we have failed
- * to write out another inode in the same block. In this
- * case, we don't have to read the block because we may
- * read the old inode data successfully.
- */
- if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
- set_buffer_uptodate(bh);
-
- if (buffer_uptodate(bh)) {
+ if (ext4_buffer_uptodate(bh)) {
/* someone brought it uptodate while we waited */
unlock_buffer(bh);
goto has_buffer;
@@ -4369,7 +4395,7 @@ make_io:
if (end > table)
end = table;
while (b <= end)
- sb_breadahead_unmovable(sb, b++);
+ ext4_sb_breadahead_unmovable(sb, b++);
}
/*
@@ -4377,16 +4403,14 @@ make_io:
* has in-inode xattrs, or we don't have this inode in memory.
* Read the block from disk.
*/
- trace_ext4_load_inode(inode);
- get_bh(bh);
- bh->b_end_io = end_buffer_read_sync;
- submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
+ trace_ext4_load_inode(sb, ino);
+ ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL);
blk_finish_plug(&plug);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
simulate_eio:
- ext4_error_inode_block(inode, block, EIO,
- "unable to read itable block");
+ if (ret_block)
+ *ret_block = block;
brelse(bh);
return -EIO;
}
@@ -4396,11 +4420,43 @@ has_buffer:
return 0;
}
+static int __ext4_get_inode_loc_noinmem(struct inode *inode,
+ struct ext4_iloc *iloc)
+{
+ ext4_fsblk_t err_blk;
+ int ret;
+
+ ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, iloc, 0,
+ &err_blk);
+
+ if (ret == -EIO)
+ ext4_error_inode_block(inode, err_blk, EIO,
+ "unable to read itable block");
+
+ return ret;
+}
+
int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
{
+ ext4_fsblk_t err_blk;
+ int ret;
+
/* We have all inode data except xattrs in memory here. */
- return __ext4_get_inode_loc(inode, iloc,
- !ext4_test_inode_state(inode, EXT4_STATE_XATTR));
+ ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, iloc,
+ !ext4_test_inode_state(inode, EXT4_STATE_XATTR), &err_blk);
+
+ if (ret == -EIO)
+ ext4_error_inode_block(inode, err_blk, EIO,
+ "unable to read itable block");
+
+ return ret;
+}
+
+
+int ext4_get_fc_inode_loc(struct super_block *sb, unsigned long ino,
+ struct ext4_iloc *iloc)
+{
+ return __ext4_get_inode_loc(sb, ino, iloc, 0, NULL);
}
static bool ext4_should_enable_dax(struct inode *inode)
@@ -4566,7 +4622,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
ei = EXT4_I(inode);
iloc.bh = NULL;
- ret = __ext4_get_inode_loc(inode, &iloc, 0);
+ ret = __ext4_get_inode_loc_noinmem(inode, &iloc);
if (ret < 0)
goto bad_inode;
raw_inode = ext4_raw_inode(&iloc);
@@ -4612,10 +4668,11 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
sizeof(gen));
}
- if (!ext4_inode_csum_verify(inode, raw_inode, ei) ||
- ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) {
- ext4_error_inode_err(inode, function, line, 0, EFSBADCRC,
- "iget: checksum invalid");
+ if ((!ext4_inode_csum_verify(inode, raw_inode, ei) ||
+ ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) &&
+ (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))) {
+ ext4_error_inode_err(inode, function, line, 0,
+ EFSBADCRC, "iget: checksum invalid");
ret = -EFSBADCRC;
goto bad_inode;
}
@@ -4703,6 +4760,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
for (block = 0; block < EXT4_N_BLOCKS; block++)
ei->i_data[block] = raw_inode->i_block[block];
INIT_LIST_HEAD(&ei->i_orphan);
+ ext4_fc_init_inode(&ei->vfs_inode);
/*
* Set transaction id's of transactions that have to be committed
@@ -4768,9 +4826,10 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
goto bad_inode;
} else if (!ext4_has_inline_data(inode)) {
/* validate the block references in the inode */
- if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- (S_ISLNK(inode->i_mode) &&
- !ext4_inode_is_fast_symlink(inode))) {
+ if (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) &&
+ (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ (S_ISLNK(inode->i_mode) &&
+ !ext4_inode_is_fast_symlink(inode)))) {
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
ret = ext4_ext_check_inode(inode);
else
@@ -4971,6 +5030,12 @@ static int ext4_do_update_inode(handle_t *handle,
if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
+ err = ext4_inode_blocks_set(handle, raw_inode, ei);
+ if (err) {
+ spin_unlock(&ei->i_raw_lock);
+ goto out_brelse;
+ }
+
raw_inode->i_mode = cpu_to_le16(inode->i_mode);
i_uid = i_uid_read(inode);
i_gid = i_gid_read(inode);
@@ -5004,11 +5069,6 @@ static int ext4_do_update_inode(handle_t *handle,
EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
- err = ext4_inode_blocks_set(handle, raw_inode, ei);
- if (err) {
- spin_unlock(&ei->i_raw_lock);
- goto out_brelse;
- }
raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
@@ -5149,12 +5209,12 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
return 0;
- err = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
+ err = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
EXT4_I(inode)->i_sync_tid);
} else {
struct ext4_iloc iloc;
- err = __ext4_get_inode_loc(inode, &iloc, 0);
+ err = __ext4_get_inode_loc_noinmem(inode, &iloc);
if (err)
return err;
/*
@@ -5278,6 +5338,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
if (error)
return error;
}
+ ext4_fc_start_update(inode);
if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
(ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
handle_t *handle;
@@ -5301,6 +5362,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
if (error) {
ext4_journal_stop(handle);
+ ext4_fc_stop_update(inode);
return error;
}
/* Update corresponding info in inode so that everything is in
@@ -5323,11 +5385,15 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- if (attr->ia_size > sbi->s_bitmap_maxbytes)
+ if (attr->ia_size > sbi->s_bitmap_maxbytes) {
+ ext4_fc_stop_update(inode);
return -EFBIG;
+ }
}
- if (!S_ISREG(inode->i_mode))
+ if (!S_ISREG(inode->i_mode)) {
+ ext4_fc_stop_update(inode);
return -EINVAL;
+ }
if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
inode_inc_iversion(inode);
@@ -5351,7 +5417,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
rc = ext4_break_layouts(inode);
if (rc) {
up_write(&EXT4_I(inode)->i_mmap_sem);
- return rc;
+ goto err_out;
}
if (attr->ia_size != inode->i_size) {
@@ -5372,6 +5438,21 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
inode->i_mtime = current_time(inode);
inode->i_ctime = inode->i_mtime;
}
+
+ if (shrink)
+ ext4_fc_track_range(inode,
+ (attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
+ inode->i_sb->s_blocksize_bits,
+ (oldsize > 0 ? oldsize - 1 : 0) >>
+ inode->i_sb->s_blocksize_bits);
+ else
+ ext4_fc_track_range(
+ inode,
+ (oldsize > 0 ? oldsize - 1 : oldsize) >>
+ inode->i_sb->s_blocksize_bits,
+ (attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
+ inode->i_sb->s_blocksize_bits);
+
down_write(&EXT4_I(inode)->i_data_sem);
EXT4_I(inode)->i_disksize = attr->ia_size;
rc = ext4_mark_inode_dirty(handle, inode);
@@ -5430,9 +5511,11 @@ out_mmap_sem:
rc = posix_acl_chmod(inode, inode->i_mode);
err_out:
- ext4_std_error(inode->i_sb, error);
+ if (error)
+ ext4_std_error(inode->i_sb, error);
if (!error)
error = rc;
+ ext4_fc_stop_update(inode);
return error;
}
@@ -5614,6 +5697,8 @@ int ext4_mark_iloc_dirty(handle_t *handle,
put_bh(iloc->bh);
return -EIO;
}
+ ext4_fc_track_inode(inode);
+
if (IS_I_VERSION(inode))
inode_inc_iversion(inode);
@@ -5937,6 +6022,8 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
if (IS_ERR(handle))
return PTR_ERR(handle);
+ ext4_fc_mark_ineligible(inode->i_sb,
+ EXT4_FC_REASON_JOURNAL_FLAG_CHANGE);
err = ext4_mark_inode_dirty(handle, inode);
ext4_handle_sync(handle);
ext4_journal_stop(handle);
@@ -5977,9 +6064,17 @@ vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
if (err)
goto out_ret;
+ /*
+ * On data journalling we skip straight to the transaction handle:
+	 * there's no delalloc; page truncation will be checked later; the
+ * early return w/ all buffers mapped (calculates size/len) can't
+ * be used; and there's no dioread_nolock, so only ext4_get_block.
+ */
+ if (ext4_should_journal_data(inode))
+ goto retry_alloc;
+
/* Delalloc case is easy... */
if (test_opt(inode->i_sb, DELALLOC) &&
- !ext4_should_journal_data(inode) &&
!ext4_nonda_switch(inode->i_sb)) {
do {
err = block_page_mkwrite(vma, vmf,
@@ -6005,6 +6100,9 @@ vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
/*
* Return if we have all the buffers mapped. This avoids the need to do
* journal_start/journal_stop which can block and take a long time
+ *
+ * This cannot be done for data journalling, as we have to add the
+ * inode to the transaction's list to writeprotect pages on commit.
*/
if (page_has_buffers(page)) {
if (!ext4_walk_page_buffers(NULL, page_buffers(page),
@@ -6029,16 +6127,42 @@ retry_alloc:
ret = VM_FAULT_SIGBUS;
goto out;
}
- err = block_page_mkwrite(vma, vmf, get_block);
- if (!err && ext4_should_journal_data(inode)) {
- if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
- PAGE_SIZE, NULL, do_journal_get_write_access)) {
- unlock_page(page);
+ /*
+ * Data journalling can't use block_page_mkwrite() because it
+ * will set_buffer_dirty() before do_journal_get_write_access()
+ * thus might hit warning messages for dirty metadata buffers.
+ */
+ if (!ext4_should_journal_data(inode)) {
+ err = block_page_mkwrite(vma, vmf, get_block);
+ } else {
+ lock_page(page);
+ size = i_size_read(inode);
+ /* Page got truncated from under us? */
+ if (page->mapping != mapping || page_offset(page) > size) {
+ ret = VM_FAULT_NOPAGE;
+ goto out_error;
+ }
+
+ if (page->index == size >> PAGE_SHIFT)
+ len = size & ~PAGE_MASK;
+ else
+ len = PAGE_SIZE;
+
+ err = __block_write_begin(page, 0, len, ext4_get_block);
+ if (!err) {
ret = VM_FAULT_SIGBUS;
- ext4_journal_stop(handle);
- goto out;
+ if (ext4_walk_page_buffers(handle, page_buffers(page),
+ 0, len, NULL, do_journal_get_write_access))
+ goto out_error;
+ if (ext4_walk_page_buffers(handle, page_buffers(page),
+ 0, len, NULL, write_end_fn))
+ goto out_error;
+ if (ext4_jbd2_inode_add_write(handle, inode, 0, len))
+ goto out_error;
+ ext4_set_inode_state(inode, EXT4_STATE_JDATA);
+ } else {
+ unlock_page(page);
}
- ext4_set_inode_state(inode, EXT4_STATE_JDATA);
}
ext4_journal_stop(handle);
if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -6049,6 +6173,10 @@ out:
up_read(&EXT4_I(inode)->i_mmap_sem);
sb_end_pagefault(inode->i_sb);
return ret;
+out_error:
+ unlock_page(page);
+ ext4_journal_stop(handle);
+ goto out;
}
vm_fault_t ext4_filemap_fault(struct vm_fault *vmf)
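
A note on the ext4_setattr() hunk above: fast commit tracks ranges in
units of blocks, so the truncate path converts the old and new byte
sizes to last-block indices before calling ext4_fc_track_range(). A
minimal user-space model of that arithmetic (the helper name and the
printf output are illustrative only, not kernel code):

#include <stdio.h>

/* A byte size s covers blocks [0, (s - 1) >> blkbits] when s > 0. */
static void track_range_for_setattr(unsigned long long oldsize,
				    unsigned long long newsize,
				    unsigned int blkbits)
{
	unsigned long long start, end;

	if (newsize < oldsize) {		/* shrink */
		start = (newsize > 0 ? newsize - 1 : 0) >> blkbits;
		end = (oldsize > 0 ? oldsize - 1 : 0) >> blkbits;
	} else {				/* grow */
		start = (oldsize > 0 ? oldsize - 1 : 0) >> blkbits;
		end = (newsize > 0 ? newsize - 1 : 0) >> blkbits;
	}
	printf("track blocks [%llu, %llu]\n", start, end);
}

int main(void)
{
	track_range_for_setattr(8192, 100, 12);	/* shrink: [0, 1] */
	track_range_for_setattr(100, 8192, 12);	/* grow:   [0, 1] */
	return 0;
}
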
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 36eca3bc036a..f0381876a7e5 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -86,7 +86,7 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
i_size_write(inode2, isize);
}
-static void reset_inode_seed(struct inode *inode)
+void ext4_reset_inode_seed(struct inode *inode)
{
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
@@ -165,6 +165,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
err = -EINVAL;
goto err_out;
}
+ ext4_fc_start_ineligible(sb, EXT4_FC_REASON_SWAP_BOOT);
/* Protect extent tree against block allocations via delalloc */
ext4_double_down_write_data_sem(inode, inode_bl);
@@ -199,8 +200,8 @@ static long swap_inode_boot_loader(struct super_block *sb,
inode->i_generation = prandom_u32();
inode_bl->i_generation = prandom_u32();
- reset_inode_seed(inode);
- reset_inode_seed(inode_bl);
+ ext4_reset_inode_seed(inode);
+ ext4_reset_inode_seed(inode_bl);
ext4_discard_preallocations(inode, 0);
@@ -247,6 +248,7 @@ revert:
err_out1:
ext4_journal_stop(handle);
+ ext4_fc_stop_ineligible(sb);
ext4_double_up_write_data_sem(inode, inode_bl);
err_out:
@@ -807,7 +809,7 @@ static int ext4_ioctl_get_es_cache(struct file *filp, unsigned long arg)
return error;
}
-long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+static long __ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
@@ -1074,6 +1076,7 @@ mext_out:
err = ext4_resize_fs(sb, n_blocks_count);
if (EXT4_SB(sb)->s_journal) {
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_RESIZE);
jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
@@ -1308,6 +1311,17 @@ out:
}
}
+long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ ext4_fc_start_update(file_inode(filp));
+ ret = __ext4_ioctl(filp, cmd, arg);
+ ext4_fc_stop_update(file_inode(filp));
+
+ return ret;
+}
+
#ifdef CONFIG_COMPAT
long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
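
The ioctl conversion above is a plain bracketing wrapper: renaming the
dispatch to __ext4_ioctl() and pairing ext4_fc_start_update() with
ext4_fc_stop_update() in a single caller guarantees the fast-commit
update window closes on every return path of the dispatch. A user-space
sketch of the same pattern (all names below are hypothetical stand-ins):

#include <errno.h>
#include <stdio.h>

static void fc_start_update(unsigned long ino) { printf("fc start %lu\n", ino); }
static void fc_stop_update(unsigned long ino) { printf("fc stop %lu\n", ino); }

/* The dispatch body may return from many places... */
static long do_ioctl(unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case 1:
		return 0;
	case 2:
		return (long)arg;
	default:
		return -ENOTTY;
	}
}

/* ...so the start/stop pair lives in exactly one thin wrapper. */
static long wrapped_ioctl(unsigned long ino, unsigned int cmd,
			  unsigned long arg)
{
	long ret;

	fc_start_update(ino);
	ret = do_ioctl(cmd, arg);
	fc_stop_update(ino);
	return ret;
}

int main(void)
{
	return (int)wrapped_ioctl(12, 1, 0);
}
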
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 132c118d12e1..85abbfb98cbe 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -124,7 +124,7 @@
* /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
* terms of number of blocks. If we have mounted the file system with -O
* stripe=<value> option the group prealloc request is normalized to the
- * the smallest multiple of the stripe value (sbi->s_stripe) which is
+ * smallest multiple of the stripe value (sbi->s_stripe) which is
* greater than the default mb_group_prealloc.
*
* The regular allocator (using the buddy cache) supports a few tunables.
@@ -619,11 +619,8 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
void *buddy;
void *buddy2;
- {
- static int mb_check_counter;
- if (mb_check_counter++ % 100 != 0)
- return 0;
- }
+ if (e4b->bd_info->bb_check_counter++ % 10)
+ return 0;
while (order > 1) {
buddy = mb_find_buddy(e4b, order, &max);
@@ -1394,9 +1391,6 @@ void ext4_set_bits(void *bm, int cur, int len)
}
}
-/*
- * _________________________________________________________________ */
-
static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side)
{
if (mb_test_bit(*bit + side, bitmap)) {
@@ -1508,14 +1502,16 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
blocknr += EXT4_C2B(sbi, block);
- ext4_grp_locked_error(sb, e4b->bd_group,
- inode ? inode->i_ino : 0,
- blocknr,
- "freeing already freed block "
- "(bit %u); block bitmap corrupt.",
- block);
- ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
+ if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
+ ext4_grp_locked_error(sb, e4b->bd_group,
+ inode ? inode->i_ino : 0,
+ blocknr,
+ "freeing already freed block (bit %u); block bitmap corrupt.",
+ block);
+ ext4_mark_group_bitmap_corrupted(
+ sb, e4b->bd_group,
EXT4_GROUP_INFO_BBITMAP_CORRUPT);
+ }
mb_regenerate_buddy(e4b);
goto done;
}
@@ -2019,7 +2015,7 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
/*
* IF we have corrupt bitmap, we won't find any
* free blocks even though group info says we
- * we have free blocks
+ * have free blocks
*/
ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
"%d free clusters as per "
@@ -3303,6 +3299,84 @@ out_err:
}
/*
+ * Idempotent helper for Ext4 fast commit replay path to set the state of
+ * blocks in bitmaps and update counters.
+ */
+void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
+ int len, int state)
+{
+ struct buffer_head *bitmap_bh = NULL;
+ struct ext4_group_desc *gdp;
+ struct buffer_head *gdp_bh;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ ext4_group_t group;
+ ext4_grpblk_t blkoff;
+ int i, clen, err;
+ int already;
+
+ clen = EXT4_B2C(sbi, len);
+
+ ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
+ bitmap_bh = ext4_read_block_bitmap(sb, group);
+ if (IS_ERR(bitmap_bh)) {
+ err = PTR_ERR(bitmap_bh);
+ bitmap_bh = NULL;
+ goto out_err;
+ }
+
+ err = -EIO;
+ gdp = ext4_get_group_desc(sb, group, &gdp_bh);
+ if (!gdp)
+ goto out_err;
+
+ ext4_lock_group(sb, group);
+ already = 0;
+ for (i = 0; i < clen; i++)
+ if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) == !state)
+ already++;
+
+ if (state)
+ ext4_set_bits(bitmap_bh->b_data, blkoff, clen);
+ else
+ mb_test_and_clear_bits(bitmap_bh->b_data, blkoff, clen);
+ if (ext4_has_group_desc_csum(sb) &&
+ (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
+ gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
+ ext4_free_group_clusters_set(sb, gdp,
+ ext4_free_clusters_after_init(sb,
+ group, gdp));
+ }
+ if (state)
+ clen = ext4_free_group_clusters(sb, gdp) - clen + already;
+ else
+ clen = ext4_free_group_clusters(sb, gdp) + clen - already;
+
+ ext4_free_group_clusters_set(sb, gdp, clen);
+ ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
+ ext4_group_desc_csum_set(sb, group, gdp);
+
+ ext4_unlock_group(sb, group);
+
+ if (sbi->s_log_groups_per_flex) {
+ ext4_group_t flex_group = ext4_flex_group(sbi, group);
+
+ atomic64_sub(len,
+ &sbi_array_rcu_deref(sbi, s_flex_groups,
+ flex_group)->free_clusters);
+ }
+
+ err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
+ if (err)
+ goto out_err;
+ sync_dirty_buffer(bitmap_bh);
+ err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
+ sync_dirty_buffer(gdp_bh);
+
+out_err:
+ brelse(bitmap_bh);
+}
+
+/*
 * here we normalize the request for a locality group
 * Group requests are normalized to s_mb_group_prealloc, which goes to
 * s_stripe if we set the same via mount option.
@@ -4160,7 +4234,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
struct ext4_buddy e4b;
int err;
int busy = 0;
- int free = 0;
+ int free, free_total = 0;
mb_debug(sb, "discard preallocation for group %u\n", group);
if (list_empty(&grp->bb_prealloc_list))
@@ -4188,8 +4262,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
INIT_LIST_HEAD(&list);
repeat:
+ free = 0;
ext4_lock_group(sb, group);
- this_cpu_inc(discard_pa_seq);
list_for_each_entry_safe(pa, tmp,
&grp->bb_prealloc_list, pa_group_list) {
spin_lock(&pa->pa_lock);
@@ -4206,6 +4280,9 @@ repeat:
/* seems this one can be freed ... */
ext4_mb_mark_pa_deleted(sb, pa);
+ if (!free)
+ this_cpu_inc(discard_pa_seq);
+
/* we can trust pa_free ... */
free += pa->pa_free;
@@ -4215,22 +4292,6 @@ repeat:
list_add(&pa->u.pa_tmp_list, &list);
}
- /* if we still need more blocks and some PAs were used, try again */
- if (free < needed && busy) {
- busy = 0;
- ext4_unlock_group(sb, group);
- cond_resched();
- goto repeat;
- }
-
- /* found anything to free? */
- if (list_empty(&list)) {
- BUG_ON(free != 0);
- mb_debug(sb, "Someone else may have freed PA for this group %u\n",
- group);
- goto out;
- }
-
/* now free all selected PAs */
list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
@@ -4248,14 +4309,22 @@ repeat:
call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
}
-out:
+ free_total += free;
+
+ /* if we still need more blocks and some PAs were used, try again */
+ if (free_total < needed && busy) {
+ ext4_unlock_group(sb, group);
+ cond_resched();
+ busy = 0;
+ goto repeat;
+ }
ext4_unlock_group(sb, group);
ext4_mb_unload_buddy(&e4b);
put_bh(bitmap_bh);
out_dbg:
mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
- free, group, grp->bb_free);
- return free;
+ free_total, group, grp->bb_free);
+ return free_total;
}
/*
@@ -4283,6 +4352,9 @@ void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
return;
}
+ if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
+ return;
+
mb_debug(sb, "discard preallocation for inode %lu\n",
inode->i_ino);
trace_ext4_discard_preallocations(inode,
@@ -4830,6 +4902,9 @@ out_dbg:
return ret;
}
+static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
+ struct ext4_allocation_request *ar, int *errp);
+
/*
* Main entry point into mballoc to allocate blocks
* it tries to use preallocation first, then falls back
@@ -4851,6 +4926,8 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
sbi = EXT4_SB(sb);
trace_ext4_request_blocks(ar);
+ if (sbi->s_mount_state & EXT4_FC_REPLAY)
+ return ext4_mb_new_blocks_simple(handle, ar, errp);
/* Allow to use superuser reservation for quota file */
if (ext4_is_quota_file(ar->inode))
@@ -5078,6 +5155,102 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
return 0;
}
+/*
+ * Simple allocator for Ext4 fast commit replay path. It searches for blocks
+ * linearly starting at the goal block and also excludes the blocks which
+ * are going to be in use after fast commit replay.
+ */
+static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
+ struct ext4_allocation_request *ar, int *errp)
+{
+ struct buffer_head *bitmap_bh;
+ struct super_block *sb = ar->inode->i_sb;
+ ext4_group_t group;
+ ext4_grpblk_t blkoff;
+ int i;
+ ext4_fsblk_t goal, block;
+ struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+
+ goal = ar->goal;
+ if (goal < le32_to_cpu(es->s_first_data_block) ||
+ goal >= ext4_blocks_count(es))
+ goal = le32_to_cpu(es->s_first_data_block);
+
+ ar->len = 0;
+ ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
+ for (; group < ext4_get_groups_count(sb); group++) {
+ bitmap_bh = ext4_read_block_bitmap(sb, group);
+ if (IS_ERR(bitmap_bh)) {
+ *errp = PTR_ERR(bitmap_bh);
+ pr_warn("Failed to read block bitmap\n");
+ return 0;
+ }
+
+ ext4_get_group_no_and_offset(sb,
+ max(ext4_group_first_block_no(sb, group), goal),
+ NULL, &blkoff);
+ i = mb_find_next_zero_bit(bitmap_bh->b_data, sb->s_blocksize,
+ blkoff);
+ brelse(bitmap_bh);
+ if (i >= sb->s_blocksize)
+ continue;
+ if (ext4_fc_replay_check_excluded(sb,
+ ext4_group_first_block_no(sb, group) + i))
+ continue;
+ break;
+ }
+
+ if (group >= ext4_get_groups_count(sb) && i >= sb->s_blocksize)
+ return 0;
+
+ block = ext4_group_first_block_no(sb, group) + i;
+ ext4_mb_mark_bb(sb, block, 1, 1);
+ ar->len = 1;
+
+ return block;
+}
+
+static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
+ unsigned long count)
+{
+ struct buffer_head *bitmap_bh;
+ struct super_block *sb = inode->i_sb;
+ struct ext4_group_desc *gdp;
+ struct buffer_head *gdp_bh;
+ ext4_group_t group;
+ ext4_grpblk_t blkoff;
+ int already_freed = 0, err, i;
+
+ ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
+ bitmap_bh = ext4_read_block_bitmap(sb, group);
+ if (IS_ERR(bitmap_bh)) {
+ err = PTR_ERR(bitmap_bh);
+ pr_warn("Failed to read block bitmap\n");
+ return;
+ }
+ gdp = ext4_get_group_desc(sb, group, &gdp_bh);
+ if (!gdp)
+ return;
+
+ for (i = 0; i < count; i++) {
+ if (!mb_test_bit(blkoff + i, bitmap_bh->b_data))
+ already_freed++;
+ }
+ mb_clear_bits(bitmap_bh->b_data, blkoff, count);
+ err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
+ if (err)
+ return;
+ ext4_free_group_clusters_set(
+ sb, gdp, ext4_free_group_clusters(sb, gdp) +
+ count - already_freed);
+ ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
+ ext4_group_desc_csum_set(sb, group, gdp);
+ ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
+ sync_dirty_buffer(bitmap_bh);
+ sync_dirty_buffer(gdp_bh);
+ brelse(bitmap_bh);
+}
+
/**
* ext4_free_blocks() -- Free given blocks and update quota
* @handle: handle for this transaction
@@ -5104,6 +5277,13 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
int err = 0;
int ret;
+ sbi = EXT4_SB(sb);
+
+ if (sbi->s_mount_state & EXT4_FC_REPLAY) {
+ ext4_free_blocks_simple(inode, block, count);
+ return;
+ }
+
might_sleep();
if (bh) {
if (block)
@@ -5112,7 +5292,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
block = bh->b_blocknr;
}
- sbi = EXT4_SB(sb);
if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
!ext4_inode_block_valid(inode, block, count)) {
ext4_error(sb, "Freeing blocks not in datazone - "
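
Worth spelling out in the mballoc changes: ext4_mb_mark_bb() must be
idempotent because fast-commit replay can mark the same extent more
than once. Counting the bits that are already in the requested state
("already") and moving the free counter only by the delta makes a
replayed call a no-op. A self-contained user-space model of that
accounting (a sketch of the idea, not the kernel code):

#include <stdio.h>

#define NBITS 64

static int test_bit(const unsigned char *bm, int bit)
{
	return (bm[bit >> 3] >> (bit & 7)) & 1;
}

static void change_bit_to(unsigned char *bm, int bit, int state)
{
	if (state)
		bm[bit >> 3] |= 1u << (bit & 7);
	else
		bm[bit >> 3] &= ~(1u << (bit & 7));
}

/* Bits already in the requested state are counted first, so the free
 * counter moves only by the number of bits that actually change. */
static void mark_bb(unsigned char *bm, int *free_bits,
		    int start, int len, int state)
{
	int i, already = 0;

	for (i = 0; i < len; i++)
		if (test_bit(bm, start + i) == state)
			already++;

	for (i = 0; i < len; i++)
		change_bit_to(bm, start + i, state);

	if (state)
		*free_bits -= len - already;
	else
		*free_bits += len - already;
}

int main(void)
{
	unsigned char bm[NBITS / 8] = { 0 };
	int free_bits = NBITS;

	mark_bb(bm, &free_bits, 4, 8, 1);
	mark_bb(bm, &free_bits, 4, 8, 1);	/* replay: no double count */
	printf("free bits: %d\n", free_bits);	/* prints 56 */
	return 0;
}
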
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index d34cb8c46655..795c3ff2907c 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -85,15 +85,11 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
}
}
- get_bh(*bh);
lock_buffer(*bh);
- (*bh)->b_end_io = end_buffer_read_sync;
- submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, *bh);
- wait_on_buffer(*bh);
- if (!buffer_uptodate(*bh)) {
- ret = -EIO;
+ ret = ext4_read_bh(*bh, REQ_META | REQ_PRIO, NULL);
+ if (ret)
goto warn_exit;
- }
+
mmp = (struct mmp_struct *)((*bh)->b_data);
if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC) {
ret = -EFSCORRUPTED;
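
read_mmp_block() is one of several call sites in this series folded
onto the new read helpers added in super.c below. The following is a
tiny user-space model of the ext4_read_bh() contract they share
(buffer handed in locked, uptodate short-circuit, -EIO on failure); a
sketch under those assumptions, not the kernel implementation:

#include <errno.h>
#include <stdio.h>

struct buf {
	int locked;
	int uptodate;
};

static int read_bh(struct buf *bh, int io_succeeds)
{
	if (!bh->locked)
		return -EINVAL;	/* models BUG_ON(!buffer_locked(bh)) */

	if (bh->uptodate) {	/* nothing to read, just unlock */
		bh->locked = 0;
		return 0;
	}

	/* pretend submit_bh() + wait_on_buffer() happened here */
	bh->uptodate = io_succeeds;
	bh->locked = 0;
	return io_succeeds ? 0 : -EIO;
}

int main(void)
{
	struct buf b = { .locked = 1, .uptodate = 0 };

	printf("first read:  %d\n", read_bh(&b, 1));	/* 0 */
	b.locked = 1;
	printf("cached read: %d\n", read_bh(&b, 0));	/* 0, no I/O needed */
	return 0;
}
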
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 0d601b822875..64a579734f93 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -215,7 +215,7 @@ mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
for (i = 0; i < nr; i++) {
bh = arr[i];
if (!bh_uptodate_or_lock(bh)) {
- err = bh_submit_read(bh);
+ err = ext4_read_bh(bh, 0, NULL);
if (err)
return err;
}
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 0d74615fcce3..5159830dacb8 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2553,7 +2553,7 @@ out:
* for checking S_ISDIR(inode) (since the INODE_INDEX feature will not be set
* on regular files) and to avoid creating huge/slow non-HTREE directories.
*/
-static void ext4_inc_count(handle_t *handle, struct inode *inode)
+static void ext4_inc_count(struct inode *inode)
{
inc_nlink(inode);
if (is_dx(inode) &&
@@ -2565,7 +2565,7 @@ static void ext4_inc_count(handle_t *handle, struct inode *inode)
* If a directory had nlink == 1, then we should let it be 1. This indicates
 * the directory has >EXT4_LINK_MAX subdirs.
*/
-static void ext4_dec_count(handle_t *handle, struct inode *inode)
+static void ext4_dec_count(struct inode *inode)
{
if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2)
drop_nlink(inode);
@@ -2610,7 +2610,7 @@ static int ext4_create(struct inode *dir, struct dentry *dentry, umode_t mode,
bool excl)
{
handle_t *handle;
- struct inode *inode;
+ struct inode *inode, *inode_save;
int err, credits, retries = 0;
err = dquot_initialize(dir);
@@ -2628,7 +2628,11 @@ retry:
inode->i_op = &ext4_file_inode_operations;
inode->i_fop = &ext4_file_operations;
ext4_set_aops(inode);
+ inode_save = inode;
+ ihold(inode_save);
err = ext4_add_nondir(handle, dentry, &inode);
+ ext4_fc_track_create(inode_save, dentry);
+ iput(inode_save);
}
if (handle)
ext4_journal_stop(handle);
@@ -2643,7 +2647,7 @@ static int ext4_mknod(struct inode *dir, struct dentry *dentry,
umode_t mode, dev_t rdev)
{
handle_t *handle;
- struct inode *inode;
+ struct inode *inode, *inode_save;
int err, credits, retries = 0;
err = dquot_initialize(dir);
@@ -2660,7 +2664,12 @@ retry:
if (!IS_ERR(inode)) {
init_special_inode(inode, inode->i_mode, rdev);
inode->i_op = &ext4_special_inode_operations;
+ inode_save = inode;
+ ihold(inode_save);
err = ext4_add_nondir(handle, dentry, &inode);
+ if (!err)
+ ext4_fc_track_create(inode_save, dentry);
+ iput(inode_save);
}
if (handle)
ext4_journal_stop(handle);
@@ -2739,7 +2748,7 @@ struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode,
return ext4_next_entry(de, blocksize);
}
-static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
+int ext4_init_new_dir(handle_t *handle, struct inode *dir,
struct inode *inode)
{
struct buffer_head *dir_block = NULL;
@@ -2824,7 +2833,9 @@ out_clear_inode:
iput(inode);
goto out_retry;
}
- ext4_inc_count(handle, dir);
+ ext4_fc_track_create(inode, dentry);
+ ext4_inc_count(dir);
+
ext4_update_dx_flag(dir);
err = ext4_mark_inode_dirty(handle, dir);
if (err)
@@ -3162,8 +3173,9 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
retval = ext4_mark_inode_dirty(handle, inode);
if (retval)
goto end_rmdir;
- ext4_dec_count(handle, dir);
+ ext4_dec_count(dir);
ext4_update_dx_flag(dir);
+ ext4_fc_track_unlink(inode, dentry);
retval = ext4_mark_inode_dirty(handle, dir);
#ifdef CONFIG_UNICODE
@@ -3184,42 +3196,32 @@ end_rmdir:
return retval;
}
-static int ext4_unlink(struct inode *dir, struct dentry *dentry)
+int __ext4_unlink(struct inode *dir, const struct qstr *d_name,
+ struct inode *inode)
{
- int retval;
- struct inode *inode;
+ int retval = -ENOENT;
struct buffer_head *bh;
struct ext4_dir_entry_2 *de;
handle_t *handle = NULL;
+ int skip_remove_dentry = 0;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb))))
- return -EIO;
-
- trace_ext4_unlink_enter(dir, dentry);
- /* Initialize quotas before so that eventual writes go
- * in separate transaction */
- retval = dquot_initialize(dir);
- if (retval)
- goto out_trace;
- retval = dquot_initialize(d_inode(dentry));
- if (retval)
- goto out_trace;
-
- bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
- if (IS_ERR(bh)) {
- retval = PTR_ERR(bh);
- goto out_trace;
- }
- if (!bh) {
- retval = -ENOENT;
- goto out_trace;
- }
+ bh = ext4_find_entry(dir, d_name, &de, NULL);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
- inode = d_inode(dentry);
+ if (!bh)
+ return -ENOENT;
if (le32_to_cpu(de->inode) != inode->i_ino) {
- retval = -EFSCORRUPTED;
- goto out_bh;
+ /*
+	 * It's okay if we don't find a dentry which matches
+	 * the inode. That's because it might have gotten
+	 * renamed to a different inode number.
+ */
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ skip_remove_dentry = 1;
+ else
+ goto out_bh;
}
handle = ext4_journal_start(dir, EXT4_HT_DIR,
@@ -3232,17 +3234,21 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
if (IS_DIRSYNC(dir))
ext4_handle_sync(handle);
- retval = ext4_delete_entry(handle, dir, de, bh);
- if (retval)
- goto out_handle;
- dir->i_ctime = dir->i_mtime = current_time(dir);
- ext4_update_dx_flag(dir);
- retval = ext4_mark_inode_dirty(handle, dir);
- if (retval)
- goto out_handle;
+ if (!skip_remove_dentry) {
+ retval = ext4_delete_entry(handle, dir, de, bh);
+ if (retval)
+ goto out_handle;
+ dir->i_ctime = dir->i_mtime = current_time(dir);
+ ext4_update_dx_flag(dir);
+ retval = ext4_mark_inode_dirty(handle, dir);
+ if (retval)
+ goto out_handle;
+ } else {
+ retval = 0;
+ }
if (inode->i_nlink == 0)
ext4_warning_inode(inode, "Deleting file '%.*s' with no links",
- dentry->d_name.len, dentry->d_name.name);
+ d_name->len, d_name->name);
else
drop_nlink(inode);
if (!inode->i_nlink)
@@ -3250,6 +3256,35 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
inode->i_ctime = current_time(inode);
retval = ext4_mark_inode_dirty(handle, inode);
+out_handle:
+ ext4_journal_stop(handle);
+out_bh:
+ brelse(bh);
+ return retval;
+}
+
+static int ext4_unlink(struct inode *dir, struct dentry *dentry)
+{
+ int retval;
+
+ if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb))))
+ return -EIO;
+
+ trace_ext4_unlink_enter(dir, dentry);
+ /*
+ * Initialize quotas before so that eventual writes go
+ * in separate transaction
+ */
+ retval = dquot_initialize(dir);
+ if (retval)
+ goto out_trace;
+ retval = dquot_initialize(d_inode(dentry));
+ if (retval)
+ goto out_trace;
+
+ retval = __ext4_unlink(dir, &dentry->d_name, d_inode(dentry));
+ if (!retval)
+ ext4_fc_track_unlink(d_inode(dentry), dentry);
#ifdef CONFIG_UNICODE
/* VFS negative dentries are incompatible with Encoding and
* Case-insensitiveness. Eventually we'll want avoid
@@ -3261,10 +3296,6 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
d_invalidate(dentry);
#endif
-out_handle:
- ext4_journal_stop(handle);
-out_bh:
- brelse(bh);
out_trace:
trace_ext4_unlink_exit(dentry, retval);
return retval;
@@ -3345,7 +3376,8 @@ static int ext4_symlink(struct inode *dir,
*/
drop_nlink(inode);
err = ext4_orphan_add(handle, inode);
- ext4_journal_stop(handle);
+ if (handle)
+ ext4_journal_stop(handle);
handle = NULL;
if (err)
goto err_drop_inode;
@@ -3399,29 +3431,10 @@ out_free_encrypted_link:
return err;
}
-static int ext4_link(struct dentry *old_dentry,
- struct inode *dir, struct dentry *dentry)
+int __ext4_link(struct inode *dir, struct inode *inode, struct dentry *dentry)
{
handle_t *handle;
- struct inode *inode = d_inode(old_dentry);
int err, retries = 0;
-
- if (inode->i_nlink >= EXT4_LINK_MAX)
- return -EMLINK;
-
- err = fscrypt_prepare_link(old_dentry, dir, dentry);
- if (err)
- return err;
-
- if ((ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT)) &&
- (!projid_eq(EXT4_I(dir)->i_projid,
- EXT4_I(old_dentry->d_inode)->i_projid)))
- return -EXDEV;
-
- err = dquot_initialize(dir);
- if (err)
- return err;
-
retry:
handle = ext4_journal_start(dir, EXT4_HT_DIR,
(EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
@@ -3433,11 +3446,12 @@ retry:
ext4_handle_sync(handle);
inode->i_ctime = current_time(inode);
- ext4_inc_count(handle, inode);
+ ext4_inc_count(inode);
ihold(inode);
err = ext4_add_entry(handle, dentry, inode);
if (!err) {
+ ext4_fc_track_link(inode, dentry);
err = ext4_mark_inode_dirty(handle, inode);
/* this can happen only for tmpfile being
* linked the first time
@@ -3455,6 +3469,29 @@ retry:
return err;
}
+static int ext4_link(struct dentry *old_dentry,
+ struct inode *dir, struct dentry *dentry)
+{
+ struct inode *inode = d_inode(old_dentry);
+ int err;
+
+ if (inode->i_nlink >= EXT4_LINK_MAX)
+ return -EMLINK;
+
+ err = fscrypt_prepare_link(old_dentry, dir, dentry);
+ if (err)
+ return err;
+
+ if ((ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT)) &&
+ (!projid_eq(EXT4_I(dir)->i_projid,
+ EXT4_I(old_dentry->d_inode)->i_projid)))
+ return -EXDEV;
+
+ err = dquot_initialize(dir);
+ if (err)
+ return err;
+ return __ext4_link(dir, inode, dentry);
+}
/*
* Try to find buffer head where contains the parent block.
@@ -3630,9 +3667,9 @@ static void ext4_update_dir_count(handle_t *handle, struct ext4_renament *ent)
{
if (ent->dir_nlink_delta) {
if (ent->dir_nlink_delta == -1)
- ext4_dec_count(handle, ent->dir);
+ ext4_dec_count(ent->dir);
else
- ext4_inc_count(handle, ent->dir);
+ ext4_inc_count(ent->dir);
ext4_mark_inode_dirty(handle, ent->dir);
}
}
@@ -3844,7 +3881,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
}
if (new.inode) {
- ext4_dec_count(handle, new.inode);
+ ext4_dec_count(new.inode);
new.inode->i_ctime = current_time(new.inode);
}
old.dir->i_ctime = old.dir->i_mtime = current_time(old.dir);
@@ -3854,14 +3891,14 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
if (retval)
goto end_rename;
- ext4_dec_count(handle, old.dir);
+ ext4_dec_count(old.dir);
if (new.inode) {
/* checked ext4_empty_dir above, can't have another
* parent, ext4_dec_count() won't work for many-linked
* dirs */
clear_nlink(new.inode);
} else {
- ext4_inc_count(handle, new.dir);
+ ext4_inc_count(new.dir);
ext4_update_dx_flag(new.dir);
retval = ext4_mark_inode_dirty(handle, new.dir);
if (unlikely(retval))
@@ -3871,6 +3908,22 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
retval = ext4_mark_inode_dirty(handle, old.dir);
if (unlikely(retval))
goto end_rename;
+
+ if (S_ISDIR(old.inode->i_mode)) {
+ /*
+		 * We disable fast commits here because the
+ * replay code is not yet capable of changing dot dot
+ * dirents in directories.
+ */
+ ext4_fc_mark_ineligible(old.inode->i_sb,
+ EXT4_FC_REASON_RENAME_DIR);
+ } else {
+ if (new.inode)
+ ext4_fc_track_unlink(new.inode, new.dentry);
+ ext4_fc_track_link(old.inode, new.dentry);
+ ext4_fc_track_unlink(old.inode, old.dentry);
+ }
+
if (new.inode) {
retval = ext4_mark_inode_dirty(handle, new.inode);
if (unlikely(retval))
@@ -4014,7 +4067,8 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
retval = ext4_mark_inode_dirty(handle, new.inode);
if (unlikely(retval))
goto end_rename;
-
+ ext4_fc_mark_ineligible(new.inode->i_sb,
+ EXT4_FC_REASON_CROSS_RENAME);
if (old.dir_bh) {
retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino);
if (retval)
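
One detail of the ext4_rename() hunk deserves a gloss: a non-directory
rename is recorded for fast commit as three dentry operations the
replay path already understands (unlink of the victim at the new name
if one exists, link of the source inode at the new name, unlink at the
old name), while a directory rename marks the journal ineligible
instead. A toy model of that decomposition (the logger is a
hypothetical stand-in for the fast-commit dentry queue):

#include <stdio.h>

static void track(const char *op, const char *name)
{
	printf("fc: %-15s %s\n", op, name);
}

static void track_rename(int is_dir, int new_exists,
			 const char *old_name, const char *new_name)
{
	if (is_dir) {
		/* replay cannot rewrite ".." entries yet, so bail out */
		track("mark-ineligible", "RENAME_DIR");
		return;
	}
	if (new_exists)
		track("unlink", new_name);	/* victim disappears */
	track("link", new_name);	/* source appears at the new name */
	track("unlink", old_name);	/* old name disappears */
}

int main(void)
{
	track_rename(0, 1, "a/x", "b/y");
	track_rename(1, 0, "a/d", "b/d");
	return 0;
}
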
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index a50b51270ea9..928700d57eb6 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -843,8 +843,10 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
BUFFER_TRACE(dind, "get_write_access");
err = ext4_journal_get_write_access(handle, dind);
- if (unlikely(err))
+ if (unlikely(err)) {
ext4_std_error(sb, err);
+ goto errout;
+ }
/* ext4_reserve_inode_write() gets a reference on the iloc */
err = ext4_reserve_inode_write(handle, inode, &iloc);
@@ -1243,7 +1245,7 @@ static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
if (unlikely(!bh))
return NULL;
if (!bh_uptodate_or_lock(bh)) {
- if (bh_submit_read(bh) < 0) {
+ if (ext4_read_bh(bh, 0, NULL) < 0) {
brelse(bh);
return NULL;
}
@@ -1806,8 +1808,8 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
o_blocks_count + add, add);
/* See if the device is actually as big as what was requested */
- bh = sb_bread(sb, o_blocks_count + add - 1);
- if (!bh) {
+ bh = ext4_sb_bread(sb, o_blocks_count + add - 1, 0);
+ if (IS_ERR(bh)) {
ext4_warning(sb, "can't read last block, resize aborted");
return -ENOSPC;
}
@@ -1932,8 +1934,8 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
int meta_bg;
/* See if the device is actually as big as what was requested */
- bh = sb_bread(sb, n_blocks_count - 1);
- if (!bh) {
+ bh = ext4_sb_bread(sb, n_blocks_count - 1, 0);
+ if (IS_ERR(bh)) {
ext4_warning(sb, "can't read last block, resize aborted");
return -ENOSPC;
}
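
The resize.c conversions switch the error check from !bh to IS_ERR(bh)
because ext4_sb_bread() encodes the failure reason in the pointer
itself, keeping -ENOMEM and -EIO distinguishable. A user-space model
of the ERR_PTR convention this relies on (simplified from the kernel's
include/linux/err.h):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for a block read: errors travel in the pointer itself. */
static void *read_block(int simulate_err)
{
	static char block[4096];

	if (simulate_err)
		return ERR_PTR(-simulate_err);
	return block;
}

int main(void)
{
	void *bh = read_block(EIO);

	if (IS_ERR(bh)) {
		printf("read failed: %ld\n", PTR_ERR(bh));	/* -5 */
		return 1;
	}
	return 0;
}
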
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 8b2736283481..03373471131c 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -141,27 +141,115 @@ MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
+
+static inline void __ext4_read_bh(struct buffer_head *bh, int op_flags,
+ bh_end_io_t *end_io)
+{
+ /*
+ * buffer's verified bit is no longer valid after reading from
+ * disk again due to write out error, clear it to make sure we
+ * recheck the buffer contents.
+ */
+ clear_buffer_verified(bh);
+
+ bh->b_end_io = end_io ? end_io : end_buffer_read_sync;
+ get_bh(bh);
+ submit_bh(REQ_OP_READ, op_flags, bh);
+}
+
+void ext4_read_bh_nowait(struct buffer_head *bh, int op_flags,
+ bh_end_io_t *end_io)
+{
+ BUG_ON(!buffer_locked(bh));
+
+ if (ext4_buffer_uptodate(bh)) {
+ unlock_buffer(bh);
+ return;
+ }
+ __ext4_read_bh(bh, op_flags, end_io);
+}
+
+int ext4_read_bh(struct buffer_head *bh, int op_flags, bh_end_io_t *end_io)
+{
+ BUG_ON(!buffer_locked(bh));
+
+ if (ext4_buffer_uptodate(bh)) {
+ unlock_buffer(bh);
+ return 0;
+ }
+
+ __ext4_read_bh(bh, op_flags, end_io);
+
+ wait_on_buffer(bh);
+ if (buffer_uptodate(bh))
+ return 0;
+ return -EIO;
+}
+
+int ext4_read_bh_lock(struct buffer_head *bh, int op_flags, bool wait)
+{
+ if (trylock_buffer(bh)) {
+ if (wait)
+ return ext4_read_bh(bh, op_flags, NULL);
+ ext4_read_bh_nowait(bh, op_flags, NULL);
+ return 0;
+ }
+ if (wait) {
+ wait_on_buffer(bh);
+ if (buffer_uptodate(bh))
+ return 0;
+ return -EIO;
+ }
+ return 0;
+}
+
/*
- * This works like sb_bread() except it uses ERR_PTR for error
+ * This works like __bread_gfp() except it uses ERR_PTR for error
* returns. Currently with sb_bread it's impossible to distinguish
* between ENOMEM and EIO situations (since both result in a NULL
* return.
*/
-struct buffer_head *
-ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags)
+static struct buffer_head *__ext4_sb_bread_gfp(struct super_block *sb,
+ sector_t block, int op_flags,
+ gfp_t gfp)
{
- struct buffer_head *bh = sb_getblk(sb, block);
+ struct buffer_head *bh;
+ int ret;
+ bh = sb_getblk_gfp(sb, block, gfp);
if (bh == NULL)
return ERR_PTR(-ENOMEM);
if (ext4_buffer_uptodate(bh))
return bh;
- ll_rw_block(REQ_OP_READ, REQ_META | op_flags, 1, &bh);
- wait_on_buffer(bh);
- if (buffer_uptodate(bh))
- return bh;
- put_bh(bh);
- return ERR_PTR(-EIO);
+
+ ret = ext4_read_bh_lock(bh, REQ_META | op_flags, true);
+ if (ret) {
+ put_bh(bh);
+ return ERR_PTR(ret);
+ }
+ return bh;
+}
+
+struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block,
+ int op_flags)
+{
+ return __ext4_sb_bread_gfp(sb, block, op_flags, __GFP_MOVABLE);
+}
+
+struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
+ sector_t block)
+{
+ return __ext4_sb_bread_gfp(sb, block, 0, 0);
+}
+
+void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block)
+{
+ struct buffer_head *bh = sb_getblk_gfp(sb, block, 0);
+
+ if (likely(bh)) {
+ ext4_read_bh_lock(bh, REQ_RAHEAD, false);
+ brelse(bh);
+ }
}
static int ext4_verify_csum_type(struct super_block *sb,
@@ -201,7 +289,18 @@ void ext4_superblock_csum_set(struct super_block *sb)
if (!ext4_has_metadata_csum(sb))
return;
+ /*
+ * Locking the superblock prevents the scenario
+ * where:
+ * 1) a first thread pauses during checksum calculation.
+ * 2) a second thread updates the superblock, recalculates
+ * the checksum, and updates s_checksum
+ * 3) the first thread resumes and finishes its checksum calculation
+ * and updates s_checksum with a potentially stale or torn value.
+ */
+ lock_buffer(EXT4_SB(sb)->s_sbh);
es->s_checksum = ext4_superblock_csum(sb, es);
+ unlock_buffer(EXT4_SB(sb)->s_sbh);
}
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
@@ -472,6 +571,89 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
spin_unlock(&sbi->s_md_lock);
}
+/*
+ * This writepage callback for write_cache_pages()
+ * takes care of a few cases after page cleaning.
+ *
+ * write_cache_pages() already checks for dirty pages
+ * and calls clear_page_dirty_for_io(), which we want
+ * in order to write protect the pages.
+ *
+ * However, we may have to redirty a page (see below).
+ */
+static int ext4_journalled_writepage_callback(struct page *page,
+ struct writeback_control *wbc,
+ void *data)
+{
+ transaction_t *transaction = (transaction_t *) data;
+ struct buffer_head *bh, *head;
+ struct journal_head *jh;
+
+ bh = head = page_buffers(page);
+ do {
+ /*
+ * We have to redirty a page in these cases:
+ * 1) If buffer is dirty, it means the page was dirty because it
+ * contains a buffer that needs checkpointing. So the dirty bit
+ * needs to be preserved so that checkpointing writes the buffer
+ * properly.
+ * 2) If buffer is not part of the committing transaction
+ * (we may have just accidentally come across this buffer because
+ * inode range tracking is not exact) or if the currently running
+ * transaction already contains this buffer as well, dirty bit
+ * needs to be preserved so that the buffer gets writeprotected
+ * properly on running transaction's commit.
+ */
+ jh = bh2jh(bh);
+ if (buffer_dirty(bh) ||
+ (jh && (jh->b_transaction != transaction ||
+ jh->b_next_transaction))) {
+ redirty_page_for_writepage(wbc, page);
+ goto out;
+ }
+ } while ((bh = bh->b_this_page) != head);
+
+out:
+ return AOP_WRITEPAGE_ACTIVATE;
+}
+
+static int ext4_journalled_submit_inode_data_buffers(struct jbd2_inode *jinode)
+{
+ struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = LONG_MAX,
+ .range_start = jinode->i_dirty_start,
+ .range_end = jinode->i_dirty_end,
+ };
+
+ return write_cache_pages(mapping, &wbc,
+ ext4_journalled_writepage_callback,
+ jinode->i_transaction);
+}
+
+static int ext4_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
+{
+ int ret;
+
+ if (ext4_should_journal_data(jinode->i_vfs_inode))
+ ret = ext4_journalled_submit_inode_data_buffers(jinode);
+ else
+ ret = jbd2_journal_submit_inode_data_buffers(jinode);
+
+ return ret;
+}
+
+static int ext4_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
+{
+ int ret = 0;
+
+ if (!ext4_should_journal_data(jinode->i_vfs_inode))
+ ret = jbd2_journal_finish_inode_data_buffers(jinode);
+
+ return ret;
+}
+
static bool system_going_down(void)
{
return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
@@ -939,10 +1121,10 @@ static void ext4_blkdev_put(struct block_device *bdev)
static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
{
struct block_device *bdev;
- bdev = sbi->journal_bdev;
+ bdev = sbi->s_journal_bdev;
if (bdev) {
ext4_blkdev_put(bdev);
- sbi->journal_bdev = NULL;
+ sbi->s_journal_bdev = NULL;
}
}
@@ -1073,14 +1255,14 @@ static void ext4_put_super(struct super_block *sb)
sync_blockdev(sb->s_bdev);
invalidate_bdev(sb->s_bdev);
- if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
+ if (sbi->s_journal_bdev && sbi->s_journal_bdev != sb->s_bdev) {
/*
* Invalidate the journal device's buffers. We don't want them
* floating about in memory - the physical journal device may
		 * be hotswapped, and it breaks the `ro-after' testing code.
*/
- sync_blockdev(sbi->journal_bdev);
- invalidate_bdev(sbi->journal_bdev);
+ sync_blockdev(sbi->s_journal_bdev);
+ invalidate_bdev(sbi->s_journal_bdev);
ext4_blkdev_remove(sbi);
}
@@ -1149,6 +1331,8 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
ei->i_datasync_tid = 0;
atomic_set(&ei->i_unwritten, 0);
INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
+ ext4_fc_init_inode(&ei->vfs_inode);
+ mutex_init(&ei->i_fc_lock);
return &ei->vfs_inode;
}
@@ -1166,6 +1350,10 @@ static int ext4_drop_inode(struct inode *inode)
static void ext4_free_in_core_inode(struct inode *inode)
{
fscrypt_free_inode(inode);
+ if (!list_empty(&(EXT4_I(inode)->i_fc_list))) {
+ pr_warn("%s: inode %ld still in fc list",
+ __func__, inode->i_ino);
+ }
kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}
@@ -1191,6 +1379,7 @@ static void init_once(void *foo)
init_rwsem(&ei->i_data_sem);
init_rwsem(&ei->i_mmap_sem);
inode_init_once(&ei->vfs_inode);
+ ext4_fc_init_inode(&ei->vfs_inode);
}
static int __init init_inodecache(void)
@@ -1219,6 +1408,7 @@ static void destroy_inodecache(void)
void ext4_clear_inode(struct inode *inode)
{
+ ext4_fc_del(inode);
invalidate_inode_buffers(inode);
clear_inode(inode);
ext4_discard_preallocations(inode, 0);
@@ -1526,7 +1716,11 @@ enum {
Opt_dioread_nolock, Opt_dioread_lock,
Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
- Opt_prefetch_block_bitmaps,
+ Opt_prefetch_block_bitmaps, Opt_no_fc,
+#ifdef CONFIG_EXT4_DEBUG
+ Opt_fc_debug_max_replay,
+#endif
+ Opt_fc_debug_force
};
static const match_table_t tokens = {
@@ -1613,6 +1807,11 @@ static const match_table_t tokens = {
{Opt_init_itable, "init_itable=%u"},
{Opt_init_itable, "init_itable"},
{Opt_noinit_itable, "noinit_itable"},
+ {Opt_no_fc, "no_fc"},
+ {Opt_fc_debug_force, "fc_debug_force"},
+#ifdef CONFIG_EXT4_DEBUG
+ {Opt_fc_debug_max_replay, "fc_debug_max_replay=%u"},
+#endif
{Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
{Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
{Opt_test_dummy_encryption, "test_dummy_encryption"},
@@ -1739,6 +1938,7 @@ static int clear_qf_name(struct super_block *sb, int qtype)
#define MOPT_EXT4_ONLY (MOPT_NO_EXT2 | MOPT_NO_EXT3)
#define MOPT_STRING 0x0400
#define MOPT_SKIP 0x0800
+#define MOPT_2 0x1000
static const struct mount_opts {
int token;
@@ -1839,6 +2039,13 @@ static const struct mount_opts {
{Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
{Opt_prefetch_block_bitmaps, EXT4_MOUNT_PREFETCH_BLOCK_BITMAPS,
MOPT_SET},
+ {Opt_no_fc, EXT4_MOUNT2_JOURNAL_FAST_COMMIT,
+ MOPT_CLEAR | MOPT_2 | MOPT_EXT4_ONLY},
+ {Opt_fc_debug_force, EXT4_MOUNT2_JOURNAL_FAST_COMMIT,
+ MOPT_SET | MOPT_2 | MOPT_EXT4_ONLY},
+#ifdef CONFIG_EXT4_DEBUG
+ {Opt_fc_debug_max_replay, 0, MOPT_GTE0},
+#endif
{Opt_err, 0, 0}
};
@@ -2048,6 +2255,10 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
sbi->s_li_wait_mult = arg;
} else if (token == Opt_max_dir_size_kb) {
sbi->s_max_dir_size_kb = arg;
+#ifdef CONFIG_EXT4_DEBUG
+ } else if (token == Opt_fc_debug_max_replay) {
+ sbi->s_fc_debug_max_replay = arg;
+#endif
} else if (token == Opt_stripe) {
sbi->s_stripe = arg;
} else if (token == Opt_resuid) {
@@ -2216,10 +2427,17 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
WARN_ON(1);
return -1;
}
- if (arg != 0)
- sbi->s_mount_opt |= m->mount_opt;
- else
- sbi->s_mount_opt &= ~m->mount_opt;
+ if (m->flags & MOPT_2) {
+ if (arg != 0)
+ sbi->s_mount_opt2 |= m->mount_opt;
+ else
+ sbi->s_mount_opt2 &= ~m->mount_opt;
+ } else {
+ if (arg != 0)
+ sbi->s_mount_opt |= m->mount_opt;
+ else
+ sbi->s_mount_opt &= ~m->mount_opt;
+ }
}
return 1;
}
@@ -2436,6 +2654,9 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
SEQ_OPTS_PUTS("dax=inode");
}
+ if (test_opt2(sb, JOURNAL_FAST_COMMIT))
+ SEQ_OPTS_PUTS("fast_commit");
+
ext4_show_quota_options(seq, sb);
return 0;
}
@@ -3754,7 +3975,7 @@ int ext4_calculate_overhead(struct super_block *sb)
* Add the internal journal blocks whether the journal has been
* loaded or not
*/
- if (sbi->s_journal && !sbi->journal_bdev)
+ if (sbi->s_journal && !sbi->s_journal_bdev)
overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
/* j_inum for internal journal is non-zero */
@@ -3868,8 +4089,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
logical_sb_block = sb_block;
}
- if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) {
+ bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
+ if (IS_ERR(bh)) {
ext4_msg(sb, KERN_ERR, "unable to read superblock");
+ ret = PTR_ERR(bh);
+ bh = NULL;
goto out_fail;
}
/*
@@ -3936,6 +4160,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
#ifdef CONFIG_EXT4_FS_POSIX_ACL
set_opt(sb, POSIX_ACL);
#endif
+ if (ext4_has_feature_fast_commit(sb))
+ set_opt2(sb, JOURNAL_FAST_COMMIT);
/* don't forget to enable journal_csum when metadata_csum is enabled. */
if (ext4_has_metadata_csum(sb))
set_opt(sb, JOURNAL_CHECKSUM);
@@ -4265,10 +4491,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
brelse(bh);
logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
offset = do_div(logical_sb_block, blocksize);
- bh = sb_bread_unmovable(sb, logical_sb_block);
- if (!bh) {
+ bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
+ if (IS_ERR(bh)) {
ext4_msg(sb, KERN_ERR,
"Can't read superblock on 2nd try");
+ ret = PTR_ERR(bh);
+ bh = NULL;
goto failed_mount;
}
es = (struct ext4_super_block *)(bh->b_data + offset);
@@ -4480,18 +4708,20 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
/* Pre-read the descriptors into the buffer cache */
for (i = 0; i < db_count; i++) {
block = descriptor_loc(sb, logical_sb_block, i);
- sb_breadahead_unmovable(sb, block);
+ ext4_sb_breadahead_unmovable(sb, block);
}
for (i = 0; i < db_count; i++) {
struct buffer_head *bh;
block = descriptor_loc(sb, logical_sb_block, i);
- bh = sb_bread_unmovable(sb, block);
- if (!bh) {
+ bh = ext4_sb_bread_unmovable(sb, block);
+ if (IS_ERR(bh)) {
ext4_msg(sb, KERN_ERR,
"can't read group descriptor %d", i);
db_count = i;
+ ret = PTR_ERR(bh);
+ bh = NULL;
goto failed_mount2;
}
rcu_read_lock();
@@ -4539,6 +4769,26 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
mutex_init(&sbi->s_orphan_lock);
+ /* Initialize fast commit stuff */
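+	/*
+	 * Queueing scheme assumed here: updates are tracked on the MAIN
+	 * queues; while a fast commit is running, new updates land on the
+	 * STAGING queues and are spliced back once the commit ends.
+	 */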
+ atomic_set(&sbi->s_fc_subtid, 0);
+ atomic_set(&sbi->s_fc_ineligible_updates, 0);
+ INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_MAIN]);
+ INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_STAGING]);
+ INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_MAIN]);
+ INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_STAGING]);
+ sbi->s_fc_bytes = 0;
+ sbi->s_mount_state &= ~EXT4_FC_INELIGIBLE;
+ sbi->s_mount_state &= ~EXT4_FC_COMMITTING;
+ spin_lock_init(&sbi->s_fc_lock);
+ memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats));
+ sbi->s_fc_replay_state.fc_regions = NULL;
+ sbi->s_fc_replay_state.fc_regions_size = 0;
+ sbi->s_fc_replay_state.fc_regions_used = 0;
+ sbi->s_fc_replay_state.fc_regions_valid = 0;
+ sbi->s_fc_replay_state.fc_modified_inodes = NULL;
+ sbi->s_fc_replay_state.fc_modified_inodes_size = 0;
+ sbi->s_fc_replay_state.fc_modified_inodes_used = 0;
+
sb->s_root = NULL;
needs_recovery = (es->s_last_orphan != 0 ||
@@ -4588,6 +4838,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
clear_opt(sb, JOURNAL_CHECKSUM);
clear_opt(sb, DATA_FLAGS);
+ clear_opt2(sb, JOURNAL_FAST_COMMIT);
sbi->s_journal = NULL;
needs_recovery = 0;
goto no_journal;
@@ -4646,6 +4897,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
+ sbi->s_journal->j_submit_inode_data_buffers =
+ ext4_journal_submit_inode_data_buffers;
+ sbi->s_journal->j_finish_inode_data_buffers =
+ ext4_journal_finish_inode_data_buffers;
no_journal:
if (!test_opt(sb, NO_MBCACHE)) {
@@ -4748,6 +5003,7 @@ no_journal:
goto failed_mount4a;
}
}
+ ext4_fc_replay_cleanup(sb);
ext4_ext_init(sb);
err = ext4_mb_init(sb);
@@ -4814,9 +5070,8 @@ no_journal:
* used to detect the metadata async write error.
*/
spin_lock_init(&sbi->s_bdev_wb_lock);
- if (!sb_rdonly(sb))
- errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
- &sbi->s_bdev_wb_err);
+ errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
+ &sbi->s_bdev_wb_err);
sb->s_bdev->bd_super = sb;
EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
ext4_orphan_cleanup(sb, es);
@@ -4872,6 +5127,7 @@ cantfind_ext4:
failed_mount8:
ext4_unregister_sysfs(sb);
+ kobject_put(&sbi->s_kobj);
failed_mount7:
ext4_unregister_li_request(sb);
failed_mount6:
@@ -4960,6 +5216,7 @@ static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
journal->j_commit_interval = sbi->s_commit_interval;
journal->j_min_batch_time = sbi->s_min_batch_time;
journal->j_max_batch_time = sbi->s_max_batch_time;
+ ext4_fc_init(sb, journal);
write_lock(&journal->j_state_lock);
if (test_opt(sb, BARRIER))
@@ -5102,9 +5359,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
goto out_bdev;
}
journal->j_private = sb;
- ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
- wait_on_buffer(journal->j_sb_buffer);
- if (!buffer_uptodate(journal->j_sb_buffer)) {
+ if (ext4_read_bh_lock(journal->j_sb_buffer, REQ_META | REQ_PRIO, true)) {
ext4_msg(sb, KERN_ERR, "I/O error on journal device");
goto out_journal;
}
@@ -5114,7 +5369,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
be32_to_cpu(journal->j_superblock->s_nr_users));
goto out_journal;
}
- EXT4_SB(sb)->journal_bdev = bdev;
+ EXT4_SB(sb)->s_journal_bdev = bdev;
ext4_init_journal_params(sb, journal);
return journal;
@@ -5708,14 +5963,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
}
/*
- * Update the original bdev mapping's wb_err value
- * which could be used to detect the metadata async
- * write error.
- */
- errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
- &sbi->s_bdev_wb_err);
-
- /*
* Mounting a RDONLY partition read-write, so reread
* and store the current valid flag. (It may have
* been changed by e2fsck since we originally mounted
@@ -5760,7 +6007,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
* Releasing of existing data is done when we are sure remount will
* succeed.
*/
- if (test_opt(sb, BLOCK_VALIDITY) && !sbi->system_blks) {
+ if (test_opt(sb, BLOCK_VALIDITY) && !sbi->s_system_blks) {
err = ext4_setup_system_zone(sb);
if (err)
goto restore_opts;
@@ -5786,7 +6033,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
}
}
#endif
- if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks)
+ if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
ext4_release_system_zone(sb);
/*
@@ -5809,7 +6056,7 @@ restore_opts:
sbi->s_commit_interval = old_opts.s_commit_interval;
sbi->s_min_batch_time = old_opts.s_min_batch_time;
sbi->s_max_batch_time = old_opts.s_max_batch_time;
- if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks)
+ if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
ext4_release_system_zone(sb);
#ifdef CONFIG_QUOTA
sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
@@ -6042,6 +6289,11 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
/* Quotafile not on the same filesystem? */
if (path->dentry->d_sb != sb)
return -EXDEV;
+
+ /* Quota already enabled for this file? */
+ if (IS_NOQUOTA(d_inode(path->dentry)))
+ return -EBUSY;
+
/* Journaling quota? */
if (EXT4_SB(sb)->s_qf_names[type]) {
/* Quotafile not in fs root? */
@@ -6309,6 +6561,10 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
brelse(bh);
out:
if (inode->i_size < off + len) {
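+		/*
+		 * Worked example for the range tracked below, assuming 4K
+		 * blocks (s_blocksize_bits == 12): with i_size == 8192 and
+		 * off + len == 12288, logical blocks 1 through 3 are
+		 * tracked for fast commit replay.
+		 */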
+ ext4_fc_track_range(inode,
+ (inode->i_size > 0 ? inode->i_size - 1 : 0)
+ >> inode->i_sb->s_blocksize_bits,
+ (off + len) >> inode->i_sb->s_blocksize_bits);
i_size_write(inode, off + len);
EXT4_I(inode)->i_disksize = inode->i_size;
err2 = ext4_mark_inode_dirty(handle, inode);
@@ -6437,6 +6693,11 @@ static int __init ext4_init_fs(void)
err = init_inodecache();
if (err)
goto out1;
+
+ err = ext4_fc_init_dentry_cache();
+ if (err)
+ goto out05;
+
register_as_ext3();
register_as_ext2();
err = register_filesystem(&ext4_fs_type);
@@ -6447,6 +6708,7 @@ static int __init ext4_init_fs(void)
out:
unregister_as_ext2();
unregister_as_ext3();
+out05:
destroy_inodecache();
out1:
ext4_exit_mballoc();
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index bfabb799fa45..5ff33d18996a 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -521,6 +521,8 @@ int ext4_register_sysfs(struct super_block *sb)
proc_create_single_data("es_shrinker_info", S_IRUGO,
sbi->s_proc, ext4_seq_es_shrinker_info_show,
sb);
+ proc_create_single_data("fc_info", 0444, sbi->s_proc,
+ ext4_fc_info_show, sb);
proc_create_seq_data("mb_groups", S_IRUGO, sbi->s_proc,
&ext4_mb_seq_groups_ops, sb);
}
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index cba4b877c606..6127e94ea4f5 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -2419,6 +2419,7 @@ retry_inode:
if (IS_SYNC(inode))
ext4_handle_sync(handle);
}
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR);
cleanup:
brelse(is.iloc.bh);
@@ -2496,6 +2497,7 @@ retry:
if (error == 0)
error = error2;
}
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR);
return error;
}
@@ -2928,6 +2930,7 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
error);
goto cleanup;
}
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR);
}
error = 0;
cleanup:
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 6d2da8ad0e6f..fa688e163a80 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -187,20 +187,48 @@ static int journal_wait_on_commit_record(journal_t *journal,
* use writepages() because with delayed allocation we may be doing
* block allocation in writepages().
*/
-static int journal_submit_inode_data_buffers(struct address_space *mapping,
- loff_t dirty_start, loff_t dirty_end)
+int jbd2_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
- int ret;
+ struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = mapping->nrpages * 2,
- .range_start = dirty_start,
- .range_end = dirty_end,
+ .range_start = jinode->i_dirty_start,
+ .range_end = jinode->i_dirty_end,
};
- ret = generic_writepages(mapping, &wbc);
- return ret;
+ /*
+	 * Submit the inode data buffers. We use generic_writepages()
+	 * rather than ->writepages() because the latter can do block
+	 * allocation with delalloc, and we must write out only blocks
+	 * that are already allocated.
+ */
+ return generic_writepages(mapping, &wbc);
+}
+
+/* Send all the data buffers related to an inode */
+int jbd2_submit_inode_data(struct jbd2_inode *jinode)
+{
+ if (!jinode || !(jinode->i_flags & JI_WRITE_DATA))
+ return 0;
+
+ trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
+ return jbd2_journal_submit_inode_data_buffers(jinode);
+}
+EXPORT_SYMBOL(jbd2_submit_inode_data);
+
+int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode)
+{
+ if (!jinode || !(jinode->i_flags & JI_WAIT_DATA) ||
+ !jinode->i_vfs_inode || !jinode->i_vfs_inode->i_mapping)
+ return 0;
+ return filemap_fdatawait_range_keep_errors(
+ jinode->i_vfs_inode->i_mapping, jinode->i_dirty_start,
+ jinode->i_dirty_end);
}
+EXPORT_SYMBOL(jbd2_wait_inode_data);
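+
+/*
+ * Intended pairing (sketch): a fast commit first calls
+ * jbd2_submit_inode_data() on every inode it tracks, then circles back
+ * and calls jbd2_wait_inode_data() on each one, so all writeback is in
+ * flight before anything blocks on it.
+ */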
/*
* Submit all the data buffers of inode associated with the transaction to
@@ -215,29 +243,20 @@ static int journal_submit_data_buffers(journal_t *journal,
{
struct jbd2_inode *jinode;
int err, ret = 0;
- struct address_space *mapping;
spin_lock(&journal->j_list_lock);
list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
- loff_t dirty_start = jinode->i_dirty_start;
- loff_t dirty_end = jinode->i_dirty_end;
-
if (!(jinode->i_flags & JI_WRITE_DATA))
continue;
- mapping = jinode->i_vfs_inode->i_mapping;
jinode->i_flags |= JI_COMMIT_RUNNING;
spin_unlock(&journal->j_list_lock);
- /*
- * submit the inode data buffers. We use writepage
- * instead of writepages. Because writepages can do
- * block allocation with delalloc. We need to write
- * only allocated blocks here.
- */
+ /* submit the inode data buffers. */
trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
- err = journal_submit_inode_data_buffers(mapping, dirty_start,
- dirty_end);
- if (!ret)
- ret = err;
+ if (journal->j_submit_inode_data_buffers) {
+ err = journal->j_submit_inode_data_buffers(jinode);
+ if (!ret)
+ ret = err;
+ }
spin_lock(&journal->j_list_lock);
J_ASSERT(jinode->i_transaction == commit_transaction);
jinode->i_flags &= ~JI_COMMIT_RUNNING;
@@ -248,6 +267,15 @@ static int journal_submit_data_buffers(journal_t *journal,
return ret;
}
+int jbd2_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
+{
+ struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
+
+ return filemap_fdatawait_range_keep_errors(mapping,
+ jinode->i_dirty_start,
+ jinode->i_dirty_end);
+}
+
/*
* Wait for data submitted for writeout, refile inodes to proper
* transaction if needed.
@@ -262,18 +290,16 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
/* For locking, see the comment in journal_submit_data_buffers() */
spin_lock(&journal->j_list_lock);
list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
- loff_t dirty_start = jinode->i_dirty_start;
- loff_t dirty_end = jinode->i_dirty_end;
-
if (!(jinode->i_flags & JI_WAIT_DATA))
continue;
jinode->i_flags |= JI_COMMIT_RUNNING;
spin_unlock(&journal->j_list_lock);
- err = filemap_fdatawait_range_keep_errors(
- jinode->i_vfs_inode->i_mapping, dirty_start,
- dirty_end);
- if (!ret)
- ret = err;
+ /* wait for the inode data buffers writeout. */
+ if (journal->j_finish_inode_data_buffers) {
+ err = journal->j_finish_inode_data_buffers(jinode);
+ if (!ret)
+ ret = err;
+ }
spin_lock(&journal->j_list_lock);
jinode->i_flags &= ~JI_COMMIT_RUNNING;
smp_mb();
@@ -413,6 +439,20 @@ void jbd2_journal_commit_transaction(journal_t *journal)
J_ASSERT(journal->j_running_transaction != NULL);
J_ASSERT(journal->j_committing_transaction == NULL);
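+
+	/*
+	 * A fast commit may be in flight. A full commit must not run
+	 * concurrently with it, so park on j_fc_wait until the fast
+	 * commit side clears JBD2_FAST_COMMIT_ONGOING.
+	 */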
+ write_lock(&journal->j_state_lock);
+ journal->j_flags |= JBD2_FULL_COMMIT_ONGOING;
+ while (journal->j_flags & JBD2_FAST_COMMIT_ONGOING) {
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&journal->j_fc_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ write_unlock(&journal->j_state_lock);
+ schedule();
+ write_lock(&journal->j_state_lock);
+ finish_wait(&journal->j_fc_wait, &wait);
+ }
+ write_unlock(&journal->j_state_lock);
+
commit_transaction = journal->j_running_transaction;
trace_jbd2_start_commit(journal, commit_transaction);
@@ -420,6 +460,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
commit_transaction->t_tid);
write_lock(&journal->j_state_lock);
+ journal->j_fc_off = 0;
J_ASSERT(commit_transaction->t_state == T_RUNNING);
commit_transaction->t_state = T_LOCKED;
@@ -1119,12 +1160,16 @@ restart_loop:
if (journal->j_commit_callback)
journal->j_commit_callback(journal, commit_transaction);
+ if (journal->j_fc_cleanup_callback)
+ journal->j_fc_cleanup_callback(journal, 1);
trace_jbd2_end_commit(journal, commit_transaction);
jbd_debug(1, "JBD2: commit %d complete, head %d\n",
journal->j_commit_sequence, journal->j_tail_sequence);
write_lock(&journal->j_state_lock);
+ journal->j_flags &= ~JBD2_FULL_COMMIT_ONGOING;
+ journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING;
spin_lock(&journal->j_list_lock);
commit_transaction->t_state = T_FINISHED;
/* Check if the transaction can be dropped now that we are finished */
@@ -1136,6 +1181,7 @@ restart_loop:
spin_unlock(&journal->j_list_lock);
write_unlock(&journal->j_state_lock);
wake_up(&journal->j_wait_done_commit);
+ wake_up(&journal->j_fc_wait);
/*
* Calculate overall stats
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 17fdc482f554..0c7c42bd530f 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -91,6 +91,8 @@ EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers);
EXPORT_SYMBOL(jbd2_journal_force_commit);
EXPORT_SYMBOL(jbd2_journal_inode_ranged_write);
EXPORT_SYMBOL(jbd2_journal_inode_ranged_wait);
+EXPORT_SYMBOL(jbd2_journal_submit_inode_data_buffers);
+EXPORT_SYMBOL(jbd2_journal_finish_inode_data_buffers);
EXPORT_SYMBOL(jbd2_journal_init_jbd_inode);
EXPORT_SYMBOL(jbd2_journal_release_jbd_inode);
EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate);
@@ -157,7 +159,9 @@ static void commit_timeout(struct timer_list *t)
*
* 1) COMMIT: Every so often we need to commit the current state of the
* filesystem to disk. The journal thread is responsible for writing
- * all of the metadata buffers to disk.
+ * all of the metadata buffers to disk. If a fast commit is ongoing,
+ * the journal thread waits until it is done and then continues from
+ * there.
*
* 2) CHECKPOINT: We cannot reuse a used section of the log file until all
* of the data in that part of the log has been rewritten elsewhere on
@@ -714,6 +718,75 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
return err;
}
+/*
+ * Start a fast commit. If there's an ongoing fast or full commit, wait for
+ * it to complete. Returns 0 if a new fast commit was started. Returns
+ * -EALREADY if a fast commit is not needed, either because a commit is
+ * already in progress or because this tid has already been committed.
+ * Returns -EINVAL if no jbd2 commit has yet been performed.
+ */
+int jbd2_fc_begin_commit(journal_t *journal, tid_t tid)
+{
+ /*
+	 * Fast commits are only allowed if at least one full commit has
+	 * been processed.
+ */
+ if (!journal->j_stats.ts_tid)
+ return -EINVAL;
+
+ if (tid <= journal->j_commit_sequence)
+ return -EALREADY;
+
+ write_lock(&journal->j_state_lock);
+ if (journal->j_flags & JBD2_FULL_COMMIT_ONGOING ||
+ (journal->j_flags & JBD2_FAST_COMMIT_ONGOING)) {
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&journal->j_fc_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ write_unlock(&journal->j_state_lock);
+ schedule();
+ finish_wait(&journal->j_fc_wait, &wait);
+ return -EALREADY;
+ }
+ journal->j_flags |= JBD2_FAST_COMMIT_ONGOING;
+ write_unlock(&journal->j_state_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(jbd2_fc_begin_commit);
+
+/*
+ * Stop a fast commit. If fallback is set, this function starts commit of
+ * TID tid before any other fast commit can start.
+ */
+static int __jbd2_fc_end_commit(journal_t *journal, tid_t tid, bool fallback)
+{
+ if (journal->j_fc_cleanup_callback)
+ journal->j_fc_cleanup_callback(journal, 0);
+ write_lock(&journal->j_state_lock);
+ journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING;
+ if (fallback)
+ journal->j_flags |= JBD2_FULL_COMMIT_ONGOING;
+ write_unlock(&journal->j_state_lock);
+ wake_up(&journal->j_fc_wait);
+ if (fallback)
+ return jbd2_complete_transaction(journal, tid);
+ return 0;
+}
+
+int jbd2_fc_end_commit(journal_t *journal)
+{
+	return __jbd2_fc_end_commit(journal, 0, false);
+}
+EXPORT_SYMBOL(jbd2_fc_end_commit);
+
+int jbd2_fc_end_commit_fallback(journal_t *journal, tid_t tid)
+{
+	return __jbd2_fc_end_commit(journal, tid, true);
+}
+EXPORT_SYMBOL(jbd2_fc_end_commit_fallback);
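+
+/*
+ * Expected caller flow (sketch; the fs-side helper name is assumed):
+ *
+ *	if (jbd2_fc_begin_commit(journal, tid))
+ *		return jbd2_complete_transaction(journal, tid);
+ *	err = fs_write_fc_blocks(journal, tid);
+ *	if (err)
+ *		return jbd2_fc_end_commit_fallback(journal, tid);
+ *	return jbd2_fc_end_commit(journal);
+ */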
+
/* Return 1 when transaction with given tid has already committed. */
int jbd2_transaction_committed(journal_t *journal, tid_t tid)
{
@@ -782,6 +855,110 @@ int jbd2_journal_next_log_block(journal_t *journal, unsigned long long *retp)
return jbd2_journal_bmap(journal, blocknr, retp);
}
+/* Map one fast commit buffer for use by the file system */
+int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out)
+{
+ unsigned long long pblock;
+ unsigned long blocknr;
+ int ret = 0;
+ struct buffer_head *bh;
+ int fc_off;
+
+ *bh_out = NULL;
+ write_lock(&journal->j_state_lock);
+
+ if (journal->j_fc_off + journal->j_fc_first < journal->j_fc_last) {
+ fc_off = journal->j_fc_off;
+ blocknr = journal->j_fc_first + fc_off;
+ journal->j_fc_off++;
+ } else {
+ ret = -EINVAL;
+ }
+ write_unlock(&journal->j_state_lock);
+
+ if (ret)
+ return ret;
+
+ ret = jbd2_journal_bmap(journal, blocknr, &pblock);
+ if (ret)
+ return ret;
+
+ bh = __getblk(journal->j_dev, pblock, journal->j_blocksize);
+ if (!bh)
+ return -ENOMEM;
+
+ lock_buffer(bh);
+
+ clear_buffer_uptodate(bh);
+ set_buffer_dirty(bh);
+ unlock_buffer(bh);
+ journal->j_fc_wbuf[fc_off] = bh;
+
+ *bh_out = bh;
+
+ return 0;
+}
+EXPORT_SYMBOL(jbd2_fc_get_buf);
+
+/*
+ * Wait on fast commit buffers that were allocated by jbd2_fc_get_buf
+ * for completion.
+ */
+int jbd2_fc_wait_bufs(journal_t *journal, int num_blks)
+{
+ struct buffer_head *bh;
+ int i, j_fc_off;
+
+ read_lock(&journal->j_state_lock);
+ j_fc_off = journal->j_fc_off;
+ read_unlock(&journal->j_state_lock);
+
+ /*
+ * Wait in reverse order to minimize chances of us being woken up before
+ * all IOs have completed
+ */
+ for (i = j_fc_off - 1; i >= j_fc_off - num_blks; i--) {
+		bh = journal->j_fc_wbuf[i];
+		wait_on_buffer(bh);
+		/* Check uptodate while we still hold a reference. */
+		if (unlikely(!buffer_uptodate(bh))) {
+			put_bh(bh);
+			return -EIO;
+		}
+		put_bh(bh);
+		journal->j_fc_wbuf[i] = NULL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(jbd2_fc_wait_bufs);
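+
+/*
+ * Expected producer loop (sketch; error handling omitted): get each fast
+ * commit block, fill it, submit it, then wait for the whole batch at once.
+ *
+ *	for (i = 0; i < nblks; i++) {
+ *		jbd2_fc_get_buf(journal, &bh);
+ *		memcpy(bh->b_data, payload, journal->j_blocksize);
+ *		write_dirty_buffer(bh, REQ_SYNC);
+ *	}
+ *	jbd2_fc_wait_bufs(journal, nblks);
+ */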
+
+/*
+ * Release any fast commit buffers still held after jbd2_fc_get_buf(),
+ * e.g. when a fast commit is abandoned part way through.
+ */
+int jbd2_fc_release_bufs(journal_t *journal)
+{
+ struct buffer_head *bh;
+ int i, j_fc_off;
+
+ read_lock(&journal->j_state_lock);
+ j_fc_off = journal->j_fc_off;
+ read_unlock(&journal->j_state_lock);
+
+ /*
+	 * Release in reverse order of allocation; the newest buffers are
+	 * dropped first.
+ */
+ for (i = j_fc_off - 1; i >= 0; i--) {
+ bh = journal->j_fc_wbuf[i];
+ if (!bh)
+ break;
+ put_bh(bh);
+ journal->j_fc_wbuf[i] = NULL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(jbd2_fc_release_bufs);
+
/*
* Conversion of logical to physical block numbers for the journal
*
@@ -1140,6 +1317,7 @@ static journal_t *journal_init_common(struct block_device *bdev,
init_waitqueue_head(&journal->j_wait_commit);
init_waitqueue_head(&journal->j_wait_updates);
init_waitqueue_head(&journal->j_wait_reserved);
+ init_waitqueue_head(&journal->j_fc_wait);
mutex_init(&journal->j_abort_mutex);
mutex_init(&journal->j_barrier);
mutex_init(&journal->j_checkpoint_mutex);
@@ -1179,6 +1357,14 @@ static journal_t *journal_init_common(struct block_device *bdev,
if (!journal->j_wbuf)
goto err_cleanup;
+ if (journal->j_fc_wbufsize > 0) {
+ journal->j_fc_wbuf = kmalloc_array(journal->j_fc_wbufsize,
+ sizeof(struct buffer_head *),
+ GFP_KERNEL);
+ if (!journal->j_fc_wbuf)
+ goto err_cleanup;
+ }
+
bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize);
if (!bh) {
pr_err("%s: Cannot get buffer for journal superblock\n",
@@ -1192,11 +1378,23 @@ static journal_t *journal_init_common(struct block_device *bdev,
err_cleanup:
kfree(journal->j_wbuf);
+ kfree(journal->j_fc_wbuf);
jbd2_journal_destroy_revoke(journal);
kfree(journal);
return NULL;
}
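+/*
+ * Size the fast commit area. Meant to be called by the file system after
+ * the journal is initialized but before jbd2_journal_load(), e.g. (the
+ * block count constant is assumed):
+ *
+ *	jbd2_fc_init(journal, EXT4_NUM_FC_BLKS);
+ */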
+int jbd2_fc_init(journal_t *journal, int num_fc_blks)
+{
+ journal->j_fc_wbufsize = num_fc_blks;
+ journal->j_fc_wbuf = kmalloc_array(journal->j_fc_wbufsize,
+ sizeof(struct buffer_head *), GFP_KERNEL);
+ if (!journal->j_fc_wbuf)
+ return -ENOMEM;
+ return 0;
+}
+EXPORT_SYMBOL(jbd2_fc_init);
+
/* jbd2_journal_init_dev and jbd2_journal_init_inode:
*
* Create a journal structure assigned some fixed set of disk blocks to
@@ -1314,11 +1512,20 @@ static int journal_reset(journal_t *journal)
}
journal->j_first = first;
- journal->j_last = last;
- journal->j_head = first;
- journal->j_tail = first;
- journal->j_free = last - first;
+ if (jbd2_has_feature_fast_commit(journal) &&
+ journal->j_fc_wbufsize > 0) {
+ journal->j_fc_last = last;
+ journal->j_last = last - journal->j_fc_wbufsize;
+ journal->j_fc_first = journal->j_last + 1;
+ journal->j_fc_off = 0;
+ } else {
+ journal->j_last = last;
+ }
+
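+	/*
+	 * Resulting log layout (sketch): the fast commit area is carved
+	 * out of the tail of the on-disk log.
+	 *
+	 *	j_first ........ j_last | j_fc_first ........ j_fc_last
+	 *	<-- full commit area --> <-- fast commit area -->
+	 */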
+ journal->j_head = journal->j_first;
+ journal->j_tail = journal->j_first;
+ journal->j_free = journal->j_last - journal->j_first;
journal->j_tail_sequence = journal->j_transaction_sequence;
journal->j_commit_sequence = journal->j_transaction_sequence - 1;
@@ -1464,6 +1671,7 @@ out:
static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
{
journal_superblock_t *sb = journal->j_superblock;
+ bool had_fast_commit = false;
BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
lock_buffer(journal->j_sb_buffer);
@@ -1477,9 +1685,20 @@ static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
sb->s_start = cpu_to_be32(0);
+ if (jbd2_has_feature_fast_commit(journal)) {
+ /*
+		 * While the journal is clean, temporarily drop the fast
+		 * commit flag so we do not make the file system
+		 * incompatible with older kernels.
+ */
+ jbd2_clear_feature_fast_commit(journal);
+ had_fast_commit = true;
+ }
jbd2_write_superblock(journal, write_op);
+ if (had_fast_commit)
+ jbd2_set_feature_fast_commit(journal);
+
/* Log is no longer empty */
write_lock(&journal->j_state_lock);
journal->j_flags |= JBD2_FLUSHED;
@@ -1663,9 +1882,18 @@ static int load_superblock(journal_t *journal)
journal->j_tail_sequence = be32_to_cpu(sb->s_sequence);
journal->j_tail = be32_to_cpu(sb->s_start);
journal->j_first = be32_to_cpu(sb->s_first);
- journal->j_last = be32_to_cpu(sb->s_maxlen);
journal->j_errno = be32_to_cpu(sb->s_errno);
+ if (jbd2_has_feature_fast_commit(journal) &&
+ journal->j_fc_wbufsize > 0) {
+ journal->j_fc_last = be32_to_cpu(sb->s_maxlen);
+ journal->j_last = journal->j_fc_last - journal->j_fc_wbufsize;
+ journal->j_fc_first = journal->j_last + 1;
+ journal->j_fc_off = 0;
+ } else {
+ journal->j_last = be32_to_cpu(sb->s_maxlen);
+ }
+
return 0;
}
@@ -1726,6 +1954,9 @@ int jbd2_journal_load(journal_t *journal)
*/
journal->j_flags &= ~JBD2_ABORT;
+ if (journal->j_fc_wbufsize > 0)
+ jbd2_journal_set_features(journal, 0, 0,
+ JBD2_FEATURE_INCOMPAT_FAST_COMMIT);
/* OK, we've finished with the dynamic journal bits:
* reinitialise the dynamic contents of the superblock in memory
* and reset them on disk. */
@@ -1809,6 +2040,8 @@ int jbd2_journal_destroy(journal_t *journal)
jbd2_journal_destroy_revoke(journal);
if (journal->j_chksum_driver)
crypto_free_shash(journal->j_chksum_driver);
+ if (journal->j_fc_wbufsize > 0)
+ kfree(journal->j_fc_wbuf);
kfree(journal->j_wbuf);
kfree(journal);
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index faa97d748474..eb2606133cd8 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -35,7 +35,6 @@ struct recovery_info
int nr_revoke_hits;
};
-enum passtype {PASS_SCAN, PASS_REVOKE, PASS_REPLAY};
static int do_one_pass(journal_t *journal,
struct recovery_info *info, enum passtype pass);
static int scan_revoke_records(journal_t *, struct buffer_head *,
@@ -225,10 +224,51 @@ static int count_tags(journal_t *journal, struct buffer_head *bh)
/* Make sure we wrap around the log correctly! */
#define wrap(journal, var) \
do { \
- if (var >= (journal)->j_last) \
- var -= ((journal)->j_last - (journal)->j_first); \
+ unsigned long _wrap_last = \
+ jbd2_has_feature_fast_commit(journal) ? \
+ (journal)->j_fc_last : (journal)->j_last; \
+ \
+ if (var >= _wrap_last) \
+ var -= (_wrap_last - (journal)->j_first); \
} while (0)
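+/*
+ * Contract assumed for ->j_fc_replay_callback() below: it is invoked once
+ * per fast commit block in each recovery pass, with the block's offset
+ * into the fast commit area and the commit id the blocks must belong to.
+ * A negative return aborts replay; JBD2_FC_REPLAY_STOP ends the scan.
+ */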
+static int fc_do_one_pass(journal_t *journal,
+ struct recovery_info *info, enum passtype pass)
+{
+ unsigned int expected_commit_id = info->end_transaction;
+ unsigned long next_fc_block;
+ struct buffer_head *bh;
+ int err = 0;
+
+ next_fc_block = journal->j_fc_first;
+ if (!journal->j_fc_replay_callback)
+ return 0;
+
+ while (next_fc_block <= journal->j_fc_last) {
+ jbd_debug(3, "Fast commit replay: next block %ld",
+ next_fc_block);
+ err = jread(&bh, journal, next_fc_block);
+ if (err) {
+ jbd_debug(3, "Fast commit replay: read error");
+ break;
+ }
+
+ jbd_debug(3, "Processing fast commit blk with seq %d");
+ err = journal->j_fc_replay_callback(journal, bh, pass,
+ next_fc_block - journal->j_fc_first,
+ expected_commit_id);
+ next_fc_block++;
+ if (err < 0 || err == JBD2_FC_REPLAY_STOP)
+ break;
+ err = 0;
+ }
+
+ if (err)
+ jbd_debug(3, "Fast commit replay failed, err = %d\n", err);
+
+ return err;
+}
+
/**
* jbd2_journal_recover - recovers a on-disk journal
* @journal: the journal to recover
@@ -428,6 +468,8 @@ static int do_one_pass(journal_t *journal,
__u32 crc32_sum = ~0; /* Transactional Checksums */
int descr_csum_size = 0;
int block_error = 0;
+ bool need_check_commit_time = false;
+ __u64 last_trans_commit_time = 0, commit_time;
/*
* First thing is to establish what we expect to find in the log
@@ -470,7 +512,9 @@ static int do_one_pass(journal_t *journal,
break;
jbd_debug(2, "Scanning for sequence ID %u at %lu/%lu\n",
- next_commit_ID, next_log_block, journal->j_last);
+ next_commit_ID, next_log_block,
+ jbd2_has_feature_fast_commit(journal) ?
+ journal->j_fc_last : journal->j_last);
/* Skip over each chunk of the transaction looking
* either the next descriptor block or the final commit
@@ -520,12 +564,21 @@ static int do_one_pass(journal_t *journal,
if (descr_csum_size > 0 &&
!jbd2_descriptor_block_csum_verify(journal,
bh->b_data)) {
- printk(KERN_ERR "JBD2: Invalid checksum "
- "recovering block %lu in log\n",
- next_log_block);
- err = -EFSBADCRC;
- brelse(bh);
- goto failed;
+ /*
+ * PASS_SCAN can see stale blocks due to lazy
+ * journal init. Don't error out on those yet.
+ */
+ if (pass != PASS_SCAN) {
+ pr_err("JBD2: Invalid checksum recovering block %lu in log\n",
+ next_log_block);
+ err = -EFSBADCRC;
+ brelse(bh);
+ goto failed;
+ }
+ need_check_commit_time = true;
+ jbd_debug(1,
+ "invalid descriptor block found in %lu\n",
+ next_log_block);
}
/* If it is a valid descriptor block, replay it
@@ -535,6 +588,7 @@ static int do_one_pass(journal_t *journal,
if (pass != PASS_REPLAY) {
if (pass == PASS_SCAN &&
jbd2_has_feature_checksum(journal) &&
+ !need_check_commit_time &&
!info->end_transaction) {
if (calc_chksums(journal, bh,
&next_log_block,
@@ -683,11 +737,41 @@ static int do_one_pass(journal_t *journal,
* mentioned conditions. Hence assume
* "Interrupted Commit".)
*/
+ commit_time = be64_to_cpu(
+ ((struct commit_header *)bh->b_data)->h_commit_sec);
+ /*
+			 * If need_check_commit_time is set, we are in
+			 * PASS_SCAN and checksum verification failed earlier.
+			 * If commit_time is still increasing, this is the
+			 * same journal and the failure is real corruption;
+			 * otherwise we have hit a stale journal block, so
+			 * just end this recovery.
+ */
+ if (need_check_commit_time) {
+ if (commit_time >= last_trans_commit_time) {
+ pr_err("JBD2: Invalid checksum found in transaction %u\n",
+ next_commit_ID);
+ err = -EFSBADCRC;
+ brelse(bh);
+ goto failed;
+ }
+ ignore_crc_mismatch:
+ /*
+				 * The block likely does not belong to the
+				 * same journal; just end this recovery with
+				 * success.
+ */
+ jbd_debug(1, "JBD2: Invalid checksum ignored in transaction %u, likely stale data\n",
+ next_commit_ID);
+ err = 0;
+ brelse(bh);
+ goto done;
+ }
- /* Found an expected commit block: if checksums
- * are present verify them in PASS_SCAN; else not
+ /*
+ * Found an expected commit block: if checksums
+ * are present, verify them in PASS_SCAN; else not
* much to do other than move on to the next sequence
- * number. */
+ * number.
+ */
if (pass == PASS_SCAN &&
jbd2_has_feature_checksum(journal)) {
struct commit_header *cbh =
@@ -719,6 +803,8 @@ static int do_one_pass(journal_t *journal,
!jbd2_commit_block_csum_verify(journal,
bh->b_data)) {
chksum_error:
+ if (commit_time < last_trans_commit_time)
+ goto ignore_crc_mismatch;
info->end_transaction = next_commit_ID;
if (!jbd2_has_feature_async_commit(journal)) {
@@ -728,11 +814,24 @@ static int do_one_pass(journal_t *journal,
break;
}
}
+ if (pass == PASS_SCAN)
+ last_trans_commit_time = commit_time;
brelse(bh);
next_commit_ID++;
continue;
case JBD2_REVOKE_BLOCK:
+ /*
+ * Check revoke block crc in pass_scan, if csum verify
+ * failed, check commit block time later.
+ */
+ if (pass == PASS_SCAN &&
+ !jbd2_descriptor_block_csum_verify(journal,
+ bh->b_data)) {
+ jbd_debug(1, "JBD2: invalid revoke block found in %lu\n",
+ next_log_block);
+ need_check_commit_time = true;
+ }
/* If we aren't in the REVOKE pass, then we can
* just skip over this block. */
if (pass != PASS_REVOKE) {
@@ -777,6 +876,13 @@ static int do_one_pass(journal_t *journal,
success = -EIO;
}
}
+
+ if (jbd2_has_feature_fast_commit(journal) && pass != PASS_REVOKE) {
+ err = fc_do_one_pass(journal, info, pass);
+ if (err)
+ success = err;
+ }
+
if (block_error && success == 0)
success = -EIO;
return success;
@@ -800,9 +906,6 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
offset = sizeof(jbd2_journal_revoke_header_t);
rcount = be32_to_cpu(header->r_count);
- if (!jbd2_descriptor_block_csum_verify(journal, header))
- return -EFSBADCRC;
-
if (jbd2_journal_has_csum_v2or3(journal))
csum_size = sizeof(struct jbd2_journal_block_tail);
if (rcount > journal->j_blocksize - csum_size)
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index b425f0b01dce..b9a9d69dde7e 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -883,6 +883,10 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
OCFS2_JOURNAL_DIRTY_FL);
journal->j_journal = j_journal;
+ journal->j_journal->j_submit_inode_data_buffers =
+ jbd2_journal_submit_inode_data_buffers;
+ journal->j_journal->j_finish_inode_data_buffers =
+ jbd2_journal_finish_inode_data_buffers;
journal->j_inode = inode;
journal->j_bh = bh;