Diffstat (limited to 'fs/ext4/mballoc.c')
-rw-r--r--  fs/ext4/mballoc.c  254
1 file changed, 122 insertions(+), 132 deletions(-)
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 21b903fe546e..1e599305d85f 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
+#include <linux/freezer.h>
#include <trace/events/ext4.h>
/*
@@ -874,7 +875,7 @@ static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context
enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
- struct ext4_group_info *iter, *grp;
+ struct ext4_group_info *iter;
int i;
if (ac->ac_status == AC_STATUS_FOUND)
@@ -883,7 +884,6 @@ static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context
if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED))
atomic_inc(&sbi->s_bal_p2_aligned_bad_suggestions);
- grp = NULL;
for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
if (list_empty(&sbi->s_mb_largest_free_orders[i]))
continue;
@@ -892,28 +892,22 @@ static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context
read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
continue;
}
- grp = NULL;
list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
bb_largest_free_order_node) {
if (sbi->s_mb_stats)
atomic64_inc(&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]);
if (likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) {
- grp = iter;
- break;
+ *group = iter->bb_group;
+ ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED;
+ read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
+ return;
}
}
read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
- if (grp)
- break;
}
- if (!grp) {
- /* Increment cr and search again */
- *new_cr = CR_GOAL_LEN_FAST;
- } else {
- *group = grp->bb_group;
- ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED;
- }
+ /* Increment cr and search again if no group is found */
+ *new_cr = CR_GOAL_LEN_FAST;
}
/*
@@ -966,16 +960,25 @@ static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *
for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
i < MB_NUM_ORDERS(ac->ac_sb); i++) {
grp = ext4_mb_find_good_group_avg_frag_lists(ac, i);
- if (grp)
- break;
+ if (grp) {
+ *group = grp->bb_group;
+ ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED;
+ return;
+ }
}
- if (grp) {
- *group = grp->bb_group;
- ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED;
- } else {
+ /*
+ * CR_BEST_AVAIL_LEN works based on the concept that we have
+ * a larger normalized goal len request which can be trimmed to
+ * a smaller goal len such that it can still satisfy original
+ * request len. However, allocation request for non-regular
+ * files never gets normalized.
+ * See function ext4_mb_normalize_request() (EXT4_MB_HINT_DATA).
+ */
+ if (ac->ac_flags & EXT4_MB_HINT_DATA)
*new_cr = CR_BEST_AVAIL_LEN;
- }
+ else
+ *new_cr = CR_GOAL_LEN_SLOW;
}
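The fallback above only makes sense given the order in which the allocation criteria are defined; for orientation (this is quoted context, not part of the diff), the criteria enum in fs/ext4/ext4.h is ordered roughly from cheapest to most expensive scan:

	enum criteria {
		CR_POWER2_ALIGNED,	/* power-of-2 aligned allocation via buddy orders */
		CR_GOAL_LEN_FAST,	/* groups expected to satisfy the full goal length */
		CR_BEST_AVAIL_LEN,	/* trim the normalized goal length and retry */
		CR_GOAL_LEN_SLOW,	/* slower per-group scan for the goal length */
		CR_ANY_FREE,		/* accept any free extent, most expensive */
		EXT4_MB_NUM_CRS
	};

So with this change, requests without EXT4_MB_HINT_DATA (which are never normalized) skip CR_BEST_AVAIL_LEN and fall straight from CR_GOAL_LEN_FAST to CR_GOAL_LEN_SLOW.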
/*
@@ -1051,18 +1054,16 @@ static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context
ac->ac_g_ex.fe_len);
grp = ext4_mb_find_good_group_avg_frag_lists(ac, frag_order);
- if (grp)
- break;
+ if (grp) {
+ *group = grp->bb_group;
+ ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED;
+ return;
+ }
}
- if (grp) {
- *group = grp->bb_group;
- ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED;
- } else {
- /* Reset goal length to original goal length before falling into CR_GOAL_LEN_SLOW */
- ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
- *new_cr = CR_GOAL_LEN_SLOW;
- }
+ /* Reset goal length to original goal length before falling into CR_GOAL_LEN_SLOW */
+ ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
+ *new_cr = CR_GOAL_LEN_SLOW;
}
static inline int should_optimize_scan(struct ext4_allocation_context *ac)
@@ -1080,8 +1081,9 @@ static inline int should_optimize_scan(struct ext4_allocation_context *ac)
* Return next linear group for allocation. If linear traversal should not be
* performed, this function just returns the same group
*/
-static int
-next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups)
+static ext4_group_t
+next_linear_group(struct ext4_allocation_context *ac, ext4_group_t group,
+ ext4_group_t ngroups)
{
if (!should_optimize_scan(ac))
goto inc_and_return;
@@ -1255,7 +1257,7 @@ void ext4_mb_generate_buddy(struct super_block *sb,
static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
{
ext4_group_t ngroups;
- int blocksize;
+ unsigned int blocksize;
int blocks_per_page;
int groups_per_page;
int err = 0;
@@ -2450,7 +2452,7 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
break;
}
- if (ac->ac_criteria < CR_FAST) {
+ if (!ext4_mb_cr_expensive(ac->ac_criteria)) {
/*
* In CR_GOAL_LEN_FAST and CR_BEST_AVAIL_LEN, we are
* sure that this group will have a large enough
@@ -2553,7 +2555,7 @@ static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
BUG_ON(cr < CR_POWER2_ALIGNED || cr >= EXT4_MB_NUM_CRS);
- if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp) || !grp))
+ if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
return false;
free = grp->bb_free;
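The `cr < CR_FAST` comparisons replaced throughout this patch are now expressed via ext4_mb_cr_expensive(). A minimal sketch of that helper, assuming it is added to ext4.h by a companion change, would be:

	static inline bool ext4_mb_cr_expensive(enum criteria cr)
	{
		/* CR_GOAL_LEN_SLOW and CR_ANY_FREE are the expensive scans */
		return cr >= CR_GOAL_LEN_SLOW;
	}

If CR_FAST was previously an alias for CR_GOAL_LEN_SLOW, then !ext4_mb_cr_expensive(cr) keeps the old semantics while naming the intent.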
@@ -2634,7 +2636,12 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
free = grp->bb_free;
if (free == 0)
goto out;
- if (cr <= CR_FAST && free < ac->ac_g_ex.fe_len)
+ /*
+ * In all criterias except CR_ANY_FREE we try to avoid groups that
+ * can't possibly satisfy the full goal request due to insufficient
+ * free blocks.
+ */
+ if (cr < CR_ANY_FREE && free < ac->ac_g_ex.fe_len)
goto out;
if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
goto out;
@@ -2658,7 +2665,7 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
* sure we locate metadata blocks in the first block group in
* the flex_bg if possible.
*/
- if (cr < CR_FAST &&
+ if (!ext4_mb_cr_expensive(cr) &&
(!sbi->s_log_groups_per_flex ||
((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
!(ext4_has_group_desc_csum(sb) &&
@@ -2787,8 +2794,8 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
/*
* ac->ac_2order is set only if the fe_len is a power of 2
- * if ac->ac_2order is set we also set criteria to 0 so that we
- * try exact allocation using buddy.
+ * if ac->ac_2order is set we also set criteria to CR_POWER2_ALIGNED
+ * so that we try exact allocation using buddy.
*/
i = fls(ac->ac_g_ex.fe_len);
ac->ac_2order = 0;
@@ -2800,10 +2807,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
* requests upto maximum buddy size we have constructed.
*/
if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
- /*
- * This should tell if fe_len is exactly power of 2
- */
- if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
+ if (is_power_of_2(ac->ac_g_ex.fe_len))
ac->ac_2order = array_index_nospec(i - 1,
MB_NUM_ORDERS(sb));
}
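The replaced bit test and is_power_of_2() agree for any non-zero fe_len. A small userspace sketch checking the equivalence (fls() and is_power_of_2() are emulated here, since the kernel helpers are not available outside the kernel):

	#include <stdbool.h>
	#include <stdio.h>

	static int fls_emul(unsigned int x)		/* stand-in for the kernel's fls() */
	{
		return x ? 32 - __builtin_clz(x) : 0;
	}

	static bool is_power_of_2_emul(unsigned long n)	/* as in <linux/log2.h> */
	{
		return n != 0 && (n & (n - 1)) == 0;
	}

	int main(void)
	{
		for (unsigned int len = 1; len <= 64; len++) {
			int i = fls_emul(len);
			/* old mballoc test: clear the top bit, the rest must be zero */
			bool old_check = (len & ~(1U << (i - 1))) == 0;

			if (old_check != is_power_of_2_emul(len))
				printf("mismatch at len=%u\n", len);	/* never prints */
		}
		return 0;
	}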
@@ -2848,11 +2852,11 @@ repeat:
/*
* Batch reads of the block allocation bitmaps
* to get multiple READs in flight; limit
- * prefetching at cr=0/1, otherwise mballoc can
- * spend a lot of time loading imperfect groups
+ * prefetching at inexpensive CR, otherwise mballoc
+ * can spend a lot of time loading imperfect groups
*/
if ((prefetch_grp == group) &&
- (cr >= CR_FAST ||
+ (ext4_mb_cr_expensive(cr) ||
prefetch_ios < sbi->s_mb_prefetch_limit)) {
nr = sbi->s_mb_prefetch;
if (ext4_has_feature_flex_bg(sb)) {
@@ -3501,11 +3505,10 @@ static void ext4_discard_work(struct work_struct *work)
struct super_block *sb = sbi->s_sb;
struct ext4_free_data *fd, *nfd;
struct ext4_buddy e4b;
- struct list_head discard_list;
+ LIST_HEAD(discard_list);
ext4_group_t grp, load_grp;
int err = 0;
- INIT_LIST_HEAD(&discard_list);
spin_lock(&sbi->s_md_lock);
list_splice_init(&sbi->s_discard_list, &discard_list);
spin_unlock(&sbi->s_md_lock);
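The on-stack list conversions here (and in ext4_process_freed_data(), ext4_mb_discard_group_preallocations(), ext4_discard_preallocations() and ext4_mb_discard_lg_preallocations() below) rely on LIST_HEAD() declaring and initializing in one step; the macros in <linux/list.h> are essentially:

	#define LIST_HEAD_INIT(name)	{ &(name), &(name) }
	#define LIST_HEAD(name)		struct list_head name = LIST_HEAD_INIT(name)

	/* i.e. LIST_HEAD(discard_list); is equivalent to
	 *	struct list_head discard_list;
	 *	INIT_LIST_HEAD(&discard_list);
	 * but without the separate runtime initialization statement.
	 */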
@@ -3879,12 +3882,10 @@ void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_free_data *entry, *tmp;
- struct list_head freed_data_list;
+ LIST_HEAD(freed_data_list);
struct list_head *cut_pos = NULL;
bool wake;
- INIT_LIST_HEAD(&freed_data_list);
-
spin_lock(&sbi->s_md_lock);
list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
if (entry->efd_tid != commit_tid)
@@ -4084,7 +4085,7 @@ void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_group_t group;
ext4_grpblk_t blkoff;
- int i, err;
+ int i, err = 0;
int already;
unsigned int clen, clen_changed, thisgrp_len;
@@ -4222,12 +4223,13 @@ ext4_mb_pa_rb_next_iter(ext4_lblk_t new_start, ext4_lblk_t cur_start, struct rb_
static inline void
ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
- ext4_lblk_t start, ext4_lblk_t end)
+ ext4_lblk_t start, loff_t end)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
struct ext4_prealloc_space *tmp_pa;
- ext4_lblk_t tmp_pa_start, tmp_pa_end;
+ ext4_lblk_t tmp_pa_start;
+ loff_t tmp_pa_end;
struct rb_node *iter;
read_lock(&ei->i_prealloc_lock);
@@ -4236,7 +4238,7 @@ ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
pa_node.inode_node);
tmp_pa_start = tmp_pa->pa_lstart;
- tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
+ tmp_pa_end = pa_logical_end(sbi, tmp_pa);
spin_lock(&tmp_pa->pa_lock);
if (tmp_pa->pa_deleted == 0)
@@ -4258,14 +4260,14 @@ ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
*/
static inline void
ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
- ext4_lblk_t *start, ext4_lblk_t *end)
+ ext4_lblk_t *start, loff_t *end)
{
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL;
struct rb_node *iter;
- ext4_lblk_t new_start, new_end;
- ext4_lblk_t tmp_pa_start, tmp_pa_end, left_pa_end = -1, right_pa_start = -1;
+ ext4_lblk_t new_start, tmp_pa_start, right_pa_start = -1;
+ loff_t new_end, tmp_pa_end, left_pa_end = -1;
new_start = *start;
new_end = *end;
@@ -4284,7 +4286,7 @@ ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
pa_node.inode_node);
tmp_pa_start = tmp_pa->pa_lstart;
- tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
+ tmp_pa_end = pa_logical_end(sbi, tmp_pa);
/* PA must not overlap original request */
spin_lock(&tmp_pa->pa_lock);
@@ -4364,8 +4366,7 @@ ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
}
if (left_pa) {
- left_pa_end =
- left_pa->pa_lstart + EXT4_C2B(sbi, left_pa->pa_len);
+ left_pa_end = pa_logical_end(sbi, left_pa);
BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical);
}
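pa_logical_end() and extent_logical_end(), used above and below in place of the open-coded pa_lstart + EXT4_C2B(...) arithmetic, are presumably added to mballoc.h by a companion patch. A sketch consistent with their use here (note the loff_t return type, which is why the surrounding end variables widen from ext4_lblk_t):

	/* hypothetical reconstruction, not part of this diff */
	static inline loff_t extent_logical_end(struct ext4_sb_info *sbi,
						struct ext4_free_extent *fex)
	{
		/* loff_t avoids overflowing ext4_lblk_t when computing the end */
		return (loff_t)fex->fe_logical + EXT4_C2B(sbi, fex->fe_len);
	}

	static inline loff_t pa_logical_end(struct ext4_sb_info *sbi,
					    struct ext4_prealloc_space *pa)
	{
		return (loff_t)pa->pa_lstart + EXT4_C2B(sbi, pa->pa_len);
	}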
@@ -4404,8 +4405,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
struct ext4_super_block *es = sbi->s_es;
int bsbits, max;
- ext4_lblk_t end;
- loff_t size, start_off;
+ loff_t size, start_off, end;
loff_t orig_size __maybe_unused;
ext4_lblk_t start;
@@ -4432,7 +4432,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
/* first, let's learn actual file size
* given current request is allocated */
- size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
+ size = extent_logical_end(sbi, &ac->ac_o_ex);
size = size << bsbits;
if (size < i_size_read(ac->ac_inode))
size = i_size_read(ac->ac_inode);
@@ -4766,7 +4766,6 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
struct ext4_locality_group *lg;
struct ext4_prealloc_space *tmp_pa = NULL, *cpa = NULL;
- loff_t tmp_pa_end;
struct rb_node *iter;
ext4_fsblk_t goal_block;
@@ -4862,9 +4861,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
* pa can possibly satisfy the request hence check if it overlaps
* original logical start and stop searching if it doesn't.
*/
- tmp_pa_end = (loff_t)tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
-
- if (ac->ac_o_ex.fe_logical >= tmp_pa_end) {
+ if (ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, tmp_pa)) {
spin_unlock(&tmp_pa->pa_lock);
goto try_group_pa;
}
@@ -4984,7 +4981,6 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
mb_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
n = rb_next(n);
}
- return;
}
/*
@@ -5180,8 +5176,11 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
pa = ac->ac_pa;
if (ac->ac_b_ex.fe_len < ac->ac_orig_goal_len) {
- int new_bex_start;
- int new_bex_end;
+ struct ext4_free_extent ex = {
+ .fe_logical = ac->ac_g_ex.fe_logical,
+ .fe_len = ac->ac_orig_goal_len,
+ };
+ loff_t orig_goal_end = extent_logical_end(sbi, &ex);
/* we can't allocate as much as normalizer wants.
* so, found space must get proper lstart
@@ -5200,29 +5199,23 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
* still cover original start
* 3. Else, keep the best ex at start of original request.
*/
- new_bex_end = ac->ac_g_ex.fe_logical +
- EXT4_C2B(sbi, ac->ac_orig_goal_len);
- new_bex_start = new_bex_end - EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
- if (ac->ac_o_ex.fe_logical >= new_bex_start)
- goto adjust_bex;
+ ex.fe_len = ac->ac_b_ex.fe_len;
- new_bex_start = ac->ac_g_ex.fe_logical;
- new_bex_end =
- new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
- if (ac->ac_o_ex.fe_logical < new_bex_end)
+ ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len);
+ if (ac->ac_o_ex.fe_logical >= ex.fe_logical)
goto adjust_bex;
- new_bex_start = ac->ac_o_ex.fe_logical;
- new_bex_end =
- new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
+ ex.fe_logical = ac->ac_g_ex.fe_logical;
+ if (ac->ac_o_ex.fe_logical < extent_logical_end(sbi, &ex))
+ goto adjust_bex;
+ ex.fe_logical = ac->ac_o_ex.fe_logical;
adjust_bex:
- ac->ac_b_ex.fe_logical = new_bex_start;
+ ac->ac_b_ex.fe_logical = ex.fe_logical;
BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
- BUG_ON(new_bex_end > (ac->ac_g_ex.fe_logical +
- EXT4_C2B(sbi, ac->ac_orig_goal_len)));
+ BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
}
pa->pa_lstart = ac->ac_b_ex.fe_logical;
@@ -5419,7 +5412,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
struct ext4_group_info *grp = ext4_get_group_info(sb, group);
struct buffer_head *bitmap_bh = NULL;
struct ext4_prealloc_space *pa, *tmp;
- struct list_head list;
+ LIST_HEAD(list);
struct ext4_buddy e4b;
struct ext4_inode_info *ei;
int err;
@@ -5448,7 +5441,6 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
goto out_dbg;
}
- INIT_LIST_HEAD(&list);
ext4_lock_group(sb, group);
list_for_each_entry_safe(pa, tmp,
&grp->bb_prealloc_list, pa_group_list) {
@@ -5529,7 +5521,7 @@ void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
struct buffer_head *bitmap_bh = NULL;
struct ext4_prealloc_space *pa, *tmp;
ext4_group_t group = 0;
- struct list_head list;
+ LIST_HEAD(list);
struct ext4_buddy e4b;
struct rb_node *iter;
int err;
@@ -5546,8 +5538,6 @@ void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
trace_ext4_discard_preallocations(inode,
atomic_read(&ei->i_prealloc_active), needed);
- INIT_LIST_HEAD(&list);
-
if (needed == 0)
needed = UINT_MAX;
@@ -5671,7 +5661,7 @@ static inline void ext4_mb_show_pa(struct super_block *sb)
{
ext4_group_t i, ngroups;
- if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
+ if (ext4_forced_shutdown(sb))
return;
ngroups = ext4_get_groups_count(sb);
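The EXT4_MF_FS_ABORTED tests here and in ext4_mb_show_ac() below are replaced by ext4_forced_shutdown(); assuming the companion change that makes the helper take a super_block, it amounts to roughly:

	static inline int ext4_forced_shutdown(struct super_block *sb)
	{
		/* set once the filesystem has been shut down or aborted */
		return test_bit(EXT4_FLAGS_SHUTDOWN, &EXT4_SB(sb)->s_ext4_flags);
	}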
@@ -5705,7 +5695,7 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
{
struct super_block *sb = ac->ac_sb;
- if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
+ if (ext4_forced_shutdown(sb))
return;
mb_debug(sb, "Can't allocate:"
@@ -5738,12 +5728,10 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
#else
static inline void ext4_mb_show_pa(struct super_block *sb)
{
- return;
}
static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
{
ext4_mb_show_pa(ac->ac_sb);
- return;
}
#endif
@@ -5769,7 +5757,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
group_pa_eligible = sbi->s_mb_group_prealloc > 0;
inode_pa_eligible = true;
- size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
+ size = extent_logical_end(sbi, &ac->ac_o_ex);
isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
>> bsbits;
@@ -5865,13 +5853,11 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
{
ext4_group_t group = 0;
struct ext4_buddy e4b;
- struct list_head discard_list;
+ LIST_HEAD(discard_list);
struct ext4_prealloc_space *pa, *tmp;
mb_debug(sb, "discard locality group preallocation\n");
- INIT_LIST_HEAD(&discard_list);
-
spin_lock(&lg->lg_prealloc_lock);
list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
pa_node.lg_list,
@@ -5984,12 +5970,9 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
spin_unlock(&lg->lg_prealloc_lock);
/* Now trim the list to be not more than 8 elements */
- if (lg_prealloc_count > 8) {
+ if (lg_prealloc_count > 8)
ext4_mb_discard_lg_preallocations(sb, lg,
order, lg_prealloc_count);
- return;
- }
- return ;
}
/*
@@ -6102,7 +6085,7 @@ ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp)
ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
ext4_grpblk_t i = 0;
ext4_fsblk_t goal, block;
- struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+ struct ext4_super_block *es = sbi->s_es;
goal = ar->goal;
if (goal < le32_to_cpu(es->s_first_data_block) ||
@@ -6643,7 +6626,6 @@ do_more:
error_return:
brelse(bitmap_bh);
ext4_std_error(sb, err);
- return;
}
/**
@@ -6746,7 +6728,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
}
ext4_mb_clear_bb(handle, inode, block, count, flags);
- return;
}
/**
@@ -6926,6 +6907,21 @@ __acquires(bitlock)
return ret;
}
+static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
+ ext4_group_t grp)
+{
+ if (grp < ext4_get_groups_count(sb))
+ return EXT4_CLUSTERS_PER_GROUP(sb) - 1;
+ return (ext4_blocks_count(EXT4_SB(sb)->s_es) -
+ ext4_group_first_block_no(sb, grp) - 1) >>
+ EXT4_CLUSTER_BITS(sb);
+}
+
+static bool ext4_trim_interrupted(void)
+{
+ return fatal_signal_pending(current) || freezing(current);
+}
+
static int ext4_try_to_trim_range(struct super_block *sb,
struct ext4_buddy *e4b, ext4_grpblk_t start,
ext4_grpblk_t max, ext4_grpblk_t minblocks)
@@ -6933,11 +6929,13 @@ __acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
__releases(ext4_group_lock_ptr(sb, e4b->bd_group))
{
ext4_grpblk_t next, count, free_count;
+ bool set_trimmed = false;
void *bitmap;
bitmap = e4b->bd_bitmap;
- start = (e4b->bd_info->bb_first_free > start) ?
- e4b->bd_info->bb_first_free : start;
+ if (start == 0 && max >= ext4_last_grp_cluster(sb, e4b->bd_group))
+ set_trimmed = true;
+ start = max(e4b->bd_info->bb_first_free, start);
count = 0;
free_count = 0;
@@ -6951,16 +6949,14 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
int ret = ext4_trim_extent(sb, start, next - start, e4b);
if (ret && ret != -EOPNOTSUPP)
- break;
+ return count;
count += next - start;
}
free_count += next - start;
start = next + 1;
- if (fatal_signal_pending(current)) {
- count = -ERESTARTSYS;
- break;
- }
+ if (ext4_trim_interrupted())
+ return count;
if (need_resched()) {
ext4_unlock_group(sb, e4b->bd_group);
@@ -6972,6 +6968,9 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
break;
}
+ if (set_trimmed)
+ EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);
+
return count;
}
@@ -6982,7 +6981,6 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
* @start: first group block to examine
* @max: last group block to examine
* @minblocks: minimum extent block count
- * @set_trimmed: set the trimmed flag if at least one block is trimmed
*
* ext4_trim_all_free walks through group's block bitmap searching for free
* extents. When the free extent is found, mark it as used in group buddy
@@ -6992,7 +6990,7 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
static ext4_grpblk_t
ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
ext4_grpblk_t start, ext4_grpblk_t max,
- ext4_grpblk_t minblocks, bool set_trimmed)
+ ext4_grpblk_t minblocks)
{
struct ext4_buddy e4b;
int ret;
@@ -7009,13 +7007,10 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
ext4_lock_group(sb, group);
if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
- minblocks < EXT4_SB(sb)->s_last_trim_minblks) {
+ minblocks < EXT4_SB(sb)->s_last_trim_minblks)
ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
- if (ret >= 0 && set_trimmed)
- EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
- } else {
+ else
ret = 0;
- }
ext4_unlock_group(sb, group);
ext4_mb_unload_buddy(&e4b);
@@ -7048,7 +7043,6 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
ext4_fsblk_t first_data_blk =
le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
- bool whole_group, eof = false;
int ret = 0;
start = range->start >> sb->s_blocksize_bits;
@@ -7067,10 +7061,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
goto out;
}
- if (end >= max_blks - 1) {
+ if (end >= max_blks - 1)
end = max_blks - 1;
- eof = true;
- }
if (end <= first_data_blk)
goto out;
if (start < first_data_blk)
@@ -7084,9 +7076,10 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
/* end now represents the last cluster to discard in this group */
end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
- whole_group = true;
for (group = first_group; group <= last_group; group++) {
+ if (ext4_trim_interrupted())
+ break;
grp = ext4_get_group_info(sb, group);
if (!grp)
continue;
@@ -7103,13 +7096,11 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
* change it for the last group, note that last_cluster is
* already computed earlier by ext4_get_group_no_and_offset()
*/
- if (group == last_group) {
+ if (group == last_group)
end = last_cluster;
- whole_group = eof ? true : end == EXT4_CLUSTERS_PER_GROUP(sb) - 1;
- }
if (grp->bb_free >= minlen) {
cnt = ext4_trim_all_free(sb, group, first_cluster,
- end, minlen, whole_group);
+ end, minlen);
if (cnt < 0) {
ret = cnt;
break;
@@ -7154,8 +7145,7 @@ ext4_mballoc_query_range(
ext4_lock_group(sb, group);
- start = (e4b.bd_info->bb_first_free > start) ?
- e4b.bd_info->bb_first_free : start;
+ start = max(e4b.bd_info->bb_first_free, start);
if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;