Diffstat (limited to 'fs/ext4/mballoc.c')
-rw-r--r--  fs/ext4/mballoc.c  200
1 file changed, 89 insertions, 111 deletions
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 21b903fe546e..c91db9f57524 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -874,7 +874,7 @@ static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context
enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
- struct ext4_group_info *iter, *grp;
+ struct ext4_group_info *iter;
int i;
if (ac->ac_status == AC_STATUS_FOUND)
@@ -883,7 +883,6 @@ static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context
if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED))
atomic_inc(&sbi->s_bal_p2_aligned_bad_suggestions);
- grp = NULL;
for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
if (list_empty(&sbi->s_mb_largest_free_orders[i]))
continue;
@@ -892,28 +891,22 @@ static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context
read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
continue;
}
- grp = NULL;
list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
bb_largest_free_order_node) {
if (sbi->s_mb_stats)
atomic64_inc(&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]);
if (likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) {
- grp = iter;
- break;
+ *group = iter->bb_group;
+ ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED;
+ read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
+ return;
}
}
read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
- if (grp)
- break;
}
- if (!grp) {
- /* Increment cr and search again */
- *new_cr = CR_GOAL_LEN_FAST;
- } else {
- *group = grp->bb_group;
- ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED;
- }
+ /* Increment cr and search again if no group is found */
+ *new_cr = CR_GOAL_LEN_FAST;
}
/*
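The hunks above drop the carried `grp` pointer and return from inside the list walk as soon as a good group is found, so the read lock for each order list is released on exactly one path. A minimal standalone sketch of that early-return pattern (plain C with illustrative names only, not ext4's structures; the per-order locks are indicated as comments):

#include <stdbool.h>
#include <stddef.h>

struct entry { int group; struct entry *next; };
struct bucket { struct entry *head; /* per-bucket read lock omitted here */ };

static bool is_good(const struct entry *e) { return e->group >= 0; }

/* Returns true and sets *group on the first acceptable entry found. */
static bool pick_group(struct bucket *buckets, int nbuckets, int order, int *group)
{
	for (int i = order; i < nbuckets; i++) {
		/* read_lock(&locks[i]); in the kernel code */
		for (struct entry *e = buckets[i].head; e; e = e->next) {
			if (is_good(e)) {
				*group = e->group;
				/* read_unlock(&locks[i]); */
				return true;	/* early return, no carried pointer */
			}
		}
		/* read_unlock(&locks[i]); */
	}
	return false;	/* caller falls back to the next criteria */
}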
@@ -966,16 +959,25 @@ static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *
for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
i < MB_NUM_ORDERS(ac->ac_sb); i++) {
grp = ext4_mb_find_good_group_avg_frag_lists(ac, i);
- if (grp)
- break;
+ if (grp) {
+ *group = grp->bb_group;
+ ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED;
+ return;
+ }
}
- if (grp) {
- *group = grp->bb_group;
- ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED;
- } else {
+ /*
+ * CR_BEST_AVAIL_LEN works based on the concept that we have
+ * a larger normalized goal len request which can be trimmed to
+ * a smaller goal len such that it can still satisfy original
+ * request len. However, allocation request for non-regular
+ * files never gets normalized.
+ * See function ext4_mb_normalize_request() (EXT4_MB_HINT_DATA).
+ */
+ if (ac->ac_flags & EXT4_MB_HINT_DATA)
*new_cr = CR_BEST_AVAIL_LEN;
- }
+ else
+ *new_cr = CR_GOAL_LEN_SLOW;
}
/*
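The fallback here depends on the relative order of the allocation criteria. For reference, a sketch of the enum as it is ordered in fs/ext4/ext4.h (the per-value comments are a summary, not the kernel's own): because allocations without EXT4_MB_HINT_DATA are never normalized, there is no oversized goal to trim, so they skip CR_BEST_AVAIL_LEN and go straight to CR_GOAL_LEN_SLOW.

enum criteria {
	CR_POWER2_ALIGNED,	/* buddy-exact, power-of-2 aligned requests */
	CR_GOAL_LEN_FAST,	/* groups likely to satisfy the full goal length */
	CR_BEST_AVAIL_LEN,	/* trim the normalized goal to the best available */
	CR_GOAL_LEN_SLOW,	/* consider any group that might satisfy the goal */
	CR_ANY_FREE,		/* take whatever free blocks can be found */
	EXT4_MB_NUM_CRS
};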
@@ -1051,18 +1053,16 @@ static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context
ac->ac_g_ex.fe_len);
grp = ext4_mb_find_good_group_avg_frag_lists(ac, frag_order);
- if (grp)
- break;
+ if (grp) {
+ *group = grp->bb_group;
+ ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED;
+ return;
+ }
}
- if (grp) {
- *group = grp->bb_group;
- ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED;
- } else {
- /* Reset goal length to original goal length before falling into CR_GOAL_LEN_SLOW */
- ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
- *new_cr = CR_GOAL_LEN_SLOW;
- }
+ /* Reset goal length to original goal length before falling into CR_GOAL_LEN_SLOW */
+ ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
+ *new_cr = CR_GOAL_LEN_SLOW;
}
static inline int should_optimize_scan(struct ext4_allocation_context *ac)
@@ -1080,8 +1080,9 @@ static inline int should_optimize_scan(struct ext4_allocation_context *ac)
* Return next linear group for allocation. If linear traversal should not be
* performed, this function just returns the same group
*/
-static int
-next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups)
+static ext4_group_t
+next_linear_group(struct ext4_allocation_context *ac, ext4_group_t group,
+ ext4_group_t ngroups)
{
if (!should_optimize_scan(ac))
goto inc_and_return;
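The function now takes and returns ext4_group_t (an unsigned 32-bit group number, per fs/ext4/ext4.h) instead of int. The rest of the body is truncated by the hunk, so the following is only an assumed sketch of the inc_and_return step with the new type (hypothetical helper name):

typedef unsigned int ext4_group_t;	/* as in fs/ext4/ext4.h */

static ext4_group_t inc_and_wrap(ext4_group_t group, ext4_group_t ngroups)
{
	/* With an unsigned type the +1 at the top group cannot overflow into UB. */
	return group + 1 >= ngroups ? 0 : group + 1;
}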
@@ -1255,7 +1256,7 @@ void ext4_mb_generate_buddy(struct super_block *sb,
static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
{
ext4_group_t ngroups;
- int blocksize;
+ unsigned int blocksize;
int blocks_per_page;
int groups_per_page;
int err = 0;
@@ -2450,7 +2451,7 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
break;
}
- if (ac->ac_criteria < CR_FAST) {
+ if (!ext4_mb_cr_expensive(ac->ac_criteria)) {
/*
* In CR_GOAL_LEN_FAST and CR_BEST_AVAIL_LEN, we are
* sure that this group will have a large enough
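This and the later hunks replace the open-coded `cr < CR_FAST` / `cr >= CR_FAST` comparisons with ext4_mb_cr_expensive(). The helper is not part of this file's diff; a presumed sketch, assuming it is defined in fs/ext4/ext4.h with the criteria ordering shown earlier:

/* Presumed shape: a criteria is "expensive" once past the fast goal scans. */
static inline bool ext4_mb_cr_expensive(enum criteria cr)
{
	return cr >= CR_GOAL_LEN_SLOW;
}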
@@ -2553,7 +2554,7 @@ static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
BUG_ON(cr < CR_POWER2_ALIGNED || cr >= EXT4_MB_NUM_CRS);
- if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp) || !grp))
+ if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
return false;
free = grp->bb_free;
@@ -2634,7 +2635,12 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
free = grp->bb_free;
if (free == 0)
goto out;
- if (cr <= CR_FAST && free < ac->ac_g_ex.fe_len)
+ /*
* In all criteria except CR_ANY_FREE we try to avoid groups that
+ * can't possibly satisfy the full goal request due to insufficient
+ * free blocks.
+ */
+ if (cr < CR_ANY_FREE && free < ac->ac_g_ex.fe_len)
goto out;
if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
goto out;
@@ -2658,7 +2664,7 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
* sure we locate metadata blocks in the first block group in
* the flex_bg if possible.
*/
- if (cr < CR_FAST &&
+ if (!ext4_mb_cr_expensive(cr) &&
(!sbi->s_log_groups_per_flex ||
((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
!(ext4_has_group_desc_csum(sb) &&
@@ -2787,8 +2793,8 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
/*
* ac->ac_2order is set only if the fe_len is a power of 2
- * if ac->ac_2order is set we also set criteria to 0 so that we
- * try exact allocation using buddy.
+ * if ac->ac_2order is set we also set criteria to CR_POWER2_ALIGNED
+ * so that we try exact allocation using buddy.
*/
i = fls(ac->ac_g_ex.fe_len);
ac->ac_2order = 0;
@@ -2800,10 +2806,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
* requests up to the maximum buddy size we have constructed.
*/
if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
- /*
- * This should tell if fe_len is exactly power of 2
- */
- if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
+ if (is_power_of_2(ac->ac_g_ex.fe_len))
ac->ac_2order = array_index_nospec(i - 1,
MB_NUM_ORDERS(sb));
}
@@ -2848,11 +2851,11 @@ repeat:
/*
* Batch reads of the block allocation bitmaps
* to get multiple READs in flight; limit
- * prefetching at cr=0/1, otherwise mballoc can
- * spend a lot of time loading imperfect groups
+ * prefetching at inexpensive CR, otherwise mballoc
+ * can spend a lot of time loading imperfect groups
*/
if ((prefetch_grp == group) &&
- (cr >= CR_FAST ||
+ (ext4_mb_cr_expensive(cr) ||
prefetch_ios < sbi->s_mb_prefetch_limit)) {
nr = sbi->s_mb_prefetch;
if (ext4_has_feature_flex_bg(sb)) {
@@ -3501,11 +3504,10 @@ static void ext4_discard_work(struct work_struct *work)
struct super_block *sb = sbi->s_sb;
struct ext4_free_data *fd, *nfd;
struct ext4_buddy e4b;
- struct list_head discard_list;
+ LIST_HEAD(discard_list);
ext4_group_t grp, load_grp;
int err = 0;
- INIT_LIST_HEAD(&discard_list);
spin_lock(&sbi->s_md_lock);
list_splice_init(&sbi->s_discard_list, &discard_list);
spin_unlock(&sbi->s_md_lock);
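Here and in the later hunks, an on-stack list head declared and then initialized with INIT_LIST_HEAD() becomes a single LIST_HEAD() declaration. A minimal sketch of the two equivalent forms (hypothetical function names):

#include <linux/list.h>

/* Before: separate declaration and runtime initialization. */
static int before_style(void)
{
	struct list_head discard_list;

	INIT_LIST_HEAD(&discard_list);
	return list_empty(&discard_list);	/* always true at this point */
}

/* After: LIST_HEAD() declares and initializes in one step. */
static int after_style(void)
{
	LIST_HEAD(discard_list);

	return list_empty(&discard_list);	/* identical behaviour */
}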
@@ -3879,12 +3881,10 @@ void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_free_data *entry, *tmp;
- struct list_head freed_data_list;
+ LIST_HEAD(freed_data_list);
struct list_head *cut_pos = NULL;
bool wake;
- INIT_LIST_HEAD(&freed_data_list);
-
spin_lock(&sbi->s_md_lock);
list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
if (entry->efd_tid != commit_tid)
@@ -4084,7 +4084,7 @@ void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_group_t group;
ext4_grpblk_t blkoff;
- int i, err;
+ int i, err = 0;
int already;
unsigned int clen, clen_changed, thisgrp_len;
@@ -4222,12 +4222,13 @@ ext4_mb_pa_rb_next_iter(ext4_lblk_t new_start, ext4_lblk_t cur_start, struct rb_
static inline void
ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
- ext4_lblk_t start, ext4_lblk_t end)
+ ext4_lblk_t start, loff_t end)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
struct ext4_prealloc_space *tmp_pa;
- ext4_lblk_t tmp_pa_start, tmp_pa_end;
+ ext4_lblk_t tmp_pa_start;
+ loff_t tmp_pa_end;
struct rb_node *iter;
read_lock(&ei->i_prealloc_lock);
@@ -4236,7 +4237,7 @@ ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
pa_node.inode_node);
tmp_pa_start = tmp_pa->pa_lstart;
- tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
+ tmp_pa_end = pa_logical_end(sbi, tmp_pa);
spin_lock(&tmp_pa->pa_lock);
if (tmp_pa->pa_deleted == 0)
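The open-coded `pa_lstart + EXT4_C2B(sbi, pa_len)` end calculation becomes pa_logical_end(). The helper is not defined in this diff; a presumed sketch, assuming it lives in fs/ext4/mballoc.h alongside this series, with the sum done in loff_t to match the widened locals above:

/* Presumed definition; widening to loff_t keeps the start + length sum
 * from overflowing the 32-bit ext4_lblk_t near the end of the file range. */
static inline loff_t pa_logical_end(struct ext4_sb_info *sbi,
				    struct ext4_prealloc_space *pa)
{
	return (loff_t)pa->pa_lstart + EXT4_C2B(sbi, pa->pa_len);
}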
@@ -4258,14 +4259,14 @@ ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
*/
static inline void
ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
- ext4_lblk_t *start, ext4_lblk_t *end)
+ ext4_lblk_t *start, loff_t *end)
{
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL;
struct rb_node *iter;
- ext4_lblk_t new_start, new_end;
- ext4_lblk_t tmp_pa_start, tmp_pa_end, left_pa_end = -1, right_pa_start = -1;
+ ext4_lblk_t new_start, tmp_pa_start, right_pa_start = -1;
+ loff_t new_end, tmp_pa_end, left_pa_end = -1;
new_start = *start;
new_end = *end;
@@ -4284,7 +4285,7 @@ ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
pa_node.inode_node);
tmp_pa_start = tmp_pa->pa_lstart;
- tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
+ tmp_pa_end = pa_logical_end(sbi, tmp_pa);
/* PA must not overlap original request */
spin_lock(&tmp_pa->pa_lock);
@@ -4364,8 +4365,7 @@ ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
}
if (left_pa) {
- left_pa_end =
- left_pa->pa_lstart + EXT4_C2B(sbi, left_pa->pa_len);
+ left_pa_end = pa_logical_end(sbi, left_pa);
BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical);
}
@@ -4404,8 +4404,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
struct ext4_super_block *es = sbi->s_es;
int bsbits, max;
- ext4_lblk_t end;
- loff_t size, start_off;
+ loff_t size, start_off, end;
loff_t orig_size __maybe_unused;
ext4_lblk_t start;
@@ -4432,7 +4431,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
/* first, let's learn actual file size
* given current request is allocated */
- size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
+ size = extent_logical_end(sbi, &ac->ac_o_ex);
size = size << bsbits;
if (size < i_size_read(ac->ac_inode))
size = i_size_read(ac->ac_inode);
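Likewise, the logical end of an ext4_free_extent is now computed by extent_logical_end(). Again not defined in this diff; a presumed companion to pa_logical_end() under the same assumption:

static inline loff_t extent_logical_end(struct ext4_sb_info *sbi,
					struct ext4_free_extent *fex)
{
	return (loff_t)fex->fe_logical + EXT4_C2B(sbi, fex->fe_len);
}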
@@ -4766,7 +4765,6 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
struct ext4_locality_group *lg;
struct ext4_prealloc_space *tmp_pa = NULL, *cpa = NULL;
- loff_t tmp_pa_end;
struct rb_node *iter;
ext4_fsblk_t goal_block;
@@ -4862,9 +4860,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
* pa can possibly satisfy the request hence check if it overlaps
* original logical start and stop searching if it doesn't.
*/
- tmp_pa_end = (loff_t)tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
-
- if (ac->ac_o_ex.fe_logical >= tmp_pa_end) {
+ if (ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, tmp_pa)) {
spin_unlock(&tmp_pa->pa_lock);
goto try_group_pa;
}
@@ -4984,7 +4980,6 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
mb_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
n = rb_next(n);
}
- return;
}
/*
@@ -5180,8 +5175,11 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
pa = ac->ac_pa;
if (ac->ac_b_ex.fe_len < ac->ac_orig_goal_len) {
- int new_bex_start;
- int new_bex_end;
+ struct ext4_free_extent ex = {
+ .fe_logical = ac->ac_g_ex.fe_logical,
+ .fe_len = ac->ac_orig_goal_len,
+ };
+ loff_t orig_goal_end = extent_logical_end(sbi, &ex);
/* we can't allocate as much as normalizer wants.
* so, found space must get proper lstart
@@ -5200,29 +5198,23 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
* still cover original start
* 3. Else, keep the best ex at start of original request.
*/
- new_bex_end = ac->ac_g_ex.fe_logical +
- EXT4_C2B(sbi, ac->ac_orig_goal_len);
- new_bex_start = new_bex_end - EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
- if (ac->ac_o_ex.fe_logical >= new_bex_start)
- goto adjust_bex;
+ ex.fe_len = ac->ac_b_ex.fe_len;
- new_bex_start = ac->ac_g_ex.fe_logical;
- new_bex_end =
- new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
- if (ac->ac_o_ex.fe_logical < new_bex_end)
+ ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len);
+ if (ac->ac_o_ex.fe_logical >= ex.fe_logical)
goto adjust_bex;
- new_bex_start = ac->ac_o_ex.fe_logical;
- new_bex_end =
- new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
+ ex.fe_logical = ac->ac_g_ex.fe_logical;
+ if (ac->ac_o_ex.fe_logical < extent_logical_end(sbi, &ex))
+ goto adjust_bex;
+ ex.fe_logical = ac->ac_o_ex.fe_logical;
adjust_bex:
- ac->ac_b_ex.fe_logical = new_bex_start;
+ ac->ac_b_ex.fe_logical = ex.fe_logical;
BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
- BUG_ON(new_bex_end > (ac->ac_g_ex.fe_logical +
- EXT4_C2B(sbi, ac->ac_orig_goal_len)));
+ BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
}
pa->pa_lstart = ac->ac_b_ex.fe_logical;
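The rewritten block reuses a scratch ext4_free_extent to walk the three placement cases for a best extent that turned out shorter than the normalized goal. A standalone sketch of the same decision using plain offsets (illustrative names, no ext4 structures): gstart/gend are the normalized goal range, ostart the original request's logical start, blen the shorter best-extent length.

static long long place_best(long long gstart, long long gend,
			    long long ostart, long long blen)
{
	/* 1. Right-align best with the goal end if that still covers ostart. */
	if (ostart >= gend - blen)
		return gend - blen;
	/* 2. Else keep the goal start if ostart still falls inside best. */
	if (ostart < gstart + blen)
		return gstart;
	/* 3. Else start best exactly at the original request. */
	return ostart;
}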
@@ -5419,7 +5411,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
struct ext4_group_info *grp = ext4_get_group_info(sb, group);
struct buffer_head *bitmap_bh = NULL;
struct ext4_prealloc_space *pa, *tmp;
- struct list_head list;
+ LIST_HEAD(list);
struct ext4_buddy e4b;
struct ext4_inode_info *ei;
int err;
@@ -5448,7 +5440,6 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
goto out_dbg;
}
- INIT_LIST_HEAD(&list);
ext4_lock_group(sb, group);
list_for_each_entry_safe(pa, tmp,
&grp->bb_prealloc_list, pa_group_list) {
@@ -5529,7 +5520,7 @@ void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
struct buffer_head *bitmap_bh = NULL;
struct ext4_prealloc_space *pa, *tmp;
ext4_group_t group = 0;
- struct list_head list;
+ LIST_HEAD(list);
struct ext4_buddy e4b;
struct rb_node *iter;
int err;
@@ -5546,8 +5537,6 @@ void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
trace_ext4_discard_preallocations(inode,
atomic_read(&ei->i_prealloc_active), needed);
- INIT_LIST_HEAD(&list);
-
if (needed == 0)
needed = UINT_MAX;
@@ -5671,7 +5660,7 @@ static inline void ext4_mb_show_pa(struct super_block *sb)
{
ext4_group_t i, ngroups;
- if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
+ if (ext4_forced_shutdown(sb))
return;
ngroups = ext4_get_groups_count(sb);
@@ -5705,7 +5694,7 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
{
struct super_block *sb = ac->ac_sb;
- if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
+ if (ext4_forced_shutdown(sb))
return;
mb_debug(sb, "Can't allocate:"
@@ -5738,12 +5727,10 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
#else
static inline void ext4_mb_show_pa(struct super_block *sb)
{
- return;
}
static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
{
ext4_mb_show_pa(ac->ac_sb);
- return;
}
#endif
@@ -5769,7 +5756,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
group_pa_eligible = sbi->s_mb_group_prealloc > 0;
inode_pa_eligible = true;
- size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
+ size = extent_logical_end(sbi, &ac->ac_o_ex);
isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
>> bsbits;
@@ -5865,13 +5852,11 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
{
ext4_group_t group = 0;
struct ext4_buddy e4b;
- struct list_head discard_list;
+ LIST_HEAD(discard_list);
struct ext4_prealloc_space *pa, *tmp;
mb_debug(sb, "discard locality group preallocation\n");
- INIT_LIST_HEAD(&discard_list);
-
spin_lock(&lg->lg_prealloc_lock);
list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
pa_node.lg_list,
@@ -5984,12 +5969,9 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
spin_unlock(&lg->lg_prealloc_lock);
/* Now trim the list to be not more than 8 elements */
- if (lg_prealloc_count > 8) {
+ if (lg_prealloc_count > 8)
ext4_mb_discard_lg_preallocations(sb, lg,
order, lg_prealloc_count);
- return;
- }
- return ;
}
/*
@@ -6102,7 +6084,7 @@ ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp)
ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
ext4_grpblk_t i = 0;
ext4_fsblk_t goal, block;
- struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+ struct ext4_super_block *es = sbi->s_es;
goal = ar->goal;
if (goal < le32_to_cpu(es->s_first_data_block) ||
@@ -6643,7 +6625,6 @@ do_more:
error_return:
brelse(bitmap_bh);
ext4_std_error(sb, err);
- return;
}
/**
@@ -6746,7 +6727,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
}
ext4_mb_clear_bb(handle, inode, block, count, flags);
- return;
}
/**
@@ -6936,8 +6916,7 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
void *bitmap;
bitmap = e4b->bd_bitmap;
- start = (e4b->bd_info->bb_first_free > start) ?
- e4b->bd_info->bb_first_free : start;
+ start = max(e4b->bd_info->bb_first_free, start);
count = 0;
free_count = 0;
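The open-coded ternary becomes the kernel max() macro from <linux/minmax.h>, which also warns at compile time if the argument types differ. Illustrative fragment only (types as used in mballoc.c):

#include <linux/minmax.h>

/* Same clamp as above, written out; max() checks that both types match. */
static ext4_grpblk_t first_scan_cluster(ext4_grpblk_t bb_first_free,
					ext4_grpblk_t start)
{
	return max(bb_first_free, start);
}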
@@ -7154,8 +7133,7 @@ ext4_mballoc_query_range(
ext4_lock_group(sb, group);
- start = (e4b.bd_info->bb_first_free > start) ?
- e4b.bd_info->bb_first_free : start;
+ start = max(e4b.bd_info->bb_first_free, start);
if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;