From 54c4fe08f65e4c39c3d62f5d181f958e9c6c97f7 Mon Sep 17 00:00:00 2001
From: Haifeng Xu
Date: Tue, 11 Apr 2023 06:17:57 +0000
Subject: mm/vmscan: simplify shrink_node()

The difference between sc->nr_reclaimed and nr_reclaimed is computed three
times.  Introduce a new variable to record the value, so it only needs to
be computed once.

Link: https://lkml.kernel.org/r/20230411061757.12041-1-haifeng.xu@shopee.com
Signed-off-by: Haifeng Xu
Reviewed-by: David Hildenbrand
Cc: Matthew Wilcox (Oracle)
Signed-off-by: Andrew Morton
---
 mm/vmscan.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 98719e72b5e2..7f2441d6dfb2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -6463,7 +6463,7 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
 static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 {
 	struct reclaim_state *reclaim_state = current->reclaim_state;
-	unsigned long nr_reclaimed, nr_scanned;
+	unsigned long nr_reclaimed, nr_scanned, nr_node_reclaimed;
 	struct lruvec *target_lruvec;
 	bool reclaimable = false;
 
@@ -6489,13 +6489,14 @@ again:
 		reclaim_state->reclaimed_slab = 0;
 	}
 
+	nr_node_reclaimed = sc->nr_reclaimed - nr_reclaimed;
+
 	/* Record the subtree's reclaim efficiency */
 	if (!sc->proactive)
 		vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
-			   sc->nr_scanned - nr_scanned,
-			   sc->nr_reclaimed - nr_reclaimed);
+			   sc->nr_scanned - nr_scanned, nr_node_reclaimed);
 
-	if (sc->nr_reclaimed - nr_reclaimed)
+	if (nr_node_reclaimed)
 		reclaimable = true;
 
 	if (current_is_kswapd()) {
@@ -6557,8 +6558,7 @@ again:
 	    test_bit(LRUVEC_CONGESTED, &target_lruvec->flags))
 		reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED);
 
-	if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
-				    sc))
+	if (should_continue_reclaim(pgdat, nr_node_reclaimed, sc))
 		goto again;
 
 	/*
-- 
cgit v1.2.3
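
The sketch below is a minimal user-space illustration of the pattern this patch applies: a delta that was recomputed at several call sites is computed once into a named local (nr_node_reclaimed) and reused. It is not kernel code; the struct and function names (scan_state, reclaim_node, do_reclaim_pass) are hypothetical stand-ins for scan_control and shrink_node(), and only mirror the shape of the change. Besides saving the repeated subtraction, naming the quantity makes the later uses read as "how much this node reclaimed" rather than inline arithmetic.

	/*
	 * Standalone sketch (hypothetical names, not the kernel's API) of
	 * hoisting a repeatedly computed delta into one named local.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct scan_state {
		unsigned long nr_scanned;
		unsigned long nr_reclaimed;
	};

	/* Placeholder for the per-node reclaim work the real code delegates. */
	static void do_reclaim_pass(struct scan_state *ss)
	{
		ss->nr_scanned += 32;
		ss->nr_reclaimed += 8;
	}

	static void reclaim_node(struct scan_state *ss)
	{
		/* Snapshot the running totals on entry, as shrink_node() does. */
		unsigned long nr_scanned = ss->nr_scanned;
		unsigned long nr_reclaimed = ss->nr_reclaimed;
		unsigned long nr_node_reclaimed;
		bool reclaimable = false;

		do_reclaim_pass(ss);

		/* Compute the per-node delta once instead of three times. */
		nr_node_reclaimed = ss->nr_reclaimed - nr_reclaimed;

		printf("scanned %lu, reclaimed %lu on this node\n",
		       ss->nr_scanned - nr_scanned, nr_node_reclaimed);

		if (nr_node_reclaimed)
			reclaimable = true;

		if (!reclaimable)
			printf("node looks unreclaimable\n");
	}

	int main(void)
	{
		struct scan_state ss = { 0, 0 };

		reclaim_node(&ss);
		return 0;
	}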