Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--  include/linux/mmzone.h | 91
1 file changed, 45 insertions(+), 46 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 1b9de7d220fb..c4c37fd12104 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -156,6 +156,9 @@ enum zone_stat_item {
NR_MLOCK, /* mlock()ed pages found and moved off LRU */
NR_PAGETABLE, /* used for pagetables */
NR_KERNEL_STACK_KB, /* measured in KiB */
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+ NR_KERNEL_SCS_KB, /* measured in KiB */
+#endif
/* Second 128 byte cacheline */
NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
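The new NR_KERNEL_SCS_KB counter accounts Shadow Call Stack memory in KiB, alongside the existing NR_KERNEL_STACK_KB. A minimal sketch of how an allocator might charge it, modeled on the kernel-stack accounting; the scs_account() helper is an assumption, not part of this header, and SCS_SIZE is taken to be the per-stack allocation size from <linux/scs.h>:

    #include <linux/mm.h>		/* page_zone(), virt_to_page() */
    #include <linux/vmstat.h>	/* mod_zone_page_state() */

    /*
     * Hypothetical helper: charge (account = 1) or uncharge
     * (account = -1) one shadow call stack of SCS_SIZE bytes
     * against the KiB-granular zone counter.
     */
    static void scs_account(void *s, int account)
    {
    	mod_zone_page_state(page_zone(virt_to_page(s)),
    			    NR_KERNEL_SCS_KB,
    			    account * (SCS_SIZE / 1024));
    }
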
@@ -193,7 +196,6 @@ enum node_stat_item {
NR_FILE_THPS,
NR_FILE_PMDMAPPED,
NR_ANON_THPS,
- NR_UNSTABLE_NFS, /* NFS unstable pages */
NR_VMSCAN_WRITE,
NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
NR_DIRTIED, /* page dirtyings since bootup */
@@ -240,19 +242,6 @@ static inline bool is_active_lru(enum lru_list lru)
return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
-struct zone_reclaim_stat {
- /*
- * The pageout code in vmscan.c keeps track of how many of the
- * mem/swap backed and file backed pages are referenced.
- * The higher the rotated/scanned ratio, the more valuable
- * that cache is.
- *
- * The anon LRU stats live in [0], file LRU stats in [1]
- */
- unsigned long recent_rotated[2];
- unsigned long recent_scanned[2];
-};
-
enum lruvec_flags {
LRUVEC_CONGESTED, /* lruvec has many dirty pages
* backed by a congested BDI
@@ -261,7 +250,13 @@ enum lruvec_flags {
struct lruvec {
struct list_head lists[NR_LRU_LISTS];
- struct zone_reclaim_stat reclaim_stat;
+ /*
+ * These track the cost of reclaiming one LRU - file or anon -
+ * over the other. As the observed cost of reclaiming one LRU
+ * increases, the reclaim scan balance tips toward the other.
+ */
+ unsigned long anon_cost;
+ unsigned long file_cost;
/* Evictions & activations on the inactive file list */
atomic_long_t inactive_age;
/* Refaults at the time of last reclaim cycle */
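anon_cost and file_cost replace the per-LRU rotated/scanned ratios with a single relative cost signal: reclaim scans less of whichever list has recently been more expensive to reclaim. A freestanding sketch of that balancing rule, with illustrative names only; the real scan-count logic lives in mm/vmscan.c and also weighs swappiness and list sizes:

    /*
     * Illustrative only: give one LRU a scan target inversely
     * proportional to its share of the total observed reclaim cost.
     * Assumes the costs are decayed periodically elsewhere so they
     * do not grow without bound.
     */
    static unsigned long scan_target(unsigned long this_cost,
    				 unsigned long other_cost,
    				 unsigned long base_scan)
    {
    	return base_scan * (other_cost + 1) /
    	       (this_cost + other_cost + 2);
    }
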
@@ -665,9 +660,21 @@ struct deferred_split {
* per-zone basis.
*/
typedef struct pglist_data {
+ /*
+ * node_zones contains just the zones for THIS node. Not all of the
+ * zones may be populated, but it is the full list. It is referenced by
+ * this node's node_zonelists as well as other nodes' node_zonelists.
+ */
struct zone node_zones[MAX_NR_ZONES];
+
+ /*
+ * node_zonelists contains references to all zones in all nodes.
+ * Generally the first zones will be references to this node's
+ * node_zones.
+ */
struct zonelist node_zonelists[MAX_ZONELISTS];
- int nr_zones;
+
+ int nr_zones; /* number of populated zones in this node */
#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
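Since populated zones need not be contiguous within node_zones, code honouring the clarified nr_zones comment typically walks the whole array and tests each entry. A minimal sketch, assuming nid is a valid online node and the helper name is illustrative:

    /* Visit every populated zone of node nid. */
    static void walk_zones_of_node(int nid)
    {
    	struct pglist_data *pgdat = NODE_DATA(nid);
    	enum zone_type zid;

    	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
    		struct zone *zone = &pgdat->node_zones[zid];

    		if (!populated_zone(zone))
    			continue;
    		/* ... operate on zone ... */
    	}
    }
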
@@ -678,6 +685,8 @@ typedef struct pglist_data {
/*
* Must be held any time you expect node_start_pfn,
* node_present_pages, node_spanned_pages or nr_zones to stay constant.
+ * Also synchronizes pgdat->first_deferred_pfn during deferred page
+ * init.
*
* pgdat_resize_lock() and pgdat_resize_unlock() are provided to
* manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
@@ -697,13 +706,13 @@ typedef struct pglist_data {
struct task_struct *kswapd; /* Protected by
mem_hotplug_begin/end() */
int kswapd_order;
- enum zone_type kswapd_classzone_idx;
+ enum zone_type kswapd_highest_zoneidx;
int kswapd_failures; /* Number of 'reclaimed == 0' runs */
#ifdef CONFIG_COMPACTION
int kcompactd_max_order;
- enum zone_type kcompactd_classzone_idx;
+ enum zone_type kcompactd_highest_zoneidx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
#endif
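The renamed fields record the highest zone index the most recent wakeup asked kswapd or kcompactd to operate on, matching the highest_zoneidx parameters in the declarations below. A hedged usage sketch:

    /*
     * Ask kswapd to reclaim on behalf of an order-3 request that
     * is allowed to use zones up to and including ZONE_NORMAL.
     */
    wakeup_kswapd(zone, GFP_KERNEL, 3, ZONE_NORMAL);
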
@@ -781,15 +790,15 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat)
void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
- enum zone_type classzone_idx);
+ enum zone_type highest_zoneidx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
- int classzone_idx, unsigned int alloc_flags,
+ int highest_zoneidx, unsigned int alloc_flags,
long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
- unsigned long mark, int classzone_idx,
+ unsigned long mark, int highest_zoneidx,
unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
- unsigned long mark, int classzone_idx);
+ unsigned long mark, int highest_zoneidx);
enum memmap_context {
MEMMAP_EARLY,
MEMMAP_HOTPLUG,
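highest_zoneidx caps which lowmem reserves apply when testing a watermark: the higher the allowed zone, the larger the reserve subtracted from the free-page count. A sketch of a call against the renamed prototype; alloc_flags of 0 means no watermark-lowering privileges:

    /*
     * Would an order-0 allocation constrained to zones up to
     * ZONE_NORMAL keep this zone above its min watermark?
     */
    bool ok = zone_watermark_ok(zone, 0, min_wmark_pages(zone),
    			    ZONE_NORMAL, 0);
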
@@ -874,7 +883,7 @@ extern int movable_zone;
#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+#ifdef CONFIG_NEED_MULTIPLE_NODES
return movable_zone == ZONE_HIGHMEM;
#else
return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
@@ -909,24 +918,23 @@ static inline int is_highmem(struct zone *zone)
/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
-int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-int watermark_boost_factor_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
+
+int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *,
+ loff_t *);
+int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *,
+ size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
-int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
+int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *,
+ size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
+ void *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
+ void *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-
-extern int numa_zonelist_order_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
+ void *, size_t *, loff_t *);
+int numa_zonelist_order_handler(struct ctl_table *, int,
+ void *, size_t *, loff_t *);
+extern int percpu_pagelist_fraction;
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16
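The __user annotation disappears because the sysctl core now copies data between userspace and a kernel bounce buffer before invoking the handler, which therefore receives a plain kernel pointer. A sketch of a handler written against the new signature; the handler name is hypothetical:

    #include <linux/sysctl.h>

    /*
     * Hypothetical handler for the new signature: buffer is a kernel
     * pointer, so no __user handling is needed here; bounds checking
     * is delegated to the generic helper via table->extra1/extra2.
     */
    static int example_sysctl_handler(struct ctl_table *table, int write,
    		void *buffer, size_t *lenp, loff_t *ppos)
    {
    	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

    	if (ret || !write)
    		return ret;
    	/* react to the newly written value here */
    	return 0;
    }
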
@@ -1078,15 +1086,6 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
#include <asm/sparsemem.h>
#endif
-#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
- !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
-static inline unsigned long early_pfn_to_nid(unsigned long pfn)
-{
- BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
- return 0;
-}
-#endif
-
#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn) (0)
#endif