author    Linus Torvalds <torvalds@linux-foundation.org>    2013-09-13 02:44:27 +0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-09-13 02:44:27 +0400
commit    ac4de9543aca59f2b763746647577302fbedd57e (patch)
tree      40407750569ee030de56233c41c9a97f7e89cf67 /include
parent    26935fb06ee88f1188789807687c03041f3c70d9 (diff)
parent    de32a8177f64bc62e1b19c685dd391af664ab13f (diff)
download  linux-ac4de9543aca59f2b763746647577302fbedd57e.tar.xz
Merge branch 'akpm' (patches from Andrew Morton)
Merge more patches from Andrew Morton:
 "The rest of MM. Plus one misc cleanup"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (35 commits)
  mm/Kconfig: add MMU dependency for MIGRATION.
  kernel: replace strict_strto*() with kstrto*()
  mm, thp: count thp_fault_fallback anytime thp fault fails
  thp: consolidate code between handle_mm_fault() and do_huge_pmd_anonymous_page()
  thp: do_huge_pmd_anonymous_page() cleanup
  thp: move maybe_pmd_mkwrite() out of mk_huge_pmd()
  mm: cleanup add_to_page_cache_locked()
  thp: account anon transparent huge pages into NR_ANON_PAGES
  truncate: drop 'oldsize' truncate_pagecache() parameter
  mm: make lru_add_drain_all() selective
  memcg: document cgroup dirty/writeback memory statistics
  memcg: add per cgroup writeback pages accounting
  memcg: check for proper lock held in mem_cgroup_update_page_stat
  memcg: remove MEMCG_NR_FILE_MAPPED
  memcg: reduce function dereference
  memcg: avoid overflow caused by PAGE_ALIGN
  memcg: rename RESOURCE_MAX to RES_COUNTER_MAX
  memcg: correct RESOURCE_MAX to ULLONG_MAX
  mm: memcg: do not trap chargers with full callstack on OOM
  mm: memcg: rework and document OOM waiting and wakeup
  ...
Diffstat (limited to 'include')
-rw-r--r--   include/linux/huge_mm.h        3
-rw-r--r--   include/linux/memcontrol.h   148
-rw-r--r--   include/linux/mm.h             6
-rw-r--r--   include/linux/res_counter.h    2
-rw-r--r--   include/linux/sched.h          7
-rw-r--r--   include/linux/swap.h           2
6 files changed, 143 insertions(+), 25 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index b60de92e2edc..3935428c57cf 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -96,9 +96,6 @@ extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd,
struct vm_area_struct *vma,
unsigned long addr, unsigned long end);
-extern int handle_pte_fault(struct mm_struct *mm,
- struct vm_area_struct *vma, unsigned long address,
- pte_t *pte, pmd_t *pmd, unsigned int flags);
extern int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6c416092e324..60e95872da29 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -30,9 +30,21 @@ struct page;
struct mm_struct;
struct kmem_cache;
-/* Stats that can be updated by kernel. */
-enum mem_cgroup_page_stat_item {
- MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
+/*
+ * The corresponding mem_cgroup_stat_names is defined in mm/memcontrol.c,
+ * These two lists should keep in accord with each other.
+ */
+enum mem_cgroup_stat_index {
+ /*
+ * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
+ */
+ MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
+ MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
+ MEM_CGROUP_STAT_RSS_HUGE, /* # of pages charged as anon huge */
+ MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
+ MEM_CGROUP_STAT_WRITEBACK, /* # of pages under writeback */
+ MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
+ MEM_CGROUP_STAT_NSTATS,
};
struct mem_cgroup_reclaim_cookie {
@@ -41,6 +53,23 @@ struct mem_cgroup_reclaim_cookie {
unsigned int generation;
};
+enum mem_cgroup_filter_t {
+ VISIT, /* visit current node */
+ SKIP, /* skip the current node and continue traversal */
+ SKIP_TREE, /* skip the whole subtree and continue traversal */
+};
+
+/*
+ * mem_cgroup_filter_t predicate might instruct mem_cgroup_iter_cond how to
+ * iterate through the hierarchy tree. Each tree element is checked by the
+ * predicate before it is returned by the iterator. If a filter returns
+ * SKIP or SKIP_TREE then the iterator code continues traversal (with the
+ * next node down the hierarchy or the next node that doesn't belong under the
+ * memcg's subtree).
+ */
+typedef enum mem_cgroup_filter_t
+(*mem_cgroup_iter_filter)(struct mem_cgroup *memcg, struct mem_cgroup *root);
+
#ifdef CONFIG_MEMCG
/*
* All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -108,9 +137,18 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
struct page *oldpage, struct page *newpage, bool migration_ok);
-struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
- struct mem_cgroup *,
- struct mem_cgroup_reclaim_cookie *);
+struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
+ struct mem_cgroup *prev,
+ struct mem_cgroup_reclaim_cookie *reclaim,
+ mem_cgroup_iter_filter cond);
+
+static inline struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
+ struct mem_cgroup *prev,
+ struct mem_cgroup_reclaim_cookie *reclaim)
+{
+ return mem_cgroup_iter_cond(root, prev, reclaim, NULL);
+}
+
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
/*
@@ -125,6 +163,48 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
struct page *newpage);
+/**
+ * mem_cgroup_toggle_oom - toggle the memcg OOM killer for the current task
+ * @new: true to enable, false to disable
+ *
+ * Toggle whether a failed memcg charge should invoke the OOM killer
+ * or just return -ENOMEM. Returns the previous toggle state.
+ *
+ * NOTE: Any path that enables the OOM killer before charging must
+ * call mem_cgroup_oom_synchronize() afterward to finalize the
+ * OOM handling and clean up.
+ */
+static inline bool mem_cgroup_toggle_oom(bool new)
+{
+ bool old;
+
+ old = current->memcg_oom.may_oom;
+ current->memcg_oom.may_oom = new;
+
+ return old;
+}
+
+static inline void mem_cgroup_enable_oom(void)
+{
+ bool old = mem_cgroup_toggle_oom(true);
+
+ WARN_ON(old == true);
+}
+
+static inline void mem_cgroup_disable_oom(void)
+{
+ bool old = mem_cgroup_toggle_oom(false);
+
+ WARN_ON(old == false);
+}
+
+static inline bool task_in_memcg_oom(struct task_struct *p)
+{
+ return p->memcg_oom.in_memcg_oom;
+}
+
+bool mem_cgroup_oom_synchronize(void);
+
#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif
@@ -165,24 +245,24 @@ static inline void mem_cgroup_end_update_page_stat(struct page *page,
}
void mem_cgroup_update_page_stat(struct page *page,
- enum mem_cgroup_page_stat_item idx,
+ enum mem_cgroup_stat_index idx,
int val);
static inline void mem_cgroup_inc_page_stat(struct page *page,
- enum mem_cgroup_page_stat_item idx)
+ enum mem_cgroup_stat_index idx)
{
mem_cgroup_update_page_stat(page, idx, 1);
}
static inline void mem_cgroup_dec_page_stat(struct page *page,
- enum mem_cgroup_page_stat_item idx)
+ enum mem_cgroup_stat_index idx)
{
mem_cgroup_update_page_stat(page, idx, -1);
}
-unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
- gfp_t gfp_mask,
- unsigned long *total_scanned);
+enum mem_cgroup_filter_t
+mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
+ struct mem_cgroup *root);
void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
@@ -296,6 +376,15 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
struct page *oldpage, struct page *newpage, bool migration_ok)
{
}
+static inline struct mem_cgroup *
+mem_cgroup_iter_cond(struct mem_cgroup *root,
+ struct mem_cgroup *prev,
+ struct mem_cgroup_reclaim_cookie *reclaim,
+ mem_cgroup_iter_filter cond)
+{
+ /* first call must return non-NULL, second return NULL */
+ return (struct mem_cgroup *)(unsigned long)!prev;
+}
static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
@@ -348,22 +437,45 @@ static inline void mem_cgroup_end_update_page_stat(struct page *page,
{
}
+static inline bool mem_cgroup_toggle_oom(bool new)
+{
+ return false;
+}
+
+static inline void mem_cgroup_enable_oom(void)
+{
+}
+
+static inline void mem_cgroup_disable_oom(void)
+{
+}
+
+static inline bool task_in_memcg_oom(struct task_struct *p)
+{
+ return false;
+}
+
+static inline bool mem_cgroup_oom_synchronize(void)
+{
+ return false;
+}
+
static inline void mem_cgroup_inc_page_stat(struct page *page,
- enum mem_cgroup_page_stat_item idx)
+ enum mem_cgroup_stat_index idx)
{
}
static inline void mem_cgroup_dec_page_stat(struct page *page,
- enum mem_cgroup_page_stat_item idx)
+ enum mem_cgroup_stat_index idx)
{
}
static inline
-unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
- gfp_t gfp_mask,
- unsigned long *total_scanned)
+enum mem_cgroup_filter_t
+mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
+ struct mem_cgroup *root)
{
- return 0;
+ return VISIT;
}
static inline void mem_cgroup_split_huge_fixup(struct page *head)
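The conditional iterator added above is meant to be driven in the same prev/next style as plain mem_cgroup_iter(), with the filter pruning the walk. Below is a minimal sketch using only the declarations in this header; example_soft_reclaim_walk() is a hypothetical caller and the reclaim work itself is elided.

#include <linux/memcontrol.h>

/*
 * Hypothetical caller: visit every memcg under @root that the filter does
 * not ask to skip.  A SKIP_TREE verdict prunes whole subtrees, so
 * ineligible groups are never returned at all.
 */
static void example_soft_reclaim_walk(struct mem_cgroup *root,
				      struct mem_cgroup_reclaim_cookie *cookie)
{
	struct mem_cgroup *memcg;

	memcg = mem_cgroup_iter_cond(root, NULL, cookie,
				     mem_cgroup_soft_reclaim_eligible);
	while (memcg) {
		/* ... reclaim pages charged to memcg here ... */

		memcg = mem_cgroup_iter_cond(root, memcg, cookie,
					     mem_cgroup_soft_reclaim_eligible);
	}
}

As with plain mem_cgroup_iter(), a caller that abandons the walk early has to drop the reference on the last returned group via mem_cgroup_iter_break().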
diff --git a/include/linux/mm.h b/include/linux/mm.h
index caf543c7eaa7..8b6e55ee8855 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -176,6 +176,7 @@ extern pgprot_t protection_map[16];
#define FAULT_FLAG_RETRY_NOWAIT 0x10 /* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE 0x20 /* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED 0x40 /* second try */
+#define FAULT_FLAG_USER 0x80 /* The fault originated in userspace */
/*
* vm_fault is filled by the pagefault handler and passed to the vma's
@@ -876,11 +877,12 @@ static inline int page_mapped(struct page *page)
#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
#define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */
+#define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */
#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
- VM_FAULT_HWPOISON_LARGE)
+ VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
@@ -984,7 +986,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
unmap_mapping_range(mapping, holebegin, holelen, 0);
}
-extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
+extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
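VM_FAULT_FALLBACK gives huge-page fault handlers a way to say "retry this fault with small pages" instead of failing outright. A rough sketch of the calling convention follows, assuming the transparent_hugepage_enabled() and do_huge_pmd_anonymous_page() interfaces from <linux/huge_mm.h>; example_anon_fault() and the elided PTE path are placeholders, not the actual mm/memory.c code.

#include <linux/huge_mm.h>
#include <linux/mm.h>

/*
 * Placeholder fault routine: try the THP handler first and only fall back
 * to the ordinary PTE path when it reports VM_FAULT_FALLBACK.  Real errors
 * (VM_FAULT_OOM, VM_FAULT_SIGBUS, ...) and successful results are returned
 * unchanged.
 */
static int example_anon_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmd,
			      unsigned int flags)
{
	if (transparent_hugepage_enabled(vma)) {
		int ret = do_huge_pmd_anonymous_page(mm, vma, address,
						     pmd, flags);
		if (!(ret & VM_FAULT_FALLBACK))
			return ret;
	}

	/* ... handle the fault with a base-size page instead ... */
	return 0;
}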
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index 96a509b6be04..201a69749659 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -54,7 +54,7 @@ struct res_counter {
struct res_counter *parent;
};
-#define RESOURCE_MAX (unsigned long long)LLONG_MAX
+#define RES_COUNTER_MAX ULLONG_MAX
/**
* Helpers to interact with userspace
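With the rename, the "no limit" sentinel is simply the largest value the unsigned long long counter fields can hold. A tiny illustration, assuming struct res_counter's limit field; the helper name is made up.

#include <linux/res_counter.h>

/*
 * Hypothetical helper: a counter whose limit is RES_COUNTER_MAX is
 * effectively unlimited (locking of the counter is elided here).
 */
static inline bool example_res_counter_unlimited(const struct res_counter *cnt)
{
	return cnt->limit == RES_COUNTER_MAX;
}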
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 45f254dddafc..6682da36b293 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1393,6 +1393,13 @@ struct task_struct {
unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
} memcg_batch;
unsigned int memcg_kmem_skip_account;
+ struct memcg_oom_info {
+ unsigned int may_oom:1;
+ unsigned int in_memcg_oom:1;
+ unsigned int oom_locked:1;
+ int wakeups;
+ struct mem_cgroup *wait_on_memcg;
+ } memcg_oom;
#endif
#ifdef CONFIG_UPROBES
struct uprobe_task *utask;
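The memcg_oom state added to task_struct above backs the mem_cgroup_enable_oom() / mem_cgroup_disable_oom() / mem_cgroup_oom_synchronize() helpers declared earlier in this diff. Below is a sketch of the intended protocol around a userspace fault, following the description in the "mm: memcg: do not trap chargers with full callstack on OOM" patch; example_handle_user_fault() and __example_fault_work() are stand-ins for the real fault path.

#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/sched.h>

/* Stand-in for the core fault work that may charge pages to a memcg. */
extern int __example_fault_work(struct mm_struct *mm,
				struct vm_area_struct *vma,
				unsigned long address, unsigned int flags);

static int example_handle_user_fault(struct mm_struct *mm,
				     struct vm_area_struct *vma,
				     unsigned long address, unsigned int flags)
{
	int ret;

	/* Only faults coming from userspace may invoke the memcg OOM killer. */
	if (flags & FAULT_FLAG_USER)
		mem_cgroup_enable_oom();

	ret = __example_fault_work(mm, vma, address, flags);

	if (flags & FAULT_FLAG_USER) {
		mem_cgroup_disable_oom();
		/*
		 * A charge may have recorded an OOM situation for this task;
		 * finish it now that no locks are held (see the kernel-doc of
		 * mem_cgroup_toggle_oom() above).
		 */
		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
			mem_cgroup_oom_synchronize();
	}

	return ret;
}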
diff --git a/include/linux/swap.h b/include/linux/swap.h
index c03c139219c9..46ba0c6c219f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -280,7 +280,7 @@ extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
-extern int lru_add_drain_all(void);
+extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void swap_setup(void);
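lru_add_drain_all() loses its return value because the reworked implementation can no longer fail: it queues drain work only on CPUs that actually have pages sitting in their LRU pagevecs and then waits for that work to finish. A condensed sketch of that idea, using only lru_add_drain() from this header; example_cpu_has_lru_pages() stands in for the per-CPU pagevec_count() checks in mm/swap.c, and the other example_* names are likewise illustrative.

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/swap.h>
#include <linux/workqueue.h>

static DEFINE_PER_CPU(struct work_struct, example_drain_work);

/* Work handler: drain the pagevecs of whichever CPU runs the work item. */
static void example_drain_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/* Stand-in for the "does this CPU hold any LRU pages in pagevecs?" check. */
static bool example_cpu_has_lru_pages(int cpu)
{
	return true;	/* conservatively drain everything in this sketch */
}

void example_lru_add_drain_all(void)
{
	static DEFINE_MUTEX(lock);
	static struct cpumask has_work;
	int cpu;

	mutex_lock(&lock);
	get_online_cpus();
	cpumask_clear(&has_work);

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(example_drain_work, cpu);

		if (example_cpu_has_lru_pages(cpu)) {
			INIT_WORK(work, example_drain_cpu);
			schedule_work_on(cpu, work);
			cpumask_set_cpu(cpu, &has_work);
		}
	}

	/* Nothing above can fail, hence the new void return type. */
	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(example_drain_work, cpu));

	put_online_cpus();
	mutex_unlock(&lock);
}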