author		Linus Torvalds <torvalds@linux-foundation.org>	2013-05-01 04:37:43 +0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-01 04:37:43 +0400
commit		5f56886521d6ddd3648777fae44d82382dd8c87f (patch)
tree		aa0db6331cdb01c23f1884439840aadd31bbcca4 /mm
parent		f1e9a236e5ddab6c349611ee86f54291916f226c (diff)
parent		e2a8b0a779787314eca1061308a8182e6c5bfabd (diff)
download	linux-5f56886521d6ddd3648777fae44d82382dd8c87f.tar.xz
Merge branch 'akpm' (incoming from Andrew)
Merge third batch of fixes from Andrew Morton:
 "Most of the rest. I still have two large patchsets against AIO and
  IPC, but they're a bit stuck behind other trees and I'm about to
  vanish for six days.

   - random fixlets
   - inotify
   - more of the MM queue
   - show_stack() cleanups
   - DMI update
   - kthread/workqueue things
   - compat cleanups
   - epoll updates
   - binfmt updates
   - nilfs2
   - hfs
   - hfsplus
   - ptrace
   - kmod
   - coredump
   - kexec
   - rbtree
   - pids
   - pidns
   - pps
   - semaphore tweaks
   - some w1 patches
   - relay updates
   - core Kconfig changes
   - sysrq tweaks"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (109 commits)
  Documentation/sysrq: fix inconstistent help message of sysrq key
  ethernet/emac/sysrq: fix inconstistent help message of sysrq key
  sparc/sysrq: fix inconstistent help message of sysrq key
  powerpc/xmon/sysrq: fix inconstistent help message of sysrq key
  ARM/etm/sysrq: fix inconstistent help message of sysrq key
  power/sysrq: fix inconstistent help message of sysrq key
  kgdb/sysrq: fix inconstistent help message of sysrq key
  lib/decompress.c: fix initconst
  notifier-error-inject: fix module names in Kconfig
  kernel/sys.c: make prctl(PR_SET_MM) generally available
  UAPI: remove empty Kbuild files
  menuconfig: print more info for symbol without prompts
  init/Kconfig: re-order CONFIG_EXPERT options to fix menuconfig display
  kconfig menu: move Virtualization drivers near other virtualization options
  Kconfig: consolidate CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
  relay: use macro PAGE_ALIGN instead of FIX_SIZE
  kernel/relay.c: move FIX_SIZE macro into relay.c
  kernel/relay.c: remove unused function argument actor
  drivers/w1/slaves/w1_ds2760.c: fix the error handling in w1_ds2760_add_slave()
  drivers/w1/slaves/w1_ds2781.c: fix the error handling in w1_ds2781_add_slave()
  ...
Diffstat (limited to 'mm')
-rw-r--r--	mm/cleancache.c	265
-rw-r--r--	mm/frontswap.c	156
-rw-r--r--	mm/swapfile.c	17
3 files changed, 359 insertions, 79 deletions
diff --git a/mm/cleancache.c b/mm/cleancache.c
index d76ba74be2d0..5875f48ce279 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -19,20 +19,10 @@
#include <linux/cleancache.h>
/*
- * This global enablement flag may be read thousands of times per second
- * by cleancache_get/put/invalidate even on systems where cleancache_ops
- * is not claimed (e.g. cleancache is config'ed on but remains
- * disabled), so is preferred to the slower alternative: a function
- * call that checks a non-global.
- */
-int cleancache_enabled __read_mostly;
-EXPORT_SYMBOL(cleancache_enabled);
-
-/*
* cleancache_ops is set by cleancache_ops_register to contain the pointers
* to the cleancache "backend" implementation functions.
*/
-static struct cleancache_ops cleancache_ops __read_mostly;
+static struct cleancache_ops *cleancache_ops __read_mostly;
/*
* Counters available via /sys/kernel/debug/cleancache (if debugfs is
@@ -45,15 +35,101 @@ static u64 cleancache_puts;
static u64 cleancache_invalidates;
/*
- * register operations for cleancache, returning previous thus allowing
- * detection of multiple backends and possible nesting
+ * When no backend is registered, all calls to init_fs and init_shared_fs
+ * are recorded, and fake poolids (FAKE_FS_POOLID_OFFSET or
+ * FAKE_SHARED_FS_POOLID_OFFSET, plus the offset in the respective array
+ * [shared_|]fs_poolid_map) are given to the respective super block
+ * (sb->cleancache_poolid), and no tmem_pools are created. When a backend
+ * registers with cleancache, the earlier calls to init_fs and init_shared_fs
+ * are executed to create tmem_pools and set the respective poolids. While no
+ * backend is registered, all "puts", "gets" and "flushes" are ignored or fail.
+ */
+#define MAX_INITIALIZABLE_FS 32
+#define FAKE_FS_POOLID_OFFSET 1000
+#define FAKE_SHARED_FS_POOLID_OFFSET 2000
+
+#define FS_NO_BACKEND (-1)
+#define FS_UNKNOWN (-2)
+static int fs_poolid_map[MAX_INITIALIZABLE_FS];
+static int shared_fs_poolid_map[MAX_INITIALIZABLE_FS];
+static char *uuids[MAX_INITIALIZABLE_FS];
+/*
+ * Mutex for the [shared_|]fs_poolid_map to guard against multiple threads
+ * invoking umount (and ending in __cleancache_invalidate_fs) and also multiple
+ * threads calling mount (and ending up in __cleancache_init_[shared|]fs).
+ */
+static DEFINE_MUTEX(poolid_mutex);
+/*
+ * While cleancache_ops is NULL (the default), all calls to the cleancache
+ * functions, except __cleancache_invalidate_fs and
+ * __cleancache_init_[shared|]fs, are guarded by an "if (!cleancache_ops)
+ * return". This means multiple threads (from different filesystems) will
+ * be checking cleancache_ops. Using a plain pointer instead of an atomic_t
+ * (or a pointer guarded by a spinlock) is OK - we can tolerate the window
+ * between the backend having been initialized (and cleancache_ops having
+ * been set to non-NULL) and the filesystems actually starting to call the
+ * backend. The inverse (when unloading) is obviously not good - but this
+ * shim does not do that (yet).
+ */
+
+/*
+ * The backends and filesystems all work asynchronously, because the
+ * backends can be built as modules.
+ * The usual sequence of events is:
+ * a) mount / -> __cleancache_init_fs is called. We set the
+ * [shared_|]fs_poolid_map and uuids for the filesystem.
+ *
+ * b) user does I/Os -> we call the rest of the __cleancache_*
+ * functions, which return immediately as cleancache_ops is NULL.
+ *
+ * c) modprobe zcache -> cleancache_register_ops. We init the backend
+ * and set cleancache_ops, and for any fs_poolid_map entry that was
+ * set by __cleancache_init_fs we initialize the poolid.
+ *
+ * d) user does I/Os -> now that cleancache_ops is set, all the
+ * __cleancache_* functions can call the backend. They all check
+ * that fs_poolid_map is valid and if so invoke the backend.
+ *
+ * e) umount / -> __cleancache_invalidate_fs. The fs_poolid_map is
+ * reset (which is the second check the __cleancache_* ops make
+ * before calling the backend).
+ *
+ * The sequence of events could also be c), followed by a), d) and e),
+ * in which case b) never happens. There is also the chance of c), with
+ * one thread doing a) + d) while another does e). For that case we depend
+ * on the filesystem calling __cleancache_invalidate_fs in the proper
+ * sequence (so that it handles all I/Os before it invalidates the fs,
+ * which is the last part of the unmounting process).
+ *
+ * Note: the astute reader will notice that there is no "rmmod zcache"
+ * case. This is because the functionality for that is not yet implemented
+ * and, when done, will require some extra locking not yet devised.
+ */
+
+/*
+ * Register operations for cleancache, returning previous thus allowing
+ * detection of multiple backends and possible nesting.
*/
-struct cleancache_ops cleancache_register_ops(struct cleancache_ops *ops)
+struct cleancache_ops *cleancache_register_ops(struct cleancache_ops *ops)
{
- struct cleancache_ops old = cleancache_ops;
+ struct cleancache_ops *old = cleancache_ops;
+ int i;
- cleancache_ops = *ops;
- cleancache_enabled = 1;
+ mutex_lock(&poolid_mutex);
+ for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
+ if (fs_poolid_map[i] == FS_NO_BACKEND)
+ fs_poolid_map[i] = ops->init_fs(PAGE_SIZE);
+ if (shared_fs_poolid_map[i] == FS_NO_BACKEND)
+ shared_fs_poolid_map[i] = ops->init_shared_fs
+ (uuids[i], PAGE_SIZE);
+ }
+ /*
+ * We MUST set cleancache_ops _after_ we have called the backends
+ * init_fs or init_shared_fs functions. Otherwise the compiler might
+ * re-order where cleancache_ops is set in this function.
+ */
+ barrier();
+ cleancache_ops = ops;
+ mutex_unlock(&poolid_mutex);
return old;
}
EXPORT_SYMBOL(cleancache_register_ops);
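
The barrier() + assignment pair above is the heart of the lazy-registration
scheme: the ops pointer is only published after every deferred init_fs call
has run, so any reader that sees a non-NULL cleancache_ops also sees fully
initialized pools. A minimal user-space sketch of the same publish-after-init
pattern follows (struct ops, fake_init_fs and register_ops are illustrative
stand-ins, not kernel API):

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for struct cleancache_ops. */
struct ops {
	int (*init_fs)(size_t pagesize);
};

static struct ops *registered_ops;	/* NULL until a backend registers */

static int fake_init_fs(size_t pagesize)
{
	printf("pool created for page size %zu\n", pagesize);
	return 0;			/* the real hook returns a poolid */
}

/*
 * Mirror of cleancache_register_ops(): replay deferred init work first,
 * then publish the pointer, so a reader that sees non-NULL ops also sees
 * the completed initialization.
 */
static struct ops *register_ops(struct ops *ops)
{
	struct ops *old = registered_ops;

	ops->init_fs(4096);			/* deferred init replayed first */
	__asm__ __volatile__("" ::: "memory");	/* compiler barrier, like barrier() */
	registered_ops = ops;			/* publish only after init */
	return old;
}

int main(void)
{
	static struct ops zcache_like = { .init_fs = fake_init_fs };

	register_ops(&zcache_like);
	if (registered_ops)			/* the fast-path NULL check */
		printf("backend is live\n");
	return 0;
}

On the read side the only remaining cost is the NULL check, which is why the
old cleancache_enabled flag could be dropped in this patch.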
@@ -61,15 +137,42 @@ EXPORT_SYMBOL(cleancache_register_ops);
/* Called by a cleancache-enabled filesystem at time of mount */
void __cleancache_init_fs(struct super_block *sb)
{
- sb->cleancache_poolid = (*cleancache_ops.init_fs)(PAGE_SIZE);
+ int i;
+
+ mutex_lock(&poolid_mutex);
+ for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
+ if (fs_poolid_map[i] == FS_UNKNOWN) {
+ sb->cleancache_poolid = i + FAKE_FS_POOLID_OFFSET;
+ if (cleancache_ops)
+ fs_poolid_map[i] = cleancache_ops->init_fs(PAGE_SIZE);
+ else
+ fs_poolid_map[i] = FS_NO_BACKEND;
+ break;
+ }
+ }
+ mutex_unlock(&poolid_mutex);
}
EXPORT_SYMBOL(__cleancache_init_fs);
/* Called by a cleancache-enabled clustered filesystem at time of mount */
void __cleancache_init_shared_fs(char *uuid, struct super_block *sb)
{
- sb->cleancache_poolid =
- (*cleancache_ops.init_shared_fs)(uuid, PAGE_SIZE);
+ int i;
+
+ mutex_lock(&poolid_mutex);
+ for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
+ if (shared_fs_poolid_map[i] == FS_UNKNOWN) {
+ sb->cleancache_poolid = i + FAKE_SHARED_FS_POOLID_OFFSET;
+ uuids[i] = uuid;
+ if (cleancache_ops)
+ shared_fs_poolid_map[i] = cleancache_ops->init_shared_fs
+ (uuid, PAGE_SIZE);
+ else
+ shared_fs_poolid_map[i] = FS_NO_BACKEND;
+ break;
+ }
+ }
+ mutex_unlock(&poolid_mutex);
}
EXPORT_SYMBOL(__cleancache_init_shared_fs);
@@ -99,27 +202,53 @@ static int cleancache_get_key(struct inode *inode,
}
/*
+ * Returns a pool_id that is associated with a given fake poolid.
+ */
+static int get_poolid_from_fake(int fake_pool_id)
+{
+ if (fake_pool_id >= FAKE_SHARED_FS_POOLID_OFFSET)
+ return shared_fs_poolid_map[fake_pool_id -
+ FAKE_SHARED_FS_POOLID_OFFSET];
+ else if (fake_pool_id >= FAKE_FS_POOLID_OFFSET)
+ return fs_poolid_map[fake_pool_id - FAKE_FS_POOLID_OFFSET];
+ return FS_NO_BACKEND;
+}
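
To make the offset arithmetic concrete: a super block holding fake poolid
1003 indexes fs_poolid_map[3], and 2000 indexes shared_fs_poolid_map[0].
A stand-alone sketch of the same mapping (the map contents are invented
purely for illustration):

#include <stdio.h>

#define MAX_INITIALIZABLE_FS		32
#define FAKE_FS_POOLID_OFFSET		1000
#define FAKE_SHARED_FS_POOLID_OFFSET	2000
#define FS_NO_BACKEND			(-1)

static int fs_poolid_map[MAX_INITIALIZABLE_FS];
static int shared_fs_poolid_map[MAX_INITIALIZABLE_FS];

/* Same arithmetic as get_poolid_from_fake() above. */
static int poolid_from_fake(int fake_pool_id)
{
	if (fake_pool_id >= FAKE_SHARED_FS_POOLID_OFFSET)
		return shared_fs_poolid_map[fake_pool_id -
					    FAKE_SHARED_FS_POOLID_OFFSET];
	else if (fake_pool_id >= FAKE_FS_POOLID_OFFSET)
		return fs_poolid_map[fake_pool_id - FAKE_FS_POOLID_OFFSET];
	return FS_NO_BACKEND;
}

int main(void)
{
	fs_poolid_map[3] = 7;		/* backend handed out real poolid 7 */
	shared_fs_poolid_map[0] = 2;	/* shared fs got real poolid 2 */

	printf("%d\n", poolid_from_fake(1003));	/* fs_poolid_map[3] -> 7 */
	printf("%d\n", poolid_from_fake(2000));	/* shared map[0] -> 2 */
	printf("%d\n", poolid_from_fake(-1));	/* -> FS_NO_BACKEND */
	return 0;
}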
+
+/*
* "Get" data from cleancache associated with the poolid/inode/index
* that were specified when the data was put to cleancache and, if
* successful, use it to fill the specified page with data and return 0.
* The pageframe is unchanged and returns -1 if the get fails.
* Page must be locked by caller.
+ *
+ * The function has two checks before any action is taken - whether
+ * a backend is registered and whether the sb->cleancache_poolid
+ * is correct.
*/
int __cleancache_get_page(struct page *page)
{
int ret = -1;
int pool_id;
+ int fake_pool_id;
struct cleancache_filekey key = { .u.key = { 0 } };
+ if (!cleancache_ops) {
+ cleancache_failed_gets++;
+ goto out;
+ }
+
VM_BUG_ON(!PageLocked(page));
- pool_id = page->mapping->host->i_sb->cleancache_poolid;
- if (pool_id < 0)
+ fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
+ if (fake_pool_id < 0)
goto out;
+ pool_id = get_poolid_from_fake(fake_pool_id);
if (cleancache_get_key(page->mapping->host, &key) < 0)
goto out;
- ret = (*cleancache_ops.get_page)(pool_id, key, page->index, page);
+ if (pool_id >= 0)
+ ret = cleancache_ops->get_page(pool_id,
+ key, page->index, page);
if (ret == 0)
cleancache_succ_gets++;
else
@@ -134,17 +263,32 @@ EXPORT_SYMBOL(__cleancache_get_page);
* (previously-obtained per-filesystem) poolid and the page's,
* inode and page index. Page must be locked. Note that a put_page
* always "succeeds", though a subsequent get_page may succeed or fail.
+ *
+ * The function has two checks before any action is taken - whether
+ * a backend is registered and whether the sb->cleancache_poolid
+ * is correct.
*/
void __cleancache_put_page(struct page *page)
{
int pool_id;
+ int fake_pool_id;
struct cleancache_filekey key = { .u.key = { 0 } };
+ if (!cleancache_ops) {
+ cleancache_puts++;
+ return;
+ }
+
VM_BUG_ON(!PageLocked(page));
- pool_id = page->mapping->host->i_sb->cleancache_poolid;
+ fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
+ if (fake_pool_id < 0)
+ return;
+
+ pool_id = get_poolid_from_fake(fake_pool_id);
+
if (pool_id >= 0 &&
- cleancache_get_key(page->mapping->host, &key) >= 0) {
- (*cleancache_ops.put_page)(pool_id, key, page->index, page);
+ cleancache_get_key(page->mapping->host, &key) >= 0) {
+ cleancache_ops->put_page(pool_id, key, page->index, page);
cleancache_puts++;
}
}
@@ -153,19 +297,31 @@ EXPORT_SYMBOL(__cleancache_put_page);
/*
* Invalidate any data from cleancache associated with the poolid and the
* page's inode and page index so that a subsequent "get" will fail.
+ *
+ * The function has two checks before any action is taken - whether
+ * a backend is registered and whether the sb->cleancache_poolid
+ * is correct.
*/
void __cleancache_invalidate_page(struct address_space *mapping,
struct page *page)
{
/* careful... page->mapping is NULL sometimes when this is called */
- int pool_id = mapping->host->i_sb->cleancache_poolid;
+ int pool_id;
+ int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
struct cleancache_filekey key = { .u.key = { 0 } };
- if (pool_id >= 0) {
+ if (!cleancache_ops)
+ return;
+
+ if (fake_pool_id >= 0) {
+ pool_id = get_poolid_from_fake(fake_pool_id);
+ if (pool_id < 0)
+ return;
+
VM_BUG_ON(!PageLocked(page));
if (cleancache_get_key(mapping->host, &key) >= 0) {
- (*cleancache_ops.invalidate_page)(pool_id,
- key, page->index);
+ cleancache_ops->invalidate_page(pool_id,
+ key, page->index);
cleancache_invalidates++;
}
}
@@ -176,34 +332,63 @@ EXPORT_SYMBOL(__cleancache_invalidate_page);
* Invalidate all data from cleancache associated with the poolid and the
* mapping's inode so that all subsequent gets to this poolid/inode
* will fail.
+ *
+ * The function has two checks before any action is taken - whether
+ * a backend is registered and whether the sb->cleancache_poolid
+ * is correct.
*/
void __cleancache_invalidate_inode(struct address_space *mapping)
{
- int pool_id = mapping->host->i_sb->cleancache_poolid;
+ int pool_id;
+ int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
struct cleancache_filekey key = { .u.key = { 0 } };
+ if (!cleancache_ops)
+ return;
+
+ if (fake_pool_id < 0)
+ return;
+
+ pool_id = get_poolid_from_fake(fake_pool_id);
+
if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
- (*cleancache_ops.invalidate_inode)(pool_id, key);
+ cleancache_ops->invalidate_inode(pool_id, key);
}
EXPORT_SYMBOL(__cleancache_invalidate_inode);
/*
* Called by any cleancache-enabled filesystem at time of unmount;
- * note that pool_id is surrendered and may be reutrned by a subsequent
- * cleancache_init_fs or cleancache_init_shared_fs
+ * note that pool_id is surrendered and may be returned by a subsequent
+ * cleancache_init_fs or cleancache_init_shared_fs.
*/
void __cleancache_invalidate_fs(struct super_block *sb)
{
- if (sb->cleancache_poolid >= 0) {
- int old_poolid = sb->cleancache_poolid;
- sb->cleancache_poolid = -1;
- (*cleancache_ops.invalidate_fs)(old_poolid);
+ int index;
+ int fake_pool_id = sb->cleancache_poolid;
+ int old_poolid = fake_pool_id;
+
+ mutex_lock(&poolid_mutex);
+ if (fake_pool_id >= FAKE_SHARED_FS_POOLID_OFFSET) {
+ index = fake_pool_id - FAKE_SHARED_FS_POOLID_OFFSET;
+ old_poolid = shared_fs_poolid_map[index];
+ shared_fs_poolid_map[index] = FS_UNKNOWN;
+ uuids[index] = NULL;
+ } else if (fake_pool_id >= FAKE_FS_POOLID_OFFSET) {
+ index = fake_pool_id - FAKE_FS_POOLID_OFFSET;
+ old_poolid = fs_poolid_map[index];
+ fs_poolid_map[index] = FS_UNKNOWN;
}
+ sb->cleancache_poolid = -1;
+ if (cleancache_ops)
+ cleancache_ops->invalidate_fs(old_poolid);
+ mutex_unlock(&poolid_mutex);
}
EXPORT_SYMBOL(__cleancache_invalidate_fs);
static int __init init_cleancache(void)
{
+ int i;
+
#ifdef CONFIG_DEBUG_FS
struct dentry *root = debugfs_create_dir("cleancache", NULL);
if (root == NULL)
@@ -215,6 +400,10 @@ static int __init init_cleancache(void)
debugfs_create_u64("invalidates", S_IRUGO,
root, &cleancache_invalidates);
#endif
+ for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
+ fs_poolid_map[i] = FS_UNKNOWN;
+ shared_fs_poolid_map[i] = FS_UNKNOWN;
+ }
return 0;
}
module_init(init_cleancache)
diff --git a/mm/frontswap.c b/mm/frontswap.c
index 2890e67d6026..538367ef1372 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -24,15 +24,7 @@
* frontswap_ops is set by frontswap_register_ops to contain the pointers
* to the frontswap "backend" implementation functions.
*/
-static struct frontswap_ops frontswap_ops __read_mostly;
-
-/*
- * This global enablement flag reduces overhead on systems where frontswap_ops
- * has not been registered, so is preferred to the slower alternative: a
- * function call that checks a non-global.
- */
-bool frontswap_enabled __read_mostly;
-EXPORT_SYMBOL(frontswap_enabled);
+static struct frontswap_ops *frontswap_ops __read_mostly;
/*
* If enabled, frontswap_store will return failure even on success. As
@@ -80,16 +72,70 @@ static inline void inc_frontswap_succ_stores(void) { }
static inline void inc_frontswap_failed_stores(void) { }
static inline void inc_frontswap_invalidates(void) { }
#endif
+
+/*
+ * Due to the asynchronous nature of the backends loading potentially
+ * _after_ the swap system has been activated, we have chokepoints
+ * on all frontswap functions so that the backend is not called until
+ * it has registered.
+ *
+ * Specifically, when no backend is registered (nobody called
+ * frontswap_register_ops) all calls to frontswap_init (which is done via
+ * swapon -> enable_swap_info -> frontswap_init) are remembered (by setting
+ * bits in the need_init bitmap) but fail to create tmem_pools. When a
+ * backend registers with frontswap at some later point, the previous
+ * calls to frontswap_init are executed (by iterating over the need_init
+ * bitmap) to create tmem_pools and set the respective poolids. All of that
+ * is guarded by us using atomic bit operations on the 'need_init' bitmap.
+ *
+ * This does not guard us against the user deciding to call swapoff right
+ * as we are calling the backend to initialize (so swapon is in action).
+ * Fortunately for us, the swapon_mutex has been taken by the callee so we
+ * are OK. The other scenario, where calls to frontswap_store (called via
+ * swap_writepage) race with frontswap_invalidate_area (called via
+ * swapoff), is again guarded by the swap subsystem.
+ *
+ * While no backend is registered all calls to frontswap_[store|load|
+ * invalidate_area|invalidate_page] are ignored or fail.
+ *
+ * The time between the backend being registered and the swap file system
+ * calling the backend (via the frontswap_* functions) is indeterminate as
+ * frontswap_ops is not atomic_t (or a value guarded by a spinlock).
+ * That is OK as we are comfortable missing some of these calls to the newly
+ * registered backend.
+ *
+ * Obviously the opposite (unloading the backend) must be done after all
+ * the frontswap_[store|load|invalidate_area|invalidate_page] calls start
+ * ignoring or failing the requests - at which point frontswap_ops
+ * would have to be made atomic in some fashion.
+ */
+static DECLARE_BITMAP(need_init, MAX_SWAPFILES);
+
/*
* Register operations for frontswap, returning previous thus allowing
* detection of multiple backends and possible nesting.
*/
-struct frontswap_ops frontswap_register_ops(struct frontswap_ops *ops)
+struct frontswap_ops *frontswap_register_ops(struct frontswap_ops *ops)
{
- struct frontswap_ops old = frontswap_ops;
-
- frontswap_ops = *ops;
- frontswap_enabled = true;
+ struct frontswap_ops *old = frontswap_ops;
+ int i;
+
+ for (i = 0; i < MAX_SWAPFILES; i++) {
+ if (test_and_clear_bit(i, need_init)) {
+ struct swap_info_struct *sis = swap_info[i];
+ /* __frontswap_init _should_ have set it! */
+ if (!sis->frontswap_map)
+ return ERR_PTR(-EINVAL);
+ ops->init(i);
+ }
+ }
+ /*
+ * We MUST have frontswap_ops set _after_ the frontswap_init's
+ * have been called. Otherwise __frontswap_store might fail. Hence
+ * the barrier to make sure compiler does not re-order us.
+ */
+ barrier();
+ frontswap_ops = ops;
return old;
}
EXPORT_SYMBOL(frontswap_register_ops);
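
The need_init bitmap turns late backend loading into a replay problem: every
swapon that ran before the backend was registered leaves a bit set, and
registration walks the bitmap and creates a pool for each one. A user-space
sketch of that replay loop, assuming a plain uint32_t bitmap and a
non-atomic helper in place of the kernel's need_init and test_and_clear_bit():

#include <stdint.h>
#include <stdio.h>

#define MAX_SWAPFILES 32

static uint32_t need_init;	/* toy bitmap: one bit per swap "type" */

/* Stand-in for the kernel's test_and_clear_bit() (not actually atomic). */
static int test_and_clear(uint32_t *map, unsigned int bit)
{
	int was_set = (*map >> bit) & 1;

	*map &= ~(UINT32_C(1) << bit);
	return was_set;
}

static void backend_init(unsigned int type)
{
	printf("creating tmem pool for swap device %u\n", type);
}

int main(void)
{
	unsigned int i;

	/* Two swapons ran before any backend was loaded. */
	need_init |= UINT32_C(1) << 0;
	need_init |= UINT32_C(1) << 5;

	/*
	 * Backend registration replays the deferred inits, as in
	 * frontswap_register_ops() above.
	 */
	for (i = 0; i < MAX_SWAPFILES; i++)
		if (test_and_clear(&need_init, i))
			backend_init(i);
	return 0;
}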
@@ -115,20 +161,48 @@ EXPORT_SYMBOL(frontswap_tmem_exclusive_gets);
/*
* Called when a swap device is swapon'd.
*/
-void __frontswap_init(unsigned type)
+void __frontswap_init(unsigned type, unsigned long *map)
{
struct swap_info_struct *sis = swap_info[type];
BUG_ON(sis == NULL);
- if (sis->frontswap_map == NULL)
+
+ /*
+	 * p->frontswap is a bitmap that we MUST have to figure out which pages
+	 * have gone into frontswap. Without it there is no point in continuing.
+ */
+ if (WARN_ON(!map))
return;
- frontswap_ops.init(type);
+ /*
+	 * Regardless of whether the frontswap backend has been loaded
+	 * before this function or will be loaded later, we _MUST_ have the
+	 * p->frontswap set to something valid to work properly.
+ */
+ frontswap_map_set(sis, map);
+ if (frontswap_ops)
+ frontswap_ops->init(type);
+ else {
+ BUG_ON(type > MAX_SWAPFILES);
+ set_bit(type, need_init);
+ }
}
EXPORT_SYMBOL(__frontswap_init);
-static inline void __frontswap_clear(struct swap_info_struct *sis, pgoff_t offset)
+bool __frontswap_test(struct swap_info_struct *sis,
+ pgoff_t offset)
+{
+ bool ret = false;
+
+ if (frontswap_ops && sis->frontswap_map)
+ ret = test_bit(offset, sis->frontswap_map);
+ return ret;
+}
+EXPORT_SYMBOL(__frontswap_test);
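
Together, __frontswap_test(), the set_bit() in __frontswap_store() and
__frontswap_clear() keep one bit per swap slot recording whether the backend
currently holds that page. A toy sketch of that bookkeeping, assuming a
single 64-slot map in place of sis->frontswap_map:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t frontswap_map;	/* toy map: one bit per slot, 64 slots */

static void slot_set(unsigned int n)   { frontswap_map |=  UINT64_C(1) << n; }
static void slot_clear(unsigned int n) { frontswap_map &= ~(UINT64_C(1) << n); }
static bool slot_test(unsigned int n)  { return frontswap_map & (UINT64_C(1) << n); }

int main(void)
{
	unsigned int offset = 42;

	slot_set(offset);	/* store: the backend now holds this page */
	printf("in frontswap: %d\n", slot_test(offset));	/* load checks this */
	slot_clear(offset);	/* invalidate: drop the bookkeeping bit */
	printf("in frontswap: %d\n", slot_test(offset));
	return 0;
}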
+
+static inline void __frontswap_clear(struct swap_info_struct *sis,
+ pgoff_t offset)
{
- frontswap_clear(sis, offset);
+ clear_bit(offset, sis->frontswap_map);
atomic_dec(&sis->frontswap_pages);
}
@@ -147,13 +221,20 @@ int __frontswap_store(struct page *page)
struct swap_info_struct *sis = swap_info[type];
pgoff_t offset = swp_offset(entry);
+ /*
+	 * Return if no backend is registered.
+	 * We don't need to inc frontswap_failed_stores here.
+ */
+ if (!frontswap_ops)
+ return ret;
+
BUG_ON(!PageLocked(page));
BUG_ON(sis == NULL);
- if (frontswap_test(sis, offset))
+ if (__frontswap_test(sis, offset))
dup = 1;
- ret = frontswap_ops.store(type, offset, page);
+ ret = frontswap_ops->store(type, offset, page);
if (ret == 0) {
- frontswap_set(sis, offset);
+ set_bit(offset, sis->frontswap_map);
inc_frontswap_succ_stores();
if (!dup)
atomic_inc(&sis->frontswap_pages);
@@ -188,13 +269,16 @@ int __frontswap_load(struct page *page)
BUG_ON(!PageLocked(page));
BUG_ON(sis == NULL);
- if (frontswap_test(sis, offset))
- ret = frontswap_ops.load(type, offset, page);
+ /*
+	 * __frontswap_test() will check whether there is a backend registered.
+ */
+ if (__frontswap_test(sis, offset))
+ ret = frontswap_ops->load(type, offset, page);
if (ret == 0) {
inc_frontswap_loads();
if (frontswap_tmem_exclusive_gets_enabled) {
SetPageDirty(page);
- frontswap_clear(sis, offset);
+ __frontswap_clear(sis, offset);
}
}
return ret;
@@ -210,8 +294,11 @@ void __frontswap_invalidate_page(unsigned type, pgoff_t offset)
struct swap_info_struct *sis = swap_info[type];
BUG_ON(sis == NULL);
- if (frontswap_test(sis, offset)) {
- frontswap_ops.invalidate_page(type, offset);
+ /*
+	 * __frontswap_test() will check whether there is a backend registered.
+ */
+ if (__frontswap_test(sis, offset)) {
+ frontswap_ops->invalidate_page(type, offset);
__frontswap_clear(sis, offset);
inc_frontswap_invalidates();
}
@@ -226,12 +313,15 @@ void __frontswap_invalidate_area(unsigned type)
{
struct swap_info_struct *sis = swap_info[type];
- BUG_ON(sis == NULL);
- if (sis->frontswap_map == NULL)
- return;
- frontswap_ops.invalidate_area(type);
- atomic_set(&sis->frontswap_pages, 0);
- memset(sis->frontswap_map, 0, sis->max / sizeof(long));
+ if (frontswap_ops) {
+ BUG_ON(sis == NULL);
+ if (sis->frontswap_map == NULL)
+ return;
+ frontswap_ops->invalidate_area(type);
+ atomic_set(&sis->frontswap_pages, 0);
+ memset(sis->frontswap_map, 0, sis->max / sizeof(long));
+ }
+ clear_bit(type, need_init);
}
EXPORT_SYMBOL(__frontswap_invalidate_area);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index d417efddfe74..6c340d908b27 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1509,8 +1509,7 @@ static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
}
static void _enable_swap_info(struct swap_info_struct *p, int prio,
- unsigned char *swap_map,
- unsigned long *frontswap_map)
+ unsigned char *swap_map)
{
int i, prev;
@@ -1519,7 +1518,6 @@ static void _enable_swap_info(struct swap_info_struct *p, int prio,
else
p->prio = --least_priority;
p->swap_map = swap_map;
- frontswap_map_set(p, frontswap_map);
p->flags |= SWP_WRITEOK;
atomic_long_add(p->pages, &nr_swap_pages);
total_swap_pages += p->pages;
@@ -1542,10 +1540,10 @@ static void enable_swap_info(struct swap_info_struct *p, int prio,
unsigned char *swap_map,
unsigned long *frontswap_map)
{
+ frontswap_init(p->type, frontswap_map);
spin_lock(&swap_lock);
spin_lock(&p->lock);
- _enable_swap_info(p, prio, swap_map, frontswap_map);
- frontswap_init(p->type);
+ _enable_swap_info(p, prio, swap_map);
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
}
@@ -1554,7 +1552,7 @@ static void reinsert_swap_info(struct swap_info_struct *p)
{
spin_lock(&swap_lock);
spin_lock(&p->lock);
- _enable_swap_info(p, p->prio, p->swap_map, frontswap_map_get(p));
+ _enable_swap_info(p, p->prio, p->swap_map);
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
}
@@ -1563,6 +1561,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
{
struct swap_info_struct *p = NULL;
unsigned char *swap_map;
+ unsigned long *frontswap_map;
struct file *swap_file, *victim;
struct address_space *mapping;
struct inode *inode;
@@ -1662,12 +1661,14 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
swap_map = p->swap_map;
p->swap_map = NULL;
p->flags = 0;
- frontswap_invalidate_area(type);
+ frontswap_map = frontswap_map_get(p);
+ frontswap_map_set(p, NULL);
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
+ frontswap_invalidate_area(type);
mutex_unlock(&swapon_mutex);
vfree(swap_map);
- vfree(frontswap_map_get(p));
+ vfree(frontswap_map);
/* Destroy swap account information */
swap_cgroup_swapoff(type);
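
The swapoff hunk above follows a stash-then-free pattern: the frontswap map
pointer is snapshotted and detached from the swap_info_struct while the locks
are held, and only invalidated and freed after they are dropped, so the
invalidate callback may sleep and the map cannot be reached (or freed) twice.
A condensed user-space sketch of the same ordering (struct swap_info,
invalidate_area and the pthread mutex are simplified stand-ins):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct swap_info {
	pthread_mutex_t lock;
	unsigned long *frontswap_map;
};

static void invalidate_area(unsigned long *map)
{
	/* May sleep / take other locks, so it must run unlocked. */
	printf("invalidating area backed by %p\n", (void *)map);
}

static void swapoff_teardown(struct swap_info *p)
{
	unsigned long *map;

	pthread_mutex_lock(&p->lock);
	map = p->frontswap_map;		/* stash the pointer... */
	p->frontswap_map = NULL;	/* ...and detach it under the lock */
	pthread_mutex_unlock(&p->lock);

	invalidate_area(map);		/* safe: nobody else can reach map now */
	free(map);			/* freed exactly once, after invalidate */
}

int main(void)
{
	struct swap_info p = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.frontswap_map = calloc(4, sizeof(unsigned long)),
	};

	swapoff_teardown(&p);
	return 0;
}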