Diffstat (limited to 'drivers/xen/tmem.c')
-rw-r--r--  drivers/xen/tmem.c  103
1 file changed, 49 insertions, 54 deletions
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index e3600be4e7fa..83b5c53bec6b 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -5,17 +5,15 @@
* Author: Dan Magenheimer
*/
+#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>
-
-/* temporary ifdef until include/linux/frontswap.h is upstream */
-#ifdef CONFIG_FRONTSWAP
#include <linux/frontswap.h>
-#endif
#include <xen/xen.h>
#include <xen/interface/xen.h>
@@ -24,6 +22,36 @@
#include <asm/xen/hypervisor.h>
#include <xen/tmem.h>
+#ifndef CONFIG_XEN_TMEM_MODULE
+bool __read_mostly tmem_enabled = false;
+
+static int __init enable_tmem(char *s)
+{
+ tmem_enabled = true;
+ return 1;
+}
+__setup("tmem", enable_tmem);
+#endif
+
+#ifdef CONFIG_CLEANCACHE
+static bool cleancache __read_mostly = true;
+module_param(cleancache, bool, S_IRUGO);
+static bool selfballooning __read_mostly = true;
+module_param(selfballooning, bool, S_IRUGO);
+#endif /* CONFIG_CLEANCACHE */
+
+#ifdef CONFIG_FRONTSWAP
+static bool frontswap __read_mostly = true;
+module_param(frontswap, bool, S_IRUGO);
+#else /* CONFIG_FRONTSWAP */
+#define frontswap (0)
+#endif /* CONFIG_FRONTSWAP */
+
+#ifdef CONFIG_XEN_SELFBALLOONING
+static bool selfshrinking __read_mostly = true;
+module_param(selfshrinking, bool, S_IRUGO);
+#endif /* CONFIG_XEN_SELFBALLOONING */
+
#define TMEM_CONTROL 0
#define TMEM_NEW_POOL 1
#define TMEM_DESTROY_POOL 2
@@ -129,16 +157,6 @@ static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}
-#ifndef CONFIG_XEN_TMEM_MODULE
-bool __read_mostly tmem_enabled = false;
-
-static int __init enable_tmem(char *s)
-{
- tmem_enabled = true;
- return 1;
-}
-__setup("tmem", enable_tmem);
-#endif
#ifdef CONFIG_CLEANCACHE
static int xen_tmem_destroy_pool(u32 pool_id)
@@ -230,20 +248,6 @@ static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}
-static bool disable_cleancache __read_mostly;
-static bool disable_selfballooning __read_mostly;
-#ifdef CONFIG_XEN_TMEM_MODULE
-module_param(disable_cleancache, bool, S_IRUGO);
-module_param(disable_selfballooning, bool, S_IRUGO);
-#else
-static int __init no_cleancache(char *s)
-{
- disable_cleancache = true;
- return 1;
-}
-__setup("nocleancache", no_cleancache);
-#endif
-
static struct cleancache_ops tmem_cleancache_ops = {
.put_page = tmem_cleancache_put_page,
.get_page = tmem_cleancache_get_page,
@@ -361,20 +365,6 @@ static void tmem_frontswap_init(unsigned ignored)
xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
}
-static bool disable_frontswap __read_mostly;
-static bool disable_frontswap_selfshrinking __read_mostly;
-#ifdef CONFIG_XEN_TMEM_MODULE
-module_param(disable_frontswap, bool, S_IRUGO);
-module_param(disable_frontswap_selfshrinking, bool, S_IRUGO);
-#else
-static int __init no_frontswap(char *s)
-{
- disable_frontswap = true;
- return 1;
-}
-__setup("nofrontswap", no_frontswap);
-#endif
-
static struct frontswap_ops tmem_frontswap_ops = {
.store = tmem_frontswap_store,
.load = tmem_frontswap_load,
@@ -382,8 +372,6 @@ static struct frontswap_ops tmem_frontswap_ops = {
.invalidate_area = tmem_frontswap_flush_area,
.init = tmem_frontswap_init
};
-#else /* CONFIG_FRONTSWAP */
-#define disable_frontswap_selfshrinking 1
#endif
static int xen_tmem_init(void)
@@ -391,36 +379,43 @@ static int xen_tmem_init(void)
if (!xen_domain())
return 0;
#ifdef CONFIG_FRONTSWAP
- if (tmem_enabled && !disable_frontswap) {
+ if (tmem_enabled && frontswap) {
char *s = "";
- struct frontswap_ops *old_ops =
- frontswap_register_ops(&tmem_frontswap_ops);
+ struct frontswap_ops *old_ops;
tmem_frontswap_poolid = -1;
+ old_ops = frontswap_register_ops(&tmem_frontswap_ops);
if (IS_ERR(old_ops) || old_ops) {
if (IS_ERR(old_ops))
return PTR_ERR(old_ops);
s = " (WARNING: frontswap_ops overridden)";
}
- printk(KERN_INFO "frontswap enabled, RAM provided by "
- "Xen Transcendent Memory%s\n", s);
+ pr_info("frontswap enabled, RAM provided by Xen Transcendent Memory%s\n",
+ s);
}
#endif
#ifdef CONFIG_CLEANCACHE
BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
- if (tmem_enabled && !disable_cleancache) {
+ if (tmem_enabled && cleancache) {
char *s = "";
struct cleancache_ops *old_ops =
cleancache_register_ops(&tmem_cleancache_ops);
if (old_ops)
s = " (WARNING: cleancache_ops overridden)";
- printk(KERN_INFO "cleancache enabled, RAM provided by "
- "Xen Transcendent Memory%s\n", s);
+ pr_info("cleancache enabled, RAM provided by Xen Transcendent Memory%s\n",
+ s);
}
#endif
#ifdef CONFIG_XEN_SELFBALLOONING
- xen_selfballoon_init(!disable_selfballooning,
- !disable_frontswap_selfshrinking);
+ /*
+ * There is no point in driving pages to the swap system if they
+ * aren't going anywhere in the tmem universe.
+ */
+ if (!frontswap) {
+ selfshrinking = false;
+ selfballooning = false;
+ }
+ xen_selfballoon_init(selfballooning, selfshrinking);
#endif
return 0;
}
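
For reference, a minimal standalone sketch of the pattern this patch switches to: a positive boolean knob exposed as a readable module parameter when the code is built as a module, and as a __setup() boot option when built in. This is not part of the commit; the names "demo", "demo_enabled" and the CONFIG_DEMO_MODULE symbol are hypothetical and only illustrate the technique.

/* Illustrative sketch only -- not drivers/xen/tmem.c. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>

#ifdef CONFIG_DEMO_MODULE
/* Built as a module: expose "enabled" as a read-only module parameter,
 * defaulting to true, e.g. "modprobe demo enabled=0". */
static bool demo_enabled __read_mostly = true;
module_param_named(enabled, demo_enabled, bool, S_IRUGO);
#else
/* Built in: off by default, enabled by putting "demo" on the kernel
 * command line, mirroring the __setup("tmem", ...) hook above. */
static bool demo_enabled __read_mostly;

static int __init enable_demo(char *s)
{
        demo_enabled = true;
        return 1;
}
__setup("demo", enable_demo);
#endif

static int __init demo_init(void)
{
        pr_info("demo %sabled\n", demo_enabled ? "en" : "dis");
        return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");

The design point the sketch mirrors is the one the diff makes: parameters are named for the positive condition (cleancache, frontswap, selfballooning, selfshrinking) rather than as disable_* negatives, so "frontswap=0" reads naturally and the built-in and modular configuration paths stay consistent.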