author    Steven Whitehouse <swhiteho@redhat.com>  2008-11-20 16:39:47 +0300
committer Steven Whitehouse <swhiteho@redhat.com>  2009-01-05 10:39:09 +0300
commit    97cc1025b1a91c52e84f12478dcf0f853abc6564 (patch)
tree      cd71419049aeb13eea7012889d0ee0c715394e4d /fs/gfs2/glops.c
parent    9ac1b4d9b6f885ccd7d8f56bceb609003a920ff7 (diff)
download  linux-97cc1025b1a91c52e84f12478dcf0f853abc6564.tar.xz
GFS2: Kill two daemons with one patch
This patch removes the two daemons, gfs2_scand and gfs2_glockd, and replaces them with a shrinker which is called from the VM.

The net result is that GFS2 responds better under memory pressure, since it shrinks the glock cache at the same rate as the VFS shrinks the dcache and icache. There are no longer any time-based criteria for shrinking glocks; they are kept until the VM asks for more memory, at which point we demote just as many glocks as required.

There are potential future changes to this code, including the possibility of sorting the glocks which are to be written back into inode number order, to get better I/O ordering. It would be very useful to have an elevator-based workqueue implementation for this, as that would automatically deal with the read I/O cases at the same time.

This patch is my answer to Andrew Morton's remark, made during the initial review of GFS2, asking why GFS2 needs so many kernel threads; the answer is that it doesn't :-) This patch is a net loss of about 200 lines of code.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
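For reference, the replacement mechanism lands in fs/gfs2/glock.c and is not visible in this file's diff: the glock LRU is hooked into the kernel's shrinker interface. Below is a minimal sketch against the 2.6.28-era shrinker API; the LRU walk is elided, and lru_count is assumed here as the patch's counter of shrinkable glocks.

#include <linux/init.h>
#include <linux/mm.h>		/* struct shrinker, register_shrinker() */

static atomic_t lru_count = ATOMIC_INIT(0);	/* assumed LRU counter */

/* Invoked by the VM under memory pressure.  With a non-zero @nr it
 * walks the glock LRU and demotes entries whose type-specific
 * ->go_demote_ok() hook agrees, then reports how many shrinkable
 * glocks remain so the VM can decide whether to call back. */
static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
{
	if (nr != 0) {
		/* ... walk the glock LRU, demoting up to nr glocks ... */
	}
	return atomic_read(&lru_count);
}

static struct shrinker glock_shrinker = {
	.shrink = gfs2_shrink_glock_memory,
	.seeks = DEFAULT_SEEKS,
};

int __init gfs2_glock_init(void)
{
	/* Replaces starting the gfs2_scand and gfs2_glockd daemons;
	 * unregister_shrinker(&glock_shrinker) undoes it at exit. */
	register_shrinker(&glock_shrinker);
	return 0;
}

Because the shrinker is driven by the VM's reclaim pressure rather than a timer, glock cache size tracks dcache/icache reclaim automatically, which is why the time-based gt_demote_secs criterion below can be deleted.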
Diffstat (limited to 'fs/gfs2/glops.c')
-rw-r--r--  fs/gfs2/glops.c  32
1 file changed, 19 insertions(+), 13 deletions(-)
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 8ebff8ebae20..8522d3aa64fc 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -201,19 +201,12 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
  * Returns: 1 if it's ok
  */
 
-static int inode_go_demote_ok(struct gfs2_glock *gl)
+static int inode_go_demote_ok(const struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_sbd;
-	int demote = 0;
-
-	if (!gl->gl_object && !gl->gl_aspace->i_mapping->nrpages)
-		demote = 1;
-	else if (!sdp->sd_args.ar_localcaching &&
-		 time_after_eq(jiffies, gl->gl_stamp +
-			       gfs2_tune_get(sdp, gt_demote_secs) * HZ))
-		demote = 1;
-
-	return demote;
+	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
+		return 0;
+	return 1;
 }
 
 /**
@@ -284,7 +277,7 @@ static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
  * Returns: 1 if it's ok
  */
 
-static int rgrp_go_demote_ok(struct gfs2_glock *gl)
+static int rgrp_go_demote_ok(const struct gfs2_glock *gl)
 {
 	return !gl->gl_aspace->i_mapping->nrpages;
 }
@@ -386,13 +379,25 @@ static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
 }
 
 /**
+ * trans_go_demote_ok
+ * @gl: the glock
+ *
+ * Always returns 0
+ */
+
+static int trans_go_demote_ok(const struct gfs2_glock *gl)
+{
+	return 0;
+}
+
+/**
  * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
  * @gl: the glock
  *
  * Returns: 1 if it's ok
  */
 
-static int quota_go_demote_ok(struct gfs2_glock *gl)
+static int quota_go_demote_ok(const struct gfs2_glock *gl)
 {
 	return !atomic_read(&gl->gl_lvb_count);
 }
@@ -426,6 +431,7 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
 const struct gfs2_glock_operations gfs2_trans_glops = {
 	.go_xmote_th = trans_go_sync,
 	.go_xmote_bh = trans_go_xmote_bh,
+	.go_demote_ok = trans_go_demote_ok,
 	.go_type = LM_TYPE_NONDISK,
 };
 