Diffstat (limited to 'fs')
-rw-r--r--  fs/fat/namei_vfat.c  |  6
-rw-r--r--  fs/logfs/dev_bdev.c  |  9
-rw-r--r--  fs/logfs/dir.c       |  4
-rw-r--r--  fs/logfs/journal.c   |  7
-rw-r--r--  fs/logfs/logfs.h     |  1
-rw-r--r--  fs/logfs/readwrite.c | 13
-rw-r--r--  fs/logfs/segment.c   | 54
-rw-r--r--  fs/logfs/super.c     | 15
-rw-r--r--  fs/proc/base.c       |  5
-rw-r--r--  fs/proc/task_mmu.c   | 87
-rw-r--r--  fs/reiserfs/super.c  | 10
11 files changed, 115 insertions(+), 96 deletions(-)
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index c1ef50154868..6fcc7e71fbaa 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -309,7 +309,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
{
struct fat_mount_options *opts = &MSDOS_SB(dir->i_sb)->options;
wchar_t *ip, *ext_start, *end, *name_start;
- unsigned char base[9], ext[4], buf[8], *p;
+ unsigned char base[9], ext[4], buf[5], *p;
unsigned char charbuf[NLS_MAX_CHARSET_SIZE];
int chl, chi;
int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen;
@@ -467,7 +467,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
return 0;
}
- i = jiffies & 0xffff;
+ i = jiffies;
sz = (jiffies >> 16) & 0x7;
if (baselen > 2) {
baselen = numtail2_baselen;
@@ -476,7 +476,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
name_res[baselen + 4] = '~';
name_res[baselen + 5] = '1' + sz;
while (1) {
- sprintf(buf, "%04X", i);
+ snprintf(buf, sizeof(buf), "%04X", i & 0xffff);
memcpy(&name_res[baselen], buf, 4);
if (vfat_find_form(dir, name_res) < 0)
break;
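[Note: the hunks above shrink `buf` to 5 bytes and switch to `snprintf()`, masking the counter at format time so "%04X" can never emit more than four hex digits plus the terminating NUL. The following is an illustrative userspace sketch of that bounded formatting, not part of the patch; the helper name and values are made up.]

```c
#include <stdio.h>

/* Format a 16-bit numeric tail the way the patched vfat_create_shortname()
 * does: mask to 16 bits and bound the write with snprintf() so the 5-byte
 * buffer (4 hex digits + NUL) cannot overflow even if the counter wanders
 * outside 0xffff.
 */
static void format_numeric_tail(unsigned long counter, char out[5])
{
	snprintf(out, 5, "%04X", (unsigned int)(counter & 0xffff));
}

int main(void)
{
	char buf[5];

	format_numeric_tail(0x12345678UL, buf);  /* high bits are masked away */
	printf("tail: %s\n", buf);               /* prints "5678" */
	return 0;
}
```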
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 18e8c144c7f1..243c00071f76 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -81,6 +81,7 @@ static void writeseg_end_io(struct bio *bio, int err)
prefetchw(&bvec->bv_page->flags);
end_page_writeback(page);
+ page_cache_release(page);
} while (bvec >= bio->bi_io_vec);
bio_put(bio);
if (atomic_dec_and_test(&super->s_pending_writes))
@@ -98,8 +99,10 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
int i;
+ if (max_pages > BIO_MAX_PAGES)
+ max_pages = BIO_MAX_PAGES;
bio = bio_alloc(GFP_NOFS, max_pages);
- BUG_ON(!bio); /* FIXME: handle this */
+ BUG_ON(!bio);
for (i = 0; i < nr_pages; i++) {
if (i >= max_pages) {
@@ -192,8 +195,10 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
int i;
+ if (max_pages > BIO_MAX_PAGES)
+ max_pages = BIO_MAX_PAGES;
bio = bio_alloc(GFP_NOFS, max_pages);
- BUG_ON(!bio); /* FIXME: handle this */
+ BUG_ON(!bio);
for (i = 0; i < nr_pages; i++) {
if (i >= max_pages) {
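[Note: both dev_bdev.c hunks clamp the page count derived from the queue's maximum hardware sectors to BIO_MAX_PAGES before calling bio_alloc(). Below is a hedged standalone sketch of the same arithmetic and clamp; the constants are illustrative, not the kernel's.]

```c
#include <stdio.h>

#define PAGE_SHIFT    12    /* assumed 4 KiB pages */
#define BIO_MAX_PAGES 256   /* illustrative cap, mirroring the kernel limit */

/* Derive a page budget from a device's maximum transfer size (in 512-byte
 * sectors) and clamp it so the allocation request never exceeds what a
 * single bio can hold -- the guard the patch adds before bio_alloc().
 */
static unsigned int pages_per_bio(unsigned int max_hw_sectors)
{
	unsigned int max_pages = max_hw_sectors >> (PAGE_SHIFT - 9);

	if (max_pages > BIO_MAX_PAGES)
		max_pages = BIO_MAX_PAGES;
	return max_pages;
}

int main(void)
{
	printf("%u\n", pages_per_bio(65535)); /* 8191 pages -> clamped to 256 */
	printf("%u\n", pages_per_bio(512));   /* 64 pages, no clamp needed */
	return 0;
}
```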
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index e1cb99566100..2396a85c0f55 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -303,12 +303,12 @@ static int __logfs_readdir(struct file *file, void *buf, filldir_t filldir)
(filler_t *)logfs_readpage, NULL);
if (IS_ERR(page))
return PTR_ERR(page);
- dd = kmap_atomic(page, KM_USER0);
+ dd = kmap(page);
BUG_ON(dd->namelen == 0);
full = filldir(buf, (char *)dd->name, be16_to_cpu(dd->namelen),
pos, be64_to_cpu(dd->ino), dd->type);
- kunmap_atomic(dd, KM_USER0);
+ kunmap(page);
page_cache_release(page);
if (full)
break;
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c
index f186043e862a..33bd260b8309 100644
--- a/fs/logfs/journal.c
+++ b/fs/logfs/journal.c
@@ -801,6 +801,7 @@ void do_logfs_journal_wl_pass(struct super_block *sb)
{
struct logfs_super *super = logfs_super(sb);
struct logfs_area *area = super->s_journal_area;
+ struct btree_head32 *head = &super->s_reserved_segments;
u32 segno, ec;
int i, err;
@@ -808,6 +809,7 @@ void do_logfs_journal_wl_pass(struct super_block *sb)
/* Drop old segments */
journal_for_each(i)
if (super->s_journal_seg[i]) {
+ btree_remove32(head, super->s_journal_seg[i]);
logfs_set_segment_unreserved(sb,
super->s_journal_seg[i],
super->s_journal_ec[i]);
@@ -820,8 +822,13 @@ void do_logfs_journal_wl_pass(struct super_block *sb)
super->s_journal_seg[i] = segno;
super->s_journal_ec[i] = ec;
logfs_set_segment_reserved(sb, segno);
+ err = btree_insert32(head, segno, (void *)1, GFP_KERNEL);
+ BUG_ON(err); /* mempool should prevent this */
+ err = logfs_erase_segment(sb, segno, 1);
+ BUG_ON(err); /* FIXME: remount-ro would be nicer */
}
/* Manually move journal_area */
+ freeseg(sb, area->a_segno);
area->a_segno = super->s_journal_seg[0];
area->a_is_open = 0;
area->a_used_bytes = 0;
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
index 129779431373..b84b0eec6024 100644
--- a/fs/logfs/logfs.h
+++ b/fs/logfs/logfs.h
@@ -587,6 +587,7 @@ void move_page_to_btree(struct page *page);
int logfs_init_mapping(struct super_block *sb);
void logfs_sync_area(struct logfs_area *area);
void logfs_sync_segments(struct super_block *sb);
+void freeseg(struct super_block *sb, u32 segno);
/* area handling */
int logfs_init_areas(struct super_block *sb);
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index d5919af2c7a7..bff40253dfb2 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -1595,7 +1595,6 @@ int logfs_delete(struct inode *inode, pgoff_t index,
return ret;
}
-/* Rewrite cannot mark the inode dirty but has to write it immediatly. */
int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs,
gc_level_t gc_level, long flags)
{
@@ -1612,6 +1611,18 @@ int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs,
if (level != 0)
alloc_indirect_block(inode, page, 0);
err = logfs_write_buf(inode, page, flags);
+ if (!err && shrink_level(gc_level) == 0) {
+ /* Rewrite cannot mark the inode dirty but has to
+ * write it immediatly.
+ * Q: Can't we just create an alias for the inode
+ * instead? And if not, why not?
+ */
+ if (inode->i_ino == LOGFS_INO_MASTER)
+ logfs_write_anchor(inode->i_sb);
+ else {
+ err = __logfs_write_inode(inode, flags);
+ }
+ }
}
logfs_put_write_page(page);
return err;
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index 614d7a6fda2d..801a3a141625 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -94,50 +94,58 @@ void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
} while (len);
}
-/*
- * bdev_writeseg will write full pages. Memset the tail to prevent data leaks.
- */
-static void pad_wbuf(struct logfs_area *area, int final)
+static void pad_partial_page(struct logfs_area *area)
{
struct super_block *sb = area->a_sb;
- struct logfs_super *super = logfs_super(sb);
struct page *page;
u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
pgoff_t index = ofs >> PAGE_SHIFT;
long offset = ofs & (PAGE_SIZE-1);
u32 len = PAGE_SIZE - offset;
- if (len == PAGE_SIZE) {
- /* The math in this function can surely use some love */
- len = 0;
- }
- if (len) {
- BUG_ON(area->a_used_bytes >= super->s_segsize);
-
- page = get_mapping_page(area->a_sb, index, 0);
+ if (len % PAGE_SIZE) {
+ page = get_mapping_page(sb, index, 0);
BUG_ON(!page); /* FIXME: reserve a pool */
memset(page_address(page) + offset, 0xff, len);
SetPagePrivate(page);
page_cache_release(page);
}
+}
- if (!final)
- return;
+static void pad_full_pages(struct logfs_area *area)
+{
+ struct super_block *sb = area->a_sb;
+ struct logfs_super *super = logfs_super(sb);
+ u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
+ u32 len = super->s_segsize - area->a_used_bytes;
+ pgoff_t index = PAGE_CACHE_ALIGN(ofs) >> PAGE_CACHE_SHIFT;
+ pgoff_t no_indizes = len >> PAGE_CACHE_SHIFT;
+ struct page *page;
- area->a_used_bytes += len;
- for ( ; area->a_used_bytes < super->s_segsize;
- area->a_used_bytes += PAGE_SIZE) {
- /* Memset another page */
- index++;
- page = get_mapping_page(area->a_sb, index, 0);
+ while (no_indizes) {
+ page = get_mapping_page(sb, index, 0);
BUG_ON(!page); /* FIXME: reserve a pool */
- memset(page_address(page), 0xff, PAGE_SIZE);
+ SetPageUptodate(page);
+ memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
SetPagePrivate(page);
page_cache_release(page);
+ index++;
+ no_indizes--;
}
}
/*
+ * bdev_writeseg will write full pages. Memset the tail to prevent data leaks.
+ * Also make sure we allocate (and memset) all pages for final writeout.
+ */
+static void pad_wbuf(struct logfs_area *area, int final)
+{
+ pad_partial_page(area);
+ if (final)
+ pad_full_pages(area);
+}
+
+/*
* We have to be careful with the alias tree. Since lookup is done by bix,
* it needs to be normalized, so 14, 15, 16, etc. all match when dealing with
* indirect blocks. So always use it through accessor functions.
@@ -684,7 +692,7 @@ int logfs_segment_delete(struct inode *inode, struct logfs_shadow *shadow)
return 0;
}
-static void freeseg(struct super_block *sb, u32 segno)
+void freeseg(struct super_block *sb, u32 segno)
{
struct logfs_super *super = logfs_super(sb);
struct address_space *mapping = super->s_mapping_inode->i_mapping;
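[Note: the segment.c change splits pad_wbuf() so the partially filled trailing page and the remaining whole pages of a segment are padded by separate helpers. The sketch below is a minimal userspace illustration of that split under assumed page and segment sizes; it is not kernel code.]

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL   /* assumed page size */

/* Mirror the structure of the patched pad_wbuf(): first pad the tail of
 * the current page (only when the write position is not page aligned),
 * then count the whole pages left in the segment that still need to be
 * memset for the final writeout.
 */
static void pad_to_segment_end(unsigned long used_bytes, unsigned long segsize)
{
	unsigned long offset = used_bytes & (PAGE_SIZE - 1);
	unsigned long tail = PAGE_SIZE - offset;
	unsigned long full_pages = (segsize - used_bytes) / PAGE_SIZE;

	if (tail % PAGE_SIZE)   /* partial page: pad offset..PAGE_SIZE */
		printf("pad %lu tail bytes in the current page\n", tail);
	printf("pad %lu whole page(s)\n", full_pages);
}

int main(void)
{
	pad_to_segment_end(5000, 32768);   /* 3192 tail bytes + 6 whole pages */
	pad_to_segment_end(8192, 32768);   /* aligned: no tail, 6 whole pages */
	return 0;
}
```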
diff --git a/fs/logfs/super.c b/fs/logfs/super.c
index 46990eafe052..b60bfac3263c 100644
--- a/fs/logfs/super.c
+++ b/fs/logfs/super.c
@@ -278,7 +278,7 @@ static int logfs_recover_sb(struct super_block *sb)
}
if (valid0 && valid1 && ds_cmp(ds0, ds1)) {
printk(KERN_INFO"Superblocks don't match - fixing.\n");
- return write_one_sb(sb, super->s_devops->find_last_sb);
+ return logfs_write_sb(sb);
}
/* If neither is valid now, something's wrong. Didn't we properly
* check them before?!? */
@@ -290,6 +290,10 @@ static int logfs_make_writeable(struct super_block *sb)
{
int err;
+ err = logfs_open_segfile(sb);
+ if (err)
+ return err;
+
/* Repair any broken superblock copies */
err = logfs_recover_sb(sb);
if (err)
@@ -300,10 +304,6 @@ static int logfs_make_writeable(struct super_block *sb)
if (err)
return err;
- err = logfs_open_segfile(sb);
- if (err)
- return err;
-
/* Do one GC pass before any data gets dirtied */
logfs_gc_pass(sb);
@@ -329,7 +329,7 @@ static int logfs_get_sb_final(struct super_block *sb, struct vfsmount *mnt)
sb->s_root = d_alloc_root(rootdir);
if (!sb->s_root)
- goto fail;
+ goto fail2;
super->s_erase_page = alloc_pages(GFP_KERNEL, 0);
if (!super->s_erase_page)
@@ -573,8 +573,7 @@ int logfs_get_sb_device(struct file_system_type *type, int flags,
return 0;
err1:
- up_write(&sb->s_umount);
- deactivate_super(sb);
+ deactivate_locked_super(sb);
return err;
err0:
kfree(super);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 9e82adc37b0c..7621db800a74 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -443,12 +443,13 @@ static const struct file_operations proc_lstats_operations = {
unsigned long badness(struct task_struct *p, unsigned long uptime);
static int proc_oom_score(struct task_struct *task, char *buffer)
{
- unsigned long points;
+ unsigned long points = 0;
struct timespec uptime;
do_posix_clock_monotonic_gettime(&uptime);
read_lock(&tasklist_lock);
- points = badness(task->group_leader, uptime.tv_sec);
+ if (pid_alive(task))
+ points = badness(task, uptime.tv_sec);
read_unlock(&tasklist_lock);
return sprintf(buffer, "%lu\n", points);
}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 2d45889931f6..caf0337dff73 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -407,6 +407,7 @@ static int show_smap(struct seq_file *m, void *v)
memset(&mss, 0, sizeof mss);
mss.vma = vma;
+ /* mmap_sem is held in m_start */
if (vma->vm_mm && !is_vm_hugetlb_page(vma))
walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
@@ -553,7 +554,8 @@ const struct file_operations proc_clear_refs_operations = {
};
struct pagemapread {
- u64 __user *out, *end;
+ int pos, len;
+ u64 *buffer;
};
#define PM_ENTRY_BYTES sizeof(u64)
@@ -576,10 +578,8 @@ struct pagemapread {
static int add_to_pagemap(unsigned long addr, u64 pfn,
struct pagemapread *pm)
{
- if (put_user(pfn, pm->out))
- return -EFAULT;
- pm->out++;
- if (pm->out >= pm->end)
+ pm->buffer[pm->pos++] = pfn;
+ if (pm->pos >= pm->len)
return PM_END_OF_BUFFER;
return 0;
}
@@ -721,21 +721,20 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long addr,
* determine which areas of memory are actually mapped and llseek to
* skip over unmapped regions.
*/
+#define PAGEMAP_WALK_SIZE (PMD_SIZE)
static ssize_t pagemap_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
- struct page **pages, *page;
- unsigned long uaddr, uend;
struct mm_struct *mm;
struct pagemapread pm;
- int pagecount;
int ret = -ESRCH;
struct mm_walk pagemap_walk = {};
unsigned long src;
unsigned long svpfn;
unsigned long start_vaddr;
unsigned long end_vaddr;
+ int copied = 0;
if (!task)
goto out;
@@ -758,35 +757,12 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
if (!mm)
goto out_task;
-
- uaddr = (unsigned long)buf & PAGE_MASK;
- uend = (unsigned long)(buf + count);
- pagecount = (PAGE_ALIGN(uend) - uaddr) / PAGE_SIZE;
- ret = 0;
- if (pagecount == 0)
- goto out_mm;
- pages = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
+ pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
+ pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
ret = -ENOMEM;
- if (!pages)
+ if (!pm.buffer)
goto out_mm;
- down_read(&current->mm->mmap_sem);
- ret = get_user_pages(current, current->mm, uaddr, pagecount,
- 1, 0, pages, NULL);
- up_read(&current->mm->mmap_sem);
-
- if (ret < 0)
- goto out_free;
-
- if (ret != pagecount) {
- pagecount = ret;
- ret = -EFAULT;
- goto out_pages;
- }
-
- pm.out = (u64 __user *)buf;
- pm.end = (u64 __user *)(buf + count);
-
pagemap_walk.pmd_entry = pagemap_pte_range;
pagemap_walk.pte_hole = pagemap_pte_hole;
pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
@@ -808,23 +784,36 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
* user buffer is tracked in "pm", and the walk
* will stop when we hit the end of the buffer.
*/
- ret = walk_page_range(start_vaddr, end_vaddr, &pagemap_walk);
- if (ret == PM_END_OF_BUFFER)
- ret = 0;
- /* don't need mmap_sem for these, but this looks cleaner */
- *ppos += (char __user *)pm.out - buf;
- if (!ret)
- ret = (char __user *)pm.out - buf;
-
-out_pages:
- for (; pagecount; pagecount--) {
- page = pages[pagecount-1];
- if (!PageReserved(page))
- SetPageDirty(page);
- page_cache_release(page);
+ ret = 0;
+ while (count && (start_vaddr < end_vaddr)) {
+ int len;
+ unsigned long end;
+
+ pm.pos = 0;
+ end = start_vaddr + PAGEMAP_WALK_SIZE;
+ /* overflow ? */
+ if (end < start_vaddr || end > end_vaddr)
+ end = end_vaddr;
+ down_read(&mm->mmap_sem);
+ ret = walk_page_range(start_vaddr, end, &pagemap_walk);
+ up_read(&mm->mmap_sem);
+ start_vaddr = end;
+
+ len = min(count, PM_ENTRY_BYTES * pm.pos);
+ if (copy_to_user(buf, pm.buffer, len) < 0) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ copied += len;
+ buf += len;
+ count -= len;
}
+ *ppos += copied;
+ if (!ret || ret == PM_END_OF_BUFFER)
+ ret = copied;
+
out_free:
- kfree(pages);
+ kfree(pm.buffer);
out_mm:
mmput(mm);
out_task:
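[Note: the task_mmu.c change drops the get_user_pages() pinning of the user buffer; pagemap_read() now walks the address space in PMD-sized chunks, fills a small kernel buffer, and flushes each chunk with copy_to_user(). The following is a hedged userspace analogue of that produce-then-copy loop; names, the chunk size, and the memcpy() stand-in for copy_to_user() are all assumptions.]

```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define CHUNK_ENTRIES 512   /* illustrative: entries gathered per iteration */

/* Stand-in for the page walk: produce one entry per "page" in [start, end)
 * into the scratch buffer and report how many were produced.
 */
static size_t walk_range(uint64_t *scratch, uint64_t start, uint64_t end)
{
	size_t n = 0;

	while (start < end && n < CHUNK_ENTRIES)
		scratch[n++] = start++;
	return n;
}

/* Chunked read in the spirit of the patched pagemap_read(): refill a
 * bounded scratch buffer and flush it to the destination until either the
 * requested count or the end of the range is reached.
 */
static size_t chunked_read(uint64_t *dst, size_t count,
			   uint64_t start, uint64_t end)
{
	uint64_t scratch[CHUNK_ENTRIES];
	size_t copied = 0;

	while (count && start < end) {
		size_t produced = walk_range(scratch, start, end);
		size_t len = produced < count ? produced : count;

		/* copy_to_user() analogue */
		memcpy(dst + copied, scratch, len * sizeof(*scratch));
		copied += len;
		count -= len;
		start += produced;
	}
	return copied;
}

int main(void)
{
	uint64_t out[1000];
	size_t n = chunked_read(out, 1000, 0, 700);

	printf("copied %zu entries, last = %llu\n",
	       n, (unsigned long long)out[n - 1]);
	return 0;
}
```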
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index d8fd90d83ab3..59125fb36d42 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -1619,10 +1619,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
save_mount_options(s, data);
sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
- if (!sbi) {
- errval = -ENOMEM;
- goto error_alloc;
- }
+ if (!sbi)
+ return -ENOMEM;
s->s_fs_info = sbi;
/* Set default values for options: non-aggressive tails, RO on errors */
REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
@@ -1879,12 +1877,12 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
return (0);
error:
- reiserfs_write_unlock(s);
-error_alloc:
if (jinit_done) { /* kill the commit thread, free journal ram */
journal_release_error(NULL, s);
}
+ reiserfs_write_unlock(s);
+
reiserfs_free_bitmap_cache(s);
if (SB_BUFFER_WITH_SB(s))
brelse(SB_BUFFER_WITH_SB(s));