Diffstat (limited to 'include/linux/pagemap.h'):
 include/linux/pagemap.h | 53 +++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 43 insertions(+), 10 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 20225b067583..4686f9ab0636 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -559,7 +559,6 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
return pgoff;
}
-/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
struct wait_page_key {
struct page *page;
int bit_nr;
@@ -683,11 +682,32 @@ static inline int wait_on_page_locked_killable(struct page *page)
int put_and_wait_on_page_locked(struct page *page, int state);
void wait_on_page_writeback(struct page *page);
+int wait_on_page_writeback_killable(struct page *page);
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);
void page_endio(struct page *page, bool is_write, int err);
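The new killable writeback wait suits callers that hold a page but should not block uninterruptibly when a fatal signal arrives. A minimal sketch, assuming a hypothetical ->launder_page()-style helper (myfs_launder_page is illustrative, not part of this patch):

static int myfs_launder_page(struct page *page)
{
	int ret;

	/* Returns 0 once writeback completes, or -EINTR on a fatal signal. */
	ret = wait_on_page_writeback_killable(page);
	if (ret < 0)
		return ret;

	/* ... write the dirty page back synchronously here ... */
	return 0;
}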
+/**
+ * set_page_private_2 - Set PG_private_2 on a page and take a ref
+ * @page: The page.
+ *
+ * Set the PG_private_2 flag on a page and take the reference needed for the VM
+ * to handle its lifetime correctly. This sets the flag and takes the
+ * reference unconditionally, so care must be taken not to set the flag again
+ * if it's already set.
+ */
+static inline void set_page_private_2(struct page *page)
+{
+ page = compound_head(page);
+ get_page(page);
+ SetPagePrivate2(page);
+}
+
+void end_page_private_2(struct page *page);
+void wait_on_page_private_2(struct page *page);
+int wait_on_page_private_2_killable(struct page *page);
+
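A minimal usage sketch for the PG_private_2 helpers above, assuming a hypothetical filesystem that copies pages to a local cache in the background; the myfs_* names are illustrative only:

static void myfs_start_cache_copy(struct page *page)
{
	/*
	 * The flag must not already be set: set_page_private_2() flags the
	 * page and takes a reference unconditionally.
	 */
	set_page_private_2(page);

	/* ... queue the asynchronous copy to the cache ... */
}

static void myfs_cache_copy_done(struct page *page)
{
	/* Clears PG_private_2, wakes any waiters and drops the reference. */
	end_page_private_2(page);
}

static int myfs_wait_for_cache_copy(struct page *page)
{
	/* Returns 0, or -EINTR if a fatal signal arrives while waiting. */
	return wait_on_page_private_2_killable(page);
}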
/*
* Add an arbitrary waiter to a page's wait queue
*/
@@ -792,20 +812,23 @@ static inline int add_to_page_cache(struct page *page,
* @file: The file, used primarily by network filesystems for authentication.
* May be NULL if invoked internally by the filesystem.
* @mapping: Readahead this filesystem object.
+ * @ra: File readahead state. May be NULL.
*/
struct readahead_control {
struct file *file;
struct address_space *mapping;
+ struct file_ra_state *ra;
/* private: use the readahead_* accessors instead */
pgoff_t _index;
unsigned int _nr_pages;
unsigned int _batch_count;
};
-#define DEFINE_READAHEAD(rac, f, m, i) \
- struct readahead_control rac = { \
+#define DEFINE_READAHEAD(ractl, f, r, m, i) \
+ struct readahead_control ractl = { \
.file = f, \
.mapping = m, \
+ .ra = r, \
._index = i, \
}
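With the file_ra_state now carried inside the readahead_control, a caller builds the control once and no longer passes @ra to each readahead call. A minimal sketch using the new five-argument macro (the surrounding function is illustrative, not part of this patch):

static void example_start_readahead(struct file *file, pgoff_t index,
				    unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, file->f_mapping, index);

	page_cache_sync_ra(&ractl, req_count);
}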
@@ -813,10 +836,11 @@ struct readahead_control {
void page_cache_ra_unbounded(struct readahead_control *,
unsigned long nr_to_read, unsigned long lookahead_count);
-void page_cache_sync_ra(struct readahead_control *, struct file_ra_state *,
+void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
+void page_cache_async_ra(struct readahead_control *, struct page *,
unsigned long req_count);
-void page_cache_async_ra(struct readahead_control *, struct file_ra_state *,
- struct page *, unsigned long req_count);
+void readahead_expand(struct readahead_control *ractl,
+ loff_t new_start, size_t new_len);
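readahead_expand() lets a filesystem widen the request before issuing I/O, for example to align it with a larger on-disk granule. A hedged sketch of a hypothetical ->readahead() that rounds the window out to a 64KiB block size (myfs_readahead and MYFS_BLOCK_SIZE are illustrative):

#define MYFS_BLOCK_SIZE	SZ_64K

static void myfs_readahead(struct readahead_control *ractl)
{
	loff_t start = readahead_pos(ractl);
	loff_t len = readahead_length(ractl);
	loff_t new_start = round_down(start, MYFS_BLOCK_SIZE);
	size_t new_len = round_up(start + len, MYFS_BLOCK_SIZE) - new_start;

	/* May grow the window if the extra pages can be added to the cache. */
	readahead_expand(ractl, new_start, new_len);

	/* ... issue reads for readahead_count(ractl) pages ... */
}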
/**
* page_cache_sync_readahead - generic file readahead
@@ -836,8 +860,8 @@ void page_cache_sync_readahead(struct address_space *mapping,
struct file_ra_state *ra, struct file *file, pgoff_t index,
unsigned long req_count)
{
- DEFINE_READAHEAD(ractl, file, mapping, index);
- page_cache_sync_ra(&ractl, ra, req_count);
+ DEFINE_READAHEAD(ractl, file, ra, mapping, index);
+ page_cache_sync_ra(&ractl, req_count);
}
/**
@@ -859,8 +883,8 @@ void page_cache_async_readahead(struct address_space *mapping,
struct file_ra_state *ra, struct file *file,
struct page *page, pgoff_t index, unsigned long req_count)
{
- DEFINE_READAHEAD(ractl, file, mapping, index);
- page_cache_async_ra(&ractl, ra, page, req_count);
+ DEFINE_READAHEAD(ractl, file, ra, mapping, index);
+ page_cache_async_ra(&ractl, page, req_count);
}
/**
@@ -981,6 +1005,15 @@ static inline unsigned int readahead_count(struct readahead_control *rac)
return rac->_nr_pages;
}
+/**
+ * readahead_batch_length - The number of bytes in the current batch.
+ * @rac: The readahead request.
+ */
+static inline loff_t readahead_batch_length(struct readahead_control *rac)
+{
+ return rac->_batch_count * PAGE_SIZE;
+}
+
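Continuing the sketch above, the issuing half of a hypothetical ->readahead() might pull pages out in batches and use readahead_batch_length() to size each I/O in bytes (myfs_issue_read() and MYFS_BATCH_PAGES are illustrative):

#define MYFS_BATCH_PAGES 16

/* Illustrative stub: submits one read covering @nr pages at @pos. */
void myfs_issue_read(struct page **pages, unsigned int nr,
		     loff_t pos, loff_t len);

static void myfs_readahead_issue(struct readahead_control *rac)
{
	struct page *pages[MYFS_BATCH_PAGES];
	unsigned int nr;

	while ((nr = readahead_page_batch(rac, pages))) {
		loff_t pos = readahead_pos(rac);
		loff_t len = readahead_batch_length(rac);

		/* @pos/@len describe the batch just pulled from @rac. */
		myfs_issue_read(pages, nr, pos, len);
	}
}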
static inline unsigned long dir_pages(struct inode *inode)
{
return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>