Diffstat (limited to 'fs/netfs')
-rw-r--r--  fs/netfs/internal.h    |   2
-rw-r--r--  fs/netfs/read_helper.c | 164
-rw-r--r--  fs/netfs/stats.c       |  11
3 files changed, 174 insertions, 3 deletions
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index 98b6f4516da1..b7f2c4459f33 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -34,8 +34,10 @@ extern atomic_t netfs_n_rh_read_failed;
 extern atomic_t netfs_n_rh_zero;
 extern atomic_t netfs_n_rh_short_read;
 extern atomic_t netfs_n_rh_write;
+extern atomic_t netfs_n_rh_write_begin;
 extern atomic_t netfs_n_rh_write_done;
 extern atomic_t netfs_n_rh_write_failed;
+extern atomic_t netfs_n_rh_write_zskip;
 
 static inline void netfs_stat(atomic_t *stat)
 {
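
For context, netfs_stat() — whose declaration closes this hunk — is the helper the new counters are bumped through. A minimal sketch of the pattern is below; the function body is paraphrased from fs/netfs/internal.h rather than shown in this diff, and when CONFIG_NETFS_STATS is unset a no-op stub is substituted so the counters cost nothing.

/* Sketch of the stat helper these externs feed (paraphrased, not
 * part of this hunk): a lock-free counter bump.
 */
static inline void netfs_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

/* Call sites then just do, e.g.: netfs_stat(&netfs_n_rh_write_begin); */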
diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
index 6d6ed30f417e..da34aedea053 100644
--- a/fs/netfs/read_helper.c
+++ b/fs/netfs/read_helper.c
@@ -772,3 +772,167 @@ int netfs_readpage(struct file *file,
 	return ret;
 }
 EXPORT_SYMBOL(netfs_readpage);
+
+static void netfs_clear_thp(struct page *page)
+{
+	unsigned int i;
+
+	for (i = 0; i < thp_nr_pages(page); i++)
+		clear_highpage(page + i);
+}
+
+/**
+ * netfs_write_begin - Helper to prepare for writing
+ * @file: The file to read from
+ * @mapping: The mapping to read from
+ * @pos: File position at which the write will begin
+ * @len: The length of the write in this page
+ * @flags: AOP_* flags
+ * @_page: Where to put the resultant page
+ * @_fsdata: Place for the netfs to store a cookie
+ * @ops: The network filesystem's operations for the helper to use
+ * @netfs_priv: Private netfs data to be retained in the request
+ *
+ * Pre-read data for a write-begin request by drawing data from the cache if
+ * possible, or the netfs if not. Space beyond the EOF is zero-filled.
+ * Multiple I/O requests from different sources will get munged together. If
+ * necessary, the readahead window can be expanded in either direction to a
+ * more convenient alignment for RPC efficiency or to make storage in the cache
+ * feasible.
+ *
+ * The calling netfs must provide a table of operations, only one of which,
+ * issue_op, is mandatory.
+ *
+ * The check_write_begin() operation can be provided to check for and flush
+ * conflicting writes once the page is grabbed and locked. It is passed a
+ * pointer to the fsdata cookie that gets returned to the VM to be passed to
+ * write_end. It is permitted to sleep. It should return 0 if the request
+ * should go ahead; unlock the page and return -EAGAIN to cause the page to be
+ * re-obtained; or return an error.
+ *
+ * This is usable whether or not caching is enabled.
+ */
+int netfs_write_begin(struct file *file, struct address_space *mapping,
+		      loff_t pos, unsigned int len, unsigned int flags,
+		      struct page **_page, void **_fsdata,
+		      const struct netfs_read_request_ops *ops,
+		      void *netfs_priv)
+{
+	struct netfs_read_request *rreq;
+	struct page *page, *xpage;
+	struct inode *inode = file_inode(file);
+	unsigned int debug_index = 0;
+	pgoff_t index = pos >> PAGE_SHIFT;
+	int pos_in_page = pos & ~PAGE_MASK;
+	loff_t size;
+	int ret;
+
+	DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
+
+retry:
+	page = grab_cache_page_write_begin(mapping, index, 0);
+	if (!page)
+		return -ENOMEM;
+
+	if (ops->check_write_begin) {
+		/* Allow the netfs (eg. ceph) to flush conflicts. */
+		ret = ops->check_write_begin(file, pos, len, page, _fsdata);
+		if (ret < 0) {
+			if (ret == -EAGAIN)
+				goto retry;
+			goto error;
+		}
+	}
+
+	if (PageUptodate(page))
+		goto have_page;
+
+	/* If the page is beyond the EOF, we want to clear it - unless it's
+	 * within the cache granule containing the EOF, in which case we need
+	 * to preload the granule.
+	 */
+	size = i_size_read(inode);
+	if (!ops->is_cache_enabled(inode) &&
+	    ((pos_in_page == 0 && len == thp_size(page)) ||
+	     (pos >= size) ||
+	     (pos_in_page == 0 && (pos + len) >= size))) {
+		netfs_clear_thp(page);
+		SetPageUptodate(page);
+		netfs_stat(&netfs_n_rh_write_zskip);
+		goto have_page_no_wait;
+	}
+
+	ret = -ENOMEM;
+	rreq = netfs_alloc_read_request(ops, netfs_priv, file);
+	if (!rreq)
+		goto error;
+	rreq->mapping = page->mapping;
+	rreq->start = page->index * PAGE_SIZE;
+	rreq->len = thp_size(page);
+	rreq->no_unlock_page = page->index;
+	__set_bit(NETFS_RREQ_NO_UNLOCK_PAGE, &rreq->flags);
+	netfs_priv = NULL;
+
+	netfs_stat(&netfs_n_rh_write_begin);
+	trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
+
+	/* Expand the request to meet caching requirements and download
+	 * preferences.
+	 */
+	ractl._nr_pages = thp_nr_pages(page);
+	netfs_rreq_expand(rreq, &ractl);
+	netfs_get_read_request(rreq);
+
+	/* We hold the page locks, so we can drop the references */
+	while ((xpage = readahead_page(&ractl)))
+		if (xpage != page)
+			put_page(xpage);
+
+	atomic_set(&rreq->nr_rd_ops, 1);
+	do {
+		if (!netfs_rreq_submit_slice(rreq, &debug_index))
+			break;
+
+	} while (rreq->submitted < rreq->len);
+
+	/* Keep nr_rd_ops incremented so that the ref always belongs to us, and
+	 * the service code isn't punted off to a random thread pool to
+	 * process.
+	 */
+	for (;;) {
+		wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1);
+		netfs_rreq_assess(rreq, false);
+		if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
+			break;
+		cond_resched();
+	}
+
+	ret = rreq->error;
+	if (ret == 0 && rreq->submitted < rreq->len)
+		ret = -EIO;
+	netfs_put_read_request(rreq, false);
+	if (ret < 0)
+		goto error;
+
+have_page:
+	ret = wait_on_page_fscache_killable(page);
+	if (ret < 0)
+		goto error;
+have_page_no_wait:
+	if (netfs_priv)
+		ops->cleanup(netfs_priv, mapping);
+	*_page = page;
+	_leave(" = 0");
+	return 0;
+
+error_put:
+	netfs_put_read_request(rreq, false);
+error:
+	unlock_page(page);
+	put_page(page);
+	if (netfs_priv)
+		ops->cleanup(netfs_priv, mapping);
+	_leave(" = %d", ret);
+	return ret;
+}
+EXPORT_SYMBOL(netfs_write_begin);
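
To make the calling convention concrete, here is a minimal sketch of the glue a network filesystem might add around the new helper. Everything named myfs_* below is hypothetical, not part of this patch; per the kerneldoc above, only issue_op is mandatory and check_write_begin is optional.

/* Hypothetical "myfs" glue for netfs_write_begin(); a sketch, not
 * part of this patch.
 */
static bool myfs_is_cache_enabled(struct inode *inode)
{
	return false;	/* no fscache backing in this sketch */
}

static void myfs_issue_op(struct netfs_read_subrequest *subreq)
{
	/* Dispatch an RPC for subreq->len bytes at subreq->start, then
	 * report the outcome with netfs_subreq_terminated().
	 */
}

static int myfs_check_write_begin(struct file *file, loff_t pos,
				  unsigned int len, struct page *page,
				  void **_fsdata)
{
	/* Flush any conflicting writes here.  Return 0 to proceed, or
	 * unlock the page and return -EAGAIN to have it re-obtained.
	 */
	return 0;
}

static const struct netfs_read_request_ops myfs_req_ops = {
	.is_cache_enabled	= myfs_is_cache_enabled,
	.issue_op		= myfs_issue_op,
	.check_write_begin	= myfs_check_write_begin,
};

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned int len, unsigned int flags,
			    struct page **pagep, void **fsdata)
{
	return netfs_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, &myfs_req_ops, NULL);
}

Passing NULL for netfs_priv here means the helper has nothing to hand to ops->cleanup() on its exit paths; a filesystem that does pass private data must also supply a cleanup op.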
diff --git a/fs/netfs/stats.c b/fs/netfs/stats.c
index df6ff5718f25..9ae538c85378 100644
--- a/fs/netfs/stats.c
+++ b/fs/netfs/stats.c
@@ -24,19 +24,24 @@ atomic_t netfs_n_rh_read_failed;
 atomic_t netfs_n_rh_zero;
 atomic_t netfs_n_rh_short_read;
 atomic_t netfs_n_rh_write;
+atomic_t netfs_n_rh_write_begin;
 atomic_t netfs_n_rh_write_done;
 atomic_t netfs_n_rh_write_failed;
+atomic_t netfs_n_rh_write_zskip;
 
 void netfs_stats_show(struct seq_file *m)
 {
-	seq_printf(m, "RdHelp : RA=%u RP=%u rr=%u sr=%u\n",
+	seq_printf(m, "RdHelp : RA=%u RP=%u WB=%u WBZ=%u rr=%u sr=%u\n",
 		   atomic_read(&netfs_n_rh_readahead),
 		   atomic_read(&netfs_n_rh_readpage),
+		   atomic_read(&netfs_n_rh_write_begin),
+		   atomic_read(&netfs_n_rh_write_zskip),
 		   atomic_read(&netfs_n_rh_rreq),
 		   atomic_read(&netfs_n_rh_sreq));
-	seq_printf(m, "RdHelp : ZR=%u sh=%u\n",
+	seq_printf(m, "RdHelp : ZR=%u sh=%u sk=%u\n",
 		   atomic_read(&netfs_n_rh_zero),
-		   atomic_read(&netfs_n_rh_short_read));
+		   atomic_read(&netfs_n_rh_short_read),
+		   atomic_read(&netfs_n_rh_write_zskip));
 	seq_printf(m, "RdHelp : DL=%u ds=%u df=%u di=%u\n",
 		   atomic_read(&netfs_n_rh_download),
 		   atomic_read(&netfs_n_rh_download_done),
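
With the counters wired in, the first "RdHelp" line emitted by netfs_stats_show() gains WB= and WBZ= fields and the second gains sk= (both WBZ= and sk= read netfs_n_rh_write_zskip, so they match). Illustrative output; every count below is invented:

RdHelp : RA=982 RP=34 WB=642 WBZ=215 rr=1658 sr=4230
RdHelp : ZR=26 sh=3 sk=215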