Diffstat (limited to 'fs/xfs/xfs_buf.c')
-rw-r--r--  fs/xfs/xfs_buf.c  30 ++++++++++++++++++++----------
1 file changed, 20 insertions(+), 10 deletions(-)
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index f8400bbd6473..f6e5235df7c9 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -43,7 +43,7 @@ static kmem_zone_t *xfs_buf_zone;
* pag_buf_lock
* lru_lock
*
- * xfs_buftarg_wait_rele
+ * xfs_buftarg_drain_rele
* lru_lock
* b_lock (trylock due to inversion)
*
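The lock-ordering comment above is the key to the "trylock due to inversion" note: the release path takes b_lock before lru_lock, while the drain walker already holds lru_lock when it wants b_lock. Taking b_lock unconditionally there would be a classic ABBA deadlock, so the walker trylocks and skips the buffer on failure. A minimal userspace analogue of the pattern (plain pthreads; all names are illustrative, not the kernel API):

#include <pthread.h>

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t b_lock = PTHREAD_MUTEX_INITIALIZER;

/* Release path: b_lock first, then lru_lock (the documented order). */
static void release_path(void)
{
	pthread_mutex_lock(&b_lock);
	pthread_mutex_lock(&lru_lock);
	/* ... add or remove the buffer on the LRU ... */
	pthread_mutex_unlock(&lru_lock);
	pthread_mutex_unlock(&b_lock);
}

/*
 * Walker path: lru_lock is already held, so blocking on b_lock would
 * invert the order above and can deadlock against release_path().
 * Trylock instead and skip the buffer on failure.
 */
static int walker_path(void)
{
	pthread_mutex_lock(&lru_lock);
	if (pthread_mutex_trylock(&b_lock) != 0) {
		pthread_mutex_unlock(&lru_lock);
		return -1;	/* skipped; the outer loop retries later */
	}
	/* ... isolate the buffer ... */
	pthread_mutex_unlock(&b_lock);
	pthread_mutex_unlock(&lru_lock);
	return 0;
}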
@@ -88,7 +88,7 @@ xfs_buf_vmap_len(
* because the corresponding decrement is deferred to buffer release. Buffers
* can undergo I/O multiple times in a hold-release cycle and per buffer I/O
 * tracking adds unnecessary overhead. This is used for synchronization purposes
- * with unmount (see xfs_wait_buftarg()), so all we really need is a count of
+ * with unmount (see xfs_buftarg_drain()), so all we really need is a count of
* in-flight buffers.
*
* Buffers that are never released (e.g., superblock, iclog buffers) must set
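The accounting described above only needs unmount to know when every in-flight buffer has been released, so one counter per buftarg is enough, bumped once per hold-release cycle rather than per I/O. A simplified single-counter sketch of that scheme (C11 atomics standing in for the kernel's percpu counter; the in_flight flag plays the role of the _XBF_IN_FLIGHT state bit):

#include <stdatomic.h>
#include <unistd.h>

static atomic_long io_count;		/* stands in for btp->bt_io_count */

/* First I/O while held: count the buffer once, not every submission. */
static void buf_ioacct_inc(int *in_flight)
{
	if (!*in_flight) {
		*in_flight = 1;
		atomic_fetch_add(&io_count, 1);
	}
}

/* The decrement is deferred to buffer release, as the comment says. */
static void buf_ioacct_dec(int *in_flight)
{
	if (*in_flight) {
		*in_flight = 0;
		atomic_fetch_sub(&io_count, 1);
	}
}

/* Unmount-style wait: poll until every in-flight buffer is released. */
static void buftarg_wait(void)
{
	while (atomic_load(&io_count) != 0)
		usleep(100000);		/* cf. the delay(100) poll below */
}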
@@ -1786,7 +1786,7 @@ __xfs_buf_mark_corrupt(
* while freeing all the buffers only held by the LRU.
*/
static enum lru_status
-xfs_buftarg_wait_rele(
+xfs_buftarg_drain_rele(
struct list_head *item,
struct list_lru_one *lru,
spinlock_t *lru_lock,
@@ -1798,7 +1798,7 @@ xfs_buftarg_wait_rele(
if (atomic_read(&bp->b_hold) > 1) {
/* need to wait, so skip it this pass */
- trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
+ trace_xfs_buf_drain_buftarg(bp, _RET_IP_);
return LRU_SKIP;
}
if (!spin_trylock(&bp->b_lock))
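xfs_buftarg_drain_rele() is an isolate callback for list_lru_walk(): it may not free a buffer that someone else still holds, and it may not block on b_lock while lru_lock is held, so in both cases it reports LRU_SKIP and relies on the caller's outer loop to retry. A compilable sketch of that skip-or-remove shape (hypothetical types, not the kernel's):

#include <stdatomic.h>

enum walk_status { WALK_REMOVED, WALK_SKIP };	/* cf. enum lru_status */

struct buf {
	long hold;		/* reference count, cf. b_hold */
	atomic_flag lock;	/* stand-in for b_lock */
};

static enum walk_status drain_rele(struct buf *bp)
{
	if (bp->hold > 1)
		return WALK_SKIP;	/* still held elsewhere, retry later */
	if (atomic_flag_test_and_set(&bp->lock))
		return WALK_SKIP;	/* trylock failed, honor lock order */
	/* ... move bp to the caller's dispose list, off the LRU ... */
	return WALK_REMOVED;
}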
@@ -1815,14 +1815,13 @@ xfs_buftarg_wait_rele(
return LRU_REMOVED;
}
+/*
+ * Wait for outstanding I/O on the buftarg to complete.
+ */
void
-xfs_wait_buftarg(
+xfs_buftarg_wait(
struct xfs_buftarg *btp)
{
- LIST_HEAD(dispose);
- int loop = 0;
- bool write_fail = false;
-
/*
* First wait on the buftarg I/O count for all in-flight buffers to be
* released. This is critical as new buffers do not make the LRU until
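The remainder of the patch splits the old xfs_wait_buftarg() into two layers: xfs_buftarg_wait() only quiesces in-flight I/O, while the new xfs_buftarg_drain() (below) calls it first and then empties the LRU, presumably so callers that need only I/O quiescence get the lighter helper. A schematic of the resulting control flow (bodies reduced to stubs; the struct and field names are made up for illustration):

struct buftarg {
	long io_count;		/* in-flight buffers, cf. bt_io_count */
	long lru_count;		/* buffers held only by the LRU */
};

/* Layer 1: quiesce I/O only (the new xfs_buftarg_wait()). */
static void buftarg_wait(struct buftarg *btp)
{
	while (btp->io_count)
		;	/* the real code sleeps via delay(100) */
}

/* Layer 2: full drain for unmount (the new xfs_buftarg_drain()). */
static void buftarg_drain(struct buftarg *btp)
{
	buftarg_wait(btp);	/* I/O first: buffers only reach the LRU
				 * once they have been released */
	while (btp->lru_count)
		btp->lru_count = 0;	/* one walk pass; buffers still
					 * held would be skipped and
					 * retried on the next pass */
}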
@@ -1838,10 +1837,21 @@ xfs_wait_buftarg(
while (percpu_counter_sum(&btp->bt_io_count))
delay(100);
flush_workqueue(btp->bt_mount->m_buf_workqueue);
+}
+
+void
+xfs_buftarg_drain(
+ struct xfs_buftarg *btp)
+{
+ LIST_HEAD(dispose);
+ int loop = 0;
+ bool write_fail = false;
+
+ xfs_buftarg_wait(btp);
/* loop until there is nothing left on the lru list. */
while (list_lru_count(&btp->bt_lru)) {
- list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
+ list_lru_walk(&btp->bt_lru, xfs_buftarg_drain_rele,
&dispose, LONG_MAX);
while (!list_empty(&dispose)) {