author     Christoph Hellwig <hch@infradead.org>  2012-02-20 06:31:25 +0400
committer  Ben Myers <bpm@sgi.com>                2012-02-23 08:19:53 +0400
commit     28496968a6ac37c8b8c44b5156e633c581bb8378 (patch)
tree       4e0c38612e2718e87219e00b7a3152d53393d145 /fs/xfs/xfs_log.c
parent     14a7235fba4302a82d61150eda92ec90d3ae9cfb (diff)
download   linux-28496968a6ac37c8b8c44b5156e633c581bb8378.tar.xz
xfs: add the xlog_grant_head structure
Add a new data structure to allow sharing code between the log grant and
regrant code.

Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
Diffstat (limited to 'fs/xfs/xfs_log.c')
-rw-r--r--  fs/xfs/xfs_log.c  |  112
1 file changed, 56 insertions, 56 deletions
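The struct xlog_grant_head definition itself lands in fs/xfs/xfs_log_priv.h, which this page filters out. As a rough sketch, the shape implied by the initialisers in xlog_alloc_log below (spin_lock_init() on .lock, INIT_LIST_HEAD() on .waiters, xlog_assign_grant_head() on .grant) is:

struct xlog_grant_head {
	spinlock_t		lock;		/* protects the waiters list */
	struct list_head	waiters;	/* tickets sleeping on this head */
	atomic64_t		grant;		/* packed cycle/bytes grant value */
};

The log then carries two instances, l_reserve_head and l_write_head, folding the six separate l_reserveq/l_writeq, l_grant_reserve_lock/l_grant_write_lock and l_grant_reserve_head/l_grant_write_head fields into two; the hunks below are the field-by-field rename. The exact field order and any alignment annotations in xfs_log_priv.h may differ from this sketch.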
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 02a35fba5eae..ad0cac378e9a 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -158,7 +158,7 @@ xlog_reserveq_wake(
struct xlog_ticket *tic;
int need_bytes;
- list_for_each_entry(tic, &log->l_reserveq, t_queue) {
+ list_for_each_entry(tic, &log->l_reserve_head.waiters, t_queue) {
if (tic->t_flags & XLOG_TIC_PERM_RESERV)
need_bytes = tic->t_unit_res * tic->t_cnt;
else
@@ -183,7 +183,7 @@ xlog_writeq_wake(
struct xlog_ticket *tic;
int need_bytes;
- list_for_each_entry(tic, &log->l_writeq, t_queue) {
+ list_for_each_entry(tic, &log->l_write_head.waiters, t_queue) {
ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
need_bytes = tic->t_unit_res;
@@ -205,7 +205,7 @@ xlog_reserveq_wait(
struct xlog_ticket *tic,
int need_bytes)
{
- list_add_tail(&tic->t_queue, &log->l_reserveq);
+ list_add_tail(&tic->t_queue, &log->l_reserve_head.waiters);
do {
if (XLOG_FORCED_SHUTDOWN(log))
@@ -213,7 +213,7 @@ xlog_reserveq_wait(
xlog_grant_push_ail(log, need_bytes);
__set_current_state(TASK_UNINTERRUPTIBLE);
- spin_unlock(&log->l_grant_reserve_lock);
+ spin_unlock(&log->l_reserve_head.lock);
XFS_STATS_INC(xs_sleep_logspace);
@@ -221,10 +221,10 @@ xlog_reserveq_wait(
schedule();
trace_xfs_log_grant_wake(log, tic);
- spin_lock(&log->l_grant_reserve_lock);
+ spin_lock(&log->l_reserve_head.lock);
if (XLOG_FORCED_SHUTDOWN(log))
goto shutdown;
- } while (xlog_space_left(log, &log->l_grant_reserve_head) < need_bytes);
+ } while (xlog_space_left(log, &log->l_reserve_head.grant) < need_bytes);
list_del_init(&tic->t_queue);
return 0;
@@ -239,7 +239,7 @@ xlog_writeq_wait(
struct xlog_ticket *tic,
int need_bytes)
{
- list_add_tail(&tic->t_queue, &log->l_writeq);
+ list_add_tail(&tic->t_queue, &log->l_write_head.waiters);
do {
if (XLOG_FORCED_SHUTDOWN(log))
@@ -247,7 +247,7 @@ xlog_writeq_wait(
xlog_grant_push_ail(log, need_bytes);
__set_current_state(TASK_UNINTERRUPTIBLE);
- spin_unlock(&log->l_grant_write_lock);
+ spin_unlock(&log->l_write_head.lock);
XFS_STATS_INC(xs_sleep_logspace);
@@ -255,10 +255,10 @@ xlog_writeq_wait(
schedule();
trace_xfs_log_regrant_write_wake(log, tic);
- spin_lock(&log->l_grant_write_lock);
+ spin_lock(&log->l_write_head.lock);
if (XLOG_FORCED_SHUTDOWN(log))
goto shutdown;
- } while (xlog_space_left(log, &log->l_grant_write_head) < need_bytes);
+ } while (xlog_space_left(log, &log->l_write_head.grant) < need_bytes);
list_del_init(&tic->t_queue);
return 0;
@@ -779,22 +779,22 @@ xfs_log_space_wake(
if (XLOG_FORCED_SHUTDOWN(log))
return;
- if (!list_empty_careful(&log->l_writeq)) {
+ if (!list_empty_careful(&log->l_write_head.waiters)) {
ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
- spin_lock(&log->l_grant_write_lock);
- free_bytes = xlog_space_left(log, &log->l_grant_write_head);
+ spin_lock(&log->l_write_head.lock);
+ free_bytes = xlog_space_left(log, &log->l_write_head.grant);
xlog_writeq_wake(log, &free_bytes);
- spin_unlock(&log->l_grant_write_lock);
+ spin_unlock(&log->l_write_head.lock);
}
- if (!list_empty_careful(&log->l_reserveq)) {
+ if (!list_empty_careful(&log->l_reserve_head.waiters)) {
ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
- spin_lock(&log->l_grant_reserve_lock);
- free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
+ spin_lock(&log->l_reserve_head.lock);
+ free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
xlog_reserveq_wake(log, &free_bytes);
- spin_unlock(&log->l_grant_reserve_lock);
+ spin_unlock(&log->l_reserve_head.lock);
}
}
@@ -1070,12 +1070,12 @@ xlog_alloc_log(xfs_mount_t *mp,
xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
log->l_curr_cycle = 1; /* 0 is bad since this is initial value */
- xlog_assign_grant_head(&log->l_grant_reserve_head, 1, 0);
- xlog_assign_grant_head(&log->l_grant_write_head, 1, 0);
- INIT_LIST_HEAD(&log->l_reserveq);
- INIT_LIST_HEAD(&log->l_writeq);
- spin_lock_init(&log->l_grant_reserve_lock);
- spin_lock_init(&log->l_grant_write_lock);
+ xlog_assign_grant_head(&log->l_reserve_head.grant, 1, 0);
+ xlog_assign_grant_head(&log->l_write_head.grant, 1, 0);
+ INIT_LIST_HEAD(&log->l_reserve_head.waiters);
+ INIT_LIST_HEAD(&log->l_write_head.waiters);
+ spin_lock_init(&log->l_reserve_head.lock);
+ spin_lock_init(&log->l_write_head.lock);
error = EFSCORRUPTED;
if (xfs_sb_version_hassector(&mp->m_sb)) {
@@ -1250,7 +1250,7 @@ xlog_grant_push_ail(
ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
- free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
+ free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
free_blocks = BTOBBT(free_bytes);
/*
@@ -1382,8 +1382,8 @@ xlog_sync(xlog_t *log,
roundoff < BBTOB(1)));
/* move grant heads by roundoff in sync */
- xlog_grant_add_space(log, &log->l_grant_reserve_head, roundoff);
- xlog_grant_add_space(log, &log->l_grant_write_head, roundoff);
+ xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
+ xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
/* put cycle number in every block */
xlog_pack_data(log, iclog, roundoff);
@@ -2547,8 +2547,8 @@ restart:
* path. Hence any lock will be globally hot if we take it unconditionally on
* every pass.
*
- * As tickets are only ever moved on and off the reserveq under the
- * l_grant_reserve_lock, we only need to take that lock if we are going to add
+ * As tickets are only ever moved on and off the l_reserve_head.waiters under the
+ * l_reserve_head.lock, we only need to take that lock if we are going to add
* the ticket to the queue and sleep. We can avoid taking the lock if the ticket
* was never added to the reserveq because the t_queue list head will be empty
* and we hold the only reference to it so it can safely be checked unlocked.
@@ -2574,23 +2574,23 @@ xlog_grant_log_space(
need_bytes = tic->t_unit_res;
if (tic->t_flags & XFS_LOG_PERM_RESERV)
need_bytes *= tic->t_ocnt;
- free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
- if (!list_empty_careful(&log->l_reserveq)) {
- spin_lock(&log->l_grant_reserve_lock);
+ free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
+ if (!list_empty_careful(&log->l_reserve_head.waiters)) {
+ spin_lock(&log->l_reserve_head.lock);
if (!xlog_reserveq_wake(log, &free_bytes) ||
free_bytes < need_bytes)
error = xlog_reserveq_wait(log, tic, need_bytes);
- spin_unlock(&log->l_grant_reserve_lock);
+ spin_unlock(&log->l_reserve_head.lock);
} else if (free_bytes < need_bytes) {
- spin_lock(&log->l_grant_reserve_lock);
+ spin_lock(&log->l_reserve_head.lock);
error = xlog_reserveq_wait(log, tic, need_bytes);
- spin_unlock(&log->l_grant_reserve_lock);
+ spin_unlock(&log->l_reserve_head.lock);
}
if (error)
return error;
- xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes);
- xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
+ xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
+ xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
trace_xfs_log_grant_exit(log, tic);
xlog_verify_grant_tail(log);
return 0;
@@ -2627,23 +2627,23 @@ xlog_regrant_write_log_space(
* otherwise try to get some space for this transaction.
*/
need_bytes = tic->t_unit_res;
- free_bytes = xlog_space_left(log, &log->l_grant_write_head);
- if (!list_empty_careful(&log->l_writeq)) {
- spin_lock(&log->l_grant_write_lock);
+ free_bytes = xlog_space_left(log, &log->l_write_head.grant);
+ if (!list_empty_careful(&log->l_write_head.waiters)) {
+ spin_lock(&log->l_write_head.lock);
if (!xlog_writeq_wake(log, &free_bytes) ||
free_bytes < need_bytes)
error = xlog_writeq_wait(log, tic, need_bytes);
- spin_unlock(&log->l_grant_write_lock);
+ spin_unlock(&log->l_write_head.lock);
} else if (free_bytes < need_bytes) {
- spin_lock(&log->l_grant_write_lock);
+ spin_lock(&log->l_write_head.lock);
error = xlog_writeq_wait(log, tic, need_bytes);
- spin_unlock(&log->l_grant_write_lock);
+ spin_unlock(&log->l_write_head.lock);
}
if (error)
return error;
- xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
+ xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
trace_xfs_log_regrant_write_exit(log, tic);
xlog_verify_grant_tail(log);
return 0;
@@ -2665,9 +2665,9 @@ xlog_regrant_reserve_log_space(xlog_t *log,
if (ticket->t_cnt > 0)
ticket->t_cnt--;
- xlog_grant_sub_space(log, &log->l_grant_reserve_head,
+ xlog_grant_sub_space(log, &log->l_reserve_head.grant,
ticket->t_curr_res);
- xlog_grant_sub_space(log, &log->l_grant_write_head,
+ xlog_grant_sub_space(log, &log->l_write_head.grant,
ticket->t_curr_res);
ticket->t_curr_res = ticket->t_unit_res;
xlog_tic_reset_res(ticket);
@@ -2678,7 +2678,7 @@ xlog_regrant_reserve_log_space(xlog_t *log,
if (ticket->t_cnt > 0)
return;
- xlog_grant_add_space(log, &log->l_grant_reserve_head,
+ xlog_grant_add_space(log, &log->l_reserve_head.grant,
ticket->t_unit_res);
trace_xfs_log_regrant_reserve_exit(log, ticket);
@@ -2724,8 +2724,8 @@ xlog_ungrant_log_space(xlog_t *log,
bytes += ticket->t_unit_res*ticket->t_cnt;
}
- xlog_grant_sub_space(log, &log->l_grant_reserve_head, bytes);
- xlog_grant_sub_space(log, &log->l_grant_write_head, bytes);
+ xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes);
+ xlog_grant_sub_space(log, &log->l_write_head.grant, bytes);
trace_xfs_log_ungrant_exit(log, ticket);
@@ -3349,7 +3349,7 @@ xlog_verify_grant_tail(
int tail_cycle, tail_blocks;
int cycle, space;
- xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space);
+ xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space);
xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
if (tail_cycle != cycle) {
if (cycle - 1 != tail_cycle &&
@@ -3619,15 +3619,15 @@ xfs_log_force_umount(
* we don't enqueue anything once the SHUTDOWN flag is set, and this
* action is protected by the grant locks.
*/
- spin_lock(&log->l_grant_reserve_lock);
- list_for_each_entry(tic, &log->l_reserveq, t_queue)
+ spin_lock(&log->l_reserve_head.lock);
+ list_for_each_entry(tic, &log->l_reserve_head.waiters, t_queue)
wake_up_process(tic->t_task);
- spin_unlock(&log->l_grant_reserve_lock);
+ spin_unlock(&log->l_reserve_head.lock);
- spin_lock(&log->l_grant_write_lock);
- list_for_each_entry(tic, &log->l_writeq, t_queue)
+ spin_lock(&log->l_write_head.lock);
+ list_for_each_entry(tic, &log->l_write_head.waiters, t_queue)
wake_up_process(tic->t_task);
- spin_unlock(&log->l_grant_write_lock);
+ spin_unlock(&log->l_write_head.lock);
if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
ASSERT(!logerror);
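The code sharing this structure enables shows up in the follow-on patches of the series rather than in this diff: once both grant heads have the same shape, the duplicated reserve/write paths above can collapse into helpers that take a struct xlog_grant_head * parameter. A hypothetical sketch of such a helper, built only from calls already visible in xlog_alloc_log above (the helper name is an assumption for illustration, not something this patch adds):

static void
xlog_grant_head_init(
	struct xlog_grant_head	*head)
{
	/* grant head starts at cycle 1, byte offset 0, as in the old code */
	xlog_assign_grant_head(&head->grant, 1, 0);
	INIT_LIST_HEAD(&head->waiters);
	spin_lock_init(&head->lock);
}

With this, the three pairs of init calls in xlog_alloc_log reduce to xlog_grant_head_init(&log->l_reserve_head) and xlog_grant_head_init(&log->l_write_head), and the same parameterisation would apply to the still-duplicated wake/wait pairs (xlog_reserveq_wake/xlog_writeq_wake, xlog_reserveq_wait/xlog_writeq_wait).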