Diffstat (limited to 'fs/xfs/xfs_trans.c')
 fs/xfs/xfs_trans.c | 165
 1 file changed, 139 insertions(+), 26 deletions(-)
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 67dec11e34c7..0ac717aad380 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -25,7 +25,7 @@
#include "xfs_dquot.h"
#include "xfs_icache.h"
-kmem_zone_t *xfs_trans_zone;
+struct kmem_cache *xfs_trans_cache;
#if defined(CONFIG_TRACEPOINTS)
static void
@@ -76,7 +76,7 @@ xfs_trans_free(
if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
sb_end_intwrite(tp->t_mountp->m_super);
xfs_trans_free_dqinfo(tp);
- kmem_cache_free(xfs_trans_zone, tp);
+ kmem_cache_free(xfs_trans_cache, tp);
}
/*
@@ -95,7 +95,7 @@ xfs_trans_dup(
trace_xfs_trans_dup(tp, _RET_IP_);
- ntp = kmem_cache_zalloc(xfs_trans_zone, GFP_KERNEL | __GFP_NOFAIL);
+ ntp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL | __GFP_NOFAIL);
/*
* Initialize the new transaction structure.
@@ -263,7 +263,7 @@ xfs_trans_alloc(
* by doing GFP_KERNEL allocations inside sb_start_intwrite().
*/
retry:
- tp = kmem_cache_zalloc(xfs_trans_zone, GFP_KERNEL | __GFP_NOFAIL);
+ tp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL | __GFP_NOFAIL);
if (!(flags & XFS_TRANS_NO_WRITECOUNT))
sb_start_intwrite(mp->m_super);
xfs_trans_set_context(tp);
@@ -477,7 +477,7 @@ STATIC void
xfs_trans_apply_sb_deltas(
xfs_trans_t *tp)
{
- xfs_dsb_t *sbp;
+ struct xfs_dsb *sbp;
struct xfs_buf *bp;
int whole = 0;
@@ -541,14 +541,14 @@ xfs_trans_apply_sb_deltas(
/*
* Log the whole thing, the fields are noncontiguous.
*/
- xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
+ xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb) - 1);
else
/*
* Since all the modifiable fields are contiguous, we
* can get away with this.
*/
- xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
- offsetof(xfs_dsb_t, sb_frextents) +
+ xfs_trans_log_buf(tp, bp, offsetof(struct xfs_dsb, sb_icount),
+ offsetof(struct xfs_dsb, sb_frextents) +
sizeof(sbp->sb_frextents) - 1);
}
@@ -648,7 +648,7 @@ xfs_trans_add_item(
struct xfs_trans *tp,
struct xfs_log_item *lip)
{
- ASSERT(lip->li_mountp == tp->t_mountp);
+ ASSERT(lip->li_log == tp->t_mountp->m_log);
ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
ASSERT(list_empty(&lip->li_trans));
ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));
@@ -775,7 +775,7 @@ xfs_trans_committed_bulk(
* object into the AIL as we are in a shutdown situation.
*/
if (aborted) {
- ASSERT(xfs_is_shutdown(ailp->ail_mount));
+ ASSERT(xlog_is_shutdown(ailp->ail_log));
if (lip->li_ops->iop_unpin)
lip->li_ops->iop_unpin(lip, 1);
continue;
@@ -836,6 +836,7 @@ __xfs_trans_commit(
bool regrant)
{
struct xfs_mount *mp = tp->t_mountp;
+ struct xlog *log = mp->m_log;
xfs_csn_t commit_seq = 0;
int error = 0;
int sync = tp->t_flags & XFS_TRANS_SYNC;
@@ -864,7 +865,13 @@ __xfs_trans_commit(
if (!(tp->t_flags & XFS_TRANS_DIRTY))
goto out_unreserve;
- if (xfs_is_shutdown(mp)) {
+ /*
+ * We must check against log shutdown here because we cannot abort log
+ * items and leave them dirty, inconsistent and unpinned in memory while
+ * the log is active. This leaves them open to being written back to
+ * disk, and that will lead to on-disk corruption.
+ */
+ if (xlog_is_shutdown(log)) {
error = -EIO;
goto out_unreserve;
}
@@ -878,7 +885,7 @@ __xfs_trans_commit(
xfs_trans_apply_sb_deltas(tp);
xfs_trans_apply_dquot_deltas(tp);
- xlog_cil_commit(mp->m_log, tp, &commit_seq, regrant);
+ xlog_cil_commit(log, tp, &commit_seq, regrant);
xfs_trans_free(tp);
@@ -905,10 +912,10 @@ out_unreserve:
*/
xfs_trans_unreserve_and_mod_dquots(tp);
if (tp->t_ticket) {
- if (regrant && !xlog_is_shutdown(mp->m_log))
- xfs_log_ticket_regrant(mp->m_log, tp->t_ticket);
+ if (regrant && !xlog_is_shutdown(log))
+ xfs_log_ticket_regrant(log, tp->t_ticket);
else
- xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
+ xfs_log_ticket_ungrant(log, tp->t_ticket);
tp->t_ticket = NULL;
}
xfs_trans_free_items(tp, !!error);
@@ -926,36 +933,56 @@ xfs_trans_commit(
}
/*
- * Unlock all of the transaction's items and free the transaction.
- * The transaction must not have modified any of its items, because
- * there is no way to restore them to their previous state.
+ * Unlock all of the transaction's items and free the transaction. If the
+ * transaction is dirty, we must shut down the filesystem because there is no
+ * way to restore the dirty items to their previous state.
*
- * If the transaction has made a log reservation, make sure to release
- * it as well.
+ * If the transaction has made a log reservation, make sure to release it as
+ * well.
+ *
+ * This is a high level function (equivalent to xfs_trans_commit()) and so can
+ * be called after the transaction has effectively been aborted due to the mount
+ * being shut down. However, if the mount has not been shut down and the
+ * transaction is dirty we will shut the mount down and, in doing so, that
+ * guarantees that the log is shut down, too. Hence we don't need to be as
+ * careful with shutdown state and dirty items here as we need to be in
+ * xfs_trans_commit().
*/
void
xfs_trans_cancel(
struct xfs_trans *tp)
{
struct xfs_mount *mp = tp->t_mountp;
+ struct xlog *log = mp->m_log;
bool dirty = (tp->t_flags & XFS_TRANS_DIRTY);
trace_xfs_trans_cancel(tp, _RET_IP_);
- if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
+ /*
+ * It's never valid to cancel a transaction with deferred ops attached,
+ * because the transaction is effectively dirty. Complain about this
+ * loudly before freeing the in-memory defer items.
+ */
+ if (!list_empty(&tp->t_dfops)) {
+ ASSERT(xfs_is_shutdown(mp) || list_empty(&tp->t_dfops));
+ ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
+ dirty = true;
xfs_defer_cancel(tp);
+ }
/*
- * See if the caller is relying on us to shut down the
- * filesystem. This happens in paths where we detect
- * corruption and decide to give up.
+ * See if the caller is relying on us to shut down the filesystem. We
+ * only want an error report if there isn't already a shutdown in
+ * progress, so we only need to check against the mount shutdown state
+ * here.
*/
if (dirty && !xfs_is_shutdown(mp)) {
XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
}
#ifdef DEBUG
- if (!dirty && !xfs_is_shutdown(mp)) {
+ /* Log items need to be consistent until the log is shut down. */
+ if (!dirty && !xlog_is_shutdown(log)) {
struct xfs_log_item *lip;
list_for_each_entry(lip, &tp->t_items, li_trans)
@@ -966,7 +993,7 @@ xfs_trans_cancel(
xfs_trans_unreserve_and_mod_dquots(tp);
if (tp->t_ticket) {
- xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
+ xfs_log_ticket_ungrant(log, tp->t_ticket);
tp->t_ticket = NULL;
}
@@ -1201,3 +1228,89 @@ out_cancel:
xfs_trans_cancel(tp);
return error;
}
+
+/*
+ * Allocate a transaction, lock and join the directory and child inodes to it,
+ * and reserve quota for a directory update. If there isn't sufficient space,
+ * @dblocks will be set to zero for a reservationless directory update and
+ * @nospace_error will be set to a negative errno describing the space
+ * constraint we hit.
+ *
+ * The caller must ensure that the on-disk dquots attached to these inodes have
+ * already been allocated and initialized. The ILOCKs will be dropped when the
+ * transaction is committed or cancelled.
+ */
+int
+xfs_trans_alloc_dir(
+ struct xfs_inode *dp,
+ struct xfs_trans_res *resv,
+ struct xfs_inode *ip,
+ unsigned int *dblocks,
+ struct xfs_trans **tpp,
+ int *nospace_error)
+{
+ struct xfs_trans *tp;
+ struct xfs_mount *mp = ip->i_mount;
+ unsigned int resblks;
+ bool retried = false;
+ int error;
+
+retry:
+ *nospace_error = 0;
+ resblks = *dblocks;
+ error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp);
+ if (error == -ENOSPC) {
+ *nospace_error = error;
+ resblks = 0;
+ error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp);
+ }
+ if (error)
+ return error;
+
+ xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
+
+ xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+ error = xfs_qm_dqattach_locked(dp, false);
+ if (error) {
+ /* Caller should have allocated the dquots! */
+ ASSERT(error != -ENOENT);
+ goto out_cancel;
+ }
+
+ error = xfs_qm_dqattach_locked(ip, false);
+ if (error) {
+ /* Caller should have allocated the dquots! */
+ ASSERT(error != -ENOENT);
+ goto out_cancel;
+ }
+
+ if (resblks == 0)
+ goto done;
+
+ error = xfs_trans_reserve_quota_nblks(tp, dp, resblks, 0, false);
+ if (error == -EDQUOT || error == -ENOSPC) {
+ if (!retried) {
+ xfs_trans_cancel(tp);
+ xfs_blockgc_free_quota(dp, 0);
+ retried = true;
+ goto retry;
+ }
+
+ *nospace_error = error;
+ resblks = 0;
+ error = 0;
+ }
+ if (error)
+ goto out_cancel;
+
+done:
+ *tpp = tp;
+ *dblocks = resblks;
+ return 0;
+
+out_cancel:
+ xfs_trans_cancel(tp);
+ return error;
+}
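
For reference, below is a minimal caller sketch for the new xfs_trans_alloc_dir() helper. It is not part of the patch: the function name xfs_example_dir_update, the choice of M_RES(mp)->tr_remove and XFS_REMOVE_SPACE_RES() for the reservation, and the way the saved @nospace_error is surfaced are assumptions modeled on a typical XFS directory update path, and the usual xfs headers (as included by xfs_trans.c) are assumed to be available.

/*
 * Illustrative only: a hypothetical directory update path consuming
 * xfs_trans_alloc_dir(). The helper returns with both inodes ILOCKed and
 * joined to the transaction; *dblocks may have been trimmed to zero and
 * *nospace_error records the original space constraint.
 */
static int
xfs_example_dir_update(
	struct xfs_inode	*dp,	/* directory being modified */
	struct xfs_inode	*ip)	/* child inode */
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_trans	*tp;
	unsigned int		resblks = XFS_REMOVE_SPACE_RES(mp);
	int			nospace_error = 0;
	int			error;

	error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, ip, &resblks,
			&tp, &nospace_error);
	if (error)
		return error;

	/*
	 * ... modify the directory here; if the update fails for lack of
	 * space and the reservation was trimmed to zero, surface the errno
	 * saved in nospace_error rather than the secondary failure ...
	 */

	error = xfs_trans_commit(tp);
	if (error == -ENOSPC && nospace_error)
		error = nospace_error;
	return error;
}

The point of the two-step reservation inside the helper is that a caller can still attempt a reservationless directory update when free space or quota is exhausted, while keeping the original -ENOSPC/-EDQUOT around to report if that fallback also fails.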