 fs/xfs/xfs_iomap.c | 44 ++++++++++++++++++++++----------------------
 1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index b9a8c3798e08..3abb8b9d6f4c 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -293,11 +293,11 @@ out_trans_cancel:
 
 STATIC bool
 xfs_quota_need_throttle(
-	struct xfs_inode *ip,
-	int type,
-	xfs_fsblock_t alloc_blocks)
+	struct xfs_inode	*ip,
+	xfs_dqtype_t		type,
+	xfs_fsblock_t		alloc_blocks)
 {
-	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);
+	struct xfs_dquot	*dq = xfs_inode_dquot(ip, type);
 
 	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
 		return false;
@@ -307,7 +307,7 @@ xfs_quota_need_throttle
 		return false;
 
 	/* under the lo watermark, no throttle */
-	if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
+	if (dq->q_blk.reserved + alloc_blocks < dq->q_prealloc_lo_wmark)
 		return false;
 
 	return true;
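
Taken together, the two hunks above cover xfs_quota_need_throttle(): throttling is considered only when the quota is enabled, a hi watermark is configured (that check sits in the lines elided between the hunks), and the reservation plus the proposed prealloc has crossed the lo watermark. A minimal standalone sketch of that gate, with illustrative stand-in names (struct dq_gate is not a kernel type):

/* Sketch of the throttle gate; field names are simplified stand-ins. */
#include <stdbool.h>
#include <stdint.h>

struct dq_gate {
	int64_t reserved;          /* stands in for dq->q_blk.reserved */
	int64_t prealloc_lo_wmark; /* dq->q_prealloc_lo_wmark */
	int64_t prealloc_hi_wmark; /* zero means no watermark configured */
};

static bool need_throttle(const struct dq_gate *dq, int64_t alloc_blocks)
{
	/* no dquot or no hi watermark: nothing to throttle against */
	if (!dq || !dq->prealloc_hi_wmark)
		return false;
	/* still under the lo watermark: leave the prealloc alone */
	if (dq->reserved + alloc_blocks < dq->prealloc_lo_wmark)
		return false;
	return true;
}

int main(void)
{
	struct dq_gate dq = { .reserved = 90, .prealloc_lo_wmark = 100,
			      .prealloc_hi_wmark = 200 };
	/* 90 reserved + 20 proposed crosses the lo watermark of 100 */
	return need_throttle(&dq, 20) ? 0 : 1;
}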
@@ -315,24 +315,24 @@ xfs_quota_need_throttle
 
 STATIC void
 xfs_quota_calc_throttle(
-	struct xfs_inode *ip,
-	int type,
-	xfs_fsblock_t *qblocks,
-	int *qshift,
-	int64_t *qfreesp)
+	struct xfs_inode	*ip,
+	xfs_dqtype_t		type,
+	xfs_fsblock_t		*qblocks,
+	int			*qshift,
+	int64_t			*qfreesp)
 {
-	int64_t freesp;
-	int shift = 0;
-	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);
+	struct xfs_dquot	*dq = xfs_inode_dquot(ip, type);
+	int64_t			freesp;
+	int			shift = 0;
 
 	/* no dq, or over hi wmark, squash the prealloc completely */
-	if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
+	if (!dq || dq->q_blk.reserved >= dq->q_prealloc_hi_wmark) {
 		*qblocks = 0;
 		*qfreesp = 0;
 		return;
 	}
 
-	freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
+	freesp = dq->q_prealloc_hi_wmark - dq->q_blk.reserved;
 	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
 		shift = 2;
 		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
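
The shift computed above scales the preallocation back by powers of two as the dquot's reservation approaches its hi watermark; the hunk ends before the function does, and in current kernels the same pattern tightens the shift again at the 1% threshold. A standalone sketch of the calculation under those assumptions, with made-up stand-in names:

/* Sketch of the throttle-shift calculation; all names are stand-ins. */
#include <stdint.h>
#include <stdio.h>

struct dq_sketch {
	int64_t reserved;  /* stands in for dq->q_blk.reserved */
	int64_t hi_wmark;  /* dq->q_prealloc_hi_wmark */
	int64_t low_5pct;  /* dq->q_low_space[XFS_QLOWSP_5_PCNT] */
	int64_t low_3pct;  /* dq->q_low_space[XFS_QLOWSP_3_PCNT] */
	int64_t low_1pct;  /* dq->q_low_space[XFS_QLOWSP_1_PCNT] */
};

/* Returns the number of bits to right-shift the prealloc size by,
 * or -1 for "squash the prealloc completely" (over the hi watermark). */
static int throttle_shift(const struct dq_sketch *dq)
{
	int64_t freesp;
	int shift = 0;

	if (dq->reserved >= dq->hi_wmark)
		return -1;
	freesp = dq->hi_wmark - dq->reserved;
	if (freesp < dq->low_5pct) {
		shift = 2;                /* within 5%: quarter the prealloc */
		if (freesp < dq->low_3pct)
			shift += 2;       /* within 3%: one sixteenth */
		if (freesp < dq->low_1pct)
			shift += 2;       /* within 1%: one sixty-fourth */
	}
	return shift;
}

int main(void)
{
	/* 1000-block hi watermark, 960 reserved: freesp = 40, which is
	 * under the 5% threshold (50) but not the 3% one (30), so shift = 2 */
	struct dq_sketch dq = { .reserved = 960, .hi_wmark = 1000,
				.low_5pct = 50, .low_3pct = 30, .low_1pct = 10 };
	printf("shift = %d\n", throttle_shift(&dq));
	return 0;
}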
@@ -450,14 +450,14 @@ xfs_iomap_prealloc_size
 	 * Check each quota to cap the prealloc size, provide a shift value to
 	 * throttle with and adjust amount of available space.
 	 */
-	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
-		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift,
+	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_USER, alloc_blocks))
+		xfs_quota_calc_throttle(ip, XFS_DQTYPE_USER, &qblocks, &qshift,
 					&freesp);
-	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
-		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift,
+	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_GROUP, alloc_blocks))
+		xfs_quota_calc_throttle(ip, XFS_DQTYPE_GROUP, &qblocks, &qshift,
 					&freesp);
-	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
-		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift,
+	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_PROJ, alloc_blocks))
+		xfs_quota_calc_throttle(ip, XFS_DQTYPE_PROJ, &qblocks, &qshift,
 					&freesp);
 
 	/*
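
For context on the qblocks/qshift/qfreesp out-parameters threaded through the three calls above: across the user, group, and project quotas, only the most aggressive throttle should survive. The accumulation itself happens inside xfs_quota_calc_throttle() and is outside this diff, so the sketch below is hypothetical: the accumulate() helper and all values are invented to show the shape of the pattern and how the final shift might be applied.

/* Hypothetical sketch: keep only the most aggressive per-quota throttle. */
#include <stdint.h>
#include <stdio.h>

struct quota_sketch {
	int	throttled; /* what xfs_quota_need_throttle() would answer */
	int64_t	freesp;    /* blocks left before this quota's hi watermark */
	int	shift;     /* throttle shift computed for this quota */
};

/* Overwrite the running throttle only if this quota is more aggressive. */
static void accumulate(const struct quota_sketch *q, uint64_t *qblocks,
		       int *qshift, int64_t *qfreesp)
{
	if (!q->throttled)
		return;
	if (q->freesp < *qfreesp)
		*qfreesp = q->freesp;
	if (((uint64_t)q->freesp >> q->shift) < (*qblocks >> *qshift)) {
		*qblocks = (uint64_t)q->freesp;
		*qshift = q->shift;
	}
}

int main(void)
{
	/* one call per quota type, mirroring the user/group/project trio */
	struct quota_sketch user  = { 1, 400, 2 };
	struct quota_sketch group = { 1, 100, 4 };
	struct quota_sketch proj  = { 0, 0, 0 };   /* not throttling */

	uint64_t alloc_blocks = 4096, qblocks = alloc_blocks;
	int64_t freesp = INT64_MAX;
	int qshift = 0;

	accumulate(&user,  &qblocks, &qshift, &freesp);
	accumulate(&group, &qblocks, &qshift, &freesp);
	accumulate(&proj,  &qblocks, &qshift, &freesp);

	if (qblocks < alloc_blocks)
		alloc_blocks = qblocks;   /* cap at the tightest quota */
	if (qshift)
		alloc_blocks >>= qshift;  /* apply the throttle shift */

	printf("prealloc capped to %llu blocks\n",
	       (unsigned long long)alloc_blocks);  /* 100 >> 4 = 6 */
	return 0;
}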
@@ -865,7 +865,7 @@ xfs_buffered_write_iomap_begin
 	}
 
 	/*
-	 * Search the data fork fork first to look up our source mapping. We
+	 * Search the data fork first to look up our source mapping. We
 	 * always need the data fork map, as we have to return it to the
 	 * iomap code so that the higher level write code can read data in to
 	 * perform read-modify-write cycles for unaligned writes.