Diffstat (limited to 'include/linux/blkdev.h')
 -rw-r--r--  include/linux/blkdev.h | 62 ++++++++++++++++++++----------
 1 file changed, 41 insertions(+), 21 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3d9cf326574f..48f05d768a53 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -90,18 +90,17 @@ struct request {
struct list_head queuelist;
union {
struct call_single_data csd;
- unsigned long fifo_time;
+ u64 fifo_time;
};
struct request_queue *q;
struct blk_mq_ctx *mq_ctx;
- u64 cmd_flags;
+ int cpu;
unsigned cmd_type;
+ u64 cmd_flags;
unsigned long atomic_flags;
- int cpu;
-
/* the following two fields are internal, NEVER access directly */
unsigned int __data_len; /* total data len */
sector_t __sector; /* sector cursor */
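
Beyond widening fifo_time and cmd_flags to u64, this hunk moves cpu up next to cmd_type. On LP64 targets that pairing matters for packing: the two 4-byte fields share one 8-byte slot instead of each leaving a padding hole beside an 8-byte neighbour. A standalone sketch of the effect, using only the field subset visible in the hunk (types taken from the diff, everything else elided):

#include <stdio.h>
#include <stdint.h>

/* Old order: cmd_type's 4 bytes sit alone before the 8-byte
 * atomic_flags, and cpu's 4 bytes pad out the tail. */
struct old_order {
	uint64_t cmd_flags;
	unsigned int cmd_type;      /* 4-byte hole follows */
	unsigned long atomic_flags;
	int cpu;                    /* 4 bytes of tail padding */
};

/* New order: cpu and cmd_type pack into one 8-byte slot. */
struct new_order {
	int cpu;
	unsigned int cmd_type;
	uint64_t cmd_flags;
	unsigned long atomic_flags;
};

int main(void)
{
	printf("old=%zu new=%zu\n",    /* 32 vs 24 on LP64 */
	       sizeof(struct old_order), sizeof(struct new_order));
	return 0;
}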
@@ -200,6 +199,20 @@ struct request {
struct request *next_rq;
};
+#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS)
+#define req_op(req) ((req)->cmd_flags >> REQ_OP_SHIFT)
+
+#define req_set_op(req, op) do { \
+	WARN_ON(op >= (1 << REQ_OP_BITS)); \
+	(req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1); \
+	(req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT); \
+} while (0)
+
+#define req_set_op_attrs(req, op, flags) do { \
+	req_set_op(req, op); \
+	(req)->cmd_flags |= flags; \
+} while (0)
+
static inline unsigned short req_get_ioprio(struct request *req)
{
return req->ioprio;
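
The added block packs the operation into the topmost REQ_OP_BITS of the now 64-bit cmd_flags, leaving the low bits for modifier flags; req_op() just shifts it back out. A user-space sketch of the round trip, assuming REQ_OP_BITS is 3 as in this series' blk_types.h and using made-up op/flag values:

#include <stdio.h>
#include <stdint.h>

#define REQ_OP_BITS  3
#define REQ_OP_SHIFT (8 * sizeof(uint64_t) - REQ_OP_BITS)

int main(void)
{
	uint64_t cmd_flags = 0;
	unsigned int op = 2;        /* stand-in for e.g. REQ_OP_DISCARD */
	uint64_t flags = 1ULL << 4; /* stand-in modifier flag */

	/* req_set_op_attrs(): clear the op bits, or in op, then flags */
	cmd_flags &= (1ULL << REQ_OP_SHIFT) - 1;
	cmd_flags |= (uint64_t)op << REQ_OP_SHIFT;
	cmd_flags |= flags;

	/* req_op(): the op comes back out of the top three bits */
	printf("op=%u flags=%#llx\n",
	       (unsigned int)(cmd_flags >> REQ_OP_SHIFT),
	       (unsigned long long)(cmd_flags & ((1ULL << REQ_OP_SHIFT) - 1)));
	return 0;
}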
@@ -492,6 +505,7 @@ struct request_queue {
#define QUEUE_FLAG_WC 23 /* Write back caching */
#define QUEUE_FLAG_FUA 24 /* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueable */
+#define QUEUE_FLAG_DAX 26 /* device supports DAX */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -581,6 +595,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q) (blk_queue_discard(q) && \
test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
+#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_noretry_request(rq) \
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
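
QUEUE_FLAG_DAX and the blk_queue_dax() test above follow the pattern of the existing queue-flag helpers. A hedged sketch of both sides; queue_flag_set_unlocked() as the setter is an assumption about the era's API, and the example_* names are invented for illustration:

/* Driver side: advertise DAX capability while setting up the queue. */
static void example_init_queue(struct request_queue *q, bool supports_dax)
{
	if (supports_dax)
		queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
}

/* Caller side: gate a direct-access path on the flag. */
static bool example_try_dax(struct block_device *bdev)
{
	return blk_queue_dax(bdev_get_queue(bdev));
}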
@@ -597,7 +612,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
-#define rq_data_dir(rq) ((int)((rq)->cmd_flags & 1))
+#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
/*
* Driver can handle struct request, if it either has an old style
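
The old rq_data_dir() relied on the write bit being bit 0 of cmd_flags; with the op moved into the top bits, direction is now derived from the op itself. In this series op_is_write() treats every op other than a read as a write, so the macro is roughly equivalent to the following (a sketch, not the upstream helper):

static inline int example_rq_data_dir(const struct request *rq)
{
	/* Only REQ_OP_READ counts as a read; discard, write-same,
	 * etc. all move data toward the device. */
	return req_op(rq) == REQ_OP_READ ? READ : WRITE;
}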
@@ -616,14 +631,14 @@ static inline unsigned int blk_queue_cluster(struct request_queue *q)
/*
* We regard a request as sync if it is either a read or a sync write
*/
-static inline bool rw_is_sync(unsigned int rw_flags)
+static inline bool rw_is_sync(int op, unsigned int rw_flags)
{
- return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
+ return op == REQ_OP_READ || (rw_flags & REQ_SYNC);
}
static inline bool rq_is_sync(struct request *rq)
{
- return rw_is_sync(rq->cmd_flags);
+ return rw_is_sync(req_op(rq), rq->cmd_flags);
}
static inline bool blk_rl_full(struct request_list *rl, bool sync)
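
With op and flags split, the sync rule in rw_is_sync() above reads directly off its arguments: reads are always sync, writes only when REQ_SYNC is set. Spelled out (illustrative values):

rw_is_sync(REQ_OP_READ,  0)         -> true   /* reads are always sync */
rw_is_sync(REQ_OP_WRITE, 0)         -> false
rw_is_sync(REQ_OP_WRITE, REQ_SYNC)  -> true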
@@ -652,22 +667,25 @@ static inline bool rq_mergeable(struct request *rq)
if (rq->cmd_type != REQ_TYPE_FS)
return false;
+ if (req_op(rq) == REQ_OP_FLUSH)
+ return false;
+
if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
return false;
return true;
}
-static inline bool blk_check_merge_flags(unsigned int flags1,
- unsigned int flags2)
+static inline bool blk_check_merge_flags(unsigned int flags1, unsigned int op1,
+ unsigned int flags2, unsigned int op2)
{
- if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
+ if ((op1 == REQ_OP_DISCARD) != (op2 == REQ_OP_DISCARD))
return false;
if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
return false;
- if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
+ if ((op1 == REQ_OP_WRITE_SAME) != (op2 == REQ_OP_WRITE_SAME))
return false;
return true;
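
blk_check_merge_flags() now needs the op and the flags for both requests, so a caller has to pull the op out with req_op(). A hedged sketch of the call shape after this change (not quoted from the series):

if (!blk_check_merge_flags(req->cmd_flags, req_op(req),
			   next->cmd_flags, req_op(next)))
	return false;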
@@ -879,12 +897,12 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
}
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
- unsigned int cmd_flags)
+ int op)
{
- if (unlikely(cmd_flags & REQ_DISCARD))
+ if (unlikely(op == REQ_OP_DISCARD))
return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
- if (unlikely(cmd_flags & REQ_WRITE_SAME))
+ if (unlikely(op == REQ_OP_WRITE_SAME))
return q->limits.max_write_same_sectors;
return q->limits.max_sectors;
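
Two things worth noting above: callers now pass the op rather than raw cmd_flags, and the UINT_MAX >> 9 clamp keeps the discard limit below UINT_MAX bytes, since request sizes are carried in the unsigned int __data_len seen earlier. A call now looks like:

unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));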
@@ -904,18 +922,19 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
(offset & (q->limits.chunk_sectors - 1));
}
-static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
+static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
+ sector_t offset)
{
struct request_queue *q = rq->q;
if (unlikely(rq->cmd_type != REQ_TYPE_FS))
return q->limits.max_hw_sectors;
- if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
- return blk_queue_get_max_sectors(q, rq->cmd_flags);
+ if (!q->limits.chunk_sectors || (req_op(rq) == REQ_OP_DISCARD))
+ return blk_queue_get_max_sectors(q, req_op(rq));
- return min(blk_max_size_offset(q, blk_rq_pos(rq)),
- blk_queue_get_max_sectors(q, rq->cmd_flags));
+ return min(blk_max_size_offset(q, offset),
+ blk_queue_get_max_sectors(q, req_op(rq)));
}
static inline unsigned int blk_rq_count_bios(struct request *rq)
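
The new offset parameter of blk_rq_get_max_sectors() lets the caller ask about the position a merged request would start at, instead of the hardwired blk_rq_pos(rq) the old body used. A hedged sketch of a back-merge size check built on it (caller shape assumed):

if (blk_rq_sectors(req) + bio_sectors(bio) >
    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
	return 0;	/* would exceed the limit at this offset */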
@@ -1141,7 +1160,8 @@ extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop);
+ sector_t nr_sects, gfp_t gfp_mask, int op_flags,
+ struct bio **biop);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct page *page);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
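
After the signature change, the int argument of __blkdev_issue_discard() carries op flags rather than a discard type, and the caller owns submission of the chained bios returned through *biop. A minimal hedged usage sketch (the zero op_flags value and the submit step are assumptions, not quoted from the series):

struct bio *bio = NULL;
int ret;

ret = __blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
			     0 /* op_flags, e.g. REQ_SECURE */, &bio);
if (!ret && bio)
	ret = submit_bio_wait(bio);	/* caller submits the chain */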