path: root/block/blk.h
Diffstat (limited to 'block/blk.h')
-rw-r--r--  block/blk.h | 37
1 file changed, 17 insertions(+), 20 deletions(-)
diff --git a/block/blk.h b/block/blk.h
index b5d1f0fc6547..49e2928a1632 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -14,9 +14,7 @@
/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT (5 * HZ)
-#ifdef CONFIG_DEBUG_FS
extern struct dentry *blk_debugfs_root;
-#endif
struct blk_flush_queue {
unsigned int flush_pending_idx:1;
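The hunk above makes the blk_debugfs_root declaration unconditional. A minimal sketch of why this is safe when CONFIG_DEBUG_FS=n, assuming the usual debugfs behaviour of providing inline stubs; the init function name is hypothetical and the snippet is illustrative only, not the kernel's actual setup path:

#include <linux/debugfs.h>
#include <linux/init.h>

/* Declaration is now visible regardless of CONFIG_DEBUG_FS. */
extern struct dentry *blk_debugfs_root;

static int __init example_blk_debugfs_setup(void)
{
	/*
	 * With CONFIG_DEBUG_FS=n, debugfs_create_dir() is a static inline
	 * stub that returns an error pointer, so this compiles and runs
	 * either way; later debugfs calls simply become no-ops.
	 */
	blk_debugfs_root = debugfs_create_dir("block", NULL);
	return 0;
}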
@@ -27,11 +25,6 @@ struct blk_flush_queue {
struct list_head flush_data_in_flight;
struct request *flush_rq;
- /*
- * flush_rq shares tag with this rq, both can't be active
- * at the same time
- */
- struct request *orig_rq;
struct lock_class_key key;
spinlock_t mq_flush_lock;
};
@@ -223,21 +216,11 @@ ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
-
-#ifdef CONFIG_FAIL_IO_TIMEOUT
-int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
const char *, size_t);
-#else
-static inline int blk_should_fake_timeout(struct request_queue *q)
-{
- return 0;
-}
-#endif
-void __blk_queue_split(struct request_queue *q, struct bio **bio,
- unsigned int *nr_segs);
+void __blk_queue_split(struct bio **bio, unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
unsigned int nr_segs);
int ll_front_merge_fn(struct request *req, struct bio *bio,
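In the hunk above, __blk_queue_split() loses its struct request_queue argument; presumably the queue is now derived from the bio inside the function. A hypothetical before/after caller sketch (the local variable names are illustrative, not taken from the kernel source):

	/* before: the caller had to pass the queue explicitly */
	__blk_queue_split(q, &bio, &nr_segs);

	/*
	 * after: only the bio and the segment count are passed;
	 * the function can look the queue up from the bio itself.
	 */
	__blk_queue_split(&bio, &nr_segs);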
@@ -282,6 +265,20 @@ static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
}
/*
+ * The max bio size which is aligned to q->limits.discard_granularity. This
+ * is a hint for splitting large discard bios in the generic block layer: if
+ * the device driver later needs to split the discard bio into smaller ones,
+ * their bi_size is then very likely to already be aligned to the
+ * discard_granularity of the device's queue.
+ */
+static inline unsigned int bio_aligned_discard_max_sectors(
+ struct request_queue *q)
+{
+ return round_down(UINT_MAX, q->limits.discard_granularity) >>
+ SECTOR_SHIFT;
+}
+
+/*
* Internal io_context interface
*/
void get_io_context(struct io_context *ioc);
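To make the arithmetic in the newly added bio_aligned_discard_max_sectors() concrete, here is a small user-space sketch of the same computation. The 1 MiB discard_granularity is an arbitrary example, and round_down()/SECTOR_SHIFT are redefined locally (equivalent to the kernel macros for power-of-two granularities) so the snippet builds outside the kernel tree:

#include <limits.h>
#include <stdio.h>

#define SECTOR_SHIFT	9				/* 512-byte sectors, as in the kernel */
#define round_down(x, y)	((x) - ((x) % (y)))	/* matches the kernel macro for power-of-two y */

int main(void)
{
	/* Example: a device advertising a 1 MiB discard granularity. */
	unsigned int discard_granularity = 1U << 20;

	/*
	 * Largest bi_size (in bytes) that is a multiple of the granularity,
	 * then converted to 512-byte sectors -- the same computation as
	 * bio_aligned_discard_max_sectors().
	 */
	unsigned int max_bytes   = round_down(UINT_MAX, discard_granularity);
	unsigned int max_sectors = max_bytes >> SECTOR_SHIFT;

	printf("max discard bio: %u bytes = %u sectors\n", max_bytes, max_sectors);
	return 0;
}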
@@ -299,10 +296,12 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
+bool blk_throtl_bio(struct bio *bio);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
+static inline bool blk_throtl_bio(struct bio *bio) { return false; }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
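The new blk_throtl_bio() prototype and its !CONFIG_BLK_DEV_THROTTLING stub suggest a boolean "was this bio throttled?" contract. A hedged sketch of how a submission path could consume it; the caller shown here is hypothetical, not the actual block-core call site:

/*
 * Hypothetical caller: if blk_throtl_bio() returns true, the bio has been
 * queued by the throttling layer and must not be submitted further right
 * now.  With CONFIG_BLK_DEV_THROTTLING=n the inline stub always returns
 * false, so this check costs nothing.
 */
static bool example_submit_checks(struct bio *bio)
{
	if (blk_throtl_bio(bio))
		return false;	/* bio held back by throttling */

	return true;		/* safe to continue submission */
}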
@@ -434,8 +433,6 @@ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
#endif
}
-struct request_queue *__blk_alloc_queue(int node_id);
-
int bio_add_hw_page(struct request_queue *q, struct bio *bio,
struct page *page, unsigned int len, unsigned int offset,
unsigned int max_sectors, bool *same_page);