Diffstat (limited to 'block')
-rw-r--r--  block/badblocks.c     | 29
-rw-r--r--  block/blk-flush.c     | 28
-rw-r--r--  block/blk-map.c       |  4
-rw-r--r--  block/blk-settings.c  |  1
-rw-r--r--  block/blk-sysfs.c     |  1
5 files changed, 61 insertions, 2 deletions
diff --git a/block/badblocks.c b/block/badblocks.c
index 7be53cb1cc3c..6ebcef282314 100644
--- a/block/badblocks.c
+++ b/block/badblocks.c
@@ -133,6 +133,26 @@ retry:
}
EXPORT_SYMBOL_GPL(badblocks_check);
+static void badblocks_update_acked(struct badblocks *bb)
+{
+ u64 *p = bb->page;
+ int i;
+ bool unacked = false;
+
+ if (!bb->unacked_exist)
+ return;
+
+ for (i = 0; i < bb->count; i++) {
+ if (!BB_ACK(p[i])) {
+ unacked = true;
+ break;
+ }
+ }
+
+ if (!unacked)
+ bb->unacked_exist = 0;
+}
+
/**
* badblocks_set() - Add a range of bad blocks to the table.
* @bb: the badblocks structure that holds all badblock information
@@ -294,6 +314,8 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
bb->changed = 1;
if (!acknowledged)
bb->unacked_exist = 1;
+ else
+ badblocks_update_acked(bb);
write_sequnlock_irqrestore(&bb->lock, flags);
return rv;
@@ -354,7 +376,8 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
* current range. Earlier ranges could also overlap,
* but only this one can overlap the end of the range.
*/
- if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
+ if ((BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) &&
+ (BB_OFFSET(p[lo]) < target)) {
/* Partial overlap, leave the tail of this range */
int ack = BB_ACK(p[lo]);
sector_t a = BB_OFFSET(p[lo]);
@@ -377,7 +400,8 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
lo--;
}
while (lo >= 0 &&
- BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
+ (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) &&
+ (BB_OFFSET(p[lo]) < target)) {
/* This range does overlap */
if (BB_OFFSET(p[lo]) < s) {
/* Keep the early parts of this range. */
@@ -399,6 +423,7 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
}
}
+ badblocks_update_acked(bb);
bb->changed = 1;
out:
write_sequnlock_irq(&bb->lock);
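
The clear path above now treats a stored bad range as overlapping the cleared range [s, target) only if it both ends after s and starts before target. Below is a small user-space sketch of that predicate, not part of the patch; the entry layout mirrors the BB_* macros in include/linux/badblocks.h (sector offset in bits 9-62, length minus one in bits 0-8, ack flag in bit 63), and the sample ranges are made up for illustration.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* same layout as the kernel's badblocks entries: units are 512-byte sectors */
#define BB_OFFSET(x)        (((x) & 0x7ffffffffffffe00ULL) >> 9)
#define BB_LEN(x)           (((x) & 0x1ffULL) + 1)
#define BB_MAKE(a, l, ack)  (((uint64_t)(a) << 9) | ((l) - 1) | \
                             ((uint64_t)!!(ack) << 63))

/* the tightened test: an entry overlaps [s, target) only if it starts
 * before target AND ends after s */
static bool overlaps(uint64_t e, uint64_t s, uint64_t target)
{
        return BB_OFFSET(e) + BB_LEN(e) > s && BB_OFFSET(e) < target;
}

int main(void)
{
        uint64_t s = 100, target = 200;   /* clearing sectors [100, 200) */

        assert(overlaps(BB_MAKE(90, 20, 0), s, target));    /* head overlap   */
        assert(overlaps(BB_MAKE(150, 10, 0), s, target));   /* fully inside   */
        assert(!overlaps(BB_MAKE(200, 8, 0), s, target));   /* starts at end  */
        assert(!overlaps(BB_MAKE(60, 20, 0), s, target));   /* ends before s  */
        return 0;
}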
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 27a42dab5a36..20b7c7a02f1c 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -341,6 +341,34 @@ static void flush_data_end_io(struct request *rq, int error)
struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
/*
+ * Update q->in_flight[] here to make this tag usable again
+ * earlier: in blk_queue_start_tag(), q->in_flight[BLK_RW_ASYNC]
+ * is used to limit async I/O and to reserve tags for sync I/O.
+ *
+ * More importantly this way can avoid the following I/O
+ * deadlock:
+ *
+ * - suppose there are 40 FUA requests coming to the flush queue
+ * and the queue depth is 31
+ * - 30 rqs are scheduled, then blk_queue_start_tag() can't allocate
+ * a tag for async I/O any more
+ * - all the 30 rqs are completed before FLUSH_PENDING_TIMEOUT
+ * and flush_data_end_io() is called
+ * - without updating q->in_flight[BLK_RW_ASYNC] here, the other
+ * rqs still can't go ahead; meanwhile these rqs are held in the
+ * flush data queue and the post flush rq makes no progress
+ * - only after the post flush rq is handled can all these rqs
+ * be completed
+ */
+
+ elv_completed_request(q, rq);
+
+ /* avoid double accounting of this rq */
+ rq->rq_flags &= ~RQF_STARTED;
+
+ /*
* After populating an empty queue, kick it to avoid stall. Read
* the comment in flush_end_io().
*/
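
The deadlock the new comment describes is a tag-accounting problem: with a depth of 31 and a tag held back for sync I/O, 30 async requests exhaust the async budget, and if flush_data_end_io() never drops q->in_flight[BLK_RW_ASYNC] the remaining requests can never be tagged, so the post flush rq is never reached. Below is a toy user-space model of that arithmetic, not the real block layer; the depth and the total of 40 requests come from the comment, while the reserve-one-tag rule is only an assumption for illustration.

#include <stdbool.h>
#include <stdio.h>

static int in_flight_async;     /* stands in for q->in_flight[BLK_RW_ASYNC] */

/* refuse async tags near the limit so sync I/O can always find one */
static bool start_async_tag(int depth)
{
        if (in_flight_async >= depth - 1)
                return false;
        in_flight_async++;
        return true;
}

/* flush data phase done; with the fix the async accounting is dropped even
 * though the request is still parked on the flush queue */
static void flush_data_done(bool fixed)
{
        if (fixed)
                in_flight_async--;
}

int main(void)
{
        const int depth = 31, total = 40;
        const bool fixed = false;        /* flip to true to model the fix */
        int issued = 0, done = 0;

        while (done < total) {
                while (issued < total && start_async_tag(depth))
                        issued++;
                if (issued == done) {
                        printf("stuck: %d requests can never get a tag\n",
                               total - done);
                        return 1;
                }
                while (done < issued) {
                        flush_data_done(fixed);
                        done++;
                }
        }
        printf("all %d requests completed\n", total);
        return 0;
}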
diff --git a/block/blk-map.c b/block/blk-map.c
index 0173a72a8aa9..0acb6640ead7 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -120,6 +120,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
struct iov_iter i;
int ret;
+ if (!iter_is_iovec(iter))
+ goto fail;
+
if (map_data)
copy = true;
else if (iov_iter_alignment(iter) & align)
@@ -142,6 +145,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
unmap_rq:
__blk_rq_unmap_user(bio);
+fail:
rq->bio = NULL;
return -EINVAL;
}
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 8a2bc124a684..529e55f52a03 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -253,6 +253,7 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
limits->max_sectors = max_sectors;
+ q->backing_dev_info.io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 706b27bd73a1..1dbce057592d 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -236,6 +236,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
spin_lock_irq(q->queue_lock);
q->limits.max_sectors = max_sectors_kb << 1;
+ q->backing_dev_info.io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
spin_unlock_irq(q->queue_lock);
return ret;
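
The last two hunks derive backing_dev_info.io_pages from the same limit expressed in two units: 512-byte sectors in blk_queue_max_hw_sectors() (shift right by PAGE_SHIFT - 9) and kilobytes in queue_max_sectors_store() (shift right by PAGE_SHIFT - 10). A quick sanity check of the arithmetic, assuming 4 KiB pages (PAGE_SHIFT = 12) and a made-up limit of 2560 sectors:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed: 4 KiB pages */

int main(void)
{
        unsigned int max_sectors = 2560;                 /* 512-byte sectors */
        unsigned int max_sectors_kb = max_sectors >> 1;  /* same limit in KiB */

        unsigned int io_pages_from_sectors = max_sectors >> (PAGE_SHIFT - 9);
        unsigned int io_pages_from_kb = max_sectors_kb >> (PAGE_SHIFT - 10);

        /* both conversions agree: 2560 sectors = 1280 KiB = 320 pages */
        assert(io_pages_from_sectors == io_pages_from_kb);
        printf("io_pages = %u\n", io_pages_from_sectors);
        return 0;
}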