Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.h   |   2
-rw-r--r--  block/blk-core.c     |  42
-rw-r--r--  block/blk-exec.c     |   2
-rw-r--r--  block/blk-flush.c    |  18
-rw-r--r--  block/blk-lib.c      |   2
-rw-r--r--  block/blk-settings.c |   7
-rw-r--r--  block/blk-sysfs.c    |   2
-rw-r--r--  block/blk-throttle.c | 131
-rw-r--r--  block/cfq-iosched.c  |  41
-rw-r--r--  block/elevator.c     |   4
-rw-r--r--  block/genhd.c        |   4
-rw-r--r--  block/ioctl.c        |   8
12 files changed, 144 insertions, 119 deletions
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index ea4861bdd549..57e7234c5ae5 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -240,7 +240,7 @@ static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
#endif
-#define BLKIO_WEIGHT_MIN 100
+#define BLKIO_WEIGHT_MIN 10
#define BLKIO_WEIGHT_MAX 1000
#define BLKIO_WEIGHT_DEFAULT 500
diff --git a/block/blk-core.c b/block/blk-core.c
index 7e9715ae18c8..e1fcf7a24668 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -217,7 +217,7 @@ static void blk_delay_work(struct work_struct *work)
q = container_of(work, struct request_queue, delay_work.work);
spin_lock_irq(q->queue_lock);
- __blk_run_queue(q);
+ __blk_run_queue(q, false);
spin_unlock_irq(q->queue_lock);
}
@@ -251,7 +251,7 @@ void blk_start_queue(struct request_queue *q)
WARN_ON(!irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
- __blk_run_queue(q);
+ __blk_run_queue(q, false);
}
EXPORT_SYMBOL(blk_start_queue);
@@ -289,11 +289,14 @@ EXPORT_SYMBOL(blk_stop_queue);
* that its ->make_request_fn will not re-add plugging prior to calling
* this function.
*
+ * This function does not cancel any asynchronous activity arising
+ * out of elevator or throttling code. That would require elevator_exit()
+ * and blk_throtl_exit() to be called with queue lock initialized.
+ *
*/
void blk_sync_queue(struct request_queue *q)
{
del_timer_sync(&q->timeout);
- throtl_shutdown_timer_wq(q);
cancel_delayed_work_sync(&q->delay_work);
queue_sync_plugs(q);
}
@@ -302,13 +305,14 @@ EXPORT_SYMBOL(blk_sync_queue);
/**
* __blk_run_queue - run a single device queue
* @q: The queue to run
+ * @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
*
* Description:
* See @blk_run_queue. This variant must be called with the queue lock
* held and interrupts disabled.
*
*/
-void __blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q, bool force_kblockd)
{
if (unlikely(blk_queue_stopped(q)))
return;
@@ -317,7 +321,7 @@ void __blk_run_queue(struct request_queue *q)
* Only recurse once to avoid overrunning the stack, let the unplug
* handling reinvoke the handler shortly if we already got there.
*/
- if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+ if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
q->request_fn(q);
queue_flag_clear(QUEUE_FLAG_REENTER, q);
} else
@@ -338,7 +342,7 @@ void blk_run_queue(struct request_queue *q)
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
- __blk_run_queue(q);
+ __blk_run_queue(q, false);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
@@ -348,6 +352,11 @@ void blk_put_queue(struct request_queue *q)
kobject_put(&q->kobj);
}
+/*
+ * Note: If a driver supplied the queue lock, it should not zap that lock
+ * unexpectedly as some queue cleanup components like elevator_exit() and
+ * blk_throtl_exit() need queue lock.
+ */
void blk_cleanup_queue(struct request_queue *q)
{
/*
@@ -366,6 +375,8 @@ void blk_cleanup_queue(struct request_queue *q)
if (q->elevator)
elevator_exit(q->elevator);
+ blk_throtl_exit(q);
+
blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);
@@ -439,6 +450,12 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
mutex_init(&q->sysfs_lock);
spin_lock_init(&q->__queue_lock);
+ /*
+ * By default initialize queue_lock to internal lock and driver can
+ * override it later if need be.
+ */
+ q->queue_lock = &q->__queue_lock;
+
return q;
}
EXPORT_SYMBOL(blk_alloc_queue_node);
@@ -522,7 +539,10 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
q->prep_rq_fn = NULL;
q->unprep_rq_fn = NULL;
q->queue_flags = QUEUE_FLAG_DEFAULT;
- q->queue_lock = lock;
+
+ /* Override internal queue lock with supplied lock pointer */
+ if (lock)
+ q->queue_lock = lock;
/*
* This also sets hw/phys segments, boundary and size
@@ -971,7 +991,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
blk_queue_end_tag(q, rq);
add_acct_request(q, rq, where);
- __blk_run_queue(q);
+ __blk_run_queue(q, false);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_insert_request);
@@ -1307,7 +1327,7 @@ get_rq:
} else {
spin_lock_irq(q->queue_lock);
add_acct_request(q, req, where);
- __blk_run_queue(q);
+ __blk_run_queue(q, false);
out_unlock:
spin_unlock_irq(q->queue_lock);
}
@@ -2654,7 +2674,7 @@ static void flush_plug_list(struct blk_plug *plug)
BUG_ON(!rq->q);
if (rq->q != q) {
if (q) {
- __blk_run_queue(q);
+ __blk_run_queue(q, false);
spin_unlock(q->queue_lock);
}
q = rq->q;
@@ -2669,7 +2689,7 @@ static void flush_plug_list(struct blk_plug *plug)
}
if (q) {
- __blk_run_queue(q);
+ __blk_run_queue(q, false);
spin_unlock(q->queue_lock);
}
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 81e31819a597..7482b7fa863b 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -55,7 +55,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
WARN_ON(irqs_disabled());
spin_lock_irq(q->queue_lock);
__elv_add_request(q, rq, where);
- __blk_run_queue(q);
+ __blk_run_queue(q, false);
/* the queue is stopped so it won't be plugged+unplugged */
if (rq->cmd_type == REQ_TYPE_PM_RESUME)
q->request_fn(q);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 671fa9da7560..93d5fd8e51eb 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -211,9 +211,14 @@ static void flush_end_io(struct request *flush_rq, int error)
queued |= blk_flush_complete_seq(rq, seq, error);
}
- /* after populating an empty queue, kick it to avoid stall */
+ /*
+ * Moving a request silently to empty queue_head may stall the
+ * queue. Kick the queue in those cases. This function is called
+ * from request completion path and calling directly into
+ * request_fn may confuse the driver. Always use kblockd.
+ */
if (queued)
- __blk_run_queue(q);
+ __blk_run_queue(q, true);
}
/**
@@ -256,7 +261,7 @@ static bool blk_kick_flush(struct request_queue *q)
q->flush_rq.end_io = flush_end_io;
q->flush_pending_idx ^= 1;
- elv_insert(q, &q->flush_rq, ELEVATOR_INSERT_FRONT);
+ elv_insert(q, &q->flush_rq, ELEVATOR_INSERT_REQUEUE);
return true;
}
@@ -264,9 +269,12 @@ static void flush_data_end_io(struct request *rq, int error)
{
struct request_queue *q = rq->q;
- /* after populating an empty queue, kick it to avoid stall */
+ /*
+ * After populating an empty queue, kick it to avoid stall. Read
+ * the comment in flush_end_io().
+ */
if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
- __blk_run_queue(q);
+ __blk_run_queue(q, true);
}
/**
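The counterpart for completion context, again as a hedged sketch rather than code from the patch (example_end_io() is an invented name; the signature matches rq_end_io_fn and, as in blk-flush.c, the handler is assumed to run with the queue lock held). Passing true forces the queue run through kblockd so ->request_fn is not re-entered from a completion path.

#include <linux/blkdev.h>

/* Sketch only: an end_io handler that kicks the queue via kblockd. */
static void example_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;

	/* Don't call back into ->request_fn from completion; use kblockd. */
	__blk_run_queue(q, true);
}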
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 1a320d2406b0..eec78becb355 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -132,7 +132,7 @@ static void bio_batch_end_io(struct bio *bio, int err)
}
/**
- * blkdev_issue_zeroout generate number of zero filed write bios
+ * blkdev_issue_zeroout - generate number of zero-filled write bios
* @bdev: blockdev to issue
* @sector: start sector
* @nr_sects: number of sectors to write
diff --git a/block/blk-settings.c b/block/blk-settings.c
index c8d68921dddb..1fa769293597 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -168,13 +168,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
/*
- * If the caller didn't supply a lock, fall back to our embedded
- * per-queue locks
- */
- if (!q->queue_lock)
- q->queue_lock = &q->__queue_lock;
-
- /*
* by default assume old behaviour and bounce for any highmem page
*/
blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 41fb69150b4d..261c75c665ae 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -471,8 +471,6 @@ static void blk_release_queue(struct kobject *kobj)
blk_sync_queue(q);
- blk_throtl_exit(q);
-
if (rl->rq_pool)
mempool_destroy(rl->rq_pool);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 658ee505315b..37abbfc68590 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -20,6 +20,11 @@ static int throtl_quantum = 32;
/* Throttling is performed over 100ms slice and after that slice is renewed */
static unsigned long throtl_slice = HZ/10; /* 100 ms */
+/* A workqueue to queue throttle related work */
+static struct workqueue_struct *kthrotld_workqueue;
+static void throtl_schedule_delayed_work(struct throtl_data *td,
+ unsigned long delay);
+
struct throtl_rb_root {
struct rb_root rb;
struct rb_node *left;
@@ -97,7 +102,7 @@ struct throtl_data
/* Work for dispatching throttled bios */
struct delayed_work throtl_work;
- atomic_t limits_changed;
+ bool limits_changed;
};
enum tg_state_flags {
@@ -196,6 +201,7 @@ static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
RB_CLEAR_NODE(&tg->rb_node);
bio_list_init(&tg->bio_lists[0]);
bio_list_init(&tg->bio_lists[1]);
+ td->limits_changed = false;
/*
* Take the initial reference that will be released on destroy
@@ -345,10 +351,9 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td)
update_min_dispatch_time(st);
if (time_before_eq(st->min_disptime, jiffies))
- throtl_schedule_delayed_work(td->queue, 0);
+ throtl_schedule_delayed_work(td, 0);
else
- throtl_schedule_delayed_work(td->queue,
- (st->min_disptime - jiffies));
+ throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
}
static inline void
@@ -733,34 +738,27 @@ static void throtl_process_limit_change(struct throtl_data *td)
struct throtl_grp *tg;
struct hlist_node *pos, *n;
- if (!atomic_read(&td->limits_changed))
+ if (!td->limits_changed)
return;
- throtl_log(td, "limit changed =%d", atomic_read(&td->limits_changed));
+ xchg(&td->limits_changed, false);
- /*
- * Make sure updates from throtl_update_blkio_group_read_bps() group
- * of functions to tg->limits_changed are visible. We do not
- * want update td->limits_changed to be visible but update to
- * tg->limits_changed not being visible yet on this cpu. Hence
- * the read barrier.
- */
- smp_rmb();
+ throtl_log(td, "limits changed");
hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
- if (throtl_tg_on_rr(tg) && tg->limits_changed) {
- throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
- " riops=%u wiops=%u", tg->bps[READ],
- tg->bps[WRITE], tg->iops[READ],
- tg->iops[WRITE]);
+ if (!tg->limits_changed)
+ continue;
+
+ if (!xchg(&tg->limits_changed, false))
+ continue;
+
+ throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
+ " riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
+ tg->iops[READ], tg->iops[WRITE]);
+
+ if (throtl_tg_on_rr(tg))
tg_update_disptime(td, tg);
- tg->limits_changed = false;
- }
}
-
- smp_mb__before_atomic_dec();
- atomic_dec(&td->limits_changed);
- smp_mb__after_atomic_dec();
}
/* Dispatch throttled bios. Should be called without queue lock held. */
@@ -817,10 +815,10 @@ void blk_throtl_work(struct work_struct *work)
}
/* Call with queue lock held */
-void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
+static void
+throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
{
- struct throtl_data *td = q->td;
struct delayed_work *dwork = &td->throtl_work;
if (total_nr_queued(td) > 0) {
@@ -829,12 +827,11 @@ void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
* Cancel that and schedule a new one.
*/
__cancel_delayed_work(dwork);
- kblockd_schedule_delayed_work(q, dwork, delay);
+ queue_delayed_work(kthrotld_workqueue, dwork, delay);
throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
delay, jiffies);
}
}
-EXPORT_SYMBOL(throtl_schedule_delayed_work);
static void
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
@@ -897,6 +894,15 @@ void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
spin_unlock_irqrestore(td->queue->queue_lock, flags);
}
+static void throtl_update_blkio_group_common(struct throtl_data *td,
+ struct throtl_grp *tg)
+{
+ xchg(&tg->limits_changed, true);
+ xchg(&td->limits_changed, true);
+ /* Schedule a work now to process the limit change */
+ throtl_schedule_delayed_work(td, 0);
+}
+
/*
* For all update functions, key should be a valid pointer because these
* update functions are called under blkcg_lock, that means, blkg is
@@ -910,64 +916,43 @@ static void throtl_update_blkio_group_read_bps(void *key,
struct blkio_group *blkg, u64 read_bps)
{
struct throtl_data *td = key;
+ struct throtl_grp *tg = tg_of_blkg(blkg);
- tg_of_blkg(blkg)->bps[READ] = read_bps;
- /* Make sure read_bps is updated before setting limits_changed */
- smp_wmb();
- tg_of_blkg(blkg)->limits_changed = true;
-
- /* Make sure tg->limits_changed is updated before td->limits_changed */
- smp_mb__before_atomic_inc();
- atomic_inc(&td->limits_changed);
- smp_mb__after_atomic_inc();
-
- /* Schedule a work now to process the limit change */
- throtl_schedule_delayed_work(td->queue, 0);
+ tg->bps[READ] = read_bps;
+ throtl_update_blkio_group_common(td, tg);
}
static void throtl_update_blkio_group_write_bps(void *key,
struct blkio_group *blkg, u64 write_bps)
{
struct throtl_data *td = key;
+ struct throtl_grp *tg = tg_of_blkg(blkg);
- tg_of_blkg(blkg)->bps[WRITE] = write_bps;
- smp_wmb();
- tg_of_blkg(blkg)->limits_changed = true;
- smp_mb__before_atomic_inc();
- atomic_inc(&td->limits_changed);
- smp_mb__after_atomic_inc();
- throtl_schedule_delayed_work(td->queue, 0);
+ tg->bps[WRITE] = write_bps;
+ throtl_update_blkio_group_common(td, tg);
}
static void throtl_update_blkio_group_read_iops(void *key,
struct blkio_group *blkg, unsigned int read_iops)
{
struct throtl_data *td = key;
+ struct throtl_grp *tg = tg_of_blkg(blkg);
- tg_of_blkg(blkg)->iops[READ] = read_iops;
- smp_wmb();
- tg_of_blkg(blkg)->limits_changed = true;
- smp_mb__before_atomic_inc();
- atomic_inc(&td->limits_changed);
- smp_mb__after_atomic_inc();
- throtl_schedule_delayed_work(td->queue, 0);
+ tg->iops[READ] = read_iops;
+ throtl_update_blkio_group_common(td, tg);
}
static void throtl_update_blkio_group_write_iops(void *key,
struct blkio_group *blkg, unsigned int write_iops)
{
struct throtl_data *td = key;
+ struct throtl_grp *tg = tg_of_blkg(blkg);
- tg_of_blkg(blkg)->iops[WRITE] = write_iops;
- smp_wmb();
- tg_of_blkg(blkg)->limits_changed = true;
- smp_mb__before_atomic_inc();
- atomic_inc(&td->limits_changed);
- smp_mb__after_atomic_inc();
- throtl_schedule_delayed_work(td->queue, 0);
+ tg->iops[WRITE] = write_iops;
+ throtl_update_blkio_group_common(td, tg);
}
-void throtl_shutdown_timer_wq(struct request_queue *q)
+static void throtl_shutdown_wq(struct request_queue *q)
{
struct throtl_data *td = q->td;
@@ -1008,15 +993,10 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
/*
* There is already another bio queued in same dir. No
* need to update dispatch time.
- * Still update the disptime if rate limits on this group
- * were changed.
*/
- if (!tg->limits_changed)
- update_disptime = false;
- else
- tg->limits_changed = false;
-
+ update_disptime = false;
goto queue_bio;
+
}
/* Bio is with-in rate limit of group */
@@ -1057,7 +1037,7 @@ int blk_throtl_init(struct request_queue *q)
INIT_HLIST_HEAD(&td->tg_list);
td->tg_service_tree = THROTL_RB_ROOT;
- atomic_set(&td->limits_changed, 0);
+ td->limits_changed = false;
/* Init root group */
tg = &td->root_tg;
@@ -1069,6 +1049,7 @@ int blk_throtl_init(struct request_queue *q)
/* Practically unlimited BW */
tg->bps[0] = tg->bps[1] = -1;
tg->iops[0] = tg->iops[1] = -1;
+ td->limits_changed = false;
/*
* Set root group reference to 2. One reference will be dropped when
@@ -1101,7 +1082,7 @@ void blk_throtl_exit(struct request_queue *q)
BUG_ON(!td);
- throtl_shutdown_timer_wq(q);
+ throtl_shutdown_wq(q);
spin_lock_irq(q->queue_lock);
throtl_release_tgs(td);
@@ -1131,12 +1112,16 @@ void blk_throtl_exit(struct request_queue *q)
* update limits through cgroup and another work got queued, cancel
* it.
*/
- throtl_shutdown_timer_wq(q);
+ throtl_shutdown_wq(q);
throtl_td_free(td);
}
static int __init throtl_init(void)
{
+ kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
+ if (!kthrotld_workqueue)
+ panic("Failed to create kthrotld\n");
+
blkio_policy_register(&blkio_policy_throtl);
return 0;
}
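The limits_changed conversion above replaces an atomic counter plus explicit memory barriers with a plain bool toggled via xchg(). A simplified sketch of that handoff follows, with invented example_* names and a made-up structure standing in for throtl_data/throtl_grp; the real code lives in throtl_update_blkio_group_common() and throtl_process_limit_change().

#include <linux/types.h>

/* Sketch only, not the actual throttle structures. */
struct example_group {
	u64  bps;
	bool limits_changed;
};

static void example_update_limit(struct example_group *grp,
				 bool *any_changed, u64 bps)
{
	grp->bps = bps;
	/* xchg() is fully ordered, so the bps store above is visible
	 * before either flag is observed set. */
	xchg(&grp->limits_changed, true);
	xchg(any_changed, true);
	/* the real code schedules the dispatch work here */
}

static void example_process_changes(struct example_group *grp,
				    bool *any_changed)
{
	if (!xchg(any_changed, false))
		return;

	if (xchg(&grp->limits_changed, false)) {
		/* recompute this group's dispatch time */
	}
}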
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ef631539dd2a..c826ef81c679 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -237,6 +237,7 @@ struct cfq_data {
struct rb_root prio_trees[CFQ_PRIO_LISTS];
unsigned int busy_queues;
+ unsigned int busy_sync_queues;
int rq_in_driver;
int rq_in_flight[2];
@@ -549,15 +550,13 @@ static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
static void update_min_vdisktime(struct cfq_rb_root *st)
{
- u64 vdisktime = st->min_vdisktime;
struct cfq_group *cfqg;
if (st->left) {
cfqg = rb_entry_cfqg(st->left);
- vdisktime = min_vdisktime(vdisktime, cfqg->vdisktime);
+ st->min_vdisktime = max_vdisktime(st->min_vdisktime,
+ cfqg->vdisktime);
}
-
- st->min_vdisktime = max_vdisktime(st->min_vdisktime, vdisktime);
}
/*
@@ -1337,6 +1336,8 @@ static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
BUG_ON(cfq_cfqq_on_rr(cfqq));
cfq_mark_cfqq_on_rr(cfqq);
cfqd->busy_queues++;
+ if (cfq_cfqq_sync(cfqq))
+ cfqd->busy_sync_queues++;
cfq_resort_rr_list(cfqd, cfqq);
}
@@ -1363,6 +1364,8 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
cfq_group_service_tree_del(cfqd, cfqq->cfqg);
BUG_ON(!cfqd->busy_queues);
cfqd->busy_queues--;
+ if (cfq_cfqq_sync(cfqq))
+ cfqd->busy_sync_queues--;
}
/*
@@ -2370,6 +2373,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
* Does this cfqq already have too much IO in flight?
*/
if (cfqq->dispatched >= max_dispatch) {
+ bool promote_sync = false;
/*
* idle queue must always only have a single IO in flight
*/
@@ -2377,15 +2381,31 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
return false;
/*
+ * If there is only one sync queue, and its think time is
+ * small, we can ignore async queue here and give the sync
+ * queue no dispatch limit. The reason is a sync queue can
+ * preempt async queue, limiting the sync queue doesn't make
+ * sense. This is useful for aiostress test.
+ */
+ if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1) {
+ struct cfq_io_context *cic = RQ_CIC(cfqq->next_rq);
+
+ if (sample_valid(cic->ttime_samples) &&
+ cic->ttime_mean < cfqd->cfq_slice_idle)
+ promote_sync = true;
+ }
+
+ /*
* We have other queues, don't allow more IO from this one
*/
- if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq))
+ if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
+ !promote_sync)
return false;
/*
* Sole queue user, no limit
*/
- if (cfqd->busy_queues == 1)
+ if (cfqd->busy_queues == 1 || promote_sync)
max_dispatch = -1;
else
/*
@@ -3317,7 +3337,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfqd->busy_queues > 1) {
cfq_del_timer(cfqd, cfqq);
cfq_clear_cfqq_wait_request(cfqq);
- __blk_run_queue(cfqd->queue);
+ __blk_run_queue(cfqd->queue, false);
} else {
cfq_blkiocg_update_idle_time_stats(
&cfqq->cfqg->blkg);
@@ -3332,7 +3352,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
* this new queue is RT and the current one is BE
*/
cfq_preempt_queue(cfqd, cfqq);
- __blk_run_queue(cfqd->queue);
+ __blk_run_queue(cfqd->queue, false);
}
}
@@ -3668,12 +3688,11 @@ new_queue:
cfqq->allocated[rw]++;
- spin_unlock_irqrestore(q->queue_lock, flags);
-
cfqq->ref++;
rq->elevator_private[0] = cic;
rq->elevator_private[1] = cfqq;
rq->elevator_private[2] = cfq_ref_get_cfqg(cfqq->cfqg);
+ spin_unlock_irqrestore(q->queue_lock, flags);
return 0;
queue_fail:
@@ -3693,7 +3712,7 @@ static void cfq_kick_queue(struct work_struct *work)
struct request_queue *q = cfqd->queue;
spin_lock_irq(q->queue_lock);
- __blk_run_queue(cfqd->queue);
+ __blk_run_queue(cfqd->queue, false);
spin_unlock_irq(q->queue_lock);
}
diff --git a/block/elevator.c b/block/elevator.c
index 3ea208256e78..542ce826b401 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -604,7 +604,7 @@ void elv_quiesce_start(struct request_queue *q)
*/
elv_drain_elevator(q);
while (q->rq.elvpriv) {
- __blk_run_queue(q);
+ __blk_run_queue(q, false);
spin_unlock_irq(q->queue_lock);
msleep(10);
spin_lock_irq(q->queue_lock);
@@ -644,7 +644,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
* with anything. There's no point in delaying queue
* processing.
*/
- __blk_run_queue(q);
+ __blk_run_queue(q, false);
break;
case ELEVATOR_INSERT_SORT:
diff --git a/block/genhd.c b/block/genhd.c
index 73d85a8e3c85..c91a2dac6b6b 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1355,7 +1355,7 @@ int invalidate_partition(struct gendisk *disk, int partno)
struct block_device *bdev = bdget_disk(disk, partno);
if (bdev) {
fsync_bdev(bdev);
- res = __invalidate_device(bdev);
+ res = __invalidate_device(bdev, true);
bdput(bdev);
}
return res;
@@ -1494,7 +1494,7 @@ void disk_block_events(struct gendisk *disk)
void disk_unblock_events(struct gendisk *disk)
{
if (disk->ev)
- __disk_unblock_events(disk, true);
+ __disk_unblock_events(disk, false);
}
/**
diff --git a/block/ioctl.c b/block/ioctl.c
index 9049d460fa89..1124cd297263 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -294,9 +294,11 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
return -EINVAL;
if (get_user(n, (int __user *) arg))
return -EFAULT;
- if (!(mode & FMODE_EXCL) &&
- blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
- return -EBUSY;
+ if (!(mode & FMODE_EXCL)) {
+ bdgrab(bdev);
+ if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
+ return -EBUSY;
+ }
ret = set_blocksize(bdev, n);
if (!(mode & FMODE_EXCL))
blkdev_put(bdev, mode | FMODE_EXCL);