author     Linus Torvalds <torvalds@linux-foundation.org>  2016-05-18 02:03:32 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-05-18 02:03:32 +0300
commit     24b9f0cf00c8e8df29a4ddfec8c139ad62753113
tree       95eb986ead9bd6734c1901b4971a940619141fe1
parent     a4d1dbed0e27030b3c3ca2d1d5c33a1b45bc53d2
parent     116f7d4a21fe450efc652c4850eb27cda36c9db0
Merge branch 'for-4.7/drivers' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:
 "On top of the core pull request, this is the drivers pull request for
  this merge window. This contains:

   - Switch drivers to the new write back cache API, and kill off the
     flush flags. From me.

   - Kill the discard support for the STEC pci-e flash driver. It's
     trivially broken, and apparently unmaintained, so it's safer to
     just remove it. From Jeff Moyer.

   - A set of lightnvm updates from the usual suspects (Matias/Javier,
     and Simon), and fixes from Arnd, Jeff Mahoney, Sagi, and Wenwei
     Tao.

   - A set of updates for NVMe:

       - Turn the controller state management into a proper state
         machine. From Christoph.

       - Shuffling of code in preparation for NVMe-over-fabrics, also
         from Christoph.

       - Cleanup of the command prep part from Ming Lin.

       - Rewrite of the discard support from Ming Lin.

       - Deadlock fix for namespace removal from Ming Lin.

       - Use the now exported blk-mq tag helper for IO termination.
         From Sagi.

       - Various little fixes from Christoph, Guilherme, Keith, Ming
         Lin, Wang Sheng-Hui.

   - Convert mtip32xx to use the now exported blk-mq tag iter function,
     from Keith"

* 'for-4.7/drivers' of git://git.kernel.dk/linux-block: (74 commits)
  lightnvm: reserved space calculation incorrect
  lightnvm: rename nr_pages to nr_ppas on nvm_rq
  lightnvm: add is_cached entry to struct ppa_addr
  lightnvm: expose gennvm_mark_blk to targets
  lightnvm: remove mgt targets on mgt removal
  lightnvm: pass dma address to hardware rather than pointer
  lightnvm: do not assume sequential lun alloc.
  nvme/lightnvm: Log using the ctrl named device
  lightnvm: rename dma helper functions
  lightnvm: enable metadata to be sent to device
  lightnvm: do not free unused metadata on rrpc
  lightnvm: fix out of bound ppa lun id on bb tbl
  lightnvm: refactor set_bb_tbl for accepting ppa list
  lightnvm: move responsibility for bad blk mgmt to target
  lightnvm: make nvm_set_rqd_ppalist() aware of vblks
  lightnvm: remove struct factory_blks
  lightnvm: refactor device ops->get_bb_tbl()
  lightnvm: introduce nvm_for_each_lun_ppa() macro
  lightnvm: refactor dev->online_target to global nvm_targets
  lightnvm: rename nvm_targets to nvm_tgt_type
  ...
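For orientation before the diff: the write back cache conversion replaces each driver's blk_queue_flush() setup with a single call to the new API. A minimal sketch of that driver-side change, assuming a device with a volatile cache and native FUA support (the exact arguments depend on the hardware):

	/* Before this series: cache capability advertised via request flags. */
	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);

	/* After: a volatile write cache and FUA support, stated directly. */
	blk_queue_write_cache(q, true, true);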
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c      3
-rw-r--r--  block/blk-flush.c    11
-rw-r--r--  block/blk-mq-tag.c    5
-rw-r--r--  block/blk-settings.c 38
4 files changed, 20 insertions(+), 37 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index c50227796a26..2475b1c72773 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1964,7 +1964,8 @@ generic_make_request_checks(struct bio *bio)
* drivers without flush support don't have to worry
* about them.
*/
- if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+ if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+ !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
if (!nr_sectors) {
err = 0;
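The effect of the hunk above: a queue that never set QUEUE_FLAG_WC has the flush and FUA bits stripped from incoming bios, and an empty flush bio completes immediately with success. A minimal sketch of the same predicate wrapped as a helper (the helper name is hypothetical, not part of the block API):

	#include <linux/blkdev.h>

	/* Hypothetical wrapper: does this queue advertise a volatile cache? */
	static inline bool queue_has_write_cache(struct request_queue *q)
	{
		return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
	}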
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 9c423e53324a..b1c91d229e5e 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -95,17 +95,18 @@ enum {
static bool blk_kick_flush(struct request_queue *q,
struct blk_flush_queue *fq);
-static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
+static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
unsigned int policy = 0;
if (blk_rq_sectors(rq))
policy |= REQ_FSEQ_DATA;
- if (fflags & REQ_FLUSH) {
+ if (fflags & (1UL << QUEUE_FLAG_WC)) {
if (rq->cmd_flags & REQ_FLUSH)
policy |= REQ_FSEQ_PREFLUSH;
- if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
+ if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
+ (rq->cmd_flags & REQ_FUA))
policy |= REQ_FSEQ_POSTFLUSH;
}
return policy;
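A worked example of the new policy computation: suppose the queue set QUEUE_FLAG_WC but not QUEUE_FLAG_FUA, and a 1-sector write arrives tagged REQ_FLUSH | REQ_FUA. Evaluated as if inside blk-flush.c (the REQ_FSEQ_* values are private to that file):

	unsigned long fflags = 1UL << QUEUE_FLAG_WC;	/* cache, no native FUA */

	/*
	 * blk_flush_policy(fflags, rq) returns:
	 *   REQ_FSEQ_DATA      - blk_rq_sectors(rq) != 0
	 * | REQ_FSEQ_PREFLUSH  - rq->cmd_flags has REQ_FLUSH
	 * | REQ_FSEQ_POSTFLUSH - REQ_FUA requested but the queue lacks FUA,
	 *                        so a post-write flush emulates it
	 */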
@@ -384,7 +385,7 @@ static void mq_flush_data_end_io(struct request *rq, int error)
void blk_insert_flush(struct request *rq)
{
struct request_queue *q = rq->q;
- unsigned int fflags = q->flush_flags; /* may change, cache */
+ unsigned long fflags = q->queue_flags; /* may change, cache */
unsigned int policy = blk_flush_policy(fflags, rq);
struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
@@ -393,7 +394,7 @@ void blk_insert_flush(struct request *rq)
* REQ_FLUSH and FUA for the driver.
*/
rq->cmd_flags &= ~REQ_FLUSH;
- if (!(fflags & REQ_FUA))
+ if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
rq->cmd_flags &= ~REQ_FUA;
/*
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 2fd04286f103..56a0c37a3d06 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -464,15 +464,14 @@ static void bt_tags_for_each(struct blk_mq_tags *tags,
}
}
-void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
- void *priv)
+static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
+ busy_tag_iter_fn *fn, void *priv)
{
if (tags->nr_reserved_tags)
bt_tags_for_each(tags, &tags->breserved_tags, 0, fn, priv, true);
bt_tags_for_each(tags, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
false);
}
-EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv)
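With the per-tags iterator made static, out-of-core users walk busy requests through the tagset variant instead; this is the conversion the mtip32xx patch in the shortlog performs. A hedged sketch of a caller, using the 4.7-era callback signature (the callback and wrapper names are hypothetical):

	#include <linux/blk-mq.h>

	/* Hypothetical callback: count requests still in flight. */
	static void count_inflight(struct request *rq, void *data, bool reserved)
	{
		unsigned int *inflight = data;

		(*inflight)++;
	}

	/* Hypothetical caller, e.g. in a driver's teardown path. */
	static unsigned int driver_count_inflight(struct blk_mq_tag_set *set)
	{
		unsigned int inflight = 0;

		blk_mq_tagset_busy_iter(set, count_inflight, &inflight);
		return inflight;
	}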
diff --git a/block/blk-settings.c b/block/blk-settings.c
index c903bee43cf8..f679ae122843 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -820,29 +820,14 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
-/**
- * blk_queue_flush - configure queue's cache flush capability
- * @q: the request queue for the device
- * @flush: 0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
- *
- * Tell block layer cache flush capability of @q. If it supports
- * flushing, REQ_FLUSH should be set. If it supports bypassing
- * write cache for individual writes, REQ_FUA should be set.
- */
-void blk_queue_flush(struct request_queue *q, unsigned int flush)
-{
- WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
-
- if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
- flush &= ~REQ_FUA;
-
- q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
-}
-EXPORT_SYMBOL_GPL(blk_queue_flush);
-
void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
{
- q->flush_not_queueable = !queueable;
+ spin_lock_irq(q->queue_lock);
+ if (queueable)
+ clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+ else
+ set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+ spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
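Callers are unchanged by this conversion; a driver whose hardware cannot queue a cache flush alongside other commands still disables queueing the same way, and the flush machinery reads the state back through the QUEUE_FLAG_FLUSH_NQ bit (libata-style usage; the surrounding configuration path is assumed):

	/* In a driver's device-configuration path: */
	blk_queue_flush_queueable(q, false);	/* sets QUEUE_FLAG_FLUSH_NQ */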
@@ -857,16 +842,13 @@ EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
spin_lock_irq(q->queue_lock);
- if (wc) {
+ if (wc)
queue_flag_set(QUEUE_FLAG_WC, q);
- q->flush_flags = REQ_FLUSH;
- } else
+ else
queue_flag_clear(QUEUE_FLAG_WC, q);
- if (fua) {
- if (wc)
- q->flush_flags |= REQ_FUA;
+ if (fua)
queue_flag_set(QUEUE_FLAG_FUA, q);
- } else
+ else
queue_flag_clear(QUEUE_FLAG_FUA, q);
spin_unlock_irq(q->queue_lock);
}
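One behavioral nuance in this final hunk: the removed blk_queue_flush() warned and masked FUA when flush support was absent, whereas blk_queue_write_cache() records the two capabilities independently and leaves the masking to the flush machinery (blk_flush_policy() earlier in this diff only consults QUEUE_FLAG_FUA when QUEUE_FLAG_WC is set). The three meaningful configurations, as a sketch:

	blk_queue_write_cache(q, true,  true);	/* volatile cache, native FUA        */
	blk_queue_write_cache(q, true,  false);	/* cache; FUA emulated by postflush  */
	blk_queue_write_cache(q, false, false);	/* write-through; flush/FUA stripped */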