Diffstat (limited to 'block')
-rw-r--r--  block/bio.c                |  23
-rw-r--r--  block/blk-core.c           |  10
-rw-r--r--  block/blk-flush.c          |  11
-rw-r--r--  block/blk-lib.c            | 178
-rw-r--r--  block/blk-map.c            |  47
-rw-r--r--  block/blk-mq-tag.c         |  17
-rw-r--r--  block/blk-mq.c             |   5
-rw-r--r--  block/blk-settings.c       |  62
-rw-r--r--  block/blk-sysfs.c          |  47
-rw-r--r--  block/blk-throttle.c       |   5
-rw-r--r--  block/cfq-iosched.c        |   2
-rw-r--r--  block/compat_ioctl.c       |   4
-rw-r--r--  block/ioctl.c              |  37
-rw-r--r--  block/partition-generic.c  |  21
-rw-r--r--  block/partitions/efi.c     |   4
-rw-r--r--  block/partitions/ldm.c     |  60
16 files changed, 211 insertions(+), 322 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index f124a0a624fc..0e4aa42bc30d 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -311,17 +311,6 @@ static void bio_chain_endio(struct bio *bio)
bio_endio(__bio_chain_endio(bio));
}
-/*
- * Increment chain count for the bio. Make sure the CHAIN flag update
- * is visible before the raised count.
- */
-static inline void bio_inc_remaining(struct bio *bio)
-{
- bio_set_flag(bio, BIO_CHAIN);
- smp_mb__before_atomic();
- atomic_inc(&bio->__bi_remaining);
-}
-
/**
* bio_chain - chain bio completions
* @bio: the target bio
@@ -1339,7 +1328,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
* release the pages we didn't map into the bio, if any
*/
while (j < page_limit)
- page_cache_release(pages[j++]);
+ put_page(pages[j++]);
}
kfree(pages);
@@ -1365,7 +1354,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
for (j = 0; j < nr_pages; j++) {
if (!pages[j])
break;
- page_cache_release(pages[j]);
+ put_page(pages[j]);
}
out:
kfree(pages);
@@ -1385,7 +1374,7 @@ static void __bio_unmap_user(struct bio *bio)
if (bio_data_dir(bio) == READ)
set_page_dirty_lock(bvec->bv_page);
- page_cache_release(bvec->bv_page);
+ put_page(bvec->bv_page);
}
bio_put(bio);
@@ -1615,8 +1604,8 @@ static void bio_release_pages(struct bio *bio)
* the BIO and the offending pages and re-dirty the pages in process context.
*
* It is expected that bio_check_pages_dirty() will wholly own the BIO from
- * here on. It will run one page_cache_release() against each page and will
- * run one bio_put() against the BIO.
+ * here on. It will run one put_page() against each page and will run one
+ * bio_put() against the BIO.
*/
static void bio_dirty_fn(struct work_struct *work);
@@ -1658,7 +1647,7 @@ void bio_check_pages_dirty(struct bio *bio)
struct page *page = bvec->bv_page;
if (PageDirty(page) || PageCompound(page)) {
- page_cache_release(page);
+ put_page(page);
bvec->bv_page = NULL;
} else {
nr_clean_pages++;
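
The bio.c hunks above are part of the tree-wide removal of the page-cache accessor macros: put_page() directly replaces page_cache_release(), which by this point was a plain alias for it. A minimal sketch of the resulting release pattern for a mapped user page (the helper name is illustrative; the set_page_dirty_lock() call mirrors __bio_unmap_user() above):

	#include <linux/mm.h>

	/* Release one page of a completed READ mapping; sketch only. */
	static void release_bio_page(struct page *page, bool was_read)
	{
		if (was_read)
			set_page_dirty_lock(page);
		put_page(page);		/* formerly page_cache_release(page) */
	}
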
diff --git a/block/blk-core.c b/block/blk-core.c
index 827f8badd143..2475b1c72773 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -706,7 +706,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
goto fail_id;
q->backing_dev_info.ra_pages =
- (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+ (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
q->backing_dev_info.name = "block";
q->node = node_id;
@@ -1523,6 +1523,7 @@ EXPORT_SYMBOL(blk_put_request);
* blk_add_request_payload - add a payload to a request
* @rq: request to update
* @page: page backing the payload
+ * @offset: offset in page
* @len: length of the payload.
*
* This allows to later add a payload to an already submitted request by
@@ -1533,12 +1534,12 @@ EXPORT_SYMBOL(blk_put_request);
* discard requests should ever use it.
*/
void blk_add_request_payload(struct request *rq, struct page *page,
- unsigned int len)
+ int offset, unsigned int len)
{
struct bio *bio = rq->bio;
bio->bi_io_vec->bv_page = page;
- bio->bi_io_vec->bv_offset = 0;
+ bio->bi_io_vec->bv_offset = offset;
bio->bi_io_vec->bv_len = len;
bio->bi_iter.bi_size = len;
@@ -1963,7 +1964,8 @@ generic_make_request_checks(struct bio *bio)
* drivers without flush support don't have to worry
* about them.
*/
- if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+ if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+ !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
if (!nr_sectors) {
err = 0;
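
blk_add_request_payload() now takes the page offset instead of hard-coding zero, so a payload no longer has to start at the beginning of its page. A hedged caller sketch (rq and page are illustrative; per the comment above, only Write Same and discard requests should use this interface):

	/* Attach a 512-byte payload that starts 512 bytes into the page. */
	blk_add_request_payload(rq, page, 512, 512);

Existing callers keep the old behaviour by passing 0 for @offset.
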
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 9c423e53324a..b1c91d229e5e 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -95,17 +95,18 @@ enum {
static bool blk_kick_flush(struct request_queue *q,
struct blk_flush_queue *fq);
-static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
+static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
unsigned int policy = 0;
if (blk_rq_sectors(rq))
policy |= REQ_FSEQ_DATA;
- if (fflags & REQ_FLUSH) {
+ if (fflags & (1UL << QUEUE_FLAG_WC)) {
if (rq->cmd_flags & REQ_FLUSH)
policy |= REQ_FSEQ_PREFLUSH;
- if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
+ if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
+ (rq->cmd_flags & REQ_FUA))
policy |= REQ_FSEQ_POSTFLUSH;
}
return policy;
@@ -384,7 +385,7 @@ static void mq_flush_data_end_io(struct request *rq, int error)
void blk_insert_flush(struct request *rq)
{
struct request_queue *q = rq->q;
- unsigned int fflags = q->flush_flags; /* may change, cache */
+ unsigned long fflags = q->queue_flags; /* may change, cache */
unsigned int policy = blk_flush_policy(fflags, rq);
struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
@@ -393,7 +394,7 @@ void blk_insert_flush(struct request *rq)
* REQ_FLUSH and FUA for the driver.
*/
rq->cmd_flags &= ~REQ_FLUSH;
- if (!(fflags & REQ_FUA))
+ if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
rq->cmd_flags &= ~REQ_FUA;
/*
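
With q->flush_flags gone, blk_insert_flush() snapshots q->queue_flags once and blk_flush_policy() tests the QUEUE_FLAG_WC and QUEUE_FLAG_FUA bits in that snapshot. A condensed sketch of the new checks, as in the hunks above:

	unsigned long fflags = q->queue_flags;	/* snapshot; may change later */

	if ((fflags & (1UL << QUEUE_FLAG_WC)) && (rq->cmd_flags & REQ_FLUSH))
		policy |= REQ_FSEQ_PREFLUSH;
	if (!(fflags & (1UL << QUEUE_FLAG_FUA)) && (rq->cmd_flags & REQ_FUA))
		policy |= REQ_FSEQ_POSTFLUSH;	/* FUA emulated by postflush */
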
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9ebf65379556..23d7f301a196 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -9,82 +9,46 @@
#include "blk.h"
-struct bio_batch {
- atomic_t done;
- int error;
- struct completion *wait;
-};
-
-static void bio_batch_end_io(struct bio *bio)
+static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
+ gfp_t gfp)
{
- struct bio_batch *bb = bio->bi_private;
+ struct bio *new = bio_alloc(gfp, nr_pages);
+
+ if (bio) {
+ bio_chain(bio, new);
+ submit_bio(rw, bio);
+ }
- if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
- bb->error = bio->bi_error;
- if (atomic_dec_and_test(&bb->done))
- complete(bb->wait);
- bio_put(bio);
+ return new;
}
-/**
- * blkdev_issue_discard - queue a discard
- * @bdev: blockdev to issue discard for
- * @sector: start sector
- * @nr_sects: number of sectors to discard
- * @gfp_mask: memory allocation flags (for bio_alloc)
- * @flags: BLKDEV_IFL_* flags to control behaviour
- *
- * Description:
- * Issue a discard request for the sectors in question.
- */
-int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop)
{
- DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q = bdev_get_queue(bdev);
- int type = REQ_WRITE | REQ_DISCARD;
+ struct bio *bio = *biop;
unsigned int granularity;
int alignment;
- struct bio_batch bb;
- struct bio *bio;
- int ret = 0;
- struct blk_plug plug;
if (!q)
return -ENXIO;
-
if (!blk_queue_discard(q))
return -EOPNOTSUPP;
+ if ((type & REQ_SECURE) && !blk_queue_secdiscard(q))
+ return -EOPNOTSUPP;
/* Zero-sector (unknown) and one-sector granularities are the same. */
granularity = max(q->limits.discard_granularity >> 9, 1U);
alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
- if (flags & BLKDEV_DISCARD_SECURE) {
- if (!blk_queue_secdiscard(q))
- return -EOPNOTSUPP;
- type |= REQ_SECURE;
- }
-
- atomic_set(&bb.done, 1);
- bb.error = 0;
- bb.wait = &wait;
-
- blk_start_plug(&plug);
while (nr_sects) {
unsigned int req_sects;
sector_t end_sect, tmp;
- bio = bio_alloc(gfp_mask, 1);
- if (!bio) {
- ret = -ENOMEM;
- break;
- }
-
/* Make sure bi_size doesn't overflow */
req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
- /*
+ /**
* If splitting a request, and the next starting sector would be
* misaligned, stop the discard at the previous aligned sector.
*/
@@ -98,18 +62,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
req_sects = end_sect - sector;
}
+ bio = next_bio(bio, type, 1, gfp_mask);
bio->bi_iter.bi_sector = sector;
- bio->bi_end_io = bio_batch_end_io;
bio->bi_bdev = bdev;
- bio->bi_private = &bb;
bio->bi_iter.bi_size = req_sects << 9;
nr_sects -= req_sects;
sector = end_sect;
- atomic_inc(&bb.done);
- submit_bio(type, bio);
-
/*
* We can loop for a long time in here, if someone does
* full device discards (like mkfs). Be nice and allow
@@ -118,14 +78,44 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
*/
cond_resched();
}
- blk_finish_plug(&plug);
- /* Wait for bios in-flight */
- if (!atomic_dec_and_test(&bb.done))
- wait_for_completion_io(&wait);
+ *biop = bio;
+ return 0;
+}
+EXPORT_SYMBOL(__blkdev_issue_discard);
+
+/**
+ * blkdev_issue_discard - queue a discard
+ * @bdev: blockdev to issue discard for
+ * @sector: start sector
+ * @nr_sects: number of sectors to discard
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ * @flags: BLKDEV_IFL_* flags to control behaviour
+ *
+ * Description:
+ * Issue a discard request for the sectors in question.
+ */
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+{
+ int type = REQ_WRITE | REQ_DISCARD;
+ struct bio *bio = NULL;
+ struct blk_plug plug;
+ int ret;
+
+ if (flags & BLKDEV_DISCARD_SECURE)
+ type |= REQ_SECURE;
+
+ blk_start_plug(&plug);
+ ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
+ &bio);
+ if (!ret && bio) {
+ ret = submit_bio_wait(type, bio);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+ }
+ blk_finish_plug(&plug);
- if (bb.error)
- return bb.error;
return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
@@ -145,11 +135,9 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask,
struct page *page)
{
- DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q = bdev_get_queue(bdev);
unsigned int max_write_same_sectors;
- struct bio_batch bb;
- struct bio *bio;
+ struct bio *bio = NULL;
int ret = 0;
if (!q)
@@ -158,21 +146,10 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
/* Ensure that max_write_same_sectors doesn't overflow bi_size */
max_write_same_sectors = UINT_MAX >> 9;
- atomic_set(&bb.done, 1);
- bb.error = 0;
- bb.wait = &wait;
-
while (nr_sects) {
- bio = bio_alloc(gfp_mask, 1);
- if (!bio) {
- ret = -ENOMEM;
- break;
- }
-
+ bio = next_bio(bio, REQ_WRITE | REQ_WRITE_SAME, 1, gfp_mask);
bio->bi_iter.bi_sector = sector;
- bio->bi_end_io = bio_batch_end_io;
bio->bi_bdev = bdev;
- bio->bi_private = &bb;
bio->bi_vcnt = 1;
bio->bi_io_vec->bv_page = page;
bio->bi_io_vec->bv_offset = 0;
@@ -186,18 +163,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
bio->bi_iter.bi_size = nr_sects << 9;
nr_sects = 0;
}
-
- atomic_inc(&bb.done);
- submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
}
- /* Wait for bios in-flight */
- if (!atomic_dec_and_test(&bb.done))
- wait_for_completion_io(&wait);
-
- if (bb.error)
- return bb.error;
- return ret;
+ if (bio)
+ ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+ return ret != -EOPNOTSUPP ? ret : 0;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
@@ -216,28 +186,15 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask)
{
int ret;
- struct bio *bio;
- struct bio_batch bb;
+ struct bio *bio = NULL;
unsigned int sz;
- DECLARE_COMPLETION_ONSTACK(wait);
-
- atomic_set(&bb.done, 1);
- bb.error = 0;
- bb.wait = &wait;
- ret = 0;
while (nr_sects != 0) {
- bio = bio_alloc(gfp_mask,
- min(nr_sects, (sector_t)BIO_MAX_PAGES));
- if (!bio) {
- ret = -ENOMEM;
- break;
- }
-
+ bio = next_bio(bio, WRITE,
+ min(nr_sects, (sector_t)BIO_MAX_PAGES),
+ gfp_mask);
bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev;
- bio->bi_end_io = bio_batch_end_io;
- bio->bi_private = &bb;
while (nr_sects != 0) {
sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
@@ -247,18 +204,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
if (ret < (sz << 9))
break;
}
- ret = 0;
- atomic_inc(&bb.done);
- submit_bio(WRITE, bio);
}
- /* Wait for bios in-flight */
- if (!atomic_dec_and_test(&bb.done))
- wait_for_completion_io(&wait);
-
- if (bb.error)
- return bb.error;
- return ret;
+ if (bio)
+ return submit_bio_wait(WRITE, bio);
+ return 0;
}
/**
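
The bio_batch completion machinery is replaced by bio chaining: next_bio() allocates the next bio, chains the previous one to it with bio_chain(), and submits the previous one, so completions propagate down the chain and the caller only has to wait on the final bio. A caller sketch, following blkdev_issue_discard() above:

	struct bio *bio = NULL;
	int ret;

	ret = __blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
				     REQ_WRITE | REQ_DISCARD, &bio);
	if (!ret && bio)
		ret = submit_bio_wait(REQ_WRITE | REQ_DISCARD, bio);

This also lets callers batch several discard ranges into one chain before waiting.
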
diff --git a/block/blk-map.c b/block/blk-map.c
index a54f0543b956..b9f88b7751fb 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -9,24 +9,6 @@
#include "blk.h"
-static bool iovec_gap_to_prv(struct request_queue *q,
- struct iovec *prv, struct iovec *cur)
-{
- unsigned long prev_end;
-
- if (!queue_virt_boundary(q))
- return false;
-
- if (prv->iov_base == NULL && prv->iov_len == 0)
- /* prv is not set - don't check */
- return false;
-
- prev_end = (unsigned long)(prv->iov_base + prv->iov_len);
-
- return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
- prev_end & queue_virt_boundary(q));
-}
-
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
struct bio *bio)
{
@@ -125,31 +107,18 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
struct rq_map_data *map_data,
const struct iov_iter *iter, gfp_t gfp_mask)
{
- struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
- bool copy = (q->dma_pad_mask & iter->count) || map_data;
+ bool copy = false;
+ unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
struct bio *bio = NULL;
struct iov_iter i;
int ret;
- if (!iter || !iter->count)
- return -EINVAL;
-
- iov_for_each(iov, i, *iter) {
- unsigned long uaddr = (unsigned long) iov.iov_base;
-
- if (!iov.iov_len)
- return -EINVAL;
-
- /*
- * Keep going so we check length of all segments
- */
- if ((uaddr & queue_dma_alignment(q)) ||
- iovec_gap_to_prv(q, &prv, &iov))
- copy = true;
-
- prv.iov_base = iov.iov_base;
- prv.iov_len = iov.iov_len;
- }
+ if (map_data)
+ copy = true;
+ else if (iov_iter_alignment(iter) & align)
+ copy = true;
+ else if (queue_virt_boundary(q))
+ copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
i = *iter;
do {
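
The open-coded iovec walk is replaced by the iov_iter helpers: iov_iter_alignment() reports the worst-case address/length alignment across all segments, and iov_iter_gap_alignment() reports the alignment of the inter-segment gaps, which is what the virt-boundary check needs. A condensed sketch of the resulting bounce-copy decision (the function name is illustrative; map_data additionally forces a copy unconditionally):

	static bool map_needs_copy(struct request_queue *q,
				   const struct iov_iter *iter)
	{
		unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);

		if (iov_iter_alignment(iter) & align)
			return true;	/* misaligned segment start or length */
		return queue_virt_boundary(q) &&
		       (queue_virt_boundary(q) & iov_iter_gap_alignment(iter));
	}
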
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index abdbb47405cb..56a0c37a3d06 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -464,15 +464,26 @@ static void bt_tags_for_each(struct blk_mq_tags *tags,
}
}
-void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
- void *priv)
+static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
+ busy_tag_iter_fn *fn, void *priv)
{
if (tags->nr_reserved_tags)
bt_tags_for_each(tags, &tags->breserved_tags, 0, fn, priv, true);
bt_tags_for_each(tags, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
false);
}
-EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
+
+void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
+ busy_tag_iter_fn *fn, void *priv)
+{
+ int i;
+
+ for (i = 0; i < tagset->nr_hw_queues; i++) {
+ if (tagset->tags && tagset->tags[i])
+ blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
+ }
+}
+EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
void *priv)
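
blk_mq_all_tag_busy_iter() becomes static; drivers now call blk_mq_tagset_busy_iter(), which walks every hardware queue of a tag set. A hypothetical driver sketch (dev and its fields are illustrative), e.g. for terminating all in-flight requests when a controller dies:

	static void my_cancel_rq(struct request *rq, void *data, bool reserved)
	{
		/* terminate or requeue rq here */
	}

	blk_mq_tagset_busy_iter(&dev->tagset, my_cancel_rq, dev);
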
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1699baf39b78..7df9c9263b21 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1122,8 +1122,7 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
init_request_from_bio(rq, bio);
- if (blk_do_io_stat(rq))
- blk_account_io_start(rq, 1);
+ blk_account_io_start(rq, 1);
}
static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
@@ -1496,7 +1495,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
int to_do;
void *p;
- while (left < order_to_size(this_order - 1) && this_order)
+ while (this_order && left < order_to_size(this_order - 1))
this_order--;
do {
diff --git a/block/blk-settings.c b/block/blk-settings.c
index c7bb666aafd1..f679ae122843 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -239,8 +239,8 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
struct queue_limits *limits = &q->limits;
unsigned int max_sectors;
- if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
- max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
+ if ((max_hw_sectors << 9) < PAGE_SIZE) {
+ max_hw_sectors = 1 << (PAGE_SHIFT - 9);
printk(KERN_INFO "%s: set to minimum %d\n",
__func__, max_hw_sectors);
}
@@ -329,8 +329,8 @@ EXPORT_SYMBOL(blk_queue_max_segments);
**/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
- if (max_size < PAGE_CACHE_SIZE) {
- max_size = PAGE_CACHE_SIZE;
+ if (max_size < PAGE_SIZE) {
+ max_size = PAGE_SIZE;
printk(KERN_INFO "%s: set to minimum %d\n",
__func__, max_size);
}
@@ -760,8 +760,8 @@ EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
**/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
- if (mask < PAGE_CACHE_SIZE - 1) {
- mask = PAGE_CACHE_SIZE - 1;
+ if (mask < PAGE_SIZE - 1) {
+ mask = PAGE_SIZE - 1;
printk(KERN_INFO "%s: set to minimum %lx\n",
__func__, mask);
}
@@ -820,32 +820,40 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
-/**
- * blk_queue_flush - configure queue's cache flush capability
- * @q: the request queue for the device
- * @flush: 0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
- *
- * Tell block layer cache flush capability of @q. If it supports
- * flushing, REQ_FLUSH should be set. If it supports bypassing
- * write cache for individual writes, REQ_FUA should be set.
- */
-void blk_queue_flush(struct request_queue *q, unsigned int flush)
-{
- WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
-
- if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
- flush &= ~REQ_FUA;
-
- q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
-}
-EXPORT_SYMBOL_GPL(blk_queue_flush);
-
void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
{
- q->flush_not_queueable = !queueable;
+ spin_lock_irq(q->queue_lock);
+ if (queueable)
+ clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+ else
+ set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+ spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
+/**
+ * blk_queue_write_cache - configure queue's write cache
+ * @q: the request queue for the device
+ * @wc: write back cache on or off
+ * @fua: device supports FUA writes, if true
+ *
+ * Tell the block layer about the write cache of @q.
+ */
+void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
+{
+ spin_lock_irq(q->queue_lock);
+ if (wc)
+ queue_flag_set(QUEUE_FLAG_WC, q);
+ else
+ queue_flag_clear(QUEUE_FLAG_WC, q);
+ if (fua)
+ queue_flag_set(QUEUE_FLAG_FUA, q);
+ else
+ queue_flag_clear(QUEUE_FLAG_FUA, q);
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_write_cache);
+
static int __init blk_settings_init(void)
{
blk_max_low_pfn = max_low_pfn - 1;
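
blk_queue_flush() is removed; drivers now describe their cache with blk_queue_write_cache(), which sets or clears QUEUE_FLAG_WC and QUEUE_FLAG_FUA under the queue lock. A conversion sketch for a driver with a volatile write cache and FUA support:

	/* before: blk_queue_flush(q, REQ_FLUSH | REQ_FUA); */
	blk_queue_write_cache(q, true, true);

A write-through device without FUA would instead pass (q, false, false).
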
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index dd93763057ce..99205965f559 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -76,7 +76,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
unsigned long ra_kb = q->backing_dev_info.ra_pages <<
- (PAGE_CACHE_SHIFT - 10);
+ (PAGE_SHIFT - 10);
return queue_var_show(ra_kb, (page));
}
@@ -90,7 +90,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
if (ret < 0)
return ret;
- q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+ q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);
return ret;
}
@@ -117,7 +117,7 @@ static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
if (blk_queue_cluster(q))
return queue_var_show(queue_max_segment_size(q), (page));
- return queue_var_show(PAGE_CACHE_SIZE, (page));
+ return queue_var_show(PAGE_SIZE, (page));
}
static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
@@ -198,7 +198,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
unsigned long max_sectors_kb,
max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
- page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
+ page_kb = 1 << (PAGE_SHIFT - 10);
ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
if (ret < 0)
@@ -347,6 +347,38 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
return ret;
}
+static ssize_t queue_wc_show(struct request_queue *q, char *page)
+{
+ if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+ return sprintf(page, "write back\n");
+
+ return sprintf(page, "write through\n");
+}
+
+static ssize_t queue_wc_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ int set = -1;
+
+ if (!strncmp(page, "write back", 10))
+ set = 1;
+ else if (!strncmp(page, "write through", 13) ||
+ !strncmp(page, "none", 4))
+ set = 0;
+
+ if (set == -1)
+ return -EINVAL;
+
+ spin_lock_irq(q->queue_lock);
+ if (set)
+ queue_flag_set(QUEUE_FLAG_WC, q);
+ else
+ queue_flag_clear(QUEUE_FLAG_WC, q);
+ spin_unlock_irq(q->queue_lock);
+
+ return count;
+}
+
static struct queue_sysfs_entry queue_requests_entry = {
.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
.show = queue_requests_show,
@@ -478,6 +510,12 @@ static struct queue_sysfs_entry queue_poll_entry = {
.store = queue_poll_store,
};
+static struct queue_sysfs_entry queue_wc_entry = {
+ .attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_wc_show,
+ .store = queue_wc_store,
+};
+
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
@@ -503,6 +541,7 @@ static struct attribute *default_attrs[] = {
&queue_iostats_entry.attr,
&queue_random_entry.attr,
&queue_poll_entry.attr,
+ &queue_wc_entry.attr,
NULL,
};
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 2149a1ddbacf..47a3e540631a 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -211,15 +211,14 @@ static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
*
* The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
* throtl_grp; otherwise, just "throtl".
- *
- * TODO: this should be made a function and name formatting should happen
- * after testing whether blktrace is enabled.
*/
#define throtl_log(sq, fmt, args...) do { \
struct throtl_grp *__tg = sq_to_tg((sq)); \
struct throtl_data *__td = sq_to_td((sq)); \
\
(void)__td; \
+ if (likely(!blk_trace_note_message_enabled(__td->queue))) \
+ break; \
if ((__tg)) { \
char __pbuf[128]; \
\
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e3c591dd8f19..4a349787bc62 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -4075,7 +4075,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
* idle timer unplug to continue working.
*/
if (cfq_cfqq_wait_request(cfqq)) {
- if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
+ if (blk_rq_bytes(rq) > PAGE_SIZE ||
cfqd->busy_queues > 1) {
cfq_del_timer(cfqd, cfqq);
cfq_clear_cfqq_wait_request(cfqq);
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index f678c733df40..556826ac7cb4 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -710,7 +710,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
return -EINVAL;
bdi = blk_get_backing_dev_info(bdev);
return compat_put_long(arg,
- (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
+ (bdi->ra_pages * PAGE_SIZE) / 512);
case BLKROGET: /* compatible */
return compat_put_int(arg, bdev_read_only(bdev) != 0);
case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
@@ -729,7 +729,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
bdi = blk_get_backing_dev_info(bdev);
- bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
+ bdi->ra_pages = (arg * 512) / PAGE_SIZE;
return 0;
case BLKGETSIZE:
size = i_size_read(bdev->bd_inode);
diff --git a/block/ioctl.c b/block/ioctl.c
index d8996bbd7f12..ed2397f8de9d 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -4,7 +4,6 @@
#include <linux/gfp.h>
#include <linux/blkpg.h>
#include <linux/hdreg.h>
-#include <linux/badblocks.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/blktrace_api.h>
@@ -407,35 +406,6 @@ static inline int is_unrecognized_ioctl(int ret)
ret == -ENOIOCTLCMD;
}
-#ifdef CONFIG_FS_DAX
-bool blkdev_dax_capable(struct block_device *bdev)
-{
- struct gendisk *disk = bdev->bd_disk;
-
- if (!disk->fops->direct_access)
- return false;
-
- /*
- * If the partition is not aligned on a page boundary, we can't
- * do dax I/O to it.
- */
- if ((bdev->bd_part->start_sect % (PAGE_SIZE / 512))
- || (bdev->bd_part->nr_sects % (PAGE_SIZE / 512)))
- return false;
-
- /*
- * If the device has known bad blocks, force all I/O through the
- * driver / page cache.
- *
- * TODO: support finer grained dax error handling
- */
- if (disk->bb && disk->bb->count)
- return false;
-
- return true;
-}
-#endif
-
static int blkdev_flushbuf(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg)
{
@@ -550,7 +520,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
if (!arg)
return -EINVAL;
bdi = blk_get_backing_dev_info(bdev);
- return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
+ return put_long(arg, (bdi->ra_pages * PAGE_SIZE) / 512);
case BLKROGET:
return put_int(arg, bdev_read_only(bdev) != 0);
case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
@@ -578,7 +548,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
if(!capable(CAP_SYS_ADMIN))
return -EACCES;
bdi = blk_get_backing_dev_info(bdev);
- bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
+ bdi->ra_pages = (arg * 512) / PAGE_SIZE;
return 0;
case BLKBSZSET:
return blkdev_bszset(bdev, mode, argp);
@@ -598,9 +568,6 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
case BLKTRACESETUP:
case BLKTRACETEARDOWN:
return blk_trace_ioctl(bdev, cmd, argp);
- case BLKDAXGET:
- return put_int(arg, !!(bdev->bd_inode->i_flags & S_DAX));
- break;
case IOC_PR_REGISTER:
return blkdev_pr_register(bdev, argp);
case IOC_PR_RESERVE:
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 5d8701941054..d7eb77e1e3a8 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -361,15 +361,20 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
goto out_del;
}
+ err = hd_ref_init(p);
+ if (err) {
+ if (flags & ADDPART_FLAG_WHOLEDISK)
+ goto out_remove_file;
+ goto out_del;
+ }
+
/* everything is up and running, commence */
rcu_assign_pointer(ptbl->part[partno], p);
/* suppress uevent if the disk suppresses it */
if (!dev_get_uevent_suppress(ddev))
kobject_uevent(&pdev->kobj, KOBJ_ADD);
-
- if (!hd_ref_init(p))
- return p;
+ return p;
out_free_info:
free_part_info(p);
@@ -378,6 +383,8 @@ out_free_stats:
out_free:
kfree(p);
return ERR_PTR(err);
+out_remove_file:
+ device_remove_file(pdev, &dev_attr_whole_disk);
out_del:
kobject_put(p->holder_dir);
device_del(pdev);
@@ -566,8 +573,8 @@ static struct page *read_pagecache_sector(struct block_device *bdev, sector_t n)
{
struct address_space *mapping = bdev->bd_inode->i_mapping;
- return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
- NULL);
+ return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)),
+ NULL);
}
unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
@@ -584,9 +591,9 @@ unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
if (PageError(page))
goto fail;
p->v = page;
- return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_CACHE_SHIFT - 9)) - 1)) << 9);
+ return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_SHIFT - 9)) - 1)) << 9);
fail:
- page_cache_release(page);
+ put_page(page);
}
p->v = NULL;
return NULL;
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index 26cb624ace05..bcd86e5cd546 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -430,7 +430,7 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
}
/* Check that sizeof_partition_entry has the correct value */
if (le32_to_cpu((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry)) {
- pr_debug("GUID Partitition Entry Size check failed.\n");
+ pr_debug("GUID Partition Entry Size check failed.\n");
goto fail;
}
@@ -443,7 +443,7 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
le32_to_cpu((*gpt)->sizeof_partition_entry));
if (crc != le32_to_cpu((*gpt)->partition_entry_array_crc32)) {
- pr_debug("GUID Partitition Entry Array CRC check failed.\n");
+ pr_debug("GUID Partition Entry Array CRC check failed.\n");
goto fail_ptes;
}
diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
index e507cfbd044e..edcea70674c9 100644
--- a/block/partitions/ldm.c
+++ b/block/partitions/ldm.c
@@ -27,6 +27,8 @@
#include <linux/pagemap.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
+#include <linux/uuid.h>
+
#include "ldm.h"
#include "check.h"
#include "msdos.h"
@@ -66,60 +68,6 @@ void _ldm_printk(const char *level, const char *function, const char *fmt, ...)
}
/**
- * ldm_parse_hexbyte - Convert a ASCII hex number to a byte
- * @src: Pointer to at least 2 characters to convert.
- *
- * Convert a two character ASCII hex string to a number.
- *
- * Return: 0-255 Success, the byte was parsed correctly
- * -1 Error, an invalid character was supplied
- */
-static int ldm_parse_hexbyte (const u8 *src)
-{
- unsigned int x; /* For correct wrapping */
- int h;
-
- /* high part */
- x = h = hex_to_bin(src[0]);
- if (h < 0)
- return -1;
-
- /* low part */
- h = hex_to_bin(src[1]);
- if (h < 0)
- return -1;
-
- return (x << 4) + h;
-}
-
-/**
- * ldm_parse_guid - Convert GUID from ASCII to binary
- * @src: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
- * @dest: Memory block to hold binary GUID (16 bytes)
- *
- * N.B. The GUID need not be NULL terminated.
- *
- * Return: 'true' @dest contains binary GUID
- * 'false' @dest contents are undefined
- */
-static bool ldm_parse_guid (const u8 *src, u8 *dest)
-{
- static const int size[] = { 4, 2, 2, 2, 6 };
- int i, j, v;
-
- if (src[8] != '-' || src[13] != '-' ||
- src[18] != '-' || src[23] != '-')
- return false;
-
- for (j = 0; j < 5; j++, src++)
- for (i = 0; i < size[j]; i++, src+=2, *dest++ = v)
- if ((v = ldm_parse_hexbyte (src)) < 0)
- return false;
-
- return true;
-}
-
-/**
* ldm_parse_privhead - Read the LDM Database PRIVHEAD structure
* @data: Raw database PRIVHEAD structure loaded from the device
* @ph: In-memory privhead structure in which to return parsed information
@@ -167,7 +115,7 @@ static bool ldm_parse_privhead(const u8 *data, struct privhead *ph)
ldm_error("PRIVHEAD disk size doesn't match real disk size");
return false;
}
- if (!ldm_parse_guid(data + 0x0030, ph->disk_id)) {
+ if (uuid_be_to_bin(data + 0x0030, (uuid_be *)ph->disk_id)) {
ldm_error("PRIVHEAD contains an invalid GUID.");
return false;
}
@@ -944,7 +892,7 @@ static bool ldm_parse_dsk3 (const u8 *buffer, int buflen, struct vblk *vb)
disk = &vb->vblk.disk;
ldm_get_vstr (buffer + 0x18 + r_diskid, disk->alt_name,
sizeof (disk->alt_name));
- if (!ldm_parse_guid (buffer + 0x19 + r_name, disk->disk_id))
+ if (uuid_be_to_bin(buffer + 0x19 + r_name, (uuid_be *)disk->disk_id))
return false;
return true;
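
The hand-rolled ldm_parse_hexbyte()/ldm_parse_guid() pair is replaced by uuid_be_to_bin() from <linux/uuid.h>, which parses the canonical 36-character form into 16 binary bytes and returns 0 on success or a negative errno on a malformed string (hence the inverted truth value at the call sites above). A minimal sketch, using the sample GUID from the removed comment:

	uuid_be uuid;

	if (uuid_be_to_bin("fa50ff2b-f2e8-45de-83fa-65417f2f49ba", &uuid) < 0)
		ldm_error("PRIVHEAD contains an invalid GUID.");
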