author     Linus Torvalds <torvalds@linux-foundation.org>  2013-05-08 21:13:35 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-05-08 21:13:35 +0400
commit     4de13d7aa8f4d02f4dc99d4609575659f92b3c5a (patch)
tree       3bc9729eabe79c6164cd29a5d605000bc82bf837 /mm/bounce.c
parent     5af43c24ca59a448c9312dd4a4a51d27ec3b9a73 (diff)
parent     b8d4a5bf6a049303a29a3275f463f09a490b50ea (diff)
download   linux-4de13d7aa8f4d02f4dc99d4609575659f92b3c5a.tar.xz
Merge branch 'for-3.10/core' of git://git.kernel.dk/linux-block
Pull block core updates from Jens Axboe:

 - Major bit is Kent's prep work for immutable bio vecs.

 - Stable candidate fix for a scheduling-while-atomic bug in the queue
   bypass operation.

 - Fix for a hang when merging discard bios overflowed the 32-bit
   unsigned rq->datalen.

 - Tejun's changes converting the writeback thread pool to the generic
   workqueue mechanism.

 - Runtime PM framework; the SCSI patches exist on top of these in
   James' tree.

 - A few random fixes.

* 'for-3.10/core' of git://git.kernel.dk/linux-block: (40 commits)
  relay: move remove_buf_file inside relay_close_buf
  partitions/efi.c: replace useless kzalloc's by kmalloc's
  fs/block_dev.c: fix iov_shorten() criteria in blkdev_aio_read()
  block: fix max discard sectors limit
  blkcg: fix "scheduling while atomic" in blk_queue_bypass_start
  Documentation: cfq-iosched: update documentation help for cfq tunables
  writeback: expose the bdi_wq workqueue
  writeback: replace custom worker pool implementation with unbound workqueue
  writeback: remove unused bdi_pending_list
  aoe: Fix unitialized var usage
  bio-integrity: Add explicit field for owner of bip_buf
  block: Add an explicit bio flag for bios that own their bvec
  block: Add bio_alloc_pages()
  block: Convert some code to bio_for_each_segment_all()
  block: Add bio_for_each_segment_all()
  bounce: Refactor __blk_queue_bounce to not use bi_io_vec
  raid1: use bio_copy_data()
  pktcdvd: Use bio_reset() in disabled code to kill bi_idx usage
  pktcdvd: use bio_copy_data()
  block: Add bio_copy_data()
  ...
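The headline API for the mm/bounce.c changes below is bio_for_each_segment_all(), added by this series for code that owns a bio and therefore may walk every bvec from index 0, as opposed to bio_for_each_segment(), which starts at bio->bi_idx and only visits the segments the caller is entitled to see. Roughly, the iterators of this era look as follows (a paraphrase of include/linux/bio.h from that period, not the verbatim macros):

	/* Paraphrased sketch of the 3.10-era bvec iterators; see
	 * include/linux/bio.h for the authoritative definitions. */
	#define __bio_for_each_segment(bvl, bio, i, start_idx)			\
		for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);	\
		     i < (bio)->bi_vcnt;					\
		     bvl++, i++)

	/* walk only the not-yet-completed segments, from bi_idx onward */
	#define bio_for_each_segment(bvl, bio, i)				\
		__bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)

	/* walk every bvec from 0; only for code that owns the bio */
	#define bio_for_each_segment_all(bvl, bio, i)				\
		for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)

bounce.c ends up using both: bio_for_each_segment() on the bio it was handed, and bio_for_each_segment_all() on bounce bios it allocated itself.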
Diffstat (limited to 'mm/bounce.c')
-rw-r--r--  mm/bounce.c | 75
1 file changed, 20 insertions(+), 55 deletions(-)
diff --git a/mm/bounce.c b/mm/bounce.c
index a5c2ec3589cb..c9f0a4339a7d 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -101,7 +101,7 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
 	struct bio_vec *tovec, *fromvec;
 	int i;
 
-	__bio_for_each_segment(tovec, to, i, 0) {
+	bio_for_each_segment(tovec, to, i) {
 		fromvec = from->bi_io_vec + i;
 
 		/*
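The loop body continues past the context shown: on READ completion it copies each bounced lowmem buffer back into the original highmem page. A condensed sketch, assuming the surrounding 3.10 code is otherwise unchanged (paraphrased, not part of this hunk):

	bio_for_each_segment(tovec, to, i) {
		fromvec = from->bi_io_vec + i;

		/* if the pages match, this segment was never bounced */
		if (tovec->bv_page == fromvec->bv_page)
			continue;

		/* the bounce page is lowmem and directly addressable;
		 * bounce_copy_vec() kmaps the highmem destination */
		vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
		bounce_copy_vec(tovec, vfrom);
		flush_dcache_page(tovec->bv_page);
	}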
@@ -134,7 +134,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
 	/*
 	 * free up bounce indirect pages used
 	 */
-	__bio_for_each_segment(bvec, bio, i, 0) {
+	bio_for_each_segment_all(bvec, bio, i) {
 		org_vec = bio_orig->bi_io_vec + i;
 		if (bvec->bv_page == org_vec->bv_page)
 			continue;
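Past the context shown, the loop returns each page that was actually bounced to its mempool; the switch to bio_for_each_segment_all() is correct here precisely because this completion path owns the bounce bio. Condensed from 3.10 mm/bounce.c (not part of this hunk):

	bio_for_each_segment_all(bvec, bio, i) {
		org_vec = bio_orig->bi_io_vec + i;
		if (bvec->bv_page == org_vec->bv_page)
			continue;	/* never bounced; nothing to free */

		/* bounced: hand the bounce page back to its mempool */
		dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
		mempool_free(bvec->bv_page, pool);
	}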
@@ -199,78 +199,43 @@ static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
 static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 			       mempool_t *pool, int force)
 {
-	struct page *page;
-	struct bio *bio = NULL;
-	int i, rw = bio_data_dir(*bio_orig);
+	struct bio *bio;
+	int rw = bio_data_dir(*bio_orig);
 	struct bio_vec *to, *from;
+	unsigned i;
 
-	bio_for_each_segment(from, *bio_orig, i) {
-		page = from->bv_page;
+	bio_for_each_segment(from, *bio_orig, i)
+		if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
+			goto bounce;
 
-		/*
-		 * is destination page below bounce pfn?
-		 */
-		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
-			continue;
-
-		/*
-		 * irk, bounce it
-		 */
-		if (!bio) {
-			unsigned int cnt = (*bio_orig)->bi_vcnt;
+	return;
+bounce:
+	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);
 
-			bio = bio_alloc(GFP_NOIO, cnt);
-			memset(bio->bi_io_vec, 0, cnt * sizeof(struct bio_vec));
-		}
-
+	bio_for_each_segment_all(to, bio, i) {
+		struct page *page = to->bv_page;
 
-		to = bio->bi_io_vec + i;
+		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
+			continue;
 
-		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
-		to->bv_len = from->bv_len;
-		to->bv_offset = from->bv_offset;
 		inc_zone_page_state(to->bv_page, NR_BOUNCE);
+		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
 
 		if (rw == WRITE) {
 			char *vto, *vfrom;
 
-			flush_dcache_page(from->bv_page);
+			flush_dcache_page(page);
+
 			vto = page_address(to->bv_page) + to->bv_offset;
-			vfrom = kmap(from->bv_page) + from->bv_offset;
+			vfrom = kmap_atomic(page) + to->bv_offset;
 			memcpy(vto, vfrom, to->bv_len);
-			kunmap(from->bv_page);
+			kunmap_atomic(vfrom);
 		}
 	}
 
-	/*
-	 * no pages bounced
-	 */
-	if (!bio)
-		return;
-
 	trace_block_bio_bounce(q, *bio_orig);
 
-	/*
-	 * at least one page was bounced, fill in possible non-highmem
-	 * pages
-	 */
-	__bio_for_each_segment(from, *bio_orig, i, 0) {
-		to = bio_iovec_idx(bio, i);
-		if (!to->bv_page) {
-			to->bv_page = from->bv_page;
-			to->bv_len = from->bv_len;
-			to->bv_offset = from->bv_offset;
-		}
-	}
-
-	bio->bi_bdev = (*bio_orig)->bi_bdev;
 	bio->bi_flags |= (1 << BIO_BOUNCED);
-	bio->bi_sector = (*bio_orig)->bi_sector;
-	bio->bi_rw = (*bio_orig)->bi_rw;
-
-	bio->bi_vcnt = (*bio_orig)->bi_vcnt;
-	bio->bi_idx = (*bio_orig)->bi_idx;
-	bio->bi_size = (*bio_orig)->bi_size;
 
 	if (pool == page_pool) {
 		bio->bi_end_io = bounce_end_io_write;
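Two things fall out of the refactor above. First, bio_clone_bioset() starts the bounce bio with the original bvecs already in place, so only segments that actually need bouncing are touched; that is what eliminates both the old "fill in possible non-highmem pages" pass and the hand-copied bi_sector/bi_rw/bi_vcnt/bi_idx/bi_size bookkeeping. Second, the write-side copy switches from kmap() to kmap_atomic(), which is per-CPU and cheap but must not be held across anything that can sleep. A minimal standalone sketch of that pattern (hypothetical helper for illustration, not code from the patch):

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Hypothetical helper showing the kmap_atomic() pattern used
	 * above: map the highmem source, copy while the mapping is
	 * held, unmap immediately. No sleeping between map and unmap. */
	static void copy_from_highmem(void *dst, struct page *page,
				      unsigned int offset, unsigned int len)
	{
		char *vfrom = kmap_atomic(page);

		memcpy(dst, vfrom + offset, len);
		kunmap_atomic(vfrom);
	}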