author    Linus Torvalds <torvalds@linux-foundation.org>  2009-06-11 21:52:27 +0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-06-11 22:10:35 +0400
commit    c9059598ea8981d02356eead3188bf7fa4d717b8 (patch)
tree      03e73b20a30e988da7c6a3e0ad93b2dc5843274d
parent    0a33f80a8373eca7f4bea3961d1346c3815fa5ed (diff)
parent    b0fd271d5fba0b2d00888363f3869e3f9b26caa9 (diff)
download  linux-c9059598ea8981d02356eead3188bf7fa4d717b8.tar.xz
Merge branch 'for-2.6.31' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.31' of git://git.kernel.dk/linux-2.6-block: (153 commits)
  block: add request clone interface (v2)
  floppy: fix hibernation
  ramdisk: remove long-deprecated "ramdisk=" boot-time parameter
  fs/bio.c: add missing __user annotation
  block: prevent possible io_context->refcount overflow
  Add serial number support for virtio_blk, V4a
  block: Add missing bounce_pfn stacking and fix comments
  Revert "block: Fix bounce limit setting in DM"
  cciss: decode unit attention in SCSI error handling code
  cciss: Remove no longer needed sendcmd reject processing code
  cciss: change SCSI error handling routines to work with interrupts enabled.
  cciss: separate error processing and command retrying code in sendcmd_withirq_core()
  cciss: factor out fix target status processing code from sendcmd functions
  cciss: simplify interface of sendcmd() and sendcmd_withirq()
  cciss: factor out core of sendcmd_withirq() for use by SCSI error handling code
  cciss: Use schedule_timeout_uninterruptible in SCSI error handling code
  block: needs to set the residual length of a bidi request
  Revert "block: implement blkdev_readpages"
  block: Fix bounce limit setting in DM
  Removed reference to non-existing file Documentation/PCI/PCI-DMA-mapping.txt
  ...

Manually fix conflicts with tracing updates in:
	block/blk-sysfs.c
	drivers/ide/ide-atapi.c
	drivers/ide/ide-cd.c
	drivers/ide/ide-floppy.c
	drivers/ide/ide-tape.c
	include/trace/events/block.h
	kernel/trace/blktrace.c
-rw-r--r--  Documentation/ABI/testing/sysfs-block | 59
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-pci-devices-cciss | 33
-rw-r--r--  Documentation/block/biodoc.txt | 2
-rw-r--r--  arch/arm/plat-omap/mailbox.c | 63
-rw-r--r--  arch/powerpc/sysdev/axonram.c | 2
-rw-r--r--  arch/um/drivers/ubd_kern.c | 36
-rw-r--r--  block/Kconfig | 11
-rw-r--r--  block/as-iosched.c | 24
-rw-r--r--  block/blk-barrier.c | 27
-rw-r--r--  block/blk-core.c | 848
-rw-r--r--  block/blk-exec.c | 1
-rw-r--r--  block/blk-integrity.c | 2
-rw-r--r--  block/blk-ioc.c | 12
-rw-r--r--  block/blk-map.c | 25
-rw-r--r--  block/blk-merge.c | 71
-rw-r--r--  block/blk-settings.c | 269
-rw-r--r--  block/blk-sysfs.c | 62
-rw-r--r--  block/blk-tag.c | 17
-rw-r--r--  block/blk-timeout.c | 22
-rw-r--r--  block/blk.h | 51
-rw-r--r--  block/bsg.c | 8
-rw-r--r--  block/cfq-iosched.c | 38
-rw-r--r--  block/compat_ioctl.c | 4
-rw-r--r--  block/deadline-iosched.c | 2
-rw-r--r--  block/elevator.c | 185
-rw-r--r--  block/genhd.c | 11
-rw-r--r--  block/ioctl.c | 12
-rw-r--r--  block/scsi_ioctl.c | 13
-rw-r--r--  drivers/ata/libata-scsi.c | 2
-rw-r--r--  drivers/block/DAC960.c | 10
-rw-r--r--  drivers/block/Kconfig | 2
-rw-r--r--  drivers/block/amiflop.c | 54
-rw-r--r--  drivers/block/ataflop.c | 66
-rw-r--r--  drivers/block/brd.c | 7
-rw-r--r--  drivers/block/cciss.c | 927
-rw-r--r--  drivers/block/cciss.h | 34
-rw-r--r--  drivers/block/cciss_cmd.h | 2
-rw-r--r--  drivers/block/cciss_scsi.c | 109
-rw-r--r--  drivers/block/cpqarray.c | 20
-rw-r--r--  drivers/block/floppy.c | 85
-rw-r--r--  drivers/block/hd.c | 106
-rw-r--r--  drivers/block/loop.c | 37
-rw-r--r--  drivers/block/mg_disk.c | 537
-rw-r--r--  drivers/block/nbd.c | 23
-rw-r--r--  drivers/block/paride/pcd.c | 29
-rw-r--r--  drivers/block/paride/pd.c | 22
-rw-r--r--  drivers/block/paride/pf.c | 47
-rw-r--r--  drivers/block/pktcdvd.c | 8
-rw-r--r--  drivers/block/ps3disk.c | 24
-rw-r--r--  drivers/block/sunvdc.c | 14
-rw-r--r--  drivers/block/swim.c | 48
-rw-r--r--  drivers/block/swim3.c | 107
-rw-r--r--  drivers/block/sx8.c | 17
-rw-r--r--  drivers/block/ub.c | 54
-rw-r--r--  drivers/block/viodasd.c | 12
-rw-r--r--  drivers/block/virtio_blk.c | 110
-rw-r--r--  drivers/block/xd.c | 41
-rw-r--r--  drivers/block/xen-blkfront.c | 34
-rw-r--r--  drivers/block/xsysace.c | 46
-rw-r--r--  drivers/block/z2ram.c | 19
-rw-r--r--  drivers/cdrom/cdrom.c | 4
-rw-r--r--  drivers/cdrom/gdrom.c | 36
-rw-r--r--  drivers/cdrom/viocd.c | 33
-rw-r--r--  drivers/char/raw.c | 2
-rw-r--r--  drivers/ide/ide-atapi.c | 21
-rw-r--r--  drivers/ide/ide-cd.c | 64
-rw-r--r--  drivers/ide/ide-disk.c | 10
-rw-r--r--  drivers/ide/ide-dma.c | 2
-rw-r--r--  drivers/ide/ide-floppy.c | 10
-rw-r--r--  drivers/ide/ide-io.c | 43
-rw-r--r--  drivers/ide/ide-lib.c | 2
-rw-r--r--  drivers/ide/ide-tape.c | 12
-rw-r--r--  drivers/ide/ide-taskfile.c | 2
-rw-r--r--  drivers/ide/pdc202xx_old.c | 2
-rw-r--r--  drivers/ide/tc86c001.c | 2
-rw-r--r--  drivers/ide/tx4939ide.c | 2
-rw-r--r--  drivers/md/bitmap.c | 4
-rw-r--r--  drivers/md/dm-exception-store.c | 2
-rw-r--r--  drivers/md/dm-log.c | 3
-rw-r--r--  drivers/md/dm-snap-persistent.c | 2
-rw-r--r--  drivers/md/dm-table.c | 38
-rw-r--r--  drivers/md/linear.c | 2
-rw-r--r--  drivers/md/md.c | 2
-rw-r--r--  drivers/md/multipath.c | 4
-rw-r--r--  drivers/md/raid0.c | 2
-rw-r--r--  drivers/md/raid1.c | 4
-rw-r--r--  drivers/md/raid10.c | 8
-rw-r--r--  drivers/md/raid5.c | 4
-rw-r--r--  drivers/memstick/core/mspro_block.c | 19
-rw-r--r--  drivers/message/fusion/mptsas.c | 22
-rw-r--r--  drivers/message/i2o/i2o_block.c | 43
-rw-r--r--  drivers/mmc/card/block.c | 12
-rw-r--r--  drivers/mmc/card/queue.c | 11
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 43
-rw-r--r--  drivers/s390/block/dasd.c | 37
-rw-r--r--  drivers/s390/block/dasd_diag.c | 5
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 6
-rw-r--r--  drivers/s390/block/dasd_fba.c | 7
-rw-r--r--  drivers/s390/block/dcssblk.c | 2
-rw-r--r--  drivers/s390/block/xpram.c | 2
-rw-r--r--  drivers/s390/char/tape_34xx.c | 2
-rw-r--r--  drivers/s390/char/tape_3590.c | 2
-rw-r--r--  drivers/s390/char/tape_block.c | 26
-rw-r--r--  drivers/sbus/char/jsflash.c | 26
-rw-r--r--  drivers/scsi/eata.c | 24
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 16
-rw-r--r--  drivers/scsi/libsas/sas_host_smp.c | 49
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 22
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_transport.c | 23
-rw-r--r--  drivers/scsi/osd/osd_initiator.c | 72
-rw-r--r--  drivers/scsi/scsi_lib.c | 87
-rw-r--r--  drivers/scsi/scsi_tgt_lib.c | 2
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 4
-rw-r--r--  drivers/scsi/sd.c | 26
-rw-r--r--  drivers/scsi/sd_dif.c | 2
-rw-r--r--  drivers/scsi/sg.c | 17
-rw-r--r--  drivers/scsi/sr.c | 17
-rw-r--r--  drivers/scsi/st.c | 6
-rw-r--r--  drivers/scsi/u14-34f.c | 22
-rw-r--r--  drivers/usb/storage/scsiglue.c | 4
-rw-r--r--  fs/bio.c | 26
-rw-r--r--  fs/block_dev.c | 6
-rw-r--r--  fs/buffer.c | 6
-rw-r--r--  fs/coda/file.c | 9
-rw-r--r--  fs/direct-io.c | 2
-rw-r--r--  fs/exofs/osd.c | 4
-rw-r--r--  fs/ext3/super.c | 4
-rw-r--r--  fs/ext4/super.c | 2
-rw-r--r--  fs/gfs2/ops_fstype.c | 4
-rw-r--r--  fs/gfs2/rgrp.c | 2
-rw-r--r--  fs/nilfs2/the_nilfs.c | 2
-rw-r--r--  fs/ntfs/super.c | 6
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 2
-rw-r--r--  fs/ocfs2/super.c | 2
-rw-r--r--  fs/partitions/check.c | 10
-rw-r--r--  fs/partitions/ibm.c | 2
-rw-r--r--  fs/partitions/msdos.c | 4
-rw-r--r--  fs/pipe.c | 14
-rw-r--r--  fs/read_write.c | 7
-rw-r--r--  fs/splice.c | 338
-rw-r--r--  fs/udf/super.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 2
-rw-r--r--  include/linux/bio.h | 10
-rw-r--r--  include/linux/blkdev.h | 245
-rw-r--r--  include/linux/device-mapper.h | 2
-rw-r--r--  include/linux/elevator.h | 4
-rw-r--r--  include/linux/fs.h | 2
-rw-r--r--  include/linux/genhd.h | 1
-rw-r--r--  include/linux/iocontext.h | 6
-rw-r--r--  include/linux/loop.h | 3
-rw-r--r--  include/linux/mg_disk.h | 206
-rw-r--r--  include/linux/pipe_fs_i.h | 1
-rw-r--r--  include/linux/splice.h | 3
-rw-r--r--  include/linux/virtio_blk.h | 12
-rw-r--r--  include/scsi/scsi_cmnd.h | 2
-rw-r--r--  include/trace/events/block.h | 29
-rw-r--r--  kernel/trace/blktrace.c | 21
-rw-r--r--  mm/bounce.c | 4
158 files changed, 3790 insertions, 2760 deletions
diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block
index 44f52a4f5903..cbbd3e069945 100644
--- a/Documentation/ABI/testing/sysfs-block
+++ b/Documentation/ABI/testing/sysfs-block
@@ -60,3 +60,62 @@ Description:
Indicates whether the block layer should automatically
generate checksums for write requests bound for
devices that support receiving integrity metadata.
+
+What: /sys/block/<disk>/alignment_offset
+Date: April 2009
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ Storage devices may report a physical block size that is
+ bigger than the logical block size (for instance a drive
+ with 4KB physical sectors exposing 512-byte logical
+ blocks to the operating system). This parameter
+ indicates how many bytes the beginning of the device is
+ offset from the disk's natural alignment.
+
+What: /sys/block/<disk>/<partition>/alignment_offset
+Date: April 2009
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ Storage devices may report a physical block size that is
+ bigger than the logical block size (for instance a drive
+ with 4KB physical sectors exposing 512-byte logical
+ blocks to the operating system). This parameter
+ indicates how many bytes the beginning of the partition
+ is offset from the disk's natural alignment.
+
+What: /sys/block/<disk>/queue/logical_block_size
+Date: May 2009
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ This is the smallest unit the storage device can
+ address. It is typically 512 bytes.
+
+What: /sys/block/<disk>/queue/physical_block_size
+Date: May 2009
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ This is the smallest unit the storage device can write
+ without resorting to read-modify-write operation. It is
+ usually the same as the logical block size but may be
+ bigger. One example is SATA drives with 4KB sectors
+ that expose a 512-byte logical block size to the
+ operating system.
+
+What: /sys/block/<disk>/queue/minimum_io_size
+Date: April 2009
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ Storage devices may report a preferred minimum I/O size,
+ which is the smallest request the device can perform
+ without incurring a read-modify-write penalty. For disk
+ drives this is often the physical block size. For RAID
+ arrays it is often the stripe chunk size.
+
+What: /sys/block/<disk>/queue/optimal_io_size
+Date: April 2009
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ Storage devices may report an optimal I/O size, which is
+ the device's preferred unit of receiving I/O. This is
+ rarely reported for disk drives. For RAID devices it is
+ usually the stripe width or the internal block size.
diff --git a/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss b/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
new file mode 100644
index 000000000000..0a92a7c93a62
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
@@ -0,0 +1,33 @@
+Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/model
+Date: March 2009
+Kernel Version: 2.6.30
+Contact: iss_storagedev@hp.com
+Description: Displays the SCSI INQUIRY page 0 model for logical drive
+ Y of controller X.
+
+Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/rev
+Date: March 2009
+Kernel Version: 2.6.30
+Contact: iss_storagedev@hp.com
+Description: Displays the SCSI INQUIRY page 0 revision for logical
+ drive Y of controller X.
+
+Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/unique_id
+Date: March 2009
+Kernel Version: 2.6.30
+Contact: iss_storagedev@hp.com
+Description: Displays the SCSI INQUIRY page 83 serial number for logical
+ drive Y of controller X.
+
+Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/vendor
+Date: March 2009
+Kernel Version: 2.6.30
+Contact: iss_storagedev@hp.com
+Description: Displays the SCSI INQUIRY page 0 vendor for logical drive
+ Y of controller X.
+
+Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/block:cciss!cXdY
+Date: March 2009
+Kernel Version: 2.6.30
+Contact: iss_storagedev@hp.com
+Description: A symbolic link to /sys/block/cciss!cXdY
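For illustration only (not part of the patch): the cXdY directories sit under the controller's PCI device, so a monitoring tool walks the same path scheme. A short sketch, assuming controller 0, logical drive 0, under a made-up PCI address:

/* Sketch: read the cciss model attribute; the PCI address is hypothetical. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/pci/devices/0000:04:00.0/cciss0/c0d0/model", "r");
	char model[64];

	if (f) {
		if (fgets(model, sizeof(model), f))
			printf("model: %s", model);
		fclose(f);
	}
	return 0;
}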
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 6fab97ea7e6b..8d2158a1c6aa 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -186,7 +186,7 @@ a virtual address mapping (unlike the earlier scheme of virtual address
do not have a corresponding kernel virtual address space mapping) and
low-memory pages.
-Note: Please refer to Documentation/PCI/PCI-DMA-mapping.txt for a discussion
+Note: Please refer to Documentation/DMA-mapping.txt for a discussion
on PCI high mem DMA aspects and mapping of scatter gather lists, and support
for 64 bit PCI.
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index 0abfbaa59871..40424edae939 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -147,24 +147,40 @@ static int __mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg, void *arg)
return ret;
}
+struct omap_msg_tx_data {
+ mbox_msg_t msg;
+ void *arg;
+};
+
+static void omap_msg_tx_end_io(struct request *rq, int error)
+{
+ kfree(rq->special);
+ __blk_put_request(rq->q, rq);
+}
+
int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg, void* arg)
{
+ struct omap_msg_tx_data *tx_data;
struct request *rq;
struct request_queue *q = mbox->txq->queue;
- int ret = 0;
+
+ tx_data = kmalloc(sizeof(*tx_data), GFP_ATOMIC);
+ if (unlikely(!tx_data))
+ return -ENOMEM;
rq = blk_get_request(q, WRITE, GFP_ATOMIC);
if (unlikely(!rq)) {
- ret = -ENOMEM;
- goto fail;
+ kfree(tx_data);
+ return -ENOMEM;
}
- rq->data = (void *)msg;
- blk_insert_request(q, rq, 0, arg);
+ tx_data->msg = msg;
+ tx_data->arg = arg;
+ rq->end_io = omap_msg_tx_end_io;
+ blk_insert_request(q, rq, 0, tx_data);
schedule_work(&mbox->txq->work);
- fail:
- return ret;
+ return 0;
}
EXPORT_SYMBOL(omap_mbox_msg_send);
@@ -178,22 +194,28 @@ static void mbox_tx_work(struct work_struct *work)
struct request_queue *q = mbox->txq->queue;
while (1) {
+ struct omap_msg_tx_data *tx_data;
+
spin_lock(q->queue_lock);
- rq = elv_next_request(q);
+ rq = blk_fetch_request(q);
spin_unlock(q->queue_lock);
if (!rq)
break;
- ret = __mbox_msg_send(mbox, (mbox_msg_t) rq->data, rq->special);
+ tx_data = rq->special;
+
+ ret = __mbox_msg_send(mbox, tx_data->msg, tx_data->arg);
if (ret) {
enable_mbox_irq(mbox, IRQ_TX);
+ spin_lock(q->queue_lock);
+ blk_requeue_request(q, rq);
+ spin_unlock(q->queue_lock);
return;
}
spin_lock(q->queue_lock);
- if (__blk_end_request(rq, 0, 0))
- BUG();
+ __blk_end_request_all(rq, 0);
spin_unlock(q->queue_lock);
}
}
@@ -218,16 +240,13 @@ static void mbox_rx_work(struct work_struct *work)
while (1) {
spin_lock_irqsave(q->queue_lock, flags);
- rq = elv_next_request(q);
+ rq = blk_fetch_request(q);
spin_unlock_irqrestore(q->queue_lock, flags);
if (!rq)
break;
- msg = (mbox_msg_t) rq->data;
-
- if (blk_end_request(rq, 0, 0))
- BUG();
-
+ msg = (mbox_msg_t)rq->special;
+ blk_end_request_all(rq, 0);
mbox->rxq->callback((void *)msg);
}
}
@@ -264,7 +283,6 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
goto nomem;
msg = mbox_fifo_read(mbox);
- rq->data = (void *)msg;
if (unlikely(mbox_seq_test(mbox, msg))) {
pr_info("mbox: Illegal seq bit!(%08x)\n", msg);
@@ -272,7 +290,7 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
mbox->err_notify();
}
- blk_insert_request(q, rq, 0, NULL);
+ blk_insert_request(q, rq, 0, (void *)msg);
if (mbox->ops->type == OMAP_MBOX_TYPE1)
break;
}
@@ -329,16 +347,15 @@ omap_mbox_read(struct device *dev, struct device_attribute *attr, char *buf)
while (1) {
spin_lock_irqsave(q->queue_lock, flags);
- rq = elv_next_request(q);
+ rq = blk_fetch_request(q);
spin_unlock_irqrestore(q->queue_lock, flags);
if (!rq)
break;
- *p = (mbox_msg_t) rq->data;
+ *p = (mbox_msg_t)rq->special;
- if (blk_end_request(rq, 0, 0))
- BUG();
+ blk_end_request_all(rq, 0);
if (unlikely(mbox_seq_test(mbox, *p))) {
pr_info("mbox: Illegal seq bit!(%08x) ignored\n", *p);
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 9e105cbc5e5f..a4779912a5ca 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -250,7 +250,7 @@ axon_ram_probe(struct of_device *device, const struct of_device_id *device_id)
set_capacity(bank->disk, bank->size >> AXON_RAM_SECTOR_SHIFT);
blk_queue_make_request(bank->disk->queue, axon_ram_make_request);
- blk_queue_hardsect_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE);
+ blk_queue_logical_block_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE);
add_disk(bank->disk);
bank->irq_id = irq_of_parse_and_map(device->node, 0);
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index f934225fd8ef..aa9e926e13d7 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -451,23 +451,6 @@ static void do_ubd_request(struct request_queue * q);
/* Only changed by ubd_init, which is an initcall. */
static int thread_fd = -1;
-
-static void ubd_end_request(struct request *req, int bytes, int error)
-{
- blk_end_request(req, error, bytes);
-}
-
-/* Callable only from interrupt context - otherwise you need to do
- * spin_lock_irq()/spin_lock_irqsave() */
-static inline void ubd_finish(struct request *req, int bytes)
-{
- if(bytes < 0){
- ubd_end_request(req, 0, -EIO);
- return;
- }
- ubd_end_request(req, bytes, 0);
-}
-
static LIST_HEAD(restart);
/* XXX - move this inside ubd_intr. */
@@ -475,7 +458,6 @@ static LIST_HEAD(restart);
static void ubd_handler(void)
{
struct io_thread_req *req;
- struct request *rq;
struct ubd *ubd;
struct list_head *list, *next_ele;
unsigned long flags;
@@ -492,10 +474,7 @@ static void ubd_handler(void)
return;
}
- rq = req->req;
- rq->nr_sectors -= req->length >> 9;
- if(rq->nr_sectors == 0)
- ubd_finish(rq, rq->hard_nr_sectors << 9);
+ blk_end_request(req->req, 0, req->length);
kfree(req);
}
reactivate_fd(thread_fd, UBD_IRQ);
@@ -1243,27 +1222,26 @@ static void do_ubd_request(struct request_queue *q)
{
struct io_thread_req *io_req;
struct request *req;
- int n, last_sectors;
+ sector_t sector;
+ int n;
while(1){
struct ubd *dev = q->queuedata;
if(dev->end_sg == 0){
- struct request *req = elv_next_request(q);
+ struct request *req = blk_fetch_request(q);
if(req == NULL)
return;
dev->request = req;
- blkdev_dequeue_request(req);
dev->start_sg = 0;
dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
}
req = dev->request;
- last_sectors = 0;
+ sector = blk_rq_pos(req);
while(dev->start_sg < dev->end_sg){
struct scatterlist *sg = &dev->sg[dev->start_sg];
- req->sector += last_sectors;
io_req = kmalloc(sizeof(struct io_thread_req),
GFP_ATOMIC);
if(io_req == NULL){
@@ -1272,10 +1250,10 @@ static void do_ubd_request(struct request_queue *q)
return;
}
prepare_request(req, io_req,
- (unsigned long long) req->sector << 9,
+ (unsigned long long)sector << 9,
sg->offset, sg->length, sg_page(sg));
- last_sectors = sg->length >> 9;
+ sector += sg->length >> 9;
n = os_write_file(thread_fd, &io_req,
sizeof(struct io_thread_req *));
if(n != sizeof(struct io_thread_req *)){
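The ubd loop above also illustrates the other half of the conversion: request fields like ->sector and ->nr_sectors become private to the block layer, and drivers keep their own progress state (the local sector cursor) while reading request state only through accessors. A hedged sketch of the accessor vocabulary this series introduces, using a hypothetical debug helper:

/* Sketch: inspect a request using only the new accessors. */
#include <linux/blkdev.h>

static void mydev_log_rq(struct request *rq)
{
	pr_debug("rq %p: pos %llu, %u sectors / %u bytes left, cur seg %u sectors\n",
		 rq,
		 (unsigned long long)blk_rq_pos(rq),	/* start sector */
		 blk_rq_sectors(rq),			/* sectors left */
		 blk_rq_bytes(rq),			/* bytes left */
		 blk_rq_cur_sectors(rq));		/* current segment */
}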
diff --git a/block/Kconfig b/block/Kconfig
index e7d12782bcfb..2c39527aa7db 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -26,6 +26,7 @@ if BLOCK
config LBD
bool "Support for large block devices and files"
depends on !64BIT
+ default y
help
Enable block devices or files of size 2TB and larger.
@@ -38,11 +39,13 @@ config LBD
The ext4 filesystem requires that this feature be enabled in
order to support filesystems that have the huge_file feature
- enabled. Otherwise, it will refuse to mount any filesystems
- that use the huge_file feature, which is enabled by default
- by mke2fs.ext4. The GFS2 filesystem also requires this feature.
+ enabled. Otherwise, it will refuse to mount in the read-write
+ mode any filesystems that use the huge_file feature, which is
+ enabled by default by mke2fs.ext4.
- If unsure, say N.
+ The GFS2 filesystem also requires this feature.
+
+ If unsure, say Y.
config BLK_DEV_BSG
bool "Block layer SG support v4 (EXPERIMENTAL)"
diff --git a/block/as-iosched.c b/block/as-iosched.c
index c48fa670d221..7a12cf6ee1d3 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -306,8 +306,8 @@ as_choose_req(struct as_data *ad, struct request *rq1, struct request *rq2)
data_dir = rq_is_sync(rq1);
last = ad->last_sector[data_dir];
- s1 = rq1->sector;
- s2 = rq2->sector;
+ s1 = blk_rq_pos(rq1);
+ s2 = blk_rq_pos(rq2);
BUG_ON(data_dir != rq_is_sync(rq2));
@@ -566,13 +566,15 @@ static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
as_update_thinktime(ad, aic, thinktime);
/* Calculate read -> read seek distance */
- if (aic->last_request_pos < rq->sector)
- seek_dist = rq->sector - aic->last_request_pos;
+ if (aic->last_request_pos < blk_rq_pos(rq))
+ seek_dist = blk_rq_pos(rq) -
+ aic->last_request_pos;
else
- seek_dist = aic->last_request_pos - rq->sector;
+ seek_dist = aic->last_request_pos -
+ blk_rq_pos(rq);
as_update_seekdist(ad, aic, seek_dist);
}
- aic->last_request_pos = rq->sector + rq->nr_sectors;
+ aic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
set_bit(AS_TASK_IOSTARTED, &aic->state);
spin_unlock(&aic->lock);
}
@@ -587,7 +589,7 @@ static int as_close_req(struct as_data *ad, struct as_io_context *aic,
{
unsigned long delay; /* jiffies */
sector_t last = ad->last_sector[ad->batch_data_dir];
- sector_t next = rq->sector;
+ sector_t next = blk_rq_pos(rq);
sector_t delta; /* acceptable close offset (in sectors) */
sector_t s;
@@ -981,7 +983,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
* This has to be set in order to be correctly updated by
* as_find_next_rq
*/
- ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;
+ ad->last_sector[data_dir] = blk_rq_pos(rq) + blk_rq_sectors(rq);
if (data_dir == BLK_RW_SYNC) {
struct io_context *ioc = RQ_IOC(rq);
@@ -1312,12 +1314,8 @@ static void as_merged_requests(struct request_queue *q, struct request *req,
static void as_work_handler(struct work_struct *work)
{
struct as_data *ad = container_of(work, struct as_data, antic_work);
- struct request_queue *q = ad->q;
- unsigned long flags;
- spin_lock_irqsave(q->queue_lock, flags);
- blk_start_queueing(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ blk_run_queue(ad->q);
}
static int as_may_queue(struct request_queue *q, int rw)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 20b4111fa050..30022b4e2f63 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -106,10 +106,7 @@ bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
*/
q->ordseq = 0;
rq = q->orig_bar_rq;
-
- if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
- BUG();
-
+ __blk_end_request_all(rq, q->orderr);
return true;
}
@@ -166,7 +163,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
* For an empty barrier, there's no actual BAR request, which
* in turn makes POSTFLUSH unnecessary. Mask them off.
*/
- if (!rq->hard_nr_sectors) {
+ if (!blk_rq_sectors(rq)) {
q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
QUEUE_ORDERED_DO_POSTFLUSH);
/*
@@ -183,7 +180,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
}
/* stash away the original request */
- elv_dequeue_request(q, rq);
+ blk_dequeue_request(rq);
q->orig_bar_rq = rq;
rq = NULL;
@@ -221,7 +218,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
} else
skip |= QUEUE_ORDSEQ_PREFLUSH;
- if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
+ if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
rq = NULL;
else
skip |= QUEUE_ORDSEQ_DRAIN;
@@ -251,10 +248,8 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
* Queue ordering not supported. Terminate
* with prejudice.
*/
- elv_dequeue_request(q, rq);
- if (__blk_end_request(rq, -EOPNOTSUPP,
- blk_rq_bytes(rq)))
- BUG();
+ blk_dequeue_request(rq);
+ __blk_end_request_all(rq, -EOPNOTSUPP);
*rqp = NULL;
return false;
}
@@ -329,7 +324,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
/*
* The driver must store the error location in ->bi_sector, if
* it supports it. For non-stacked drivers, this should be copied
- * from rq->sector.
+ * from blk_rq_pos(rq).
*/
if (error_sector)
*error_sector = bio->bi_sector;
@@ -393,10 +388,10 @@ int blkdev_issue_discard(struct block_device *bdev,
bio->bi_sector = sector;
- if (nr_sects > q->max_hw_sectors) {
- bio->bi_size = q->max_hw_sectors << 9;
- nr_sects -= q->max_hw_sectors;
- sector += q->max_hw_sectors;
+ if (nr_sects > queue_max_hw_sectors(q)) {
+ bio->bi_size = queue_max_hw_sectors(q) << 9;
+ nr_sects -= queue_max_hw_sectors(q);
+ sector += queue_max_hw_sectors(q);
} else {
bio->bi_size = nr_sects << 9;
nr_sects = 0;
diff --git a/block/blk-core.c b/block/blk-core.c
index 648f15cb41f1..d17d71c71d4f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -60,11 +60,11 @@ static void drive_stat_acct(struct request *rq, int new_io)
int rw = rq_data_dir(rq);
int cpu;
- if (!blk_fs_request(rq) || !blk_do_io_stat(rq))
+ if (!blk_do_io_stat(rq))
return;
cpu = part_stat_lock();
- part = disk_map_sector_rcu(rq->rq_disk, rq->sector);
+ part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
if (!new_io)
part_stat_inc(cpu, part, merges[rw]);
@@ -119,13 +119,14 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
INIT_LIST_HEAD(&rq->timeout_list);
rq->cpu = -1;
rq->q = q;
- rq->sector = rq->hard_sector = (sector_t) -1;
+ rq->__sector = (sector_t) -1;
INIT_HLIST_NODE(&rq->hash);
RB_CLEAR_NODE(&rq->rb_node);
rq->cmd = rq->__cmd;
rq->cmd_len = BLK_MAX_CDB;
rq->tag = -1;
rq->ref_count = 1;
+ rq->start_time = jiffies;
}
EXPORT_SYMBOL(blk_rq_init);
@@ -176,14 +177,11 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
rq->cmd_flags);
- printk(KERN_INFO " sector %llu, nr/cnr %lu/%u\n",
- (unsigned long long)rq->sector,
- rq->nr_sectors,
- rq->current_nr_sectors);
- printk(KERN_INFO " bio %p, biotail %p, buffer %p, data %p, len %u\n",
- rq->bio, rq->biotail,
- rq->buffer, rq->data,
- rq->data_len);
+ printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
+ (unsigned long long)blk_rq_pos(rq),
+ blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
+ printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n",
+ rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
if (blk_pc_request(rq)) {
printk(KERN_INFO " cdb: ");
@@ -325,24 +323,6 @@ void blk_unplug(struct request_queue *q)
}
EXPORT_SYMBOL(blk_unplug);
-static void blk_invoke_request_fn(struct request_queue *q)
-{
- if (unlikely(blk_queue_stopped(q)))
- return;
-
- /*
- * one level of recursion is ok and is much faster than kicking
- * the unplug handling
- */
- if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
- q->request_fn(q);
- queue_flag_clear(QUEUE_FLAG_REENTER, q);
- } else {
- queue_flag_set(QUEUE_FLAG_PLUGGED, q);
- kblockd_schedule_work(q, &q->unplug_work);
- }
-}
-
/**
* blk_start_queue - restart a previously stopped queue
* @q: The &struct request_queue in question
@@ -357,7 +337,7 @@ void blk_start_queue(struct request_queue *q)
WARN_ON(!irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
- blk_invoke_request_fn(q);
+ __blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);
@@ -417,12 +397,23 @@ void __blk_run_queue(struct request_queue *q)
{
blk_remove_plug(q);
+ if (unlikely(blk_queue_stopped(q)))
+ return;
+
+ if (elv_queue_empty(q))
+ return;
+
/*
* Only recurse once to avoid overrunning the stack, let the unplug
* handling reinvoke the handler shortly if we already got there.
*/
- if (!elv_queue_empty(q))
- blk_invoke_request_fn(q);
+ if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+ q->request_fn(q);
+ queue_flag_clear(QUEUE_FLAG_REENTER, q);
+ } else {
+ queue_flag_set(QUEUE_FLAG_PLUGGED, q);
+ kblockd_schedule_work(q, &q->unplug_work);
+ }
}
EXPORT_SYMBOL(__blk_run_queue);
@@ -432,9 +423,7 @@ EXPORT_SYMBOL(__blk_run_queue);
*
* Description:
* Invoke request handling on this queue, if it has pending work to do.
- * May be used to restart queueing when a request has completed. Also
- * See @blk_start_queueing.
- *
+ * May be used to restart queueing when a request has completed.
*/
void blk_run_queue(struct request_queue *q)
{
@@ -894,26 +883,58 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
EXPORT_SYMBOL(blk_get_request);
/**
- * blk_start_queueing - initiate dispatch of requests to device
- * @q: request queue to kick into gear
+ * blk_make_request - given a bio, allocate a corresponding struct request.
+ *
+ * @bio: The bio describing the memory mappings that will be submitted for IO.
+ * It may be a chained-bio properly constructed by block/bio layer.
*
- * This is basically a helper to remove the need to know whether a queue
- * is plugged or not if someone just wants to initiate dispatch of requests
- * for this queue. Should be used to start queueing on a device outside
- * of ->request_fn() context. Also see @blk_run_queue.
+ * blk_make_request is the parallel of generic_make_request for BLOCK_PC
+ * type commands, where the struct request needs to be further initialized by
+ * the caller. It is passed a &struct bio, which describes the memory info of
+ * the I/O transfer.
*
- * The queue lock must be held with interrupts disabled.
+ * The caller of blk_make_request must make sure that bi_io_vec
+ * are set to describe the memory buffers, and that bio_data_dir() will return
+ * the needed direction of the request (and that all bios in the passed
+ * bio-chain are properly set accordingly).
+ *
+ * If called under none-sleepable conditions, mapped bio buffers must not
+ * need bouncing, by calling the appropriate masked or flagged allocator,
+ * suitable for the target device. Otherwise the call to blk_queue_bounce will
+ * BUG.
+ *
+ * WARNING: When allocating/cloning a bio-chain, careful consideration should be
+ * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
+ * anything but the first bio in the chain. Otherwise you risk waiting for IO
+ * completion of a bio that hasn't been submitted yet, thus resulting in a
+ * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
+ * of bio_alloc(), as that avoids the mempool deadlock.
+ * If possible a big IO should be split into smaller parts when allocation
+ * fails. Partial allocation should not be an error, or you risk a live-lock.
*/
-void blk_start_queueing(struct request_queue *q)
+struct request *blk_make_request(struct request_queue *q, struct bio *bio,
+ gfp_t gfp_mask)
{
- if (!blk_queue_plugged(q)) {
- if (unlikely(blk_queue_stopped(q)))
- return;
- q->request_fn(q);
- } else
- __generic_unplug_device(q);
+ struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
+
+ if (unlikely(!rq))
+ return ERR_PTR(-ENOMEM);
+
+ for_each_bio(bio) {
+ struct bio *bounce_bio = bio;
+ int ret;
+
+ blk_queue_bounce(q, &bounce_bio);
+ ret = blk_rq_append_bio(q, rq, bounce_bio);
+ if (unlikely(ret)) {
+ blk_put_request(rq);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return rq;
}
-EXPORT_SYMBOL(blk_start_queueing);
+EXPORT_SYMBOL(blk_make_request);
/**
* blk_requeue_request - put a request back on queue
@@ -934,6 +955,8 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
if (blk_rq_tagged(rq))
blk_queue_end_tag(q, rq);
+ BUG_ON(blk_queued_rq(rq));
+
elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);
@@ -969,7 +992,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
* barrier
*/
rq->cmd_type = REQ_TYPE_SPECIAL;
- rq->cmd_flags |= REQ_SOFTBARRIER;
rq->special = data;
@@ -983,7 +1005,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
drive_stat_acct(rq, 1);
__elv_add_request(q, rq, where, 0);
- blk_start_queueing(q);
+ __blk_run_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_insert_request);
@@ -1105,16 +1127,13 @@ void init_request_from_bio(struct request *req, struct bio *bio)
if (bio_failfast_driver(bio))
req->cmd_flags |= REQ_FAILFAST_DRIVER;
- /*
- * REQ_BARRIER implies no merging, but lets make it explicit
- */
if (unlikely(bio_discard(bio))) {
req->cmd_flags |= REQ_DISCARD;
if (bio_barrier(bio))
req->cmd_flags |= REQ_SOFTBARRIER;
req->q->prepare_discard_fn(req->q, req);
} else if (unlikely(bio_barrier(bio)))
- req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+ req->cmd_flags |= REQ_HARDBARRIER;
if (bio_sync(bio))
req->cmd_flags |= REQ_RW_SYNC;
@@ -1124,9 +1143,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
req->cmd_flags |= REQ_NOIDLE;
req->errors = 0;
- req->hard_sector = req->sector = bio->bi_sector;
+ req->__sector = bio->bi_sector;
req->ioprio = bio_prio(bio);
- req->start_time = jiffies;
blk_rq_bio_prep(req->q, req, bio);
}
@@ -1142,14 +1160,13 @@ static inline bool queue_should_plug(struct request_queue *q)
static int __make_request(struct request_queue *q, struct bio *bio)
{
struct request *req;
- int el_ret, nr_sectors;
+ int el_ret;
+ unsigned int bytes = bio->bi_size;
const unsigned short prio = bio_prio(bio);
const int sync = bio_sync(bio);
const int unplug = bio_unplug(bio);
int rw_flags;
- nr_sectors = bio_sectors(bio);
-
/*
* low level driver can indicate that it wants pages above a
* certain limit bounced to low memory (ie for highmem, or even
@@ -1174,7 +1191,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
req->biotail->bi_next = bio;
req->biotail = bio;
- req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+ req->__data_len += bytes;
req->ioprio = ioprio_best(req->ioprio, prio);
if (!blk_rq_cpu_valid(req))
req->cpu = bio->bi_comp_cpu;
@@ -1200,10 +1217,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
* not touch req->buffer either...
*/
req->buffer = bio_data(bio);
- req->current_nr_sectors = bio_cur_sectors(bio);
- req->hard_cur_sectors = req->current_nr_sectors;
- req->sector = req->hard_sector = bio->bi_sector;
- req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+ req->__sector = bio->bi_sector;
+ req->__data_len += bytes;
req->ioprio = ioprio_best(req->ioprio, prio);
if (!blk_rq_cpu_valid(req))
req->cpu = bio->bi_comp_cpu;
@@ -1414,11 +1429,11 @@ static inline void __generic_make_request(struct bio *bio)
goto end_io;
}
- if (unlikely(nr_sectors > q->max_hw_sectors)) {
+ if (unlikely(nr_sectors > queue_max_hw_sectors(q))) {
printk(KERN_ERR "bio too big device %s (%u > %u)\n",
- bdevname(bio->bi_bdev, b),
- bio_sectors(bio),
- q->max_hw_sectors);
+ bdevname(bio->bi_bdev, b),
+ bio_sectors(bio),
+ queue_max_hw_sectors(q));
goto end_io;
}
@@ -1584,8 +1599,8 @@ EXPORT_SYMBOL(submit_bio);
*/
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
- if (rq->nr_sectors > q->max_sectors ||
- rq->data_len > q->max_hw_sectors << 9) {
+ if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
+ blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
printk(KERN_ERR "%s: over max size limit.\n", __func__);
return -EIO;
}
@@ -1597,8 +1612,8 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
* limitation.
*/
blk_recalc_rq_segments(rq);
- if (rq->nr_phys_segments > q->max_phys_segments ||
- rq->nr_phys_segments > q->max_hw_segments) {
+ if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
+ rq->nr_phys_segments > queue_max_hw_segments(q)) {
printk(KERN_ERR "%s: over max segments limit.\n", __func__);
return -EIO;
}
@@ -1642,40 +1657,15 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
-/**
- * blkdev_dequeue_request - dequeue request and start timeout timer
- * @req: request to dequeue
- *
- * Dequeue @req and start timeout timer on it. This hands off the
- * request to the driver.
- *
- * Block internal functions which don't want to start timer should
- * call elv_dequeue_request().
- */
-void blkdev_dequeue_request(struct request *req)
-{
- elv_dequeue_request(req->q, req);
-
- /*
- * We are now handing the request to the hardware, add the
- * timeout handler.
- */
- blk_add_timer(req);
-}
-EXPORT_SYMBOL(blkdev_dequeue_request);
-
static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
- if (!blk_do_io_stat(req))
- return;
-
- if (blk_fs_request(req)) {
+ if (blk_do_io_stat(req)) {
const int rw = rq_data_dir(req);
struct hd_struct *part;
int cpu;
cpu = part_stat_lock();
- part = disk_map_sector_rcu(req->rq_disk, req->sector);
+ part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
part_stat_add(cpu, part, sectors[rw], bytes >> 9);
part_stat_unlock();
}
@@ -1683,22 +1673,19 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
static void blk_account_io_done(struct request *req)
{
- if (!blk_do_io_stat(req))
- return;
-
/*
* Account IO completion. bar_rq isn't accounted as a normal
* IO on queueing nor completion. Accounting the containing
* request is enough.
*/
- if (blk_fs_request(req) && req != &req->q->bar_rq) {
+ if (blk_do_io_stat(req) && req != &req->q->bar_rq) {
unsigned long duration = jiffies - req->start_time;
const int rw = rq_data_dir(req);
struct hd_struct *part;
int cpu;
cpu = part_stat_lock();
- part = disk_map_sector_rcu(req->rq_disk, req->sector);
+ part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
part_stat_inc(cpu, part, ios[rw]);
part_stat_add(cpu, part, ticks[rw], duration);
@@ -1710,25 +1697,209 @@ static void blk_account_io_done(struct request *req)
}
/**
- * __end_that_request_first - end I/O on a request
- * @req: the request being processed
+ * blk_peek_request - peek at the top of a request queue
+ * @q: request queue to peek at
+ *
+ * Description:
+ * Return the request at the top of @q. The returned request
+ * should be started using blk_start_request() before LLD starts
+ * processing it.
+ *
+ * Return:
+ * Pointer to the request at the top of @q if available. Null
+ * otherwise.
+ *
+ * Context:
+ * queue_lock must be held.
+ */
+struct request *blk_peek_request(struct request_queue *q)
+{
+ struct request *rq;
+ int ret;
+
+ while ((rq = __elv_next_request(q)) != NULL) {
+ if (!(rq->cmd_flags & REQ_STARTED)) {
+ /*
+ * This is the first time the device driver
+ * sees this request (possibly after
+ * requeueing). Notify IO scheduler.
+ */
+ if (blk_sorted_rq(rq))
+ elv_activate_rq(q, rq);
+
+ /*
+ * just mark as started even if we don't start
+ * it, a request that has been delayed should
+ * not be passed by new incoming requests
+ */
+ rq->cmd_flags |= REQ_STARTED;
+ trace_block_rq_issue(q, rq);
+ }
+
+ if (!q->boundary_rq || q->boundary_rq == rq) {
+ q->end_sector = rq_end_sector(rq);
+ q->boundary_rq = NULL;
+ }
+
+ if (rq->cmd_flags & REQ_DONTPREP)
+ break;
+
+ if (q->dma_drain_size && blk_rq_bytes(rq)) {
+ /*
+ * make sure space for the drain appears; we
+ * know we can do this because max_hw_segments
+ * has been adjusted to be one fewer than the
+ * device can handle
+ */
+ rq->nr_phys_segments++;
+ }
+
+ if (!q->prep_rq_fn)
+ break;
+
+ ret = q->prep_rq_fn(q, rq);
+ if (ret == BLKPREP_OK) {
+ break;
+ } else if (ret == BLKPREP_DEFER) {
+ /*
+ * the request may have been (partially) prepped.
+ * we need to keep this request in the front to
+ * avoid resource deadlock. REQ_STARTED will
+ * prevent other fs requests from passing this one.
+ */
+ if (q->dma_drain_size && blk_rq_bytes(rq) &&
+ !(rq->cmd_flags & REQ_DONTPREP)) {
+ /*
+ * remove the space for the drain we added
+ * so that we don't add it again
+ */
+ --rq->nr_phys_segments;
+ }
+
+ rq = NULL;
+ break;
+ } else if (ret == BLKPREP_KILL) {
+ rq->cmd_flags |= REQ_QUIET;
+ /*
+ * Mark this request as started so we don't trigger
+ * any debug logic in the end I/O path.
+ */
+ blk_start_request(rq);
+ __blk_end_request_all(rq, -EIO);
+ } else {
+ printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
+ break;
+ }
+ }
+
+ return rq;
+}
+EXPORT_SYMBOL(blk_peek_request);
+
+void blk_dequeue_request(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+
+ BUG_ON(list_empty(&rq->queuelist));
+ BUG_ON(ELV_ON_HASH(rq));
+
+ list_del_init(&rq->queuelist);
+
+ /*
+ * the time frame between a request being removed from the lists
+ * and when it is freed is accounted as I/O that is in progress at
+ * the driver side.
+ */
+ if (blk_account_rq(rq))
+ q->in_flight[rq_is_sync(rq)]++;
+}
+
+/**
+ * blk_start_request - start request processing on the driver
+ * @req: request to dequeue
+ *
+ * Description:
+ * Dequeue @req and start timeout timer on it. This hands off the
+ * request to the driver.
+ *
+ * Block internal functions which don't want to start timer should
+ * call blk_dequeue_request().
+ *
+ * Context:
+ * queue_lock must be held.
+ */
+void blk_start_request(struct request *req)
+{
+ blk_dequeue_request(req);
+
+ /*
+ * We are now handing the request to the hardware, initialize
+ * resid_len to full count and add the timeout handler.
+ */
+ req->resid_len = blk_rq_bytes(req);
+ if (unlikely(blk_bidi_rq(req)))
+ req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
+
+ blk_add_timer(req);
+}
+EXPORT_SYMBOL(blk_start_request);
+
+/**
+ * blk_fetch_request - fetch a request from a request queue
+ * @q: request queue to fetch a request from
+ *
+ * Description:
+ * Return the request at the top of @q. The request is started on
+ * return and LLD can start processing it immediately.
+ *
+ * Return:
+ * Pointer to the request at the top of @q if available. Null
+ * otherwise.
+ *
+ * Context:
+ * queue_lock must be held.
+ */
+struct request *blk_fetch_request(struct request_queue *q)
+{
+ struct request *rq;
+
+ rq = blk_peek_request(q);
+ if (rq)
+ blk_start_request(rq);
+ return rq;
+}
+EXPORT_SYMBOL(blk_fetch_request);
+
+/**
+ * blk_update_request - Special helper function for request stacking drivers
+ * @rq: the request being processed
* @error: %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete
+ * @nr_bytes: number of bytes to complete @rq
*
* Description:
- * Ends I/O on a number of bytes attached to @req, and sets it up
- * for the next range of segments (if any) in the cluster.
+ * Ends I/O on a number of bytes attached to @rq, but doesn't complete
+ * the request structure even if @rq doesn't have leftover.
+ * If @rq has leftover, sets it up for the next range of segments.
+ *
+ * This special helper function is only for request stacking drivers
+ * (e.g. request-based dm) so that they can handle partial completion.
+ * Actual device drivers should use blk_end_request instead.
+ *
+ * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
+ * %false return from this function.
*
* Return:
- * %0 - we are done with this request, call end_that_request_last()
- * %1 - still buffers pending for this request
+ * %false - this request doesn't have any more data
+ * %true - this request has more data
**/
-static int __end_that_request_first(struct request *req, int error,
- int nr_bytes)
+bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
{
int total_bytes, bio_nbytes, next_idx = 0;
struct bio *bio;
+ if (!req->bio)
+ return false;
+
trace_block_rq_complete(req->q, req);
/*
@@ -1745,7 +1916,7 @@ static int __end_that_request_first(struct request *req, int error,
if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
req->rq_disk ? req->rq_disk->disk_name : "?",
- (unsigned long long)req->sector);
+ (unsigned long long)blk_rq_pos(req));
}
blk_account_io_completion(req, nr_bytes);
@@ -1805,8 +1976,15 @@ static int __end_that_request_first(struct request *req, int error,
/*
* completely done
*/
- if (!req->bio)
- return 0;
+ if (!req->bio) {
+ /*
+ * Reset counters so that the request stacking driver
+ * can find how many bytes remain in the request
+ * later.
+ */
+ req->__data_len = 0;
+ return false;
+ }
/*
* if the request wasn't completed, update state
@@ -1818,21 +1996,55 @@ static int __end_that_request_first(struct request *req, int error,
bio_iovec(bio)->bv_len -= nr_bytes;
}
- blk_recalc_rq_sectors(req, total_bytes >> 9);
+ req->__data_len -= total_bytes;
+ req->buffer = bio_data(req->bio);
+
+ /* update sector only for requests with clear definition of sector */
+ if (blk_fs_request(req) || blk_discard_rq(req))
+ req->__sector += total_bytes >> 9;
+
+ /*
+ * If total number of sectors is less than the first segment
+ * size, something has gone terribly wrong.
+ */
+ if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
+ printk(KERN_ERR "blk: request botched\n");
+ req->__data_len = blk_rq_cur_bytes(req);
+ }
+
+ /* recalculate the number of segments */
blk_recalc_rq_segments(req);
- return 1;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(blk_update_request);
+
+static bool blk_update_bidi_request(struct request *rq, int error,
+ unsigned int nr_bytes,
+ unsigned int bidi_bytes)
+{
+ if (blk_update_request(rq, error, nr_bytes))
+ return true;
+
+ /* Bidi request must be completed as a whole */
+ if (unlikely(blk_bidi_rq(rq)) &&
+ blk_update_request(rq->next_rq, error, bidi_bytes))
+ return true;
+
+ add_disk_randomness(rq->rq_disk);
+
+ return false;
}
/*
* queue lock must be held
*/
-static void end_that_request_last(struct request *req, int error)
+static void blk_finish_request(struct request *req, int error)
{
if (blk_rq_tagged(req))
blk_queue_end_tag(req->q, req);
- if (blk_queued_rq(req))
- elv_dequeue_request(req->q, req);
+ BUG_ON(blk_queued_rq(req));
if (unlikely(laptop_mode) && blk_fs_request(req))
laptop_io_completion();
@@ -1852,117 +2064,62 @@ static void end_that_request_last(struct request *req, int error)
}
/**
- * blk_rq_bytes - Returns bytes left to complete in the entire request
- * @rq: the request being processed
- **/
-unsigned int blk_rq_bytes(struct request *rq)
-{
- if (blk_fs_request(rq))
- return rq->hard_nr_sectors << 9;
-
- return rq->data_len;
-}
-EXPORT_SYMBOL_GPL(blk_rq_bytes);
-
-/**
- * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
- * @rq: the request being processed
- **/
-unsigned int blk_rq_cur_bytes(struct request *rq)
-{
- if (blk_fs_request(rq))
- return rq->current_nr_sectors << 9;
-
- if (rq->bio)
- return rq->bio->bi_size;
-
- return rq->data_len;
-}
-EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
-
-/**
- * end_request - end I/O on the current segment of the request
- * @req: the request being processed
- * @uptodate: error value or %0/%1 uptodate flag
+ * blk_end_bidi_request - Complete a bidi request
+ * @rq: the request to complete
+ * @error: %0 for success, < %0 for error
+ * @nr_bytes: number of bytes to complete @rq
+ * @bidi_bytes: number of bytes to complete @rq->next_rq
*
* Description:
- * Ends I/O on the current segment of a request. If that is the only
- * remaining segment, the request is also completed and freed.
- *
- * This is a remnant of how older block drivers handled I/O completions.
- * Modern drivers typically end I/O on the full request in one go, unless
- * they have a residual value to account for. For that case this function
- * isn't really useful, unless the residual just happens to be the
- * full current segment. In other words, don't use this function in new
- * code. Use blk_end_request() or __blk_end_request() to end a request.
+ * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
+ * Drivers that supports bidi can safely call this member for any
+ * type of request, bidi or uni. In the later case @bidi_bytes is
+ * just ignored.
+ *
+ * Return:
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
**/
-void end_request(struct request *req, int uptodate)
-{
- int error = 0;
-
- if (uptodate <= 0)
- error = uptodate ? uptodate : -EIO;
-
- __blk_end_request(req, error, req->hard_cur_sectors << 9);
-}
-EXPORT_SYMBOL(end_request);
-
-static int end_that_request_data(struct request *rq, int error,
+static bool blk_end_bidi_request(struct request *rq, int error,
unsigned int nr_bytes, unsigned int bidi_bytes)
{
- if (rq->bio) {
- if (__end_that_request_first(rq, error, nr_bytes))
- return 1;
+ struct request_queue *q = rq->q;
+ unsigned long flags;
- /* Bidi request must be completed as a whole */
- if (blk_bidi_rq(rq) &&
- __end_that_request_first(rq->next_rq, error, bidi_bytes))
- return 1;
- }
+ if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
+ return true;
- return 0;
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_finish_request(rq, error);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return false;
}
/**
- * blk_end_io - Generic end_io function to complete a request.
- * @rq: the request being processed
- * @error: %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete @rq
- * @bidi_bytes: number of bytes to complete @rq->next_rq
- * @drv_callback: function called between completion of bios in the request
- * and completion of the request.
- * If the callback returns non %0, this helper returns without
- * completion of the request.
+ * __blk_end_bidi_request - Complete a bidi request with queue lock held
+ * @rq: the request to complete
+ * @error: %0 for success, < %0 for error
+ * @nr_bytes: number of bytes to complete @rq
+ * @bidi_bytes: number of bytes to complete @rq->next_rq
*
* Description:
- * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
- * If @rq has leftover, sets it up for the next range of segments.
+ * Identical to blk_end_bidi_request() except that queue lock is
+ * assumed to be locked on entry and remains so on return.
*
* Return:
- * %0 - we are done with this request
- * %1 - this request is not freed yet, it still has pending buffers.
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
**/
-static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
- unsigned int bidi_bytes,
- int (drv_callback)(struct request *))
+static bool __blk_end_bidi_request(struct request *rq, int error,
+ unsigned int nr_bytes, unsigned int bidi_bytes)
{
- struct request_queue *q = rq->q;
- unsigned long flags = 0UL;
-
- if (end_that_request_data(rq, error, nr_bytes, bidi_bytes))
- return 1;
-
- /* Special feature for tricky drivers */
- if (drv_callback && drv_callback(rq))
- return 1;
-
- add_disk_randomness(rq->rq_disk);
+ if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
+ return true;
- spin_lock_irqsave(q->queue_lock, flags);
- end_that_request_last(rq, error);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ blk_finish_request(rq, error);
- return 0;
+ return false;
}
/**
@@ -1976,124 +2133,112 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
* If @rq has leftover, sets it up for the next range of segments.
*
* Return:
- * %0 - we are done with this request
- * %1 - still buffers pending for this request
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
**/
-int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
- return blk_end_io(rq, error, nr_bytes, 0, NULL);
+ return blk_end_bidi_request(rq, error, nr_bytes, 0);
}
EXPORT_SYMBOL_GPL(blk_end_request);
/**
- * __blk_end_request - Helper function for drivers to complete the request.
- * @rq: the request being processed
- * @error: %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete
+ * blk_end_request_all - Helper function for drivers to finish the request.
+ * @rq: the request to finish
+ * @error: %0 for success, < %0 for error
*
* Description:
- * Must be called with queue lock held unlike blk_end_request().
- *
- * Return:
- * %0 - we are done with this request
- * %1 - still buffers pending for this request
- **/
-int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+ * Completely finish @rq.
+ */
+void blk_end_request_all(struct request *rq, int error)
{
- if (rq->bio && __end_that_request_first(rq, error, nr_bytes))
- return 1;
+ bool pending;
+ unsigned int bidi_bytes = 0;
- add_disk_randomness(rq->rq_disk);
+ if (unlikely(blk_bidi_rq(rq)))
+ bidi_bytes = blk_rq_bytes(rq->next_rq);
- end_that_request_last(rq, error);
+ pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
+ BUG_ON(pending);
+}
+EXPORT_SYMBOL_GPL(blk_end_request_all);
- return 0;
+/**
+ * blk_end_request_cur - Helper function to finish the current request chunk.
+ * @rq: the request to finish the current chunk for
+ * @error: %0 for success, < %0 for error
+ *
+ * Description:
+ * Complete the current consecutively mapped chunk from @rq.
+ *
+ * Return:
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
+ */
+bool blk_end_request_cur(struct request *rq, int error)
+{
+ return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}
-EXPORT_SYMBOL_GPL(__blk_end_request);
+EXPORT_SYMBOL_GPL(blk_end_request_cur);
/**
- * blk_end_bidi_request - Helper function for drivers to complete bidi request.
- * @rq: the bidi request being processed
- * @error: %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete @rq
- * @bidi_bytes: number of bytes to complete @rq->next_rq
+ * __blk_end_request - Helper function for drivers to complete the request.
+ * @rq: the request being processed
+ * @error: %0 for success, < %0 for error
+ * @nr_bytes: number of bytes to complete
*
* Description:
- * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
+ * Must be called with queue lock held unlike blk_end_request().
*
* Return:
- * %0 - we are done with this request
- * %1 - still buffers pending for this request
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
**/
-int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
- unsigned int bidi_bytes)
+bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
- return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
+ return __blk_end_bidi_request(rq, error, nr_bytes, 0);
}
-EXPORT_SYMBOL_GPL(blk_end_bidi_request);
+EXPORT_SYMBOL_GPL(__blk_end_request);
/**
- * blk_update_request - Special helper function for request stacking drivers
- * @rq: the request being processed
- * @error: %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete @rq
+ * __blk_end_request_all - Helper function for drivers to finish the request.
+ * @rq: the request to finish
+ * @error: %0 for success, < %0 for error
*
* Description:
- * Ends I/O on a number of bytes attached to @rq, but doesn't complete
- * the request structure even if @rq doesn't have leftover.
- * If @rq has leftover, sets it up for the next range of segments.
- *
- * This special helper function is only for request stacking drivers
- * (e.g. request-based dm) so that they can handle partial completion.
- * Actual device drivers should use blk_end_request instead.
+ * Completely finish @rq. Must be called with queue lock held.
*/
-void blk_update_request(struct request *rq, int error, unsigned int nr_bytes)
+void __blk_end_request_all(struct request *rq, int error)
{
- if (!end_that_request_data(rq, error, nr_bytes, 0)) {
- /*
- * These members are not updated in end_that_request_data()
- * when all bios are completed.
- * Update them so that the request stacking driver can find
- * how many bytes remain in the request later.
- */
- rq->nr_sectors = rq->hard_nr_sectors = 0;
- rq->current_nr_sectors = rq->hard_cur_sectors = 0;
- }
+ bool pending;
+ unsigned int bidi_bytes = 0;
+
+ if (unlikely(blk_bidi_rq(rq)))
+ bidi_bytes = blk_rq_bytes(rq->next_rq);
+
+ pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
+ BUG_ON(pending);
}
-EXPORT_SYMBOL_GPL(blk_update_request);
+EXPORT_SYMBOL_GPL(__blk_end_request_all);
/**
- * blk_end_request_callback - Special helper function for tricky drivers
- * @rq: the request being processed
- * @error: %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete
- * @drv_callback: function called between completion of bios in the request
- * and completion of the request.
- * If the callback returns non %0, this helper returns without
- * completion of the request.
+ * __blk_end_request_cur - Helper function to finish the current request chunk.
+ * @rq: the request to finish the current chunk for
+ * @error: %0 for success, < %0 for error
*
* Description:
- * Ends I/O on a number of bytes attached to @rq.
- * If @rq has leftover, sets it up for the next range of segments.
- *
- * This special helper function is used only for existing tricky drivers.
- * (e.g. cdrom_newpc_intr() of ide-cd)
- * This interface will be removed when such drivers are rewritten.
- * Don't use this interface in other places anymore.
+ * Complete the current consecutively mapped chunk from @rq. Must
+ * be called with queue lock held.
*
* Return:
- * %0 - we are done with this request
- * %1 - this request is not freed yet.
- * this request still has pending buffers or
- * the driver doesn't want to finish this request yet.
- **/
-int blk_end_request_callback(struct request *rq, int error,
- unsigned int nr_bytes,
- int (drv_callback)(struct request *))
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
+ */
+bool __blk_end_request_cur(struct request *rq, int error)
{
- return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
+ return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}
-EXPORT_SYMBOL_GPL(blk_end_request_callback);
+EXPORT_SYMBOL_GPL(__blk_end_request_cur);
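
For illustration only (not part of the patch): a minimal sketch of how a low-level driver might use the new completion pair above. Both helpers expect the queue lock to be held; struct my_dev and my_chunk_done() are hypothetical names.

#include <linux/blkdev.h>

struct my_dev {                         /* hypothetical per-device state */
        struct request *rq;             /* request currently being serviced */
};

/* Called with q->queue_lock held after one chunk has been transferred. */
static void my_chunk_done(struct my_dev *dev, int error)
{
        if (error) {
                /* abort: finish every remaining byte of the request */
                __blk_end_request_all(dev->rq, error);
                dev->rq = NULL;
                return;
        }

        /* finish just this chunk; %false means the request is fully done */
        if (!__blk_end_request_cur(dev->rq, 0))
                dev->rq = NULL;
}
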
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio)
@@ -2106,11 +2251,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
rq->nr_phys_segments = bio_phys_segments(q, bio);
rq->buffer = bio_data(bio);
}
- rq->current_nr_sectors = bio_cur_sectors(bio);
- rq->hard_cur_sectors = rq->current_nr_sectors;
- rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
- rq->data_len = bio->bi_size;
-
+ rq->__data_len = bio->bi_size;
rq->bio = rq->biotail = bio;
if (bio->bi_bdev)
@@ -2145,6 +2286,106 @@ int blk_lld_busy(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_lld_busy);
+/**
+ * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
+ * @rq: the clone request to be cleaned up
+ *
+ * Description:
+ * Free all bios in @rq for a cloned request.
+ */
+void blk_rq_unprep_clone(struct request *rq)
+{
+ struct bio *bio;
+
+ while ((bio = rq->bio) != NULL) {
+ rq->bio = bio->bi_next;
+
+ bio_put(bio);
+ }
+}
+EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
+
+/*
+ * Copy attributes of the original request to the clone request.
+ * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
+ */
+static void __blk_rq_prep_clone(struct request *dst, struct request *src)
+{
+ dst->cpu = src->cpu;
+ dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE);
+ dst->cmd_type = src->cmd_type;
+ dst->__sector = blk_rq_pos(src);
+ dst->__data_len = blk_rq_bytes(src);
+ dst->nr_phys_segments = src->nr_phys_segments;
+ dst->ioprio = src->ioprio;
+ dst->extra_len = src->extra_len;
+}
+
+/**
+ * blk_rq_prep_clone - Helper function to setup clone request
+ * @rq: the request to be setup
+ * @rq_src: original request to be cloned
+ * @bs: bio_set that bios for clone are allocated from
+ * @gfp_mask: memory allocation mask for bio
+ * @bio_ctr: setup function to be called for each clone bio.
+ * Returns %0 for success, non %0 for failure.
+ * @data: private data to be passed to @bio_ctr
+ *
+ * Description:
+ * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
+ * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense)
+ * are not copied, and copying such parts is the caller's responsibility.
+ * Also, the pages the original bios point to are not copied;
+ * the cloned bios simply point to the same pages.
+ * So cloned bios must be completed before original bios, which means
+ * the caller must complete @rq before @rq_src.
+ */
+int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+ struct bio_set *bs, gfp_t gfp_mask,
+ int (*bio_ctr)(struct bio *, struct bio *, void *),
+ void *data)
+{
+ struct bio *bio, *bio_src;
+
+ if (!bs)
+ bs = fs_bio_set;
+
+ blk_rq_init(NULL, rq);
+
+ __rq_for_each_bio(bio_src, rq_src) {
+ bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs);
+ if (!bio)
+ goto free_and_out;
+
+ __bio_clone(bio, bio_src);
+
+ if (bio_integrity(bio_src) &&
+ bio_integrity_clone(bio, bio_src, gfp_mask))
+ goto free_and_out;
+
+ if (bio_ctr && bio_ctr(bio, bio_src, data))
+ goto free_and_out;
+
+ if (rq->bio) {
+ rq->biotail->bi_next = bio;
+ rq->biotail = bio;
+ } else
+ rq->bio = rq->biotail = bio;
+ }
+
+ __blk_rq_prep_clone(rq, rq_src);
+
+ return 0;
+
+free_and_out:
+ if (bio)
+ bio_free(bio, bs);
+ blk_rq_unprep_clone(rq);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
+
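
For illustration only (not part of the patch): a sketch of how a request-stacking driver such as request-based dm might use the clone interface; setup_my_clone() and my_clone_end_io() are hypothetical names.

#include <linux/blkdev.h>

static void my_clone_end_io(struct request *clone, int error);  /* hypothetical */

static int setup_my_clone(struct request *clone, struct request *rq,
                          struct bio_set *bs)
{
        int r;

        /* clone the bios of @rq and copy its attributes into @clone */
        r = blk_rq_prep_clone(clone, rq, bs, GFP_ATOMIC, NULL, NULL);
        if (r)
                return r;

        clone->end_io = my_clone_end_io;
        clone->end_io_data = rq;
        return 0;
}

A clone that was prepared but never dispatched would be torn down with blk_rq_unprep_clone(); and since the cloned bios borrow the original's pages, the clone must complete before the original, as the description above notes.
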
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
{
return queue_work(kblockd_workqueue, work);
@@ -2153,6 +2394,9 @@ EXPORT_SYMBOL(kblockd_schedule_work);
int __init blk_dev_init(void)
{
+ BUILD_BUG_ON(__REQ_NR_BITS > 8 *
+ sizeof(((struct request *)0)->cmd_flags));
+
kblockd_workqueue = create_workqueue("kblockd");
if (!kblockd_workqueue)
panic("Failed to create kblockd\n");
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 6af716d1e54e..49557e91f0da 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -51,7 +51,6 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
rq->rq_disk = bd_disk;
- rq->cmd_flags |= REQ_NOMERGE;
rq->end_io = done;
WARN_ON(irqs_disabled());
spin_lock_irq(q->queue_lock);
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 91fa8e06b6a5..73e28d355688 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -340,7 +340,7 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
kobject_uevent(&bi->kobj, KOBJ_ADD);
bi->flags |= INTEGRITY_FLAG_READ | INTEGRITY_FLAG_WRITE;
- bi->sector_size = disk->queue->hardsect_size;
+ bi->sector_size = queue_logical_block_size(disk->queue);
disk->integrity = bi;
} else
bi = disk->integrity;
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 012f065ac8e2..d4ed6000147d 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -35,9 +35,9 @@ int put_io_context(struct io_context *ioc)
if (ioc == NULL)
return 1;
- BUG_ON(atomic_read(&ioc->refcount) == 0);
+ BUG_ON(atomic_long_read(&ioc->refcount) == 0);
- if (atomic_dec_and_test(&ioc->refcount)) {
+ if (atomic_long_dec_and_test(&ioc->refcount)) {
rcu_read_lock();
if (ioc->aic && ioc->aic->dtor)
ioc->aic->dtor(ioc->aic);
@@ -90,7 +90,7 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
if (ret) {
- atomic_set(&ret->refcount, 1);
+ atomic_long_set(&ret->refcount, 1);
atomic_set(&ret->nr_tasks, 1);
spin_lock_init(&ret->lock);
ret->ioprio_changed = 0;
@@ -151,7 +151,7 @@ struct io_context *get_io_context(gfp_t gfp_flags, int node)
ret = current_io_context(gfp_flags, node);
if (unlikely(!ret))
break;
- } while (!atomic_inc_not_zero(&ret->refcount));
+ } while (!atomic_long_inc_not_zero(&ret->refcount));
return ret;
}
@@ -163,8 +163,8 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc)
struct io_context *dst = *pdst;
if (src) {
- BUG_ON(atomic_read(&src->refcount) == 0);
- atomic_inc(&src->refcount);
+ BUG_ON(atomic_long_read(&src->refcount) == 0);
+ atomic_long_inc(&src->refcount);
put_io_context(dst);
*pdst = src;
}
diff --git a/block/blk-map.c b/block/blk-map.c
index f103729b462f..9083cf0180cc 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -20,11 +20,10 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
rq->biotail->bi_next = bio;
rq->biotail = bio;
- rq->data_len += bio->bi_size;
+ rq->__data_len += bio->bi_size;
}
return 0;
}
-EXPORT_SYMBOL(blk_rq_append_bio);
static int __blk_rq_unmap_user(struct bio *bio)
{
@@ -116,7 +115,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
struct bio *bio = NULL;
int ret;
- if (len > (q->max_hw_sectors << 9))
+ if (len > (queue_max_hw_sectors(q) << 9))
return -EINVAL;
if (!len)
return -EINVAL;
@@ -156,7 +155,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
if (!bio_flagged(bio, BIO_USER_MAPPED))
rq->cmd_flags |= REQ_COPY_USER;
- rq->buffer = rq->data = NULL;
+ rq->buffer = NULL;
return 0;
unmap_rq:
blk_rq_unmap_user(bio);
@@ -235,7 +234,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
blk_queue_bounce(q, &bio);
bio_get(bio);
blk_rq_bio_prep(q, rq, bio);
- rq->buffer = rq->data = NULL;
+ rq->buffer = NULL;
return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
@@ -282,7 +281,8 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
*
* Description:
* Data will be mapped directly if possible. Otherwise a bounce
- * buffer is used.
+ * buffer is used. Can be called multiple times to append multiple
+ * buffers.
*/
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
unsigned int len, gfp_t gfp_mask)
@@ -290,8 +290,9 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
int reading = rq_data_dir(rq) == READ;
int do_copy = 0;
struct bio *bio;
+ int ret;
- if (len > (q->max_hw_sectors << 9))
+ if (len > (queue_max_hw_sectors(q) << 9))
return -EINVAL;
if (!len || !kbuf)
return -EINVAL;
@@ -311,9 +312,15 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
if (do_copy)
rq->cmd_flags |= REQ_COPY_USER;
- blk_rq_bio_prep(q, rq, bio);
+ ret = blk_rq_append_bio(q, rq, bio);
+ if (unlikely(ret)) {
+ /* request is too big */
+ bio_put(bio);
+ return ret;
+ }
+
blk_queue_bounce(q, &rq->bio);
- rq->buffer = rq->data = NULL;
+ rq->buffer = NULL;
return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
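
For illustration only (not part of the patch): with blk_rq_append_bio() now used internally, consecutive calls can build one request out of several kernel buffers, as the updated comment says. A sketch, with hypothetical names:

#include <linux/blkdev.h>

static int my_map_two_buffers(struct request_queue *q, struct request *rq,
                              void *buf1, unsigned int len1,
                              void *buf2, unsigned int len2)
{
        int err;

        err = blk_rq_map_kern(q, rq, buf1, len1, GFP_KERNEL);
        if (err)
                return err;

        /* the second call appends another bio to the same request */
        return blk_rq_map_kern(q, rq, buf2, len2, GFP_KERNEL);
}
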
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 23d2a6fe34a3..39ce64432ba6 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -9,35 +9,6 @@
#include "blk.h"
-void blk_recalc_rq_sectors(struct request *rq, int nsect)
-{
- if (blk_fs_request(rq) || blk_discard_rq(rq)) {
- rq->hard_sector += nsect;
- rq->hard_nr_sectors -= nsect;
-
- /*
- * Move the I/O submission pointers ahead if required.
- */
- if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
- (rq->sector <= rq->hard_sector)) {
- rq->sector = rq->hard_sector;
- rq->nr_sectors = rq->hard_nr_sectors;
- rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
- rq->current_nr_sectors = rq->hard_cur_sectors;
- rq->buffer = bio_data(rq->bio);
- }
-
- /*
- * if total number of sectors is less than the first segment
- * size, something has gone terribly wrong
- */
- if (rq->nr_sectors < rq->current_nr_sectors) {
- printk(KERN_ERR "blk: request botched\n");
- rq->nr_sectors = rq->current_nr_sectors;
- }
- }
-}
-
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
struct bio *bio)
{
@@ -61,11 +32,12 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
* never considered part of another segment, since that
* might change with the bounce page.
*/
- high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+ high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
if (high || highprv)
goto new_segment;
if (cluster) {
- if (seg_size + bv->bv_len > q->max_segment_size)
+ if (seg_size + bv->bv_len
+ > queue_max_segment_size(q))
goto new_segment;
if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
goto new_segment;
@@ -120,7 +92,7 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
return 0;
if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
- q->max_segment_size)
+ queue_max_segment_size(q))
return 0;
if (!bio_has_data(bio))
@@ -163,7 +135,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
int nbytes = bvec->bv_len;
if (bvprv && cluster) {
- if (sg->length + nbytes > q->max_segment_size)
+ if (sg->length + nbytes > queue_max_segment_size(q))
goto new_segment;
if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
@@ -199,8 +171,9 @@ new_segment:
if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
- (rq->data_len & q->dma_pad_mask)) {
- unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;
+ (blk_rq_bytes(rq) & q->dma_pad_mask)) {
+ unsigned int pad_len =
+ (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
sg->length += pad_len;
rq->extra_len += pad_len;
@@ -233,8 +206,8 @@ static inline int ll_new_hw_segment(struct request_queue *q,
{
int nr_phys_segs = bio_phys_segments(q, bio);
- if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
- || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+ if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
+ req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
@@ -255,11 +228,11 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
unsigned short max_sectors;
if (unlikely(blk_pc_request(req)))
- max_sectors = q->max_hw_sectors;
+ max_sectors = queue_max_hw_sectors(q);
else
- max_sectors = q->max_sectors;
+ max_sectors = queue_max_sectors(q);
- if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+ if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
@@ -279,12 +252,12 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
unsigned short max_sectors;
if (unlikely(blk_pc_request(req)))
- max_sectors = q->max_hw_sectors;
+ max_sectors = queue_max_hw_sectors(q);
else
- max_sectors = q->max_sectors;
+ max_sectors = queue_max_sectors(q);
- if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+ if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
@@ -315,7 +288,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
/*
* Will it become too large?
*/
- if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
+ if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
return 0;
total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -327,10 +300,10 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
total_phys_segments--;
}
- if (total_phys_segments > q->max_phys_segments)
+ if (total_phys_segments > queue_max_phys_segments(q))
return 0;
- if (total_phys_segments > q->max_hw_segments)
+ if (total_phys_segments > queue_max_hw_segments(q))
return 0;
/* Merge is OK... */
@@ -345,7 +318,7 @@ static void blk_account_io_merge(struct request *req)
int cpu;
cpu = part_stat_lock();
- part = disk_map_sector_rcu(req->rq_disk, req->sector);
+ part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
part_round_stats(cpu, part);
part_dec_in_flight(part);
@@ -366,7 +339,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
/*
* not contiguous
*/
- if (req->sector + req->nr_sectors != next->sector)
+ if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
return 0;
if (rq_data_dir(req) != rq_data_dir(next)
@@ -398,7 +371,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
req->biotail->bi_next = next->bio;
req->biotail = next->biotail;
- req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
+ req->__data_len += blk_rq_bytes(next);
elv_merge_requests(q, req, next);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 57af728d94bb..1c4df9bf6813 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -134,7 +134,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
q->backing_dev_info.state = 0;
q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
- blk_queue_hardsect_size(q, 512);
+ blk_queue_logical_block_size(q, 512);
blk_queue_dma_alignment(q, 511);
blk_queue_congestion_threshold(q);
q->nr_batching = BLK_BATCH_REQ;
@@ -179,16 +179,16 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
*/
if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
dma = 1;
- q->bounce_pfn = max_low_pfn;
+ q->limits.bounce_pfn = max_low_pfn;
#else
if (b_pfn < blk_max_low_pfn)
dma = 1;
- q->bounce_pfn = b_pfn;
+ q->limits.bounce_pfn = b_pfn;
#endif
if (dma) {
init_emergency_isa_pool();
q->bounce_gfp = GFP_NOIO | GFP_DMA;
- q->bounce_pfn = b_pfn;
+ q->limits.bounce_pfn = b_pfn;
}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
@@ -211,14 +211,23 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
}
if (BLK_DEF_MAX_SECTORS > max_sectors)
- q->max_hw_sectors = q->max_sectors = max_sectors;
+ q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors;
else {
- q->max_sectors = BLK_DEF_MAX_SECTORS;
- q->max_hw_sectors = max_sectors;
+ q->limits.max_sectors = BLK_DEF_MAX_SECTORS;
+ q->limits.max_hw_sectors = max_sectors;
}
}
EXPORT_SYMBOL(blk_queue_max_sectors);
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
+{
+ if (BLK_DEF_MAX_SECTORS > max_sectors)
+ q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS;
+ else
+ q->limits.max_hw_sectors = max_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_hw_sectors);
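
For illustration only (not part of the patch): unlike blk_queue_max_sectors() above, the new helper sets only the hardware limit (floored at BLK_DEF_MAX_SECTORS per the code above) and leaves the soft max_sectors limit alone. A hypothetical caller:

static void my_init_limits(struct request_queue *q)
{
        /* controller can move 512KiB (1024 x 512B sectors) per command */
        blk_queue_max_hw_sectors(q, 1024);
}
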
+
/**
* blk_queue_max_phys_segments - set max phys segments for a request for this queue
* @q: the request queue for the device
@@ -238,7 +247,7 @@ void blk_queue_max_phys_segments(struct request_queue *q,
__func__, max_segments);
}
- q->max_phys_segments = max_segments;
+ q->limits.max_phys_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_phys_segments);
@@ -262,7 +271,7 @@ void blk_queue_max_hw_segments(struct request_queue *q,
__func__, max_segments);
}
- q->max_hw_segments = max_segments;
+ q->limits.max_hw_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_hw_segments);
@@ -283,26 +292,110 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
__func__, max_size);
}
- q->max_segment_size = max_size;
+ q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);
/**
- * blk_queue_hardsect_size - set hardware sector size for the queue
+ * blk_queue_logical_block_size - set logical block size for the queue
* @q: the request queue for the device
- * @size: the hardware sector size, in bytes
+ * @size: the logical block size, in bytes
*
* Description:
- * This should typically be set to the lowest possible sector size
- * that the hardware can operate on (possible without reverting to
- * even internal read-modify-write operations). Usually the default
- * of 512 covers most hardware.
+ * This should be set to the lowest possible block size that the
+ * storage device can address. The default of 512 covers most
+ * hardware.
**/
-void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
+void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
+{
+ q->limits.logical_block_size = size;
+
+ if (q->limits.physical_block_size < size)
+ q->limits.physical_block_size = size;
+
+ if (q->limits.io_min < q->limits.physical_block_size)
+ q->limits.io_min = q->limits.physical_block_size;
+}
+EXPORT_SYMBOL(blk_queue_logical_block_size);
+
+/**
+ * blk_queue_physical_block_size - set physical block size for the queue
+ * @q: the request queue for the device
+ * @size: the physical block size, in bytes
+ *
+ * Description:
+ * This should be set to the lowest possible sector size that the
+ * hardware can operate on without reverting to read-modify-write
+ * operations.
+ */
+void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
+{
+ q->limits.physical_block_size = size;
+
+ if (q->limits.physical_block_size < q->limits.logical_block_size)
+ q->limits.physical_block_size = q->limits.logical_block_size;
+
+ if (q->limits.io_min < q->limits.physical_block_size)
+ q->limits.io_min = q->limits.physical_block_size;
+}
+EXPORT_SYMBOL(blk_queue_physical_block_size);
+
+/**
+ * blk_queue_alignment_offset - set physical block alignment offset
+ * @q: the request queue for the device
+ * @offset: alignment offset in bytes
+ *
+ * Description:
+ * Some devices are naturally misaligned to compensate for things like
+ * the legacy DOS partition table 63-sector offset. Low-level drivers
+ * should call this function for devices whose first sector is not
+ * naturally aligned.
+ */
+void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
- q->hardsect_size = size;
+ q->limits.alignment_offset =
+ offset & (q->limits.physical_block_size - 1);
+ q->limits.misaligned = 0;
}
-EXPORT_SYMBOL(blk_queue_hardsect_size);
+EXPORT_SYMBOL(blk_queue_alignment_offset);
+
+/**
+ * blk_queue_io_min - set minimum request size for the queue
+ * @q: the request queue for the device
+ * @min: smallest I/O size in bytes
+ *
+ * Description:
+ * Some devices have an internal block size bigger than the reported
+ * hardware sector size. This function can be used to signal the
+ * smallest I/O the device can perform without incurring a performance
+ * penalty.
+ */
+void blk_queue_io_min(struct request_queue *q, unsigned int min)
+{
+ q->limits.io_min = min;
+
+ if (q->limits.io_min < q->limits.logical_block_size)
+ q->limits.io_min = q->limits.logical_block_size;
+
+ if (q->limits.io_min < q->limits.physical_block_size)
+ q->limits.io_min = q->limits.physical_block_size;
+}
+EXPORT_SYMBOL(blk_queue_io_min);
+
+/**
+ * blk_queue_io_opt - set optimal request size for the queue
+ * @q: the request queue for the device
+ * @opt: optimal request size in bytes
+ *
+ * Description:
+ * Drivers can call this function to set the preferred I/O request
+ * size for devices that report such a value.
+ */
+void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
+{
+ q->limits.io_opt = opt;
+}
+EXPORT_SYMBOL(blk_queue_io_opt);
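
For illustration only (not part of the patch): how a driver for a disk with 4KiB physical sectors that still exposes 512-byte logical blocks might publish its topology with the setters above; the function name and the values are hypothetical.

static void my_set_topology(struct request_queue *q)
{
        blk_queue_logical_block_size(q, 512);
        blk_queue_physical_block_size(q, 4096);
        blk_queue_alignment_offset(q, 3584);    /* e.g. 63-sector legacy shift */
        blk_queue_io_min(q, 4096);              /* avoid read-modify-write */
        blk_queue_io_opt(q, 64 * 1024);         /* device-preferred I/O size */
}

These values surface in the new sysfs attributes added in block/blk-sysfs.c later in this patch.
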
/*
* Returns the minimum that is _not_ zero, unless both are zero.
@@ -317,14 +410,27 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
/* zero is "infinity" */
- t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
- t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
- t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask);
+ t->limits.max_sectors = min_not_zero(queue_max_sectors(t),
+ queue_max_sectors(b));
+
+ t->limits.max_hw_sectors = min_not_zero(queue_max_hw_sectors(t),
+ queue_max_hw_sectors(b));
+
+ t->limits.seg_boundary_mask = min_not_zero(queue_segment_boundary(t),
+ queue_segment_boundary(b));
+
+ t->limits.max_phys_segments = min_not_zero(queue_max_phys_segments(t),
+ queue_max_phys_segments(b));
+
+ t->limits.max_hw_segments = min_not_zero(queue_max_hw_segments(t),
+ queue_max_hw_segments(b));
+
+ t->limits.max_segment_size = min_not_zero(queue_max_segment_size(t),
+ queue_max_segment_size(b));
+
+ t->limits.logical_block_size = max(queue_logical_block_size(t),
+ queue_logical_block_size(b));
- t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments);
- t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments);
- t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size);
- t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
if (!t->queue_lock)
WARN_ON_ONCE(1);
else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
@@ -337,6 +443,109 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
EXPORT_SYMBOL(blk_queue_stack_limits);
/**
+ * blk_stack_limits - adjust queue_limits for stacked devices
+ * @t: the stacking driver limits (top)
+ * @b: the underlying queue limits (bottom)
+ * @offset: offset to beginning of data within component device
+ *
+ * Description:
+ *    Merges two queue_limits structs.  Returns 0 if alignment didn't
+ * change. Returns -1 if adding the bottom device caused
+ * misalignment.
+ */
+int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ sector_t offset)
+{
+ t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
+ t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+ t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
+
+ t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
+ b->seg_boundary_mask);
+
+ t->max_phys_segments = min_not_zero(t->max_phys_segments,
+ b->max_phys_segments);
+
+ t->max_hw_segments = min_not_zero(t->max_hw_segments,
+ b->max_hw_segments);
+
+ t->max_segment_size = min_not_zero(t->max_segment_size,
+ b->max_segment_size);
+
+ t->logical_block_size = max(t->logical_block_size,
+ b->logical_block_size);
+
+ t->physical_block_size = max(t->physical_block_size,
+ b->physical_block_size);
+
+ t->io_min = max(t->io_min, b->io_min);
+ t->no_cluster |= b->no_cluster;
+
+ /* Bottom device offset aligned? */
+ if (offset &&
+ (offset & (b->physical_block_size - 1)) != b->alignment_offset) {
+ t->misaligned = 1;
+ return -1;
+ }
+
+ /* If top has no alignment offset, inherit from bottom */
+ if (!t->alignment_offset)
+ t->alignment_offset =
+ b->alignment_offset & (b->physical_block_size - 1);
+
+ /* Top device aligned on logical block boundary? */
+ if (t->alignment_offset & (t->logical_block_size - 1)) {
+ t->misaligned = 1;
+ return -1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(blk_stack_limits);
+
+/**
+ * disk_stack_limits - adjust queue limits for stacked drivers
+ * @disk: MD/DM gendisk (top)
+ * @bdev: the underlying block device (bottom)
+ * @offset: offset to beginning of data within component device
+ *
+ * Description:
+ *    Merges the limits of @bdev's queue into @disk's queue and
+ *    prints a warning if the resulting top device is misaligned.
+ */
+void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
+ sector_t offset)
+{
+ struct request_queue *t = disk->queue;
+ struct request_queue *b = bdev_get_queue(bdev);
+
+ offset += get_start_sect(bdev) << 9;
+
+ if (blk_stack_limits(&t->limits, &b->limits, offset) < 0) {
+ char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
+
+ disk_name(disk, 0, top);
+ bdevname(bdev, bottom);
+
+ printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
+ top, bottom);
+ }
+
+ if (!t->queue_lock)
+ WARN_ON_ONCE(1);
+ else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(t->queue_lock, flags);
+ if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
+ queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
+ spin_unlock_irqrestore(t->queue_lock, flags);
+ }
+}
+EXPORT_SYMBOL(disk_stack_limits);
+
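
For illustration only (not part of the patch): a sketch of how an MD/DM-style driver could merge a component device's limits while building a table, mirroring what disk_stack_limits() does above; my_merge_target() is a hypothetical name.

#include <linux/blkdev.h>
#include <linux/genhd.h>

static int my_merge_target(struct queue_limits *top,
                           struct block_device *bdev, sector_t start)
{
        struct request_queue *b = bdev_get_queue(bdev);

        /* account for the partition offset, like disk_stack_limits() */
        start += get_start_sect(bdev) << 9;

        /* returns -1 (and sets top->misaligned) on misalignment */
        return blk_stack_limits(top, &b->limits, start);
}
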
+/**
* blk_queue_dma_pad - set pad mask
* @q: the request queue for the device
* @mask: pad mask
@@ -396,11 +605,11 @@ int blk_queue_dma_drain(struct request_queue *q,
dma_drain_needed_fn *dma_drain_needed,
void *buf, unsigned int size)
{
- if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
+ if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
return -EINVAL;
/* make room for appending the drain */
- --q->max_hw_segments;
- --q->max_phys_segments;
+ blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
+ blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
q->dma_drain_needed = dma_drain_needed;
q->dma_drain_buffer = buf;
q->dma_drain_size = size;
@@ -422,7 +631,7 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
__func__, mask);
}
- q->seg_boundary_mask = mask;
+ q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 26f9ec28f56c..b1cd04087d6a 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -95,21 +95,36 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
- int max_sectors_kb = q->max_sectors >> 1;
+ int max_sectors_kb = queue_max_sectors(q) >> 1;
return queue_var_show(max_sectors_kb, (page));
}
-static ssize_t queue_hw_sector_size_show(struct request_queue *q, char *page)
+static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
- return queue_var_show(q->hardsect_size, page);
+ return queue_var_show(queue_logical_block_size(q), page);
+}
+
+static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(queue_physical_block_size(q), page);
+}
+
+static ssize_t queue_io_min_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(queue_io_min(q), page);
+}
+
+static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(queue_io_opt(q), page);
}
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
unsigned long max_sectors_kb,
- max_hw_sectors_kb = q->max_hw_sectors >> 1,
+ max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
@@ -117,7 +132,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
return -EINVAL;
spin_lock_irq(q->queue_lock);
- q->max_sectors = max_sectors_kb << 1;
+ blk_queue_max_sectors(q, max_sectors_kb << 1);
spin_unlock_irq(q->queue_lock);
return ret;
@@ -125,7 +140,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
- int max_hw_sectors_kb = q->max_hw_sectors >> 1;
+ int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
return queue_var_show(max_hw_sectors_kb, (page));
}
@@ -249,7 +264,27 @@ static struct queue_sysfs_entry queue_iosched_entry = {
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
- .show = queue_hw_sector_size_show,
+ .show = queue_logical_block_size_show,
+};
+
+static struct queue_sysfs_entry queue_logical_block_size_entry = {
+ .attr = {.name = "logical_block_size", .mode = S_IRUGO },
+ .show = queue_logical_block_size_show,
+};
+
+static struct queue_sysfs_entry queue_physical_block_size_entry = {
+ .attr = {.name = "physical_block_size", .mode = S_IRUGO },
+ .show = queue_physical_block_size_show,
+};
+
+static struct queue_sysfs_entry queue_io_min_entry = {
+ .attr = {.name = "minimum_io_size", .mode = S_IRUGO },
+ .show = queue_io_min_show,
+};
+
+static struct queue_sysfs_entry queue_io_opt_entry = {
+ .attr = {.name = "optimal_io_size", .mode = S_IRUGO },
+ .show = queue_io_opt_show,
};
static struct queue_sysfs_entry queue_nonrot_entry = {
@@ -283,6 +318,10 @@ static struct attribute *default_attrs[] = {
&queue_max_sectors_entry.attr,
&queue_iosched_entry.attr,
&queue_hw_sector_size_entry.attr,
+ &queue_logical_block_size_entry.attr,
+ &queue_physical_block_size_entry.attr,
+ &queue_io_min_entry.attr,
+ &queue_io_opt_entry.attr,
&queue_nonrot_entry.attr,
&queue_nomerges_entry.attr,
&queue_rq_affinity_entry.attr,
@@ -394,16 +433,15 @@ int blk_register_queue(struct gendisk *disk)
if (ret)
return ret;
- if (!q->request_fn)
- return 0;
-
- ret = kobject_add(&q->kobj, kobject_get(&dev->kobj),
- "%s", "queue");
+ ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
if (ret < 0)
return ret;
kobject_uevent(&q->kobj, KOBJ_ADD);
+ if (!q->request_fn)
+ return 0;
+
ret = elv_register_queue(q);
if (ret) {
kobject_uevent(&q->kobj, KOBJ_REMOVE);
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 3c518e3303ae..2e5cfeb59333 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -336,7 +336,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
struct blk_queue_tag *bqt = q->queue_tags;
- unsigned max_depth, offset;
+ unsigned max_depth;
int tag;
if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
@@ -355,13 +355,16 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
* to starve sync IO on behalf of flooding async IO.
*/
max_depth = bqt->max_depth;
- if (rq_is_sync(rq))
- offset = 0;
- else
- offset = max_depth >> 2;
+ if (!rq_is_sync(rq) && max_depth > 1) {
+ max_depth -= 2;
+ if (!max_depth)
+ max_depth = 1;
+ if (q->in_flight[0] > max_depth)
+ return 1;
+ }
do {
- tag = find_next_zero_bit(bqt->tag_map, max_depth, offset);
+ tag = find_first_zero_bit(bqt->tag_map, max_depth);
if (tag >= max_depth)
return 1;
@@ -374,7 +377,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
rq->cmd_flags |= REQ_QUEUED;
rq->tag = tag;
bqt->tag_index[tag] = rq;
- blkdev_dequeue_request(rq);
+ blk_start_request(rq);
list_add(&rq->queuelist, &q->tag_busy_list);
return 0;
}
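
For example, with a tag map of depth four an async request now sees max_depth reduced to two: it may only claim tags 0 and 1, and is refused outright once more than two async requests are in flight, so the upper tags always stay available for sync I/O. The old code merely started the async bitmap search at max_depth >> 2, reserving just the bottom quarter of the map for sync requests.
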
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 1ec0d503cacd..1ba7e0aca878 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -122,10 +122,8 @@ void blk_rq_timed_out_timer(unsigned long data)
if (blk_mark_rq_complete(rq))
continue;
blk_rq_timed_out(rq);
- } else {
- if (!next || time_after(next, rq->deadline))
- next = rq->deadline;
- }
+ } else if (!next || time_after(next, rq->deadline))
+ next = rq->deadline;
}
/*
@@ -176,16 +174,14 @@ void blk_add_timer(struct request *req)
BUG_ON(!list_empty(&req->timeout_list));
BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
- if (req->timeout)
- req->deadline = jiffies + req->timeout;
- else {
- req->deadline = jiffies + q->rq_timeout;
- /*
- * Some LLDs, like scsi, peek at the timeout to prevent
- * a command from being retried forever.
- */
+ /*
+ * Some LLDs, like scsi, peek at the timeout to prevent a
+ * command from being retried forever.
+ */
+ if (!req->timeout)
req->timeout = q->rq_timeout;
- }
+
+ req->deadline = jiffies + req->timeout;
list_add_tail(&req->timeout_list, &q->timeout_list);
/*
diff --git a/block/blk.h b/block/blk.h
index 79c85f7c9ff5..3fae6add5430 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -13,6 +13,9 @@ extern struct kobj_type blk_queue_ktype;
void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
+int blk_rq_append_bio(struct request_queue *q, struct request *rq,
+ struct bio *bio);
+void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
void blk_unplug_work(struct work_struct *work);
@@ -43,6 +46,43 @@ static inline void blk_clear_rq_complete(struct request *rq)
clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
+/*
+ * Internal elevator interface
+ */
+#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
+
+static inline struct request *__elv_next_request(struct request_queue *q)
+{
+ struct request *rq;
+
+ while (1) {
+ while (!list_empty(&q->queue_head)) {
+ rq = list_entry_rq(q->queue_head.next);
+ if (blk_do_ordered(q, &rq))
+ return rq;
+ }
+
+ if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
+ return NULL;
+ }
+}
+
+static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->ops->elevator_activate_req_fn)
+ e->ops->elevator_activate_req_fn(q, rq);
+}
+
+static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->ops->elevator_deactivate_req_fn)
+ e->ops->elevator_deactivate_req_fn(q, rq);
+}
+
#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
@@ -64,7 +104,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
void blk_recalc_rq_segments(struct request *rq);
-void blk_recalc_rq_sectors(struct request *rq, int nsect);
void blk_queue_congestion_threshold(struct request_queue *q);
@@ -112,9 +151,17 @@ static inline int blk_cpu_to_group(int cpu)
#endif
}
+/*
+ * Contribute to IO statistics IFF:
+ *
+ * a) it's attached to a gendisk, and
+ * b) the queue had IO stats enabled when this request was started, and
+ * c) it's a file system request or a discard request
+ */
static inline int blk_do_io_stat(struct request *rq)
{
- return rq->rq_disk && blk_rq_io_stat(rq);
+ return rq->rq_disk && blk_rq_io_stat(rq) &&
+ (blk_fs_request(rq) || blk_discard_rq(rq));
}
#endif
diff --git a/block/bsg.c b/block/bsg.c
index dd81be455e00..5358f9ae13c1 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -446,15 +446,15 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
}
if (rq->next_rq) {
- hdr->dout_resid = rq->data_len;
- hdr->din_resid = rq->next_rq->data_len;
+ hdr->dout_resid = rq->resid_len;
+ hdr->din_resid = rq->next_rq->resid_len;
blk_rq_unmap_user(bidi_bio);
rq->next_rq->bio = NULL;
blk_put_request(rq->next_rq);
} else if (rq_data_dir(rq) == READ)
- hdr->din_resid = rq->data_len;
+ hdr->din_resid = rq->resid_len;
else
- hdr->dout_resid = rq->data_len;
+ hdr->dout_resid = rq->resid_len;
/*
* If the request generated a negative error number, return it
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index a55a9bd75bd1..ef2f72d42434 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -349,8 +349,8 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
return rq2;
- s1 = rq1->sector;
- s2 = rq2->sector;
+ s1 = blk_rq_pos(rq1);
+ s2 = blk_rq_pos(rq2);
last = cfqd->last_position;
@@ -579,9 +579,9 @@ cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
* Sort strictly based on sector. Smallest to the left,
* largest to the right.
*/
- if (sector > cfqq->next_rq->sector)
+ if (sector > blk_rq_pos(cfqq->next_rq))
n = &(*p)->rb_right;
- else if (sector < cfqq->next_rq->sector)
+ else if (sector < blk_rq_pos(cfqq->next_rq))
n = &(*p)->rb_left;
else
break;
@@ -611,8 +611,8 @@ static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
return;
cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
- __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, cfqq->next_rq->sector,
- &parent, &p);
+ __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
+ blk_rq_pos(cfqq->next_rq), &parent, &p);
if (!__cfqq) {
rb_link_node(&cfqq->p_node, parent, p);
rb_insert_color(&cfqq->p_node, cfqq->p_root);
@@ -760,7 +760,7 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
cfqd->rq_in_driver);
- cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
+ cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
}
static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
@@ -949,10 +949,10 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
struct request *rq)
{
- if (rq->sector >= cfqd->last_position)
- return rq->sector - cfqd->last_position;
+ if (blk_rq_pos(rq) >= cfqd->last_position)
+ return blk_rq_pos(rq) - cfqd->last_position;
else
- return cfqd->last_position - rq->sector;
+ return cfqd->last_position - blk_rq_pos(rq);
}
#define CIC_SEEK_THR 8 * 1024
@@ -996,7 +996,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
if (cfq_rq_close(cfqd, __cfqq->next_rq))
return __cfqq;
- if (__cfqq->next_rq->sector < sector)
+ if (blk_rq_pos(__cfqq->next_rq) < sector)
node = rb_next(&__cfqq->p_node);
else
node = rb_prev(&__cfqq->p_node);
@@ -1282,7 +1282,7 @@ static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
if (!cfqd->active_cic) {
struct cfq_io_context *cic = RQ_CIC(rq);
- atomic_inc(&cic->ioc->refcount);
+ atomic_long_inc(&cic->ioc->refcount);
cfqd->active_cic = cic;
}
}
@@ -1918,10 +1918,10 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
if (!cic->last_request_pos)
sdist = 0;
- else if (cic->last_request_pos < rq->sector)
- sdist = rq->sector - cic->last_request_pos;
+ else if (cic->last_request_pos < blk_rq_pos(rq))
+ sdist = blk_rq_pos(rq) - cic->last_request_pos;
else
- sdist = cic->last_request_pos - rq->sector;
+ sdist = cic->last_request_pos - blk_rq_pos(rq);
/*
* Don't allow the seek distance to get too large from the
@@ -2071,7 +2071,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfq_update_io_seektime(cfqd, cic, rq);
cfq_update_idle_window(cfqd, cfqq, cic);
- cic->last_request_pos = rq->sector + rq->nr_sectors;
+ cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
if (cfqq == cfqd->active_queue) {
/*
@@ -2088,7 +2088,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
cfqd->busy_queues > 1) {
del_timer(&cfqd->idle_slice_timer);
- blk_start_queueing(cfqd->queue);
+ __blk_run_queue(cfqd->queue);
}
cfq_mark_cfqq_must_dispatch(cfqq);
}
@@ -2100,7 +2100,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
* this new queue is RT and the current one is BE
*/
cfq_preempt_queue(cfqd, cfqq);
- blk_start_queueing(cfqd->queue);
+ __blk_run_queue(cfqd->queue);
}
}
@@ -2345,7 +2345,7 @@ static void cfq_kick_queue(struct work_struct *work)
struct request_queue *q = cfqd->queue;
spin_lock_irq(q->queue_lock);
- blk_start_queueing(q);
+ __blk_run_queue(cfqd->queue);
spin_unlock_irq(q->queue_lock);
}
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index f8c218cd08e1..7865a34e0faa 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -763,10 +763,10 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
return compat_put_int(arg, block_size(bdev));
case BLKSSZGET: /* get block device hardware sector size */
- return compat_put_int(arg, bdev_hardsect_size(bdev));
+ return compat_put_int(arg, bdev_logical_block_size(bdev));
case BLKSECTGET:
return compat_put_ushort(arg,
- bdev_get_queue(bdev)->max_sectors);
+ queue_max_sectors(bdev_get_queue(bdev)));
case BLKRASET: /* compatible, but no compat_ptr (!) */
case BLKFRASET:
if (!capable(CAP_SYS_ADMIN))
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index c4d991d4adef..b547cbca7b23 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -138,7 +138,7 @@ deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
if (__rq) {
- BUG_ON(sector != __rq->sector);
+ BUG_ON(sector != blk_rq_pos(__rq));
if (elv_rq_merge_ok(__rq, bio)) {
ret = ELEVATOR_FRONT_MERGE;
diff --git a/block/elevator.c b/block/elevator.c
index e220f0c543e3..ca861927ba41 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -51,8 +51,7 @@ static const int elv_hash_shift = 6;
#define ELV_HASH_FN(sec) \
(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES (1 << elv_hash_shift)
-#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
-#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
+#define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
/*
* Query io scheduler to see if the current process issuing bio may be
@@ -116,9 +115,9 @@ static inline int elv_try_merge(struct request *__rq, struct bio *bio)
* we can merge and sequence is ok, check if it's possible
*/
if (elv_rq_merge_ok(__rq, bio)) {
- if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
+ if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
ret = ELEVATOR_BACK_MERGE;
- else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
+ else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
ret = ELEVATOR_FRONT_MERGE;
}
@@ -306,22 +305,6 @@ void elevator_exit(struct elevator_queue *e)
}
EXPORT_SYMBOL(elevator_exit);
-static void elv_activate_rq(struct request_queue *q, struct request *rq)
-{
- struct elevator_queue *e = q->elevator;
-
- if (e->ops->elevator_activate_req_fn)
- e->ops->elevator_activate_req_fn(q, rq);
-}
-
-static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
-{
- struct elevator_queue *e = q->elevator;
-
- if (e->ops->elevator_deactivate_req_fn)
- e->ops->elevator_deactivate_req_fn(q, rq);
-}
-
static inline void __elv_rqhash_del(struct request *rq)
{
hlist_del_init(&rq->hash);
@@ -383,9 +366,9 @@ struct request *elv_rb_add(struct rb_root *root, struct request *rq)
parent = *p;
__rq = rb_entry(parent, struct request, rb_node);
- if (rq->sector < __rq->sector)
+ if (blk_rq_pos(rq) < blk_rq_pos(__rq))
p = &(*p)->rb_left;
- else if (rq->sector > __rq->sector)
+ else if (blk_rq_pos(rq) > blk_rq_pos(__rq))
p = &(*p)->rb_right;
else
return __rq;
@@ -413,9 +396,9 @@ struct request *elv_rb_find(struct rb_root *root, sector_t sector)
while (n) {
rq = rb_entry(n, struct request, rb_node);
- if (sector < rq->sector)
+ if (sector < blk_rq_pos(rq))
n = n->rb_left;
- else if (sector > rq->sector)
+ else if (sector > blk_rq_pos(rq))
n = n->rb_right;
else
return rq;
@@ -454,14 +437,14 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
break;
if (pos->cmd_flags & stop_flags)
break;
- if (rq->sector >= boundary) {
- if (pos->sector < boundary)
+ if (blk_rq_pos(rq) >= boundary) {
+ if (blk_rq_pos(pos) < boundary)
continue;
} else {
- if (pos->sector >= boundary)
+ if (blk_rq_pos(pos) >= boundary)
break;
}
- if (rq->sector >= pos->sector)
+ if (blk_rq_pos(rq) >= blk_rq_pos(pos))
break;
}
@@ -559,7 +542,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
* in_flight count again
*/
if (blk_account_rq(rq)) {
- q->in_flight--;
+ q->in_flight[rq_is_sync(rq)]--;
if (blk_sorted_rq(rq))
elv_deactivate_rq(q, rq);
}
@@ -588,6 +571,9 @@ void elv_drain_elevator(struct request_queue *q)
*/
void elv_quiesce_start(struct request_queue *q)
{
+ if (!q->elevator)
+ return;
+
queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
/*
@@ -595,7 +581,7 @@ void elv_quiesce_start(struct request_queue *q)
*/
elv_drain_elevator(q);
while (q->rq.elvpriv) {
- blk_start_queueing(q);
+ __blk_run_queue(q);
spin_unlock_irq(q->queue_lock);
msleep(10);
spin_lock_irq(q->queue_lock);
@@ -639,8 +625,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
* with anything. There's no point in delaying queue
* processing.
*/
- blk_remove_plug(q);
- blk_start_queueing(q);
+ __blk_run_queue(q);
break;
case ELEVATOR_INSERT_SORT:
@@ -699,7 +684,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
if (unplug_it && blk_queue_plugged(q)) {
int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
- - q->in_flight;
+ - queue_in_flight(q);
if (nrq >= q->unplug_thresh)
__generic_unplug_device(q);
@@ -755,117 +740,6 @@ void elv_add_request(struct request_queue *q, struct request *rq, int where,
}
EXPORT_SYMBOL(elv_add_request);
-static inline struct request *__elv_next_request(struct request_queue *q)
-{
- struct request *rq;
-
- while (1) {
- while (!list_empty(&q->queue_head)) {
- rq = list_entry_rq(q->queue_head.next);
- if (blk_do_ordered(q, &rq))
- return rq;
- }
-
- if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
- return NULL;
- }
-}
-
-struct request *elv_next_request(struct request_queue *q)
-{
- struct request *rq;
- int ret;
-
- while ((rq = __elv_next_request(q)) != NULL) {
- if (!(rq->cmd_flags & REQ_STARTED)) {
- /*
- * This is the first time the device driver
- * sees this request (possibly after
- * requeueing). Notify IO scheduler.
- */
- if (blk_sorted_rq(rq))
- elv_activate_rq(q, rq);
-
- /*
- * just mark as started even if we don't start
- * it, a request that has been delayed should
- * not be passed by new incoming requests
- */
- rq->cmd_flags |= REQ_STARTED;
- trace_block_rq_issue(q, rq);
- }
-
- if (!q->boundary_rq || q->boundary_rq == rq) {
- q->end_sector = rq_end_sector(rq);
- q->boundary_rq = NULL;
- }
-
- if (rq->cmd_flags & REQ_DONTPREP)
- break;
-
- if (q->dma_drain_size && rq->data_len) {
- /*
- * make sure space for the drain appears we
- * know we can do this because max_hw_segments
- * has been adjusted to be one fewer than the
- * device can handle
- */
- rq->nr_phys_segments++;
- }
-
- if (!q->prep_rq_fn)
- break;
-
- ret = q->prep_rq_fn(q, rq);
- if (ret == BLKPREP_OK) {
- break;
- } else if (ret == BLKPREP_DEFER) {
- /*
- * the request may have been (partially) prepped.
- * we need to keep this request in the front to
- * avoid resource deadlock. REQ_STARTED will
- * prevent other fs requests from passing this one.
- */
- if (q->dma_drain_size && rq->data_len &&
- !(rq->cmd_flags & REQ_DONTPREP)) {
- /*
- * remove the space for the drain we added
- * so that we don't add it again
- */
- --rq->nr_phys_segments;
- }
-
- rq = NULL;
- break;
- } else if (ret == BLKPREP_KILL) {
- rq->cmd_flags |= REQ_QUIET;
- __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
- } else {
- printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
- break;
- }
- }
-
- return rq;
-}
-EXPORT_SYMBOL(elv_next_request);
-
-void elv_dequeue_request(struct request_queue *q, struct request *rq)
-{
- BUG_ON(list_empty(&rq->queuelist));
- BUG_ON(ELV_ON_HASH(rq));
-
- list_del_init(&rq->queuelist);
-
- /*
- * the time frame between a request being removed from the lists
- * and to it is freed is accounted as io that is in progress at
- * the driver side.
- */
- if (blk_account_rq(rq))
- q->in_flight++;
-}
-
int elv_queue_empty(struct request_queue *q)
{
struct elevator_queue *e = q->elevator;
@@ -935,7 +809,12 @@ void elv_abort_queue(struct request_queue *q)
rq = list_entry_rq(q->queue_head.next);
rq->cmd_flags |= REQ_QUIET;
trace_block_rq_abort(q, rq);
- __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
+ /*
+ * Mark this request as started so we don't trigger
+ * any debug logic in the end I/O path.
+ */
+ blk_start_request(rq);
+ __blk_end_request_all(rq, -EIO);
}
}
EXPORT_SYMBOL(elv_abort_queue);
@@ -948,7 +827,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
* request is released from the driver, io must be done
*/
if (blk_account_rq(rq)) {
- q->in_flight--;
+ q->in_flight[rq_is_sync(rq)]--;
if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
e->ops->elevator_completed_req_fn(q, rq);
}
@@ -963,11 +842,11 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
if (!list_empty(&q->queue_head))
next = list_entry_rq(q->queue_head.next);
- if (!q->in_flight &&
+ if (!queue_in_flight(q) &&
blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
(!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
- blk_start_queueing(q);
+ __blk_run_queue(q);
}
}
}
@@ -1175,6 +1054,9 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
char elevator_name[ELV_NAME_MAX];
struct elevator_type *e;
+ if (!q->elevator)
+ return count;
+
strlcpy(elevator_name, name, sizeof(elevator_name));
strstrip(elevator_name);
@@ -1198,10 +1080,15 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
struct elevator_queue *e = q->elevator;
- struct elevator_type *elv = e->elevator_type;
+ struct elevator_type *elv;
struct elevator_type *__e;
int len = 0;
+ if (!q->elevator)
+ return sprintf(name, "none\n");
+
+ elv = e->elevator_type;
+
spin_lock(&elv_list_lock);
list_for_each_entry(__e, &elv_list, list) {
if (!strcmp(elv->elevator_name, __e->elevator_name))
diff --git a/block/genhd.c b/block/genhd.c
index 1a4916e01732..fe7ccc0a618f 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -852,11 +852,21 @@ static ssize_t disk_capability_show(struct device *dev,
return sprintf(buf, "%x\n", disk->flags);
}
+static ssize_t disk_alignment_offset_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+
+ return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue));
+}
+
static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL);
static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL);
static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
+static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
#ifdef CONFIG_FAIL_MAKE_REQUEST
@@ -875,6 +885,7 @@ static struct attribute *disk_attrs[] = {
&dev_attr_removable.attr,
&dev_attr_ro.attr,
&dev_attr_size.attr,
+ &dev_attr_alignment_offset.attr,
&dev_attr_capability.attr,
&dev_attr_stat.attr,
#ifdef CONFIG_FAIL_MAKE_REQUEST
diff --git a/block/ioctl.c b/block/ioctl.c
index ad474d4bbcce..500e4c73cc52 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -152,10 +152,10 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
bio->bi_private = &wait;
bio->bi_sector = start;
- if (len > q->max_hw_sectors) {
- bio->bi_size = q->max_hw_sectors << 9;
- len -= q->max_hw_sectors;
- start += q->max_hw_sectors;
+ if (len > queue_max_hw_sectors(q)) {
+ bio->bi_size = queue_max_hw_sectors(q) << 9;
+ len -= queue_max_hw_sectors(q);
+ start += queue_max_hw_sectors(q);
} else {
bio->bi_size = len << 9;
len = 0;
@@ -311,9 +311,9 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */
return put_int(arg, block_size(bdev));
case BLKSSZGET: /* get block device hardware sector size */
- return put_int(arg, bdev_hardsect_size(bdev));
+ return put_int(arg, bdev_logical_block_size(bdev));
case BLKSECTGET:
- return put_ushort(arg, bdev_get_queue(bdev)->max_sectors);
+ return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev)));
case BLKRASET:
case BLKFRASET:
if(!capable(CAP_SYS_ADMIN))
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 82a0ca2f6729..5f8e798ede4e 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -75,7 +75,7 @@ static int sg_set_timeout(struct request_queue *q, int __user *p)
static int sg_get_reserved_size(struct request_queue *q, int __user *p)
{
- unsigned val = min(q->sg_reserved_size, q->max_sectors << 9);
+ unsigned val = min(q->sg_reserved_size, queue_max_sectors(q) << 9);
return put_user(val, p);
}
@@ -89,8 +89,8 @@ static int sg_set_reserved_size(struct request_queue *q, int __user *p)
if (size < 0)
return -EINVAL;
- if (size > (q->max_sectors << 9))
- size = q->max_sectors << 9;
+ if (size > (queue_max_sectors(q) << 9))
+ size = queue_max_sectors(q) << 9;
q->sg_reserved_size = size;
return 0;
@@ -230,7 +230,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
hdr->info = 0;
if (hdr->masked_status || hdr->host_status || hdr->driver_status)
hdr->info |= SG_INFO_CHECK;
- hdr->resid = rq->data_len;
+ hdr->resid = rq->resid_len;
hdr->sb_len_wr = 0;
if (rq->sense_len && hdr->sbp) {
@@ -264,7 +264,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
if (hdr->cmd_len > BLK_MAX_CDB)
return -EINVAL;
- if (hdr->dxfer_len > (q->max_hw_sectors << 9))
+ if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
return -EIO;
if (hdr->dxfer_len)
@@ -500,9 +500,6 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
rq = blk_get_request(q, WRITE, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_BLOCK_PC;
- rq->data = NULL;
- rq->data_len = 0;
- rq->extra_len = 0;
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
rq->cmd[0] = cmd;
rq->cmd[4] = data;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 342316064e9f..d0dfeef55db5 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1084,7 +1084,7 @@ static int atapi_drain_needed(struct request *rq)
if (likely(!blk_pc_request(rq)))
return 0;
- if (!rq->data_len || (rq->cmd_flags & REQ_RW))
+ if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW))
return 0;
return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index f22ed6cc69f2..668dc234b8e2 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -3321,7 +3321,7 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_
DAC960_Command_T *Command;
while(1) {
- Request = elv_next_request(req_q);
+ Request = blk_peek_request(req_q);
if (!Request)
return 1;
@@ -3338,10 +3338,10 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_
}
Command->Completion = Request->end_io_data;
Command->LogicalDriveNumber = (long)Request->rq_disk->private_data;
- Command->BlockNumber = Request->sector;
- Command->BlockCount = Request->nr_sectors;
+ Command->BlockNumber = blk_rq_pos(Request);
+ Command->BlockCount = blk_rq_sectors(Request);
Command->Request = Request;
- blkdev_dequeue_request(Request);
+ blk_start_request(Request);
Command->SegmentCount = blk_rq_map_sg(req_q,
Command->Request, Command->cmd_sglist);
/* pci_map_sg MAY change the value of SegCount */
@@ -3431,7 +3431,7 @@ static void DAC960_queue_partial_rw(DAC960_Command_T *Command)
* successfully as possible.
*/
Command->SegmentCount = 1;
- Command->BlockNumber = Request->sector;
+ Command->BlockNumber = blk_rq_pos(Request);
Command->BlockCount = 1;
DAC960_QueueReadWriteCommand(Command);
return;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index ddea8e485cc9..f42fa50d3550 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -412,7 +412,7 @@ config ATA_OVER_ETH
config MG_DISK
tristate "mGine mflash, gflash support"
- depends on ARM && ATA && GPIOLIB
+ depends on ARM && GPIOLIB
help
mGine mFlash(gFlash) block device driver
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 8df436ff7068..9c6e5b0fe894 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -112,8 +112,6 @@ module_param(fd_def_df0, ulong, 0);
MODULE_LICENSE("GPL");
static struct request_queue *floppy_queue;
-#define QUEUE (floppy_queue)
-#define CURRENT elv_next_request(floppy_queue)
/*
* Macros
@@ -1335,64 +1333,60 @@ static int get_track(int drive, int track)
static void redo_fd_request(void)
{
+ struct request *rq;
unsigned int cnt, block, track, sector;
int drive;
struct amiga_floppy_struct *floppy;
char *data;
unsigned long flags;
+ int err;
- repeat:
- if (!CURRENT) {
+next_req:
+ rq = blk_fetch_request(floppy_queue);
+ if (!rq) {
/* Nothing left to do */
return;
}
- floppy = CURRENT->rq_disk->private_data;
+ floppy = rq->rq_disk->private_data;
drive = floppy - unit;
+next_segment:
/* Here someone could investigate to be more efficient */
- for (cnt = 0; cnt < CURRENT->current_nr_sectors; cnt++) {
+ for (cnt = 0, err = 0; cnt < blk_rq_cur_sectors(rq); cnt++) {
#ifdef DEBUG
printk("fd: sector %ld + %d requested for %s\n",
- CURRENT->sector,cnt,
- (rq_data_dir(CURRENT) == READ) ? "read" : "write");
+ blk_rq_pos(rq), cnt,
+ (rq_data_dir(rq) == READ) ? "read" : "write");
#endif
- block = CURRENT->sector + cnt;
+ block = blk_rq_pos(rq) + cnt;
if ((int)block > floppy->blocks) {
- end_request(CURRENT, 0);
- goto repeat;
+ err = -EIO;
+ break;
}
track = block / (floppy->dtype->sects * floppy->type->sect_mult);
sector = block % (floppy->dtype->sects * floppy->type->sect_mult);
- data = CURRENT->buffer + 512 * cnt;
+ data = rq->buffer + 512 * cnt;
#ifdef DEBUG
printk("access to track %d, sector %d, with buffer at "
"0x%08lx\n", track, sector, data);
#endif
- if ((rq_data_dir(CURRENT) != READ) && (rq_data_dir(CURRENT) != WRITE)) {
- printk(KERN_WARNING "do_fd_request: unknown command\n");
- end_request(CURRENT, 0);
- goto repeat;
- }
if (get_track(drive, track) == -1) {
- end_request(CURRENT, 0);
- goto repeat;
+ err = -EIO;
+ break;
}
- switch (rq_data_dir(CURRENT)) {
- case READ:
+ if (rq_data_dir(rq) == READ) {
memcpy(data, floppy->trackbuf + sector * 512, 512);
- break;
-
- case WRITE:
+ } else {
memcpy(floppy->trackbuf + sector * 512, data, 512);
/* keep the drive spinning while writes are scheduled */
if (!fd_motor_on(drive)) {
- end_request(CURRENT, 0);
- goto repeat;
+ err = -EIO;
+ break;
}
/*
* setup a callback to write the track buffer
@@ -1404,14 +1398,12 @@ static void redo_fd_request(void)
/* reset the timer */
mod_timer (flush_track_timer + drive, jiffies + 1);
local_irq_restore(flags);
- break;
}
}
- CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
- CURRENT->sector += CURRENT->current_nr_sectors;
- end_request(CURRENT, 1);
- goto repeat;
+ if (__blk_end_request_cur(rq, err))
+ goto next_segment;
+ goto next_req;
}
static void do_fd_request(struct request_queue * q)
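
The converted redo_fd_request() also demonstrates the companion completion helper: blk_fetch_request() combines peek and start in one call, and __blk_end_request_cur() completes only the current segment, returning true while the request still has data outstanding. The control flow reduces to roughly this sketch (example_do_segment is hypothetical):

#include <linux/blkdev.h>

static int example_do_segment(struct request *rq);   /* hypothetical */

static void example_redo_request(struct request_queue *q)
{
        struct request *rq = blk_fetch_request(q);

        while (rq) {
                int err = example_do_segment(rq);

                if (__blk_end_request_cur(rq, err))
                        continue;                /* same rq, next segment */
                rq = blk_fetch_request(q);       /* rq fully completed */
        }
}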
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 4234c11c1e4c..f5e7180d7f47 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -79,9 +79,7 @@
#undef DEBUG
static struct request_queue *floppy_queue;
-
-#define QUEUE (floppy_queue)
-#define CURRENT elv_next_request(floppy_queue)
+static struct request *fd_request;
/* Disk types: DD, HD, ED */
static struct atari_disk_type {
@@ -376,6 +374,12 @@ static DEFINE_TIMER(readtrack_timer, fd_readtrack_check, 0, 0);
static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0);
static DEFINE_TIMER(fd_timer, check_change, 0, 0);
+static void fd_end_request_cur(int err)
+{
+ if (!__blk_end_request_cur(fd_request, err))
+ fd_request = NULL;
+}
+
static inline void start_motor_off_timer(void)
{
mod_timer(&motor_off_timer, jiffies + FD_MOTOR_OFF_DELAY);
@@ -606,15 +610,15 @@ static void fd_error( void )
return;
}
- if (!CURRENT)
+ if (!fd_request)
return;
- CURRENT->errors++;
- if (CURRENT->errors >= MAX_ERRORS) {
+ fd_request->errors++;
+ if (fd_request->errors >= MAX_ERRORS) {
printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
- end_request(CURRENT, 0);
+ fd_end_request_cur(-EIO);
}
- else if (CURRENT->errors == RECALIBRATE_ERRORS) {
+ else if (fd_request->errors == RECALIBRATE_ERRORS) {
printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
if (SelectedDrive != -1)
SUD.track = -1;
@@ -725,16 +729,14 @@ static void do_fd_action( int drive )
if (IS_BUFFERED( drive, ReqSide, ReqTrack )) {
if (ReqCmd == READ) {
copy_buffer( SECTOR_BUFFER(ReqSector), ReqData );
- if (++ReqCnt < CURRENT->current_nr_sectors) {
+ if (++ReqCnt < blk_rq_cur_sectors(fd_request)) {
/* read next sector */
setup_req_params( drive );
goto repeat;
}
else {
/* all sectors finished */
- CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
- CURRENT->sector += CURRENT->current_nr_sectors;
- end_request(CURRENT, 1);
+ fd_end_request_cur(0);
redo_fd_request();
return;
}
@@ -1132,16 +1134,14 @@ static void fd_rwsec_done1(int status)
}
}
- if (++ReqCnt < CURRENT->current_nr_sectors) {
+ if (++ReqCnt < blk_rq_cur_sectors(fd_request)) {
/* read next sector */
setup_req_params( SelectedDrive );
do_fd_action( SelectedDrive );
}
else {
/* all sectors finished */
- CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
- CURRENT->sector += CURRENT->current_nr_sectors;
- end_request(CURRENT, 1);
+ fd_end_request_cur(0);
redo_fd_request();
}
return;
@@ -1382,7 +1382,7 @@ static void setup_req_params( int drive )
ReqData = ReqBuffer + 512 * ReqCnt;
if (UseTrackbuffer)
- read_track = (ReqCmd == READ && CURRENT->errors == 0);
+ read_track = (ReqCmd == READ && fd_request->errors == 0);
else
read_track = 0;
@@ -1396,25 +1396,27 @@ static void redo_fd_request(void)
int drive, type;
struct atari_floppy_struct *floppy;
- DPRINT(("redo_fd_request: CURRENT=%p dev=%s CURRENT->sector=%ld\n",
- CURRENT, CURRENT ? CURRENT->rq_disk->disk_name : "",
- CURRENT ? CURRENT->sector : 0 ));
+ DPRINT(("redo_fd_request: fd_request=%p dev=%s fd_request->sector=%ld\n",
+ fd_request, fd_request ? fd_request->rq_disk->disk_name : "",
+ fd_request ? blk_rq_pos(fd_request) : 0 ));
IsFormatting = 0;
repeat:
+ if (!fd_request) {
+ fd_request = blk_fetch_request(floppy_queue);
+ if (!fd_request)
+ goto the_end;
+ }
- if (!CURRENT)
- goto the_end;
-
- floppy = CURRENT->rq_disk->private_data;
+ floppy = fd_request->rq_disk->private_data;
drive = floppy - unit;
type = floppy->type;
if (!UD.connected) {
/* drive not connected */
printk(KERN_ERR "Unknown Device: fd%d\n", drive );
- end_request(CURRENT, 0);
+ fd_end_request_cur(-EIO);
goto repeat;
}
@@ -1430,12 +1432,12 @@ repeat:
/* user supplied disk type */
if (--type >= NUM_DISK_MINORS) {
printk(KERN_WARNING "fd%d: invalid disk format", drive );
- end_request(CURRENT, 0);
+ fd_end_request_cur(-EIO);
goto repeat;
}
if (minor2disktype[type].drive_types > DriveType) {
printk(KERN_WARNING "fd%d: unsupported disk format", drive );
- end_request(CURRENT, 0);
+ fd_end_request_cur(-EIO);
goto repeat;
}
type = minor2disktype[type].index;
@@ -1444,8 +1446,8 @@ repeat:
UD.autoprobe = 0;
}
- if (CURRENT->sector + 1 > UDT->blocks) {
- end_request(CURRENT, 0);
+ if (blk_rq_pos(fd_request) + 1 > UDT->blocks) {
+ fd_end_request_cur(-EIO);
goto repeat;
}
@@ -1453,9 +1455,9 @@ repeat:
del_timer( &motor_off_timer );
ReqCnt = 0;
- ReqCmd = rq_data_dir(CURRENT);
- ReqBlock = CURRENT->sector;
- ReqBuffer = CURRENT->buffer;
+ ReqCmd = rq_data_dir(fd_request);
+ ReqBlock = blk_rq_pos(fd_request);
+ ReqBuffer = fd_request->buffer;
setup_req_params( drive );
do_fd_action( drive );
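
Because ataflop drives its state machine from timers and interrupt handlers, it cannot simply loop over blk_fetch_request(); it caches the in-flight request in fd_request and lets the wrapper clear the cache once the final segment completes. The same pattern in isolation (names hypothetical):

static struct request *example_rq;       /* the one in-flight request */

static void example_end_cur(int err)
{
        /* __blk_end_request_cur() returns false once the request is done */
        if (!__blk_end_request_cur(example_rq, err))
                example_rq = NULL;       /* fetch a fresh one on the next pass */
}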
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 5f7e64ba87e5..4bf8705b3ace 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -407,12 +407,7 @@ static int __init ramdisk_size(char *str)
rd_size = simple_strtol(str, NULL, 0);
return 1;
}
-static int __init ramdisk_size2(char *str)
-{
- return ramdisk_size(str);
-}
-__setup("ramdisk=", ramdisk_size);
-__setup("ramdisk_size=", ramdisk_size2);
+__setup("ramdisk_size=", ramdisk_size);
#endif
/*
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 4d4d5e0d3fa6..b22cec97ea19 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -180,11 +180,13 @@ static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
__u32);
static void start_io(ctlr_info_t *h);
static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
- unsigned int use_unit_num, unsigned int log_unit,
__u8 page_code, unsigned char *scsi3addr, int cmd_type);
static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
- unsigned int use_unit_num, unsigned int log_unit,
- __u8 page_code, int cmd_type);
+ __u8 page_code, unsigned char scsi3addr[],
+ int cmd_type);
+static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
+ int attempt_retry);
+static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c);
static void fail_all_cmds(unsigned long ctlr);
static int scan_thread(void *data);
@@ -437,6 +439,194 @@ static void __devinit cciss_procinit(int i)
}
#endif /* CONFIG_PROC_FS */
+#define MAX_PRODUCT_NAME_LEN 19
+
+#define to_hba(n) container_of(n, struct ctlr_info, dev)
+#define to_drv(n) container_of(n, drive_info_struct, dev)
+
+static struct device_type cciss_host_type = {
+ .name = "cciss_host",
+};
+
+static ssize_t dev_show_unique_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ drive_info_struct *drv = to_drv(dev);
+ struct ctlr_info *h = to_hba(drv->dev.parent);
+ __u8 sn[16];
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ if (h->busy_configuring)
+ ret = -EBUSY;
+ else
+ memcpy(sn, drv->serial_no, sizeof(sn));
+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+
+ if (ret)
+ return ret;
+ else
+ return snprintf(buf, 16 * 2 + 2,
+ "%02X%02X%02X%02X%02X%02X%02X%02X"
+ "%02X%02X%02X%02X%02X%02X%02X%02X\n",
+ sn[0], sn[1], sn[2], sn[3],
+ sn[4], sn[5], sn[6], sn[7],
+ sn[8], sn[9], sn[10], sn[11],
+ sn[12], sn[13], sn[14], sn[15]);
+}
+DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL);
+
+static ssize_t dev_show_vendor(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ drive_info_struct *drv = to_drv(dev);
+ struct ctlr_info *h = to_hba(drv->dev.parent);
+ char vendor[VENDOR_LEN + 1];
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ if (h->busy_configuring)
+ ret = -EBUSY;
+ else
+ memcpy(vendor, drv->vendor, VENDOR_LEN + 1);
+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+
+ if (ret)
+ return ret;
+ else
+ return snprintf(buf, sizeof(vendor) + 1, "%s\n", vendor);
[intended as replace of 3700]
+}
+DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL);
+
+static ssize_t dev_show_model(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ drive_info_struct *drv = to_drv(dev);
+ struct ctlr_info *h = to_hba(drv->dev.parent);
+ char model[MODEL_LEN + 1];
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ if (h->busy_configuring)
+ ret = -EBUSY;
+ else
+ memcpy(model, drv->model, MODEL_LEN + 1);
+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+
+ if (ret)
+ return ret;
+ else
+ return snprintf(buf, sizeof(model) + 1, "%s\n", model);
+}
+DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL);
+
+static ssize_t dev_show_rev(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ drive_info_struct *drv = to_drv(dev);
+ struct ctlr_info *h = to_hba(drv->dev.parent);
+ char rev[REV_LEN + 1];
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ if (h->busy_configuring)
+ ret = -EBUSY;
+ else
+ memcpy(rev, drv->rev, REV_LEN + 1);
+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+
+ if (ret)
+ return ret;
+ else
+ return snprintf(buf, sizeof(rev) + 1, "%s\n", rev);
+}
+DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL);
+
+static struct attribute *cciss_dev_attrs[] = {
+ &dev_attr_unique_id.attr,
+ &dev_attr_model.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_rev.attr,
+ NULL
+};
+
+static struct attribute_group cciss_dev_attr_group = {
+ .attrs = cciss_dev_attrs,
+};
+
+static struct attribute_group *cciss_dev_attr_groups[] = {
+ &cciss_dev_attr_group,
+ NULL
+};
+
+static struct device_type cciss_dev_type = {
+ .name = "cciss_device",
+ .groups = cciss_dev_attr_groups,
+};
+
+static struct bus_type cciss_bus_type = {
+ .name = "cciss",
+};
+
+
+/*
+ * Initialize sysfs entry for each controller. This sets up and registers
+ * the 'cciss#' directory for each individual controller under
+ * /sys/bus/pci/devices/<dev>/.
+ */
+static int cciss_create_hba_sysfs_entry(struct ctlr_info *h)
+{
+ device_initialize(&h->dev);
+ h->dev.type = &cciss_host_type;
+ h->dev.bus = &cciss_bus_type;
+ dev_set_name(&h->dev, "%s", h->devname);
+ h->dev.parent = &h->pdev->dev;
+
+ return device_add(&h->dev);
+}
+
+/*
+ * Remove sysfs entries for an hba.
+ */
+static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h)
+{
+ device_del(&h->dev);
+}
+
+/*
+ * Initialize sysfs for each logical drive. This sets up and registers
+ * the 'c#d#' directory for each individual logical drive under
+ * /sys/bus/pci/devices/<dev>/cciss#/. We also create a link from
+ * /sys/block/cciss!c#d# to this entry.
+ */
+static int cciss_create_ld_sysfs_entry(struct ctlr_info *h,
+ drive_info_struct *drv,
+ int drv_index)
+{
+ device_initialize(&drv->dev);
+ drv->dev.type = &cciss_dev_type;
+ drv->dev.bus = &cciss_bus_type;
+ dev_set_name(&drv->dev, "c%dd%d", h->ctlr, drv_index);
+ drv->dev.parent = &h->dev;
+ return device_add(&drv->dev);
+}
+
+/*
+ * Remove sysfs entries for a logical drive.
+ */
+static void cciss_destroy_ld_sysfs_entry(drive_info_struct *drv)
+{
+ device_del(&drv->dev);
+}
+
/*
* For operations that cannot sleep, a command block is allocated at init,
* and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
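
The sysfs additions follow the standard two-step driver-model pattern: device_initialize() prepares the embedded struct device, and device_add() publishes it, with dev_set_name() plus the parent pointer determining where the directory lands in the hierarchy. A condensed sketch of the pattern on a private bus (all names hypothetical); teardown mirrors it with device_del(), exactly as cciss_destroy_hba_sysfs_entry() does above:

#include <linux/device.h>

static struct bus_type example_bus_type = {
        .name = "example",
};

/* assumes example_bus_type has already been bus_register()ed */
static int example_register(struct device *dev, struct device *parent, int idx)
{
        device_initialize(dev);                 /* refcount, klist links, ... */
        dev->bus = &example_bus_type;
        dev->parent = parent;                   /* hangs it under the parent */
        dev_set_name(dev, "c%dd%d", 0, idx);    /* visible directory name */
        return device_add(dev);                 /* creates the sysfs entry */
}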
@@ -1299,7 +1489,6 @@ static void cciss_softirq_done(struct request *rq)
{
CommandList_struct *cmd = rq->completion_data;
ctlr_info_t *h = hba[cmd->ctlr];
- unsigned int nr_bytes;
unsigned long flags;
u64bit temp64;
int i, ddir;
@@ -1321,15 +1510,11 @@ static void cciss_softirq_done(struct request *rq)
printk("Done with %p\n", rq);
#endif /* CCISS_DEBUG */
- /*
- * Store the full size and set the residual count for pc requests
- */
- nr_bytes = blk_rq_bytes(rq);
+ /* set the residual count for pc requests */
if (blk_pc_request(rq))
- rq->data_len = cmd->err_info->ResidualCnt;
+ rq->resid_len = cmd->err_info->ResidualCnt;
- if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, nr_bytes))
- BUG();
+ blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);
spin_lock_irqsave(&h->lock, flags);
cmd_free(h, cmd, 1);
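
The softirq completion shrinks to a single call: where the driver previously had to compute the exact byte count for blk_end_request() and BUG() if anything was left over, blk_end_request_all() completes whatever remains. Side by side:

/* old idiom */
if (blk_end_request(rq, error, blk_rq_bytes(rq)))
        BUG();

/* new idiom -- same effect, no byte-count bookkeeping */
blk_end_request_all(rq, error);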
@@ -1337,6 +1522,56 @@ static void cciss_softirq_done(struct request *rq)
spin_unlock_irqrestore(&h->lock, flags);
}
+static void log_unit_to_scsi3addr(ctlr_info_t *h, unsigned char scsi3addr[],
+ uint32_t log_unit)
+{
+ log_unit = h->drv[log_unit].LunID & 0x03fff;
+ memset(&scsi3addr[4], 0, 4);
+ memcpy(&scsi3addr[0], &log_unit, 4);
+ scsi3addr[3] |= 0x40;
+}
+
+/* This function gets the SCSI vendor, model, and revision of a logical drive
+ * via the inquiry page 0. Model, vendor, and rev are set to empty strings if
+ * they cannot be read.
+ */
+static void cciss_get_device_descr(int ctlr, int logvol, int withirq,
+ char *vendor, char *model, char *rev)
+{
+ int rc;
+ InquiryData_struct *inq_buf;
+ unsigned char scsi3addr[8];
+
+ *vendor = '\0';
+ *model = '\0';
+ *rev = '\0';
+
+ inq_buf = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
+ if (!inq_buf)
+ return;
+
+ log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
+ if (withirq)
+ rc = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buf,
+ sizeof(InquiryData_struct), 0,
+ scsi3addr, TYPE_CMD);
+ else
+ rc = sendcmd(CISS_INQUIRY, ctlr, inq_buf,
+ sizeof(InquiryData_struct), 0,
+ scsi3addr, TYPE_CMD);
+ if (rc == IO_OK) {
+ memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN);
+ vendor[VENDOR_LEN] = '\0';
+ memcpy(model, &inq_buf->data_byte[16], MODEL_LEN);
+ model[MODEL_LEN] = '\0';
+ memcpy(rev, &inq_buf->data_byte[32], REV_LEN);
+ rev[REV_LEN] = '\0';
+ }
+
+ kfree(inq_buf);
+ return;
+}
+
/* This function gets the serial number of a logical drive via
* inquiry page 0x83. Serial no. is 16 bytes. If the serial
* number cannot be had, for whatever reason, 16 bytes of 0xff
@@ -1348,6 +1583,7 @@ static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
#define PAGE_83_INQ_BYTES 64
int rc;
unsigned char *buf;
+ unsigned char scsi3addr[8];
if (buflen > 16)
buflen = 16;
@@ -1356,12 +1592,13 @@ static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
if (!buf)
return;
memset(serial_no, 0, buflen);
+ log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
if (withirq)
rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf,
- PAGE_83_INQ_BYTES, 1, logvol, 0x83, TYPE_CMD);
+ PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
else
rc = sendcmd(CISS_INQUIRY, ctlr, buf,
- PAGE_83_INQ_BYTES, 1, logvol, 0x83, NULL, TYPE_CMD);
+ PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
if (rc == IO_OK)
memcpy(serial_no, &buf[8], buflen);
kfree(buf);
@@ -1377,7 +1614,7 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
disk->first_minor = drv_index << NWD_SHIFT;
disk->fops = &cciss_fops;
disk->private_data = &h->drv[drv_index];
- disk->driverfs_dev = &h->pdev->dev;
+ disk->driverfs_dev = &h->drv[drv_index].dev;
/* Set up queue information */
blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
@@ -1394,8 +1631,8 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
disk->queue->queuedata = h;
- blk_queue_hardsect_size(disk->queue,
- h->drv[drv_index].block_size);
+ blk_queue_logical_block_size(disk->queue,
+ h->drv[drv_index].block_size);
/* Make sure all queue data is written out before */
/* setting h->drv[drv_index].queue, as setting this */
@@ -1468,6 +1705,8 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
drvinfo->block_size = block_size;
drvinfo->nr_blocks = total_size + 1;
+ cciss_get_device_descr(ctlr, drv_index, 1, drvinfo->vendor,
+ drvinfo->model, drvinfo->rev);
cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no,
sizeof(drvinfo->serial_no));
@@ -1517,6 +1756,9 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
h->drv[drv_index].cylinders = drvinfo->cylinders;
h->drv[drv_index].raid_level = drvinfo->raid_level;
memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16);
+ memcpy(h->drv[drv_index].vendor, drvinfo->vendor, VENDOR_LEN + 1);
+ memcpy(h->drv[drv_index].model, drvinfo->model, MODEL_LEN + 1);
+ memcpy(h->drv[drv_index].rev, drvinfo->rev, REV_LEN + 1);
++h->num_luns;
disk = h->gendisk[drv_index];
@@ -1591,6 +1833,8 @@ static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node)
}
}
h->drv[drv_index].LunID = lunid;
+ if (cciss_create_ld_sysfs_entry(h, &h->drv[drv_index], drv_index))
+ goto err_free_disk;
/* Don't need to mark this busy because nobody */
/* else knows about this disk yet to contend */
@@ -1598,6 +1842,11 @@ static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node)
h->drv[drv_index].busy_configuring = 0;
wmb();
return drv_index;
+
+err_free_disk:
+ put_disk(h->gendisk[drv_index]);
+ h->gendisk[drv_index] = NULL;
+ return -1;
}
/* This is for the special case of a controller which
@@ -1668,8 +1917,8 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
goto mem_msg;
return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
- sizeof(ReportLunData_struct), 0,
- 0, 0, TYPE_CMD);
+ sizeof(ReportLunData_struct),
+ 0, CTLR_LUNID, TYPE_CMD);
if (return_code == IO_OK)
listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
@@ -1718,6 +1967,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
h->drv[i].busy_configuring = 1;
spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
return_code = deregister_disk(h, i, 1);
+ cciss_destroy_ld_sysfs_entry(&h->drv[i]);
h->drv[i].busy_configuring = 0;
}
}
@@ -1877,11 +2127,9 @@ static int deregister_disk(ctlr_info_t *h, int drv_index,
return 0;
}
-static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
- 1: address logical volume log_unit,
- 2: periph device address is scsi3addr */
- unsigned int log_unit, __u8 page_code,
- unsigned char *scsi3addr, int cmd_type)
+static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+ size_t size, __u8 page_code, unsigned char *scsi3addr,
+ int cmd_type)
{
ctlr_info_t *h = hba[ctlr];
u64bit buff_dma_handle;
@@ -1897,27 +2145,12 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
c->Header.SGTotal = 0;
}
c->Header.Tag.lower = c->busaddr;
+ memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
c->Request.Type.Type = cmd_type;
if (cmd_type == TYPE_CMD) {
switch (cmd) {
case CISS_INQUIRY:
- /* If the logical unit number is 0 then, this is going
- to controller so It's a physical command
- mode = 0 target = 0. So we have nothing to write.
- otherwise, if use_unit_num == 1,
- mode = 1(volume set addressing) target = LUNID
- otherwise, if use_unit_num == 2,
- mode = 0(periph dev addr) target = scsi3addr */
- if (use_unit_num == 1) {
- c->Header.LUN.LogDev.VolId =
- h->drv[log_unit].LunID;
- c->Header.LUN.LogDev.Mode = 1;
- } else if (use_unit_num == 2) {
- memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
- 8);
- c->Header.LUN.LogDev.Mode = 0;
- }
/* are we trying to read a vital product page */
if (page_code != 0) {
c->Request.CDB[1] = 0x01;
@@ -1947,8 +2180,6 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
break;
case CCISS_READ_CAPACITY:
- c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
- c->Header.LUN.LogDev.Mode = 1;
c->Request.CDBLen = 10;
c->Request.Type.Attribute = ATTR_SIMPLE;
c->Request.Type.Direction = XFER_READ;
@@ -1956,8 +2187,6 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
c->Request.CDB[0] = cmd;
break;
case CCISS_READ_CAPACITY_16:
- c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
- c->Header.LUN.LogDev.Mode = 1;
c->Request.CDBLen = 16;
c->Request.Type.Attribute = ATTR_SIMPLE;
c->Request.Type.Direction = XFER_READ;
@@ -1979,6 +2208,12 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
c->Request.CDB[0] = BMIC_WRITE;
c->Request.CDB[6] = BMIC_CACHE_FLUSH;
break;
+ case TEST_UNIT_READY:
+ c->Request.CDBLen = 6;
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ c->Request.Type.Direction = XFER_NONE;
+ c->Request.Timeout = 0;
+ break;
default:
printk(KERN_WARNING
"cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
@@ -1997,13 +2232,13 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
memcpy(&c->Request.CDB[4], buff, 8);
break;
case 1: /* RESET message */
- c->Request.CDBLen = 12;
+ c->Request.CDBLen = 16;
c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_WRITE;
+ c->Request.Type.Direction = XFER_NONE;
c->Request.Timeout = 0;
memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
c->Request.CDB[0] = cmd; /* reset */
- c->Request.CDB[1] = 0x04; /* reset a LUN */
+ c->Request.CDB[1] = 0x03; /* reset a target */
break;
case 3: /* No-Op message */
c->Request.CDBLen = 1;
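
With the use_unit_num/log_unit pair gone, fill_cmd() copies a caller-supplied 8-byte SCSI-3 address wholesale (the memcpy near the top of the function), so every caller builds the address first: log_unit_to_scsi3addr() encodes a logical volume, CTLR_LUNID (eight zero bytes) addresses the controller itself, and the tape paths pass the peripheral address straight through. The calling convention, condensed from the surrounding hunks (buf and len stand in for a real buffer):

unsigned char scsi3addr[8];

/* address a logical volume */
log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf, len, 0, scsi3addr, TYPE_CMD);

/* address the controller itself */
rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf, len, 0, CTLR_LUNID, TYPE_CMD);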
@@ -2035,114 +2270,152 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
return status;
}
-static int sendcmd_withirq(__u8 cmd,
- int ctlr,
- void *buff,
- size_t size,
- unsigned int use_unit_num,
- unsigned int log_unit, __u8 page_code, int cmd_type)
+static int check_target_status(ctlr_info_t *h, CommandList_struct *c)
{
- ctlr_info_t *h = hba[ctlr];
- CommandList_struct *c;
+ switch (c->err_info->ScsiStatus) {
+ case SAM_STAT_GOOD:
+ return IO_OK;
+ case SAM_STAT_CHECK_CONDITION:
+ switch (0xf & c->err_info->SenseInfo[2]) {
+ case 0: return IO_OK; /* no sense */
+ case 1: return IO_OK; /* recovered error */
+ default:
+ printk(KERN_WARNING "cciss%d: cmd 0x%02x "
+ "check condition, sense key = 0x%02x\n",
+ h->ctlr, c->Request.CDB[0],
+ c->err_info->SenseInfo[2]);
+ }
+ break;
+ default:
+ printk(KERN_WARNING "cciss%d: cmd 0x%02x"
+ "scsi status = 0x%02x\n", h->ctlr,
+ c->Request.CDB[0], c->err_info->ScsiStatus);
+ break;
+ }
+ return IO_ERROR;
+}
+
+static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c)
+{
+ int return_status = IO_OK;
+
+ if (c->err_info->CommandStatus == CMD_SUCCESS)
+ return IO_OK;
+
+ switch (c->err_info->CommandStatus) {
+ case CMD_TARGET_STATUS:
+ return_status = check_target_status(h, c);
+ break;
+ case CMD_DATA_UNDERRUN:
+ case CMD_DATA_OVERRUN:
+ /* expected for inquiry and report lun commands */
+ break;
+ case CMD_INVALID:
+ printk(KERN_WARNING "cciss: cmd 0x%02x is "
+ "reported invalid\n", c->Request.CDB[0]);
+ return_status = IO_ERROR;
+ break;
+ case CMD_PROTOCOL_ERR:
+ printk(KERN_WARNING "cciss: cmd 0x%02x has "
+ "protocol error \n", c->Request.CDB[0]);
+ return_status = IO_ERROR;
+ break;
+ case CMD_HARDWARE_ERR:
+ printk(KERN_WARNING "cciss: cmd 0x%02x had "
+ " hardware error\n", c->Request.CDB[0]);
+ return_status = IO_ERROR;
+ break;
+ case CMD_CONNECTION_LOST:
+ printk(KERN_WARNING "cciss: cmd 0x%02x had "
+ "connection lost\n", c->Request.CDB[0]);
+ return_status = IO_ERROR;
+ break;
+ case CMD_ABORTED:
+ printk(KERN_WARNING "cciss: cmd 0x%02x was "
+ "aborted\n", c->Request.CDB[0]);
+ return_status = IO_ERROR;
+ break;
+ case CMD_ABORT_FAILED:
+ printk(KERN_WARNING "cciss: cmd 0x%02x reports "
+ "abort failed\n", c->Request.CDB[0]);
+ return_status = IO_ERROR;
+ break;
+ case CMD_UNSOLICITED_ABORT:
+ printk(KERN_WARNING
+ "cciss%d: unsolicited abort 0x%02x\n", h->ctlr,
+ c->Request.CDB[0]);
+ return_status = IO_NEEDS_RETRY;
+ break;
+ default:
+ printk(KERN_WARNING "cciss: cmd 0x%02x returned "
+ "unknown status %x\n", c->Request.CDB[0],
+ c->err_info->CommandStatus);
+ return_status = IO_ERROR;
+ }
+ return return_status;
+}
+
+static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
+ int attempt_retry)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
u64bit buff_dma_handle;
unsigned long flags;
- int return_status;
- DECLARE_COMPLETION_ONSTACK(wait);
+ int return_status = IO_OK;
- if ((c = cmd_alloc(h, 0)) == NULL)
- return -ENOMEM;
- return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
- log_unit, page_code, NULL, cmd_type);
- if (return_status != IO_OK) {
- cmd_free(h, c, 0);
- return return_status;
- }
- resend_cmd2:
+resend_cmd2:
c->waiting = &wait;
-
/* Put the request on the tail of the queue and send it */
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
addQ(&h->reqQ, c);
h->Qdepth++;
start_io(h);
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
wait_for_completion(&wait);
- if (c->err_info->CommandStatus != 0) { /* an error has occurred */
- switch (c->err_info->CommandStatus) {
- case CMD_TARGET_STATUS:
- printk(KERN_WARNING "cciss: cmd %p has "
- " completed with errors\n", c);
- if (c->err_info->ScsiStatus) {
- printk(KERN_WARNING "cciss: cmd %p "
- "has SCSI Status = %x\n",
- c, c->err_info->ScsiStatus);
- }
+ if (c->err_info->CommandStatus == 0 || !attempt_retry)
+ goto command_done;
- break;
- case CMD_DATA_UNDERRUN:
- case CMD_DATA_OVERRUN:
- /* expected for inquire and report lun commands */
- break;
- case CMD_INVALID:
- printk(KERN_WARNING "cciss: Cmd %p is "
- "reported invalid\n", c);
- return_status = IO_ERROR;
- break;
- case CMD_PROTOCOL_ERR:
- printk(KERN_WARNING "cciss: cmd %p has "
- "protocol error \n", c);
- return_status = IO_ERROR;
- break;
- case CMD_HARDWARE_ERR:
- printk(KERN_WARNING "cciss: cmd %p had "
- " hardware error\n", c);
- return_status = IO_ERROR;
- break;
- case CMD_CONNECTION_LOST:
- printk(KERN_WARNING "cciss: cmd %p had "
- "connection lost\n", c);
- return_status = IO_ERROR;
- break;
- case CMD_ABORTED:
- printk(KERN_WARNING "cciss: cmd %p was "
- "aborted\n", c);
- return_status = IO_ERROR;
- break;
- case CMD_ABORT_FAILED:
- printk(KERN_WARNING "cciss: cmd %p reports "
- "abort failed\n", c);
- return_status = IO_ERROR;
- break;
- case CMD_UNSOLICITED_ABORT:
- printk(KERN_WARNING
- "cciss%d: unsolicited abort %p\n", ctlr, c);
- if (c->retry_count < MAX_CMD_RETRIES) {
- printk(KERN_WARNING
- "cciss%d: retrying %p\n", ctlr, c);
- c->retry_count++;
- /* erase the old error information */
- memset(c->err_info, 0,
- sizeof(ErrorInfo_struct));
- return_status = IO_OK;
- INIT_COMPLETION(wait);
- goto resend_cmd2;
- }
- return_status = IO_ERROR;
- break;
- default:
- printk(KERN_WARNING "cciss: cmd %p returned "
- "unknown status %x\n", c,
- c->err_info->CommandStatus);
- return_status = IO_ERROR;
- }
+ return_status = process_sendcmd_error(h, c);
+
+ if (return_status == IO_NEEDS_RETRY &&
+ c->retry_count < MAX_CMD_RETRIES) {
+ printk(KERN_WARNING "cciss%d: retrying 0x%02x\n", h->ctlr,
+ c->Request.CDB[0]);
+ c->retry_count++;
+ /* erase the old error information */
+ memset(c->err_info, 0, sizeof(ErrorInfo_struct));
+ return_status = IO_OK;
+ INIT_COMPLETION(wait);
+ goto resend_cmd2;
}
+
+command_done:
/* unlock the buffers from DMA */
buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
+ return return_status;
+}
+
+static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
+ __u8 page_code, unsigned char scsi3addr[],
+ int cmd_type)
+{
+ ctlr_info_t *h = hba[ctlr];
+ CommandList_struct *c;
+ int return_status;
+
+ c = cmd_alloc(h, 0);
+ if (!c)
+ return -ENOMEM;
+ return_status = fill_cmd(c, cmd, ctlr, buff, size, page_code,
+ scsi3addr, cmd_type);
+ if (return_status == IO_OK)
+ return_status = sendcmd_withirq_core(h, c, 1);
+
cmd_free(h, c, 0);
return return_status;
}
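
sendcmd_withirq_core() takes an attempt_retry flag so callers can opt out of the automatic resend of unsolicited aborts; the SCSI error-recovery code in cciss_scsi.c below passes 0 because its test-unit-ready loop implements its own backoff. Both call styles, condensed from this patch:

/* normal path: core retries IO_NEEDS_RETRY up to MAX_CMD_RETRIES */
return_status = sendcmd_withirq_core(h, c, 1);

/* error-recovery path: single shot, the caller decides what happens next */
rc = fill_cmd(c, TEST_UNIT_READY, h->ctlr, NULL, 0, 0, lunaddr, TYPE_CMD);
if (rc == 0)
        rc = sendcmd_withirq_core(h, c, 0);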
@@ -2155,15 +2428,17 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
{
int return_code;
unsigned long t;
+ unsigned char scsi3addr[8];
memset(inq_buff, 0, sizeof(InquiryData_struct));
+ log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
if (withirq)
return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
- inq_buff, sizeof(*inq_buff), 1,
- logvol, 0xC1, TYPE_CMD);
+ inq_buff, sizeof(*inq_buff),
+ 0xC1, scsi3addr, TYPE_CMD);
else
return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
- sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
+ sizeof(*inq_buff), 0xC1, scsi3addr,
TYPE_CMD);
if (return_code == IO_OK) {
if (inq_buff->data_byte[8] == 0xFF) {
@@ -2204,6 +2479,7 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
{
ReadCapdata_struct *buf;
int return_code;
+ unsigned char scsi3addr[8];
buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
if (!buf) {
@@ -2211,14 +2487,15 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
return;
}
+ log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
if (withirq)
return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
ctlr, buf, sizeof(ReadCapdata_struct),
- 1, logvol, 0, TYPE_CMD);
+ 0, scsi3addr, TYPE_CMD);
else
return_code = sendcmd(CCISS_READ_CAPACITY,
ctlr, buf, sizeof(ReadCapdata_struct),
- 1, logvol, 0, NULL, TYPE_CMD);
+ 0, scsi3addr, TYPE_CMD);
if (return_code == IO_OK) {
*total_size = be32_to_cpu(*(__be32 *) buf->total_size);
*block_size = be32_to_cpu(*(__be32 *) buf->block_size);
@@ -2238,6 +2515,7 @@ cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size,
{
ReadCapdata_struct_16 *buf;
int return_code;
+ unsigned char scsi3addr[8];
buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
if (!buf) {
@@ -2245,15 +2523,16 @@ cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size,
return;
}
+ log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
if (withirq) {
return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
ctlr, buf, sizeof(ReadCapdata_struct_16),
- 1, logvol, 0, TYPE_CMD);
+ 0, scsi3addr, TYPE_CMD);
}
else {
return_code = sendcmd(CCISS_READ_CAPACITY_16,
ctlr, buf, sizeof(ReadCapdata_struct_16),
- 1, logvol, 0, NULL, TYPE_CMD);
+ 0, scsi3addr, TYPE_CMD);
}
if (return_code == IO_OK) {
*total_size = be64_to_cpu(*(__be64 *) buf->total_size);
@@ -2303,7 +2582,7 @@ static int cciss_revalidate(struct gendisk *disk)
cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
inq_buff, drv);
- blk_queue_hardsect_size(drv->queue, drv->block_size);
+ blk_queue_logical_block_size(drv->queue, drv->block_size);
set_capacity(disk, drv->nr_blocks);
kfree(inq_buff);
@@ -2333,86 +2612,21 @@ static unsigned long pollcomplete(int ctlr)
return 1;
}
-static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
-{
- /* We get in here if sendcmd() is polling for completions
- and gets some command back that it wasn't expecting --
- something other than that which it just sent down.
- Ordinarily, that shouldn't happen, but it can happen when
- the scsi tape stuff gets into error handling mode, and
- starts using sendcmd() to try to abort commands and
- reset tape drives. In that case, sendcmd may pick up
- completions of commands that were sent to logical drives
- through the block i/o system, or cciss ioctls completing, etc.
- In that case, we need to save those completions for later
- processing by the interrupt handler.
- */
-
-#ifdef CONFIG_CISS_SCSI_TAPE
- struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
-
- /* If it's not the scsi tape stuff doing error handling, (abort */
- /* or reset) then we don't expect anything weird. */
- if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
-#endif
- printk(KERN_WARNING "cciss cciss%d: SendCmd "
- "Invalid command list address returned! (%lx)\n",
- ctlr, complete);
- /* not much we can do. */
-#ifdef CONFIG_CISS_SCSI_TAPE
- return 1;
- }
-
- /* We've sent down an abort or reset, but something else
- has completed */
- if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
- /* Uh oh. No room to save it for later... */
- printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
- "reject list overflow, command lost!\n", ctlr);
- return 1;
- }
- /* Save it for later */
- srl->complete[srl->ncompletions] = complete;
- srl->ncompletions++;
-#endif
- return 0;
-}
-
-/*
- * Send a command to the controller, and wait for it to complete.
- * Only used at init time.
+/* Send command c to controller h and poll for it to complete.
+ * Turns interrupts off on the board. Used at driver init time
+ * and during SCSI error recovery.
*/
-static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
- 1: address logical volume log_unit,
- 2: periph device address is scsi3addr */
- unsigned int log_unit,
- __u8 page_code, unsigned char *scsi3addr, int cmd_type)
+static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
{
- CommandList_struct *c;
int i;
unsigned long complete;
- ctlr_info_t *info_p = hba[ctlr];
+ int status = IO_ERROR;
u64bit buff_dma_handle;
- int status, done = 0;
- if ((c = cmd_alloc(info_p, 1)) == NULL) {
- printk(KERN_WARNING "cciss: unable to get memory");
- return IO_ERROR;
- }
- status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
- log_unit, page_code, scsi3addr, cmd_type);
- if (status != IO_OK) {
- cmd_free(info_p, c, 1);
- return status;
- }
- resend_cmd1:
- /*
- * Disable interrupt
- */
-#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "cciss: turning intr off\n");
-#endif /* CCISS_DEBUG */
- info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
+resend_cmd1:
+
+ /* Disable interrupt on the board. */
+ h->access.set_intr_mask(h, CCISS_INTR_OFF);
/* Make sure there is room in the command FIFO */
/* Actually it should be completely empty at this time */
@@ -2420,21 +2634,15 @@ static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use
/* tape side of the driver. */
for (i = 200000; i > 0; i--) {
/* if fifo isn't full go */
- if (!(info_p->access.fifo_full(info_p))) {
-
+ if (!(h->access.fifo_full(h)))
break;
- }
udelay(10);
printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
- " waiting!\n", ctlr);
+ " waiting!\n", h->ctlr);
}
- /*
- * Send the cmd
- */
- info_p->access.submit_command(info_p, c);
- done = 0;
+ h->access.submit_command(h, c); /* Send the cmd */
do {
- complete = pollcomplete(ctlr);
+ complete = pollcomplete(h->ctlr);
#ifdef CCISS_DEBUG
printk(KERN_DEBUG "cciss: command completed\n");
@@ -2443,97 +2651,102 @@ static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use
if (complete == 1) {
printk(KERN_WARNING
"cciss cciss%d: SendCmd Timeout out, "
- "No command list address returned!\n", ctlr);
+ "No command list address returned!\n", h->ctlr);
status = IO_ERROR;
- done = 1;
break;
}
- /* This will need to change for direct lookup completions */
- if ((complete & CISS_ERROR_BIT)
- && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
- /* if data overrun or underun on Report command
- ignore it
- */
- if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
- (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
- (c->Request.CDB[0] == CISS_INQUIRY)) &&
- ((c->err_info->CommandStatus ==
- CMD_DATA_OVERRUN) ||
- (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
- )) {
- complete = c->busaddr;
- } else {
- if (c->err_info->CommandStatus ==
- CMD_UNSOLICITED_ABORT) {
- printk(KERN_WARNING "cciss%d: "
- "unsolicited abort %p\n",
- ctlr, c);
- if (c->retry_count < MAX_CMD_RETRIES) {
- printk(KERN_WARNING
- "cciss%d: retrying %p\n",
- ctlr, c);
- c->retry_count++;
- /* erase the old error */
- /* information */
- memset(c->err_info, 0,
- sizeof
- (ErrorInfo_struct));
- goto resend_cmd1;
- } else {
- printk(KERN_WARNING
- "cciss%d: retried %p too "
- "many times\n", ctlr, c);
- status = IO_ERROR;
- goto cleanup1;
- }
- } else if (c->err_info->CommandStatus ==
- CMD_UNABORTABLE) {
- printk(KERN_WARNING
- "cciss%d: command could not be aborted.\n",
- ctlr);
- status = IO_ERROR;
- goto cleanup1;
- }
- printk(KERN_WARNING "ciss ciss%d: sendcmd"
- " Error %x \n", ctlr,
- c->err_info->CommandStatus);
- printk(KERN_WARNING "ciss ciss%d: sendcmd"
- " offensive info\n"
- " size %x\n num %x value %x\n",
- ctlr,
- c->err_info->MoreErrInfo.Invalid_Cmd.
- offense_size,
- c->err_info->MoreErrInfo.Invalid_Cmd.
- offense_num,
- c->err_info->MoreErrInfo.Invalid_Cmd.
- offense_value);
- status = IO_ERROR;
- goto cleanup1;
- }
+ /* Make sure it's the command we're expecting. */
+ if ((complete & ~CISS_ERROR_BIT) != c->busaddr) {
+ printk(KERN_WARNING "cciss%d: Unexpected command "
+ "completion.\n", h->ctlr);
+ continue;
+ }
+
+ /* It is our command. If no error, we're done. */
+ if (!(complete & CISS_ERROR_BIT)) {
+ status = IO_OK;
+ break;
}
- /* This will need changing for direct lookup completions */
- if (complete != c->busaddr) {
- if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
- BUG(); /* we are pretty much hosed if we get here. */
+
+ /* There is an error... */
+
+ /* if data overrun or underun on Report command ignore it */
+ if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
+ (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
+ (c->Request.CDB[0] == CISS_INQUIRY)) &&
+ ((c->err_info->CommandStatus == CMD_DATA_OVERRUN) ||
+ (c->err_info->CommandStatus == CMD_DATA_UNDERRUN))) {
+ complete = c->busaddr;
+ status = IO_OK;
+ break;
+ }
+
+ if (c->err_info->CommandStatus == CMD_UNSOLICITED_ABORT) {
+ printk(KERN_WARNING "cciss%d: unsolicited abort %p\n",
+ h->ctlr, c);
+ if (c->retry_count < MAX_CMD_RETRIES) {
+ printk(KERN_WARNING "cciss%d: retrying %p\n",
+ h->ctlr, c);
+ c->retry_count++;
+ /* erase the old error information */
+ memset(c->err_info, 0, sizeof(ErrorInfo_struct));
+ goto resend_cmd1;
}
- continue;
- } else
- done = 1;
- } while (!done);
+ printk(KERN_WARNING "cciss%d: retried %p too many "
+ "times\n", h->ctlr, c);
+ status = IO_ERROR;
+ break;
+ }
+
+ if (c->err_info->CommandStatus == CMD_UNABORTABLE) {
+ printk(KERN_WARNING "cciss%d: command could not be "
+ "aborted.\n", h->ctlr);
+ status = IO_ERROR;
+ break;
+ }
+
+ if (c->err_info->CommandStatus == CMD_TARGET_STATUS) {
+ status = check_target_status(h, c);
+ break;
+ }
+
+ printk(KERN_WARNING "cciss%d: sendcmd error\n", h->ctlr);
+ printk(KERN_WARNING "cmd = 0x%02x, CommandStatus = 0x%02x\n",
+ c->Request.CDB[0], c->err_info->CommandStatus);
+ status = IO_ERROR;
+ break;
+
+ } while (1);
- cleanup1:
/* unlock the data buffer from DMA */
buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
- pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
+ pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
-#ifdef CONFIG_CISS_SCSI_TAPE
- /* if we saved some commands for later, process them now. */
- if (info_p->scsi_rejects.ncompletions > 0)
- do_cciss_intr(0, info_p);
-#endif
- cmd_free(info_p, c, 1);
+ return status;
+}
+
+/*
+ * Send a command to the controller, and wait for it to complete.
+ * Used at init time, and during SCSI error recovery.
+ */
+static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
+ __u8 page_code, unsigned char *scsi3addr, int cmd_type)
+{
+ CommandList_struct *c;
+ int status;
+
+ c = cmd_alloc(hba[ctlr], 1);
+ if (!c) {
+ printk(KERN_WARNING "cciss: unable to get memory");
+ return IO_ERROR;
+ }
+ status = fill_cmd(c, cmd, ctlr, buff, size, page_code,
+ scsi3addr, cmd_type);
+ if (status == IO_OK)
+ status = sendcmd_core(hba[ctlr], c);
+ cmd_free(hba[ctlr], c, 1);
return status;
}
@@ -2691,7 +2904,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
printk(KERN_WARNING "cciss: cmd %p has"
" completed with data underrun "
"reported\n", cmd);
- cmd->rq->data_len = cmd->err_info->ResidualCnt;
+ cmd->rq->resid_len = cmd->err_info->ResidualCnt;
}
break;
case CMD_DATA_OVERRUN:
@@ -2806,7 +3019,7 @@ static void do_cciss_request(struct request_queue *q)
goto startio;
queue:
- creq = elv_next_request(q);
+ creq = blk_peek_request(q);
if (!creq)
goto startio;
@@ -2815,7 +3028,7 @@ static void do_cciss_request(struct request_queue *q)
if ((c = cmd_alloc(h, 1)) == NULL)
goto full;
- blkdev_dequeue_request(creq);
+ blk_start_request(creq);
spin_unlock_irq(q->queue_lock);
@@ -2840,10 +3053,10 @@ static void do_cciss_request(struct request_queue *q)
c->Request.Timeout = 0; // Don't time out
c->Request.CDB[0] =
(rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
- start_blk = creq->sector;
+ start_blk = blk_rq_pos(creq);
#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
- (int)creq->nr_sectors);
+ printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",
+ (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
#endif /* CCISS_DEBUG */
sg_init_table(tmp_sg, MAXSGENTRIES);
@@ -2869,8 +3082,8 @@ static void do_cciss_request(struct request_queue *q)
h->maxSG = seg;
#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "cciss: Submitting %lu sectors in %d segments\n",
- creq->nr_sectors, seg);
+ printk(KERN_DEBUG "cciss: Submitting %u sectors in %d segments\n",
+ blk_rq_sectors(creq), seg);
#endif /* CCISS_DEBUG */
c->Header.SGList = c->Header.SGTotal = seg;
@@ -2882,8 +3095,8 @@ static void do_cciss_request(struct request_queue *q)
c->Request.CDB[4] = (start_blk >> 8) & 0xff;
c->Request.CDB[5] = start_blk & 0xff;
c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
- c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
- c->Request.CDB[8] = creq->nr_sectors & 0xff;
+ c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff;
+ c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff;
c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
} else {
u32 upper32 = upper_32_bits(start_blk);
@@ -2898,10 +3111,10 @@ static void do_cciss_request(struct request_queue *q)
c->Request.CDB[7]= (start_blk >> 16) & 0xff;
c->Request.CDB[8]= (start_blk >> 8) & 0xff;
c->Request.CDB[9]= start_blk & 0xff;
- c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
- c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
- c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
- c->Request.CDB[13]= creq->nr_sectors & 0xff;
+ c->Request.CDB[10]= (blk_rq_sectors(creq) >> 24) & 0xff;
+ c->Request.CDB[11]= (blk_rq_sectors(creq) >> 16) & 0xff;
+ c->Request.CDB[12]= (blk_rq_sectors(creq) >> 8) & 0xff;
+ c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
c->Request.CDB[14] = c->Request.CDB[15] = 0;
}
} else if (blk_pc_request(creq)) {
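
The READ/WRITE hunks are plain big-endian packing of the accessor values: the 10-byte CDB carries a 32-bit LBA at bytes 2-5 and a 16-bit sector count at bytes 7-8, and the 16-byte variant widens both fields. The same shift-and-mask chains could be written with the unaligned helpers (a sketch of an equivalent, not what the patch does):

#include <asm/unaligned.h>

/* 10-byte CDB: 32-bit LBA at bytes 2-5, 16-bit count at bytes 7-8 */
put_unaligned_be32(start_blk, &c->Request.CDB[2]);
put_unaligned_be16(blk_rq_sectors(creq), &c->Request.CDB[7]);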
@@ -2931,44 +3144,18 @@ startio:
static inline unsigned long get_next_completion(ctlr_info_t *h)
{
-#ifdef CONFIG_CISS_SCSI_TAPE
- /* Any rejects from sendcmd() lying around? Process them first */
- if (h->scsi_rejects.ncompletions == 0)
- return h->access.command_completed(h);
- else {
- struct sendcmd_reject_list *srl;
- int n;
- srl = &h->scsi_rejects;
- n = --srl->ncompletions;
- /* printk("cciss%d: processing saved reject\n", h->ctlr); */
- printk("p");
- return srl->complete[n];
- }
-#else
return h->access.command_completed(h);
-#endif
}
static inline int interrupt_pending(ctlr_info_t *h)
{
-#ifdef CONFIG_CISS_SCSI_TAPE
- return (h->access.intr_pending(h)
- || (h->scsi_rejects.ncompletions > 0));
-#else
return h->access.intr_pending(h);
-#endif
}
static inline long interrupt_not_for_us(ctlr_info_t *h)
{
-#ifdef CONFIG_CISS_SCSI_TAPE
- return (((h->access.intr_pending(h) == 0) ||
- (h->interrupts_enabled == 0))
- && (h->scsi_rejects.ncompletions == 0));
-#else
return (((h->access.intr_pending(h) == 0) ||
(h->interrupts_enabled == 0)));
-#endif
}
static irqreturn_t do_cciss_intr(int irq, void *dev_id)
@@ -3723,12 +3910,15 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
INIT_HLIST_HEAD(&hba[i]->reqQ);
if (cciss_pci_init(hba[i], pdev) != 0)
- goto clean1;
+ goto clean0;
sprintf(hba[i]->devname, "cciss%d", i);
hba[i]->ctlr = i;
hba[i]->pdev = pdev;
+ if (cciss_create_hba_sysfs_entry(hba[i]))
+ goto clean0;
+
/* configure PCI DMA stuff */
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
dac = 1;
@@ -3787,15 +3977,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
printk(KERN_ERR "cciss: out of memory");
goto clean4;
}
-#ifdef CONFIG_CISS_SCSI_TAPE
- hba[i]->scsi_rejects.complete =
- kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
- (hba[i]->nr_cmds + 5), GFP_KERNEL);
- if (hba[i]->scsi_rejects.complete == NULL) {
- printk(KERN_ERR "cciss: out of memory");
- goto clean4;
- }
-#endif
spin_lock_init(&hba[i]->lock);
/* Initialize the pdev driver private data.
@@ -3828,7 +4009,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
}
return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff,
- sizeof(InquiryData_struct), 0, 0 , 0, TYPE_CMD);
+ sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD);
if (return_code == IO_OK) {
hba[i]->firm_ver[0] = inq_buff->data_byte[32];
hba[i]->firm_ver[1] = inq_buff->data_byte[33];
@@ -3855,9 +4036,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
clean4:
kfree(inq_buff);
-#ifdef CONFIG_CISS_SCSI_TAPE
- kfree(hba[i]->scsi_rejects.complete);
-#endif
kfree(hba[i]->cmd_pool_bits);
if (hba[i]->cmd_pool)
pci_free_consistent(hba[i]->pdev,
@@ -3872,6 +4050,8 @@ clean4:
clean2:
unregister_blkdev(hba[i]->major, hba[i]->devname);
clean1:
+ cciss_destroy_hba_sysfs_entry(hba[i]);
+clean0:
hba[i]->busy_initializing = 0;
/* cleanup any queues that may have been initialized */
for (j=0; j <= hba[i]->highest_lun; j++){
@@ -3907,8 +4087,8 @@ static void cciss_shutdown(struct pci_dev *pdev)
/* sendcmd will turn off interrupt, and send the flush...
* To write all data in the battery backed cache to disks */
memset(flush_buf, 0, 4);
- return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
- TYPE_CMD);
+ return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0,
+ CTLR_LUNID, TYPE_CMD);
if (return_code == IO_OK) {
printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
} else {
@@ -3973,15 +4153,13 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
kfree(hba[i]->cmd_pool_bits);
-#ifdef CONFIG_CISS_SCSI_TAPE
- kfree(hba[i]->scsi_rejects.complete);
-#endif
/*
* Deliberately omit pci_disable_device(): it does something nasty to
* Smart Array controllers that pci_enable_device does not undo
*/
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
+ cciss_destroy_hba_sysfs_entry(hba[i]);
free_hba(i);
}
@@ -3999,6 +4177,8 @@ static struct pci_driver cciss_pci_driver = {
*/
static int __init cciss_init(void)
{
+ int err;
+
/*
* The hardware requires that commands are aligned on a 64-bit
* boundary. Given that we use pci_alloc_consistent() to allocate an
@@ -4008,8 +4188,20 @@ static int __init cciss_init(void)
printk(KERN_INFO DRIVER_NAME "\n");
+ err = bus_register(&cciss_bus_type);
+ if (err)
+ return err;
+
/* Register for our PCI devices */
- return pci_register_driver(&cciss_pci_driver);
+ err = pci_register_driver(&cciss_pci_driver);
+ if (err)
+ goto err_bus_register;
+
+ return 0;
+
+err_bus_register:
+ bus_unregister(&cciss_bus_type);
+ return err;
}
static void __exit cciss_cleanup(void)
@@ -4026,6 +4218,7 @@ static void __exit cciss_cleanup(void)
}
}
remove_proc_entry("driver/cciss", NULL);
+ bus_unregister(&cciss_bus_type);
}
static void fail_all_cmds(unsigned long ctlr)
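
cciss_init() now has two registrations to keep balanced, so the error path unwinds in reverse order and cciss_cleanup() mirrors it. The shape of the pattern (hypothetical names):

static int __init example_init(void)
{
        int err;

        err = bus_register(&example_bus_type);
        if (err)
                return err;

        err = pci_register_driver(&example_pci_driver);
        if (err)
                bus_unregister(&example_bus_type);      /* undo step 1 */
        return err;
}

static void __exit example_exit(void)
{
        pci_unregister_driver(&example_pci_driver);
        bus_unregister(&example_bus_type);
}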
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 703e08038fb9..06a5db25b298 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -11,6 +11,11 @@
#define IO_OK 0
#define IO_ERROR 1
+#define IO_NEEDS_RETRY 3
+
+#define VENDOR_LEN 8
+#define MODEL_LEN 16
+#define REV_LEN 4
struct ctlr_info;
typedef struct ctlr_info ctlr_info_t;
@@ -34,23 +39,20 @@ typedef struct _drive_info_struct
int cylinders;
int raid_level; /* set to -1 to indicate that
* the drive is not in use/configured
- */
- int busy_configuring; /*This is set when the drive is being removed
- *to prevent it from being opened or it's queue
- *from being started.
- */
- __u8 serial_no[16]; /* from inquiry page 0x83, */
- /* not necc. null terminated. */
+ */
+ int busy_configuring; /* This is set when a drive is being removed
+ * to prevent it from being opened or it's
+ * queue from being started.
+ */
+ struct device dev;
+ __u8 serial_no[16]; /* from inquiry page 0x83,
+ * not necc. null terminated.
+ */
+ char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */
+ char model[MODEL_LEN + 1]; /* SCSI model string */
+ char rev[REV_LEN + 1]; /* SCSI revision string */
} drive_info_struct;
-#ifdef CONFIG_CISS_SCSI_TAPE
-
-struct sendcmd_reject_list {
- int ncompletions;
- unsigned long *complete; /* array of NR_CMDS tags */
-};
-
-#endif
struct ctlr_info
{
int ctlr;
@@ -118,11 +120,11 @@ struct ctlr_info
void *scsi_ctlr; /* ptr to structure containing scsi related stuff */
/* list of block side commands the scsi error handling sucked up */
/* and saved for later processing */
- struct sendcmd_reject_list scsi_rejects;
#endif
unsigned char alive;
struct completion *rescan_wait;
struct task_struct *cciss_scan_thread;
+ struct device dev;
};
/* Defining the different access_methods */
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index 40b1b92dae7f..cd665b00c7c5 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -217,6 +217,8 @@ typedef union _LUNAddr_struct {
LogDevAddr_struct LogDev;
} LUNAddr_struct;
+#define CTLR_LUNID "\0\0\0\0\0\0\0\0"
+
typedef struct _CommandListHeader_struct {
BYTE ReplyQueue;
BYTE SGList;
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index a3fd87b41444..3315268b4ec7 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -44,20 +44,13 @@
#define CCISS_ABORT_MSG 0x00
#define CCISS_RESET_MSG 0x01
-/* some prototypes... */
-static int sendcmd(
- __u8 cmd,
- int ctlr,
- void *buff,
- size_t size,
- unsigned int use_unit_num, /* 0: address the controller,
- 1: address logical volume log_unit,
- 2: address is in scsi3addr */
- unsigned int log_unit,
- __u8 page_code,
- unsigned char *scsi3addr,
+static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+ size_t size,
+ __u8 page_code, unsigned char *scsi3addr,
int cmd_type);
+static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool);
+static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool);
static int cciss_scsi_proc_info(
struct Scsi_Host *sh,
@@ -1575,6 +1568,75 @@ cciss_seq_tape_report(struct seq_file *seq, int ctlr)
CPQ_TAPE_UNLOCK(ctlr, flags);
}
+static int wait_for_device_to_become_ready(ctlr_info_t *h,
+ unsigned char lunaddr[])
+{
+ int rc;
+ int count = 0;
+ int waittime = HZ;
+ CommandList_struct *c;
+
+ c = cmd_alloc(h, 1);
+ if (!c) {
+ printk(KERN_WARNING "cciss%d: out of memory in "
+ "wait_for_device_to_become_ready.\n", h->ctlr);
+ return IO_ERROR;
+ }
+
+ /* Send test unit ready until device ready, or give up. */
+ while (count < 20) {
+
+ /* Wait for a bit. Do this first, because if we send
+ * the TUR right away, the reset will just abort it.
+ */
+ schedule_timeout_uninterruptible(waittime);
+ count++;
+
+ /* Increase wait time with each try, up to a point. */
+ if (waittime < (HZ * 30))
+ waittime = waittime * 2;
+
+ /* Send the Test Unit Ready */
+ rc = fill_cmd(c, TEST_UNIT_READY, h->ctlr, NULL, 0, 0,
+ lunaddr, TYPE_CMD);
+ if (rc == 0)
+ rc = sendcmd_withirq_core(h, c, 0);
+
+ (void) process_sendcmd_error(h, c);
+
+ if (rc != 0)
+ goto retry_tur;
+
+ if (c->err_info->CommandStatus == CMD_SUCCESS)
+ break;
+
+ if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
+ c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
+ if (c->err_info->SenseInfo[2] == NO_SENSE)
+ break;
+ if (c->err_info->SenseInfo[2] == UNIT_ATTENTION) {
+ unsigned char asc;
+ asc = c->err_info->SenseInfo[12];
+ check_for_unit_attention(h, c);
+ if (asc == POWER_OR_RESET)
+ break;
+ }
+ }
+retry_tur:
+ printk(KERN_WARNING "cciss%d: Waiting %d secs "
+ "for device to become ready.\n",
+ h->ctlr, waittime / HZ);
+ rc = 1; /* device not ready. */
+ }
+
+ if (rc)
+ printk("cciss%d: giving up on device.\n", h->ctlr);
+ else
+ printk(KERN_WARNING "cciss%d: device is ready.\n", h->ctlr);
+
+ cmd_free(h, c, 1);
+ return rc;
+}
/* Need at least one of these error handlers to keep ../scsi/hosts.c from
* complaining. Doing a host- or bus-reset can't do anything good here.
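
wait_for_device_to_become_ready() above implements a capped exponential backoff: it sleeps before the first TUR (so the just-issued reset does not simply abort it), doubles the interval until it passes 30 seconds, and gives up after 20 tries. The schedule in isolation (sketch):

int count = 0;
int waittime = HZ;                       /* start at one second */

while (count < 20) {
        schedule_timeout_uninterruptible(waittime);
        count++;
        if (waittime < (HZ * 30))
                waittime *= 2;           /* 1s, 2s, 4s, ... capped near 32s */
        /* ... send TEST UNIT READY and inspect the result ... */
}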
@@ -1591,6 +1653,7 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
int rc;
CommandList_struct *cmd_in_trouble;
+ unsigned char lunaddr[8];
ctlr_info_t **c;
int ctlr;
@@ -1600,19 +1663,15 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
return FAILED;
ctlr = (*c)->ctlr;
printk(KERN_WARNING "cciss%d: resetting tape drive or medium changer.\n", ctlr);
-
/* find the command that's giving us trouble */
cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble;
- if (cmd_in_trouble == NULL) { /* paranoia */
+ if (cmd_in_trouble == NULL) /* paranoia */
return FAILED;
- }
+ memcpy(lunaddr, &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 8);
/* send a reset to the SCSI LUN which the command was sent to */
- rc = sendcmd(CCISS_RESET_MSG, ctlr, NULL, 0, 2, 0, 0,
- (unsigned char *) &cmd_in_trouble->Header.LUN.LunAddrBytes[0],
+ rc = sendcmd_withirq(CCISS_RESET_MSG, ctlr, NULL, 0, 0, lunaddr,
TYPE_MSG);
- /* sendcmd turned off interrupts on the board, turn 'em back on. */
- (*c)->access.set_intr_mask(*c, CCISS_INTR_ON);
- if (rc == 0)
+ if (rc == 0 && wait_for_device_to_become_ready(*c, lunaddr) == 0)
return SUCCESS;
printk(KERN_WARNING "cciss%d: resetting device failed.\n", ctlr);
return FAILED;
@@ -1622,6 +1681,7 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
{
int rc;
CommandList_struct *cmd_to_abort;
+ unsigned char lunaddr[8];
ctlr_info_t **c;
int ctlr;
@@ -1636,12 +1696,9 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble;
if (cmd_to_abort == NULL) /* paranoia */
return FAILED;
- rc = sendcmd(CCISS_ABORT_MSG, ctlr, &cmd_to_abort->Header.Tag,
- 0, 2, 0, 0,
- (unsigned char *) &cmd_to_abort->Header.LUN.LunAddrBytes[0],
- TYPE_MSG);
- /* sendcmd turned off interrupts on the board, turn 'em back on. */
- (*c)->access.set_intr_mask(*c, CCISS_INTR_ON);
+ memcpy(lunaddr, &cmd_to_abort->Header.LUN.LunAddrBytes[0], 8);
+ rc = sendcmd_withirq(CCISS_ABORT_MSG, ctlr, &cmd_to_abort->Header.Tag,
+ 0, 0, lunaddr, TYPE_MSG);
if (rc == 0)
return SUCCESS;
return FAILED;
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index ca268ca11159..44fa2018f6b0 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -474,7 +474,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
disk->fops = &ida_fops;
if (j && !drv->nr_blks)
continue;
- blk_queue_hardsect_size(hba[i]->queue, drv->blk_size);
+ blk_queue_logical_block_size(hba[i]->queue, drv->blk_size);
set_capacity(disk, drv->nr_blks);
disk->queue = hba[i]->queue;
disk->private_data = drv;
@@ -903,7 +903,7 @@ static void do_ida_request(struct request_queue *q)
goto startio;
queue_next:
- creq = elv_next_request(q);
+ creq = blk_peek_request(q);
if (!creq)
goto startio;
@@ -912,17 +912,18 @@ queue_next:
if ((c = cmd_alloc(h,1)) == NULL)
goto startio;
- blkdev_dequeue_request(creq);
+ blk_start_request(creq);
c->ctlr = h->ctlr;
c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
c->hdr.size = sizeof(rblk_t) >> 2;
c->size += sizeof(rblk_t);
- c->req.hdr.blk = creq->sector;
+ c->req.hdr.blk = blk_rq_pos(creq);
c->rq = creq;
DBGPX(
- printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
+ printk("sector=%d, nr_sectors=%u\n",
+ blk_rq_pos(creq), blk_rq_sectors(creq));
);
sg_init_table(tmp_sg, SG_MAX);
seg = blk_rq_map_sg(q, creq, tmp_sg);
@@ -940,9 +941,9 @@ DBGPX(
tmp_sg[i].offset,
tmp_sg[i].length, dir);
}
-DBGPX( printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
+DBGPX( printk("Submitting %u sectors in %d segments\n", blk_rq_sectors(creq), seg); );
c->req.hdr.sg_cnt = seg;
- c->req.hdr.blk_cnt = creq->nr_sectors;
+ c->req.hdr.blk_cnt = blk_rq_sectors(creq);
c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
c->type = CMD_RWREQ;
@@ -1024,8 +1025,7 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
cmd->req.sg[i].size, ddir);
DBGPX(printk("Done with %p\n", rq););
- if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
- BUG();
+ __blk_end_request_all(rq, error);
}
/*
@@ -1546,7 +1546,7 @@ static int revalidate_allvol(ctlr_info_t *host)
drv_info_t *drv = &host->drv[i];
if (i && !drv->nr_blks)
continue;
- blk_queue_hardsect_size(host->queue, drv->blk_size);
+ blk_queue_logical_block_size(host->queue, drv->blk_size);
set_capacity(disk, drv->nr_blks);
disk->queue = host->queue;
disk->private_data = drv;
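
The cpqarray hunks above show the split that replaces elv_next_request()/blkdev_dequeue_request(): blk_peek_request() inspects the head of the queue without dequeueing, and blk_start_request() dequeues the request and starts its timeout. A minimal sketch of the pattern, with resources_available() and issue_to_hw() as hypothetical stand-ins for the driver's own command allocation and submission:

#include <linux/blkdev.h>

/* hypothetical driver hooks, stand-ins for cmd_alloc() and friends */
static bool resources_available(void);
static void issue_to_hw(struct request *rq);

static void sketch_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		if (!resources_available())
			break;			/* rq stays on the queue */
		blk_start_request(rq);		/* dequeue it, arm the timeout */
		issue_to_hw(rq);
	}
}
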
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 1300df6f1642..862b40c90181 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -931,7 +931,7 @@ static inline void unlock_fdc(void)
del_timer(&fd_timeout);
cont = NULL;
clear_bit(0, &fdc_busy);
- if (elv_next_request(floppy_queue))
+ if (current_req || blk_peek_request(floppy_queue))
do_fd_request(floppy_queue);
spin_unlock_irqrestore(&floppy_lock, flags);
wake_up(&fdc_wait);
@@ -2303,7 +2303,7 @@ static void floppy_end_request(struct request *req, int error)
/* current_count_sectors can be zero if transfer failed */
if (error)
- nr_sectors = req->current_nr_sectors;
+ nr_sectors = blk_rq_cur_sectors(req);
if (__blk_end_request(req, error, nr_sectors << 9))
return;
@@ -2332,7 +2332,7 @@ static void request_done(int uptodate)
if (uptodate) {
/* maintain values for invalidation on geometry
* change */
- block = current_count_sectors + req->sector;
+ block = current_count_sectors + blk_rq_pos(req);
INFBOUND(DRS->maxblock, block);
if (block > _floppy->sect)
DRS->maxtrack = 1;
@@ -2346,10 +2346,10 @@ static void request_done(int uptodate)
/* record write error information */
DRWE->write_errors++;
if (DRWE->write_errors == 1) {
- DRWE->first_error_sector = req->sector;
+ DRWE->first_error_sector = blk_rq_pos(req);
DRWE->first_error_generation = DRS->generation;
}
- DRWE->last_error_sector = req->sector;
+ DRWE->last_error_sector = blk_rq_pos(req);
DRWE->last_error_generation = DRS->generation;
}
spin_lock_irqsave(q->queue_lock, flags);
@@ -2503,24 +2503,23 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
max_sector = transfer_size(ssize,
min(max_sector, max_sector_2),
- current_req->nr_sectors);
+ blk_rq_sectors(current_req));
if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
- buffer_max > fsector_t + current_req->nr_sectors)
+ buffer_max > fsector_t + blk_rq_sectors(current_req))
current_count_sectors = min_t(int, buffer_max - fsector_t,
- current_req->nr_sectors);
+ blk_rq_sectors(current_req));
remaining = current_count_sectors << 9;
#ifdef FLOPPY_SANITY_CHECK
- if ((remaining >> 9) > current_req->nr_sectors &&
- CT(COMMAND) == FD_WRITE) {
+ if (remaining > blk_rq_bytes(current_req) && CT(COMMAND) == FD_WRITE) {
DPRINT("in copy buffer\n");
printk("current_count_sectors=%ld\n", current_count_sectors);
printk("remaining=%d\n", remaining >> 9);
- printk("current_req->nr_sectors=%ld\n",
- current_req->nr_sectors);
+ printk("current_req->nr_sectors=%u\n",
+ blk_rq_sectors(current_req));
printk("current_req->current_nr_sectors=%u\n",
- current_req->current_nr_sectors);
+ blk_rq_cur_sectors(current_req));
printk("max_sector=%d\n", max_sector);
printk("ssize=%d\n", ssize);
}
@@ -2530,7 +2529,7 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9);
- size = current_req->current_nr_sectors << 9;
+ size = blk_rq_cur_bytes(current_req);
rq_for_each_segment(bv, current_req, iter) {
if (!remaining)
@@ -2648,10 +2647,10 @@ static int make_raw_rw_request(void)
max_sector = _floppy->sect * _floppy->head;
- TRACK = (int)current_req->sector / max_sector;
- fsector_t = (int)current_req->sector % max_sector;
+ TRACK = (int)blk_rq_pos(current_req) / max_sector;
+ fsector_t = (int)blk_rq_pos(current_req) % max_sector;
if (_floppy->track && TRACK >= _floppy->track) {
- if (current_req->current_nr_sectors & 1) {
+ if (blk_rq_cur_sectors(current_req) & 1) {
current_count_sectors = 1;
return 1;
} else
@@ -2669,7 +2668,7 @@ static int make_raw_rw_request(void)
if (fsector_t >= max_sector) {
current_count_sectors =
min_t(int, _floppy->sect - fsector_t,
- current_req->nr_sectors);
+ blk_rq_sectors(current_req));
return 1;
}
SIZECODE = 2;
@@ -2720,7 +2719,7 @@ static int make_raw_rw_request(void)
in_sector_offset = (fsector_t % _floppy->sect) % ssize;
aligned_sector_t = fsector_t - in_sector_offset;
- max_size = current_req->nr_sectors;
+ max_size = blk_rq_sectors(current_req);
if ((raw_cmd->track == buffer_track) &&
(current_drive == buffer_drive) &&
(fsector_t >= buffer_min) && (fsector_t < buffer_max)) {
@@ -2729,10 +2728,10 @@ static int make_raw_rw_request(void)
copy_buffer(1, max_sector, buffer_max);
return 1;
}
- } else if (in_sector_offset || current_req->nr_sectors < ssize) {
+ } else if (in_sector_offset || blk_rq_sectors(current_req) < ssize) {
if (CT(COMMAND) == FD_WRITE) {
- if (fsector_t + current_req->nr_sectors > ssize &&
- fsector_t + current_req->nr_sectors < ssize + ssize)
+ if (fsector_t + blk_rq_sectors(current_req) > ssize &&
+ fsector_t + blk_rq_sectors(current_req) < ssize + ssize)
max_size = ssize + ssize;
else
max_size = ssize;
@@ -2776,7 +2775,7 @@ static int make_raw_rw_request(void)
(indirect * 2 > direct * 3 &&
*errors < DP->max_errors.read_track && ((!probing
|| (DP->read_track & (1 << DRS->probed_format)))))) {
- max_size = current_req->nr_sectors;
+ max_size = blk_rq_sectors(current_req);
} else {
raw_cmd->kernel_data = current_req->buffer;
raw_cmd->length = current_count_sectors << 9;
@@ -2801,7 +2800,7 @@ static int make_raw_rw_request(void)
fsector_t > buffer_max ||
fsector_t < buffer_min ||
((CT(COMMAND) == FD_READ ||
- (!in_sector_offset && current_req->nr_sectors >= ssize)) &&
+ (!in_sector_offset && blk_rq_sectors(current_req) >= ssize)) &&
max_sector > 2 * max_buffer_sectors + buffer_min &&
max_size + fsector_t > 2 * max_buffer_sectors + buffer_min)
/* not enough space */
@@ -2879,8 +2878,8 @@ static int make_raw_rw_request(void)
printk("write\n");
return 0;
}
- } else if (raw_cmd->length > current_req->nr_sectors << 9 ||
- current_count_sectors > current_req->nr_sectors) {
+ } else if (raw_cmd->length > blk_rq_bytes(current_req) ||
+ current_count_sectors > blk_rq_sectors(current_req)) {
DPRINT("buffer overrun in direct transfer\n");
return 0;
} else if (raw_cmd->length < current_count_sectors << 9) {
@@ -2913,7 +2912,7 @@ static void redo_fd_request(void)
struct request *req;
spin_lock_irq(floppy_queue->queue_lock);
- req = elv_next_request(floppy_queue);
+ req = blk_fetch_request(floppy_queue);
spin_unlock_irq(floppy_queue->queue_lock);
if (!req) {
do_floppy = NULL;
@@ -2990,8 +2989,9 @@ static void do_fd_request(struct request_queue * q)
if (usage_count == 0) {
printk("warning: usage count=0, current_req=%p exiting\n",
current_req);
- printk("sect=%ld type=%x flags=%x\n", (long)current_req->sector,
- current_req->cmd_type, current_req->cmd_flags);
+ printk("sect=%ld type=%x flags=%x\n",
+ (long)blk_rq_pos(current_req), current_req->cmd_type,
+ current_req->cmd_flags);
return;
}
if (test_bit(0, &fdc_busy)) {
@@ -4148,6 +4148,24 @@ static void floppy_device_release(struct device *dev)
{
}
+static int floppy_resume(struct platform_device *dev)
+{
+ int fdc;
+
+ for (fdc = 0; fdc < N_FDC; fdc++)
+ if (FDCS->address != -1)
+ user_reset_fdc(-1, FD_RESET_ALWAYS, 0);
+
+ return 0;
+}
+
+static struct platform_driver floppy_driver = {
+ .resume = floppy_resume,
+ .driver = {
+ .name = "floppy",
+ },
+};
+
static struct platform_device floppy_device[N_DRIVE];
static struct kobject *floppy_find(dev_t dev, int *part, void *data)
@@ -4196,10 +4214,14 @@ static int __init floppy_init(void)
if (err)
goto out_put_disk;
+ err = platform_driver_register(&floppy_driver);
+ if (err)
+ goto out_unreg_blkdev;
+
floppy_queue = blk_init_queue(do_fd_request, &floppy_lock);
if (!floppy_queue) {
err = -ENOMEM;
- goto out_unreg_blkdev;
+ goto out_unreg_driver;
}
blk_queue_max_sectors(floppy_queue, 64);
@@ -4346,6 +4368,8 @@ out_flush_work:
out_unreg_region:
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
blk_cleanup_queue(floppy_queue);
+out_unreg_driver:
+ platform_driver_unregister(&floppy_driver);
out_unreg_blkdev:
unregister_blkdev(FLOPPY_MAJOR, "fd");
out_put_disk:
@@ -4566,6 +4590,7 @@ static void __exit floppy_module_exit(void)
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
unregister_blkdev(FLOPPY_MAJOR, "fd");
+ platform_driver_unregister(&floppy_driver);
for (drive = 0; drive < N_DRIVE; drive++) {
del_timer_sync(&motor_off_timer[drive]);
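
The floppy conversion is largely mechanical: direct request-field accesses become calls to the new accessors. The mapping used throughout this series, as a reference sketch (rq is an in-scope struct request *):

sector_t     pos  = blk_rq_pos(rq);         /* was rq->sector */
unsigned int nr   = blk_rq_sectors(rq);     /* was rq->nr_sectors */
unsigned int cur  = blk_rq_cur_sectors(rq); /* was rq->current_nr_sectors */
unsigned int len  = blk_rq_bytes(rq);       /* was rq->nr_sectors << 9 */
unsigned int clen = blk_rq_cur_bytes(rq);   /* was rq->current_nr_sectors << 9 */
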
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index baaa9e486e50..f65b3f369eb0 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -98,10 +98,9 @@
static DEFINE_SPINLOCK(hd_lock);
static struct request_queue *hd_queue;
+static struct request *hd_req;
#define MAJOR_NR HD_MAJOR
-#define QUEUE (hd_queue)
-#define CURRENT elv_next_request(hd_queue)
#define TIMEOUT_VALUE (6*HZ)
#define HD_DELAY 0
@@ -195,11 +194,24 @@ static void __init hd_setup(char *str, int *ints)
NR_HD = hdind+1;
}
+static bool hd_end_request(int err, unsigned int bytes)
+{
+ if (__blk_end_request(hd_req, err, bytes))
+ return true;
+ hd_req = NULL;
+ return false;
+}
+
+static bool hd_end_request_cur(int err)
+{
+ return hd_end_request(err, blk_rq_cur_bytes(hd_req));
+}
+
static void dump_status(const char *msg, unsigned int stat)
{
char *name = "hd?";
- if (CURRENT)
- name = CURRENT->rq_disk->disk_name;
+ if (hd_req)
+ name = hd_req->rq_disk->disk_name;
#ifdef VERBOSE_ERRORS
printk("%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
@@ -227,8 +239,8 @@ static void dump_status(const char *msg, unsigned int stat)
if (hd_error & (BBD_ERR|ECC_ERR|ID_ERR|MARK_ERR)) {
printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL),
inb(HD_CURRENT) & 0xf, inb(HD_SECTOR));
- if (CURRENT)
- printk(", sector=%ld", CURRENT->sector);
+ if (hd_req)
+ printk(", sector=%ld", blk_rq_pos(hd_req));
}
printk("\n");
}
@@ -406,11 +418,12 @@ static void unexpected_hd_interrupt(void)
*/
static void bad_rw_intr(void)
{
- struct request *req = CURRENT;
+ struct request *req = hd_req;
+
if (req != NULL) {
struct hd_i_struct *disk = req->rq_disk->private_data;
if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) {
- end_request(req, 0);
+ hd_end_request_cur(-EIO);
disk->special_op = disk->recalibrate = 1;
} else if (req->errors % RESET_FREQ == 0)
reset = 1;
@@ -452,37 +465,30 @@ static void read_intr(void)
bad_rw_intr();
hd_request();
return;
+
ok_to_read:
- req = CURRENT;
+ req = hd_req;
insw(HD_DATA, req->buffer, 256);
- req->sector++;
- req->buffer += 512;
- req->errors = 0;
- i = --req->nr_sectors;
- --req->current_nr_sectors;
#ifdef DEBUG
- printk("%s: read: sector %ld, remaining = %ld, buffer=%p\n",
- req->rq_disk->disk_name, req->sector, req->nr_sectors,
- req->buffer+512);
+ printk("%s: read: sector %ld, remaining = %u, buffer=%p\n",
+ req->rq_disk->disk_name, blk_rq_pos(req) + 1,
+ blk_rq_sectors(req) - 1, req->buffer+512);
#endif
- if (req->current_nr_sectors <= 0)
- end_request(req, 1);
- if (i > 0) {
+ if (hd_end_request(0, 512)) {
SET_HANDLER(&read_intr);
return;
}
+
(void) inb_p(HD_STATUS);
#if (HD_DELAY > 0)
last_req = read_timer();
#endif
- if (elv_next_request(QUEUE))
- hd_request();
- return;
+ hd_request();
}
static void write_intr(void)
{
- struct request *req = CURRENT;
+ struct request *req = hd_req;
int i;
int retries = 100000;
@@ -492,30 +498,25 @@ static void write_intr(void)
continue;
if (!OK_STATUS(i))
break;
- if ((req->nr_sectors <= 1) || (i & DRQ_STAT))
+ if ((blk_rq_sectors(req) <= 1) || (i & DRQ_STAT))
goto ok_to_write;
} while (--retries > 0);
dump_status("write_intr", i);
bad_rw_intr();
hd_request();
return;
+
ok_to_write:
- req->sector++;
- i = --req->nr_sectors;
- --req->current_nr_sectors;
- req->buffer += 512;
- if (!i || (req->bio && req->current_nr_sectors <= 0))
- end_request(req, 1);
- if (i > 0) {
+ if (hd_end_request(0, 512)) {
SET_HANDLER(&write_intr);
outsw(HD_DATA, req->buffer, 256);
- } else {
+ return;
+ }
+
#if (HD_DELAY > 0)
- last_req = read_timer();
+ last_req = read_timer();
#endif
- hd_request();
- }
- return;
+ hd_request();
}
static void recal_intr(void)
@@ -537,18 +538,18 @@ static void hd_times_out(unsigned long dummy)
do_hd = NULL;
- if (!CURRENT)
+ if (!hd_req)
return;
spin_lock_irq(hd_queue->queue_lock);
reset = 1;
- name = CURRENT->rq_disk->disk_name;
+ name = hd_req->rq_disk->disk_name;
printk("%s: timeout\n", name);
- if (++CURRENT->errors >= MAX_ERRORS) {
+ if (++hd_req->errors >= MAX_ERRORS) {
#ifdef DEBUG
printk("%s: too many errors\n", name);
#endif
- end_request(CURRENT, 0);
+ hd_end_request_cur(-EIO);
}
hd_request();
spin_unlock_irq(hd_queue->queue_lock);
@@ -563,7 +564,7 @@ static int do_special_op(struct hd_i_struct *disk, struct request *req)
}
if (disk->head > 16) {
printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name);
- end_request(req, 0);
+ hd_end_request_cur(-EIO);
}
disk->special_op = 0;
return 1;
@@ -590,24 +591,27 @@ static void hd_request(void)
repeat:
del_timer(&device_timer);
- req = CURRENT;
- if (!req) {
- do_hd = NULL;
- return;
+ if (!hd_req) {
+ hd_req = blk_fetch_request(hd_queue);
+ if (!hd_req) {
+ do_hd = NULL;
+ return;
+ }
}
+ req = hd_req;
if (reset) {
reset_hd();
return;
}
disk = req->rq_disk->private_data;
- block = req->sector;
- nsect = req->nr_sectors;
+ block = blk_rq_pos(req);
+ nsect = blk_rq_sectors(req);
if (block >= get_capacity(req->rq_disk) ||
((block+nsect) > get_capacity(req->rq_disk))) {
printk("%s: bad access: block=%d, count=%d\n",
req->rq_disk->disk_name, block, nsect);
- end_request(req, 0);
+ hd_end_request_cur(-EIO);
goto repeat;
}
@@ -647,7 +651,7 @@ repeat:
break;
default:
printk("unknown hd-command\n");
- end_request(req, 0);
+ hd_end_request_cur(-EIO);
break;
}
}
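
hd now caches the in-flight request in a driver-global (hd_req) and fetches a new one only when that is NULL; blk_fetch_request() is equivalent to blk_peek_request() followed by blk_start_request(). In sketch form:

static struct request *hd_req;	/* the request currently on the hardware */

static void sketch_hd_request(void)
{
	if (!hd_req) {
		hd_req = blk_fetch_request(hd_queue);	/* peek + start in one call */
		if (!hd_req)
			return;				/* queue is empty */
	}
	/* ... program the controller from blk_rq_pos(hd_req),
	 * blk_rq_sectors(hd_req) ... */
}
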
@@ -720,7 +724,7 @@ static int __init hd_init(void)
blk_queue_max_sectors(hd_queue, 255);
init_timer(&device_timer);
device_timer.function = hd_times_out;
- blk_queue_hardsect_size(hd_queue, 512);
+ blk_queue_logical_block_size(hd_queue, 512);
if (!NR_HD) {
/*
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ddae80825899..801f4ab83302 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -511,11 +511,7 @@ out:
*/
static void loop_add_bio(struct loop_device *lo, struct bio *bio)
{
- if (lo->lo_biotail) {
- lo->lo_biotail->bi_next = bio;
- lo->lo_biotail = bio;
- } else
- lo->lo_bio = lo->lo_biotail = bio;
+ bio_list_add(&lo->lo_bio_list, bio);
}
/*
@@ -523,16 +519,7 @@ static void loop_add_bio(struct loop_device *lo, struct bio *bio)
*/
static struct bio *loop_get_bio(struct loop_device *lo)
{
- struct bio *bio;
-
- if ((bio = lo->lo_bio)) {
- if (bio == lo->lo_biotail)
- lo->lo_biotail = NULL;
- lo->lo_bio = bio->bi_next;
- bio->bi_next = NULL;
- }
-
- return bio;
+ return bio_list_pop(&lo->lo_bio_list);
}
static int loop_make_request(struct request_queue *q, struct bio *old_bio)
@@ -609,12 +596,13 @@ static int loop_thread(void *data)
set_user_nice(current, -20);
- while (!kthread_should_stop() || lo->lo_bio) {
+ while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
wait_event_interruptible(lo->lo_event,
- lo->lo_bio || kthread_should_stop());
+ !bio_list_empty(&lo->lo_bio_list) ||
+ kthread_should_stop());
- if (!lo->lo_bio)
+ if (bio_list_empty(&lo->lo_bio_list))
continue;
spin_lock_irq(&lo->lo_lock);
bio = loop_get_bio(lo);
@@ -721,10 +709,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
goto out_putf;
- /* new backing store needs to support loop (eg splice_read) */
- if (!inode->i_fop->splice_read)
- goto out_putf;
-
/* size of the new backing store needs to be the same */
if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
goto out_putf;
@@ -800,12 +784,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
error = -EINVAL;
if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) {
const struct address_space_operations *aops = mapping->a_ops;
- /*
- * If we can't read - sorry. If we only can't write - well,
- * it's going to be read-only.
- */
- if (!file->f_op->splice_read)
- goto out_putf;
+
if (aops->write_begin)
lo_flags |= LO_FLAGS_USE_AOPS;
if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write)
@@ -841,7 +820,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
- lo->lo_bio = lo->lo_biotail = NULL;
+ bio_list_init(&lo->lo_bio_list);
/*
* set queue make_request_fn, and add limits based on lower level
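
loop drops its hand-rolled singly linked bio chain for the bio_list helpers from <linux/bio.h>. The full set used here is small; handle() below is a hypothetical consumer:

struct bio_list list;

bio_list_init(&list);			/* empty list */
bio_list_add(&list, bio);		/* append at the tail */
while ((bio = bio_list_pop(&list)))	/* detach the head, NULL when empty */
	handle(bio);			/* hypothetical */
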
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index f3898353d0a8..60de5a01e71e 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -17,71 +17,220 @@
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
-#include <linux/libata.h>
+#include <linux/ata.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
-#include <linux/mg_disk.h>
#define MG_RES_SEC (CONFIG_MG_DISK_RES << 1)
+/* name for block device */
+#define MG_DISK_NAME "mgd"
+/* name for platform device */
+#define MG_DEV_NAME "mg_disk"
+
+#define MG_DISK_MAJ 0
+#define MG_DISK_MAX_PART 16
+#define MG_SECTOR_SIZE 512
+#define MG_MAX_SECTS 256
+
+/* Register offsets */
+#define MG_BUFF_OFFSET 0x8000
+#define MG_STORAGE_BUFFER_SIZE 0x200
+#define MG_REG_OFFSET 0xC000
+#define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */
+#define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */
+#define MG_REG_SECT_CNT (MG_REG_OFFSET + 4)
+#define MG_REG_SECT_NUM (MG_REG_OFFSET + 6)
+#define MG_REG_CYL_LOW (MG_REG_OFFSET + 8)
+#define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA)
+#define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC)
+#define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */
+#define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */
+#define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10)
+#define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12)
+
+/* handy status */
+#define MG_STAT_READY (ATA_DRDY | ATA_DSC)
+#define MG_READY_OK(s) (((s) & (MG_STAT_READY | (ATA_BUSY | ATA_DF | \
+ ATA_ERR))) == MG_STAT_READY)
+
+/* driver-specific error codes */
+#define MG_ERR_NONE 0
+#define MG_ERR_TIMEOUT 0x100
+#define MG_ERR_INIT_STAT 0x101
+#define MG_ERR_TRANSLATION 0x102
+#define MG_ERR_CTRL_RST 0x103
+#define MG_ERR_INV_STAT 0x104
+#define MG_ERR_RSTOUT 0x105
+
+#define MG_MAX_ERRORS 6 /* Max read/write errors */
+
+/* command */
+#define MG_CMD_RD 0x20
+#define MG_CMD_WR 0x30
+#define MG_CMD_SLEEP 0x99
+#define MG_CMD_WAKEUP 0xC3
+#define MG_CMD_ID 0xEC
+#define MG_CMD_WR_CONF 0x3C
+#define MG_CMD_RD_CONF 0x40
+
+/* operation mode */
+#define MG_OP_CASCADE (1 << 0)
+#define MG_OP_CASCADE_SYNC_RD (1 << 1)
+#define MG_OP_CASCADE_SYNC_WR (1 << 2)
+#define MG_OP_INTERLEAVE (1 << 3)
+
+/* synchronous */
+#define MG_BURST_LAT_4 (3 << 4)
+#define MG_BURST_LAT_5 (4 << 4)
+#define MG_BURST_LAT_6 (5 << 4)
+#define MG_BURST_LAT_7 (6 << 4)
+#define MG_BURST_LAT_8 (7 << 4)
+#define MG_BURST_LEN_4 (1 << 1)
+#define MG_BURST_LEN_8 (2 << 1)
+#define MG_BURST_LEN_16 (3 << 1)
+#define MG_BURST_LEN_32 (4 << 1)
+#define MG_BURST_LEN_CONT (0 << 1)
+
+/* timeout value (unit: ms) */
+#define MG_TMAX_CONF_TO_CMD 1
+#define MG_TMAX_WAIT_RD_DRQ 10
+#define MG_TMAX_WAIT_WR_DRQ 500
+#define MG_TMAX_RST_TO_BUSY 10
+#define MG_TMAX_HDRST_TO_RDY 500
+#define MG_TMAX_SWRST_TO_RDY 500
+#define MG_TMAX_RSTOUT 3000
+
+/* device attributes */
+/* use mflash as boot device */
+#define MG_BOOT_DEV (1 << 0)
+/* use mflash as storage device */
+#define MG_STORAGE_DEV (1 << 1)
+/* same as MG_STORAGE_DEV, but the bootloader has already done the reset sequence */
+#define MG_STORAGE_DEV_SKIP_RST (1 << 2)
+
+#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
+
+/* names of GPIO resource */
+#define MG_RST_PIN "mg_rst"
+/* for anything except MG_BOOT_DEV, a reset-out pin should be assigned */
+#define MG_RSTOUT_PIN "mg_rstout"
+
+/* private driver data */
+struct mg_drv_data {
+ /* disk resource */
+ u32 use_polling;
+
+	/* device attributes */
+ u32 dev_attr;
+
+ /* internally used */
+ struct mg_host *host;
+};
+
+/* main structure for mflash driver */
+struct mg_host {
+ struct device *dev;
+
+ struct request_queue *breq;
+ struct request *req;
+ spinlock_t lock;
+ struct gendisk *gd;
+
+ struct timer_list timer;
+ void (*mg_do_intr) (struct mg_host *);
+
+ u16 id[ATA_ID_WORDS];
+
+ u16 cyls;
+ u16 heads;
+ u16 sectors;
+ u32 n_sectors;
+ u32 nres_sectors;
+
+ void __iomem *dev_base;
+ unsigned int irq;
+ unsigned int rst;
+ unsigned int rstout;
+
+ u32 major;
+ u32 error;
+};
+
+/*
+ * Debugging macro and defines
+ */
+#undef DO_MG_DEBUG
+#ifdef DO_MG_DEBUG
+# define MG_DBG(fmt, args...) \
+ printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
+#else /* DO_MG_DEBUG */
+# define MG_DBG(fmt, args...) do { } while (0)
+#endif /* DO_MG_DEBUG */
+
static void mg_request(struct request_queue *);
+static bool mg_end_request(struct mg_host *host, int err, unsigned int nr_bytes)
+{
+ if (__blk_end_request(host->req, err, nr_bytes))
+ return true;
+
+ host->req = NULL;
+ return false;
+}
+
+static bool mg_end_request_cur(struct mg_host *host, int err)
+{
+ return mg_end_request(host, err, blk_rq_cur_bytes(host->req));
+}
+
static void mg_dump_status(const char *msg, unsigned int stat,
struct mg_host *host)
{
char *name = MG_DISK_NAME;
- struct request *req;
- if (host->breq) {
- req = elv_next_request(host->breq);
- if (req)
- name = req->rq_disk->disk_name;
- }
+ if (host->req)
+ name = host->req->rq_disk->disk_name;
printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
- if (stat & MG_REG_STATUS_BIT_BUSY)
+ if (stat & ATA_BUSY)
printk("Busy ");
- if (stat & MG_REG_STATUS_BIT_READY)
+ if (stat & ATA_DRDY)
printk("DriveReady ");
- if (stat & MG_REG_STATUS_BIT_WRITE_FAULT)
+ if (stat & ATA_DF)
printk("WriteFault ");
- if (stat & MG_REG_STATUS_BIT_SEEK_DONE)
+ if (stat & ATA_DSC)
printk("SeekComplete ");
- if (stat & MG_REG_STATUS_BIT_DATA_REQ)
+ if (stat & ATA_DRQ)
printk("DataRequest ");
- if (stat & MG_REG_STATUS_BIT_CORRECTED_ERROR)
+ if (stat & ATA_CORR)
printk("CorrectedError ");
- if (stat & MG_REG_STATUS_BIT_ERROR)
+ if (stat & ATA_ERR)
printk("Error ");
printk("}\n");
- if ((stat & MG_REG_STATUS_BIT_ERROR) == 0) {
+ if ((stat & ATA_ERR) == 0) {
host->error = 0;
} else {
host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR);
printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg,
host->error & 0xff);
- if (host->error & MG_REG_ERR_BBK)
+ if (host->error & ATA_BBK)
printk("BadSector ");
- if (host->error & MG_REG_ERR_UNC)
+ if (host->error & ATA_UNC)
printk("UncorrectableError ");
- if (host->error & MG_REG_ERR_IDNF)
+ if (host->error & ATA_IDNF)
printk("SectorIdNotFound ");
- if (host->error & MG_REG_ERR_ABRT)
+ if (host->error & ATA_ABORTED)
printk("DriveStatusError ");
- if (host->error & MG_REG_ERR_AMNF)
+ if (host->error & ATA_AMNF)
printk("AddrMarkNotFound ");
printk("}");
- if (host->error &
- (MG_REG_ERR_BBK | MG_REG_ERR_UNC |
- MG_REG_ERR_IDNF | MG_REG_ERR_AMNF)) {
- if (host->breq) {
- req = elv_next_request(host->breq);
- if (req)
- printk(", sector=%u", (u32)req->sector);
- }
-
+ if (host->error & (ATA_BBK | ATA_UNC | ATA_IDNF | ATA_AMNF)) {
+ if (host->req)
+ printk(", sector=%u",
+ (unsigned int)blk_rq_pos(host->req));
}
printk("\n");
}
@@ -100,12 +249,12 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
do {
cur_jiffies = jiffies;
- if (status & MG_REG_STATUS_BIT_BUSY) {
- if (expect == MG_REG_STATUS_BIT_BUSY)
+ if (status & ATA_BUSY) {
+ if (expect == ATA_BUSY)
break;
} else {
/* Check the error condition! */
- if (status & MG_REG_STATUS_BIT_ERROR) {
+ if (status & ATA_ERR) {
mg_dump_status("mg_wait", status, host);
break;
}
@@ -114,8 +263,8 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
if (MG_READY_OK(status))
break;
- if (expect == MG_REG_STATUS_BIT_DATA_REQ)
- if (status & MG_REG_STATUS_BIT_DATA_REQ)
+ if (expect == ATA_DRQ)
+ if (status & ATA_DRQ)
break;
}
if (!msec) {
@@ -173,6 +322,42 @@ static irqreturn_t mg_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+/* local copy of ata_id_string() */
+static void mg_id_string(const u16 *id, unsigned char *s,
+ unsigned int ofs, unsigned int len)
+{
+ unsigned int c;
+
+ BUG_ON(len & 1);
+
+ while (len > 0) {
+ c = id[ofs] >> 8;
+ *s = c;
+ s++;
+
+ c = id[ofs] & 0xff;
+ *s = c;
+ s++;
+
+ ofs++;
+ len -= 2;
+ }
+}
+
+/* local copy of ata_id_c_string() */
+static void mg_id_c_string(const u16 *id, unsigned char *s,
+ unsigned int ofs, unsigned int len)
+{
+ unsigned char *p;
+
+ mg_id_string(id, s, ofs, len - 1);
+
+ p = s + strnlen(s, len - 1);
+ while (p > s && p[-1] == ' ')
+ p--;
+ *p = '\0';
+}
+
static int mg_get_disk_id(struct mg_host *host)
{
u32 i;
@@ -184,12 +369,10 @@ static int mg_get_disk_id(struct mg_host *host)
char serial[ATA_ID_SERNO_LEN + 1];
if (!prv_data->use_polling)
- outb(MG_REG_CTRL_INTR_DISABLE,
- (unsigned long)host->dev_base +
- MG_REG_DRV_CTRL);
+ outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND);
- err = mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_RD_DRQ);
+ err = mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ);
if (err)
return err;
@@ -219,9 +402,9 @@ static int mg_get_disk_id(struct mg_host *host)
host->n_sectors -= host->nres_sectors;
}
- ata_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
- ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
- ata_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
+ mg_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
+ mg_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
+ mg_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
printk(KERN_INFO "mg_disk: model: %s\n", model);
printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev);
printk(KERN_INFO "mg_disk: serial: %s\n", serial);
@@ -229,8 +412,7 @@ static int mg_get_disk_id(struct mg_host *host)
host->n_sectors, host->nres_sectors);
if (!prv_data->use_polling)
- outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
- MG_REG_DRV_CTRL);
+ outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
return err;
}
@@ -244,7 +426,7 @@ static int mg_disk_init(struct mg_host *host)
/* hdd rst low */
gpio_set_value(host->rst, 0);
- err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY);
+ err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
if (err)
return err;
@@ -255,17 +437,14 @@ static int mg_disk_init(struct mg_host *host)
return err;
/* soft reset on */
- outb(MG_REG_CTRL_RESET |
- (prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
- MG_REG_CTRL_INTR_ENABLE),
+ outb(ATA_SRST | (prv_data->use_polling ? ATA_NIEN : 0),
(unsigned long)host->dev_base + MG_REG_DRV_CTRL);
- err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY);
+ err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
if (err)
return err;
/* soft reset off */
- outb(prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
- MG_REG_CTRL_INTR_ENABLE,
+ outb(prv_data->use_polling ? ATA_NIEN : 0,
(unsigned long)host->dev_base + MG_REG_DRV_CTRL);
err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY);
if (err)
@@ -281,11 +460,10 @@ static int mg_disk_init(struct mg_host *host)
static void mg_bad_rw_intr(struct mg_host *host)
{
- struct request *req = elv_next_request(host->breq);
- if (req != NULL)
- if (++req->errors >= MG_MAX_ERRORS ||
- host->error == MG_ERR_TIMEOUT)
- end_request(req, 0);
+ if (host->req)
+ if (++host->req->errors >= MG_MAX_ERRORS ||
+ host->error == MG_ERR_TIMEOUT)
+ mg_end_request_cur(host, -EIO);
}
static unsigned int mg_out(struct mg_host *host,
@@ -311,7 +489,7 @@ static unsigned int mg_out(struct mg_host *host,
MG_REG_CYL_LOW);
outb((u8)(sect_num >> 16), (unsigned long)host->dev_base +
MG_REG_CYL_HIGH);
- outb((u8)((sect_num >> 24) | MG_REG_HEAD_LBA_MODE),
+ outb((u8)((sect_num >> 24) | ATA_LBA | ATA_DEVICE_OBS),
(unsigned long)host->dev_base + MG_REG_DRV_HEAD);
outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND);
return MG_ERR_NONE;
@@ -319,105 +497,77 @@ static unsigned int mg_out(struct mg_host *host,
static void mg_read(struct request *req)
{
- u32 remains, j;
+ u32 j;
struct mg_host *host = req->rq_disk->private_data;
- remains = req->nr_sectors;
-
- if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_RD, NULL) !=
- MG_ERR_NONE)
+ if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
+ MG_CMD_RD, NULL) != MG_ERR_NONE)
mg_bad_rw_intr(host);
MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
- remains, req->sector, req->buffer);
+ blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
+
+ do {
+ u16 *buff = (u16 *)req->buffer;
- while (remains) {
- if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
- MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
+ if (mg_wait(host, ATA_DRQ,
+ MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
mg_bad_rw_intr(host);
return;
}
- for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) {
- *(u16 *)req->buffer =
- inw((unsigned long)host->dev_base +
- MG_BUFF_OFFSET + (j << 1));
- req->buffer += 2;
- }
-
- req->sector++;
- req->errors = 0;
- remains = --req->nr_sectors;
- --req->current_nr_sectors;
-
- if (req->current_nr_sectors <= 0) {
- MG_DBG("remain : %d sects\n", remains);
- end_request(req, 1);
- if (remains > 0)
- req = elv_next_request(host->breq);
- }
+ for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
+ *buff++ = inw((unsigned long)host->dev_base +
+ MG_BUFF_OFFSET + (j << 1));
outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
MG_REG_COMMAND);
- }
+ } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
}
static void mg_write(struct request *req)
{
- u32 remains, j;
+ u32 j;
struct mg_host *host = req->rq_disk->private_data;
- remains = req->nr_sectors;
-
- if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_WR, NULL) !=
- MG_ERR_NONE) {
+ if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
+ MG_CMD_WR, NULL) != MG_ERR_NONE) {
mg_bad_rw_intr(host);
return;
}
-
MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
- remains, req->sector, req->buffer);
- while (remains) {
- if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
- MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
+ blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
+
+ do {
+ u16 *buff = (u16 *)req->buffer;
+
+ if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
mg_bad_rw_intr(host);
return;
}
- for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) {
- outw(*(u16 *)req->buffer,
- (unsigned long)host->dev_base +
- MG_BUFF_OFFSET + (j << 1));
- req->buffer += 2;
- }
- req->sector++;
- remains = --req->nr_sectors;
- --req->current_nr_sectors;
-
- if (req->current_nr_sectors <= 0) {
- MG_DBG("remain : %d sects\n", remains);
- end_request(req, 1);
- if (remains > 0)
- req = elv_next_request(host->breq);
- }
+ for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
+ outw(*buff++, (unsigned long)host->dev_base +
+ MG_BUFF_OFFSET + (j << 1));
outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
MG_REG_COMMAND);
- }
+ } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
}
static void mg_read_intr(struct mg_host *host)
{
+ struct request *req = host->req;
u32 i;
- struct request *req;
+ u16 *buff;
/* check status */
do {
i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
- if (i & MG_REG_STATUS_BIT_BUSY)
+ if (i & ATA_BUSY)
break;
if (!MG_READY_OK(i))
break;
- if (i & MG_REG_STATUS_BIT_DATA_REQ)
+ if (i & ATA_DRQ)
goto ok_to_read;
} while (0);
mg_dump_status("mg_read_intr", i, host);
@@ -427,60 +577,42 @@ static void mg_read_intr(struct mg_host *host)
ok_to_read:
/* get current segment of request */
- req = elv_next_request(host->breq);
+ buff = (u16 *)req->buffer;
/* read 1 sector */
- for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
- *(u16 *)req->buffer =
- inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
- (i << 1));
- req->buffer += 2;
- }
+ for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
+ *buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
+ (i << 1));
- /* manipulate request */
MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
- req->sector, req->nr_sectors - 1, req->buffer);
-
- req->sector++;
- req->errors = 0;
- i = --req->nr_sectors;
- --req->current_nr_sectors;
-
- /* let know if current segment done */
- if (req->current_nr_sectors <= 0)
- end_request(req, 1);
-
- /* set handler if read remains */
- if (i > 0) {
- host->mg_do_intr = mg_read_intr;
- mod_timer(&host->timer, jiffies + 3 * HZ);
- }
+ blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);
/* send read confirm */
outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
- /* goto next request */
- if (!i)
+ if (mg_end_request(host, 0, MG_SECTOR_SIZE)) {
+ /* set handler if read remains */
+ host->mg_do_intr = mg_read_intr;
+ mod_timer(&host->timer, jiffies + 3 * HZ);
+ } else /* goto next request */
mg_request(host->breq);
}
static void mg_write_intr(struct mg_host *host)
{
+ struct request *req = host->req;
u32 i, j;
u16 *buff;
- struct request *req;
-
- /* get current segment of request */
- req = elv_next_request(host->breq);
+ bool rem;
/* check status */
do {
i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
- if (i & MG_REG_STATUS_BIT_BUSY)
+ if (i & ATA_BUSY)
break;
if (!MG_READY_OK(i))
break;
- if ((req->nr_sectors <= 1) || (i & MG_REG_STATUS_BIT_DATA_REQ))
+ if ((blk_rq_sectors(req) <= 1) || (i & ATA_DRQ))
goto ok_to_write;
} while (0);
mg_dump_status("mg_write_intr", i, host);
@@ -489,18 +621,8 @@ static void mg_write_intr(struct mg_host *host)
return;
ok_to_write:
- /* manipulate request */
- req->sector++;
- i = --req->nr_sectors;
- --req->current_nr_sectors;
- req->buffer += MG_SECTOR_SIZE;
-
- /* let know if current segment or all done */
- if (!i || (req->bio && req->current_nr_sectors <= 0))
- end_request(req, 1);
-
- /* write 1 sector and set handler if remains */
- if (i > 0) {
+ if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) {
+ /* write 1 sector and set handler if remains */
buff = (u16 *)req->buffer;
for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
outw(*buff, (unsigned long)host->dev_base +
@@ -508,7 +630,7 @@ ok_to_write:
buff++;
}
MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
- req->sector, req->nr_sectors, req->buffer);
+ blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
host->mg_do_intr = mg_write_intr;
mod_timer(&host->timer, jiffies + 3 * HZ);
}
@@ -516,7 +638,7 @@ ok_to_write:
/* send write confirm */
outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
- if (!i)
+ if (!rem)
mg_request(host->breq);
}
@@ -524,49 +646,45 @@ void mg_times_out(unsigned long data)
{
struct mg_host *host = (struct mg_host *)data;
char *name;
- struct request *req;
spin_lock_irq(&host->lock);
- req = elv_next_request(host->breq);
- if (!req)
+ if (!host->req)
goto out_unlock;
host->mg_do_intr = NULL;
- name = req->rq_disk->disk_name;
+ name = host->req->rq_disk->disk_name;
printk(KERN_DEBUG "%s: timeout\n", name);
host->error = MG_ERR_TIMEOUT;
mg_bad_rw_intr(host);
- mg_request(host->breq);
out_unlock:
+ mg_request(host->breq);
spin_unlock_irq(&host->lock);
}
static void mg_request_poll(struct request_queue *q)
{
- struct request *req;
- struct mg_host *host;
+ struct mg_host *host = q->queuedata;
- while ((req = elv_next_request(q)) != NULL) {
- host = req->rq_disk->private_data;
- if (blk_fs_request(req)) {
- switch (rq_data_dir(req)) {
- case READ:
- mg_read(req);
- break;
- case WRITE:
- mg_write(req);
- break;
- default:
- printk(KERN_WARNING "%s:%d unknown command\n",
- __func__, __LINE__);
- end_request(req, 0);
+ while (1) {
+ if (!host->req) {
+ host->req = blk_fetch_request(q);
+ if (!host->req)
break;
- }
}
+
+ if (unlikely(!blk_fs_request(host->req))) {
+ mg_end_request_cur(host, -EIO);
+ continue;
+ }
+
+ if (rq_data_dir(host->req) == READ)
+ mg_read(host->req);
+ else
+ mg_write(host->req);
}
}
@@ -588,18 +706,15 @@ static unsigned int mg_issue_req(struct request *req,
break;
case WRITE:
/* TODO : handler */
- outb(MG_REG_CTRL_INTR_DISABLE,
- (unsigned long)host->dev_base +
- MG_REG_DRV_CTRL);
+ outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
!= MG_ERR_NONE) {
mg_bad_rw_intr(host);
return host->error;
}
del_timer(&host->timer);
- mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_WR_DRQ);
- outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
- MG_REG_DRV_CTRL);
+ mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ);
+ outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
if (host->error) {
mg_bad_rw_intr(host);
return host->error;
@@ -614,11 +729,6 @@ static unsigned int mg_issue_req(struct request *req,
outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
MG_REG_COMMAND);
break;
- default:
- printk(KERN_WARNING "%s:%d unknown command\n",
- __func__, __LINE__);
- end_request(req, 0);
- break;
}
return MG_ERR_NONE;
}
@@ -626,16 +736,17 @@ static unsigned int mg_issue_req(struct request *req,
/* This function also called from IRQ context */
static void mg_request(struct request_queue *q)
{
+ struct mg_host *host = q->queuedata;
struct request *req;
- struct mg_host *host;
u32 sect_num, sect_cnt;
while (1) {
- req = elv_next_request(q);
- if (!req)
- return;
-
- host = req->rq_disk->private_data;
+ if (!host->req) {
+ host->req = blk_fetch_request(q);
+ if (!host->req)
+ break;
+ }
+ req = host->req;
/* check unwanted request call */
if (host->mg_do_intr)
@@ -643,9 +754,9 @@ static void mg_request(struct request_queue *q)
del_timer(&host->timer);
- sect_num = req->sector;
+ sect_num = blk_rq_pos(req);
/* deal whole segments */
- sect_cnt = req->nr_sectors;
+ sect_cnt = blk_rq_sectors(req);
/* sanity check */
if (sect_num >= get_capacity(req->rq_disk) ||
@@ -655,12 +766,14 @@ static void mg_request(struct request_queue *q)
"%s: bad access: sector=%d, count=%d\n",
req->rq_disk->disk_name,
sect_num, sect_cnt);
- end_request(req, 0);
+ mg_end_request_cur(host, -EIO);
continue;
}
- if (!blk_fs_request(req))
- return;
+ if (unlikely(!blk_fs_request(req))) {
+ mg_end_request_cur(host, -EIO);
+ continue;
+ }
if (!mg_issue_req(req, host, sect_num, sect_cnt))
return;
@@ -690,9 +803,7 @@ static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
return -EIO;
if (!prv_data->use_polling)
- outb(MG_REG_CTRL_INTR_DISABLE,
- (unsigned long)host->dev_base +
- MG_REG_DRV_CTRL);
+ outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND);
/* wait until mflash deep sleep */
@@ -700,9 +811,7 @@ static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) {
if (!prv_data->use_polling)
- outb(MG_REG_CTRL_INTR_ENABLE,
- (unsigned long)host->dev_base +
- MG_REG_DRV_CTRL);
+ outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
return -EIO;
}
@@ -725,8 +834,7 @@ static int mg_resume(struct platform_device *plat_dev)
return -EIO;
if (!prv_data->use_polling)
- outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
- MG_REG_DRV_CTRL);
+ outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
return 0;
}
@@ -877,6 +985,7 @@ static int mg_probe(struct platform_device *plat_dev)
__func__, __LINE__);
goto probe_err_5;
}
+ host->breq->queuedata = host;
/* mflash is random device, thanx for the noop */
elevator_exit(host->breq->elevator);
@@ -887,7 +996,7 @@ static int mg_probe(struct platform_device *plat_dev)
goto probe_err_6;
}
blk_queue_max_sectors(host->breq, MG_MAX_SECTS);
- blk_queue_hardsect_size(host->breq, MG_SECTOR_SIZE);
+ blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE);
init_timer(&host->timer);
host->timer.function = mg_times_out;
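
The rewritten mg_read()/mg_write() loops are driven by mg_end_request(): __blk_end_request() completes nr_bytes of the request and returns true while bytes remain, and the block layer advances the request's position and buffer as bytes complete, which is why req->buffer is re-read on every pass. The resulting shape, with poll_drq() and pio_one_sector() as hypothetical stand-ins for the register I/O above (req == host->req, as in the driver):

do {
	u16 *buff = (u16 *)req->buffer;	/* advances per completed sector */

	if (poll_drq(host) != MG_ERR_NONE) {	/* hypothetical status poll */
		mg_bad_rw_intr(host);
		return;
	}
	pio_one_sector(host, buff);		/* hypothetical PIO copy */
} while (mg_end_request(host, 0, MG_SECTOR_SIZE));
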
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 4d6de4f15ccb..5d23ffad7c77 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -110,7 +110,7 @@ static void nbd_end_request(struct request *req)
req, error ? "failed" : "done");
spin_lock_irqsave(q->queue_lock, flags);
- __blk_end_request(req, error, req->nr_sectors << 9);
+ __blk_end_request_all(req, error);
spin_unlock_irqrestore(q->queue_lock, flags);
}
@@ -231,19 +231,19 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
{
int result, flags;
struct nbd_request request;
- unsigned long size = req->nr_sectors << 9;
+ unsigned long size = blk_rq_bytes(req);
request.magic = htonl(NBD_REQUEST_MAGIC);
request.type = htonl(nbd_cmd(req));
- request.from = cpu_to_be64((u64) req->sector << 9);
+ request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
request.len = htonl(size);
memcpy(request.handle, &req, sizeof(req));
- dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n",
+ dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
lo->disk->disk_name, req,
nbdcmd_to_ascii(nbd_cmd(req)),
- (unsigned long long)req->sector << 9,
- req->nr_sectors << 9);
+ (unsigned long long)blk_rq_pos(req) << 9,
+ blk_rq_bytes(req));
result = sock_xmit(lo, 1, &request, sizeof(request),
(nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
if (result <= 0) {
@@ -533,11 +533,9 @@ static void do_nbd_request(struct request_queue *q)
{
struct request *req;
- while ((req = elv_next_request(q)) != NULL) {
+ while ((req = blk_fetch_request(q)) != NULL) {
struct nbd_device *lo;
- blkdev_dequeue_request(req);
-
spin_unlock_irq(q->queue_lock);
dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
@@ -580,13 +578,6 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
blk_rq_init(NULL, &sreq);
sreq.cmd_type = REQ_TYPE_SPECIAL;
nbd_cmd(&sreq) = NBD_CMD_DISC;
- /*
- * Set these to sane values in case server implementation
- * fails to check the request type first and also to keep
- * debugging output cleaner.
- */
- sreq.sector = 0;
- sreq.nr_sectors = 0;
if (!lo->sock)
return -EINVAL;
nbd_send_req(lo, &sreq);
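
nbd's completion collapses into __blk_end_request_all(), which finishes every byte of the request in one call. Like all the double-underscore completion variants it expects the caller to hold the queue lock, as the hunk above does:

spin_lock_irqsave(q->queue_lock, flags);
__blk_end_request_all(req, error);	/* error: 0 on success, else -EIO etc. */
spin_unlock_irqrestore(q->queue_lock, flags);
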
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index e91d4b4b014f..911dfd98d813 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -719,32 +719,37 @@ static void do_pcd_request(struct request_queue * q)
if (pcd_busy)
return;
while (1) {
- pcd_req = elv_next_request(q);
- if (!pcd_req)
- return;
+ if (!pcd_req) {
+ pcd_req = blk_fetch_request(q);
+ if (!pcd_req)
+ return;
+ }
if (rq_data_dir(pcd_req) == READ) {
struct pcd_unit *cd = pcd_req->rq_disk->private_data;
if (cd != pcd_current)
pcd_bufblk = -1;
pcd_current = cd;
- pcd_sector = pcd_req->sector;
- pcd_count = pcd_req->current_nr_sectors;
+ pcd_sector = blk_rq_pos(pcd_req);
+ pcd_count = blk_rq_cur_sectors(pcd_req);
pcd_buf = pcd_req->buffer;
pcd_busy = 1;
ps_set_intr(do_pcd_read, NULL, 0, nice);
return;
- } else
- end_request(pcd_req, 0);
+ } else {
+ __blk_end_request_all(pcd_req, -EIO);
+ pcd_req = NULL;
+ }
}
}
-static inline void next_request(int success)
+static inline void next_request(int err)
{
unsigned long saved_flags;
spin_lock_irqsave(&pcd_lock, saved_flags);
- end_request(pcd_req, success);
+ if (!__blk_end_request_cur(pcd_req, err))
+ pcd_req = NULL;
pcd_busy = 0;
do_pcd_request(pcd_queue);
spin_unlock_irqrestore(&pcd_lock, saved_flags);
@@ -781,7 +786,7 @@ static void pcd_start(void)
if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) {
pcd_bufblk = -1;
- next_request(0);
+ next_request(-EIO);
return;
}
@@ -796,7 +801,7 @@ static void do_pcd_read(void)
pcd_retries = 0;
pcd_transfer();
if (!pcd_count) {
- next_request(1);
+ next_request(0);
return;
}
@@ -815,7 +820,7 @@ static void do_pcd_read_drq(void)
return;
}
pcd_bufblk = -1;
- next_request(0);
+ next_request(-EIO);
return;
}
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 9299455b0af6..bf5955b3d873 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -410,10 +410,12 @@ static void run_fsm(void)
pd_claimed = 0;
phase = NULL;
spin_lock_irqsave(&pd_lock, saved_flags);
- end_request(pd_req, res);
- pd_req = elv_next_request(pd_queue);
- if (!pd_req)
- stop = 1;
+ if (!__blk_end_request_cur(pd_req,
+ res == Ok ? 0 : -EIO)) {
+ pd_req = blk_fetch_request(pd_queue);
+ if (!pd_req)
+ stop = 1;
+ }
spin_unlock_irqrestore(&pd_lock, saved_flags);
if (stop)
return;
@@ -443,11 +445,11 @@ static enum action do_pd_io_start(void)
pd_cmd = rq_data_dir(pd_req);
if (pd_cmd == READ || pd_cmd == WRITE) {
- pd_block = pd_req->sector;
- pd_count = pd_req->current_nr_sectors;
+ pd_block = blk_rq_pos(pd_req);
+ pd_count = blk_rq_cur_sectors(pd_req);
if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
return Fail;
- pd_run = pd_req->nr_sectors;
+ pd_run = blk_rq_sectors(pd_req);
pd_buf = pd_req->buffer;
pd_retries = 0;
if (pd_cmd == READ)
@@ -477,8 +479,8 @@ static int pd_next_buf(void)
if (pd_count)
return 0;
spin_lock_irqsave(&pd_lock, saved_flags);
- end_request(pd_req, 1);
- pd_count = pd_req->current_nr_sectors;
+ __blk_end_request_cur(pd_req, 0);
+ pd_count = blk_rq_cur_sectors(pd_req);
pd_buf = pd_req->buffer;
spin_unlock_irqrestore(&pd_lock, saved_flags);
return 0;
@@ -702,7 +704,7 @@ static void do_pd_request(struct request_queue * q)
{
if (pd_req)
return;
- pd_req = elv_next_request(q);
+ pd_req = blk_fetch_request(q);
if (!pd_req)
return;
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index bef3b997ba3e..68a90834e993 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -750,12 +750,10 @@ static int pf_ready(void)
static struct request_queue *pf_queue;
-static void pf_end_request(int uptodate)
+static void pf_end_request(int err)
{
- if (pf_req) {
- end_request(pf_req, uptodate);
+ if (pf_req && !__blk_end_request_cur(pf_req, err))
pf_req = NULL;
- }
}
static void do_pf_request(struct request_queue * q)
@@ -763,17 +761,19 @@ static void do_pf_request(struct request_queue * q)
if (pf_busy)
return;
repeat:
- pf_req = elv_next_request(q);
- if (!pf_req)
- return;
+ if (!pf_req) {
+ pf_req = blk_fetch_request(q);
+ if (!pf_req)
+ return;
+ }
pf_current = pf_req->rq_disk->private_data;
- pf_block = pf_req->sector;
- pf_run = pf_req->nr_sectors;
- pf_count = pf_req->current_nr_sectors;
+ pf_block = blk_rq_pos(pf_req);
+ pf_run = blk_rq_sectors(pf_req);
+ pf_count = blk_rq_cur_sectors(pf_req);
if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
- pf_end_request(0);
+ pf_end_request(-EIO);
goto repeat;
}
@@ -788,7 +788,7 @@ repeat:
pi_do_claimed(pf_current->pi, do_pf_write);
else {
pf_busy = 0;
- pf_end_request(0);
+ pf_end_request(-EIO);
goto repeat;
}
}
@@ -805,23 +805,22 @@ static int pf_next_buf(void)
return 1;
if (!pf_count) {
spin_lock_irqsave(&pf_spin_lock, saved_flags);
- pf_end_request(1);
- pf_req = elv_next_request(pf_queue);
+ pf_end_request(0);
spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
if (!pf_req)
return 1;
- pf_count = pf_req->current_nr_sectors;
+ pf_count = blk_rq_cur_sectors(pf_req);
pf_buf = pf_req->buffer;
}
return 0;
}
-static inline void next_request(int success)
+static inline void next_request(int err)
{
unsigned long saved_flags;
spin_lock_irqsave(&pf_spin_lock, saved_flags);
- pf_end_request(success);
+ pf_end_request(err);
pf_busy = 0;
do_pf_request(pf_queue);
spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
@@ -844,7 +843,7 @@ static void do_pf_read_start(void)
pi_do_claimed(pf_current->pi, do_pf_read_start);
return;
}
- next_request(0);
+ next_request(-EIO);
return;
}
pf_mask = STAT_DRQ;
@@ -863,7 +862,7 @@ static void do_pf_read_drq(void)
pi_do_claimed(pf_current->pi, do_pf_read_start);
return;
}
- next_request(0);
+ next_request(-EIO);
return;
}
pi_read_block(pf_current->pi, pf_buf, 512);
@@ -871,7 +870,7 @@ static void do_pf_read_drq(void)
break;
}
pi_disconnect(pf_current->pi);
- next_request(1);
+ next_request(0);
}
static void do_pf_write(void)
@@ -890,7 +889,7 @@ static void do_pf_write_start(void)
pi_do_claimed(pf_current->pi, do_pf_write_start);
return;
}
- next_request(0);
+ next_request(-EIO);
return;
}
@@ -903,7 +902,7 @@ static void do_pf_write_start(void)
pi_do_claimed(pf_current->pi, do_pf_write_start);
return;
}
- next_request(0);
+ next_request(-EIO);
return;
}
pi_write_block(pf_current->pi, pf_buf, 512);
@@ -923,11 +922,11 @@ static void do_pf_write_done(void)
pi_do_claimed(pf_current->pi, do_pf_write_start);
return;
}
- next_request(0);
+ next_request(-EIO);
return;
}
pi_disconnect(pf_current->pi);
- next_request(1);
+ next_request(0);
}
static int __init pf_init(void)
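
The paride drivers also flip error-reporting conventions: end_request() took a boolean uptodate flag (1 for success, 0 for failure), while the new completion calls take 0 for success and a negative errno for failure, hence every next_request(1)/next_request(0) pair above becoming 0/-EIO. Side by side:

end_request(req, 1);			/* old: 1 meant success */
__blk_end_request_cur(req, 0);		/* new: 0 means success */

end_request(req, 0);			/* old: 0 meant failure */
__blk_end_request_cur(req, -EIO);	/* new: negative errno */
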
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index dc7a8c352da2..d57f11759480 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -991,13 +991,15 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
*/
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
- if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
+ if ((pd->settings.size << 9) / CD_FRAMESIZE
+ <= queue_max_phys_segments(q)) {
/*
* The cdrom device can handle one segment/frame
*/
clear_bit(PACKET_MERGE_SEGS, &pd->flags);
return 0;
- } else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) {
+ } else if ((pd->settings.size << 9) / PAGE_SIZE
+ <= queue_max_phys_segments(q)) {
/*
* We can handle this case at the expense of some extra memory
* copies during write operations
@@ -2657,7 +2659,7 @@ static void pkt_init_queue(struct pktcdvd_device *pd)
struct request_queue *q = pd->disk->queue;
blk_queue_make_request(q, pkt_make_request);
- blk_queue_hardsect_size(q, CD_FRAMESIZE);
+ blk_queue_logical_block_size(q, CD_FRAMESIZE);
blk_queue_max_sectors(q, PACKET_MAX_SECTORS);
blk_queue_merge_bvec(q, pkt_merge_bvec);
q->queuedata = pd;
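
pktcdvd picks up two renamed interfaces from this series: queue limits are read through accessors rather than struct fields, and the hardsect-size setter is renamed to say what it actually sets:

unsigned int segs = queue_max_phys_segments(q);	/* was q->max_phys_segments */

blk_queue_logical_block_size(q, CD_FRAMESIZE);	/* was blk_queue_hardsect_size() */
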
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index bccc42bb9212..aaeeb544228a 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -134,13 +134,12 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
rq_for_each_segment(bv, req, iter)
n++;
dev_dbg(&dev->sbd.core,
- "%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n",
- __func__, __LINE__, op, n, req->nr_sectors,
- req->hard_nr_sectors);
+ "%s:%u: %s req has %u bvecs for %u sectors\n",
+ __func__, __LINE__, op, n, blk_rq_sectors(req));
#endif
- start_sector = req->sector * priv->blocking_factor;
- sectors = req->nr_sectors * priv->blocking_factor;
+ start_sector = blk_rq_pos(req) * priv->blocking_factor;
+ sectors = blk_rq_sectors(req) * priv->blocking_factor;
dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n",
__func__, __LINE__, op, sectors, start_sector);
@@ -158,7 +157,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
if (res) {
dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
__LINE__, op, res);
- end_request(req, 0);
+ __blk_end_request_all(req, -EIO);
return 0;
}
@@ -180,7 +179,7 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
if (res) {
dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
__func__, __LINE__, res);
- end_request(req, 0);
+ __blk_end_request_all(req, -EIO);
return 0;
}
@@ -195,7 +194,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
- while ((req = elv_next_request(q))) {
+ while ((req = blk_fetch_request(q))) {
if (blk_fs_request(req)) {
if (ps3disk_submit_request_sg(dev, req))
break;
@@ -205,7 +204,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
break;
} else {
blk_dump_rq_flags(req, DEVICE_NAME " bad request");
- end_request(req, 0);
+ __blk_end_request_all(req, -EIO);
continue;
}
}
@@ -231,7 +230,6 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
struct request *req;
int res, read, error;
u64 tag, status;
- unsigned long num_sectors;
const char *op;
res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
@@ -261,11 +259,9 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
req->cmd[0] == REQ_LB_OP_FLUSH) {
read = 0;
- num_sectors = req->hard_cur_sectors;
op = "flush";
} else {
read = !rq_data_dir(req);
- num_sectors = req->nr_sectors;
op = read ? "read" : "write";
}
if (status) {
@@ -281,7 +277,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
}
spin_lock(&priv->lock);
- __blk_end_request(req, error, num_sectors << 9);
+ __blk_end_request_all(req, error);
priv->req = NULL;
ps3disk_do_request(dev, priv->queue);
spin_unlock(&priv->lock);
@@ -481,7 +477,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
blk_queue_max_sectors(queue, dev->bounce_size >> 9);
blk_queue_segment_boundary(queue, -1UL);
blk_queue_dma_alignment(queue, dev->blk_size-1);
- blk_queue_hardsect_size(queue, dev->blk_size);
+ blk_queue_logical_block_size(queue, dev->blk_size);
blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
ps3disk_prepare_flush);
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 5861e33efe63..cbfd9c0aef03 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -212,11 +212,6 @@ static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
}
-static void vdc_end_request(struct request *req, int error, int num_sectors)
-{
- __blk_end_request(req, error, num_sectors << 9);
-}
-
static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
unsigned int index)
{
@@ -239,7 +234,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
rqe->req = NULL;
- vdc_end_request(req, (desc->status ? -EIO : 0), desc->size >> 9);
+ __blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
if (blk_queue_stopped(port->disk->queue))
blk_start_queue(port->disk->queue);
@@ -421,7 +416,7 @@ static int __send_request(struct request *req)
desc->slice = 0;
}
desc->status = ~0;
- desc->offset = (req->sector << 9) / port->vdisk_block_size;
+ desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
desc->size = len;
desc->ncookies = err;
@@ -446,14 +441,13 @@ out:
static void do_vdc_request(struct request_queue *q)
{
while (1) {
- struct request *req = elv_next_request(q);
+ struct request *req = blk_fetch_request(q);
if (!req)
break;
- blkdev_dequeue_request(req);
if (__send_request(req) < 0)
- vdc_end_request(req, -EIO, req->hard_nr_sectors);
+ __blk_end_request_all(req, -EIO);
}
}
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index d22cc3856937..cf7877fb8a7d 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -514,7 +514,7 @@ static int floppy_read_sectors(struct floppy_state *fs,
ret = swim_read_sector(fs, side, track, sector,
buffer);
if (try-- == 0)
- return -1;
+ return -EIO;
} while (ret != 512);
buffer += ret;
@@ -528,45 +528,31 @@ static void redo_fd_request(struct request_queue *q)
struct request *req;
struct floppy_state *fs;
- while ((req = elv_next_request(q))) {
+ req = blk_fetch_request(q);
+ while (req) {
+ int err = -EIO;
fs = req->rq_disk->private_data;
- if (req->sector < 0 || req->sector >= fs->total_secs) {
- end_request(req, 0);
- continue;
- }
- if (req->current_nr_sectors == 0) {
- end_request(req, 1);
- continue;
- }
- if (!fs->disk_in) {
- end_request(req, 0);
- continue;
- }
- if (rq_data_dir(req) == WRITE) {
- if (fs->write_protected) {
- end_request(req, 0);
- continue;
- }
- }
+ if (blk_rq_pos(req) >= fs->total_secs)
+ goto done;
+ if (!fs->disk_in)
+ goto done;
+ if (rq_data_dir(req) == WRITE && fs->write_protected)
+ goto done;
+
switch (rq_data_dir(req)) {
case WRITE:
/* NOT IMPLEMENTED */
- end_request(req, 0);
break;
case READ:
- if (floppy_read_sectors(fs, req->sector,
- req->current_nr_sectors,
- req->buffer)) {
- end_request(req, 0);
- continue;
- }
- req->nr_sectors -= req->current_nr_sectors;
- req->sector += req->current_nr_sectors;
- req->buffer += req->current_nr_sectors * 512;
- end_request(req, 1);
+ err = floppy_read_sectors(fs, blk_rq_pos(req),
+ blk_rq_cur_sectors(req),
+ req->buffer);
break;
}
+ done:
+ if (!__blk_end_request_cur(req, err))
+ req = blk_fetch_request(q);
}
}
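
swim's redo_fd_request() now funnels every outcome through a single done: label; __blk_end_request_cur() returns false once the request has fully completed, at which point the next request is fetched. The control flow reduces to the loop below (handle_one_segment() is a hypothetical stand-in for the read/write work):

req = blk_fetch_request(q);
while (req) {
	int err = handle_one_segment(req);	/* hypothetical */

	if (!__blk_end_request_cur(req, err))	/* false: request fully done */
		req = blk_fetch_request(q);
}
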
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 612965307ba0..80df93e3cdd0 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -251,6 +251,20 @@ static int floppy_release(struct gendisk *disk, fmode_t mode);
static int floppy_check_change(struct gendisk *disk);
static int floppy_revalidate(struct gendisk *disk);
+static bool swim3_end_request(int err, unsigned int nr_bytes)
+{
+ if (__blk_end_request(fd_req, err, nr_bytes))
+ return true;
+
+ fd_req = NULL;
+ return false;
+}
+
+static bool swim3_end_request_cur(int err)
+{
+ return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
+}
+
static void swim3_select(struct floppy_state *fs, int sel)
{
struct swim3 __iomem *sw = fs->swim3;
@@ -310,25 +324,27 @@ static void start_request(struct floppy_state *fs)
wake_up(&fs->wait);
return;
}
- while (fs->state == idle && (req = elv_next_request(swim3_queue))) {
+ while (fs->state == idle) {
+ if (!fd_req) {
+ fd_req = blk_fetch_request(swim3_queue);
+ if (!fd_req)
+ break;
+ }
+ req = fd_req;
#if 0
- printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n",
+ printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
req->rq_disk->disk_name, req->cmd,
- (long)req->sector, req->nr_sectors, req->buffer);
- printk(" errors=%d current_nr_sectors=%ld\n",
- req->errors, req->current_nr_sectors);
+ (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
+ printk(" errors=%d current_nr_sectors=%u\n",
+ req->errors, blk_rq_cur_sectors(req));
#endif
- if (req->sector < 0 || req->sector >= fs->total_secs) {
- end_request(req, 0);
- continue;
- }
- if (req->current_nr_sectors == 0) {
- end_request(req, 1);
+ if (blk_rq_pos(req) >= fs->total_secs) {
+ swim3_end_request_cur(-EIO);
continue;
}
if (fs->ejected) {
- end_request(req, 0);
+ swim3_end_request_cur(-EIO);
continue;
}
@@ -336,18 +352,19 @@ static void start_request(struct floppy_state *fs)
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot) {
- end_request(req, 0);
+ swim3_end_request_cur(-EIO);
continue;
}
}
- /* Do not remove the cast. req->sector is now a sector_t and
- * can be 64 bits, but it will never go past 32 bits for this
- * driver anyway, so we can safely cast it down and not have
- * to do a 64/32 division
+ /* Do not remove the cast. blk_rq_pos(req) is now a
+ * sector_t and can be 64 bits, but it will never go
+ * past 32 bits for this driver anyway, so we can
+ * safely cast it down and not have to do a 64/32
+ * division
*/
- fs->req_cyl = ((long)req->sector) / fs->secpercyl;
- x = ((long)req->sector) % fs->secpercyl;
+ fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
+ x = ((long)blk_rq_pos(req)) % fs->secpercyl;
fs->head = x / fs->secpertrack;
fs->req_sector = x % fs->secpertrack + 1;
fd_req = req;
@@ -424,7 +441,7 @@ static inline void setup_transfer(struct floppy_state *fs)
struct dbdma_cmd *cp = fs->dma_cmd;
struct dbdma_regs __iomem *dr = fs->dma;
- if (fd_req->current_nr_sectors <= 0) {
+ if (blk_rq_cur_sectors(fd_req) <= 0) {
printk(KERN_ERR "swim3: transfer 0 sectors?\n");
return;
}
@@ -432,8 +449,8 @@ static inline void setup_transfer(struct floppy_state *fs)
n = 1;
else {
n = fs->secpertrack - fs->req_sector + 1;
- if (n > fd_req->current_nr_sectors)
- n = fd_req->current_nr_sectors;
+ if (n > blk_rq_cur_sectors(fd_req))
+ n = blk_rq_cur_sectors(fd_req);
}
fs->scount = n;
swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
@@ -508,7 +525,7 @@ static void act(struct floppy_state *fs)
case do_transfer:
if (fs->cur_cyl != fs->req_cyl) {
if (fs->retries > 5) {
- end_request(fd_req, 0);
+ swim3_end_request_cur(-EIO);
fs->state = idle;
return;
}
@@ -540,7 +557,7 @@ static void scan_timeout(unsigned long data)
out_8(&sw->intr_enable, 0);
fs->cur_cyl = -1;
if (fs->retries > 5) {
- end_request(fd_req, 0);
+ swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
} else {
@@ -559,7 +576,7 @@ static void seek_timeout(unsigned long data)
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
printk(KERN_ERR "swim3: seek timeout\n");
- end_request(fd_req, 0);
+ swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
}
@@ -583,7 +600,7 @@ static void settle_timeout(unsigned long data)
return;
}
printk(KERN_ERR "swim3: seek settle timeout\n");
- end_request(fd_req, 0);
+ swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
}
@@ -593,8 +610,6 @@ static void xfer_timeout(unsigned long data)
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_regs __iomem *dr = fs->dma;
- struct dbdma_cmd *cp = fs->dma_cmd;
- unsigned long s;
int n;
fs->timeout_pending = 0;
@@ -605,17 +620,10 @@ static void xfer_timeout(unsigned long data)
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
out_8(&sw->select, RELAX);
- if (rq_data_dir(fd_req) == WRITE)
- ++cp;
- if (ld_le16(&cp->xfer_status) != 0)
- s = fs->scount - ((ld_le16(&cp->res_count) + 511) >> 9);
- else
- s = 0;
- fd_req->sector += s;
- fd_req->current_nr_sectors -= s;
printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
- (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector);
- end_request(fd_req, 0);
+ (rq_data_dir(fd_req)==WRITE? "writ": "read"),
+ (long)blk_rq_pos(fd_req));
+ swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
}
@@ -646,7 +654,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
fs->cur_cyl = -1;
if (fs->retries > 5) {
- end_request(fd_req, 0);
+ swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
} else {
@@ -719,9 +727,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
if (intr & ERROR_INTR) {
n = fs->scount - 1 - resid / 512;
if (n > 0) {
- fd_req->sector += n;
- fd_req->current_nr_sectors -= n;
- fd_req->buffer += n * 512;
+ blk_update_request(fd_req, 0, n << 9);
fs->req_sector += n;
}
if (fs->retries < 5) {
@@ -730,8 +736,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
} else {
printk("swim3: error %sing block %ld (err=%x)\n",
rq_data_dir(fd_req) == WRITE? "writ": "read",
- (long)fd_req->sector, err);
- end_request(fd_req, 0);
+ (long)blk_rq_pos(fd_req), err);
+ swim3_end_request_cur(-EIO);
fs->state = idle;
}
} else {
@@ -740,18 +746,12 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n",
fs->state, rq_data_dir(fd_req), intr, err);
- end_request(fd_req, 0);
+ swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
break;
}
- fd_req->sector += fs->scount;
- fd_req->current_nr_sectors -= fs->scount;
- fd_req->buffer += fs->scount * 512;
- if (fd_req->current_nr_sectors <= 0) {
- end_request(fd_req, 1);
- fs->state = idle;
- } else {
+ if (swim3_end_request(0, fs->scount << 9)) {
fs->req_sector += fs->scount;
if (fs->req_sector > fs->secpertrack) {
fs->req_sector -= fs->secpertrack;
@@ -761,7 +761,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
}
}
act(fs);
- }
+ } else
+ fs->state = idle;
}
if (fs->state == idle)
start_request(fs);
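swim3 above also exercises the partial-completion side of the new API; a minimal sketch of the two calls involved (both take byte counts, not sectors):

/* advance the request by n sectors without completing any of it */
blk_update_request(rq, 0, n << 9);

/*
 * complete n sectors; __blk_end_request() returns true while the
 * request still has bytes pending, false once it is done and freed
 */
done = !__blk_end_request(rq, 0, n << 9);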
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index ff0448e4bf03..da403b6a7f43 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -749,8 +749,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
struct request *req = crq->rq;
int rc;
- rc = __blk_end_request(req, error, blk_rq_bytes(req));
- assert(rc == 0);
+ __blk_end_request_all(req, error);
rc = carm_put_request(host, crq);
assert(rc == 0);
@@ -811,12 +810,10 @@ static void carm_oob_rq_fn(struct request_queue *q)
while (1) {
DPRINTK("get req\n");
- rq = elv_next_request(q);
+ rq = blk_fetch_request(q);
if (!rq)
break;
- blkdev_dequeue_request(rq);
-
crq = rq->special;
assert(crq != NULL);
assert(crq->rq == rq);
@@ -847,7 +844,7 @@ static void carm_rq_fn(struct request_queue *q)
queue_one_request:
VPRINTK("get req\n");
- rq = elv_next_request(q);
+ rq = blk_peek_request(q);
if (!rq)
return;
@@ -858,7 +855,7 @@ queue_one_request:
}
crq->rq = rq;
- blkdev_dequeue_request(rq);
+ blk_start_request(rq);
if (rq_data_dir(rq) == WRITE) {
writing = 1;
@@ -904,10 +901,10 @@ queue_one_request:
msg->sg_count = n_elem;
msg->sg_type = SGT_32BIT;
msg->handle = cpu_to_le32(TAG_ENCODE(crq->tag));
- msg->lba = cpu_to_le32(rq->sector & 0xffffffff);
- tmp = (rq->sector >> 16) >> 16;
+ msg->lba = cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
+ tmp = (blk_rq_pos(rq) >> 16) >> 16;
msg->lba_high = cpu_to_le16( (u16) tmp );
- msg->lba_count = cpu_to_le16(rq->nr_sectors);
+ msg->lba_count = cpu_to_le16(blk_rq_sectors(rq));
msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
for (i = 0; i < n_elem; i++) {
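Drivers that may need to back off mid-scan (sx8 above; ub and xen-blkfront below) use the peek/start split instead of blk_fetch_request(), so an unissued request stays on the queue. Roughly, with mydev_* again hypothetical:

static void mydev_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		if (mydev_busy()) {
			/* rq was never dequeued, so stopping here is safe */
			blk_stop_queue(q);
			break;
		}
		/* commit to the request: dequeue it and start its timeout */
		blk_start_request(rq);
		mydev_issue(rq);
	}
}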
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 689cd27ac890..cc54473b8e77 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -360,8 +360,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
-static void ub_end_rq(struct request *rq, unsigned int status,
- unsigned int cmd_len);
+static void ub_end_rq(struct request *rq, unsigned int status);
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
struct ub_request *urq, struct ub_scsi_cmd *cmd);
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
@@ -627,7 +626,7 @@ static void ub_request_fn(struct request_queue *q)
struct ub_lun *lun = q->queuedata;
struct request *rq;
- while ((rq = elv_next_request(q)) != NULL) {
+ while ((rq = blk_peek_request(q)) != NULL) {
if (ub_request_fn_1(lun, rq) != 0) {
blk_stop_queue(q);
break;
@@ -643,14 +642,14 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
int n_elem;
if (atomic_read(&sc->poison)) {
- blkdev_dequeue_request(rq);
- ub_end_rq(rq, DID_NO_CONNECT << 16, blk_rq_bytes(rq));
+ blk_start_request(rq);
+ ub_end_rq(rq, DID_NO_CONNECT << 16);
return 0;
}
if (lun->changed && !blk_pc_request(rq)) {
- blkdev_dequeue_request(rq);
- ub_end_rq(rq, SAM_STAT_CHECK_CONDITION, blk_rq_bytes(rq));
+ blk_start_request(rq);
+ ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
return 0;
}
@@ -660,7 +659,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
return -1;
memset(cmd, 0, sizeof(struct ub_scsi_cmd));
- blkdev_dequeue_request(rq);
+ blk_start_request(rq);
urq = &lun->urq;
memset(urq, 0, sizeof(struct ub_request));
@@ -702,7 +701,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
drop:
ub_put_cmd(lun, cmd);
- ub_end_rq(rq, DID_ERROR << 16, blk_rq_bytes(rq));
+ ub_end_rq(rq, DID_ERROR << 16);
return 0;
}
@@ -723,11 +722,11 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
/*
* build the command
*
- * The call to blk_queue_hardsect_size() guarantees that request
+ * The call to blk_queue_logical_block_size() guarantees that request
* is aligned, but it is given in terms of 512 byte units, always.
*/
- block = rq->sector >> lun->capacity.bshift;
- nblks = rq->nr_sectors >> lun->capacity.bshift;
+ block = blk_rq_pos(rq) >> lun->capacity.bshift;
+ nblks = blk_rq_sectors(rq) >> lun->capacity.bshift;
cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
/* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
@@ -739,7 +738,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
cmd->cdb[8] = nblks;
cmd->cdb_len = 10;
- cmd->len = rq->nr_sectors * 512;
+ cmd->len = blk_rq_bytes(rq);
}
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
@@ -747,7 +746,7 @@ static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
{
struct request *rq = urq->rq;
- if (rq->data_len == 0) {
+ if (blk_rq_bytes(rq) == 0) {
cmd->dir = UB_DIR_NONE;
} else {
if (rq_data_dir(rq) == WRITE)
@@ -762,7 +761,7 @@ static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
cmd->cdb_len = rq->cmd_len;
- cmd->len = rq->data_len;
+ cmd->len = blk_rq_bytes(rq);
/*
* To reapply this to every URB is not as incorrect as it looks.
@@ -777,16 +776,15 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
struct ub_request *urq = cmd->back;
struct request *rq;
unsigned int scsi_status;
- unsigned int cmd_len;
rq = urq->rq;
if (cmd->error == 0) {
if (blk_pc_request(rq)) {
- if (cmd->act_len >= rq->data_len)
- rq->data_len = 0;
+ if (cmd->act_len >= rq->resid_len)
+ rq->resid_len = 0;
else
- rq->data_len -= cmd->act_len;
+ rq->resid_len -= cmd->act_len;
scsi_status = 0;
} else {
if (cmd->act_len != cmd->len) {
@@ -818,17 +816,14 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
urq->rq = NULL;
- cmd_len = cmd->len;
ub_put_cmd(lun, cmd);
- ub_end_rq(rq, scsi_status, cmd_len);
+ ub_end_rq(rq, scsi_status);
blk_start_queue(lun->disk->queue);
}
-static void ub_end_rq(struct request *rq, unsigned int scsi_status,
- unsigned int cmd_len)
+static void ub_end_rq(struct request *rq, unsigned int scsi_status)
{
int error;
- long rqlen;
if (scsi_status == 0) {
error = 0;
@@ -836,12 +831,7 @@ static void ub_end_rq(struct request *rq, unsigned int scsi_status,
error = -EIO;
rq->errors = scsi_status;
}
- rqlen = blk_rq_bytes(rq); /* Oddly enough, this is the residue. */
- if (__blk_end_request(rq, error, cmd_len)) {
- printk(KERN_WARNING DRV_NAME
- ": __blk_end_request blew, %s-cmd total %u rqlen %ld\n",
- blk_pc_request(rq)? "pc": "fs", cmd_len, rqlen);
- }
+ __blk_end_request_all(rq, error);
}
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
@@ -1759,7 +1749,7 @@ static int ub_bd_revalidate(struct gendisk *disk)
ub_revalidate(lun->udev, lun);
/* XXX Support sector size switching like in sr.c */
- blk_queue_hardsect_size(disk->queue, lun->capacity.bsize);
+ blk_queue_logical_block_size(disk->queue, lun->capacity.bsize);
set_capacity(disk, lun->capacity.nsec);
// set_disk_ro(sdkp->disk, lun->readonly);
@@ -2334,7 +2324,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */
blk_queue_max_sectors(q, UB_MAX_SECTORS);
- blk_queue_hardsect_size(q, lun->capacity.bsize);
+ blk_queue_logical_block_size(q, lun->capacity.bsize);
lun->disk = disk;
q->queuedata = lun;
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index ecccf65dce2f..390d69bb7c48 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -252,7 +252,7 @@ static int send_request(struct request *req)
struct viodasd_device *d;
unsigned long flags;
- start = (u64)req->sector << 9;
+ start = (u64)blk_rq_pos(req) << 9;
if (rq_data_dir(req) == READ) {
direction = DMA_FROM_DEVICE;
@@ -361,19 +361,17 @@ static void do_viodasd_request(struct request_queue *q)
* back later.
*/
while (num_req_outstanding < VIOMAXREQ) {
- req = elv_next_request(q);
+ req = blk_fetch_request(q);
if (req == NULL)
return;
- /* dequeue the current request from the queue */
- blkdev_dequeue_request(req);
/* check that request contains a valid command */
if (!blk_fs_request(req)) {
- viodasd_end_request(req, -EIO, req->hard_nr_sectors);
+ viodasd_end_request(req, -EIO, blk_rq_sectors(req));
continue;
}
/* Try sending the request */
if (send_request(req) != 0)
- viodasd_end_request(req, -EIO, req->hard_nr_sectors);
+ viodasd_end_request(req, -EIO, blk_rq_sectors(req));
}
}
@@ -590,7 +588,7 @@ static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n",
event->xRc, bevent->sub_result, err->msg);
- num_sect = req->hard_nr_sectors;
+ num_sect = blk_rq_sectors(req);
}
qlock = req->q->queue_lock;
spin_lock_irqsave(qlock, irq_flags);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 5d34764c8a87..c0facaa55cf4 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -37,6 +37,7 @@ struct virtblk_req
struct list_head list;
struct request *req;
struct virtio_blk_outhdr out_hdr;
+ struct virtio_scsi_inhdr in_hdr;
u8 status;
};
@@ -50,6 +51,7 @@ static void blk_done(struct virtqueue *vq)
spin_lock_irqsave(&vblk->lock, flags);
while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
int error;
+
switch (vbr->status) {
case VIRTIO_BLK_S_OK:
error = 0;
@@ -62,7 +64,13 @@ static void blk_done(struct virtqueue *vq)
break;
}
- __blk_end_request(vbr->req, error, blk_rq_bytes(vbr->req));
+ if (blk_pc_request(vbr->req)) {
+ vbr->req->resid_len = vbr->in_hdr.residual;
+ vbr->req->sense_len = vbr->in_hdr.sense_len;
+ vbr->req->errors = vbr->in_hdr.errors;
+ }
+
+ __blk_end_request_all(vbr->req, error);
list_del(&vbr->list);
mempool_free(vbr, vblk->pool);
}
@@ -74,7 +82,7 @@ static void blk_done(struct virtqueue *vq)
static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
struct request *req)
{
- unsigned long num, out, in;
+ unsigned long num, out = 0, in = 0;
struct virtblk_req *vbr;
vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
@@ -85,7 +93,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
vbr->req = req;
if (blk_fs_request(vbr->req)) {
vbr->out_hdr.type = 0;
- vbr->out_hdr.sector = vbr->req->sector;
+ vbr->out_hdr.sector = blk_rq_pos(vbr->req);
vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
} else if (blk_pc_request(vbr->req)) {
vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
@@ -99,18 +107,36 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
if (blk_barrier_rq(vbr->req))
vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;
- sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr));
- num = blk_rq_map_sg(q, vbr->req, vblk->sg+1);
- sg_set_buf(&vblk->sg[num+1], &vbr->status, sizeof(vbr->status));
+ sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
- if (rq_data_dir(vbr->req) == WRITE) {
- vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
- out = 1 + num;
- in = 1;
- } else {
- vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
- out = 1;
- in = 1 + num;
+	/*
+	 * If this is a packet command we need a couple of additional headers.
+	 * Behind the normal outhdr we put a segment with the scsi command
+	 * block, and before the status byte we put the sense data and the
+	 * inhdr with additional status information.
+	 */
+ if (blk_pc_request(vbr->req))
+ sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);
+
+ num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
+
+ if (blk_pc_request(vbr->req)) {
+ sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
+ sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
+ sizeof(vbr->in_hdr));
+ }
+
+ sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
+ sizeof(vbr->status));
+
+ if (num) {
+ if (rq_data_dir(vbr->req) == WRITE) {
+ vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
+ out += num;
+ } else {
+ vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
+ in += num;
+ }
}
if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) {
@@ -124,12 +150,11 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
static void do_virtblk_request(struct request_queue *q)
{
- struct virtio_blk *vblk = NULL;
+ struct virtio_blk *vblk = q->queuedata;
struct request *req;
unsigned int issued = 0;
- while ((req = elv_next_request(q)) != NULL) {
- vblk = req->rq_disk->private_data;
+ while ((req = blk_peek_request(q)) != NULL) {
BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
/* If this request fails, stop queue and wait for something to
@@ -138,7 +163,7 @@ static void do_virtblk_request(struct request_queue *q)
blk_stop_queue(q);
break;
}
- blkdev_dequeue_request(req);
+ blk_start_request(req);
issued++;
}
@@ -146,12 +171,51 @@ static void do_virtblk_request(struct request_queue *q)
vblk->vq->vq_ops->kick(vblk->vq);
}
+/* Return ATA identify data. */
+static int virtblk_identify(struct gendisk *disk, void *argp)
+{
+ struct virtio_blk *vblk = disk->private_data;
+ void *opaque;
+ int err = -ENOMEM;
+
+ opaque = kmalloc(VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
+ if (!opaque)
+ goto out;
+
+ err = virtio_config_buf(vblk->vdev, VIRTIO_BLK_F_IDENTIFY,
+ offsetof(struct virtio_blk_config, identify), opaque,
+ VIRTIO_BLK_ID_BYTES);
+
+ if (err)
+ goto out_kfree;
+
+ if (copy_to_user(argp, opaque, VIRTIO_BLK_ID_BYTES))
+ err = -EFAULT;
+
+out_kfree:
+ kfree(opaque);
+out:
+ return err;
+}
+
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long data)
{
- return scsi_cmd_ioctl(bdev->bd_disk->queue,
- bdev->bd_disk, mode, cmd,
- (void __user *)data);
+ struct gendisk *disk = bdev->bd_disk;
+ struct virtio_blk *vblk = disk->private_data;
+ void __user *argp = (void __user *)data;
+
+ if (cmd == HDIO_GET_IDENTITY)
+ return virtblk_identify(disk, argp);
+
+ /*
+ * Only allow the generic SCSI ioctls if the host can support it.
+ */
+ if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
+ return -ENOIOCTLCMD;
+
+ return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
}
/* We provide getgeo only to please some old bootloader/partitioning tools */
@@ -249,6 +313,7 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_put_disk;
}
+ vblk->disk->queue->queuedata = vblk;
queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue);
if (index < 26) {
@@ -313,7 +378,7 @@ static int virtblk_probe(struct virtio_device *vdev)
offsetof(struct virtio_blk_config, blk_size),
&blk_size);
if (!err)
- blk_queue_hardsect_size(vblk->disk->queue, blk_size);
+ blk_queue_logical_block_size(vblk->disk->queue, blk_size);
add_disk(vblk->disk);
return 0;
@@ -356,6 +421,7 @@ static struct virtio_device_id id_table[] = {
static unsigned int features[] = {
VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
+ VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY
};
static struct virtio_driver virtio_blk = {
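A rough summary of the scatterlist layout the reworked do_req() builds (illustrative only; "out" segments are device-readable, "in" segments device-writable):

/*
 * out (device-readable):  out_hdr | cdb (SCSI cmds only) | write data
 * in  (device-writable):  read data | sense + in_hdr (SCSI cmds only) | status
 */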
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 64b496fce98b..ce2429219925 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -305,30 +305,25 @@ static void do_xd_request (struct request_queue * q)
if (xdc_busy)
return;
- while ((req = elv_next_request(q)) != NULL) {
- unsigned block = req->sector;
- unsigned count = req->nr_sectors;
- int rw = rq_data_dir(req);
+ req = blk_fetch_request(q);
+ while (req) {
+ unsigned block = blk_rq_pos(req);
+ unsigned count = blk_rq_cur_sectors(req);
XD_INFO *disk = req->rq_disk->private_data;
- int res = 0;
+ int res = -EIO;
int retry;
- if (!blk_fs_request(req)) {
- end_request(req, 0);
- continue;
- }
- if (block + count > get_capacity(req->rq_disk)) {
- end_request(req, 0);
- continue;
- }
- if (rw != READ && rw != WRITE) {
- printk("do_xd_request: unknown request\n");
- end_request(req, 0);
- continue;
- }
+ if (!blk_fs_request(req))
+ goto done;
+ if (block + count > get_capacity(req->rq_disk))
+ goto done;
for (retry = 0; (retry < XD_RETRIES) && !res; retry++)
- res = xd_readwrite(rw, disk, req->buffer, block, count);
- end_request(req, res); /* wrap up, 0 = fail, 1 = success */
+ res = xd_readwrite(rq_data_dir(req), disk, req->buffer,
+ block, count);
+ done:
+ /* wrap up, 0 = success, -errno = fail */
+ if (!__blk_end_request_cur(req, res))
+ req = blk_fetch_request(q);
}
}
@@ -418,7 +413,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write"));
xd_recalibrate(drive);
spin_lock_irq(&xd_lock);
- return (0);
+ return -EIO;
case 2:
if (sense[0] & 0x30) {
printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing"));
@@ -439,7 +434,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
else
printk(" - no valid disk address\n");
spin_lock_irq(&xd_lock);
- return (0);
+ return -EIO;
}
if (xd_dma_buffer)
for (i=0; i < (temp * 0x200); i++)
@@ -448,7 +443,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
count -= temp, buffer += temp * 0x200, block += temp;
}
spin_lock_irq(&xd_lock);
- return (1);
+ return 0;
}
/* xd_recalibrate: recalibrate a given drive and reset controller if necessary */
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a6cbf7b808e6..c1996829d5ec 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -122,7 +122,7 @@ static DEFINE_SPINLOCK(blkif_io_lock);
static int get_id_from_freelist(struct blkfront_info *info)
{
unsigned long free = info->shadow_free;
- BUG_ON(free > BLK_RING_SIZE);
+ BUG_ON(free >= BLK_RING_SIZE);
info->shadow_free = info->shadow[free].req.id;
info->shadow[free].req.id = 0x0fffffee; /* debug */
return free;
@@ -231,7 +231,7 @@ static int blkif_queue_request(struct request *req)
info->shadow[id].request = (unsigned long)req;
ring_req->id = id;
- ring_req->sector_number = (blkif_sector_t)req->sector;
+ ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
ring_req->handle = info->handle;
ring_req->operation = rq_data_dir(req) ?
@@ -299,25 +299,25 @@ static void do_blkif_request(struct request_queue *rq)
queued = 0;
- while ((req = elv_next_request(rq)) != NULL) {
+ while ((req = blk_peek_request(rq)) != NULL) {
info = req->rq_disk->private_data;
- if (!blk_fs_request(req)) {
- end_request(req, 0);
- continue;
- }
if (RING_FULL(&info->ring))
goto wait;
- pr_debug("do_blk_req %p: cmd %p, sec %lx, "
- "(%u/%li) buffer:%p [%s]\n",
- req, req->cmd, (unsigned long)req->sector,
- req->current_nr_sectors,
- req->nr_sectors, req->buffer,
- rq_data_dir(req) ? "write" : "read");
+ blk_start_request(req);
+ if (!blk_fs_request(req)) {
+ __blk_end_request_all(req, -EIO);
+ continue;
+ }
+
+ pr_debug("do_blk_req %p: cmd %p, sec %lx, "
+ "(%u/%u) buffer:%p [%s]\n",
+ req, req->cmd, (unsigned long)blk_rq_pos(req),
+ blk_rq_cur_sectors(req), blk_rq_sectors(req),
+ req->buffer, rq_data_dir(req) ? "write" : "read");
- blkdev_dequeue_request(req);
if (blkif_queue_request(req)) {
blk_requeue_request(rq, req);
wait:
@@ -344,7 +344,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
/* Hard sector size and max sectors impersonate the equiv. hardware. */
- blk_queue_hardsect_size(rq, sector_size);
+ blk_queue_logical_block_size(rq, sector_size);
blk_queue_max_sectors(rq, 512);
/* Each segment in a request is up to an aligned page in size. */
@@ -551,7 +551,6 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
for (i = info->ring.rsp_cons; i != rp; i++) {
unsigned long id;
- int ret;
bret = RING_GET_RESPONSE(&info->ring, i);
id = bret->id;
@@ -578,8 +577,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
"request: %x\n", bret->status);
- ret = __blk_end_request(req, error, blk_rq_bytes(req));
- BUG_ON(ret);
+ __blk_end_request_all(req, error);
break;
default:
BUG();
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 4aecf5dc6a93..f08491a3a813 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -463,10 +463,11 @@ struct request *ace_get_next_request(struct request_queue * q)
{
struct request *req;
- while ((req = elv_next_request(q)) != NULL) {
+ while ((req = blk_peek_request(q)) != NULL) {
if (blk_fs_request(req))
break;
- end_request(req, 0);
+ blk_start_request(req);
+ __blk_end_request_all(req, -EIO);
}
return req;
}
@@ -492,9 +493,13 @@ static void ace_fsm_dostate(struct ace_device *ace)
set_capacity(ace->gd, 0);
dev_info(ace->dev, "No CF in slot\n");
- /* Drop all pending requests */
- while ((req = elv_next_request(ace->queue)) != NULL)
- end_request(req, 0);
+ /* Drop all in-flight and pending requests */
+ if (ace->req) {
+ __blk_end_request_all(ace->req, -EIO);
+ ace->req = NULL;
+ }
+ while ((req = blk_fetch_request(ace->queue)) != NULL)
+ __blk_end_request_all(req, -EIO);
/* Drop back to IDLE state and notify waiters */
ace->fsm_state = ACE_FSM_STATE_IDLE;
@@ -642,19 +647,21 @@ static void ace_fsm_dostate(struct ace_device *ace)
ace->fsm_state = ACE_FSM_STATE_IDLE;
break;
}
+ blk_start_request(req);
/* Okay, it's a data request, set it up for transfer */
dev_dbg(ace->dev,
- "request: sec=%llx hcnt=%lx, ccnt=%x, dir=%i\n",
- (unsigned long long) req->sector, req->hard_nr_sectors,
- req->current_nr_sectors, rq_data_dir(req));
+ "request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
+ (unsigned long long)blk_rq_pos(req),
+ blk_rq_sectors(req), blk_rq_cur_sectors(req),
+ rq_data_dir(req));
ace->req = req;
ace->data_ptr = req->buffer;
- ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR;
- ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF);
+ ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
+ ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);
- count = req->hard_nr_sectors;
+ count = blk_rq_sectors(req);
if (rq_data_dir(req)) {
/* Kick off write request */
dev_dbg(ace->dev, "write data\n");
@@ -688,7 +695,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
dev_dbg(ace->dev,
"CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
ace->fsm_task, ace->fsm_iter_num,
- ace->req->current_nr_sectors * 16,
+ blk_rq_cur_sectors(ace->req) * 16,
ace->data_count, ace->in_irq);
ace_fsm_yield(ace); /* need to poll CFBSY bit */
break;
@@ -697,7 +704,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
dev_dbg(ace->dev,
"DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
ace->fsm_task, ace->fsm_iter_num,
- ace->req->current_nr_sectors * 16,
+ blk_rq_cur_sectors(ace->req) * 16,
ace->data_count, ace->in_irq);
ace_fsm_yieldirq(ace);
break;
@@ -717,14 +724,13 @@ static void ace_fsm_dostate(struct ace_device *ace)
}
/* bio finished; is there another one? */
- if (__blk_end_request(ace->req, 0,
- blk_rq_cur_bytes(ace->req))) {
- /* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
- * ace->req->hard_nr_sectors,
- * ace->req->current_nr_sectors);
+ if (__blk_end_request_cur(ace->req, 0)) {
+ /* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
+ * blk_rq_sectors(ace->req),
+ * blk_rq_cur_sectors(ace->req));
*/
ace->data_ptr = ace->req->buffer;
- ace->data_count = ace->req->current_nr_sectors * 16;
+ ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
ace_fsm_yieldirq(ace);
break;
}
@@ -978,7 +984,7 @@ static int __devinit ace_setup(struct ace_device *ace)
ace->queue = blk_init_queue(ace_request, &ace->lock);
if (ace->queue == NULL)
goto err_blk_initq;
- blk_queue_hardsect_size(ace->queue, 512);
+ blk_queue_logical_block_size(ace->queue, 512);
/*
* Allocate and initialize GD structure
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 80754cdd3119..4575171e5beb 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -70,15 +70,18 @@ static struct gendisk *z2ram_gendisk;
static void do_z2_request(struct request_queue *q)
{
struct request *req;
- while ((req = elv_next_request(q)) != NULL) {
- unsigned long start = req->sector << 9;
- unsigned long len = req->current_nr_sectors << 9;
+
+ req = blk_fetch_request(q);
+ while (req) {
+ unsigned long start = blk_rq_pos(req) << 9;
+ unsigned long len = blk_rq_cur_bytes(req);
+ int err = 0;
if (start + len > z2ram_size) {
printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n",
- req->sector, req->current_nr_sectors);
- end_request(req, 0);
- continue;
+ blk_rq_pos(req), blk_rq_cur_sectors(req));
+ err = -EIO;
+ goto done;
}
while (len) {
unsigned long addr = start & Z2RAM_CHUNKMASK;
@@ -93,7 +96,9 @@ static void do_z2_request(struct request_queue *q)
start += size;
len -= size;
}
- end_request(req, 1);
+ done:
+ if (!__blk_end_request_cur(req, err))
+ req = blk_fetch_request(q);
}
}
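All of the conversions in this series apply one consistent mapping from open-coded struct request and queue fields to the new accessors; collected here for reference:

/*
 * req->sector              ->  blk_rq_pos(req)
 * req->nr_sectors          ->  blk_rq_sectors(req)
 * req->hard_nr_sectors     ->  blk_rq_sectors(req)
 * req->current_nr_sectors  ->  blk_rq_cur_sectors(req)
 * req->data_len            ->  blk_rq_bytes(req) (residual: req->resid_len)
 * q->hardsect_size         ->  queue_logical_block_size(q)
 * q->max_sectors           ->  queue_max_sectors(q)
 */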
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index cceace61ef28..71d1b9bab70b 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2101,8 +2101,8 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
nr = nframes;
if (cdi->cdda_method == CDDA_BPC_SINGLE)
nr = 1;
- if (nr * CD_FRAMESIZE_RAW > (q->max_sectors << 9))
- nr = (q->max_sectors << 9) / CD_FRAMESIZE_RAW;
+ if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9))
+ nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW;
len = nr * CD_FRAMESIZE_RAW;
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 2eecb779437b..b5621f27c4be 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -584,8 +584,8 @@ static void gdrom_readdisk_dma(struct work_struct *work)
list_for_each_safe(elem, next, &gdrom_deferred) {
req = list_entry(elem, struct request, queuelist);
spin_unlock(&gdrom_lock);
- block = req->sector/GD_TO_BLK + GD_SESSION_OFFSET;
- block_cnt = req->nr_sectors/GD_TO_BLK;
+ block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
+ block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
ctrl_outl(PHYSADDR(req->buffer), GDROM_DMA_STARTADDR_REG);
ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
ctrl_outl(1, GDROM_DMA_DIRECTION_REG);
@@ -632,39 +632,35 @@ static void gdrom_readdisk_dma(struct work_struct *work)
* before handling ending the request */
spin_lock(&gdrom_lock);
list_del_init(&req->queuelist);
- __blk_end_request(req, err, blk_rq_bytes(req));
+ __blk_end_request_all(req, err);
}
spin_unlock(&gdrom_lock);
kfree(read_command);
}
-static void gdrom_request_handler_dma(struct request *req)
-{
- /* dequeue, add to list of deferred work
- * and then schedule workqueue */
- blkdev_dequeue_request(req);
- list_add_tail(&req->queuelist, &gdrom_deferred);
- schedule_work(&work);
-}
-
static void gdrom_request(struct request_queue *rq)
{
struct request *req;
- while ((req = elv_next_request(rq)) != NULL) {
+ while ((req = blk_fetch_request(rq)) != NULL) {
if (!blk_fs_request(req)) {
printk(KERN_DEBUG "GDROM: Non-fs request ignored\n");
- end_request(req, 0);
+ __blk_end_request_all(req, -EIO);
+ continue;
}
if (rq_data_dir(req) != READ) {
printk(KERN_NOTICE "GDROM: Read only device -");
printk(" write request ignored\n");
- end_request(req, 0);
+ __blk_end_request_all(req, -EIO);
+ continue;
}
- if (req->nr_sectors)
- gdrom_request_handler_dma(req);
- else
- end_request(req, 0);
+
+ /*
+ * Add to list of deferred work and then schedule
+ * workqueue.
+ */
+ list_add_tail(&req->queuelist, &gdrom_deferred);
+ schedule_work(&work);
}
}
@@ -743,7 +739,7 @@ static void __devinit probe_gdrom_setupdisk(void)
static int __devinit probe_gdrom_setupqueue(void)
{
- blk_queue_hardsect_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
+ blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
/* using DMA so memory will need to be contiguous */
blk_queue_max_hw_segments(gd.gdrom_rq, 1);
/* set a large max size to get most from DMA */
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 9b1624e0ddeb..0fff646cc2f0 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -282,7 +282,7 @@ static int send_request(struct request *req)
viopath_targetinst(viopath_hostLp),
(u64)req, VIOVERSION << 16,
((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr,
- (u64)req->sector * 512, len, 0);
+ (u64)blk_rq_pos(req) * 512, len, 0);
if (hvrc != HvLpEvent_Rc_Good) {
printk(VIOCD_KERN_WARNING "hv error on op %d\n", (int)hvrc);
return -1;
@@ -291,36 +291,19 @@ static int send_request(struct request *req)
return 0;
}
-static void viocd_end_request(struct request *req, int error)
-{
- int nsectors = req->hard_nr_sectors;
-
- /*
- * Make sure it's fully ended, and ensure that we process
- * at least one sector.
- */
- if (blk_pc_request(req))
- nsectors = (req->data_len + 511) >> 9;
- if (!nsectors)
- nsectors = 1;
-
- if (__blk_end_request(req, error, nsectors << 9))
- BUG();
-}
-
static int rwreq;
static void do_viocd_request(struct request_queue *q)
{
struct request *req;
- while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) {
+ while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) {
if (!blk_fs_request(req))
- viocd_end_request(req, -EIO);
+ __blk_end_request_all(req, -EIO);
else if (send_request(req) < 0) {
printk(VIOCD_KERN_WARNING
"unable to send message to OS/400!");
- viocd_end_request(req, -EIO);
+ __blk_end_request_all(req, -EIO);
} else
rwreq++;
}
@@ -486,8 +469,8 @@ static void vio_handle_cd_event(struct HvLpEvent *event)
case viocdopen:
if (event->xRc == 0) {
di = &viocd_diskinfo[bevent->disk];
- blk_queue_hardsect_size(di->viocd_disk->queue,
- bevent->block_size);
+ blk_queue_logical_block_size(di->viocd_disk->queue,
+ bevent->block_size);
set_capacity(di->viocd_disk,
bevent->media_size *
bevent->block_size / 512);
@@ -531,9 +514,9 @@ return_complete:
"with rc %d:0x%04X: %s\n",
req, event->xRc,
bevent->sub_result, err->msg);
- viocd_end_request(req, -EIO);
+ __blk_end_request_all(req, -EIO);
} else
- viocd_end_request(req, 0);
+ __blk_end_request_all(req, 0);
/* restart handling of incoming requests */
spin_unlock_irqrestore(&viocd_reqlock, flags);
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 20d90e6a6e50..db32f0e4c7dd 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -71,7 +71,7 @@ static int raw_open(struct inode *inode, struct file *filp)
err = bd_claim(bdev, raw_open);
if (err)
goto out1;
- err = set_blocksize(bdev, bdev_hardsect_size(bdev));
+ err = set_blocksize(bdev, bdev_logical_block_size(bdev));
if (err)
goto out2;
filp->f_flags |= O_DIRECT;
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index afe5a4323879..757e5956b132 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -246,6 +246,7 @@ EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
*/
void ide_retry_pc(ide_drive_t *drive)
{
+ struct request *failed_rq = drive->hwif->rq;
struct request *sense_rq = &drive->sense_rq;
struct ide_atapi_pc *pc = &drive->request_sense_pc;
@@ -255,13 +256,22 @@ void ide_retry_pc(ide_drive_t *drive)
ide_init_pc(pc);
memcpy(pc->c, sense_rq->cmd, 12);
pc->buf = bio_data(sense_rq->bio); /* pointer to mapped address */
- pc->req_xfer = sense_rq->data_len;
+ pc->req_xfer = blk_rq_bytes(sense_rq);
if (drive->media == ide_tape)
set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
- if (ide_queue_sense_rq(drive, pc))
- ide_complete_rq(drive, -EIO, blk_rq_bytes(drive->hwif->rq));
+ /*
+ * Push back the failed request and put request sense on top
+ * of it. The failed command will be retried after sense data
+ * is acquired.
+ */
+ blk_requeue_request(failed_rq->q, failed_rq);
+ drive->hwif->rq = NULL;
+ if (ide_queue_sense_rq(drive, pc)) {
+ blk_start_request(failed_rq);
+ ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq));
+ }
}
EXPORT_SYMBOL_GPL(ide_retry_pc);
@@ -303,7 +313,7 @@ int ide_cd_get_xferlen(struct request *rq)
return 32768;
else if (blk_sense_request(rq) || blk_pc_request(rq) ||
rq->cmd_type == REQ_TYPE_ATA_PC)
- return rq->data_len;
+ return blk_rq_bytes(rq);
else
return 0;
}
@@ -367,7 +377,6 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
/* No more interrupts */
if ((stat & ATA_DRQ) == 0) {
int uptodate, error;
- unsigned int done;
debug_log("Packet command completed, %d bytes transferred\n",
pc->xferred);
@@ -431,7 +440,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
error = uptodate ? 0 : -EIO;
}
- ide_complete_rq(drive, error, done);
+ ide_complete_rq(drive, error, blk_rq_bytes(rq));
return ide_stopped;
}
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index a75e4ee1cd17..424140c6c400 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -182,7 +182,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
(sense->information[2] << 8) |
(sense->information[3]);
- if (drive->queue->hardsect_size == 2048)
+ if (queue_logical_block_size(drive->queue) == 2048)
/* device sector size is 2K */
sector <<= 2;
@@ -404,15 +404,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
end_request:
if (stat & ATA_ERR) {
- struct request_queue *q = drive->queue;
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- blkdev_dequeue_request(rq);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
hwif->rq = NULL;
-
return ide_queue_sense_rq(drive, rq) ? 2 : 1;
} else
return 2;
@@ -518,7 +510,7 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
error = blk_execute_rq(drive->queue, info->disk, rq, 0);
if (buffer)
- *bufflen = rq->data_len;
+ *bufflen = rq->resid_len;
flags = rq->cmd_flags;
blk_put_request(rq);
@@ -576,7 +568,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
struct request *rq = hwif->rq;
ide_expiry_t *expiry = NULL;
int dma_error = 0, dma, thislen, uptodate = 0;
- int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0, nsectors;
+ int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
int sense = blk_sense_request(rq);
unsigned int timeout;
u16 len;
@@ -706,13 +698,8 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
out_end:
if (blk_pc_request(rq) && rc == 0) {
- unsigned int dlen = rq->data_len;
-
- rq->data_len = 0;
-
- if (blk_end_request(rq, 0, dlen))
- BUG();
-
+ rq->resid_len = 0;
+ blk_end_request_all(rq, 0);
hwif->rq = NULL;
} else {
if (sense && uptodate)
@@ -730,21 +717,13 @@ out_end:
ide_cd_error_cmd(drive, cmd);
/* make sure it's fully ended */
- if (blk_pc_request(rq))
- nsectors = (rq->data_len + 511) >> 9;
- else
- nsectors = rq->hard_nr_sectors;
-
- if (nsectors == 0)
- nsectors = 1;
-
if (blk_fs_request(rq) == 0) {
- rq->data_len -= (cmd->nbytes - cmd->nleft);
+ rq->resid_len -= cmd->nbytes - cmd->nleft;
if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
- rq->data_len += cmd->last_xfer_len;
+ rq->resid_len += cmd->last_xfer_len;
}
- ide_complete_rq(drive, uptodate ? 0 : -EIO, nsectors << 9);
+ ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq));
if (sense && rc == 2)
ide_error(drive, "request sense failure", stat);
@@ -758,7 +737,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
struct request_queue *q = drive->queue;
int write = rq_data_dir(rq) == WRITE;
unsigned short sectors_per_frame =
- queue_hardsect_size(q) >> SECTOR_BITS;
+ queue_logical_block_size(q) >> SECTOR_BITS;
ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, "
"secs_per_frame: %u",
@@ -777,8 +756,8 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
}
/* fs requests *must* be hardware frame aligned */
- if ((rq->nr_sectors & (sectors_per_frame - 1)) ||
- (rq->sector & (sectors_per_frame - 1)))
+ if ((blk_rq_sectors(rq) & (sectors_per_frame - 1)) ||
+ (blk_rq_pos(rq) & (sectors_per_frame - 1)))
return ide_stopped;
/* use DMA, if possible */
@@ -821,7 +800,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
*/
alignment = queue_dma_alignment(q) | q->dma_pad_mask;
if ((unsigned long)buf & alignment
- || rq->data_len & q->dma_pad_mask
+ || blk_rq_bytes(rq) & q->dma_pad_mask
|| object_is_on_stack(buf))
drive->dma = 0;
}
@@ -869,15 +848,14 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
cmd.rq = rq;
- if (blk_fs_request(rq) || rq->data_len) {
- ide_init_sg_cmd(&cmd, blk_fs_request(rq) ? (rq->nr_sectors << 9)
- : rq->data_len);
+ if (blk_fs_request(rq) || blk_rq_bytes(rq)) {
+ ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
ide_map_sg(drive, &cmd);
}
return ide_issue_pc(drive, &cmd);
out_end:
- nsectors = rq->hard_nr_sectors;
+ nsectors = blk_rq_sectors(rq);
if (nsectors == 0)
nsectors = 1;
@@ -1043,8 +1021,8 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
/* save a private copy of the TOC capacity for error handling */
drive->probed_capacity = toc->capacity * sectors_per_frame;
- blk_queue_hardsect_size(drive->queue,
- sectors_per_frame << SECTOR_BITS);
+ blk_queue_logical_block_size(drive->queue,
+ sectors_per_frame << SECTOR_BITS);
/* first read just the header, so we know how long the TOC is */
stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
@@ -1360,9 +1338,9 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
/* standard prep_rq_fn that builds 10 byte cmds */
static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
{
- int hard_sect = queue_hardsect_size(q);
- long block = (long)rq->hard_sector / (hard_sect >> 9);
- unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9);
+ int hard_sect = queue_logical_block_size(q);
+ long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
+ unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
memset(rq->cmd, 0, BLK_MAX_CDB);
@@ -1565,7 +1543,7 @@ static int ide_cdrom_setup(ide_drive_t *drive)
nslots = ide_cdrom_probe_capabilities(drive);
- blk_queue_hardsect_size(q, CD_FRAMESIZE);
+ blk_queue_logical_block_size(q, CD_FRAMESIZE);
if (ide_cdrom_register(drive, nslots)) {
printk(KERN_ERR PFX "%s: %s failed to register device with the"
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index c2438804d3c4..c6f7fcfb9d67 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -82,7 +82,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
sector_t block)
{
ide_hwif_t *hwif = drive->hwif;
- u16 nsectors = (u16)rq->nr_sectors;
+ u16 nsectors = (u16)blk_rq_sectors(rq);
u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48);
u8 dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
struct ide_cmd cmd;
@@ -90,7 +90,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
ide_startstop_t rc;
if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
- if (block + rq->nr_sectors > 1ULL << 28)
+ if (block + blk_rq_sectors(rq) > 1ULL << 28)
dma = 0;
else
lba48 = 0;
@@ -195,9 +195,9 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
ledtrig_ide_activity();
- pr_debug("%s: %sing: block=%llu, sectors=%lu, buffer=0x%08lx\n",
+ pr_debug("%s: %sing: block=%llu, sectors=%u, buffer=0x%08lx\n",
drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
- (unsigned long long)block, rq->nr_sectors,
+ (unsigned long long)block, blk_rq_sectors(rq),
(unsigned long)rq->buffer);
if (hwif->rw_disk)
@@ -639,7 +639,7 @@ static void ide_disk_setup(ide_drive_t *drive)
}
printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
- q->max_sectors / 2);
+ queue_max_sectors(q) / 2);
if (ata_id_is_ssd(id))
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index d9123ecae4a9..001f68f0bb28 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -103,7 +103,7 @@ ide_startstop_t ide_dma_intr(ide_drive_t *drive)
ide_finish_cmd(drive, cmd, stat);
else
ide_complete_rq(drive, 0,
- cmd->rq->nr_sectors << 9);
+ blk_rq_sectors(cmd->rq) << 9);
return ide_stopped;
}
printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 537b7c558033..650981758f15 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -194,7 +194,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
{
struct ide_disk_obj *floppy = drive->driver_data;
int block = sector / floppy->bs_factor;
- int blocks = rq->nr_sectors / floppy->bs_factor;
+ int blocks = blk_rq_sectors(rq) / floppy->bs_factor;
int cmd = rq_data_dir(rq);
ide_debug_log(IDE_DBG_FUNC, "block: %d, blocks: %d", block, blocks);
@@ -220,14 +220,14 @@ static void idefloppy_blockpc_cmd(struct ide_disk_obj *floppy,
ide_init_pc(pc);
memcpy(pc->c, rq->cmd, sizeof(pc->c));
pc->rq = rq;
- if (rq->data_len) {
+ if (blk_rq_bytes(rq)) {
pc->flags |= PC_FLAG_DMA_OK;
if (rq_data_dir(rq) == WRITE)
pc->flags |= PC_FLAG_WRITING;
}
/* pio will be performed by ide_pio_bytes() which handles sg fine */
pc->buf = NULL;
- pc->req_xfer = pc->buf_size = rq->data_len;
+ pc->req_xfer = pc->buf_size = blk_rq_bytes(rq);
}
static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
@@ -259,8 +259,8 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
goto out_end;
}
if (blk_fs_request(rq)) {
- if (((long)rq->sector % floppy->bs_factor) ||
- (rq->nr_sectors % floppy->bs_factor)) {
+ if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
+ (blk_rq_sectors(rq) % floppy->bs_factor)) {
printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
drive->name);
goto out_end;
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 41d804065d38..bba4297f2f03 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -116,9 +116,9 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
unsigned int ide_rq_bytes(struct request *rq)
{
if (blk_pc_request(rq))
- return rq->data_len;
+ return blk_rq_bytes(rq);
else
- return rq->hard_cur_sectors << 9;
+ return blk_rq_cur_sectors(rq) << 9;
}
EXPORT_SYMBOL_GPL(ide_rq_bytes);
@@ -133,7 +133,7 @@ int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
* and complete the whole request right now
*/
if (blk_noretry_request(rq) && error <= 0)
- nr_bytes = rq->hard_nr_sectors << 9;
+ nr_bytes = blk_rq_sectors(rq) << 9;
rc = ide_end_rq(drive, rq, error, nr_bytes);
if (rc == 0)
@@ -279,7 +279,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
if (cmd) {
if (cmd->protocol == ATA_PROT_PIO) {
- ide_init_sg_cmd(cmd, rq->nr_sectors << 9);
+ ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
ide_map_sg(drive, cmd);
}
@@ -387,7 +387,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
drv = *(struct ide_driver **)rq->rq_disk->private_data;
- return drv->do_request(drive, rq, rq->sector);
+ return drv->do_request(drive, rq, blk_rq_pos(rq));
}
return do_special(drive);
kill_rq:
@@ -487,10 +487,10 @@ void do_ide_request(struct request_queue *q)
if (!ide_lock_port(hwif)) {
ide_hwif_t *prev_port;
+
+ WARN_ON_ONCE(hwif->rq);
repeat:
prev_port = hwif->host->cur_port;
- hwif->rq = NULL;
-
if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
time_after(drive->sleep, jiffies)) {
ide_unlock_port(hwif);
@@ -519,7 +519,9 @@ repeat:
* we know that the queue isn't empty, but this can happen
* if the q->prep_rq_fn() decides to kill a request
*/
- rq = elv_next_request(drive->queue);
+ if (!rq)
+ rq = blk_fetch_request(drive->queue);
+
spin_unlock_irq(q->queue_lock);
spin_lock_irq(&hwif->lock);
@@ -531,7 +533,7 @@ repeat:
/*
* Sanity: don't accept a request that isn't a PM request
* if we are currently power managed. This is very important as
- * blk_stop_queue() doesn't prevent the elv_next_request()
+ * blk_stop_queue() doesn't prevent the blk_fetch_request()
* above to return us whatever is in the queue. Since we call
* ide_do_request() ourselves, we end up taking requests while
* the queue is blocked...
@@ -555,8 +557,11 @@ repeat:
startstop = start_request(drive, rq);
spin_lock_irq(&hwif->lock);
- if (startstop == ide_stopped)
+ if (startstop == ide_stopped) {
+ rq = hwif->rq;
+ hwif->rq = NULL;
goto repeat;
+ }
} else
goto plug_device;
out:
@@ -572,18 +577,24 @@ plug_device:
plug_device_2:
spin_lock_irq(q->queue_lock);
+ if (rq)
+ blk_requeue_request(q, rq);
if (!elv_queue_empty(q))
blk_plug_device(q);
}
-static void ide_plug_device(ide_drive_t *drive)
+static void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
{
struct request_queue *q = drive->queue;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
+
+ if (rq)
+ blk_requeue_request(q, rq);
if (!elv_queue_empty(q))
blk_plug_device(q);
+
spin_unlock_irqrestore(q->queue_lock, flags);
}
@@ -632,6 +643,7 @@ void ide_timer_expiry (unsigned long data)
unsigned long flags;
int wait = -1;
int plug_device = 0;
+ struct request *uninitialized_var(rq_in_flight);
spin_lock_irqsave(&hwif->lock, flags);
@@ -693,6 +705,8 @@ void ide_timer_expiry (unsigned long data)
spin_lock_irq(&hwif->lock);
enable_irq(hwif->irq);
if (startstop == ide_stopped && hwif->polling == 0) {
+ rq_in_flight = hwif->rq;
+ hwif->rq = NULL;
ide_unlock_port(hwif);
plug_device = 1;
}
@@ -701,7 +715,7 @@ void ide_timer_expiry (unsigned long data)
if (plug_device) {
ide_unlock_host(hwif->host);
- ide_plug_device(drive);
+ ide_requeue_and_plug(drive, rq_in_flight);
}
}
@@ -787,6 +801,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
ide_startstop_t startstop;
irqreturn_t irq_ret = IRQ_NONE;
int plug_device = 0;
+ struct request *uninitialized_var(rq_in_flight);
if (host->host_flags & IDE_HFLAG_SERIALIZE) {
if (hwif != host->cur_port)
@@ -866,6 +881,8 @@ irqreturn_t ide_intr (int irq, void *dev_id)
*/
if (startstop == ide_stopped && hwif->polling == 0) {
BUG_ON(hwif->handler);
+ rq_in_flight = hwif->rq;
+ hwif->rq = NULL;
ide_unlock_port(hwif);
plug_device = 1;
}
@@ -875,7 +892,7 @@ out:
out_early:
if (plug_device) {
ide_unlock_host(hwif->host);
- ide_plug_device(drive);
+ ide_requeue_and_plug(drive, rq_in_flight);
}
return irq_ret;
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 2148df836ce7..e386a32dc9ba 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -96,7 +96,7 @@ static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
if (rq)
printk(KERN_CONT ", sector=%llu",
- (unsigned long long)rq->sector);
+ (unsigned long long)blk_rq_pos(rq));
}
printk(KERN_CONT "\n");
}
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 203bbeac182f..d9764f0bc82f 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -380,7 +380,7 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc)
}
tape->first_frame += blocks;
- rq->data_len -= blocks * tape->blk_size;
+ rq->resid_len -= blocks * tape->blk_size;
if (pc->error) {
uptodate = 0;
@@ -586,7 +586,7 @@ static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
struct ide_atapi_pc *pc, struct request *rq,
u8 opcode)
{
- unsigned int length = rq->nr_sectors;
+ unsigned int length = blk_rq_sectors(rq);
ide_init_pc(pc);
put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
@@ -617,8 +617,8 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
struct ide_cmd cmd;
u8 stat;
- debug_log(DBG_SENSE, "sector: %llu, nr_sectors: %lu\n",
- (unsigned long long)rq->sector, rq->nr_sectors);
+ debug_log(DBG_SENSE, "sector: %llu, nr_sectors: %u\n"
+ (unsigned long long)blk_rq_pos(rq), blk_rq_sectors(rq));
if (!(blk_special_request(rq) || blk_sense_request(rq))) {
/* We do not support buffer cache originated requests. */
@@ -892,7 +892,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
rq->cmd_type = REQ_TYPE_SPECIAL;
rq->cmd[13] = cmd;
rq->rq_disk = tape->disk;
- rq->sector = tape->first_frame;
+ rq->__sector = tape->first_frame;
if (size) {
ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size,
@@ -904,7 +904,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
blk_execute_rq(drive->queue, tape->disk, rq, 0);
/* calculate the number of transferred bytes and update buffer state */
- size -= rq->data_len;
+ size -= rq->resid_len;
tape->cur = tape->buf;
if (cmd == REQ_IDETAPE_READ)
tape->valid = size;
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index f400eb4d4aff..a0c3e1b2f73c 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -385,7 +385,7 @@ out_end:
if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
ide_finish_cmd(drive, cmd, stat);
else
- ide_complete_rq(drive, 0, cmd->rq->nr_sectors << 9);
+ ide_complete_rq(drive, 0, blk_rq_sectors(cmd->rq) << 9);
return ide_stopped;
out_err:
ide_error_cmd(drive, cmd);
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index b3bc96f930a6..e24ecc87a9b1 100644
--- a/drivers/ide/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
@@ -177,7 +177,7 @@ static void pdc202xx_dma_start(ide_drive_t *drive)
u8 clock = inb(high_16 + 0x11);
outb(clock | (hwif->channel ? 0x08 : 0x02), high_16 + 0x11);
- word_count = (rq->nr_sectors << 8);
+ word_count = (blk_rq_sectors(rq) << 8);
word_count = (rq_data_dir(rq) == READ) ?
word_count | 0x05000000 :
word_count | 0x06000000;
diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
index b4cf42dc8a6f..05a93d6baecc 100644
--- a/drivers/ide/tc86c001.c
+++ b/drivers/ide/tc86c001.c
@@ -112,7 +112,7 @@ static void tc86c001_dma_start(ide_drive_t *drive)
ide_hwif_t *hwif = drive->hwif;
unsigned long sc_base = hwif->config_data;
unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
- unsigned long nsectors = hwif->rq->nr_sectors;
+ unsigned long nsectors = blk_rq_sectors(hwif->rq);
/*
* We have to manually load the sector count and size into
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index 564422d23976..5ca76224f6d1 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -307,7 +307,7 @@ static int tx4939ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
tx4939ide_writew(SECTOR_SIZE / 2, base, drive->dn ?
TX4939IDE_Xfer_Cnt_2 : TX4939IDE_Xfer_Cnt_1);
- tx4939ide_writew(cmd->rq->nr_sectors, base, TX4939IDE_Sec_Cnt);
+ tx4939ide_writew(blk_rq_sectors(cmd->rq), base, TX4939IDE_Sec_Cnt);
return 0;
}
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 56df1cee8fb3..3319c2fec28e 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -232,7 +232,7 @@ static struct page *read_sb_page(mddev_t *mddev, long offset,
target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
if (sync_page_io(rdev->bdev, target,
- roundup(size, bdev_hardsect_size(rdev->bdev)),
+ roundup(size, bdev_logical_block_size(rdev->bdev)),
page, READ)) {
page->index = index;
attach_page_buffers(page, NULL); /* so that free_buffer will
@@ -287,7 +287,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
int size = PAGE_SIZE;
if (page->index == bitmap->file_pages-1)
size = roundup(bitmap->last_page_size,
- bdev_hardsect_size(rdev->bdev));
+ bdev_logical_block_size(rdev->bdev));
/* Just make sure we aren't corrupting data or
* metadata
*/
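
The hardsect_size terminology is retired in favour of logical_block_size; the bdev_hardsect_size() helper becomes bdev_logical_block_size(). The rounding idiom used in the bitmap code above, as a standalone sketch (illustrative only):

#include <linux/kernel.h>
#include <linux/blkdev.h>

static unsigned int round_to_block(struct block_device *bdev, unsigned int size)
{
	/* was roundup(size, bdev_hardsect_size(bdev)) */
	return roundup(size, bdev_logical_block_size(bdev));
}
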
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index a2e26c242141..75d8081a9041 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -178,7 +178,7 @@ static int set_chunk_size(struct dm_exception_store *store,
}
/* Validate the chunk size against the device block size */
- if (chunk_size_ulong % (bdev_hardsect_size(store->cow->bdev) >> 9)) {
+ if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
*error = "Chunk size is not a multiple of device blocksize";
return -EINVAL;
}
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index be233bc4d917..6fa8ccf91c70 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -413,7 +413,8 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
* Buffer holds both header and bitset.
*/
buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
- bitset_size, ti->limits.hardsect_size);
+ bitset_size,
+ ti->limits.logical_block_size);
if (buf_size > dev->bdev->bd_inode->i_size) {
DMWARN("log device %s too small: need %llu bytes",
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index e75c6dd76a9a..2662a41337e7 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -282,7 +282,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
*/
if (!ps->store->chunk_size) {
ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
- bdev_hardsect_size(ps->store->cow->bdev) >> 9);
+ bdev_logical_block_size(ps->store->cow->bdev) >> 9);
ps->store->chunk_mask = ps->store->chunk_size - 1;
ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
chunk_size_supplied = 0;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 429b50b975d5..e9a73bb242b0 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -108,7 +108,8 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
lhs->max_hw_segments =
min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);
- lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);
+ lhs->logical_block_size = max(lhs->logical_block_size,
+ rhs->logical_block_size);
lhs->max_segment_size =
min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
@@ -509,7 +510,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
* combine_restrictions_low()
*/
rs->max_sectors =
- min_not_zero(rs->max_sectors, q->max_sectors);
+ min_not_zero(rs->max_sectors, queue_max_sectors(q));
/*
* Check if merge fn is supported.
@@ -524,24 +525,25 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
rs->max_phys_segments =
min_not_zero(rs->max_phys_segments,
- q->max_phys_segments);
+ queue_max_phys_segments(q));
rs->max_hw_segments =
- min_not_zero(rs->max_hw_segments, q->max_hw_segments);
+ min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));
- rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);
+ rs->logical_block_size = max(rs->logical_block_size,
+ queue_logical_block_size(q));
rs->max_segment_size =
- min_not_zero(rs->max_segment_size, q->max_segment_size);
+ min_not_zero(rs->max_segment_size, queue_max_segment_size(q));
rs->max_hw_sectors =
- min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
+ min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));
rs->seg_boundary_mask =
min_not_zero(rs->seg_boundary_mask,
- q->seg_boundary_mask);
+ queue_segment_boundary(q));
- rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
+ rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));
rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
}
@@ -683,8 +685,8 @@ static void check_for_valid_limits(struct io_restrictions *rs)
rs->max_phys_segments = MAX_PHYS_SEGMENTS;
if (!rs->max_hw_segments)
rs->max_hw_segments = MAX_HW_SEGMENTS;
- if (!rs->hardsect_size)
- rs->hardsect_size = 1 << SECTOR_SHIFT;
+ if (!rs->logical_block_size)
+ rs->logical_block_size = 1 << SECTOR_SHIFT;
if (!rs->max_segment_size)
rs->max_segment_size = MAX_SEGMENT_SIZE;
if (!rs->seg_boundary_mask)
@@ -912,13 +914,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
* restrictions.
*/
blk_queue_max_sectors(q, t->limits.max_sectors);
- q->max_phys_segments = t->limits.max_phys_segments;
- q->max_hw_segments = t->limits.max_hw_segments;
- q->hardsect_size = t->limits.hardsect_size;
- q->max_segment_size = t->limits.max_segment_size;
- q->max_hw_sectors = t->limits.max_hw_sectors;
- q->seg_boundary_mask = t->limits.seg_boundary_mask;
- q->bounce_pfn = t->limits.bounce_pfn;
+ blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
+ blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
+ blk_queue_logical_block_size(q, t->limits.logical_block_size);
+ blk_queue_max_segment_size(q, t->limits.max_segment_size);
+ blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
+ blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
+ blk_queue_bounce_limit(q, t->limits.bounce_pfn);
if (t->limits.no_cluster)
queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
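
dm stops poking request_queue limit fields directly and goes through the blk_queue_* setters, which can keep related limits consistent. The pattern of the dm_table_set_restrictions() hunk above, in isolation (illustrative only; io_restrictions is dm's own limits struct, as seen in the hunks):

#include <linux/blkdev.h>
#include <linux/device-mapper.h>

static void apply_limits(struct request_queue *q, struct io_restrictions *rs)
{
	blk_queue_max_sectors(q, rs->max_sectors);
	blk_queue_max_phys_segments(q, rs->max_phys_segments);
	blk_queue_max_hw_segments(q, rs->max_hw_segments);
	blk_queue_logical_block_size(q, rs->logical_block_size);
	blk_queue_max_segment_size(q, rs->max_segment_size);
	blk_queue_max_hw_sectors(q, rs->max_hw_sectors);
	blk_queue_segment_boundary(q, rs->seg_boundary_mask);
	blk_queue_bounce_limit(q, rs->bounce_pfn);
}

Reads go through the matching queue_*() getters (queue_max_sectors(), queue_logical_block_size(), ...), as in the dm_set_device_limits() hunk.
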
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 7a36e38393a1..64f1f3e046e0 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -146,7 +146,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
disk->num_sectors = rdev->sectors;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 641b211fe3fe..20f6ac338349 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1202,7 +1202,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
- bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
+ bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
if (rdev->sb_size & bmask)
rdev->sb_size = (rdev->sb_size | bmask) + 1;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 41ced0cbe823..4ee31aa13c40 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -303,7 +303,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
* merge_bvec_fn will be involved in multipath.)
*/
if (q->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(q) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
conf->working_disks++;
@@ -467,7 +467,7 @@ static int multipath_run (mddev_t *mddev)
* violating it, not that we ever expect a device with
* a merge_bvec_fn to be involved in multipath */
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
if (!test_bit(Faulty, &rdev->flags))
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c08d7559be55..925507e7d673 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -144,7 +144,7 @@ static int create_strip_zones (mddev_t *mddev)
*/
if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
if (!smallest || (rdev1->sectors < smallest->sectors))
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 36df9109cde1..e23758b4a34e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1130,7 +1130,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
p->head_position = 0;
@@ -1996,7 +1996,7 @@ static int run(mddev_t *mddev)
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
disk->head_position = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 499620afb44b..750550c1166f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1158,8 +1158,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
- mddev->queue->max_sectors = (PAGE_SIZE>>9);
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+ blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
p->head_position = 0;
rdev->raid_disk = mirror;
@@ -2145,8 +2145,8 @@ static int run(mddev_t *mddev)
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
- mddev->queue->max_sectors = (PAGE_SIZE>>9);
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+ blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
disk->head_position = 0;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index bb37fb1b2d82..bef876698232 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3463,10 +3463,10 @@ static int bio_fits_rdev(struct bio *bi)
{
struct request_queue *q = bdev_get_queue(bi->bi_bdev);
- if ((bi->bi_size>>9) > q->max_sectors)
+ if ((bi->bi_size>>9) > queue_max_sectors(q))
return 0;
blk_recount_segments(q, bi);
- if (bi->bi_phys_segments > q->max_phys_segments)
+ if (bi->bi_phys_segments > queue_max_phys_segments(q))
return 0;
if (q->merge_bvec_fn)
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index de143deb06f0..7847bbc1440d 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -672,15 +672,14 @@ try_again:
msb->req_sg);
if (!msb->seg_count) {
- chunk = __blk_end_request(msb->block_req, -ENOMEM,
- blk_rq_cur_bytes(msb->block_req));
+ chunk = __blk_end_request_cur(msb->block_req, -ENOMEM);
continue;
}
- t_sec = msb->block_req->sector << 9;
+ t_sec = blk_rq_pos(msb->block_req) << 9;
sector_div(t_sec, msb->page_size);
- count = msb->block_req->nr_sectors << 9;
+ count = blk_rq_bytes(msb->block_req);
count /= msb->page_size;
param.system = msb->system;
@@ -705,8 +704,8 @@ try_again:
return 0;
}
- dev_dbg(&card->dev, "elv_next\n");
- msb->block_req = elv_next_request(msb->queue);
+ dev_dbg(&card->dev, "blk_fetch\n");
+ msb->block_req = blk_fetch_request(msb->queue);
if (!msb->block_req) {
dev_dbg(&card->dev, "issue end\n");
return -EAGAIN;
@@ -745,7 +744,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
t_len *= msb->page_size;
}
} else
- t_len = msb->block_req->nr_sectors << 9;
+ t_len = blk_rq_bytes(msb->block_req);
dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error);
@@ -825,8 +824,8 @@ static void mspro_block_submit_req(struct request_queue *q)
return;
if (msb->eject) {
- while ((req = elv_next_request(q)) != NULL)
- __blk_end_request(req, -ENODEV, blk_rq_bytes(req));
+ while ((req = blk_fetch_request(q)) != NULL)
+ __blk_end_request_all(req, -ENODEV);
return;
}
@@ -1243,7 +1242,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)
sprintf(msb->disk->disk_name, "mspblk%d", disk_id);
- blk_queue_hardsect_size(msb->queue, msb->page_size);
+ blk_queue_logical_block_size(msb->queue, msb->page_size);
capacity = be16_to_cpu(sys_info->user_block_count);
capacity *= be16_to_cpu(sys_info->block_size);
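
elv_next_request() is gone; blk_fetch_request() peeks at the head of the queue and dequeues the request in one step. Draining a dead queue, as in the eject path above, reduces to (illustrative only; caller holds the queue lock):

#include <linux/blkdev.h>

static void drain_dead_queue(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL)
		__blk_end_request_all(req, -ENODEV);
}
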
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index a9019f081b97..79f5433359f9 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1277,8 +1277,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
/* do we need to support multiple segments? */
if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
- ioc->name, __func__, req->bio->bi_vcnt, req->data_len,
- rsp->bio->bi_vcnt, rsp->data_len);
+ ioc->name, __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
+ rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
return -EINVAL;
}
@@ -1295,7 +1295,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
smpreq = (SmpPassthroughRequest_t *)mf;
memset(smpreq, 0, sizeof(*smpreq));
- smpreq->RequestDataLength = cpu_to_le16(req->data_len - 4);
+ smpreq->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
if (rphy)
@@ -1321,10 +1321,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
MPI_SGE_FLAGS_END_OF_BUFFER |
MPI_SGE_FLAGS_DIRECTION |
mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT;
- flagsLength |= (req->data_len - 4);
+ flagsLength |= (blk_rq_bytes(req) - 4);
dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
- req->data_len, PCI_DMA_BIDIRECTIONAL);
+ blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
if (!dma_addr_out)
goto put_mf;
mpt_add_sge(psge, flagsLength, dma_addr_out);
@@ -1332,9 +1332,9 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
/* response */
flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
- flagsLength |= rsp->data_len + 4;
+ flagsLength |= blk_rq_bytes(rsp) + 4;
dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio),
- rsp->data_len, PCI_DMA_BIDIRECTIONAL);
+ blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
if (!dma_addr_in)
goto unmap;
mpt_add_sge(psge, flagsLength, dma_addr_in);
@@ -1357,8 +1357,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
memcpy(req->sense, smprep, sizeof(*smprep));
req->sense_len = sizeof(*smprep);
- req->data_len = 0;
- rsp->data_len -= smprep->ResponseDataLength;
+ req->resid_len = 0;
+ rsp->resid_len -= smprep->ResponseDataLength;
} else {
printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n",
ioc->name, __func__);
@@ -1366,10 +1366,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
}
unmap:
if (dma_addr_out)
- pci_unmap_single(ioc->pcidev, dma_addr_out, req->data_len,
+ pci_unmap_single(ioc->pcidev, dma_addr_out, blk_rq_bytes(req),
PCI_DMA_BIDIRECTIONAL);
if (dma_addr_in)
- pci_unmap_single(ioc->pcidev, dma_addr_in, rsp->data_len,
+ pci_unmap_single(ioc->pcidev, dma_addr_in, blk_rq_bytes(rsp),
PCI_DMA_BIDIRECTIONAL);
put_mf:
if (mf)
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index a443e136dc41..335d4c78a775 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -426,15 +426,9 @@ static void i2o_block_end_request(struct request *req, int error,
struct request_queue *q = req->q;
unsigned long flags;
- if (blk_end_request(req, error, nr_bytes)) {
- int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);
-
- if (blk_pc_request(req))
- leftover = req->data_len;
-
+ if (blk_end_request(req, error, nr_bytes))
if (error)
- blk_end_request(req, -EIO, leftover);
- }
+ blk_end_request_all(req, -EIO);
spin_lock_irqsave(q->queue_lock, flags);
@@ -761,7 +755,7 @@ static int i2o_block_transfer(struct request *req)
break;
case CACHE_SMARTFETCH:
- if (req->nr_sectors > 16)
+ if (blk_rq_sectors(req) > 16)
ctl_flags = 0x201F0008;
else
ctl_flags = 0x001F0000;
@@ -781,13 +775,13 @@ static int i2o_block_transfer(struct request *req)
ctl_flags = 0x001F0010;
break;
case CACHE_SMARTBACK:
- if (req->nr_sectors > 16)
+ if (blk_rq_sectors(req) > 16)
ctl_flags = 0x001F0004;
else
ctl_flags = 0x001F0010;
break;
case CACHE_SMARTTHROUGH:
- if (req->nr_sectors > 16)
+ if (blk_rq_sectors(req) > 16)
ctl_flags = 0x001F0004;
else
ctl_flags = 0x001F0010;
@@ -800,8 +794,9 @@ static int i2o_block_transfer(struct request *req)
if (c->adaptec) {
u8 cmd[10];
u32 scsi_flags;
- u16 hwsec = queue_hardsect_size(req->q) >> KERNEL_SECTOR_SHIFT;
+ u16 hwsec;
+ hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT;
memset(cmd, 0, 10);
sgl_offset = SGL_OFFSET_12;
@@ -827,22 +822,22 @@ static int i2o_block_transfer(struct request *req)
*mptr++ = cpu_to_le32(scsi_flags);
- *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec);
- *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);
+ *((u32 *) & cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec);
+ *((u16 *) & cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec);
memcpy(mptr, cmd, 10);
mptr += 4;
- *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
+ *mptr++ = cpu_to_le32(blk_rq_bytes(req));
} else
#endif
{
msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
*mptr++ = cpu_to_le32(ctl_flags);
- *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
+ *mptr++ = cpu_to_le32(blk_rq_bytes(req));
*mptr++ =
- cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT));
+ cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));
*mptr++ =
- cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT));
+ cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT));
}
if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
@@ -883,7 +878,7 @@ static void i2o_block_request_fn(struct request_queue *q)
struct request *req;
while (!blk_queue_plugged(q)) {
- req = elv_next_request(q);
+ req = blk_peek_request(q);
if (!req)
break;
@@ -896,7 +891,7 @@ static void i2o_block_request_fn(struct request_queue *q)
if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
if (!i2o_block_transfer(req)) {
- blkdev_dequeue_request(req);
+ blk_start_request(req);
continue;
} else
osm_info("transfer error\n");
@@ -922,8 +917,10 @@ static void i2o_block_request_fn(struct request_queue *q)
blk_stop_queue(q);
break;
}
- } else
- end_request(req, 0);
+ } else {
+ blk_start_request(req);
+ __blk_end_request_all(req, -EIO);
+ }
}
};
@@ -1082,7 +1079,7 @@ static int i2o_block_probe(struct device *dev)
*/
if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
!i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
- blk_queue_hardsect_size(queue, le32_to_cpu(blocksize));
+ blk_queue_logical_block_size(queue, le32_to_cpu(blocksize));
} else
osm_warn("unable to get blocksize of %s\n", gd->disk_name);
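
Drivers that may decide not to service the head request split the fetch into blk_peek_request() plus blk_start_request(), as i2o_block_request_fn() does above. A sketch of that idiom (illustrative only; can_issue() and issue() are hypothetical driver hooks):

#include <linux/blkdev.h>

static void example_request_fn(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_peek_request(q)) != NULL) { /* was elv_next_request() */
		if (!can_issue(req))
			break;                  /* leave it on the queue */
		blk_start_request(req);         /* was blkdev_dequeue_request() */
		issue(req);
	}
}
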
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index b25e9b6516ae..98ffc41eaf2c 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -243,7 +243,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.mrq.cmd = &brq.cmd;
brq.mrq.data = &brq.data;
- brq.cmd.arg = req->sector;
+ brq.cmd.arg = blk_rq_pos(req);
if (!mmc_card_blockaddr(card))
brq.cmd.arg <<= 9;
brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
@@ -251,7 +251,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.stop.opcode = MMC_STOP_TRANSMISSION;
brq.stop.arg = 0;
brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
- brq.data.blocks = req->nr_sectors;
+ brq.data.blocks = blk_rq_sectors(req);
/*
* The block layer doesn't support all sector count
@@ -301,7 +301,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
* Adjust the sg list so it is the same size as the
* request.
*/
- if (brq.data.blocks != req->nr_sectors) {
+ if (brq.data.blocks != blk_rq_sectors(req)) {
int i, data_size = brq.data.blocks << 9;
struct scatterlist *sg;
@@ -352,8 +352,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
printk(KERN_ERR "%s: error %d transferring data,"
" sector %u, nr %u, card status %#x\n",
req->rq_disk->disk_name, brq.data.error,
- (unsigned)req->sector,
- (unsigned)req->nr_sectors, status);
+ (unsigned)blk_rq_pos(req),
+ (unsigned)blk_rq_sectors(req), status);
}
if (brq.stop.error) {
@@ -521,7 +521,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
sprintf(md->disk->disk_name, "mmcblk%d", devidx);
- blk_queue_hardsect_size(md->queue.queue, 512);
+ blk_queue_logical_block_size(md->queue.queue, 512);
if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
/*
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 7a72e75d5c67..49e582356c65 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -55,7 +55,7 @@ static int mmc_queue_thread(void *d)
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
if (!blk_queue_plugged(q))
- req = elv_next_request(q);
+ req = blk_fetch_request(q);
mq->req = req;
spin_unlock_irq(q->queue_lock);
@@ -88,16 +88,11 @@ static void mmc_request(struct request_queue *q)
{
struct mmc_queue *mq = q->queuedata;
struct request *req;
- int ret;
if (!mq) {
printk(KERN_ERR "MMC: killing requests for dead queue\n");
- while ((req = elv_next_request(q)) != NULL) {
- do {
- ret = __blk_end_request(req, -EIO,
- blk_rq_cur_bytes(req));
- } while (ret);
- }
+ while ((req = blk_fetch_request(q)) != NULL)
+ __blk_end_request_all(req, -EIO);
return;
}
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index a49a9c8f2cb1..aaac3b6800b7 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -47,40 +47,41 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
unsigned long block, nsect;
char *buf;
- block = req->sector << 9 >> tr->blkshift;
- nsect = req->current_nr_sectors << 9 >> tr->blkshift;
+ block = blk_rq_pos(req) << 9 >> tr->blkshift;
+ nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
buf = req->buffer;
if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
req->cmd[0] == REQ_LB_OP_DISCARD)
- return !tr->discard(dev, block, nsect);
+ return tr->discard(dev, block, nsect);
if (!blk_fs_request(req))
- return 0;
+ return -EIO;
- if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
- return 0;
+ if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
+ get_capacity(req->rq_disk))
+ return -EIO;
switch(rq_data_dir(req)) {
case READ:
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
if (tr->readsect(dev, block, buf))
- return 0;
- return 1;
+ return -EIO;
+ return 0;
case WRITE:
if (!tr->writesect)
- return 0;
+ return -EIO;
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
if (tr->writesect(dev, block, buf))
- return 0;
- return 1;
+ return -EIO;
+ return 0;
default:
printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
- return 0;
+ return -EIO;
}
}
@@ -88,19 +89,18 @@ static int mtd_blktrans_thread(void *arg)
{
struct mtd_blktrans_ops *tr = arg;
struct request_queue *rq = tr->blkcore_priv->rq;
+ struct request *req = NULL;
/* we might get involved when memory gets low, so use PF_MEMALLOC */
current->flags |= PF_MEMALLOC;
spin_lock_irq(rq->queue_lock);
+
while (!kthread_should_stop()) {
- struct request *req;
struct mtd_blktrans_dev *dev;
- int res = 0;
-
- req = elv_next_request(rq);
+ int res;
- if (!req) {
+ if (!req && !(req = blk_fetch_request(rq))) {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(rq->queue_lock);
schedule();
@@ -119,8 +119,13 @@ static int mtd_blktrans_thread(void *arg)
spin_lock_irq(rq->queue_lock);
- end_request(req, res);
+ if (!__blk_end_request_cur(req, res))
+ req = NULL;
}
+
+ if (req)
+ __blk_end_request_all(req, -EIO);
+
spin_unlock_irq(rq->queue_lock);
return 0;
@@ -373,7 +378,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
}
tr->blkcore_priv->rq->queuedata = tr;
- blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
+ blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize);
if (tr->discard)
blk_queue_set_discard(tr->blkcore_priv->rq,
blktrans_discard_request);
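
end_request(req, uptodate) is replaced by __blk_end_request_cur(req, error), which completes only the current segment and returns true while the request still has unfinished data. The mtd and jsflash loops in this patch both follow this shape (illustrative only; handle_cur_segment() is a hypothetical per-segment handler returning 0 or -EIO):

#include <linux/blkdev.h>

static void service_queue(struct request_queue *q)
{
	struct request *req = blk_fetch_request(q);

	while (req) {
		int err = handle_cur_segment(req);

		/* returns false once the whole request is done */
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
}
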
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index d1815272c435..27a1be0cd4d4 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -603,7 +603,7 @@ static void dasd_profile_end(struct dasd_block *block,
if (dasd_profile_level != DASD_PROFILE_ON)
return;
- sectors = req->nr_sectors;
+ sectors = blk_rq_sectors(req);
if (!cqr->buildclk || !cqr->startclk ||
!cqr->stopclk || !cqr->endclk ||
!sectors)
@@ -1614,15 +1614,6 @@ void dasd_block_clear_timer(struct dasd_block *block)
}
/*
- * posts the buffer_cache about a finalized request
- */
-static inline void dasd_end_request(struct request *req, int error)
-{
- if (__blk_end_request(req, error, blk_rq_bytes(req)))
- BUG();
-}
-
-/*
* Process finished error recovery ccw.
*/
static inline void __dasd_block_process_erp(struct dasd_block *block,
@@ -1665,18 +1656,14 @@ static void __dasd_process_request_queue(struct dasd_block *block)
if (basedev->state < DASD_STATE_READY)
return;
/* Now we try to fetch requests from the request queue */
- while (!blk_queue_plugged(queue) &&
- elv_next_request(queue)) {
-
- req = elv_next_request(queue);
-
+ while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
if (basedev->features & DASD_FEATURE_READONLY &&
rq_data_dir(req) == WRITE) {
DBF_DEV_EVENT(DBF_ERR, basedev,
"Rejecting write request %p",
req);
- blkdev_dequeue_request(req);
- dasd_end_request(req, -EIO);
+ blk_start_request(req);
+ __blk_end_request_all(req, -EIO);
continue;
}
cqr = basedev->discipline->build_cp(basedev, block, req);
@@ -1704,8 +1691,8 @@ static void __dasd_process_request_queue(struct dasd_block *block)
"CCW creation failed (rc=%ld) "
"on request %p",
PTR_ERR(cqr), req);
- blkdev_dequeue_request(req);
- dasd_end_request(req, -EIO);
+ blk_start_request(req);
+ __blk_end_request_all(req, -EIO);
continue;
}
/*
@@ -1714,7 +1701,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
*/
cqr->callback_data = (void *) req;
cqr->status = DASD_CQR_FILLED;
- blkdev_dequeue_request(req);
+ blk_start_request(req);
list_add_tail(&cqr->blocklist, &block->ccw_queue);
dasd_profile_start(block, cqr, req);
}
@@ -1731,7 +1718,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
status = cqr->block->base->discipline->free_cp(cqr, req);
if (status <= 0)
error = status ? status : -EIO;
- dasd_end_request(req, error);
+ __blk_end_request_all(req, error);
}
/*
@@ -2003,7 +1990,7 @@ static void dasd_setup_queue(struct dasd_block *block)
{
int max;
- blk_queue_hardsect_size(block->request_queue, block->bp_block);
+ blk_queue_logical_block_size(block->request_queue, block->bp_block);
max = block->base->discipline->max_blocks << block->s2b_shift;
blk_queue_max_sectors(block->request_queue, max);
blk_queue_max_phys_segments(block->request_queue, -1L);
@@ -2038,10 +2025,8 @@ static void dasd_flush_request_queue(struct dasd_block *block)
return;
spin_lock_irq(&block->request_queue_lock);
- while ((req = elv_next_request(block->request_queue))) {
- blkdev_dequeue_request(req);
- dasd_end_request(req, -EIO);
- }
+ while ((req = blk_fetch_request(block->request_queue)))
+ __blk_end_request_all(req, -EIO);
spin_unlock_irq(&block->request_queue_lock);
}
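
Rejecting a request whole no longer needs a private helper wrapping __blk_end_request() in a BUG_ON; the dasd hunks above reduce it to two calls (illustrative only; queue lock held):

#include <linux/blkdev.h>

static void reject_request(struct request *req)
{
	blk_start_request(req);           /* dequeue it */
	__blk_end_request_all(req, -EIO); /* fail every byte */
}
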
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index b9a7f7733446..2efaddfae560 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -505,8 +505,9 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
return ERR_PTR(-EINVAL);
blksize = block->bp_block;
/* Calculate record id of first and last block. */
- first_rec = req->sector >> block->s2b_shift;
- last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
+ first_rec = blk_rq_pos(req) >> block->s2b_shift;
+ last_rec =
+ (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
/* Check struct bio and count the number of blocks for the request. */
count = 0;
rq_for_each_segment(bv, req, iter) {
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index cb52da033f06..a41c94053e64 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2354,10 +2354,10 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
blksize = block->bp_block;
blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
/* Calculate record id of first and last block. */
- first_rec = first_trk = req->sector >> block->s2b_shift;
+ first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
first_offs = sector_div(first_trk, blk_per_trk);
last_rec = last_trk =
- (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
+ (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
last_offs = sector_div(last_trk, blk_per_trk);
cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
@@ -2420,7 +2420,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
private = (struct dasd_eckd_private *) cqr->block->base->private;
blksize = cqr->block->bp_block;
blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
- recid = req->sector >> cqr->block->s2b_shift;
+ recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
ccw = cqr->cpaddr;
/* Skip over define extent & locate record. */
ccw++;
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index a3eb6fd14673..8912358daa2f 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -270,8 +270,9 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
return ERR_PTR(-EINVAL);
blksize = block->bp_block;
/* Calculate record id of first and last block. */
- first_rec = req->sector >> block->s2b_shift;
- last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
+ first_rec = blk_rq_pos(req) >> block->s2b_shift;
+ last_rec =
+ (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
/* Check struct bio and count the number of blocks for the request. */
count = 0;
cidaw = 0;
@@ -309,7 +310,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
ccw = cqr->cpaddr;
/* First ccw is define extent. */
define_extent(ccw++, cqr->data, rq_data_dir(req),
- block->bp_block, req->sector, req->nr_sectors);
+ block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
/* Build locate_record + read/write ccws. */
idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
LO_data = (struct LO_fba_data *) (idaws + cidaw);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index cfdcf1aed33c..a4c7ffcd9987 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -602,7 +602,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->gd->private_data = dev_info;
dev_info->gd->driverfs_dev = &dev_info->dev;
blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
- blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096);
+ blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
seg_byte_size = (dev_info->end - dev_info->start + 1);
set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 76814f3e898a..0ae0c83ef879 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -343,7 +343,7 @@ static int __init xpram_setup_blkdev(void)
goto out;
}
blk_queue_make_request(xpram_queues[i], xpram_make_request);
- blk_queue_hardsect_size(xpram_queues[i], 4096);
+ blk_queue_logical_block_size(xpram_queues[i], 4096);
}
/*
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 5f8e8ef43dd3..2d00a383a475 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -1134,7 +1134,7 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
/* Setup ccws. */
request->op = TO_BLOCK;
start_block = (struct tape_34xx_block_id *) request->cpdata;
- start_block->block = req->sector >> TAPEBLOCK_HSEC_S2B;
+ start_block->block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
DBF_EVENT(6, "start_block = %i\n", start_block->block);
ccw = request->cpaddr;
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 823b05bd0dd7..c453b2f3e9f4 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -633,7 +633,7 @@ tape_3590_bread(struct tape_device *device, struct request *req)
struct req_iterator iter;
DBF_EVENT(6, "xBREDid:");
- start_block = req->sector >> TAPEBLOCK_HSEC_S2B;
+ start_block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
DBF_EVENT(6, "start_block = %i\n", start_block);
rq_for_each_segment(bv, req, iter)
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index f32e89e7c4f2..47ff695255ea 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -74,13 +74,6 @@ tapeblock_trigger_requeue(struct tape_device *device)
* Post finished request.
*/
static void
-tapeblock_end_request(struct request *req, int error)
-{
- if (blk_end_request(req, error, blk_rq_bytes(req)))
- BUG();
-}
-
-static void
__tapeblock_end_request(struct tape_request *ccw_req, void *data)
{
struct tape_device *device;
@@ -90,17 +83,17 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data)
device = ccw_req->device;
req = (struct request *) data;
- tapeblock_end_request(req, (ccw_req->rc == 0) ? 0 : -EIO);
+ blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO);
if (ccw_req->rc == 0)
/* Update position. */
device->blk_data.block_position =
- (req->sector + req->nr_sectors) >> TAPEBLOCK_HSEC_S2B;
+ (blk_rq_pos(req) + blk_rq_sectors(req)) >> TAPEBLOCK_HSEC_S2B;
else
/* We lost the position information due to an error. */
device->blk_data.block_position = -1;
device->discipline->free_bread(ccw_req);
if (!list_empty(&device->req_queue) ||
- elv_next_request(device->blk_data.request_queue))
+ blk_peek_request(device->blk_data.request_queue))
tapeblock_trigger_requeue(device);
}
@@ -118,7 +111,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
ccw_req = device->discipline->bread(device, req);
if (IS_ERR(ccw_req)) {
DBF_EVENT(1, "TBLOCK: bread failed\n");
- tapeblock_end_request(req, -EIO);
+ blk_end_request_all(req, -EIO);
return PTR_ERR(ccw_req);
}
ccw_req->callback = __tapeblock_end_request;
@@ -131,7 +124,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
* Start/enqueueing failed. No retries in
* this case.
*/
- tapeblock_end_request(req, -EIO);
+ blk_end_request_all(req, -EIO);
device->discipline->free_bread(ccw_req);
}
@@ -169,19 +162,16 @@ tapeblock_requeue(struct work_struct *work) {
spin_lock_irq(&device->blk_data.request_queue_lock);
while (
!blk_queue_plugged(queue) &&
- elv_next_request(queue) &&
+ (req = blk_fetch_request(queue)) &&
nr_queued < TAPEBLOCK_MIN_REQUEUE
) {
- req = elv_next_request(queue);
if (rq_data_dir(req) == WRITE) {
DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
- blkdev_dequeue_request(req);
spin_unlock_irq(&device->blk_data.request_queue_lock);
- tapeblock_end_request(req, -EIO);
+ blk_end_request_all(req, -EIO);
spin_lock_irq(&device->blk_data.request_queue_lock);
continue;
}
- blkdev_dequeue_request(req);
nr_queued++;
spin_unlock_irq(&device->blk_data.request_queue_lock);
rc = tapeblock_start_request(device, req);
@@ -232,7 +222,7 @@ tapeblock_setup_device(struct tape_device * device)
if (rc)
goto cleanup_queue;
- blk_queue_hardsect_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
+ blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
blk_queue_max_phys_segments(blkdat->request_queue, -1L);
blk_queue_max_hw_segments(blkdat->request_queue, -1L);
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index a85ad05e8548..6d4651684688 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -186,31 +186,31 @@ static void jsfd_do_request(struct request_queue *q)
{
struct request *req;
- while ((req = elv_next_request(q)) != NULL) {
+ req = blk_fetch_request(q);
+ while (req) {
struct jsfd_part *jdp = req->rq_disk->private_data;
- unsigned long offset = req->sector << 9;
- size_t len = req->current_nr_sectors << 9;
+ unsigned long offset = blk_rq_pos(req) << 9;
+ size_t len = blk_rq_cur_bytes(req);
+ int err = -EIO;
- if ((offset + len) > jdp->dsize) {
- end_request(req, 0);
- continue;
- }
+ if ((offset + len) > jdp->dsize)
+ goto end;
if (rq_data_dir(req) != READ) {
printk(KERN_ERR "jsfd: write\n");
- end_request(req, 0);
- continue;
+ goto end;
}
if ((jdp->dbase & 0xff000000) != 0x20000000) {
printk(KERN_ERR "jsfd: bad base %x\n", (int)jdp->dbase);
- end_request(req, 0);
- continue;
+ goto end;
}
jsfd_read(req->buffer, jdp->dbase + offset, len);
-
- end_request(req, 1);
+ err = 0;
+ end:
+ if (!__blk_end_request_cur(req, err))
+ req = blk_fetch_request(q);
}
}
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index be5099dd94b5..c7076ce25e21 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1825,7 +1825,7 @@ static int eata2x_queuecommand(struct scsi_cmnd *SCpnt,
if (linked_comm && SCpnt->device->queue_depth > 2
&& TLDEV(SCpnt->device->type)) {
ha->cp_stat[i] = READY;
- flush_dev(SCpnt->device, SCpnt->request->sector, ha, 0);
+ flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 0);
return 0;
}
@@ -2144,13 +2144,13 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
if (!cpp->din)
input_only = 0;
- if (SCpnt->request->sector < minsec)
- minsec = SCpnt->request->sector;
- if (SCpnt->request->sector > maxsec)
- maxsec = SCpnt->request->sector;
+ if (blk_rq_pos(SCpnt->request) < minsec)
+ minsec = blk_rq_pos(SCpnt->request);
+ if (blk_rq_pos(SCpnt->request) > maxsec)
+ maxsec = blk_rq_pos(SCpnt->request);
- sl[n] = SCpnt->request->sector;
- ioseek += SCpnt->request->nr_sectors;
+ sl[n] = blk_rq_pos(SCpnt->request);
+ ioseek += blk_rq_sectors(SCpnt->request);
if (!n)
continue;
@@ -2190,7 +2190,7 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
k = il[n];
cpp = &ha->cp[k];
SCpnt = cpp->SCpnt;
- ll[n] = SCpnt->request->nr_sectors;
+ ll[n] = blk_rq_sectors(SCpnt->request);
pl[n] = SCpnt->serial_number;
if (!n)
@@ -2236,12 +2236,12 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
cpp = &ha->cp[k];
SCpnt = cpp->SCpnt;
scmd_printk(KERN_INFO, SCpnt,
- "%s pid %ld mb %d fc %d nr %d sec %ld ns %ld"
+ "%s pid %ld mb %d fc %d nr %d sec %ld ns %u"
" cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
(ihdlr ? "ihdlr" : "qcomm"),
SCpnt->serial_number, k, flushcount,
- n_ready, SCpnt->request->sector,
- SCpnt->request->nr_sectors, cursec, YESNO(s),
+ n_ready, blk_rq_pos(SCpnt->request),
+ blk_rq_sectors(SCpnt->request), cursec, YESNO(s),
YESNO(r), YESNO(rev), YESNO(input_only),
YESNO(overlap), cpp->din);
}
@@ -2408,7 +2408,7 @@ static irqreturn_t ihdlr(struct Scsi_Host *shost)
if (linked_comm && SCpnt->device->queue_depth > 2
&& TLDEV(SCpnt->device->type))
- flush_dev(SCpnt->device, SCpnt->request->sector, ha, 1);
+ flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 1);
tstatus = status_byte(spp->target_status);
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 3da02e436788..54fa1e42dc4d 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1927,21 +1927,21 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
/* do we need to support multiple segments? */
if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
printk("%s: multiple segments req %u %u, rsp %u %u\n",
- __func__, req->bio->bi_vcnt, req->data_len,
- rsp->bio->bi_vcnt, rsp->data_len);
+ __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
+ rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
return -EINVAL;
}
- ret = smp_execute_task(dev, bio_data(req->bio), req->data_len,
- bio_data(rsp->bio), rsp->data_len);
+ ret = smp_execute_task(dev, bio_data(req->bio), blk_rq_bytes(req),
+ bio_data(rsp->bio), blk_rq_bytes(rsp));
if (ret > 0) {
/* positive number is the untransferred residual */
- rsp->data_len = ret;
- req->data_len = 0;
+ rsp->resid_len = ret;
+ req->resid_len = 0;
ret = 0;
} else if (ret == 0) {
- rsp->data_len = 0;
- req->data_len = 0;
+ rsp->resid_len = 0;
+ req->resid_len = 0;
}
return ret;
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index d110a366c48a..1bc3b7567994 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -134,24 +134,24 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
{
u8 *req_data = NULL, *resp_data = NULL, *buf;
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
- int error = -EINVAL, resp_data_len = rsp->data_len;
+ int error = -EINVAL;
/* eight is the minimum size for request and response frames */
- if (req->data_len < 8 || rsp->data_len < 8)
+ if (blk_rq_bytes(req) < 8 || blk_rq_bytes(rsp) < 8)
goto out;
- if (bio_offset(req->bio) + req->data_len > PAGE_SIZE ||
- bio_offset(rsp->bio) + rsp->data_len > PAGE_SIZE) {
+ if (bio_offset(req->bio) + blk_rq_bytes(req) > PAGE_SIZE ||
+ bio_offset(rsp->bio) + blk_rq_bytes(rsp) > PAGE_SIZE) {
shost_printk(KERN_ERR, shost,
"SMP request/response frame crosses page boundary");
goto out;
}
- req_data = kzalloc(req->data_len, GFP_KERNEL);
+ req_data = kzalloc(blk_rq_bytes(req), GFP_KERNEL);
/* make sure frame can always be built ... we copy
* back only the requested length */
- resp_data = kzalloc(max(rsp->data_len, 128U), GFP_KERNEL);
+ resp_data = kzalloc(max(blk_rq_bytes(rsp), 128U), GFP_KERNEL);
if (!req_data || !resp_data) {
error = -ENOMEM;
@@ -160,7 +160,7 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
local_irq_disable();
buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio);
- memcpy(req_data, buf, req->data_len);
+ memcpy(req_data, buf, blk_rq_bytes(req));
kunmap_atomic(buf - bio_offset(req->bio), KM_USER0);
local_irq_enable();
@@ -178,15 +178,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
switch (req_data[1]) {
case SMP_REPORT_GENERAL:
- req->data_len -= 8;
- resp_data_len -= 32;
+ req->resid_len -= 8;
+ rsp->resid_len -= 32;
resp_data[2] = SMP_RESP_FUNC_ACC;
resp_data[9] = sas_ha->num_phys;
break;
case SMP_REPORT_MANUF_INFO:
- req->data_len -= 8;
- resp_data_len -= 64;
+ req->resid_len -= 8;
+ rsp->resid_len -= 64;
resp_data[2] = SMP_RESP_FUNC_ACC;
memcpy(resp_data + 12, shost->hostt->name,
SAS_EXPANDER_VENDOR_ID_LEN);
@@ -199,13 +199,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
break;
case SMP_DISCOVER:
- req->data_len -= 16;
- if ((int)req->data_len < 0) {
- req->data_len = 0;
+ req->resid_len -= 16;
+ if ((int)req->resid_len < 0) {
+ req->resid_len = 0;
error = -EINVAL;
goto out;
}
- resp_data_len -= 56;
+ rsp->resid_len -= 56;
sas_host_smp_discover(sas_ha, resp_data, req_data[9]);
break;
@@ -215,13 +215,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
break;
case SMP_REPORT_PHY_SATA:
- req->data_len -= 16;
- if ((int)req->data_len < 0) {
- req->data_len = 0;
+ req->resid_len -= 16;
+ if ((int)req->resid_len < 0) {
+ req->resid_len = 0;
error = -EINVAL;
goto out;
}
- resp_data_len -= 60;
+ rsp->resid_len -= 60;
sas_report_phy_sata(sas_ha, resp_data, req_data[9]);
break;
@@ -238,13 +238,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
break;
case SMP_PHY_CONTROL:
- req->data_len -= 44;
- if ((int)req->data_len < 0) {
- req->data_len = 0;
+ req->resid_len -= 44;
+ if ((int)req->resid_len < 0) {
+ req->resid_len = 0;
error = -EINVAL;
goto out;
}
- resp_data_len -= 8;
+ rsp->resid_len -= 8;
sas_phy_control(sas_ha, req_data[9], req_data[10],
req_data[32] >> 4, req_data[33] >> 4,
resp_data);
@@ -261,11 +261,10 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
local_irq_disable();
buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio);
- memcpy(buf, resp_data, rsp->data_len);
+ memcpy(buf, resp_data, blk_rq_bytes(rsp));
flush_kernel_dcache_page(bio_page(rsp->bio));
kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0);
local_irq_enable();
- rsp->data_len = resp_data_len;
out:
kfree(req_data);
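
In the SMP passthrough paths, rq->data_len previously served both as the request size and as an in-place residual that handlers decremented. The split above gives each role its own home: blk_rq_bytes(rq) for the size, rq->resid_len for the untransferred residual. A sketch of the new completion bookkeeping (illustrative only; rsp_bytes_returned is a hypothetical count from the hardware reply):

#include <linux/blkdev.h>

static void account_smp_completion(struct request *req, struct request *rsp,
				   unsigned int rsp_bytes_returned)
{
	req->resid_len = 0;  /* request frame fully consumed */
	rsp->resid_len = blk_rq_bytes(rsp) - rsp_bytes_returned;
}
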
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 167b66dd34c7..8032c5adb6a9 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1312,10 +1312,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
uint32_t bgstat = bgf->bgstat;
uint64_t failing_sector = 0;
- printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx "
+ printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x "
"bgstat=0x%x bghm=0x%x\n",
cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
- cmd->request->nr_sectors, bgstat, bghm);
+ blk_rq_sectors(cmd->request), bgstat, bghm);
spin_lock(&_dump_buf_lock);
if (!_dump_buf_done) {
@@ -2378,15 +2378,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
if (cmnd->cmnd[0] == READ_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9035 BLKGRD: READ @ sector %llu, "
- "count %lu\n",
- (unsigned long long)scsi_get_lba(cmnd),
- cmnd->request->nr_sectors);
+ "count %u\n",
+ (unsigned long long)scsi_get_lba(cmnd),
+ blk_rq_sectors(cmnd->request));
else if (cmnd->cmnd[0] == WRITE_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9036 BLKGRD: WRITE @ sector %llu, "
- "count %lu cmd=%p\n",
+ "count %u cmd=%p\n",
(unsigned long long)scsi_get_lba(cmnd),
- cmnd->request->nr_sectors,
+ blk_rq_sectors(cmnd->request),
cmnd);
err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
@@ -2406,15 +2406,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
if (cmnd->cmnd[0] == READ_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9040 dbg: READ @ sector %llu, "
- "count %lu\n",
+ "count %u\n",
(unsigned long long)scsi_get_lba(cmnd),
- cmnd->request->nr_sectors);
+ blk_rq_sectors(cmnd->request));
else if (cmnd->cmnd[0] == WRITE_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9041 dbg: WRITE @ sector %llu, "
- "count %lu cmd=%p\n",
+ "count %u cmd=%p\n",
(unsigned long long)scsi_get_lba(cmnd),
- cmnd->request->nr_sectors, cmnd);
+ blk_rq_sectors(cmnd->request), cmnd);
else
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9042 dbg: parser not implemented\n");
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index e03dc0b1e1a0..5c65da519e39 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -1041,7 +1041,7 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, "
"rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt,
- req->data_len, rsp->bio->bi_vcnt, rsp->data_len);
+ blk_rq_bytes(req), rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
return -EINVAL;
}
@@ -1104,7 +1104,7 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
*((u64 *)&mpi_request->SASAddress) = (rphy) ?
cpu_to_le64(rphy->identify.sas_address) :
cpu_to_le64(ioc->sas_hba.sas_address);
- mpi_request->RequestDataLength = cpu_to_le16(req->data_len - 4);
+ mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
psge = &mpi_request->SGL;
/* WRITE sgel first */
@@ -1112,13 +1112,13 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
- req->data_len, PCI_DMA_BIDIRECTIONAL);
+ blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
if (!dma_addr_out) {
mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
goto unmap;
}
- ioc->base_add_sg_single(psge, sgl_flags | (req->data_len - 4),
+ ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(req) - 4),
dma_addr_out);
/* incr sgel */
@@ -1129,14 +1129,14 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
MPI2_SGE_FLAGS_END_OF_LIST);
sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
- dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
- rsp->data_len, PCI_DMA_BIDIRECTIONAL);
+ dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
+ blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
if (!dma_addr_in) {
mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
goto unmap;
}
- ioc->base_add_sg_single(psge, sgl_flags | (rsp->data_len + 4),
+ ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(rsp) + 4),
dma_addr_in);
dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - "
@@ -1170,9 +1170,8 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
req->sense_len = sizeof(*mpi_reply);
- req->data_len = 0;
- rsp->data_len -= mpi_reply->ResponseDataLength;
-
+ req->resid_len = 0;
+ rsp->resid_len -= mpi_reply->ResponseDataLength;
} else {
dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT
"%s - no reply\n", ioc->name, __func__));
@@ -1188,10 +1187,10 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
unmap:
if (dma_addr_out)
- pci_unmap_single(ioc->pdev, dma_addr_out, req->data_len,
+ pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req),
PCI_DMA_BIDIRECTIONAL);
if (dma_addr_in)
- pci_unmap_single(ioc->pdev, dma_addr_in, rsp->data_len,
+ pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
PCI_DMA_BIDIRECTIONAL);
out:
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 1ce6b24abab2..5776b2ab6b12 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -889,26 +889,6 @@ int osd_req_add_set_attr_list(struct osd_request *or,
}
EXPORT_SYMBOL(osd_req_add_set_attr_list);
-static int _append_map_kern(struct request *req,
- void *buff, unsigned len, gfp_t flags)
-{
- struct bio *bio;
- int ret;
-
- bio = bio_map_kern(req->q, buff, len, flags);
- if (IS_ERR(bio)) {
- OSD_ERR("Failed bio_map_kern(%p, %d) => %ld\n", buff, len,
- PTR_ERR(bio));
- return PTR_ERR(bio);
- }
- ret = blk_rq_append_bio(req->q, req, bio);
- if (ret) {
- OSD_ERR("Failed blk_rq_append_bio(%p) => %d\n", bio, ret);
- bio_put(bio);
- }
- return ret;
-}
-
static int _req_append_segment(struct osd_request *or,
unsigned padding, struct _osd_req_data_segment *seg,
struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
@@ -924,14 +904,14 @@ static int _req_append_segment(struct osd_request *or,
else
pad_buff = io->pad_buff;
- ret = _append_map_kern(io->req, pad_buff, padding,
+ ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
or->alloc_flags);
if (ret)
return ret;
io->total_bytes += padding;
}
- ret = _append_map_kern(io->req, seg->buff, seg->total_bytes,
+ ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
or->alloc_flags);
if (ret)
return ret;
@@ -1293,6 +1273,21 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
/*
* osd_finalize_request and helpers
*/
+static struct request *_make_request(struct request_queue *q, bool has_write,
+ struct _osd_io_info *oii, gfp_t flags)
+{
+ if (oii->bio)
+ return blk_make_request(q, oii->bio, flags);
+ else {
+ struct request *req;
+
+ req = blk_get_request(q, has_write ? WRITE : READ, flags);
+ if (unlikely(!req))
+ return ERR_PTR(-ENOMEM);
+
+ return req;
+ }
+}
static int _init_blk_request(struct osd_request *or,
bool has_in, bool has_out)
@@ -1301,11 +1296,13 @@ static int _init_blk_request(struct osd_request *or,
struct scsi_device *scsi_device = or->osd_dev->scsi_device;
struct request_queue *q = scsi_device->request_queue;
struct request *req;
- int ret = -ENOMEM;
+ int ret;
- req = blk_get_request(q, has_out, flags);
- if (!req)
+ req = _make_request(q, has_out, has_out ? &or->out : &or->in, flags);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
goto out;
+ }
or->request = req;
req->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -1318,9 +1315,10 @@ static int _init_blk_request(struct osd_request *or,
or->out.req = req;
if (has_in) {
/* allocate bidi request */
- req = blk_get_request(q, READ, flags);
- if (!req) {
+ req = _make_request(q, false, &or->in, flags);
+ if (IS_ERR(req)) {
OSD_DEBUG("blk_get_request for bidi failed\n");
+ ret = PTR_ERR(req);
goto out;
}
req->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -1364,26 +1362,6 @@ int osd_finalize_request(struct osd_request *or,
return ret;
}
- if (or->out.bio) {
- ret = blk_rq_append_bio(or->request->q, or->out.req,
- or->out.bio);
- if (ret) {
- OSD_DEBUG("blk_rq_append_bio out failed\n");
- return ret;
- }
- OSD_DEBUG("out bytes=%llu (bytes_req=%u)\n",
- _LLU(or->out.total_bytes), or->out.req->data_len);
- }
- if (or->in.bio) {
- ret = blk_rq_append_bio(or->request->q, or->in.req, or->in.bio);
- if (ret) {
- OSD_DEBUG("blk_rq_append_bio in failed\n");
- return ret;
- }
- OSD_DEBUG("in bytes=%llu (bytes_req=%u)\n",
- _LLU(or->in.total_bytes), or->in.req->data_len);
- }
-
or->out.pad_buff = sg_out_pad_buffer;
or->in.pad_buff = sg_in_pad_buffer;
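
osd drops its private _append_map_kern() helper: blk_rq_map_kern() can now append to a request that already carries data, and blk_make_request() builds a request directly around a pre-built bio chain. A sketch combining both (illustrative only):

#include <linux/err.h>
#include <linux/blkdev.h>

static struct request *make_req_with_pad(struct request_queue *q,
					 struct bio *bio, void *pad,
					 unsigned int pad_len, gfp_t gfp)
{
	struct request *req = blk_make_request(q, bio, gfp);
	int ret;

	if (IS_ERR(req))
		return req;

	/* append a kernel buffer (e.g. segment padding) to the request */
	ret = blk_rq_map_kern(q, req, pad, pad_len, gfp);
	if (ret) {
		blk_put_request(req);
		return ERR_PTR(ret);
	}
	return req;
}
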
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index bb218c8b6e98..dd3f9d2b99fd 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -240,11 +240,11 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
* is invalid. Prevent the garbage from being misinterpreted
* and prevent security leaks by zeroing out the excess data.
*/
- if (unlikely(req->data_len > 0 && req->data_len <= bufflen))
- memset(buffer + (bufflen - req->data_len), 0, req->data_len);
+ if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
+ memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
if (resid)
- *resid = req->data_len;
+ *resid = req->resid_len;
ret = req->errors;
out:
blk_put_request(req);
@@ -546,14 +546,9 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
* to queue the remainder of them.
*/
if (blk_end_request(req, error, bytes)) {
- int leftover = (req->hard_nr_sectors << 9);
-
- if (blk_pc_request(req))
- leftover = req->data_len;
-
/* kill remainder if no retrys */
if (error && scsi_noretry_cmd(cmd))
- blk_end_request(req, error, leftover);
+ blk_end_request_all(req, error);
else {
if (requeue) {
/*
@@ -673,34 +668,6 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
EXPORT_SYMBOL(scsi_release_buffers);
/*
- * Bidi commands Must be complete as a whole, both sides at once.
- * If part of the bytes were written and lld returned
- * scsi_in()->resid and/or scsi_out()->resid this information will be left
- * in req->data_len and req->next_rq->data_len. The upper-layer driver can
- * decide what to do with this information.
- */
-static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
-{
- struct request *req = cmd->request;
- unsigned int dlen = req->data_len;
- unsigned int next_dlen = req->next_rq->data_len;
-
- req->data_len = scsi_out(cmd)->resid;
- req->next_rq->data_len = scsi_in(cmd)->resid;
-
- /* The req and req->next_rq have not been completed */
- BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));
-
- scsi_release_buffers(cmd);
-
- /*
- * This will goose the queue request function at the end, so we don't
- * need to worry about launching another command.
- */
- scsi_next_command(cmd);
-}
-
-/*
* Function: scsi_io_completion()
*
* Purpose: Completion processing for block device I/O requests.
@@ -739,7 +706,6 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
int result = cmd->result;
- int this_count;
struct request_queue *q = cmd->device->request_queue;
struct request *req = cmd->request;
int error = 0;
@@ -773,12 +739,22 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
if (!sense_deferred)
error = -EIO;
}
+
+ req->resid_len = scsi_get_resid(cmd);
+
if (scsi_bidi_cmnd(cmd)) {
- /* will also release_buffers */
- scsi_end_bidi_request(cmd);
+ /*
+ * Bidi commands Must be complete as a whole,
+ * both sides at once.
+ */
+ req->next_rq->resid_len = scsi_in(cmd)->resid;
+
+ blk_end_request_all(req, 0);
+
+ scsi_release_buffers(cmd);
+ scsi_next_command(cmd);
return;
}
- req->data_len = scsi_get_resid(cmd);
}
BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */
@@ -787,9 +763,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* Next deal with any sectors which we were able to correctly
* handle.
*/
- SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
+ SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
"%d bytes done.\n",
- req->nr_sectors, good_bytes));
+ blk_rq_sectors(req), good_bytes));
/*
* Recovered errors need reporting, but they're always treated
@@ -812,7 +788,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
*/
if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
return;
- this_count = blk_rq_bytes(req);
error = -EIO;
@@ -922,7 +897,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
if (driver_byte(result) & DRIVER_SENSE)
scsi_print_sense("", cmd);
}
- blk_end_request(req, -EIO, blk_rq_bytes(req));
+ blk_end_request_all(req, -EIO);
scsi_next_command(cmd);
break;
case ACTION_REPREP:
@@ -965,10 +940,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
BUG_ON(count > sdb->table.nents);
sdb->table.nents = count;
- if (blk_pc_request(req))
- sdb->length = req->data_len;
- else
- sdb->length = req->nr_sectors << 9;
+ sdb->length = blk_rq_bytes(req);
return BLKPREP_OK;
}
@@ -1087,22 +1059,21 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
if (unlikely(ret))
return ret;
} else {
- BUG_ON(req->data_len);
- BUG_ON(req->data);
+ BUG_ON(blk_rq_bytes(req));
memset(&cmd->sdb, 0, sizeof(cmd->sdb));
req->buffer = NULL;
}
cmd->cmd_len = req->cmd_len;
- if (!req->data_len)
+ if (!blk_rq_bytes(req))
cmd->sc_data_direction = DMA_NONE;
else if (rq_data_dir(req) == WRITE)
cmd->sc_data_direction = DMA_TO_DEVICE;
else
cmd->sc_data_direction = DMA_FROM_DEVICE;
- cmd->transfersize = req->data_len;
+ cmd->transfersize = blk_rq_bytes(req);
cmd->allowed = req->retries;
return BLKPREP_OK;
}
@@ -1212,7 +1183,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
break;
case BLKPREP_DEFER:
/*
- * If we defer, the elv_next_request() returns NULL, but the
+ * If we defer, the blk_peek_request() returns NULL, but the
* queue must be restarted, so we plug here if no returning
* command will automatically do that.
*/
@@ -1388,7 +1359,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
struct scsi_target *starget = scsi_target(sdev);
struct Scsi_Host *shost = sdev->host;
- blkdev_dequeue_request(req);
+ blk_start_request(req);
if (unlikely(cmd == NULL)) {
printk(KERN_CRIT "impossible request in %s.\n",
@@ -1480,7 +1451,7 @@ static void scsi_request_fn(struct request_queue *q)
if (!sdev) {
printk("scsi: killing requests for dead queue\n");
- while ((req = elv_next_request(q)) != NULL)
+ while ((req = blk_peek_request(q)) != NULL)
scsi_kill_request(req, q);
return;
}
@@ -1501,7 +1472,7 @@ static void scsi_request_fn(struct request_queue *q)
* that the request is fully prepared even if we cannot
* accept it.
*/
- req = elv_next_request(q);
+ req = blk_peek_request(q);
if (!req || !scsi_dev_queue_ready(q, sdev))
break;
@@ -1517,7 +1488,7 @@ static void scsi_request_fn(struct request_queue *q)
* Remove the request from the request list.
*/
if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
- blkdev_dequeue_request(req);
+ blk_start_request(req);
sdev->device_busy++;
spin_unlock(q->queue_lock);
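
The scsi_request_fn() conversion above shows the new issue-side API: blk_peek_request() looks at the head of the queue without dequeueing, and blk_start_request() dequeues the request and arms its timeout, replacing the old elv_next_request()/blkdev_dequeue_request() pair. A minimal sketch of the pattern for a hypothetical driver follows; the mydev_* names are illustrative stubs, not part of this patch:

    #include <linux/blkdev.h>

    static int mydev_busy(void *data) { return 0; }      /* stub */
    static void mydev_dispatch(struct request *req) { }  /* stub */

    static void mydev_request_fn(struct request_queue *q)
    {
            struct request *req;

            /* peek: the request stays queued until explicitly started */
            while ((req = blk_peek_request(q)) != NULL) {
                    if (mydev_busy(q->queuedata))
                            break;          /* leave it on the queue */

                    blk_start_request(req); /* dequeue, arm timeout */

                    if (!blk_fs_request(req)) {
                            /* request_fn runs under queue_lock: __ variant */
                            __blk_end_request_all(req, -EIO);
                            continue;
                    }
                    mydev_dispatch(req);    /* hypothetical issue path */
            }
    }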
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 48ba413f7f6a..10303272ba45 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -387,7 +387,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
* we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the
* length for us.
*/
- cmd->sdb.length = rq->data_len;
+ cmd->sdb.length = blk_rq_bytes(rq);
return 0;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 50988cbf7b2d..d606452297cf 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -163,12 +163,10 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
while (!blk_queue_plugged(q)) {
- req = elv_next_request(q);
+ req = blk_fetch_request(q);
if (!req)
break;
- blkdev_dequeue_request(req);
-
spin_unlock_irq(q->queue_lock);
handler = to_sas_internal(shost->transportt)->f->smp_handler;
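
For callers like this one that never leave a peeked request on the queue, blk_fetch_request() combines the two calls. Judging from the replacement above, it behaves like this illustrative helper:

    static inline struct request *fetch(struct request_queue *q)
    {
            struct request *req = blk_peek_request(q);

            if (req)
                    blk_start_request(req);
            return req;
    }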
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 84044233b637..bcf3bd40bbd5 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -384,9 +384,9 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
struct scsi_device *sdp = q->queuedata;
struct gendisk *disk = rq->rq_disk;
struct scsi_disk *sdkp;
- sector_t block = rq->sector;
+ sector_t block = blk_rq_pos(rq);
sector_t threshold;
- unsigned int this_count = rq->nr_sectors;
+ unsigned int this_count = blk_rq_sectors(rq);
int ret, host_dif;
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -413,10 +413,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
this_count));
if (!sdp || !scsi_device_online(sdp) ||
- block + rq->nr_sectors > get_capacity(disk)) {
+ block + blk_rq_sectors(rq) > get_capacity(disk)) {
SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
- "Finishing %ld sectors\n",
- rq->nr_sectors));
+ "Finishing %u sectors\n",
+ blk_rq_sectors(rq)));
SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
"Retry with 0x%p\n", SCpnt));
goto out;
@@ -463,7 +463,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
* for this.
*/
if (sdp->sector_size == 1024) {
- if ((block & 1) || (rq->nr_sectors & 1)) {
+ if ((block & 1) || (blk_rq_sectors(rq) & 1)) {
scmd_printk(KERN_ERR, SCpnt,
"Bad block number requested\n");
goto out;
@@ -473,7 +473,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
}
}
if (sdp->sector_size == 2048) {
- if ((block & 3) || (rq->nr_sectors & 3)) {
+ if ((block & 3) || (blk_rq_sectors(rq) & 3)) {
scmd_printk(KERN_ERR, SCpnt,
"Bad block number requested\n");
goto out;
@@ -483,7 +483,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
}
}
if (sdp->sector_size == 4096) {
- if ((block & 7) || (rq->nr_sectors & 7)) {
+ if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
scmd_printk(KERN_ERR, SCpnt,
"Bad block number requested\n");
goto out;
@@ -512,10 +512,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
}
SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
- "%s %d/%ld 512 byte blocks.\n",
+ "%s %d/%u 512 byte blocks.\n",
(rq_data_dir(rq) == WRITE) ?
"writing" : "reading", this_count,
- rq->nr_sectors));
+ blk_rq_sectors(rq)));
/* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
@@ -971,8 +971,8 @@ static struct block_device_operations sd_fops = {
static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
{
- u64 start_lba = scmd->request->sector;
- u64 end_lba = scmd->request->sector + (scsi_bufflen(scmd) / 512);
+ u64 start_lba = blk_rq_pos(scmd->request);
+ u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
u64 bad_lba;
int info_valid;
@@ -1510,7 +1510,7 @@ got_data:
*/
sector_size = 512;
}
- blk_queue_hardsect_size(sdp->request_queue, sector_size);
+ blk_queue_logical_block_size(sdp->request_queue, sector_size);
{
char cap_str_2[10], cap_str_10[10];
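
sd now reads request position and size through blk_rq_pos() and blk_rq_sectors() instead of touching rq->sector and rq->nr_sectors directly. The repeated 1024/2048/4096 alignment tests in sd_prep_fn() all follow one pattern; a hypothetical helper (not part of this patch) makes it explicit, with positions and lengths in the 512-byte units the accessors return:

    #include <linux/blkdev.h>

    /* true when the request is aligned to a sector_size-byte device sector */
    static bool rq_aligned(struct request *rq, unsigned int sector_size)
    {
            unsigned int mask = (sector_size >> 9) - 1;

            return !((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask));
    }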
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 184dff492797..82f14a9482d0 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -507,7 +507,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
sector_sz = scmd->device->sector_size;
sectors = good_bytes / sector_sz;
- phys = scmd->request->sector & 0xffffffff;
+ phys = blk_rq_pos(scmd->request) & 0xffffffff;
if (sector_sz == 4096)
phys >>= 3;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 91e316fe6522..8201387b4daa 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -289,8 +289,8 @@ sg_open(struct inode *inode, struct file *filp)
if (list_empty(&sdp->sfds)) { /* no existing opens on this device */
sdp->sgdebug = 0;
q = sdp->device->request_queue;
- sdp->sg_tablesize = min(q->max_hw_segments,
- q->max_phys_segments);
+ sdp->sg_tablesize = min(queue_max_hw_segments(q),
+ queue_max_phys_segments(q));
}
if ((sfp = sg_add_sfp(sdp, dev)))
filp->private_data = sfp;
@@ -909,7 +909,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
if (val < 0)
return -EINVAL;
val = min_t(int, val,
- sdp->device->request_queue->max_sectors * 512);
+ queue_max_sectors(sdp->device->request_queue) * 512);
if (val != sfp->reserve.bufflen) {
if (sg_res_in_use(sfp) || sfp->mmap_called)
return -EBUSY;
@@ -919,7 +919,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
return 0;
case SG_GET_RESERVED_SIZE:
val = min_t(int, sfp->reserve.bufflen,
- sdp->device->request_queue->max_sectors * 512);
+ queue_max_sectors(sdp->device->request_queue) * 512);
return put_user(val, ip);
case SG_SET_COMMAND_Q:
result = get_user(val, ip);
@@ -1059,7 +1059,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
return -ENODEV;
return scsi_ioctl(sdp->device, cmd_in, p);
case BLKSECTGET:
- return put_user(sdp->device->request_queue->max_sectors * 512,
+ return put_user(queue_max_sectors(sdp->device->request_queue) * 512,
ip);
case BLKTRACESETUP:
return blk_trace_setup(sdp->device->request_queue,
@@ -1261,7 +1261,7 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
sense = rq->sense;
result = rq->errors;
- resid = rq->data_len;
+ resid = rq->resid_len;
SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
sdp->disk->disk_name, srp->header.pack_id, result));
@@ -1378,7 +1378,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
sdp->device = scsidp;
INIT_LIST_HEAD(&sdp->sfds);
init_waitqueue_head(&sdp->o_excl_wait);
- sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments);
+ sdp->sg_tablesize = min(queue_max_hw_segments(q),
+ queue_max_phys_segments(q));
sdp->index = k;
kref_init(&sdp->d_ref);
@@ -2056,7 +2057,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
sg_big_buff = def_reserved_size;
bufflen = min_t(int, sg_big_buff,
- sdp->device->request_queue->max_sectors * 512);
+ queue_max_sectors(sdp->device->request_queue) * 512);
sg_build_reserve(sfp, bufflen);
SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
sfp->reserve.bufflen, sfp->reserve.k_use_sg));
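
Every sg buffer-size decision clamps against the largest single transfer the queue accepts; with max_sectors moving into struct queue_limits, that value must now come from queue_max_sectors(). The recurring clamp, written out as a hypothetical helper:

    #include <linux/blkdev.h>
    #include <scsi/scsi_device.h>

    static int sg_reserve_bytes(struct scsi_device *sdev, int requested)
    {
            /* queue_max_sectors() is in 512-byte units */
            return min_t(int, requested,
                         queue_max_sectors(sdev->request_queue) * 512);
    }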
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0e1a0f2d2ad5..cd350dfc1216 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -292,7 +292,8 @@ static int sr_done(struct scsi_cmnd *SCpnt)
if (cd->device->sector_size == 2048)
error_sector <<= 2;
error_sector &= ~(block_sectors - 1);
- good_bytes = (error_sector - SCpnt->request->sector) << 9;
+ good_bytes = (error_sector -
+ blk_rq_pos(SCpnt->request)) << 9;
if (good_bytes < 0 || good_bytes >= this_count)
good_bytes = 0;
/*
@@ -349,8 +350,8 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
cd->disk->disk_name, block));
if (!cd->device || !scsi_device_online(cd->device)) {
- SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n",
- rq->nr_sectors));
+ SCSI_LOG_HLQUEUE(2, printk("Finishing %u sectors\n",
+ blk_rq_sectors(rq)));
SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
goto out;
}
@@ -413,7 +414,7 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
/*
* request doesn't start on hw block boundary, add scatter pads
*/
- if (((unsigned int)rq->sector % (s_size >> 9)) ||
+ if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) ||
(scsi_bufflen(SCpnt) % s_size)) {
scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n");
goto out;
@@ -422,14 +423,14 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9);
- SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n",
+ SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%u 512 byte blocks.\n",
cd->cdi.name,
(rq_data_dir(rq) == WRITE) ?
"writing" : "reading",
- this_count, rq->nr_sectors));
+ this_count, blk_rq_sectors(rq)));
SCpnt->cmnd[1] = 0;
- block = (unsigned int)rq->sector / (s_size >> 9);
+ block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9);
if (this_count > 0xffff) {
this_count = 0xffff;
@@ -726,7 +727,7 @@ static void get_sectorsize(struct scsi_cd *cd)
}
queue = cd->device->request_queue;
- blk_queue_hardsect_size(queue, sector_size);
+ blk_queue_logical_block_size(queue, sector_size);
return;
}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index eb24efea8f14..89bd438e1fe3 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -463,7 +463,7 @@ static void st_scsi_execute_end(struct request *req, int uptodate)
struct scsi_tape *STp = SRpnt->stp;
STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
- STp->buffer->cmdstat.residual = req->data_len;
+ STp->buffer->cmdstat.residual = req->resid_len;
if (SRpnt->waiting)
complete(SRpnt->waiting);
@@ -3983,8 +3983,8 @@ static int st_probe(struct device *dev)
return -ENODEV;
}
- i = min(SDp->request_queue->max_hw_segments,
- SDp->request_queue->max_phys_segments);
+ i = min(queue_max_hw_segments(SDp->request_queue),
+ queue_max_phys_segments(SDp->request_queue));
if (st_max_sg_segs < i)
i = st_max_sg_segs;
buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 601e95141cbe..54023d41fd15 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1306,7 +1306,7 @@ static int u14_34f_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scs
if (linked_comm && SCpnt->device->queue_depth > 2
&& TLDEV(SCpnt->device->type)) {
HD(j)->cp_stat[i] = READY;
- flush_dev(SCpnt->device, SCpnt->request->sector, j, FALSE);
+ flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, FALSE);
return 0;
}
@@ -1610,11 +1610,13 @@ static int reorder(unsigned int j, unsigned long cursec,
if (!(cpp->xdir == DTD_IN)) input_only = FALSE;
- if (SCpnt->request->sector < minsec) minsec = SCpnt->request->sector;
- if (SCpnt->request->sector > maxsec) maxsec = SCpnt->request->sector;
+ if (blk_rq_pos(SCpnt->request) < minsec)
+ minsec = blk_rq_pos(SCpnt->request);
+ if (blk_rq_pos(SCpnt->request) > maxsec)
+ maxsec = blk_rq_pos(SCpnt->request);
- sl[n] = SCpnt->request->sector;
- ioseek += SCpnt->request->nr_sectors;
+ sl[n] = blk_rq_pos(SCpnt->request);
+ ioseek += blk_rq_sectors(SCpnt->request);
if (!n) continue;
@@ -1642,7 +1644,7 @@ static int reorder(unsigned int j, unsigned long cursec,
if (!input_only) for (n = 0; n < n_ready; n++) {
k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
- ll[n] = SCpnt->request->nr_sectors; pl[n] = SCpnt->serial_number;
+ ll[n] = blk_rq_sectors(SCpnt->request); pl[n] = SCpnt->serial_number;
if (!n) continue;
@@ -1666,12 +1668,12 @@ static int reorder(unsigned int j, unsigned long cursec,
if (link_statistics && (overlap || !(flushcount % link_statistics)))
for (n = 0; n < n_ready; n++) {
k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
- printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %ld"\
+ printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %u"\
" cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
(ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready,
- SCpnt->request->sector, SCpnt->request->nr_sectors, cursec,
- YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
+ blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request),
+ cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
YESNO(overlap), cpp->xdir);
}
#endif
@@ -1799,7 +1801,7 @@ static irqreturn_t ihdlr(unsigned int j)
if (linked_comm && SCpnt->device->queue_depth > 2
&& TLDEV(SCpnt->device->type))
- flush_dev(SCpnt->device, SCpnt->request->sector, j, TRUE);
+ flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, TRUE);
tstatus = status_byte(spp->target_status);
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 4ca3b5860643..cfa26d56ce60 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -132,7 +132,7 @@ static int slave_configure(struct scsi_device *sdev)
if (us->fflags & US_FL_MAX_SECTORS_MIN)
max_sectors = PAGE_CACHE_SIZE >> 9;
- if (sdev->request_queue->max_sectors > max_sectors)
+ if (queue_max_sectors(sdev->request_queue) > max_sectors)
blk_queue_max_sectors(sdev->request_queue,
max_sectors);
} else if (sdev->type == TYPE_TAPE) {
@@ -483,7 +483,7 @@ static ssize_t show_max_sectors(struct device *dev, struct device_attribute *att
{
struct scsi_device *sdev = to_scsi_device(dev);
- return sprintf(buf, "%u\n", sdev->request_queue->max_sectors);
+ return sprintf(buf, "%u\n", queue_max_sectors(sdev->request_queue));
}
/* Input routine for the sysfs max_sectors file */
diff --git a/fs/bio.c b/fs/bio.c
index 740699c4f90c..59000215e59b 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -498,11 +498,11 @@ int bio_get_nr_vecs(struct block_device *bdev)
struct request_queue *q = bdev_get_queue(bdev);
int nr_pages;
- nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (nr_pages > q->max_phys_segments)
- nr_pages = q->max_phys_segments;
- if (nr_pages > q->max_hw_segments)
- nr_pages = q->max_hw_segments;
+ nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ if (nr_pages > queue_max_phys_segments(q))
+ nr_pages = queue_max_phys_segments(q);
+ if (nr_pages > queue_max_hw_segments(q))
+ nr_pages = queue_max_hw_segments(q);
return nr_pages;
}
@@ -561,8 +561,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
* make this too complex.
*/
- while (bio->bi_phys_segments >= q->max_phys_segments
- || bio->bi_phys_segments >= q->max_hw_segments) {
+ while (bio->bi_phys_segments >= queue_max_phys_segments(q)
+ || bio->bi_phys_segments >= queue_max_hw_segments(q)) {
if (retried_segments)
return 0;
@@ -633,7 +633,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
unsigned int len, unsigned int offset)
{
- return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
+ return __bio_add_page(q, bio, page, len, offset,
+ queue_max_hw_sectors(q));
}
/**
@@ -653,7 +654,7 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
unsigned int offset)
{
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
- return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
+ return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
}
struct bio_map_data {
@@ -720,7 +721,7 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
while (bv_len && iov_idx < iov_count) {
unsigned int bytes;
- char *iov_addr;
+ char __user *iov_addr;
bytes = min_t(unsigned int,
iov[iov_idx].iov_len - iov_off, bv_len);
@@ -1200,7 +1201,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
char *addr = page_address(bvec->bv_page);
int len = bmd->iovecs[i].bv_len;
- if (read && !err)
+ if (read)
memcpy(p, addr, len);
__free_page(bvec->bv_page);
@@ -1489,11 +1490,12 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
sector_t bio_sector_offset(struct bio *bio, unsigned short index,
unsigned int offset)
{
- unsigned int sector_sz = queue_hardsect_size(bio->bi_bdev->bd_disk->queue);
+ unsigned int sector_sz;
struct bio_vec *bv;
sector_t sectors;
int i;
+ sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);
sectors = 0;
if (index >= bio->bi_idx)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index f45dbc18dd17..2dfc6cdcebbe 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -76,7 +76,7 @@ int set_blocksize(struct block_device *bdev, int size)
return -EINVAL;
/* Size cannot be smaller than the size supported by the device */
- if (size < bdev_hardsect_size(bdev))
+ if (size < bdev_logical_block_size(bdev))
return -EINVAL;
/* Don't change the size if it is same as current */
@@ -106,7 +106,7 @@ EXPORT_SYMBOL(sb_set_blocksize);
int sb_min_blocksize(struct super_block *sb, int size)
{
- int minsize = bdev_hardsect_size(sb->s_bdev);
+ int minsize = bdev_logical_block_size(sb->s_bdev);
if (size < minsize)
size = minsize;
return sb_set_blocksize(sb, size);
@@ -1111,7 +1111,7 @@ EXPORT_SYMBOL(check_disk_change);
void bd_set_size(struct block_device *bdev, loff_t size)
{
- unsigned bsize = bdev_hardsect_size(bdev);
+ unsigned bsize = bdev_logical_block_size(bdev);
bdev->bd_inode->i_size = size;
while (bsize < PAGE_CACHE_SIZE) {
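
On the filesystem side, bdev_hardsect_size() becomes bdev_logical_block_size(): the logical block is the smallest unit the device can address, so a filesystem block size below it is invalid. The rule set_blocksize() enforces, restated as a hypothetical predicate (is_power_of_2() is from <linux/log2.h>):

    #include <linux/blkdev.h>
    #include <linux/log2.h>

    static int blocksize_ok(struct block_device *bdev, int size)
    {
            return size >= 512 && size <= PAGE_SIZE &&
                   is_power_of_2(size) &&
                   size >= bdev_logical_block_size(bdev);
    }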
diff --git a/fs/buffer.c b/fs/buffer.c
index 1864d0b63088..a3ef091a45bd 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1085,12 +1085,12 @@ static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
/* Size must be multiple of hard sectorsize */
- if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
+ if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
(size < 512 || size > PAGE_SIZE))) {
printk(KERN_ERR "getblk(): invalid block size %d requested\n",
size);
- printk(KERN_ERR "hardsect size: %d\n",
- bdev_hardsect_size(bdev));
+ printk(KERN_ERR "logical block size: %d\n",
+ bdev_logical_block_size(bdev));
dump_stack();
return NULL;
diff --git a/fs/coda/file.c b/fs/coda/file.c
index 6a347fbc998a..ffd42815fda1 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -47,6 +47,8 @@ coda_file_splice_read(struct file *coda_file, loff_t *ppos,
struct pipe_inode_info *pipe, size_t count,
unsigned int flags)
{
+ ssize_t (*splice_read)(struct file *, loff_t *,
+ struct pipe_inode_info *, size_t, unsigned int);
struct coda_file_info *cfi;
struct file *host_file;
@@ -54,10 +56,11 @@ coda_file_splice_read(struct file *coda_file, loff_t *ppos,
BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
host_file = cfi->cfi_container;
- if (!host_file->f_op || !host_file->f_op->splice_read)
- return -EINVAL;
+ splice_read = host_file->f_op->splice_read;
+ if (!splice_read)
+ splice_read = default_file_splice_read;
- return host_file->f_op->splice_read(host_file, ppos, pipe, count,flags);
+ return splice_read(host_file, ppos, pipe, count, flags);
}
static ssize_t
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 05763bbc2050..8b10b87dc01a 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1127,7 +1127,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
rw = WRITE_ODIRECT;
if (bdev)
- bdev_blkbits = blksize_bits(bdev_hardsect_size(bdev));
+ bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev));
if (offset & blocksize_mask) {
if (bdev)
diff --git a/fs/exofs/osd.c b/fs/exofs/osd.c
index b249ae97fb15..06ca92672eb5 100644
--- a/fs/exofs/osd.c
+++ b/fs/exofs/osd.c
@@ -50,10 +50,10 @@ int exofs_check_ok_resid(struct osd_request *or, u64 *in_resid, u64 *out_resid)
/* FIXME: should be include in osd_sense_info */
if (in_resid)
- *in_resid = or->in.req ? or->in.req->data_len : 0;
+ *in_resid = or->in.req ? or->in.req->resid_len : 0;
if (out_resid)
- *out_resid = or->out.req ? or->out.req->data_len : 0;
+ *out_resid = or->out.req ? or->out.req->resid_len : 0;
return ret;
}
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index d8b73d4abe3e..3c70d52afb10 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -1696,7 +1696,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
goto failed_mount;
}
- hblock = bdev_hardsect_size(sb->s_bdev);
+ hblock = bdev_logical_block_size(sb->s_bdev);
if (sb->s_blocksize != blocksize) {
/*
* Make sure the blocksize for the filesystem is larger
@@ -2120,7 +2120,7 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb,
}
blocksize = sb->s_blocksize;
- hblock = bdev_hardsect_size(bdev);
+ hblock = bdev_logical_block_size(bdev);
if (blocksize < hblock) {
printk(KERN_ERR
"EXT3-fs: blocksize too small for journal device.\n");
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index c191d0f65fed..f016707597a7 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3035,7 +3035,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
}
blocksize = sb->s_blocksize;
- hblock = bdev_hardsect_size(bdev);
+ hblock = bdev_logical_block_size(bdev);
if (blocksize < hblock) {
ext4_msg(sb, KERN_ERR,
"blocksize too small for journal device");
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index f234aba36fb8..cc34f271b3e7 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -525,11 +525,11 @@ static int init_sb(struct gfs2_sbd *sdp, int silent)
}
/* Set up the buffer cache and SB for real */
- if (sdp->sd_sb.sb_bsize < bdev_hardsect_size(sb->s_bdev)) {
+ if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) {
ret = -EINVAL;
fs_err(sdp, "FS block size (%u) is too small for device "
"block size (%u)\n",
- sdp->sd_sb.sb_bsize, bdev_hardsect_size(sb->s_bdev));
+ sdp->sd_sb.sb_bsize, bdev_logical_block_size(sb->s_bdev));
goto out;
}
if (sdp->sd_sb.sb_bsize > PAGE_SIZE) {
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 6122c7ee3648..de3239731db8 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -842,7 +842,7 @@ static void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
struct super_block *sb = sdp->sd_vfs;
struct block_device *bdev = sb->s_bdev;
const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize /
- bdev_hardsect_size(sb->s_bdev);
+ bdev_logical_block_size(sb->s_bdev);
u64 blk;
sector_t start = 0;
sector_t nr_sects = 0;
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 7f65b3be4aa9..a91f15b8673c 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -515,7 +515,7 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
if (sb->s_blocksize != blocksize) {
- int hw_blocksize = bdev_hardsect_size(sb->s_bdev);
+ int hw_blocksize = bdev_logical_block_size(sb->s_bdev);
if (blocksize < hw_blocksize) {
printk(KERN_ERR
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index f76951dcd4a6..6aa7c4713536 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -25,7 +25,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
-#include <linux/blkdev.h> /* For bdev_hardsect_size(). */
+#include <linux/blkdev.h> /* For bdev_logical_block_size(). */
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/vfs.h>
@@ -2785,13 +2785,13 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
goto err_out_now;
/* We support sector sizes up to the PAGE_CACHE_SIZE. */
- if (bdev_hardsect_size(sb->s_bdev) > PAGE_CACHE_SIZE) {
+ if (bdev_logical_block_size(sb->s_bdev) > PAGE_CACHE_SIZE) {
if (!silent)
ntfs_error(sb, "Device has unsupported sector size "
"(%i). The maximum supported sector "
"size on this architecture is %lu "
"bytes.",
- bdev_hardsect_size(sb->s_bdev),
+ bdev_logical_block_size(sb->s_bdev),
PAGE_CACHE_SIZE);
goto err_out_now;
}
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 4f85eceab376..09cc25d04611 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -1371,7 +1371,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
bdevname(reg->hr_bdev, reg->hr_dev_name);
- sectsize = bdev_hardsect_size(reg->hr_bdev);
+ sectsize = bdev_logical_block_size(reg->hr_bdev);
if (sectsize != reg->hr_block_bytes) {
mlog(ML_ERROR,
"blocksize %u incorrect for device, expected %d",
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 79ff8d9d37e0..5c6163f55039 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -713,7 +713,7 @@ static int ocfs2_sb_probe(struct super_block *sb,
*bh = NULL;
/* may be > 512 */
- *sector_size = bdev_hardsect_size(sb->s_bdev);
+ *sector_size = bdev_logical_block_size(sb->s_bdev);
if (*sector_size > OCFS2_MAX_BLOCKSIZE) {
mlog(ML_ERROR, "Hardware sector size too large: %d (max=%d)\n",
*sector_size, OCFS2_MAX_BLOCKSIZE);
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 99e33ef40be4..0af36085eb28 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -219,6 +219,13 @@ ssize_t part_size_show(struct device *dev,
return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects);
}
+ssize_t part_alignment_offset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hd_struct *p = dev_to_part(dev);
+ return sprintf(buf, "%llu\n", (unsigned long long)p->alignment_offset);
+}
+
ssize_t part_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -272,6 +279,7 @@ ssize_t part_fail_store(struct device *dev,
static DEVICE_ATTR(partition, S_IRUGO, part_partition_show, NULL);
static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL);
static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
+static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL);
static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
#ifdef CONFIG_FAIL_MAKE_REQUEST
static struct device_attribute dev_attr_fail =
@@ -282,6 +290,7 @@ static struct attribute *part_attrs[] = {
&dev_attr_partition.attr,
&dev_attr_start.attr,
&dev_attr_size.attr,
+ &dev_attr_alignment_offset.attr,
&dev_attr_stat.attr,
#ifdef CONFIG_FAIL_MAKE_REQUEST
&dev_attr_fail.attr,
@@ -383,6 +392,7 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
pdev = part_to_dev(p);
p->start_sect = start;
+ p->alignment_offset = queue_sector_alignment_offset(disk->queue, start);
p->nr_sects = len;
p->partno = partno;
p->policy = get_disk_ro(disk);
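
The new per-partition alignment_offset attribute reports how far a partition start sits from the device's preferred alignment: the start LBA is converted to bytes, the device's own alignment_offset is subtracted, and the remainder modulo io_min is taken. A worked example with assumed values, for a partition at the classic DOS offset of LBA 63 on a device with a 4096-byte io_min:

    sector_t start = 63;            /* partition start, 512-byte units */
    unsigned int io_min = 4096;     /* e.g. a 4 KiB physical block */
    unsigned int dev_off = 0;       /* the device's alignment_offset */
    unsigned int off;

    off = ((start << 9) - dev_off) & (io_min - 1);
    /* (63 * 512) % 4096 == 3584: misaligned by 3584 bytes */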
diff --git a/fs/partitions/ibm.c b/fs/partitions/ibm.c
index 46297683cd34..fc71aab08460 100644
--- a/fs/partitions/ibm.c
+++ b/fs/partitions/ibm.c
@@ -76,7 +76,7 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
Sector sect;
res = 0;
- blocksize = bdev_hardsect_size(bdev);
+ blocksize = bdev_logical_block_size(bdev);
if (blocksize <= 0)
goto out_exit;
i_size = i_size_read(bdev->bd_inode);
diff --git a/fs/partitions/msdos.c b/fs/partitions/msdos.c
index 796511886f28..0028d2ef0662 100644
--- a/fs/partitions/msdos.c
+++ b/fs/partitions/msdos.c
@@ -110,7 +110,7 @@ parse_extended(struct parsed_partitions *state, struct block_device *bdev,
Sector sect;
unsigned char *data;
u32 this_sector, this_size;
- int sector_size = bdev_hardsect_size(bdev) / 512;
+ int sector_size = bdev_logical_block_size(bdev) / 512;
int loopct = 0; /* number of links followed
without finding a data partition */
int i;
@@ -415,7 +415,7 @@ static struct {
int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
{
- int sector_size = bdev_hardsect_size(bdev) / 512;
+ int sector_size = bdev_logical_block_size(bdev) / 512;
Sector sect;
unsigned char *data;
struct partition *p;
diff --git a/fs/pipe.c b/fs/pipe.c
index 13414ec45b8d..f7dd21ad85a6 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -302,6 +302,20 @@ int generic_pipe_buf_confirm(struct pipe_inode_info *info,
return 0;
}
+/**
+ * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
+ * @pipe: the pipe that the buffer belongs to
+ * @buf: the buffer to put a reference to
+ *
+ * Description:
+ * This function releases a reference to @buf.
+ */
+void generic_pipe_buf_release(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
+ page_cache_release(buf->page);
+}
+
static const struct pipe_buf_operations anon_pipe_buf_ops = {
.can_merge = 1,
.map = generic_pipe_buf_map,
diff --git a/fs/read_write.c b/fs/read_write.c
index 9d1e76bb9ee1..6c8c55dec2bc 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -805,12 +805,6 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
goto out;
if (!(in_file->f_mode & FMODE_READ))
goto fput_in;
- retval = -EINVAL;
- in_inode = in_file->f_path.dentry->d_inode;
- if (!in_inode)
- goto fput_in;
- if (!in_file->f_op || !in_file->f_op->splice_read)
- goto fput_in;
retval = -ESPIPE;
if (!ppos)
ppos = &in_file->f_pos;
@@ -834,6 +828,7 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
retval = -EINVAL;
if (!out_file->f_op || !out_file->f_op->sendpage)
goto fput_out;
+ in_inode = in_file->f_path.dentry->d_inode;
out_inode = out_file->f_path.dentry->d_inode;
retval = rw_verify_area(WRITE, out_file, &out_file->f_pos, count);
if (retval < 0)
diff --git a/fs/splice.c b/fs/splice.c
index 666953d59a35..73766d24f97b 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -507,9 +507,131 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
return ret;
}
-
EXPORT_SYMBOL(generic_file_splice_read);
+static const struct pipe_buf_operations default_pipe_buf_ops = {
+ .can_merge = 0,
+ .map = generic_pipe_buf_map,
+ .unmap = generic_pipe_buf_unmap,
+ .confirm = generic_pipe_buf_confirm,
+ .release = generic_pipe_buf_release,
+ .steal = generic_pipe_buf_steal,
+ .get = generic_pipe_buf_get,
+};
+
+static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
+ unsigned long vlen, loff_t offset)
+{
+ mm_segment_t old_fs;
+ loff_t pos = offset;
+ ssize_t res;
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ /* The cast to a user pointer is valid due to the set_fs() */
+ res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
+ set_fs(old_fs);
+
+ return res;
+}
+
+static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
+ loff_t pos)
+{
+ mm_segment_t old_fs;
+ ssize_t res;
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ /* The cast to a user pointer is valid due to the set_fs() */
+ res = vfs_write(file, (const char __user *)buf, count, &pos);
+ set_fs(old_fs);
+
+ return res;
+}
+
+ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
+{
+ unsigned int nr_pages;
+ unsigned int nr_freed;
+ size_t offset;
+ struct page *pages[PIPE_BUFFERS];
+ struct partial_page partial[PIPE_BUFFERS];
+ struct iovec vec[PIPE_BUFFERS];
+ pgoff_t index;
+ ssize_t res;
+ size_t this_len;
+ int error;
+ int i;
+ struct splice_pipe_desc spd = {
+ .pages = pages,
+ .partial = partial,
+ .flags = flags,
+ .ops = &default_pipe_buf_ops,
+ .spd_release = spd_release_page,
+ };
+
+ index = *ppos >> PAGE_CACHE_SHIFT;
+ offset = *ppos & ~PAGE_CACHE_MASK;
+ nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+
+ for (i = 0; i < nr_pages && i < PIPE_BUFFERS && len; i++) {
+ struct page *page;
+
+ page = alloc_page(GFP_USER);
+ error = -ENOMEM;
+ if (!page)
+ goto err;
+
+ this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
+ vec[i].iov_base = (void __user *) page_address(page);
+ vec[i].iov_len = this_len;
+ pages[i] = page;
+ spd.nr_pages++;
+ len -= this_len;
+ offset = 0;
+ }
+
+ res = kernel_readv(in, vec, spd.nr_pages, *ppos);
+ if (res < 0) {
+ error = res;
+ goto err;
+ }
+
+ error = 0;
+ if (!res)
+ goto err;
+
+ nr_freed = 0;
+ for (i = 0; i < spd.nr_pages; i++) {
+ this_len = min_t(size_t, vec[i].iov_len, res);
+ partial[i].offset = 0;
+ partial[i].len = this_len;
+ if (!this_len) {
+ __free_page(pages[i]);
+ pages[i] = NULL;
+ nr_freed++;
+ }
+ res -= this_len;
+ }
+ spd.nr_pages -= nr_freed;
+
+ res = splice_to_pipe(pipe, &spd);
+ if (res > 0)
+ *ppos += res;
+
+ return res;
+
+err:
+ for (i = 0; i < spd.nr_pages; i++)
+ __free_page(pages[i]);
+
+ return error;
+}
+EXPORT_SYMBOL(default_file_splice_read);
+
/*
* Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
* using sendpage(). Return the number of bytes sent.
@@ -881,6 +1003,36 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
EXPORT_SYMBOL(generic_file_splice_write);
+static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
+ struct splice_desc *sd)
+{
+ int ret;
+ void *data;
+
+ ret = buf->ops->confirm(pipe, buf);
+ if (ret)
+ return ret;
+
+ data = buf->ops->map(pipe, buf, 0);
+ ret = kernel_write(sd->u.file, data + buf->offset, sd->len, sd->pos);
+ buf->ops->unmap(pipe, buf, data);
+
+ return ret;
+}
+
+static ssize_t default_file_splice_write(struct pipe_inode_info *pipe,
+ struct file *out, loff_t *ppos,
+ size_t len, unsigned int flags)
+{
+ ssize_t ret;
+
+ ret = splice_from_pipe(pipe, out, ppos, len, flags, write_pipe_buf);
+ if (ret > 0)
+ *ppos += ret;
+
+ return ret;
+}
+
/**
* generic_splice_sendpage - splice data from a pipe to a socket
* @pipe: pipe to splice from
@@ -908,11 +1060,10 @@ EXPORT_SYMBOL(generic_splice_sendpage);
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
loff_t *ppos, size_t len, unsigned int flags)
{
+ ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
+ loff_t *, size_t, unsigned int);
int ret;
- if (unlikely(!out->f_op || !out->f_op->splice_write))
- return -EINVAL;
-
if (unlikely(!(out->f_mode & FMODE_WRITE)))
return -EBADF;
@@ -923,7 +1074,11 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
if (unlikely(ret < 0))
return ret;
- return out->f_op->splice_write(pipe, out, ppos, len, flags);
+ splice_write = out->f_op->splice_write;
+ if (!splice_write)
+ splice_write = default_file_splice_write;
+
+ return splice_write(pipe, out, ppos, len, flags);
}
/*
@@ -933,11 +1088,10 @@ static long do_splice_to(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
+ ssize_t (*splice_read)(struct file *, loff_t *,
+ struct pipe_inode_info *, size_t, unsigned int);
int ret;
- if (unlikely(!in->f_op || !in->f_op->splice_read))
- return -EINVAL;
-
if (unlikely(!(in->f_mode & FMODE_READ)))
return -EBADF;
@@ -945,7 +1099,11 @@ static long do_splice_to(struct file *in, loff_t *ppos,
if (unlikely(ret < 0))
return ret;
- return in->f_op->splice_read(in, ppos, pipe, len, flags);
+ splice_read = in->f_op->splice_read;
+ if (!splice_read)
+ splice_read = default_file_splice_read;
+
+ return splice_read(in, ppos, pipe, len, flags);
}
/**
@@ -1112,6 +1270,9 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
return ret;
}
+static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
+ struct pipe_inode_info *opipe,
+ size_t len, unsigned int flags);
/*
* After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
* location, so checking ->i_pipe is not enough to verify that this is a
@@ -1132,12 +1293,32 @@ static long do_splice(struct file *in, loff_t __user *off_in,
struct file *out, loff_t __user *off_out,
size_t len, unsigned int flags)
{
- struct pipe_inode_info *pipe;
+ struct pipe_inode_info *ipipe;
+ struct pipe_inode_info *opipe;
loff_t offset, *off;
long ret;
- pipe = pipe_info(in->f_path.dentry->d_inode);
- if (pipe) {
+ ipipe = pipe_info(in->f_path.dentry->d_inode);
+ opipe = pipe_info(out->f_path.dentry->d_inode);
+
+ if (ipipe && opipe) {
+ if (off_in || off_out)
+ return -ESPIPE;
+
+ if (!(in->f_mode & FMODE_READ))
+ return -EBADF;
+
+ if (!(out->f_mode & FMODE_WRITE))
+ return -EBADF;
+
+ /* Splicing to self would be fun, but... */
+ if (ipipe == opipe)
+ return -EINVAL;
+
+ return splice_pipe_to_pipe(ipipe, opipe, len, flags);
+ }
+
+ if (ipipe) {
if (off_in)
return -ESPIPE;
if (off_out) {
@@ -1149,7 +1330,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
} else
off = &out->f_pos;
- ret = do_splice_from(pipe, out, off, len, flags);
+ ret = do_splice_from(ipipe, out, off, len, flags);
if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
ret = -EFAULT;
@@ -1157,8 +1338,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
return ret;
}
- pipe = pipe_info(out->f_path.dentry->d_inode);
- if (pipe) {
+ if (opipe) {
if (off_out)
return -ESPIPE;
if (off_in) {
@@ -1170,7 +1350,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
} else
off = &in->f_pos;
- ret = do_splice_to(in, off, pipe, len, flags);
+ ret = do_splice_to(in, off, opipe, len, flags);
if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
ret = -EFAULT;
@@ -1511,7 +1691,7 @@ SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
* Make sure there's data to read. Wait for input if we can, otherwise
* return an appropriate error.
*/
-static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
int ret;
@@ -1549,7 +1729,7 @@ static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
* Make sure there's writable room. Wait for room if we can, otherwise
* return an appropriate error.
*/
-static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
int ret;
@@ -1587,6 +1767,124 @@ static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
}
/*
+ * Splice contents of ipipe to opipe.
+ */
+static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
+ struct pipe_inode_info *opipe,
+ size_t len, unsigned int flags)
+{
+ struct pipe_buffer *ibuf, *obuf;
+ int ret = 0, nbuf;
+ bool input_wakeup = false;
+
+
+retry:
+ ret = ipipe_prep(ipipe, flags);
+ if (ret)
+ return ret;
+
+ ret = opipe_prep(opipe, flags);
+ if (ret)
+ return ret;
+
+ /*
+ * Potential ABBA deadlock, work around it by ordering lock
+ * grabbing by pipe info address. Otherwise two different processes
+ * could deadlock (one doing tee from A -> B, the other from B -> A).
+ */
+ pipe_double_lock(ipipe, opipe);
+
+ do {
+ if (!opipe->readers) {
+ send_sig(SIGPIPE, current, 0);
+ if (!ret)
+ ret = -EPIPE;
+ break;
+ }
+
+ if (!ipipe->nrbufs && !ipipe->writers)
+ break;
+
+ /*
+ * Cannot make any progress, because either the input
+ * pipe is empty or the output pipe is full.
+ */
+ if (!ipipe->nrbufs || opipe->nrbufs >= PIPE_BUFFERS) {
+ /* Already processed some buffers, break */
+ if (ret)
+ break;
+
+ if (flags & SPLICE_F_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ /*
+ * We raced with another reader/writer and haven't
+ * managed to process any buffers. A zero return
+ * value means EOF, so retry instead.
+ */
+ pipe_unlock(ipipe);
+ pipe_unlock(opipe);
+ goto retry;
+ }
+
+ ibuf = ipipe->bufs + ipipe->curbuf;
+ nbuf = (opipe->curbuf + opipe->nrbufs) % PIPE_BUFFERS;
+ obuf = opipe->bufs + nbuf;
+
+ if (len >= ibuf->len) {
+ /*
+ * Simply move the whole buffer from ipipe to opipe
+ */
+ *obuf = *ibuf;
+ ibuf->ops = NULL;
+ opipe->nrbufs++;
+ ipipe->curbuf = (ipipe->curbuf + 1) % PIPE_BUFFERS;
+ ipipe->nrbufs--;
+ input_wakeup = true;
+ } else {
+ /*
+ * Get a reference to this pipe buffer,
+ * so we can copy the contents over.
+ */
+ ibuf->ops->get(ipipe, ibuf);
+ *obuf = *ibuf;
+
+ /*
+ * Don't inherit the gift flag, we need to
+ * prevent multiple steals of this page.
+ */
+ obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
+
+ obuf->len = len;
+ opipe->nrbufs++;
+ ibuf->offset += obuf->len;
+ ibuf->len -= obuf->len;
+ }
+ ret += obuf->len;
+ len -= obuf->len;
+ } while (len);
+
+ pipe_unlock(ipipe);
+ pipe_unlock(opipe);
+
+ /*
+ * If we put data in the output pipe, wakeup any potential readers.
+ */
+ if (ret > 0) {
+ smp_mb();
+ if (waitqueue_active(&opipe->wait))
+ wake_up_interruptible(&opipe->wait);
+ kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
+ }
+ if (input_wakeup)
+ wakeup_pipe_writers(ipipe);
+
+ return ret;
+}
+
+/*
* Link contents of ipipe to opipe.
*/
static int link_pipe(struct pipe_inode_info *ipipe,
@@ -1690,9 +1988,9 @@ static long do_tee(struct file *in, struct file *out, size_t len,
* Keep going, unless we encounter an error. The ipipe/opipe
* ordering doesn't really matter.
*/
- ret = link_ipipe_prep(ipipe, flags);
+ ret = ipipe_prep(ipipe, flags);
if (!ret) {
- ret = link_opipe_prep(opipe, flags);
+ ret = opipe_prep(opipe, flags);
if (!ret)
ret = link_pipe(ipipe, opipe, len, flags);
}
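
With splice_pipe_to_pipe() in place, splice(2) accepts a pipe on both ends (moving buffers rather than copying as tee(2) does), and the default_file_splice_read/write fallbacks let do_splice_to()/do_splice_from() handle files whose f_op lacks splice methods. A small user-space sketch of the pipe-to-pipe case, assuming a kernel with this merge:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int a[2], b[2];
            char buf[4];

            if (pipe(a) || pipe(b))
                    return 1;
            write(a[1], "abc", 3);
            /* move 3 bytes from pipe a to pipe b without a user copy */
            if (splice(a[0], NULL, b[1], NULL, 3, 0) != 3)
                    return 1;
            read(b[0], buf, 3);
            buf[3] = '\0';
            printf("%s\n", buf);    /* prints "abc" */
            return 0;
    }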
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 72348cc855a4..0ba44107d8f1 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1915,7 +1915,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) {
ret = udf_load_vrs(sb, &uopt, silent, &fileset);
} else {
- uopt.blocksize = bdev_hardsect_size(sb->s_bdev);
+ uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
ret = udf_load_vrs(sb, &uopt, silent, &fileset);
if (!ret && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) {
if (!silent)
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index e28800a9f2b5..1418b916fc27 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1501,7 +1501,7 @@ xfs_setsize_buftarg_early(
struct block_device *bdev)
{
return xfs_setsize_buftarg_flags(btp,
- PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
+ PAGE_CACHE_SIZE, bdev_logical_block_size(bdev), 0);
}
int
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 7b214fd672a2..12737be58601 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -218,12 +218,12 @@ struct bio {
#define bio_sectors(bio) ((bio)->bi_size >> 9)
#define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
-static inline unsigned int bio_cur_sectors(struct bio *bio)
+static inline unsigned int bio_cur_bytes(struct bio *bio)
{
if (bio->bi_vcnt)
- return bio_iovec(bio)->bv_len >> 9;
+ return bio_iovec(bio)->bv_len;
else /* dataless requests such as discard */
- return bio->bi_size >> 9;
+ return bio->bi_size;
}
static inline void *bio_data(struct bio *bio)
@@ -279,7 +279,7 @@ static inline int bio_has_allocated_vec(struct bio *bio)
#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
- __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
+ __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
#define BIO_SEG_BOUNDARY(q, b1, b2) \
BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
@@ -506,7 +506,7 @@ static inline int bio_has_data(struct bio *bio)
}
/*
- * BIO list managment for use by remapping drivers (e.g. DM or MD).
+ * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
*
* A bio_list anchors a singly-linked list of bios chained through the bi_next
* member of the bio. The bio_list also caches the last list member to allow
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b4f71f1a4af7..ebdfde8fe556 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -166,19 +166,9 @@ struct request {
enum rq_cmd_type_bits cmd_type;
unsigned long atomic_flags;
- /* Maintain bio traversal state for part by part I/O submission.
- * hard_* are block layer internals, no driver should touch them!
- */
-
- sector_t sector; /* next sector to submit */
- sector_t hard_sector; /* next sector to complete */
- unsigned long nr_sectors; /* no. of sectors left to submit */
- unsigned long hard_nr_sectors; /* no. of sectors left to complete */
- /* no. of sectors left to submit in the current segment */
- unsigned int current_nr_sectors;
-
- /* no. of sectors left to complete in the current segment */
- unsigned int hard_cur_sectors;
+ /* the following two fields are internal, NEVER access directly */
+ sector_t __sector; /* sector cursor */
+ unsigned int __data_len; /* total data len */
struct bio *bio;
struct bio *biotail;
@@ -211,8 +201,8 @@ struct request {
unsigned short ioprio;
- void *special;
- char *buffer;
+ void *special; /* opaque pointer available for LLD use */
+ char *buffer; /* kaddr of the current segment if available */
int tag;
int errors;
@@ -226,10 +216,9 @@ struct request {
unsigned char __cmd[BLK_MAX_CDB];
unsigned char *cmd;
- unsigned int data_len;
unsigned int extra_len; /* length of alignment and padding */
unsigned int sense_len;
- void *data;
+ unsigned int resid_len; /* residual count */
void *sense;
unsigned long deadline;
@@ -318,6 +307,26 @@ struct blk_cmd_filter {
struct kobject kobj;
};
+struct queue_limits {
+ unsigned long bounce_pfn;
+ unsigned long seg_boundary_mask;
+
+ unsigned int max_hw_sectors;
+ unsigned int max_sectors;
+ unsigned int max_segment_size;
+ unsigned int physical_block_size;
+ unsigned int alignment_offset;
+ unsigned int io_min;
+ unsigned int io_opt;
+
+ unsigned short logical_block_size;
+ unsigned short max_hw_segments;
+ unsigned short max_phys_segments;
+
+ unsigned char misaligned;
+ unsigned char no_cluster;
+};
+
struct request_queue
{
/*
@@ -369,7 +378,6 @@ struct request_queue
/*
* queue needs bounce pages for pages above this limit
*/
- unsigned long bounce_pfn;
gfp_t bounce_gfp;
/*
@@ -398,14 +406,6 @@ struct request_queue
unsigned int nr_congestion_off;
unsigned int nr_batching;
- unsigned int max_sectors;
- unsigned int max_hw_sectors;
- unsigned short max_phys_segments;
- unsigned short max_hw_segments;
- unsigned short hardsect_size;
- unsigned int max_segment_size;
-
- unsigned long seg_boundary_mask;
void *dma_drain_buffer;
unsigned int dma_drain_size;
unsigned int dma_pad_mask;
@@ -415,12 +415,14 @@ struct request_queue
struct list_head tag_busy_list;
unsigned int nr_sorted;
- unsigned int in_flight;
+ unsigned int in_flight[2];
unsigned int rq_timeout;
struct timer_list timeout;
struct list_head timeout_list;
+ struct queue_limits limits;
+
/*
* sg stuff
*/
@@ -522,6 +524,11 @@ static inline void queue_flag_clear_unlocked(unsigned int flag,
__clear_bit(flag, &q->queue_flags);
}
+static inline int queue_in_flight(struct request_queue *q)
+{
+ return q->in_flight[0] + q->in_flight[1];
+}
+
static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
WARN_ON_ONCE(!queue_is_locked(q));
@@ -752,10 +759,17 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
+extern struct request *blk_make_request(struct request_queue *, struct bio *,
+ gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
+extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+ struct bio_set *bs, gfp_t gfp_mask,
+ int (*bio_ctr)(struct bio *, struct bio *, void *),
+ void *data);
+extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
extern void blk_plug_device(struct request_queue *);
@@ -768,12 +782,6 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
struct scsi_ioctl_command __user *);
/*
- * Temporary export, until SCSI gets fixed up.
- */
-extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
- struct bio *bio);
-
-/*
* A queue has just exited congestion. Note this in the global counter of
* congested queues, and wake up anyone who was waiting for requests to be
* put back.
@@ -798,7 +806,6 @@ extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *);
extern void blk_run_queue(struct request_queue *);
-extern void blk_start_queueing(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *,
struct rq_map_data *, void __user *, unsigned long,
gfp_t);
@@ -831,41 +838,73 @@ static inline void blk_run_address_space(struct address_space *mapping)
blk_run_backing_dev(mapping->backing_dev_info, NULL);
}
-extern void blkdev_dequeue_request(struct request *req);
+/*
+ * blk_rq_pos() : the current sector
+ * blk_rq_bytes() : bytes left in the entire request
+ * blk_rq_cur_bytes() : bytes left in the current segment
+ * blk_rq_sectors() : sectors left in the entire request
+ * blk_rq_cur_sectors() : sectors left in the current segment
+ */
+static inline sector_t blk_rq_pos(const struct request *rq)
+{
+ return rq->__sector;
+}
+
+static inline unsigned int blk_rq_bytes(const struct request *rq)
+{
+ return rq->__data_len;
+}
+
+static inline int blk_rq_cur_bytes(const struct request *rq)
+{
+ return rq->bio ? bio_cur_bytes(rq->bio) : 0;
+}
+
+static inline unsigned int blk_rq_sectors(const struct request *rq)
+{
+ return blk_rq_bytes(rq) >> 9;
+}
+
+static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+{
+ return blk_rq_cur_bytes(rq) >> 9;
+}
+
+/*
+ * Request issue related functions.
+ */
+extern struct request *blk_peek_request(struct request_queue *q);
+extern void blk_start_request(struct request *rq);
+extern struct request *blk_fetch_request(struct request_queue *q);
/*
- * blk_end_request() and friends.
- * __blk_end_request() and end_request() must be called with
- * the request queue spinlock acquired.
+ * Request completion related functions.
+ *
+ * blk_update_request() completes given number of bytes and updates
+ * the request without completing it.
+ *
+ * blk_end_request() and friends. __blk_end_request() must be called
+ * with the request queue spinlock acquired.
*
* Several drivers define their own end_request and call
* blk_end_request() for parts of the original function.
* This prevents code duplication in drivers.
*/
-extern int blk_end_request(struct request *rq, int error,
- unsigned int nr_bytes);
-extern int __blk_end_request(struct request *rq, int error,
- unsigned int nr_bytes);
-extern int blk_end_bidi_request(struct request *rq, int error,
- unsigned int nr_bytes, unsigned int bidi_bytes);
-extern void end_request(struct request *, int);
-extern int blk_end_request_callback(struct request *rq, int error,
- unsigned int nr_bytes,
- int (drv_callback)(struct request *));
+extern bool blk_update_request(struct request *rq, int error,
+ unsigned int nr_bytes);
+extern bool blk_end_request(struct request *rq, int error,
+ unsigned int nr_bytes);
+extern void blk_end_request_all(struct request *rq, int error);
+extern bool blk_end_request_cur(struct request *rq, int error);
+extern bool __blk_end_request(struct request *rq, int error,
+ unsigned int nr_bytes);
+extern void __blk_end_request_all(struct request *rq, int error);
+extern bool __blk_end_request_cur(struct request *rq, int error);
+
extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_abort_queue(struct request_queue *);
-extern void blk_update_request(struct request *rq, int error,
- unsigned int nr_bytes);
-
-/*
- * blk_end_request() takes bytes instead of sectors as a complete size.
- * blk_rq_bytes() returns bytes left to complete in the entire request.
- * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
- */
-extern unsigned int blk_rq_bytes(struct request *rq);
-extern unsigned int blk_rq_cur_bytes(struct request *rq);
/*
* Access functions for manipulating queue properties
@@ -877,10 +916,20 @@ extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
-extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
+extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_alignment_offset(struct request_queue *q,
+ unsigned int alignment);
+extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
+extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
+extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ sector_t offset);
+extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
+ sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
@@ -967,19 +1016,87 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
-static inline int queue_hardsect_size(struct request_queue *q)
+static inline unsigned long queue_bounce_pfn(struct request_queue *q)
+{
+ return q->limits.bounce_pfn;
+}
+
+static inline unsigned long queue_segment_boundary(struct request_queue *q)
+{
+ return q->limits.seg_boundary_mask;
+}
+
+static inline unsigned int queue_max_sectors(struct request_queue *q)
+{
+ return q->limits.max_sectors;
+}
+
+static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
+{
+ return q->limits.max_hw_sectors;
+}
+
+static inline unsigned short queue_max_hw_segments(struct request_queue *q)
+{
+ return q->limits.max_hw_segments;
+}
+
+static inline unsigned short queue_max_phys_segments(struct request_queue *q)
+{
+ return q->limits.max_phys_segments;
+}
+
+static inline unsigned int queue_max_segment_size(struct request_queue *q)
+{
+ return q->limits.max_segment_size;
+}
+
+static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
int retval = 512;
- if (q && q->hardsect_size)
- retval = q->hardsect_size;
+ if (q && q->limits.logical_block_size)
+ retval = q->limits.logical_block_size;
return retval;
}
-static inline int bdev_hardsect_size(struct block_device *bdev)
+static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
+{
+ return queue_logical_block_size(bdev_get_queue(bdev));
+}
+
+static inline unsigned int queue_physical_block_size(struct request_queue *q)
+{
+ return q->limits.physical_block_size;
+}
+
+static inline unsigned int queue_io_min(struct request_queue *q)
+{
+ return q->limits.io_min;
+}
+
+static inline unsigned int queue_io_opt(struct request_queue *q)
+{
+ return q->limits.io_opt;
+}
+
+static inline int queue_alignment_offset(struct request_queue *q)
+{
+ if (q && q->limits.misaligned)
+ return -1;
+
+ if (q && q->limits.alignment_offset)
+ return q->limits.alignment_offset;
+
+ return 0;
+}
+
+static inline int queue_sector_alignment_offset(struct request_queue *q,
+ sector_t sector)
{
- return queue_hardsect_size(bdev_get_queue(bdev));
+ return ((sector << 9) - q->limits.alignment_offset)
+ & (q->limits.io_min - 1);
}
static inline int queue_dma_alignment(struct request_queue *q)
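Taken together, the inline accessors above are thin readers of the new queue_limits container; the setters added earlier in this header fill it in. A minimal sketch, assuming a hypothetical mydrv_set_topology() and illustrative 512/4096/65536 values:

        static void mydrv_set_topology(struct request_queue *q)
        {
                blk_queue_logical_block_size(q, 512);   /* addressable unit */
                blk_queue_physical_block_size(q, 4096); /* media atom */
                blk_queue_io_min(q, 4096);              /* preferred minimum I/O */
                blk_queue_io_opt(q, 65536);             /* optimal I/O size */
                blk_queue_alignment_offset(q, 0);       /* LBA 0 naturally aligned */
        }

With those limits, queue_sector_alignment_offset(q, 7) evaluates to ((7 << 9) - 0) & (4096 - 1) = 3584, flagging sector 7 as starting 3584 bytes past an io_min boundary, while sector 8 yields 0 and is aligned.
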
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index ded2d7c42668..49c2362977fd 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -149,7 +149,7 @@ struct io_restrictions {
unsigned max_hw_sectors;
unsigned max_sectors;
unsigned max_segment_size;
- unsigned short hardsect_size;
+ unsigned short logical_block_size;
unsigned short max_hw_segments;
unsigned short max_phys_segments;
unsigned char no_cluster; /* inverted so that 0 is default */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index c59b769f62b0..1cb3372e65d8 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -103,10 +103,8 @@ extern int elv_merge(struct request_queue *, struct request **, struct bio *);
extern void elv_merge_requests(struct request_queue *, struct request *,
struct request *);
extern void elv_merged_request(struct request_queue *, struct request *, int);
-extern void elv_dequeue_request(struct request_queue *, struct request *);
extern void elv_requeue_request(struct request_queue *, struct request *);
extern int elv_queue_empty(struct request_queue *);
-extern struct request *elv_next_request(struct request_queue *q);
extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *);
extern int elv_register_queue(struct request_queue *q);
@@ -171,7 +169,7 @@ enum {
ELV_MQUEUE_MUST,
};
-#define rq_end_sector(rq) ((rq)->sector + (rq)->nr_sectors)
+#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
/*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3b534e527e09..83d6b4397245 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2205,6 +2205,8 @@ extern int generic_segment_checks(const struct iovec *iov,
/* fs/splice.c */
extern ssize_t generic_file_splice_read(struct file *, loff_t *,
struct pipe_inode_info *, size_t, unsigned int);
+extern ssize_t default_file_splice_read(struct file *, loff_t *,
+ struct pipe_inode_info *, size_t, unsigned int);
extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
struct file *, loff_t *, size_t, unsigned int);
extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index a1a28caed23d..149fda264c86 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -90,6 +90,7 @@ struct disk_stats {
struct hd_struct {
sector_t start_sect;
sector_t nr_sects;
+ sector_t alignment_offset;
struct device __dev;
struct kobject *holder_dir;
int policy, partno;
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 08b987bccf89..dd05434fa45f 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -64,7 +64,7 @@ struct cfq_io_context {
* and kmalloc'ed. These could be shared between processes.
*/
struct io_context {
- atomic_t refcount;
+ atomic_long_t refcount;
atomic_t nr_tasks;
/* all the fields below are protected by this lock */
@@ -91,8 +91,8 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
* if ref count is zero, don't allow sharing (ioc is going away, it's
* a race).
*/
- if (ioc && atomic_inc_not_zero(&ioc->refcount)) {
+ if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
atomic_inc(&ioc->nr_tasks);
return ioc;
}
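
Widening refcount from atomic_t to atomic_long_t gives a 64-bit counter on 64-bit kernels, so long-lived io_contexts that accumulate very many references can no longer wrap the count. A sketch of the matching get/put pairing (example_ioc_get_put() is hypothetical; the real free path lives in put_io_context()):

        static void example_ioc_get_put(struct io_context *ioc)
        {
                atomic_long_inc(&ioc->refcount);               /* get */
                if (atomic_long_dec_and_test(&ioc->refcount))  /* put */
                        ; /* last reference dropped: ioc may be freed */
        }
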
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 40725447f5e0..66c194e2d9b9 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -56,8 +56,7 @@ struct loop_device {
gfp_t old_gfp_mask;
spinlock_t lo_lock;
- struct bio *lo_bio;
- struct bio *lo_biotail;
+ struct bio_list lo_bio_list;
int lo_state;
struct mutex lo_ctl_mutex;
struct task_struct *lo_thread;
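
The open-coded lo_bio/lo_biotail pair becomes a generic struct bio_list, so loop can queue and drain bios with the shared helpers instead of hand-maintained head/tail pointers. A sketch under that assumption (example_queue() and example_dequeue() are hypothetical):

        static void example_queue(struct loop_device *lo, struct bio *bio)
        {
                bio_list_add(&lo->lo_bio_list, bio);     /* append at tail */
        }

        static struct bio *example_dequeue(struct loop_device *lo)
        {
                return bio_list_pop(&lo->lo_bio_list);   /* detach from head */
        }
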
diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h
deleted file mode 100644
index 1f76b1ebf627..000000000000
--- a/include/linux/mg_disk.h
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * include/linux/mg_disk.c
- *
- * Support for the mGine m[g]flash IO mode.
- * Based on legacy hd.c
- *
- * (c) 2008 mGine Co.,LTD
- * (c) 2008 unsik Kim <donari75@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MG_DISK_H__
-#define __MG_DISK_H__
-
-#include <linux/blkdev.h>
-#include <linux/ata.h>
-
-/* name for block device */
-#define MG_DISK_NAME "mgd"
-/* name for platform device */
-#define MG_DEV_NAME "mg_disk"
-
-#define MG_DISK_MAJ 0
-#define MG_DISK_MAX_PART 16
-#define MG_SECTOR_SIZE 512
-#define MG_MAX_SECTS 256
-
-/* Register offsets */
-#define MG_BUFF_OFFSET 0x8000
-#define MG_STORAGE_BUFFER_SIZE 0x200
-#define MG_REG_OFFSET 0xC000
-#define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */
-#define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */
-#define MG_REG_SECT_CNT (MG_REG_OFFSET + 4)
-#define MG_REG_SECT_NUM (MG_REG_OFFSET + 6)
-#define MG_REG_CYL_LOW (MG_REG_OFFSET + 8)
-#define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA)
-#define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC)
-#define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */
-#define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */
-#define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10)
-#define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12)
-
-/* "Drive Select/Head Register" bit values */
-#define MG_REG_HEAD_MUST_BE_ON 0xA0 /* These 2 bits are always on */
-#define MG_REG_HEAD_DRIVE_MASTER (0x00 | MG_REG_HEAD_MUST_BE_ON)
-#define MG_REG_HEAD_DRIVE_SLAVE (0x10 | MG_REG_HEAD_MUST_BE_ON)
-#define MG_REG_HEAD_LBA_MODE (0x40 | MG_REG_HEAD_MUST_BE_ON)
-
-
-/* "Device Control Register" bit values */
-#define MG_REG_CTRL_INTR_ENABLE 0x0
-#define MG_REG_CTRL_INTR_DISABLE (0x1<<1)
-#define MG_REG_CTRL_RESET (0x1<<2)
-#define MG_REG_CTRL_INTR_POLA_ACTIVE_HIGH 0x0
-#define MG_REG_CTRL_INTR_POLA_ACTIVE_LOW (0x1<<4)
-#define MG_REG_CTRL_DPD_POLA_ACTIVE_LOW 0x0
-#define MG_REG_CTRL_DPD_POLA_ACTIVE_HIGH (0x1<<5)
-#define MG_REG_CTRL_DPD_DISABLE 0x0
-#define MG_REG_CTRL_DPD_ENABLE (0x1<<6)
-
-/* Status register bit */
-/* error bit in status register */
-#define MG_REG_STATUS_BIT_ERROR 0x01
-/* corrected error in status register */
-#define MG_REG_STATUS_BIT_CORRECTED_ERROR 0x04
-/* data request bit in status register */
-#define MG_REG_STATUS_BIT_DATA_REQ 0x08
-/* DSC - Drive Seek Complete */
-#define MG_REG_STATUS_BIT_SEEK_DONE 0x10
-/* DWF - Drive Write Fault */
-#define MG_REG_STATUS_BIT_WRITE_FAULT 0x20
-#define MG_REG_STATUS_BIT_READY 0x40
-#define MG_REG_STATUS_BIT_BUSY 0x80
-
-/* handy status */
-#define MG_STAT_READY (MG_REG_STATUS_BIT_READY | MG_REG_STATUS_BIT_SEEK_DONE)
-#define MG_READY_OK(s) (((s) & (MG_STAT_READY | \
- (MG_REG_STATUS_BIT_BUSY | \
- MG_REG_STATUS_BIT_WRITE_FAULT | \
- MG_REG_STATUS_BIT_ERROR))) == MG_STAT_READY)
-
-/* Error register */
-#define MG_REG_ERR_AMNF 0x01
-#define MG_REG_ERR_ABRT 0x04
-#define MG_REG_ERR_IDNF 0x10
-#define MG_REG_ERR_UNC 0x40
-#define MG_REG_ERR_BBK 0x80
-
-/* error code for others */
-#define MG_ERR_NONE 0
-#define MG_ERR_TIMEOUT 0x100
-#define MG_ERR_INIT_STAT 0x101
-#define MG_ERR_TRANSLATION 0x102
-#define MG_ERR_CTRL_RST 0x103
-#define MG_ERR_INV_STAT 0x104
-#define MG_ERR_RSTOUT 0x105
-
-#define MG_MAX_ERRORS 6 /* Max read/write errors */
-
-/* command */
-#define MG_CMD_RD 0x20
-#define MG_CMD_WR 0x30
-#define MG_CMD_SLEEP 0x99
-#define MG_CMD_WAKEUP 0xC3
-#define MG_CMD_ID 0xEC
-#define MG_CMD_WR_CONF 0x3C
-#define MG_CMD_RD_CONF 0x40
-
-/* operation mode */
-#define MG_OP_CASCADE (1 << 0)
-#define MG_OP_CASCADE_SYNC_RD (1 << 1)
-#define MG_OP_CASCADE_SYNC_WR (1 << 2)
-#define MG_OP_INTERLEAVE (1 << 3)
-
-/* synchronous */
-#define MG_BURST_LAT_4 (3 << 4)
-#define MG_BURST_LAT_5 (4 << 4)
-#define MG_BURST_LAT_6 (5 << 4)
-#define MG_BURST_LAT_7 (6 << 4)
-#define MG_BURST_LAT_8 (7 << 4)
-#define MG_BURST_LEN_4 (1 << 1)
-#define MG_BURST_LEN_8 (2 << 1)
-#define MG_BURST_LEN_16 (3 << 1)
-#define MG_BURST_LEN_32 (4 << 1)
-#define MG_BURST_LEN_CONT (0 << 1)
-
-/* timeout value (unit: ms) */
-#define MG_TMAX_CONF_TO_CMD 1
-#define MG_TMAX_WAIT_RD_DRQ 10
-#define MG_TMAX_WAIT_WR_DRQ 500
-#define MG_TMAX_RST_TO_BUSY 10
-#define MG_TMAX_HDRST_TO_RDY 500
-#define MG_TMAX_SWRST_TO_RDY 500
-#define MG_TMAX_RSTOUT 3000
-
-/* device attribution */
-/* use mflash as boot device */
-#define MG_BOOT_DEV (1 << 0)
-/* use mflash as storage device */
-#define MG_STORAGE_DEV (1 << 1)
-/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */
-#define MG_STORAGE_DEV_SKIP_RST (1 << 2)
-
-#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
-
-/* names of GPIO resource */
-#define MG_RST_PIN "mg_rst"
-/* except MG_BOOT_DEV, reset-out pin should be assigned */
-#define MG_RSTOUT_PIN "mg_rstout"
-
-/* private driver data */
-struct mg_drv_data {
- /* disk resource */
- u32 use_polling;
-
- /* device attribution */
- u32 dev_attr;
-
- /* internally used */
- struct mg_host *host;
-};
-
-/* main structure for mflash driver */
-struct mg_host {
- struct device *dev;
-
- struct request_queue *breq;
- spinlock_t lock;
- struct gendisk *gd;
-
- struct timer_list timer;
- void (*mg_do_intr) (struct mg_host *);
-
- u16 id[ATA_ID_WORDS];
-
- u16 cyls;
- u16 heads;
- u16 sectors;
- u32 n_sectors;
- u32 nres_sectors;
-
- void __iomem *dev_base;
- unsigned int irq;
- unsigned int rst;
- unsigned int rstout;
-
- u32 major;
- u32 error;
-};
-
-/*
- * Debugging macro and defines
- */
-#undef DO_MG_DEBUG
-#ifdef DO_MG_DEBUG
-# define MG_DBG(fmt, args...) \
- printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
-#else /* CONFIG_MG_DEBUG */
-# define MG_DBG(fmt, args...) do { } while (0)
-#endif /* CONFIG_MG_DEBUG */
-
-#endif
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index c8f038554e80..b43a9e039059 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -152,5 +152,6 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void
void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
#endif
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 5f3faa9d15ae..18e7c7c0cae6 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -11,8 +11,7 @@
#include <linux/pipe_fs_i.h>
/*
- * splice is tied to pipes as a transport (at least for now), so we'll just
- * add the splice flags here.
+ * Flags passed in from splice/tee/vmsplice
*/
#define SPLICE_F_MOVE (0x01) /* move pages instead of copying */
#define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h
index 94c56d29869d..be7d255fc7cf 100644
--- a/include/linux/virtio_blk.h
+++ b/include/linux/virtio_blk.h
@@ -15,6 +15,10 @@
#define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */
#define VIRTIO_BLK_F_RO 5 /* Disk is read-only */
#define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/
+#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */
+#define VIRTIO_BLK_F_IDENTIFY 8 /* ATA IDENTIFY supported */
+
+#define VIRTIO_BLK_ID_BYTES (sizeof(__u16[256])) /* IDENTIFY DATA */
struct virtio_blk_config
{
@@ -32,6 +36,7 @@ struct virtio_blk_config
} geometry;
/* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */
__u32 blk_size;
+ __u8 identify[VIRTIO_BLK_ID_BYTES];
} __attribute__((packed));
/* These two define direction. */
@@ -55,6 +60,13 @@ struct virtio_blk_outhdr
__u64 sector;
};
+struct virtio_scsi_inhdr {
+ __u32 errors;
+ __u32 data_len;
+ __u32 sense_len;
+ __u32 residual;
+};
+
/* And this is the final byte of the write scatter-gather list. */
#define VIRTIO_BLK_S_OK 0
#define VIRTIO_BLK_S_IOERR 1
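
With VIRTIO_BLK_F_SCSI, a passthrough command carries a struct virtio_scsi_inhdr ahead of the final status byte, letting the guest recover residual, sense and error counts. A sketch of the guest-side copy-back, assuming a hypothetical example_scsi_done(); field meanings follow the header above:

        static void example_scsi_done(struct request *rq,
                                      struct virtio_scsi_inhdr *in_hdr)
        {
                rq->resid_len = in_hdr->residual;   /* bytes not transferred */
                rq->sense_len = in_hdr->sense_len;  /* valid sense data bytes */
                rq->errors    = in_hdr->errors;     /* SCSI status/errors */
        }
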
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 43b50d36925c..3878d1dc7f59 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -270,7 +270,7 @@ static inline unsigned char scsi_get_prot_type(struct scsi_cmnd *scmd)
static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
{
- return scmd->request->sector;
+ return blk_rq_pos(scmd->request);
}
static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd)
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 53effd496a50..d6b05f42dd44 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -25,9 +25,8 @@ TRACE_EVENT(block_rq_abort,
TP_fast_assign(
__entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
- __entry->sector = blk_pc_request(rq) ? 0 : rq->hard_sector;
- __entry->nr_sector = blk_pc_request(rq) ?
- 0 : rq->hard_nr_sectors;
+ __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
+ __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
__entry->errors = rq->errors;
blk_fill_rwbs_rq(__entry->rwbs, rq);
@@ -59,10 +58,9 @@ TRACE_EVENT(block_rq_insert,
TP_fast_assign(
__entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
- __entry->sector = blk_pc_request(rq) ? 0 : rq->hard_sector;
- __entry->nr_sector = blk_pc_request(rq) ?
- 0 : rq->hard_nr_sectors;
- __entry->bytes = blk_pc_request(rq) ? rq->data_len : 0;
+ __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
+ __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
+ __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0;
blk_fill_rwbs_rq(__entry->rwbs, rq);
blk_dump_cmd(__get_str(cmd), rq);
@@ -94,10 +92,9 @@ TRACE_EVENT(block_rq_issue,
TP_fast_assign(
__entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
- __entry->sector = blk_pc_request(rq) ? 0 : rq->hard_sector;
- __entry->nr_sector = blk_pc_request(rq) ?
- 0 : rq->hard_nr_sectors;
- __entry->bytes = blk_pc_request(rq) ? rq->data_len : 0;
+ __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
+ __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
+ __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0;
blk_fill_rwbs_rq(__entry->rwbs, rq);
blk_dump_cmd(__get_str(cmd), rq);
@@ -128,9 +125,8 @@ TRACE_EVENT(block_rq_requeue,
TP_fast_assign(
__entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
- __entry->sector = blk_pc_request(rq) ? 0 : rq->hard_sector;
- __entry->nr_sector = blk_pc_request(rq) ?
- 0 : rq->hard_nr_sectors;
+ __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
+ __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
__entry->errors = rq->errors;
blk_fill_rwbs_rq(__entry->rwbs, rq);
@@ -161,9 +157,8 @@ TRACE_EVENT(block_rq_complete,
TP_fast_assign(
__entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
- __entry->sector = blk_pc_request(rq) ? 0 : rq->hard_sector;
- __entry->nr_sector = blk_pc_request(rq) ?
- 0 : rq->hard_nr_sectors;
+ __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
+ __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
__entry->errors = rq->errors;
blk_fill_rwbs_rq(__entry->rwbs, rq);
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 7bd6a9893c24..39af8af6fc30 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -669,12 +669,12 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
if (blk_pc_request(rq)) {
what |= BLK_TC_ACT(BLK_TC_PC);
- __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
- rq->cmd_len, rq->cmd);
+ __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
+ what, rq->errors, rq->cmd_len, rq->cmd);
} else {
what |= BLK_TC_ACT(BLK_TC_FS);
- __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
- rw, what, rq->errors, 0, NULL);
+ __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rw,
+ what, rq->errors, 0, NULL);
}
}
@@ -881,11 +881,11 @@ void blk_add_driver_data(struct request_queue *q,
return;
if (blk_pc_request(rq))
- __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
- rq->errors, len, data);
+ __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
+ BLK_TA_DRV_DATA, rq->errors, len, data);
else
- __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
- 0, BLK_TA_DRV_DATA, rq->errors, len, data);
+ __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
+ BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
@@ -1724,10 +1724,7 @@ void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
if (blk_discard_rq(rq))
rw |= (1 << BIO_RW_DISCARD);
- if (blk_pc_request(rq))
- bytes = rq->data_len;
- else
- bytes = rq->hard_nr_sectors << 9;
+ bytes = blk_rq_bytes(rq);
blk_fill_rwbs(rwbs, rw, bytes);
}
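
Every tracing change above is the same substitution: direct reads of rq->hard_sector, rq->hard_nr_sectors and rq->data_len become blk_rq_pos(), blk_rq_sectors() and blk_rq_bytes(), which are valid for both fs and pc requests. A sketch of the equivalences (example_trace_values() is hypothetical):

        static void example_trace_values(struct request *rq)
        {
                pr_debug("rq %llu+%u (%u bytes)\n",
                         (unsigned long long)blk_rq_pos(rq), /* start sector */
                         blk_rq_sectors(rq),  /* sectors left in whole rq */
                         blk_rq_bytes(rq));   /* bytes left in whole rq */
        }
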
diff --git a/mm/bounce.c b/mm/bounce.c
index 65f5e17e411a..4ebe3ea83795 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -191,7 +191,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
/*
* is destination page below bounce pfn?
*/
- if (page_to_pfn(page) <= q->bounce_pfn)
+ if (page_to_pfn(page) <= queue_bounce_pfn(q))
continue;
/*
@@ -283,7 +283,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
* don't waste time iterating over bio segments
*/
if (!(q->bounce_gfp & GFP_DMA)) {
- if (q->bounce_pfn >= blk_max_pfn)
+ if (queue_bounce_pfn(q) >= blk_max_pfn)
return;
pool = page_pool;
} else {