Diffstat (limited to 'drivers/scsi/sg.c')
-rw-r--r--  drivers/scsi/sg.c  660
1 file changed, 374 insertions(+), 286 deletions(-)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index df5e961484e1..01cf88888797 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -7,9 +7,7 @@
* Original driver (sg.c):
* Copyright (C) 1992 Lawrence Foard
* Version 2 and 3 extensions to driver:
- * Copyright (C) 1998 - 2005 Douglas Gilbert
- *
- * Modified 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
+ * Copyright (C) 1998 - 2014 Douglas Gilbert
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -18,11 +16,11 @@
*
*/
-static int sg_version_num = 30534; /* 2 digits for each component */
-#define SG_VERSION_STR "3.5.34"
+static int sg_version_num = 30536; /* 2 digits for each component */
+#define SG_VERSION_STR "3.5.36"
/*
- * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
+ * D. P. Gilbert (dgilbert@interlog.com), notes:
* - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
* the kernel/module needs to be built with CONFIG_SCSI_LOGGING
* (otherwise the macros compile to empty statements).
@@ -51,6 +49,7 @@ static int sg_version_num = 30534; /* 2 digits for each component */
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/mutex.h>
+#include <linux/atomic.h>
#include <linux/ratelimit.h>
#include "scsi.h"
@@ -64,7 +63,7 @@ static int sg_version_num = 30534; /* 2 digits for each component */
#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
-static char *sg_version_date = "20061027";
+static char *sg_version_date = "20140603";
static int sg_proc_init(void);
static void sg_proc_cleanup(void);
@@ -74,6 +73,12 @@ static void sg_proc_cleanup(void);
#define SG_MAX_DEVS 32768
+/* SG_MAX_CDB_SIZE should be 260 (spc4r37 section 3.1.30) however the type
+ * of sg_io_hdr::cmd_len can only represent 255. All SCSI commands greater
+ * than 16 bytes are "variable length" whose length is a multiple of 4
+ */
+#define SG_MAX_CDB_SIZE 252
+
/*
* Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
* Then when using 32 bit integers x * m may overflow during the calculation.
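The SG_MAX_CDB_SIZE addition above is what allows the v3 interface to carry CDBs longer than 16 bytes. A minimal userspace sketch, assuming a hypothetical 32-byte variable-length CDB (opcode 0x7f) that the target actually implements; the only point illustrated is that sg_io_hdr::cmd_len is an unsigned char, so the driver accepts at most 252 bytes (the largest multiple of 4 below 255):

#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

static int send_long_cdb(int sg_fd)
{
	unsigned char cdb[32];		/* variable-length CDB, length a multiple of 4 */
	unsigned char sense[32];
	sg_io_hdr_t io;

	memset(cdb, 0, sizeof(cdb));
	cdb[0] = 0x7f;			/* VARIABLE LENGTH CDB opcode (illustrative) */
	cdb[7] = sizeof(cdb) - 8;	/* additional CDB length field */

	memset(&io, 0, sizeof(io));
	io.interface_id = 'S';		/* v3 sg_io_hdr */
	io.cmd_len = sizeof(cdb);	/* must not exceed SG_MAX_CDB_SIZE (252) */
	io.cmdp = cdb;
	io.dxfer_direction = SG_DXFER_NONE;
	io.mx_sb_len = sizeof(sense);
	io.sbp = sense;
	io.timeout = 20000;		/* milliseconds */

	return ioctl(sg_fd, SG_IO, &io);
}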
@@ -102,18 +107,16 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ;
#define SG_SECTOR_SZ 512
-static int sg_add(struct device *, struct class_interface *);
-static void sg_remove(struct device *, struct class_interface *);
-
-static DEFINE_SPINLOCK(sg_open_exclusive_lock);
+static int sg_add_device(struct device *, struct class_interface *);
+static void sg_remove_device(struct device *, struct class_interface *);
static DEFINE_IDR(sg_index_idr);
static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
file descriptor list for device */
static struct class_interface sg_interface = {
- .add_dev = sg_add,
- .remove_dev = sg_remove,
+ .add_dev = sg_add_device,
+ .remove_dev = sg_remove_device,
};
typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
@@ -146,8 +149,7 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
} Sg_request;
typedef struct sg_fd { /* holds the state of a file descriptor */
- /* sfd_siblings is protected by sg_index_lock */
- struct list_head sfd_siblings;
+ struct list_head sfd_siblings; /* protected by device's sfd_lock */
struct sg_device *parentdp; /* owning device */
wait_queue_head_t read_wait; /* queue read until command done */
rwlock_t rq_list_lock; /* protect access to list in req_arr */
@@ -161,7 +163,7 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
char low_dma; /* as in parent but possibly overridden to 1 */
char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
- char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */
+ unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
char mmap_called; /* 0 -> mmap() never called on this fd */
struct kref f_ref;
@@ -170,14 +172,15 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
typedef struct sg_device { /* holds the state of each scsi generic device */
struct scsi_device *device;
- wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
+ wait_queue_head_t open_wait; /* queue open() when O_EXCL present */
+ struct mutex open_rel_lock; /* held when in open() or release() */
int sg_tablesize; /* adapter's max scatter-gather table size */
u32 index; /* device index number */
- /* sfds is protected by sg_index_lock */
struct list_head sfds;
- volatile char detached; /* 0->attached, 1->detached pending removal */
- /* exclude protected by sg_open_exclusive_lock */
- char exclude; /* opened for exclusive access */
+ rwlock_t sfd_lock; /* protect access to sfd list */
+ atomic_t detaching; /* 0->device usable, 1->device detaching */
+ bool exclude; /* 1->open(O_EXCL) succeeded and is active */
+ int open_cnt; /* count of opens (perhaps < num(sfds) ) */
char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
struct gendisk *disk;
struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
@@ -197,24 +200,28 @@ static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking);
static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
-static void sg_remove_scat(Sg_scatter_hold * schp);
+static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp);
static void sg_build_reserve(Sg_fd * sfp, int req_size);
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
-static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
+static Sg_fd *sg_add_sfp(Sg_device * sdp);
static void sg_remove_sfp(struct kref *);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static int sg_res_in_use(Sg_fd * sfp);
static Sg_device *sg_get_dev(int dev);
-static void sg_put_dev(Sg_device *sdp);
+static void sg_device_destroy(struct kref *kref);
#define SZ_SG_HEADER sizeof(struct sg_header)
#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
#define SZ_SG_IOVEC sizeof(sg_iovec_t)
#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
+#define sg_printk(prefix, sdp, fmt, a...) \
+ sdev_printk(prefix, (sdp)->device, "[%s] " fmt, \
+ (sdp)->disk->disk_name, ##a)
+
static int sg_allow_access(struct file *filp, unsigned char *cmd)
{
struct sg_fd *sfp = filp->private_data;
@@ -225,38 +232,43 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
}
-static int get_exclude(Sg_device *sdp)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&sg_open_exclusive_lock, flags);
- ret = sdp->exclude;
- spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
- return ret;
-}
-
-static int set_exclude(Sg_device *sdp, char val)
+static int
+open_wait(Sg_device *sdp, int flags)
{
- unsigned long flags;
-
- spin_lock_irqsave(&sg_open_exclusive_lock, flags);
- sdp->exclude = val;
- spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
- return val;
-}
+ int retval = 0;
-static int sfds_list_empty(Sg_device *sdp)
-{
- unsigned long flags;
- int ret;
+ if (flags & O_EXCL) {
+ while (sdp->open_cnt > 0) {
+ mutex_unlock(&sdp->open_rel_lock);
+ retval = wait_event_interruptible(sdp->open_wait,
+ (atomic_read(&sdp->detaching) ||
+ !sdp->open_cnt));
+ mutex_lock(&sdp->open_rel_lock);
+
+ if (retval) /* -ERESTARTSYS */
+ return retval;
+ if (atomic_read(&sdp->detaching))
+ return -ENODEV;
+ }
+ } else {
+ while (sdp->exclude) {
+ mutex_unlock(&sdp->open_rel_lock);
+ retval = wait_event_interruptible(sdp->open_wait,
+ (atomic_read(&sdp->detaching) ||
+ !sdp->exclude));
+ mutex_lock(&sdp->open_rel_lock);
+
+ if (retval) /* -ERESTARTSYS */
+ return retval;
+ if (atomic_read(&sdp->detaching))
+ return -ENODEV;
+ }
+ }
- read_lock_irqsave(&sg_index_lock, flags);
- ret = list_empty(&sdp->sfds);
- read_unlock_irqrestore(&sg_index_lock, flags);
- return ret;
+ return retval;
}
+/* Returns 0 on success, else a negated errno value */
static int
sg_open(struct inode *inode, struct file *filp)
{
@@ -265,17 +277,17 @@ sg_open(struct inode *inode, struct file *filp)
struct request_queue *q;
Sg_device *sdp;
Sg_fd *sfp;
- int res;
int retval;
nonseekable_open(inode, filp);
- SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
+ if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE)))
+ return -EPERM; /* Can't lock it with read only access */
sdp = sg_get_dev(dev);
- if (IS_ERR(sdp)) {
- retval = PTR_ERR(sdp);
- sdp = NULL;
- goto sg_put;
- }
+ if (IS_ERR(sdp))
+ return PTR_ERR(sdp);
+
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_open: flags=0x%x\n", flags));
/* This driver's module count bumped by fops_get in <linux/fs.h> */
/* Prevent the device driver from vanishing while we sleep */
@@ -287,6 +299,9 @@ sg_open(struct inode *inode, struct file *filp)
if (retval)
goto sdp_put;
+ /* scsi_block_when_processing_errors() may block so bypass
+ * check if O_NONBLOCK. Permits SCSI commands to be issued
+ * during error recovery. Tread carefully. */
if (!((flags & O_NONBLOCK) ||
scsi_block_when_processing_errors(sdp->device))) {
retval = -ENXIO;
@@ -294,65 +309,65 @@ sg_open(struct inode *inode, struct file *filp)
goto error_out;
}
- if (flags & O_EXCL) {
- if (O_RDONLY == (flags & O_ACCMODE)) {
- retval = -EPERM; /* Can't lock it with read only access */
- goto error_out;
- }
- if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) {
- retval = -EBUSY;
- goto error_out;
- }
- res = wait_event_interruptible(sdp->o_excl_wait,
- ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 0 : set_exclude(sdp, 1)));
- if (res) {
- retval = res; /* -ERESTARTSYS because signal hit process */
- goto error_out;
- }
- } else if (get_exclude(sdp)) { /* some other fd has an exclusive lock on dev */
- if (flags & O_NONBLOCK) {
- retval = -EBUSY;
- goto error_out;
- }
- res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp));
- if (res) {
- retval = res; /* -ERESTARTSYS because signal hit process */
- goto error_out;
+ mutex_lock(&sdp->open_rel_lock);
+ if (flags & O_NONBLOCK) {
+ if (flags & O_EXCL) {
+ if (sdp->open_cnt > 0) {
+ retval = -EBUSY;
+ goto error_mutex_locked;
+ }
+ } else {
+ if (sdp->exclude) {
+ retval = -EBUSY;
+ goto error_mutex_locked;
+ }
}
+ } else {
+ retval = open_wait(sdp, flags);
+ if (retval) /* -ERESTARTSYS or -ENODEV */
+ goto error_mutex_locked;
}
- if (sdp->detached) {
- retval = -ENODEV;
- goto error_out;
- }
- if (sfds_list_empty(sdp)) { /* no existing opens on this device */
+
+ /* N.B. at this point we are holding the open_rel_lock */
+ if (flags & O_EXCL)
+ sdp->exclude = true;
+
+ if (sdp->open_cnt < 1) { /* no existing opens */
sdp->sgdebug = 0;
q = sdp->device->request_queue;
sdp->sg_tablesize = queue_max_segments(q);
}
- if ((sfp = sg_add_sfp(sdp, dev)))
- filp->private_data = sfp;
- else {
- if (flags & O_EXCL) {
- set_exclude(sdp, 0); /* undo if error */
- wake_up_interruptible(&sdp->o_excl_wait);
- }
- retval = -ENOMEM;
- goto error_out;
+ sfp = sg_add_sfp(sdp);
+ if (IS_ERR(sfp)) {
+ retval = PTR_ERR(sfp);
+ goto out_undo;
}
+
+ filp->private_data = sfp;
+ sdp->open_cnt++;
+ mutex_unlock(&sdp->open_rel_lock);
+
retval = 0;
-error_out:
- if (retval) {
- scsi_autopm_put_device(sdp->device);
-sdp_put:
- scsi_device_put(sdp->device);
- }
sg_put:
- if (sdp)
- sg_put_dev(sdp);
+ kref_put(&sdp->d_ref, sg_device_destroy);
return retval;
+
+out_undo:
+ if (flags & O_EXCL) {
+ sdp->exclude = false; /* undo if error */
+ wake_up_interruptible(&sdp->open_wait);
+ }
+error_mutex_locked:
+ mutex_unlock(&sdp->open_rel_lock);
+error_out:
+ scsi_autopm_put_device(sdp->device);
+sdp_put:
+ scsi_device_put(sdp->device);
+ goto sg_put;
}
-/* Following function was formerly called 'sg_close' */
+/* Release resources associated with a successful sg_open()
+ * Returns 0 on success, else a negated errno value */
static int
sg_release(struct inode *inode, struct file *filp)
{
@@ -361,13 +376,22 @@ sg_release(struct inode *inode, struct file *filp)
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
- SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
-
- set_exclude(sdp, 0);
- wake_up_interruptible(&sdp->o_excl_wait);
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n"));
+ mutex_lock(&sdp->open_rel_lock);
scsi_autopm_put_device(sdp->device);
kref_put(&sfp->f_ref, sg_remove_sfp);
+ sdp->open_cnt--;
+
+ /* possibly many open()s waiting on exclude clearing, start many;
+ * only open(O_EXCL)s wait on 0==open_cnt so only start one */
+ if (sdp->exclude) {
+ sdp->exclude = false;
+ wake_up_interruptible_all(&sdp->open_wait);
+ } else if (0 == sdp->open_cnt) {
+ wake_up_interruptible(&sdp->open_wait);
+ }
+ mutex_unlock(&sdp->open_rel_lock);
return 0;
}
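Taken together, the open()/release() rework above gives userspace the following behaviour (a hedged sketch; the device node name is illustrative):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Plain open: increments open_cnt. */
	int fd1 = open("/dev/sg1", O_RDWR);

	/* Exclusive, non-blocking open: open_cnt is already 1,
	 * so sg_open() returns -EBUSY immediately. */
	int fd2 = open("/dev/sg1", O_RDWR | O_EXCL | O_NONBLOCK);
	if (fd2 < 0 && errno == EBUSY)
		printf("exclusive open refused while another fd is open\n");

	/* Without O_NONBLOCK the second open() would instead sleep in
	 * open_wait() until open_cnt drops to zero (or the device detaches).
	 * Note also that O_EXCL combined with O_RDONLY is rejected with EPERM. */
	if (fd1 >= 0)
		close(fd1);
	return 0;
}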
@@ -384,8 +408,8 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
- SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
- sdp->disk->disk_name, (int) count));
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_read: count=%d\n", (int) count));
if (!access_ok(VERIFY_WRITE, buf, count))
return -EFAULT;
@@ -419,7 +443,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
}
srp = sg_get_rq_mark(sfp, req_pack_id);
if (!srp) { /* now wait on packet to arrive */
- if (sdp->detached) {
+ if (atomic_read(&sdp->detaching)) {
retval = -ENODEV;
goto free_old_hdr;
}
@@ -428,9 +452,9 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
goto free_old_hdr;
}
retval = wait_event_interruptible(sfp->read_wait,
- (sdp->detached ||
+ (atomic_read(&sdp->detaching) ||
(srp = sg_get_rq_mark(sfp, req_pack_id))));
- if (sdp->detached) {
+ if (atomic_read(&sdp->detaching)) {
retval = -ENODEV;
goto free_old_hdr;
}
@@ -566,13 +590,13 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
Sg_request *srp;
struct sg_header old_hdr;
sg_io_hdr_t *hp;
- unsigned char cmnd[MAX_COMMAND_SIZE];
+ unsigned char cmnd[SG_MAX_CDB_SIZE];
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
- SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
- sdp->disk->disk_name, (int) count));
- if (sdp->detached)
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_write: count=%d\n", (int) count));
+ if (atomic_read(&sdp->detaching))
return -ENODEV;
if (!((filp->f_flags & O_NONBLOCK) ||
scsi_block_when_processing_errors(sdp->device)))
@@ -592,18 +616,13 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
return -EIO; /* The minimum scsi command length is 6 bytes. */
if (!(srp = sg_add_request(sfp))) {
- SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
+ SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp,
+ "sg_write: queue full\n"));
return -EDOM;
}
buf += SZ_SG_HEADER;
__get_user(opcode, buf);
if (sfp->next_cmd_len > 0) {
- if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
- SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
- sfp->next_cmd_len = 0;
- sg_remove_request(sfp, srp);
- return -EIO;
- }
cmd_size = sfp->next_cmd_len;
sfp->next_cmd_len = 0; /* reset so only this write() affected */
} else {
@@ -611,7 +630,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
if ((opcode >= 0xc0) && old_hdr.twelve_byte)
cmd_size = 12;
}
- SCSI_LOG_TIMEOUT(4, printk(
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
/* Determine buffer size. */
input_size = count - cmd_size;
@@ -675,7 +694,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
int k;
Sg_request *srp;
sg_io_hdr_t *hp;
- unsigned char cmnd[MAX_COMMAND_SIZE];
+ unsigned char cmnd[SG_MAX_CDB_SIZE];
int timeout;
unsigned long ul_timeout;
@@ -686,7 +705,8 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
if (!(srp = sg_add_request(sfp))) {
- SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
+ SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_new_write: queue full\n"));
return -EDOM;
}
srp->sg_io_owned = sg_io_owned;
@@ -743,7 +763,7 @@ static int
sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking)
{
- int k, data_dir;
+ int k, data_dir, at_head;
Sg_device *sdp = sfp->parentdp;
sg_io_hdr_t *hp = &srp->header;
@@ -755,16 +775,18 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
hp->host_status = 0;
hp->driver_status = 0;
hp->resid = 0;
- SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
- (int) cmnd[0], (int) hp->cmd_len));
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
+ (int) cmnd[0], (int) hp->cmd_len));
k = sg_start_req(srp, cmnd);
if (k) {
- SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k));
+ SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_common_write: start_req err=%d\n", k));
sg_finish_rem_req(srp);
return k; /* probably out of space --> ENOMEM */
}
- if (sdp->detached) {
+ if (atomic_read(&sdp->detaching)) {
if (srp->bio)
blk_end_request_all(srp->rq, -EIO);
sg_finish_rem_req(srp);
@@ -787,11 +809,16 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
break;
}
hp->duration = jiffies_to_msecs(jiffies);
+ if (hp->interface_id != '\0' && /* v3 (or later) interface */
+ (SG_FLAG_Q_AT_TAIL & hp->flags))
+ at_head = 0;
+ else
+ at_head = 1;
srp->rq->timeout = timeout;
kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
- srp->rq, 1, sg_rq_end_io);
+ srp->rq, at_head, sg_rq_end_io);
return 0;
}
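The at_head logic only changes behaviour for v3 (or later) headers that explicitly ask for tail queuing; everything else keeps the old at_head submission. A minimal sketch of the header setup, assuming SG_FLAG_Q_AT_TAIL is exported by the installed <scsi/sg.h> (the fallback value below matches recent kernels but is an assumption):

#include <scsi/sg.h>

#ifndef SG_FLAG_Q_AT_TAIL
#define SG_FLAG_Q_AT_TAIL 0x10
#endif

static void request_tail_queuing(sg_io_hdr_t *hp)
{
	hp->interface_id = 'S';		/* v3 interface: flags are honoured */
	hp->flags |= SG_FLAG_Q_AT_TAIL;	/* otherwise the request is queued at_head */
}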
@@ -806,6 +833,15 @@ static int srp_done(Sg_fd *sfp, Sg_request *srp)
return ret;
}
+static int max_sectors_bytes(struct request_queue *q)
+{
+ unsigned int max_sectors = queue_max_sectors(q);
+
+ max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9);
+
+ return max_sectors << 9;
+}
+
static long
sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
@@ -820,13 +856,13 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
- SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
- sdp->disk->disk_name, (int) cmd_in));
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_ioctl: cmd=0x%x\n", (int) cmd_in));
read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
switch (cmd_in) {
case SG_IO:
- if (sdp->detached)
+ if (atomic_read(&sdp->detaching))
return -ENODEV;
if (!scsi_block_when_processing_errors(sdp->device))
return -ENXIO;
@@ -837,8 +873,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
if (result < 0)
return result;
result = wait_event_interruptible(sfp->read_wait,
- (srp_done(sfp, srp) || sdp->detached));
- if (sdp->detached)
+ (srp_done(sfp, srp) || atomic_read(&sdp->detaching)));
+ if (atomic_read(&sdp->detaching))
return -ENODEV;
write_lock_irq(&sfp->rq_list_lock);
if (srp->done) {
@@ -873,11 +909,11 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
sfp->low_dma = 1;
if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
val = (int) sfp->reserve.bufflen;
- sg_remove_scat(&sfp->reserve);
+ sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
}
} else {
- if (sdp->detached)
+ if (atomic_read(&sdp->detaching))
return -ENODEV;
sfp->low_dma = sdp->device->host->unchecked_isa_dma;
}
@@ -890,7 +926,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
else {
sg_scsi_id_t __user *sg_idp = p;
- if (sdp->detached)
+ if (atomic_read(&sdp->detaching))
return -ENODEV;
__put_user((int) sdp->device->host->host_no,
&sg_idp->host_no);
@@ -945,17 +981,17 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
if (val < 0)
return -EINVAL;
val = min_t(int, val,
- queue_max_sectors(sdp->device->request_queue) * 512);
+ max_sectors_bytes(sdp->device->request_queue));
if (val != sfp->reserve.bufflen) {
if (sg_res_in_use(sfp) || sfp->mmap_called)
return -EBUSY;
- sg_remove_scat(&sfp->reserve);
+ sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
}
return 0;
case SG_GET_RESERVED_SIZE:
val = min_t(int, sfp->reserve.bufflen,
- queue_max_sectors(sdp->device->request_queue) * 512);
+ max_sectors_bytes(sdp->device->request_queue));
return put_user(val, ip);
case SG_SET_COMMAND_Q:
result = get_user(val, ip);
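With the max_sectors_bytes() helper, SG_GET_RESERVED_SIZE and SG_SET_RESERVED_SIZE are both clamped to the request queue's limit without risking 32-bit overflow in the sectors-to-bytes conversion. A small userspace sketch of how the pair is typically used (hypothetical helper name):

#include <sys/ioctl.h>
#include <scsi/sg.h>

/* Try to grow the per-fd reserve buffer; the driver silently clamps the
 * requested size to max_sectors_bytes() of the underlying queue. */
static int grow_reserve(int sg_fd, int wanted_bytes)
{
	int cur = 0;

	if (ioctl(sg_fd, SG_GET_RESERVED_SIZE, &cur) < 0)
		return -1;
	if (wanted_bytes <= cur)
		return 0;
	return ioctl(sg_fd, SG_SET_RESERVED_SIZE, &wanted_bytes);
}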
@@ -1032,11 +1068,11 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return result;
}
case SG_EMULATED_HOST:
- if (sdp->detached)
+ if (atomic_read(&sdp->detaching))
return -ENODEV;
return put_user(sdp->device->host->hostt->emulated, ip);
case SG_SCSI_RESET:
- if (sdp->detached)
+ if (atomic_read(&sdp->detaching))
return -ENODEV;
if (filp->f_flags & O_NONBLOCK) {
if (scsi_host_in_recovery(sdp->device->host))
@@ -1069,7 +1105,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return (scsi_reset_provider(sdp->device, val) ==
SUCCESS) ? 0 : -EIO;
case SCSI_IOCTL_SEND_COMMAND:
- if (sdp->detached)
+ if (atomic_read(&sdp->detaching))
return -ENODEV;
if (read_only) {
unsigned char opcode = WRITE_6;
@@ -1091,11 +1127,11 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
case SCSI_IOCTL_GET_BUS_NUMBER:
case SCSI_IOCTL_PROBE_HOST:
case SG_GET_TRANSFORM:
- if (sdp->detached)
+ if (atomic_read(&sdp->detaching))
return -ENODEV;
return scsi_ioctl(sdp->device, cmd_in, p);
case BLKSECTGET:
- return put_user(queue_max_sectors(sdp->device->request_queue) * 512,
+ return put_user(max_sectors_bytes(sdp->device->request_queue),
ip);
case BLKTRACESETUP:
return blk_trace_setup(sdp->device->request_queue,
@@ -1165,15 +1201,15 @@ sg_poll(struct file *filp, poll_table * wait)
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- if (sdp->detached)
+ if (atomic_read(&sdp->detaching))
res |= POLLHUP;
else if (!sfp->cmd_q) {
if (0 == count)
res |= POLLOUT | POLLWRNORM;
} else if (count < SG_MAX_QUEUE)
res |= POLLOUT | POLLWRNORM;
- SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n",
- sdp->disk->disk_name, (int) res));
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_poll: res=0x%x\n", (int) res));
return res;
}
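From userspace, the switch to the atomic detaching flag is mostly visible through poll(): once removal starts, POLLHUP is reported and subsequent read()/write()/ioctl() calls fail with ENODEV. A reduced sketch:

#include <poll.h>

/* Wait for a completed request (or device removal) on an open sg fd. */
static int wait_for_response(int sg_fd)
{
	struct pollfd pfd = { .fd = sg_fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) < 0)	/* no timeout */
		return -1;
	if (pfd.revents & POLLHUP)
		return -1;		/* device is detaching or gone */
	if (pfd.revents & POLLIN)
		return 0;		/* a finished request can now be read() */
	return -1;
}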
@@ -1185,8 +1221,8 @@ sg_fasync(int fd, struct file *filp, int mode)
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
- SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n",
- sdp->disk->disk_name, mode));
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_fasync: mode=%d\n", mode));
return fasync_helper(fd, filp, mode, &sfp->async_qp);
}
@@ -1205,8 +1241,9 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
offset = vmf->pgoff << PAGE_SHIFT;
if (offset >= rsv_schp->bufflen)
return VM_FAULT_SIGBUS;
- SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n",
- offset, rsv_schp->k_use_sg));
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_vma_fault: offset=%lu, scatg=%d\n",
+ offset, rsv_schp->k_use_sg));
sa = vma->vm_start;
length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
@@ -1241,8 +1278,9 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
return -ENXIO;
req_sz = vma->vm_end - vma->vm_start;
- SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
- (void *) vma->vm_start, (int) req_sz));
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_mmap starting, vm_start=%p, len=%d\n",
+ (void *) vma->vm_start, (int) req_sz));
if (vma->vm_pgoff)
return -EINVAL; /* want no offset */
rsv_schp = &sfp->reserve;
@@ -1264,7 +1302,8 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
return 0;
}
-static void sg_rq_end_io_usercontext(struct work_struct *work)
+static void
+sg_rq_end_io_usercontext(struct work_struct *work)
{
struct sg_request *srp = container_of(work, struct sg_request, ew.work);
struct sg_fd *sfp = srp->parentfp;
@@ -1277,7 +1316,8 @@ static void sg_rq_end_io_usercontext(struct work_struct *work)
* This function is a "bottom half" handler that is called by the mid
* level when a command is completed (or has failed).
*/
-static void sg_rq_end_io(struct request *rq, int uptodate)
+static void
+sg_rq_end_io(struct request *rq, int uptodate)
{
struct sg_request *srp = rq->end_io_data;
Sg_device *sdp;
@@ -1295,15 +1335,16 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
return;
sdp = sfp->parentdp;
- if (unlikely(sdp->detached))
- printk(KERN_INFO "sg_rq_end_io: device detached\n");
+ if (unlikely(atomic_read(&sdp->detaching)))
+ pr_info("%s: device detaching\n", __func__);
sense = rq->sense;
result = rq->errors;
resid = rq->resid_len;
- SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
- sdp->disk->disk_name, srp->header.pack_id, result));
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
+ "sg_cmd_done: pack_id=%d, res=0x%x\n",
+ srp->header.pack_id, result));
srp->header.resid = resid;
ms = jiffies_to_msecs(jiffies);
srp->header.duration = (ms > srp->header.duration) ?
@@ -1319,7 +1360,7 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
if ((sdp->sgdebug > 0) &&
((CHECK_CONDITION == srp->header.masked_status) ||
(COMMAND_TERMINATED == srp->header.masked_status)))
- __scsi_print_sense("sg_cmd_done", sense,
+ __scsi_print_sense(__func__, sense,
SCSI_SENSE_BUFFERSIZE);
/* Following if statement is a patch supplied by Eric Youngdale */
@@ -1378,7 +1419,8 @@ static struct class *sg_sysfs_class;
static int sg_sysfs_valid = 0;
-static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
+static Sg_device *
+sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
{
struct request_queue *q = scsidp->request_queue;
Sg_device *sdp;
@@ -1388,7 +1430,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
if (!sdp) {
- printk(KERN_WARNING "kmalloc Sg_device failure\n");
+ sdev_printk(KERN_WARNING, scsidp, "%s: kmalloc Sg_device "
+ "failure\n", __func__);
return ERR_PTR(-ENOMEM);
}
@@ -1403,20 +1446,25 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
scsidp->type, SG_MAX_DEVS - 1);
error = -ENODEV;
} else {
- printk(KERN_WARNING
- "idr allocation Sg_device failure: %d\n", error);
+ sdev_printk(KERN_WARNING, scsidp, "%s: idr "
+ "allocation Sg_device failure: %d\n",
+ __func__, error);
}
goto out_unlock;
}
k = error;
- SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
+ SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp,
+ "sg_alloc: dev=%d \n", k));
sprintf(disk->disk_name, "sg%d", k);
disk->first_minor = k;
sdp->disk = disk;
sdp->device = scsidp;
+ mutex_init(&sdp->open_rel_lock);
INIT_LIST_HEAD(&sdp->sfds);
- init_waitqueue_head(&sdp->o_excl_wait);
+ init_waitqueue_head(&sdp->open_wait);
+ atomic_set(&sdp->detaching, 0);
+ rwlock_init(&sdp->sfd_lock);
sdp->sg_tablesize = queue_max_segments(q);
sdp->index = k;
kref_init(&sdp->d_ref);
@@ -1434,7 +1482,7 @@ out_unlock:
}
static int
-sg_add(struct device *cl_dev, struct class_interface *cl_intf)
+sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
{
struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
struct gendisk *disk;
@@ -1445,7 +1493,7 @@ sg_add(struct device *cl_dev, struct class_interface *cl_intf)
disk = alloc_disk(1);
if (!disk) {
- printk(KERN_WARNING "alloc_disk failed\n");
+ pr_warn("%s: alloc_disk failed\n", __func__);
return -ENOMEM;
}
disk->major = SCSI_GENERIC_MAJOR;
@@ -1453,7 +1501,7 @@ sg_add(struct device *cl_dev, struct class_interface *cl_intf)
error = -ENOMEM;
cdev = cdev_alloc();
if (!cdev) {
- printk(KERN_WARNING "cdev_alloc failed\n");
+ pr_warn("%s: cdev_alloc failed\n", __func__);
goto out;
}
cdev->owner = THIS_MODULE;
@@ -1461,7 +1509,7 @@ sg_add(struct device *cl_dev, struct class_interface *cl_intf)
sdp = sg_alloc(disk, scsidp);
if (IS_ERR(sdp)) {
- printk(KERN_WARNING "sg_alloc failed\n");
+ pr_warn("%s: sg_alloc failed\n", __func__);
error = PTR_ERR(sdp);
goto out;
}
@@ -1479,22 +1527,20 @@ sg_add(struct device *cl_dev, struct class_interface *cl_intf)
sdp->index),
sdp, "%s", disk->disk_name);
if (IS_ERR(sg_class_member)) {
- printk(KERN_ERR "sg_add: "
- "device_create failed\n");
+ pr_err("%s: device_create failed\n", __func__);
error = PTR_ERR(sg_class_member);
goto cdev_add_err;
}
error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
&sg_class_member->kobj, "generic");
if (error)
- printk(KERN_ERR "sg_add: unable to make symlink "
- "'generic' back to sg%d\n", sdp->index);
+ pr_err("%s: unable to make symlink 'generic' back "
+ "to sg%d\n", __func__, sdp->index);
} else
- printk(KERN_WARNING "sg_add: sg_sys Invalid\n");
+ pr_warn("%s: sg_sys Invalid\n", __func__);
- sdev_printk(KERN_NOTICE, scsidp,
- "Attached scsi generic sg%d type %d\n", sdp->index,
- scsidp->type);
+ sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d "
+ "type %d\n", sdp->index, scsidp->type);
dev_set_drvdata(cl_dev, sdp);
@@ -1513,7 +1559,8 @@ out:
return error;
}
-static void sg_device_destroy(struct kref *kref)
+static void
+sg_device_destroy(struct kref *kref)
{
struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
unsigned long flags;
@@ -1528,40 +1575,45 @@ static void sg_device_destroy(struct kref *kref)
write_unlock_irqrestore(&sg_index_lock, flags);
SCSI_LOG_TIMEOUT(3,
- printk("sg_device_destroy: %s\n",
- sdp->disk->disk_name));
+ sg_printk(KERN_INFO, sdp, "sg_device_destroy\n"));
put_disk(sdp->disk);
kfree(sdp);
}
-static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
+static void
+sg_remove_device(struct device *cl_dev, struct class_interface *cl_intf)
{
struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
Sg_device *sdp = dev_get_drvdata(cl_dev);
unsigned long iflags;
Sg_fd *sfp;
+ int val;
- if (!sdp || sdp->detached)
+ if (!sdp)
return;
+ /* want sdp->detaching non-zero as soon as possible */
+ val = atomic_inc_return(&sdp->detaching);
+ if (val > 1)
+ return; /* only want to do following once per device */
- SCSI_LOG_TIMEOUT(3, printk("sg_remove: %s\n", sdp->disk->disk_name));
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "%s\n", __func__));
- /* Need a write lock to set sdp->detached. */
- write_lock_irqsave(&sg_index_lock, iflags);
- sdp->detached = 1;
+ read_lock_irqsave(&sdp->sfd_lock, iflags);
list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
- wake_up_interruptible(&sfp->read_wait);
+ wake_up_interruptible_all(&sfp->read_wait);
kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
}
- write_unlock_irqrestore(&sg_index_lock, iflags);
+ wake_up_interruptible_all(&sdp->open_wait);
+ read_unlock_irqrestore(&sdp->sfd_lock, iflags);
sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
cdev_del(sdp->cdev);
sdp->cdev = NULL;
- sg_put_dev(sdp);
+ kref_put(&sdp->d_ref, sg_device_destroy);
}
module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
@@ -1631,7 +1683,8 @@ exit_sg(void)
idr_destroy(&sg_index_idr);
}
-static int sg_start_req(Sg_request *srp, unsigned char *cmd)
+static int
+sg_start_req(Sg_request *srp, unsigned char *cmd)
{
int res;
struct request *rq;
@@ -1645,18 +1698,30 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
struct request_queue *q = sfp->parentdp->device->request_queue;
struct rq_map_data *md, map_data;
int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
+ unsigned char *long_cmdp = NULL;
+
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_start_req: dxfer_len=%d\n",
+ dxfer_len));
- SCSI_LOG_TIMEOUT(4, printk(KERN_INFO "sg_start_req: dxfer_len=%d\n",
- dxfer_len));
+ if (hp->cmd_len > BLK_MAX_CDB) {
+ long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL);
+ if (!long_cmdp)
+ return -ENOMEM;
+ }
rq = blk_get_request(q, rw, GFP_ATOMIC);
- if (!rq)
+ if (!rq) {
+ kfree(long_cmdp);
return -ENOMEM;
+ }
- memcpy(rq->cmd, cmd, hp->cmd_len);
+ blk_rq_set_block_pc(rq);
+ if (hp->cmd_len > BLK_MAX_CDB)
+ rq->cmd = long_cmdp;
+ memcpy(rq->cmd, cmd, hp->cmd_len);
rq->cmd_len = hp->cmd_len;
- rq->cmd_type = REQ_TYPE_BLOCK_PC;
srp->rq = rq;
rq->end_io_data = srp;
@@ -1727,25 +1792,30 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
return res;
}
-static int sg_finish_rem_req(Sg_request * srp)
+static int
+sg_finish_rem_req(Sg_request *srp)
{
int ret = 0;
Sg_fd *sfp = srp->parentfp;
Sg_scatter_hold *req_schp = &srp->data;
- SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used));
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_finish_rem_req: res_used=%d\n",
+ (int) srp->res_used));
if (srp->rq) {
if (srp->bio)
ret = blk_rq_unmap_user(srp->bio);
+ if (srp->rq->cmd != srp->rq->__cmd)
+ kfree(srp->rq->cmd);
blk_put_request(srp->rq);
}
if (srp->res_used)
sg_unlink_reserve(sfp, srp);
else
- sg_remove_scat(req_schp);
+ sg_remove_scat(sfp, req_schp);
sg_remove_request(sfp, srp);
@@ -1779,8 +1849,9 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
++blk_size; /* don't know why */
/* round request up to next highest SG_SECTOR_SZ byte boundary */
blk_size = ALIGN(blk_size, SG_SECTOR_SZ);
- SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n",
- buff_size, blk_size));
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_build_indirect: buff_size=%d, blk_size=%d\n",
+ buff_size, blk_size));
/* N.B. ret_sz carried into this block ... */
mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
@@ -1823,14 +1894,16 @@ retry:
}
}
- SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
- "ret_sz=%d\n", k, num, ret_sz));
+ SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_build_indirect: k=%d, num=%d, ret_sz=%d\n",
+ k, num, ret_sz));
} /* end of for loop */
schp->page_order = order;
schp->k_use_sg = k;
- SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, "
- "rem_sz=%d\n", k, rem_sz));
+ SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_build_indirect: k_use_sg=%d, rem_sz=%d\n",
+ k, rem_sz));
schp->bufflen = blk_size;
if (rem_sz > 0) /* must have failed */
@@ -1847,17 +1920,19 @@ out:
}
static void
-sg_remove_scat(Sg_scatter_hold * schp)
+sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp)
{
- SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
if (schp->pages && schp->sglist_len > 0) {
if (!schp->dio_in_use) {
int k;
for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
- SCSI_LOG_TIMEOUT(5, printk(
- "sg_remove_scat: k=%d, pg=0x%p\n",
- k, schp->pages[k]));
+ SCSI_LOG_TIMEOUT(5,
+ sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_remove_scat: k=%d, pg=0x%p\n",
+ k, schp->pages[k]));
__free_pages(schp->pages[k], schp->page_order);
}
@@ -1873,8 +1948,9 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
Sg_scatter_hold *schp = &srp->data;
int k, num;
- SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
- num_read_xfer));
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp,
+ "sg_read_oxfer: num_read_xfer=%d\n",
+ num_read_xfer));
if ((!outp) || (num_read_xfer <= 0))
return 0;
@@ -1904,14 +1980,15 @@ sg_build_reserve(Sg_fd * sfp, int req_size)
{
Sg_scatter_hold *schp = &sfp->reserve;
- SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size));
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_build_reserve: req_size=%d\n", req_size));
do {
if (req_size < PAGE_SIZE)
req_size = PAGE_SIZE;
if (0 == sg_build_indirect(schp, sfp, req_size))
return;
else
- sg_remove_scat(schp);
+ sg_remove_scat(sfp, schp);
req_size >>= 1; /* divide by 2 */
} while (req_size > (PAGE_SIZE / 2));
}
@@ -1924,7 +2001,8 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
int k, num, rem;
srp->res_used = 1;
- SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_link_reserve: size=%d\n", size));
rem = size;
num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
@@ -1942,7 +2020,8 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
}
if (k >= rsv_schp->k_use_sg)
- SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
+ SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_link_reserve: BAD size\n"));
}
static void
@@ -1950,8 +2029,9 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
{
Sg_scatter_hold *req_schp = &srp->data;
- SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
- (int) req_schp->k_use_sg));
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp,
+ "sg_unlink_reserve: req->k_use_sg=%d\n",
+ (int) req_schp->k_use_sg));
req_schp->k_use_sg = 0;
req_schp->bufflen = 0;
req_schp->pages = NULL;
@@ -2056,7 +2136,7 @@ sg_remove_request(Sg_fd * sfp, Sg_request * srp)
}
static Sg_fd *
-sg_add_sfp(Sg_device * sdp, int dev)
+sg_add_sfp(Sg_device * sdp)
{
Sg_fd *sfp;
unsigned long iflags;
@@ -2064,7 +2144,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
if (!sfp)
- return NULL;
+ return ERR_PTR(-ENOMEM);
init_waitqueue_head(&sfp->read_wait);
rwlock_init(&sfp->rq_list_lock);
@@ -2078,25 +2158,33 @@ sg_add_sfp(Sg_device * sdp, int dev)
sfp->cmd_q = SG_DEF_COMMAND_Q;
sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
sfp->parentdp = sdp;
- write_lock_irqsave(&sg_index_lock, iflags);
+ write_lock_irqsave(&sdp->sfd_lock, iflags);
+ if (atomic_read(&sdp->detaching)) {
+ write_unlock_irqrestore(&sdp->sfd_lock, iflags);
+ return ERR_PTR(-ENODEV);
+ }
list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
- write_unlock_irqrestore(&sg_index_lock, iflags);
- SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
+ write_unlock_irqrestore(&sdp->sfd_lock, iflags);
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_add_sfp: sfp=0x%p\n", sfp));
if (unlikely(sg_big_buff != def_reserved_size))
sg_big_buff = def_reserved_size;
bufflen = min_t(int, sg_big_buff,
- queue_max_sectors(sdp->device->request_queue) * 512);
+ max_sectors_bytes(sdp->device->request_queue));
sg_build_reserve(sfp, bufflen);
- SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
- sfp->reserve.bufflen, sfp->reserve.k_use_sg));
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
+ sfp->reserve.bufflen,
+ sfp->reserve.k_use_sg));
kref_get(&sdp->d_ref);
__module_get(THIS_MODULE);
return sfp;
}
-static void sg_remove_sfp_usercontext(struct work_struct *work)
+static void
+sg_remove_sfp_usercontext(struct work_struct *work)
{
struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
struct sg_device *sdp = sfp->parentdp;
@@ -2106,34 +2194,32 @@ static void sg_remove_sfp_usercontext(struct work_struct *work)
sg_finish_rem_req(sfp->headrp);
if (sfp->reserve.bufflen > 0) {
- SCSI_LOG_TIMEOUT(6,
- printk("sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
+ SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
+ "sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
(int) sfp->reserve.bufflen,
(int) sfp->reserve.k_use_sg));
- sg_remove_scat(&sfp->reserve);
+ sg_remove_scat(sfp, &sfp->reserve);
}
- SCSI_LOG_TIMEOUT(6,
- printk("sg_remove_sfp: %s, sfp=0x%p\n",
- sdp->disk->disk_name,
- sfp));
+ SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
+ "sg_remove_sfp: sfp=0x%p\n", sfp));
kfree(sfp);
scsi_device_put(sdp->device);
- sg_put_dev(sdp);
+ kref_put(&sdp->d_ref, sg_device_destroy);
module_put(THIS_MODULE);
}
-static void sg_remove_sfp(struct kref *kref)
+static void
+sg_remove_sfp(struct kref *kref)
{
struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
struct sg_device *sdp = sfp->parentdp;
unsigned long iflags;
- write_lock_irqsave(&sg_index_lock, iflags);
+ write_lock_irqsave(&sdp->sfd_lock, iflags);
list_del(&sfp->sfd_siblings);
- write_unlock_irqrestore(&sg_index_lock, iflags);
- wake_up_interruptible(&sdp->o_excl_wait);
+ write_unlock_irqrestore(&sdp->sfd_lock, iflags);
INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
schedule_work(&sfp->ew.work);
@@ -2184,7 +2270,8 @@ static Sg_device *sg_lookup_dev(int dev)
return idr_find(&sg_index_idr, dev);
}
-static Sg_device *sg_get_dev(int dev)
+static Sg_device *
+sg_get_dev(int dev)
{
struct sg_device *sdp;
unsigned long flags;
@@ -2193,8 +2280,8 @@ static Sg_device *sg_get_dev(int dev)
sdp = sg_lookup_dev(dev);
if (!sdp)
sdp = ERR_PTR(-ENXIO);
- else if (sdp->detached) {
- /* If sdp->detached, then the refcount may already be 0, in
+ else if (atomic_read(&sdp->detaching)) {
+ /* If sdp->detaching, then the refcount may already be 0, in
* which case it would be a bug to do kref_get().
*/
sdp = ERR_PTR(-ENODEV);
@@ -2205,11 +2292,6 @@ static Sg_device *sg_get_dev(int dev)
return sdp;
}
-static void sg_put_dev(struct sg_device *sdp)
-{
- kref_put(&sdp->d_ref, sg_device_destroy);
-}
-
#ifdef CONFIG_SCSI_PROC_FS
static struct proc_dir_entry *sg_proc_sgp = NULL;
@@ -2426,8 +2508,7 @@ static int sg_proc_single_open_version(struct inode *inode, struct file *file)
static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
{
- seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t"
- "online\n");
+ seq_puts(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n");
return 0;
}
@@ -2483,16 +2564,19 @@ static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
- if (sdp && (scsidp = sdp->device) && (!sdp->detached))
- seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
+ if ((NULL == sdp) || (NULL == sdp->device) ||
+ (atomic_read(&sdp->detaching)))
+ seq_puts(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
+ else {
+ scsidp = sdp->device;
+ seq_printf(s, "%d\t%d\t%d\t%llu\t%d\t%d\t%d\t%d\t%d\n",
scsidp->host->host_no, scsidp->channel,
scsidp->id, scsidp->lun, (int) scsidp->type,
1,
(int) scsidp->queue_depth,
- (int) scsidp->device_busy,
+ (int) atomic_read(&scsidp->device_busy),
(int) scsi_device_online(scsidp));
- else
- seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
+ }
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
}
@@ -2511,11 +2595,12 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
- if (sdp && (scsidp = sdp->device) && (!sdp->detached))
+ scsidp = sdp ? sdp->device : NULL;
+ if (sdp && scsidp && (!atomic_read(&sdp->detaching)))
seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
scsidp->vendor, scsidp->model, scsidp->rev);
else
- seq_printf(s, "<no active device>\n");
+ seq_puts(s, "<no active device>\n");
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
}
@@ -2560,12 +2645,12 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
else
cp = " ";
}
- seq_printf(s, cp);
+ seq_puts(s, cp);
blen = srp->data.bufflen;
usg = srp->data.k_use_sg;
- seq_printf(s, srp->done ?
- ((1 == srp->done) ? "rcv:" : "fin:")
- : "act:");
+ seq_puts(s, srp->done ?
+ ((1 == srp->done) ? "rcv:" : "fin:")
+ : "act:");
seq_printf(s, " id=%d blen=%d",
srp->header.pack_id, blen);
if (srp->done)
@@ -2581,7 +2666,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
(int) srp->data.cmd_opcode);
}
if (0 == m)
- seq_printf(s, " No requests active\n");
+ seq_puts(s, " No requests active\n");
read_unlock(&fp->rq_list_lock);
}
}
@@ -2597,31 +2682,34 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
Sg_device *sdp;
unsigned long iflags;
- if (it && (0 == it->index)) {
- seq_printf(s, "max_active_device=%d(origin 1)\n",
- (int)it->max);
- seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
- }
+ if (it && (0 == it->index))
+ seq_printf(s, "max_active_device=%d def_reserved_size=%d\n",
+ (int)it->max, sg_big_buff);
read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
- if (sdp && !list_empty(&sdp->sfds)) {
- struct scsi_device *scsidp = sdp->device;
-
+ if (NULL == sdp)
+ goto skip;
+ read_lock(&sdp->sfd_lock);
+ if (!list_empty(&sdp->sfds)) {
seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
- if (sdp->detached)
- seq_printf(s, "detached pending close ");
- else
- seq_printf
- (s, "scsi%d chan=%d id=%d lun=%d em=%d",
- scsidp->host->host_no,
- scsidp->channel, scsidp->id,
- scsidp->lun,
- scsidp->host->hostt->emulated);
- seq_printf(s, " sg_tablesize=%d excl=%d\n",
- sdp->sg_tablesize, get_exclude(sdp));
+ if (atomic_read(&sdp->detaching))
+ seq_puts(s, "detaching pending close ");
+ else if (sdp->device) {
+ struct scsi_device *scsidp = sdp->device;
+
+ seq_printf(s, "%d:%d:%d:%llu em=%d",
+ scsidp->host->host_no,
+ scsidp->channel, scsidp->id,
+ scsidp->lun,
+ scsidp->host->hostt->emulated);
+ }
+ seq_printf(s, " sg_tablesize=%d excl=%d open_cnt=%d\n",
+ sdp->sg_tablesize, sdp->exclude, sdp->open_cnt);
sg_proc_debug_helper(s, sdp);
}
+ read_unlock(&sdp->sfd_lock);
+skip:
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
}