Diffstat (limited to 'drivers/scsi/scsi_debug.c')
-rw-r--r--	drivers/scsi/scsi_debug.c | 983
1 file changed, 430 insertions(+), 553 deletions(-)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 8553277effb3..f4fa1035a191 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -250,6 +250,11 @@ static const char *sdebug_version_date = "20210520";
#define SDEB_XA_NOT_IN_USE XA_MARK_1
+static struct kmem_cache *queued_cmd_cache;
+
+#define TO_QUEUED_CMD(scmd) ((void *)(scmd)->host_scribble)
+#define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
+
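A minimal sketch (not part of the patch) of the round trip these two macros implement: the submit path stashes the driver's per-command allocation in scsi_cmnd->host_scribble, and the completion and abort paths cast it back. The function names here are hypothetical.

static void example_stash(struct scsi_cmnd *scmd,
			  struct sdebug_queued_cmd *sqcp)
{
	ASSIGN_QUEUED_CMD(scmd, sqcp);	/* scmd->host_scribble = (void *)sqcp */
}

static struct sdebug_queued_cmd *example_fetch(struct scsi_cmnd *scmd)
{
	return TO_QUEUED_CMD(scmd);	/* cast host_scribble back */
}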
/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
ZBC_ZTYPE_CNV = 0x1,
@@ -288,7 +293,6 @@ struct sdebug_dev_info {
uuid_t lu_name;
struct sdebug_host_info *sdbg_host;
unsigned long uas_bm[1];
- atomic_t num_in_q;
atomic_t stopped; /* 1: by SSU, 2: device start */
bool used;
@@ -324,9 +328,12 @@ struct sdeb_store_info {
void *map_storep; /* provisioning map */
};
-#define to_sdebug_host(d) \
+#define dev_to_sdebug_host(d) \
container_of(d, struct sdebug_host_info, dev)
+#define shost_to_sdebug_host(shost) \
+ dev_to_sdebug_host(shost->dma_dev)
+
enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
@@ -334,13 +341,7 @@ struct sdebug_defer {
struct hrtimer hrt;
struct execute_work ew;
ktime_t cmpl_ts;/* time since boot to complete this cmd */
- int sqa_idx; /* index of sdebug_queue array */
- int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */
- int hc_idx; /* hostwide tag index */
int issuing_cpu;
- bool init_hrt;
- bool init_wq;
- bool init_poll;
bool aborted; /* true when blk_abort_request() already called */
enum sdeb_defer_type defer_t;
};
@@ -349,15 +350,12 @@ struct sdebug_queued_cmd {
/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
* instance indicates this slot is in use.
*/
- struct sdebug_defer *sd_dp;
- struct scsi_cmnd *a_cmnd;
+ struct sdebug_defer sd_dp;
+ struct scsi_cmnd *scmd;
};
-struct sdebug_queue {
- struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
- unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
- spinlock_t qc_lock;
- atomic_t blocked; /* to temporarily stop more being queued */
+struct sdebug_scsi_cmd {
+ spinlock_t lock;
};
static atomic_t sdebug_cmnd_count; /* number of incoming commands */
@@ -507,6 +505,8 @@ static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);
+static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
+
/*
* The following are overflow arrays for cdbs that "hit" the same index in
* the opcode_info_arr array. The most time sensitive (or commonly used) cdb
@@ -754,7 +754,6 @@ static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE; /* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
-static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY; /* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
@@ -814,7 +813,7 @@ static int sdebug_cylinders_per; /* cylinders per surface */
static int sdebug_sectors_per; /* sectors per cylinder */
static LIST_HEAD(sdebug_host_list);
-static DEFINE_SPINLOCK(sdebug_host_list_lock);
+static DEFINE_MUTEX(sdebug_host_list_mutex);
static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
@@ -841,7 +840,6 @@ static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
static int poll_queues; /* iouring iopoll interface.*/
-static struct sdebug_queue *sdebug_q_arr; /* ptr to array of submit queues */
static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);
@@ -906,7 +904,7 @@ static void sdebug_max_tgts_luns(void)
struct sdebug_host_info *sdbg_host;
struct Scsi_Host *hpnt;
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
hpnt = sdbg_host->shost;
if ((hpnt->this_id >= 0) &&
@@ -917,7 +915,7 @@ static void sdebug_max_tgts_luns(void)
/* sdebug_max_luns; */
hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
}
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
}
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
@@ -1049,30 +1047,27 @@ static void all_config_cdb_len(void)
struct Scsi_Host *shost;
struct scsi_device *sdev;
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
shost = sdbg_host->shost;
shost_for_each_device(sdev, shost) {
config_cdb_len(sdev);
}
}
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
}
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
- struct sdebug_host_info *sdhp;
+ struct sdebug_host_info *sdhp = devip->sdbg_host;
struct sdebug_dev_info *dp;
- spin_lock(&sdebug_host_list_lock);
- list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
- list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
- if ((devip->sdbg_host == dp->sdbg_host) &&
- (devip->target == dp->target))
- clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
+ list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
+ if ((devip->sdbg_host == dp->sdbg_host) &&
+ (devip->target == dp->target)) {
+ clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
}
}
- spin_unlock(&sdebug_host_list_lock);
}
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
@@ -4899,20 +4894,6 @@ fini:
return res;
}
-static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
-{
- u16 hwq;
- u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
-
- hwq = blk_mq_unique_tag_to_hwq(tag);
-
- pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
- if (WARN_ON_ONCE(hwq >= submit_queues))
- hwq = 0;
-
- return sdebug_q_arr + hwq;
-}
-
static u32 get_tag(struct scsi_cmnd *cmnd)
{
return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
@@ -4921,75 +4902,41 @@ static u32 get_tag(struct scsi_cmnd *cmnd)
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
- bool aborted = sd_dp->aborted;
- int qc_idx;
- int retiring = 0;
- unsigned long iflags;
- struct sdebug_queue *sqp;
- struct sdebug_queued_cmd *sqcp;
- struct scsi_cmnd *scp;
- struct sdebug_dev_info *devip;
+ struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
+ unsigned long flags;
+ struct scsi_cmnd *scp = sqcp->scmd;
+ struct sdebug_scsi_cmd *sdsc;
+ bool aborted;
- if (unlikely(aborted))
- sd_dp->aborted = false;
- qc_idx = sd_dp->qc_idx;
- sqp = sdebug_q_arr + sd_dp->sqa_idx;
if (sdebug_statistics) {
atomic_inc(&sdebug_completions);
if (raw_smp_processor_id() != sd_dp->issuing_cpu)
atomic_inc(&sdebug_miss_cpus);
}
- if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
- pr_err("wild qc_idx=%d\n", qc_idx);
- return;
- }
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
- sqcp = &sqp->qc_arr[qc_idx];
- scp = sqcp->a_cmnd;
- if (unlikely(scp == NULL)) {
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
- sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
- return;
- }
- devip = (struct sdebug_dev_info *)scp->device->hostdata;
- if (likely(devip))
- atomic_dec(&devip->num_in_q);
- else
- pr_err("devip=NULL\n");
- if (unlikely(atomic_read(&retired_max_queue) > 0))
- retiring = 1;
-
- sqcp->a_cmnd = NULL;
- if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- pr_err("Unexpected completion\n");
- return;
+
+ if (!scp) {
+ pr_err("scmd=NULL\n");
+ goto out;
}
- if (unlikely(retiring)) { /* user has reduced max_queue */
- int k, retval;
+ sdsc = scsi_cmd_priv(scp);
+ spin_lock_irqsave(&sdsc->lock, flags);
+ aborted = sd_dp->aborted;
+ if (unlikely(aborted))
+ sd_dp->aborted = false;
+ ASSIGN_QUEUED_CMD(scp, NULL);
+
+ spin_unlock_irqrestore(&sdsc->lock, flags);
- retval = atomic_read(&retired_max_queue);
- if (qc_idx >= retval) {
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- pr_err("index %d too large\n", retval);
- return;
- }
- k = find_last_bit(sqp->in_use_bm, retval);
- if ((k < sdebug_max_queue) || (k == retval))
- atomic_set(&retired_max_queue, 0);
- else
- atomic_set(&retired_max_queue, k + 1);
- }
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- if (unlikely(aborted)) {
- if (sdebug_verbose)
- pr_info("bypassing scsi_done() due to aborted cmd\n");
- return;
+ if (aborted) {
+ pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
+ blk_abort_request(scsi_cmd_to_rq(scp));
+ goto out;
}
+
scsi_done(scp); /* callback to mid level */
+out:
+ sdebug_free_queued_cmd(sqcp);
}
/* When high resolution timer goes off this function is called. */
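With struct sdebug_defer now embedded in struct sdebug_queued_cmd instead of separately allocated, the completion path above recovers the owning command by pointer arithmetic alone. A standalone illustration of the container_of() step, assuming only the two structs from this patch:

static struct sdebug_queued_cmd *defer_to_queued_cmd(struct sdebug_defer *sd_dp)
{
	/* sd_dp happens to be the first member here, but container_of()
	 * is correct for a member at any offset within the struct */
	return container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
}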
@@ -5152,7 +5099,6 @@ static struct sdebug_dev_info *sdebug_device_create(
} else {
devip->zmodel = BLK_ZONED_NONE;
}
- devip->sdbg_host = sdbg_host;
devip->create_ts = ktime_get_boottime();
atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
@@ -5166,11 +5112,7 @@ static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
struct sdebug_dev_info *open_devip = NULL;
struct sdebug_dev_info *devip;
- sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
- if (!sdbg_host) {
- pr_err("Host info NULL\n");
- return NULL;
- }
+ sdbg_host = shost_to_sdebug_host(sdev->host);
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
if ((devip->used) && (devip->channel == sdev->channel) &&
@@ -5194,7 +5136,6 @@ static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
open_devip->target = sdev->id;
open_devip->lun = sdev->lun;
open_devip->sdbg_host = sdbg_host;
- atomic_set(&open_devip->num_in_q, 0);
set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
open_devip->used = true;
return open_devip;
@@ -5245,215 +5186,171 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
}
}
-static void stop_qc_helper(struct sdebug_defer *sd_dp,
+/* Returns true if we require the queued memory to be freed by the caller. */
+static bool stop_qc_helper(struct sdebug_defer *sd_dp,
enum sdeb_defer_type defer_t)
{
- if (!sd_dp)
- return;
- if (defer_t == SDEB_DEFER_HRT)
- hrtimer_cancel(&sd_dp->hrt);
- else if (defer_t == SDEB_DEFER_WQ)
- cancel_work_sync(&sd_dp->ew.work);
-}
-
-/* If @cmnd found deletes its timer or work queue and returns true; else
- returns false */
-static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
-{
- unsigned long iflags;
- int j, k, qmax, r_qmax;
- enum sdeb_defer_type l_defer_t;
- struct sdebug_queue *sqp;
- struct sdebug_queued_cmd *sqcp;
- struct sdebug_dev_info *devip;
- struct sdebug_defer *sd_dp;
+ if (defer_t == SDEB_DEFER_HRT) {
+ int res = hrtimer_try_to_cancel(&sd_dp->hrt);
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- qmax = sdebug_max_queue;
- r_qmax = atomic_read(&retired_max_queue);
- if (r_qmax > qmax)
- qmax = r_qmax;
- for (k = 0; k < qmax; ++k) {
- if (test_bit(k, sqp->in_use_bm)) {
- sqcp = &sqp->qc_arr[k];
- if (cmnd != sqcp->a_cmnd)
- continue;
- /* found */
- devip = (struct sdebug_dev_info *)
- cmnd->device->hostdata;
- if (devip)
- atomic_dec(&devip->num_in_q);
- sqcp->a_cmnd = NULL;
- sd_dp = sqcp->sd_dp;
- if (sd_dp) {
- l_defer_t = READ_ONCE(sd_dp->defer_t);
- WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
- } else
- l_defer_t = SDEB_DEFER_NONE;
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- stop_qc_helper(sd_dp, l_defer_t);
- clear_bit(k, sqp->in_use_bm);
- return true;
- }
+ switch (res) {
+ case 0: /* Not active, it must have already run */
+ case -1: /* -1 It's executing the CB */
+ return false;
+ case 1: /* Was active, we've now cancelled */
+ default:
+ return true;
}
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ } else if (defer_t == SDEB_DEFER_WQ) {
+ /* Cancel if pending */
+ if (cancel_work_sync(&sd_dp->ew.work))
+ return true;
+ /* Was not pending, so it must have run */
+ return false;
+ } else if (defer_t == SDEB_DEFER_POLL) {
+ return true;
}
+
return false;
}
-/* Deletes (stops) timers or work queues of all queued commands */
-static void stop_all_queued(void)
+
+static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
{
- unsigned long iflags;
- int j, k;
enum sdeb_defer_type l_defer_t;
- struct sdebug_queue *sqp;
- struct sdebug_queued_cmd *sqcp;
- struct sdebug_dev_info *devip;
struct sdebug_defer *sd_dp;
+ struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
+ struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
- if (test_bit(k, sqp->in_use_bm)) {
- sqcp = &sqp->qc_arr[k];
- if (sqcp->a_cmnd == NULL)
- continue;
- devip = (struct sdebug_dev_info *)
- sqcp->a_cmnd->device->hostdata;
- if (devip)
- atomic_dec(&devip->num_in_q);
- sqcp->a_cmnd = NULL;
- sd_dp = sqcp->sd_dp;
- if (sd_dp) {
- l_defer_t = READ_ONCE(sd_dp->defer_t);
- WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
- } else
- l_defer_t = SDEB_DEFER_NONE;
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- stop_qc_helper(sd_dp, l_defer_t);
- clear_bit(k, sqp->in_use_bm);
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- }
- }
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- }
+ lockdep_assert_held(&sdsc->lock);
+
+ if (!sqcp)
+ return false;
+ sd_dp = &sqcp->sd_dp;
+ l_defer_t = READ_ONCE(sd_dp->defer_t);
+ ASSIGN_QUEUED_CMD(cmnd, NULL);
+
+ if (stop_qc_helper(sd_dp, l_defer_t))
+ sdebug_free_queued_cmd(sqcp);
+
+ return true;
+}
+
+/*
+ * Called from scsi_debug_abort() only, which is for timed-out cmd.
+ */
+static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
+{
+ struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
+ unsigned long flags;
+ bool res;
+
+ spin_lock_irqsave(&sdsc->lock, flags);
+ res = scsi_debug_stop_cmnd(cmnd);
+ spin_unlock_irqrestore(&sdsc->lock, flags);
+
+ return res;
}
-/* Free queued command memory on heap */
-static void free_all_queued(void)
+/*
+ * All we can do is set the cmnd as internally aborted and wait for it to
+ * finish. We cannot call scsi_done() as normal completion path may do that.
+ */
+static bool sdebug_stop_cmnd(struct request *rq, void *data)
{
- int j, k;
- struct sdebug_queue *sqp;
- struct sdebug_queued_cmd *sqcp;
+ scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
- for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
- sqcp = &sqp->qc_arr[k];
- kfree(sqcp->sd_dp);
- sqcp->sd_dp = NULL;
- }
+ return true;
+}
+
+/* Deletes (stops) timers or work queues of all queued commands */
+static void stop_all_queued(void)
+{
+ struct sdebug_host_info *sdhp;
+
+ mutex_lock(&sdebug_host_list_mutex);
+ list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
+ struct Scsi_Host *shost = sdhp->shost;
+
+ blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
}
+ mutex_unlock(&sdebug_host_list_mutex);
}
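stop_all_queued() leans on blk_mq_tagset_busy_iter(), which invokes the supplied callback for every in-flight request in the tag set; returning true continues the walk, false ends it early. A minimal standalone callback in the same shape, with hypothetical names:

static bool example_count_busy(struct request *rq, void *opaque)
{
	unsigned int *count = opaque;

	(*count)++;
	return true;	/* keep iterating; false would stop the walk */
}

/* usage: blk_mq_tagset_busy_iter(&shost->tag_set, example_count_busy, &n); */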
static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
{
- bool ok;
+ bool ok = scsi_debug_abort_cmnd(SCpnt);
++num_aborts;
- if (SCpnt) {
- ok = stop_queued_cmnd(SCpnt);
- if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
- sdev_printk(KERN_INFO, SCpnt->device,
- "%s: command%s found\n", __func__,
- ok ? "" : " not");
- }
+
+ if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
+ sdev_printk(KERN_INFO, SCpnt->device,
+ "%s: command%s found\n", __func__,
+ ok ? "" : " not");
+
return SUCCESS;
}
static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
{
+ struct scsi_device *sdp = SCpnt->device;
+ struct sdebug_dev_info *devip = sdp->hostdata;
+
++num_dev_resets;
- if (SCpnt && SCpnt->device) {
- struct scsi_device *sdp = SCpnt->device;
- struct sdebug_dev_info *devip =
- (struct sdebug_dev_info *)sdp->hostdata;
- if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
- sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
- if (devip)
- set_bit(SDEBUG_UA_POR, devip->uas_bm);
- }
+ if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
+ sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
+ if (devip)
+ set_bit(SDEBUG_UA_POR, devip->uas_bm);
+
return SUCCESS;
}
static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
{
- struct sdebug_host_info *sdbg_host;
+ struct scsi_device *sdp = SCpnt->device;
+ struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
struct sdebug_dev_info *devip;
- struct scsi_device *sdp;
- struct Scsi_Host *hp;
int k = 0;
++num_target_resets;
- if (!SCpnt)
- goto lie;
- sdp = SCpnt->device;
- if (!sdp)
- goto lie;
if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
- hp = sdp->host;
- if (!hp)
- goto lie;
- sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
- if (sdbg_host) {
- list_for_each_entry(devip,
- &sdbg_host->dev_info_list,
- dev_list)
- if (devip->target == sdp->id) {
- set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
- ++k;
- }
+
+ list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
+ if (devip->target == sdp->id) {
+ set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ ++k;
+ }
}
+
if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, sdp,
"%s: %d device(s) found in target\n", __func__, k);
-lie:
+
return SUCCESS;
}
static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
{
- struct sdebug_host_info *sdbg_host;
+ struct scsi_device *sdp = SCpnt->device;
+ struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
struct sdebug_dev_info *devip;
- struct scsi_device *sdp;
- struct Scsi_Host *hp;
int k = 0;
++num_bus_resets;
- if (!(SCpnt && SCpnt->device))
- goto lie;
- sdp = SCpnt->device;
+
if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
- hp = sdp->host;
- if (hp) {
- sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
- if (sdbg_host) {
- list_for_each_entry(devip,
- &sdbg_host->dev_info_list,
- dev_list) {
- set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
- ++k;
- }
- }
+
+ list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
+ set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ ++k;
}
+
if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, sdp,
"%s: %d device(s) found in host\n", __func__, k);
-lie:
return SUCCESS;
}
@@ -5464,9 +5361,9 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
int k = 0;
++num_host_resets;
- if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
+ if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
list_for_each_entry(devip, &sdbg_host->dev_info_list,
dev_list) {
@@ -5474,7 +5371,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
++k;
}
}
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
stop_all_queued();
if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, SCpnt->device,
@@ -5537,11 +5434,18 @@ static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
static void block_unblock_all_queues(bool block)
{
- int j;
- struct sdebug_queue *sqp;
+ struct sdebug_host_info *sdhp;
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
- atomic_set(&sqp->blocked, (int)block);
+ lockdep_assert_held(&sdebug_host_list_mutex);
+
+ list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
+ struct Scsi_Host *shost = sdhp->shost;
+
+ if (block)
+ scsi_block_requests(shost);
+ else
+ scsi_unblock_requests(shost);
+ }
}
/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
@@ -5554,10 +5458,13 @@ static void tweak_cmnd_count(void)
modulo = abs(sdebug_every_nth);
if (modulo < 2)
return;
+
+ mutex_lock(&sdebug_host_list_mutex);
block_unblock_all_queues(true);
count = atomic_read(&sdebug_cmnd_count);
atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
block_unblock_all_queues(false);
+ mutex_unlock(&sdebug_host_list_mutex);
}
static void clear_queue_stats(void)
@@ -5577,6 +5484,33 @@ static bool inject_on_this_cmd(void)
#define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
+
+void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
+{
+ if (sqcp)
+ kmem_cache_free(queued_cmd_cache, sqcp);
+}
+
+static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
+{
+ struct sdebug_queued_cmd *sqcp;
+ struct sdebug_defer *sd_dp;
+
+ sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
+ if (!sqcp)
+ return NULL;
+
+ sd_dp = &sqcp->sd_dp;
+
+ hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+ sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
+ INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
+
+ sqcp->scmd = scmd;
+
+ return sqcp;
+}
+
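The GFP_ATOMIC above reflects that queuecommand can run in atomic context; the backing slab cache itself is created once in scsi_debug_init() with KMEM_CACHE() and torn down in scsi_debug_exit(). A condensed sketch of that lifecycle, reusing the names from this patch:

static int __init example_cache_init(void)
{
	queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
	return queued_cmd_cache ? 0 : -ENOMEM;
}

static void __exit example_cache_exit(void)
{
	kmem_cache_destroy(queued_cmd_cache);	/* NULL-safe */
}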
/* Complete the processing of the thread that queued a SCSI command to this
* driver. It either completes the command by calling cmnd_done() or
* schedules a hr timer or work queue then returns 0. Returns
@@ -5588,13 +5522,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
struct sdebug_dev_info *),
int delta_jiff, int ndelay)
{
- bool new_sd_dp;
- bool inject = false;
- bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
- int k, num_in_q, qdepth;
- unsigned long iflags;
+ struct request *rq = scsi_cmd_to_rq(cmnd);
+ bool polled = rq->cmd_flags & REQ_POLLED;
+ struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
+ unsigned long flags;
u64 ns_from_boot = 0;
- struct sdebug_queue *sqp;
struct sdebug_queued_cmd *sqcp;
struct scsi_device *sdp;
struct sdebug_defer *sd_dp;
@@ -5609,66 +5541,30 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (delta_jiff == 0)
goto respond_in_thread;
- sqp = get_queue(cmnd);
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- if (unlikely(atomic_read(&sqp->blocked))) {
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- return SCSI_MLQUEUE_HOST_BUSY;
- }
- num_in_q = atomic_read(&devip->num_in_q);
- qdepth = cmnd->device->queue_depth;
- if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
- if (scsi_result) {
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- goto respond_in_thread;
- } else
- scsi_result = device_qfull_result;
- } else if (unlikely(sdebug_every_nth &&
- (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
- (scsi_result == 0))) {
- if ((num_in_q == (qdepth - 1)) &&
+
+ if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
+ (scsi_result == 0))) {
+ int num_in_q = scsi_device_busy(sdp);
+ int qdepth = cmnd->device->queue_depth;
+
+ if ((num_in_q == qdepth) &&
(atomic_inc_return(&sdebug_a_tsf) >=
abs(sdebug_every_nth))) {
atomic_set(&sdebug_a_tsf, 0);
- inject = true;
scsi_result = device_qfull_result;
- }
- }
- k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
- if (unlikely(k >= sdebug_max_queue)) {
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- if (scsi_result)
- goto respond_in_thread;
- scsi_result = device_qfull_result;
- if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
- sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
- __func__, sdebug_max_queue);
- goto respond_in_thread;
- }
- set_bit(k, sqp->in_use_bm);
- atomic_inc(&devip->num_in_q);
- sqcp = &sqp->qc_arr[k];
- sqcp->a_cmnd = cmnd;
- cmnd->host_scribble = (unsigned char *)sqcp;
- sd_dp = sqcp->sd_dp;
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
-
- if (!sd_dp) {
- sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
- if (!sd_dp) {
- atomic_dec(&devip->num_in_q);
- clear_bit(k, sqp->in_use_bm);
- return SCSI_MLQUEUE_HOST_BUSY;
+ if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
+ sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
+ __func__, num_in_q);
}
- new_sd_dp = true;
- } else {
- new_sd_dp = false;
}
- /* Set the hostwide tag */
- if (sdebug_host_max_queue)
- sd_dp->hc_idx = get_tag(cmnd);
+ sqcp = sdebug_alloc_queued_cmd(cmnd);
+ if (!sqcp) {
+ pr_err("%s no alloc\n", __func__);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ sd_dp = &sqcp->sd_dp;
if (polled)
ns_from_boot = ktime_get_boottime_ns();
@@ -5715,14 +5611,8 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
u64 d = ktime_get_boottime_ns() - ns_from_boot;
if (kt <= d) { /* elapsed duration >= kt */
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- sqcp->a_cmnd = NULL;
- atomic_dec(&devip->num_in_q);
- clear_bit(k, sqp->in_use_bm);
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- if (new_sd_dp)
- kfree(sd_dp);
/* call scsi_done() from this thread */
+ sdebug_free_queued_cmd(sqcp);
scsi_done(cmnd);
return 0;
}
@@ -5730,72 +5620,54 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
kt -= d;
}
}
+ if (sdebug_statistics)
+ sd_dp->issuing_cpu = raw_smp_processor_id();
if (polled) {
+ spin_lock_irqsave(&sdsc->lock, flags);
sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- if (!sd_dp->init_poll) {
- sd_dp->init_poll = true;
- sqcp->sd_dp = sd_dp;
- sd_dp->sqa_idx = sqp - sdebug_q_arr;
- sd_dp->qc_idx = k;
- }
+ ASSIGN_QUEUED_CMD(cmnd, sqcp);
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ spin_unlock_irqrestore(&sdsc->lock, flags);
} else {
- if (!sd_dp->init_hrt) {
- sd_dp->init_hrt = true;
- sqcp->sd_dp = sd_dp;
- hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
- sd_dp->sqa_idx = sqp - sdebug_q_arr;
- sd_dp->qc_idx = k;
- }
- WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
/* schedule the invocation of scsi_done() for a later time */
+ spin_lock_irqsave(&sdsc->lock, flags);
+ ASSIGN_QUEUED_CMD(cmnd, sqcp);
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
+ /*
+ * The completion handler will try to grab sqcp->lock,
+ * so there is no chance that the completion handler
+ * will call scsi_done() until we release the lock
+ * here (so ok to keep referencing sdsc).
+ */
+ spin_unlock_irqrestore(&sdsc->lock, flags);
}
- if (sdebug_statistics)
- sd_dp->issuing_cpu = raw_smp_processor_id();
} else { /* jdelay < 0, use work queue */
if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
- atomic_read(&sdeb_inject_pending)))
+ atomic_read(&sdeb_inject_pending))) {
sd_dp->aborted = true;
+ atomic_set(&sdeb_inject_pending, 0);
+ sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
+ blk_mq_unique_tag_to_tag(get_tag(cmnd)));
+ }
+
+ if (sdebug_statistics)
+ sd_dp->issuing_cpu = raw_smp_processor_id();
if (polled) {
+ spin_lock_irqsave(&sdsc->lock, flags);
+ ASSIGN_QUEUED_CMD(cmnd, sqcp);
sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- if (!sd_dp->init_poll) {
- sd_dp->init_poll = true;
- sqcp->sd_dp = sd_dp;
- sd_dp->sqa_idx = sqp - sdebug_q_arr;
- sd_dp->qc_idx = k;
- }
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ spin_unlock_irqrestore(&sdsc->lock, flags);
} else {
- if (!sd_dp->init_wq) {
- sd_dp->init_wq = true;
- sqcp->sd_dp = sd_dp;
- sd_dp->sqa_idx = sqp - sdebug_q_arr;
- sd_dp->qc_idx = k;
- INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
- }
+ spin_lock_irqsave(&sdsc->lock, flags);
+ ASSIGN_QUEUED_CMD(cmnd, sqcp);
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
schedule_work(&sd_dp->ew.work);
- }
- if (sdebug_statistics)
- sd_dp->issuing_cpu = raw_smp_processor_id();
- if (unlikely(sd_dp->aborted)) {
- sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
- scsi_cmd_to_rq(cmnd)->tag);
- blk_abort_request(scsi_cmd_to_rq(cmnd));
- atomic_set(&sdeb_inject_pending, 0);
- sd_dp->aborted = false;
+ spin_unlock_irqrestore(&sdsc->lock, flags);
}
}
- if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
- sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
- num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
+
return 0;
respond_in_thread: /* call back to mid-layer using invocation thread */
@@ -5996,14 +5868,39 @@ static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
return length;
}
+struct sdebug_submit_queue_data {
+ int *first;
+ int *last;
+ int queue_num;
+};
+
+static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
+{
+ struct sdebug_submit_queue_data *data = opaque;
+ u32 unique_tag = blk_mq_unique_tag(rq);
+ u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
+ u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
+ int queue_num = data->queue_num;
+
+ if (hwq != queue_num)
+ return true;
+
+ /* Rely on iter'ing in ascending tag order */
+ if (*data->first == -1)
+ *data->first = *data->last = tag;
+ else
+ *data->last = tag;
+
+ return true;
+}
+
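Both iterator callbacks in this patch route a request back to its hardware queue by decoding the unique tag, which packs the hwq index into the upper 16 bits and the per-queue tag into the lower 16. The decode step in isolation (hypothetical helper):

static void example_decode_tag(struct request *rq, u16 *hwq, u16 *tag)
{
	u32 unique = blk_mq_unique_tag(rq);

	*hwq = blk_mq_unique_tag_to_hwq(unique);	/* upper 16 bits */
	*tag = blk_mq_unique_tag_to_tag(unique);	/* lower 16 bits */
}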
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
* same for each scsi_debug host (if more than one). Some of the counters
* output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
- int f, j, l;
- struct sdebug_queue *sqp;
struct sdebug_host_info *sdhp;
+ int j;
seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
SDEBUG_VERSION, sdebug_version_date);
@@ -6031,11 +5928,17 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
atomic_read(&sdeb_mq_poll_count));
seq_printf(m, "submit_queues=%d\n", submit_queues);
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
+ for (j = 0; j < submit_queues; ++j) {
+ int f = -1, l = -1;
+ struct sdebug_submit_queue_data data = {
+ .queue_num = j,
+ .first = &f,
+ .last = &l,
+ };
seq_printf(m, " queue %d:\n", j);
- f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
- if (f != sdebug_max_queue) {
- l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
+ blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
+ &data);
+ if (f >= 0) {
seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
"first,last bits", f, l);
}
@@ -6086,15 +5989,15 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
res = count;
if (sdebug_jdelay != jdelay) {
- int j, k;
- struct sdebug_queue *sqp;
+ struct sdebug_host_info *sdhp;
+ mutex_lock(&sdebug_host_list_mutex);
block_unblock_all_queues(true);
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
- ++j, ++sqp) {
- k = find_first_bit(sqp->in_use_bm,
- sdebug_max_queue);
- if (k != sdebug_max_queue) {
+
+ list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
+ struct Scsi_Host *shost = sdhp->shost;
+
+ if (scsi_host_busy(shost)) {
res = -EBUSY; /* queued commands */
break;
}
@@ -6104,6 +6007,7 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
sdebug_ndelay = 0;
}
block_unblock_all_queues(false);
+ mutex_unlock(&sdebug_host_list_mutex);
}
return res;
}
@@ -6126,25 +6030,27 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
(ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
res = count;
if (sdebug_ndelay != ndelay) {
- int j, k;
- struct sdebug_queue *sqp;
+ struct sdebug_host_info *sdhp;
+ mutex_lock(&sdebug_host_list_mutex);
block_unblock_all_queues(true);
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
- ++j, ++sqp) {
- k = find_first_bit(sqp->in_use_bm,
- sdebug_max_queue);
- if (k != sdebug_max_queue) {
+
+ list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
+ struct Scsi_Host *shost = sdhp->shost;
+
+ if (scsi_host_busy(shost)) {
res = -EBUSY; /* queued commands */
break;
}
}
+
if (res > 0) {
sdebug_ndelay = ndelay;
sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
: DEF_JDELAY;
}
block_unblock_all_queues(false);
+ mutex_unlock(&sdebug_host_list_mutex);
}
return res;
}
@@ -6390,13 +6296,13 @@ static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
struct sdebug_host_info *sdhp;
struct sdebug_dev_info *dp;
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
}
}
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
}
return count;
}
@@ -6426,7 +6332,7 @@ static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
struct sdebug_host_info *sdhp;
struct sdebug_dev_info *dp;
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_for_each_entry(sdhp, &sdebug_host_list,
host_list) {
list_for_each_entry(dp, &sdhp->dev_info_list,
@@ -6435,7 +6341,7 @@ static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
dp->uas_bm);
}
}
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
}
return count;
}
@@ -6452,28 +6358,19 @@ static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int j, n, k, a;
- struct sdebug_queue *sqp;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
(n <= SDEBUG_CANQUEUE) &&
(sdebug_host_max_queue == 0)) {
- block_unblock_all_queues(true);
- k = 0;
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
- ++j, ++sqp) {
- a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
- if (a > k)
- k = a;
- }
- sdebug_max_queue = n;
- if (k == SDEBUG_CANQUEUE)
- atomic_set(&retired_max_queue, 0);
- else if (k >= n)
- atomic_set(&retired_max_queue, k + 1);
+ mutex_lock(&sdebug_host_list_mutex);
+
+ /* We may only change sdebug_max_queue when we have no shosts */
+ if (list_empty(&sdebug_host_list))
+ sdebug_max_queue = n;
else
- atomic_set(&retired_max_queue, 0);
- block_unblock_all_queues(false);
+ count = -EBUSY;
+ mutex_unlock(&sdebug_host_list_mutex);
return count;
}
return -EINVAL;
@@ -6542,7 +6439,7 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
struct sdebug_host_info *sdhp;
struct sdebug_dev_info *dp;
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_for_each_entry(sdhp, &sdebug_host_list,
host_list) {
list_for_each_entry(dp, &sdhp->dev_info_list,
@@ -6551,7 +6448,7 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
dp->uas_bm);
}
}
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
}
return count;
}
@@ -6901,7 +6798,6 @@ static int __init scsi_debug_init(void)
ramdisk_lck_a[0] = &atomic_rw;
ramdisk_lck_a[1] = &atomic_rw2;
- atomic_set(&retired_max_queue, 0);
if (sdebug_ndelay >= 1000 * 1000 * 1000) {
pr_warn("ndelay must be less than 1 second, ignored\n");
@@ -6997,13 +6893,6 @@ static int __init scsi_debug_init(void)
sdebug_max_queue);
}
- sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
- GFP_KERNEL);
- if (sdebug_q_arr == NULL)
- return -ENOMEM;
- for (k = 0; k < submit_queues; ++k)
- spin_lock_init(&sdebug_q_arr[k].qc_lock);
-
/*
* check for host managed zoned block device specified with
* ptype=0x14 or zbc=XXX.
@@ -7012,10 +6901,8 @@ static int __init scsi_debug_init(void)
sdeb_zbc_model = BLK_ZONED_HM;
} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
k = sdeb_zbc_model_str(sdeb_zbc_model_s);
- if (k < 0) {
- ret = k;
- goto free_q_arr;
- }
+ if (k < 0)
+ return k;
sdeb_zbc_model = k;
switch (sdeb_zbc_model) {
case BLK_ZONED_NONE:
@@ -7027,8 +6914,7 @@ static int __init scsi_debug_init(void)
break;
default:
pr_err("Invalid ZBC model\n");
- ret = -EINVAL;
- goto free_q_arr;
+ return -EINVAL;
}
}
if (sdeb_zbc_model != BLK_ZONED_NONE) {
@@ -7075,17 +6961,14 @@ static int __init scsi_debug_init(void)
sdebug_unmap_granularity <=
sdebug_unmap_alignment) {
pr_err("ERR: unmap_granularity <= unmap_alignment\n");
- ret = -EINVAL;
- goto free_q_arr;
+ return -EINVAL;
}
}
xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
if (want_store) {
idx = sdebug_add_store();
- if (idx < 0) {
- ret = idx;
- goto free_q_arr;
- }
+ if (idx < 0)
+ return idx;
}
pseudo_primary = root_device_register("pseudo_0");
@@ -7108,6 +6991,12 @@ static int __init scsi_debug_init(void)
hosts_to_add = sdebug_add_host;
sdebug_add_host = 0;
+ queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
+ if (!queued_cmd_cache) {
+ ret = -ENOMEM;
+ goto driver_unreg;
+ }
+
for (k = 0; k < hosts_to_add; k++) {
if (want_store && k == 0) {
ret = sdebug_add_host_helper(idx);
@@ -7130,14 +7019,14 @@ static int __init scsi_debug_init(void)
return 0;
+driver_unreg:
+ driver_unregister(&sdebug_driverfs_driver);
bus_unreg:
bus_unregister(&pseudo_lld_bus);
dev_unreg:
root_device_unregister(pseudo_primary);
free_vm:
sdebug_erase_store(idx, NULL);
-free_q_arr:
- kfree(sdebug_q_arr);
return ret;
}
@@ -7145,17 +7034,15 @@ static void __exit scsi_debug_exit(void)
{
int k = sdebug_num_hosts;
- stop_all_queued();
for (; k; k--)
sdebug_do_remove_host(true);
- free_all_queued();
+ kmem_cache_destroy(queued_cmd_cache);
driver_unregister(&sdebug_driverfs_driver);
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
sdebug_erase_all_stores(false);
xa_destroy(per_store_ap);
- kfree(sdebug_q_arr);
}
device_initcall(scsi_debug_init);
@@ -7165,7 +7052,7 @@ static void sdebug_release_adapter(struct device *dev)
{
struct sdebug_host_info *sdbg_host;
- sdbg_host = to_sdebug_host(dev);
+ sdbg_host = dev_to_sdebug_host(dev);
kfree(sdbg_host);
}
@@ -7311,9 +7198,9 @@ static int sdebug_add_host_helper(int per_host_idx)
goto clean;
}
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
sdbg_host->dev.bus = &pseudo_lld_bus;
sdbg_host->dev.parent = pseudo_primary;
@@ -7322,9 +7209,9 @@ static int sdebug_add_host_helper(int per_host_idx)
error = device_register(&sdbg_host->dev);
if (error) {
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_del(&sdbg_host->host_list);
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
goto clean;
}
@@ -7364,7 +7251,7 @@ static void sdebug_do_remove_host(bool the_end)
struct sdebug_host_info *sdbg_host = NULL;
struct sdebug_host_info *sdbg_host2;
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
if (!list_empty(&sdebug_host_list)) {
sdbg_host = list_entry(sdebug_host_list.prev,
struct sdebug_host_info, host_list);
@@ -7389,7 +7276,7 @@ static void sdebug_do_remove_host(bool the_end)
}
if (sdbg_host)
list_del(&sdbg_host->host_list);
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
if (!sdbg_host)
return;
@@ -7400,16 +7287,13 @@ static void sdebug_do_remove_host(bool the_end)
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
- int num_in_q = 0;
- struct sdebug_dev_info *devip;
+ struct sdebug_dev_info *devip = sdev->hostdata;
- block_unblock_all_queues(true);
- devip = (struct sdebug_dev_info *)sdev->hostdata;
- if (NULL == devip) {
- block_unblock_all_queues(false);
+ if (!devip)
return -ENODEV;
- }
- num_in_q = atomic_read(&devip->num_in_q);
+
+ mutex_lock(&sdebug_host_list_mutex);
+ block_unblock_all_queues(true);
if (qdepth > SDEBUG_CANQUEUE) {
qdepth = SDEBUG_CANQUEUE;
@@ -7421,11 +7305,12 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
if (qdepth != sdev->queue_depth)
scsi_change_queue_depth(sdev, qdepth);
- if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
- sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
- __func__, qdepth, num_in_q);
- }
block_unblock_all_queues(false);
+ mutex_unlock(&sdebug_host_list_mutex);
+
+ if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
+ sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
+
return sdev->queue_depth;
}
@@ -7515,94 +7400,82 @@ static void sdebug_map_queues(struct Scsi_Host *shost)
}
}
-static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+struct sdebug_blk_mq_poll_data {
+ unsigned int queue_num;
+ int *num_entries;
+};
+
+/*
+ * We don't handle aborted commands here, but it does not seem possible to have
+ * aborted polled commands from schedule_resp()
+ */
+static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
{
- bool first;
- bool retiring = false;
- int num_entries = 0;
- unsigned int qc_idx = 0;
- unsigned long iflags;
- ktime_t kt_from_boot = ktime_get_boottime();
- struct sdebug_queue *sqp;
- struct sdebug_queued_cmd *sqcp;
- struct scsi_cmnd *scp;
- struct sdebug_dev_info *devip;
+ struct sdebug_blk_mq_poll_data *data = opaque;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+ struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
struct sdebug_defer *sd_dp;
+ u32 unique_tag = blk_mq_unique_tag(rq);
+ u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
+ struct sdebug_queued_cmd *sqcp;
+ unsigned long flags;
+ int queue_num = data->queue_num;
+ ktime_t time;
- sqp = sdebug_q_arr + queue_num;
+ /* We're only interested in one queue for this iteration */
+ if (hwq != queue_num)
+ return true;
- spin_lock_irqsave(&sqp->qc_lock, iflags);
+ /* Subsequent checks would fail if this failed, but check anyway */
+ if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
+ return true;
- qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
- if (qc_idx >= sdebug_max_queue)
- goto unlock;
+ time = ktime_get_boottime();
- for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
- if (first) {
- first = false;
- if (!test_bit(qc_idx, sqp->in_use_bm))
- continue;
- } else {
- qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
- }
- if (qc_idx >= sdebug_max_queue)
- break;
+ spin_lock_irqsave(&sdsc->lock, flags);
+ sqcp = TO_QUEUED_CMD(cmd);
+ if (!sqcp) {
+ spin_unlock_irqrestore(&sdsc->lock, flags);
+ return true;
+ }
- sqcp = &sqp->qc_arr[qc_idx];
- sd_dp = sqcp->sd_dp;
- if (unlikely(!sd_dp))
- continue;
- scp = sqcp->a_cmnd;
- if (unlikely(scp == NULL)) {
- pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
- queue_num, qc_idx, __func__);
- break;
- }
- if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
- if (kt_from_boot < sd_dp->cmpl_ts)
- continue;
+ sd_dp = &sqcp->sd_dp;
+ if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
+ spin_unlock_irqrestore(&sdsc->lock, flags);
+ return true;
+ }
- } else /* ignoring non REQ_POLLED requests */
- continue;
- devip = (struct sdebug_dev_info *)scp->device->hostdata;
- if (likely(devip))
- atomic_dec(&devip->num_in_q);
- else
- pr_err("devip=NULL from %s\n", __func__);
- if (unlikely(atomic_read(&retired_max_queue) > 0))
- retiring = true;
-
- sqcp->a_cmnd = NULL;
- if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
- pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
- sqp, queue_num, qc_idx, __func__);
- break;
- }
- if (unlikely(retiring)) { /* user has reduced max_queue */
- int k, retval;
+ if (time < sd_dp->cmpl_ts) {
+ spin_unlock_irqrestore(&sdsc->lock, flags);
+ return true;
+ }
- retval = atomic_read(&retired_max_queue);
- if (qc_idx >= retval) {
- pr_err("index %d too large\n", retval);
- break;
- }
- k = find_last_bit(sqp->in_use_bm, retval);
- if ((k < sdebug_max_queue) || (k == retval))
- atomic_set(&retired_max_queue, 0);
- else
- atomic_set(&retired_max_queue, k + 1);
- }
- WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- scsi_done(scp); /* callback to mid level */
- num_entries++;
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
- break;
+ ASSIGN_QUEUED_CMD(cmd, NULL);
+ spin_unlock_irqrestore(&sdsc->lock, flags);
+
+ if (sdebug_statistics) {
+ atomic_inc(&sdebug_completions);
+ if (raw_smp_processor_id() != sd_dp->issuing_cpu)
+ atomic_inc(&sdebug_miss_cpus);
}
-unlock:
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ sdebug_free_queued_cmd(sqcp);
+
+ scsi_done(cmd); /* callback to mid level */
+ (*data->num_entries)++;
+ return true;
+}
+
+static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+ int num_entries = 0;
+ struct sdebug_blk_mq_poll_data data = {
+ .queue_num = queue_num,
+ .num_entries = &num_entries,
+ };
+
+ blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
+ &data);
if (num_entries > 0)
atomic_add(num_entries, &sdeb_mq_poll_count);
@@ -7776,6 +7649,16 @@ err_out:
return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
+static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+{
+ struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
+
+ spin_lock_init(&sdsc->lock);
+
+ return 0;
+}
+
+
static struct scsi_host_template sdebug_driver_template = {
.show_info = scsi_debug_show_info,
.write_info = scsi_debug_write_info,
@@ -7803,6 +7686,8 @@ static struct scsi_host_template sdebug_driver_template = {
.max_segment_size = -1U,
.module = THIS_MODULE,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct sdebug_scsi_cmd),
+ .init_cmd_priv = sdebug_init_cmd_priv,
};
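The two fields added to the template are what back the new per-command lock: the SCSI midlayer allocates cmd_size extra bytes behind every struct scsi_cmnd and calls init_cmd_priv once per command at host setup, and scsi_cmd_priv() hands back that trailing area. A minimal sketch of the accessor side, assuming this patch's private struct:

static struct sdebug_scsi_cmd *example_cmd_priv(struct scsi_cmnd *cmd)
{
	/* scsi_cmd_priv() returns the .cmd_size-byte area placed
	 * immediately after the scsi_cmnd itself */
	return scsi_cmd_priv(cmd);
}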
static int sdebug_driver_probe(struct device *dev)
@@ -7812,14 +7697,14 @@ static int sdebug_driver_probe(struct device *dev)
struct Scsi_Host *hpnt;
int hprot;
- sdbg_host = to_sdebug_host(dev);
+ sdbg_host = dev_to_sdebug_host(dev);
sdebug_driver_template.can_queue = sdebug_max_queue;
sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
if (!sdebug_clustering)
sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
- hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
+ hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
if (NULL == hpnt) {
pr_err("scsi_host_alloc failed\n");
error = -ENODEV;
@@ -7862,7 +7747,6 @@ static int sdebug_driver_probe(struct device *dev)
hpnt->nr_maps = 3;
sdbg_host->shost = hpnt;
- *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
hpnt->max_id = sdebug_num_tgts + 1;
else
@@ -7936,7 +7820,7 @@ static void sdebug_driver_remove(struct device *dev)
struct sdebug_host_info *sdbg_host;
struct sdebug_dev_info *sdbg_devinfo, *tmp;
- sdbg_host = to_sdebug_host(dev);
+ sdbg_host = dev_to_sdebug_host(dev);
scsi_remove_host(sdbg_host->shost);
@@ -7950,15 +7834,8 @@ static void sdebug_driver_remove(struct device *dev)
scsi_host_put(sdbg_host->shost);
}
-static int pseudo_lld_bus_match(struct device *dev,
- struct device_driver *dev_driver)
-{
- return 1;
-}
-
static struct bus_type pseudo_lld_bus = {
.name = "pseudo",
- .match = pseudo_lld_bus_match,
.probe = sdebug_driver_probe,
.remove = sdebug_driver_remove,
.drv_groups = sdebug_drv_groups,