author     Linus Torvalds <torvalds@linux-foundation.org>  2019-07-12 01:14:01 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-07-12 01:14:01 +0300
commit     ba6d10ab8014ac10d25ca513352b6665e73b5785 (patch)
tree       3b7aaa3f2d76d0c0e9612bc87e1da45577465528 /drivers/scsi/hpsa.c
parent     64b08df460cfdfc2b010263043a057cdd33500ed (diff)
parent     baf23eddbf2a4ba9bf2bdb342686c71a8042e39b (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This is mostly update of the usual drivers: qla2xxx, hpsa, lpfc, ufs, mpt3sas, ibmvscsi, megaraid_sas, bnx2fc and hisi_sas as well as the removal of the osst driver (I heard from Willem privately that he would like the driver removed because all his test hardware has failed). Plus a number of minor changes, spelling fixes and other trivia.

 The big merge conflict this time around is the SPDX licence tags. Following discussion on linux-next, we believe our version to be more accurate than the one in the tree, so the resolution is to take our version for all the SPDX conflicts"

Note on the SPDX license tag conversion conflicts: the SCSI tree had done its own SPDX conversion, which in some cases conflicted with the treewide ones done by Thomas & co. In almost all cases, the conflicts were purely syntactic: the SCSI tree used the old-style SPDX tags ("GPL-2.0" and "GPL-2.0+") while the treewide conversion had used the new-style ones ("GPL-2.0-only" and "GPL-2.0-or-later"). In these cases I picked the new-style one.

In a few cases, the SPDX conversion was actually different, though. As explained by James above, and in more detail in a pre-pull-request thread:

 "The other problem is actually substantive: In the libsas code Luben Tuikov originally specified gpl 2.0 only by dint of stating:

    * This file is licensed under GPLv2.

 in all the libsas files, but then muddied the water by quoting GPLv2 verbatim (which includes the or later than language). So for these files Christoph did the conversion to v2 only SPDX tags and Thomas converted to v2 or later tags"

So in those cases, where the SPDX tag substantially mattered, I took the SCSI tree conversion of it, but then also took the opportunity to turn the old-style "GPL-2.0" into a new-style "GPL-2.0-only" tag.

Similarly, when there were whitespace differences or other differences to the comments around the copyright notices, I took the version from the SCSI tree as being the more specific conversion.

Finally, in the SPDX conversions that had no conflicts (because the treewide ones hadn't been done for those files), I just took the SCSI tree version as-is, even if it was old-style. The old-style conversions are perfectly valid, even if the "-only" and "-or-later" versions are perhaps more descriptive.
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (185 commits)
  scsi: qla2xxx: move IO flush to the front of NVME rport unregistration
  scsi: qla2xxx: Fix NVME cmd and LS cmd timeout race condition
  scsi: qla2xxx: on session delete, return nvme cmd
  scsi: qla2xxx: Fix kernel crash after disconnecting NVMe devices
  scsi: megaraid_sas: Update driver version to 07.710.06.00-rc1
  scsi: megaraid_sas: Introduce various Aero performance modes
  scsi: megaraid_sas: Use high IOPS queues based on IO workload
  scsi: megaraid_sas: Set affinity for high IOPS reply queues
  scsi: megaraid_sas: Enable coalescing for high IOPS queues
  scsi: megaraid_sas: Add support for High IOPS queues
  scsi: megaraid_sas: Add support for MPI toolbox commands
  scsi: megaraid_sas: Offload Aero RAID5/6 division calculations to driver
  scsi: megaraid_sas: RAID1 PCI bandwidth limit algorithm is applicable for only Ventura
  scsi: megaraid_sas: megaraid_sas: Add check for count returned by HOST_DEVICE_LIST DCMD
  scsi: megaraid_sas: Handle sequence JBOD map failure at driver level
  scsi: megaraid_sas: Don't send FPIO to RL Bypass queue
  scsi: megaraid_sas: In probe context, retry IOC INIT once if firmware is in fault
  scsi: megaraid_sas: Release Mutex lock before OCR in case of DCMD timeout
  scsi: megaraid_sas: Call disable_irq from process IRQ poll
  scsi: megaraid_sas: Remove few debug counters from IO path
  ...
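For readers unfamiliar with the two SPDX tag styles discussed above, the difference is purely the identifier string on the license line. A minimal illustrative sketch (hypothetical file header lines, not taken from this merge) of how the old-style tags map onto the new-style ones:

  /* Old-style tags, as used by the SCSI tree's own conversion: */
  // SPDX-License-Identifier: GPL-2.0            /* GPL v2 only     */
  // SPDX-License-Identifier: GPL-2.0+           /* GPL v2 or later */

  /* Equivalent new-style tags, as used by the treewide conversion: */
  // SPDX-License-Identifier: GPL-2.0-only
  // SPDX-License-Identifier: GPL-2.0-or-later

Both spellings are valid SPDX identifiers; the "-only"/"-or-later" forms are simply the newer, more explicit names for the same licenses, which is why most of the conflicts were resolvable mechanically.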
Diffstat (limited to 'drivers/scsi/hpsa.c')
-rw-r--r--  drivers/scsi/hpsa.c | 280
1 file changed, 169 insertions, 111 deletions
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index ffd7e9506570..43a6b5350775 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -60,7 +60,7 @@
* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
* with an optional trailing '-' followed by a byte value (0-255).
*/
-#define HPSA_DRIVER_VERSION "3.4.20-160"
+#define HPSA_DRIVER_VERSION "3.4.20-170"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
@@ -73,6 +73,8 @@
/*define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
+/* How long to wait before giving up on a command */
+#define HPSA_EH_PTRAID_TIMEOUT (240 * HZ)
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
@@ -344,11 +346,6 @@ static inline bool hpsa_is_cmd_idle(struct CommandList *c)
return c->scsi_cmd == SCSI_CMD_IDLE;
}
-static inline bool hpsa_is_pending_event(struct CommandList *c)
-{
- return c->reset_pending;
-}
-
/* extract sense key, asc, and ascq from sense data. -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
u8 *sense_key, u8 *asc, u8 *ascq)
@@ -1144,6 +1141,8 @@ static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
{
dial_down_lockup_detection_during_fw_flash(h, c);
atomic_inc(&h->commands_outstanding);
+ if (c->device)
+ atomic_inc(&c->device->commands_outstanding);
reply_queue = h->reply_map[raw_smp_processor_id()];
switch (c->cmd_type) {
@@ -1167,9 +1166,6 @@ static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
- if (unlikely(hpsa_is_pending_event(c)))
- return finish_cmd(c);
-
__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}
@@ -1842,25 +1838,33 @@ static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
return count;
}
+#define NUM_WAIT 20
static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
struct hpsa_scsi_dev_t *device)
{
int cmds = 0;
int waits = 0;
+ int num_wait = NUM_WAIT;
+
+ if (device->external)
+ num_wait = HPSA_EH_PTRAID_TIMEOUT;
while (1) {
cmds = hpsa_find_outstanding_commands_for_dev(h, device);
if (cmds == 0)
break;
- if (++waits > 20)
+ if (++waits > num_wait)
break;
msleep(1000);
}
- if (waits > 20)
+ if (waits > num_wait) {
dev_warn(&h->pdev->dev,
- "%s: removing device with %d outstanding commands!\n",
- __func__, cmds);
+ "%s: removing device [%d:%d:%d:%d] with %d outstanding commands!\n",
+ __func__,
+ h->scsi_host->host_no,
+ device->bus, device->target, device->lun, cmds);
+ }
}
static void hpsa_remove_device(struct ctlr_info *h,
@@ -2131,11 +2135,16 @@ static int hpsa_slave_configure(struct scsi_device *sdev)
sdev->no_uld_attach = !sd || !sd->expose_device;
if (sd) {
- if (sd->external)
+ sd->was_removed = 0;
+ if (sd->external) {
queue_depth = EXTERNAL_QD;
- else
+ sdev->eh_timeout = HPSA_EH_PTRAID_TIMEOUT;
+ blk_queue_rq_timeout(sdev->request_queue,
+ HPSA_EH_PTRAID_TIMEOUT);
+ } else {
queue_depth = sd->queue_depth != 0 ?
sd->queue_depth : sdev->host->can_queue;
+ }
} else
queue_depth = sdev->host->can_queue;
@@ -2146,7 +2155,12 @@ static int hpsa_slave_configure(struct scsi_device *sdev)
static void hpsa_slave_destroy(struct scsi_device *sdev)
{
- /* nothing to do. */
+ struct hpsa_scsi_dev_t *hdev = NULL;
+
+ hdev = sdev->hostdata;
+
+ if (hdev)
+ hdev->was_removed = 1;
}
static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
@@ -2414,13 +2428,16 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
break;
}
+ if (dev->in_reset)
+ retry = 0;
+
return retry; /* retry on raid path? */
}
static void hpsa_cmd_resolve_events(struct ctlr_info *h,
struct CommandList *c)
{
- bool do_wake = false;
+ struct hpsa_scsi_dev_t *dev = c->device;
/*
* Reset c->scsi_cmd here so that the reset handler will know
@@ -2429,25 +2446,12 @@ static void hpsa_cmd_resolve_events(struct ctlr_info *h,
*/
c->scsi_cmd = SCSI_CMD_IDLE;
mb(); /* Declare command idle before checking for pending events. */
- if (c->reset_pending) {
- unsigned long flags;
- struct hpsa_scsi_dev_t *dev;
-
- /*
- * There appears to be a reset pending; lock the lock and
- * reconfirm. If so, then decrement the count of outstanding
- * commands and wake the reset command if this is the last one.
- */
- spin_lock_irqsave(&h->lock, flags);
- dev = c->reset_pending; /* Re-fetch under the lock. */
- if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
- do_wake = true;
- c->reset_pending = NULL;
- spin_unlock_irqrestore(&h->lock, flags);
+ if (dev) {
+ atomic_dec(&dev->commands_outstanding);
+ if (dev->in_reset &&
+ atomic_read(&dev->commands_outstanding) <= 0)
+ wake_up_all(&h->event_sync_wait_queue);
}
-
- if (do_wake)
- wake_up_all(&h->event_sync_wait_queue);
}
static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
@@ -2496,6 +2500,11 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
dev->offload_to_be_enabled = 0;
}
+ if (dev->in_reset) {
+ cmd->result = DID_RESET << 16;
+ return hpsa_cmd_free_and_done(h, c, cmd);
+ }
+
return hpsa_retry_cmd(h, c);
}
@@ -2574,6 +2583,12 @@ static void complete_scsi_command(struct CommandList *cp)
cmd->result = (DID_OK << 16); /* host byte */
cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
+ /* SCSI command has already been cleaned up in SML */
+ if (dev->was_removed) {
+ hpsa_cmd_resolve_and_free(h, cp);
+ return;
+ }
+
if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
if (dev->physical_device && dev->expose_device &&
dev->removed) {
@@ -2595,10 +2610,6 @@ static void complete_scsi_command(struct CommandList *cp)
return hpsa_cmd_free_and_done(h, cp, cmd);
}
- if ((unlikely(hpsa_is_pending_event(cp))))
- if (cp->reset_pending)
- return hpsa_cmd_free_and_done(h, cp, cmd);
-
if (cp->cmd_type == CMD_IOACCEL2)
return process_ioaccel2_completion(h, cp, cmd, dev);
@@ -3048,7 +3059,7 @@ out:
return rc;
}
-static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
+static int hpsa_send_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
u8 reset_type, int reply_queue)
{
int rc = IO_OK;
@@ -3056,11 +3067,10 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
struct ErrorInfo *ei;
c = cmd_alloc(h);
-
+ c->device = dev;
/* fill_cmd can't fail here, no data buffer to map. */
- (void) fill_cmd(c, reset_type, h, NULL, 0, 0,
- scsi3addr, TYPE_MSG);
+ (void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG);
rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
if (rc) {
dev_warn(&h->pdev->dev, "Failed to send reset command\n");
@@ -3138,9 +3148,8 @@ static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
}
static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
- unsigned char *scsi3addr, u8 reset_type, int reply_queue)
+ u8 reset_type, int reply_queue)
{
- int i;
int rc = 0;
/* We can really only handle one reset at a time */
@@ -3149,38 +3158,14 @@ static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
return -EINTR;
}
- BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);
-
- for (i = 0; i < h->nr_cmds; i++) {
- struct CommandList *c = h->cmd_pool + i;
- int refcount = atomic_inc_return(&c->refcount);
-
- if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
- unsigned long flags;
-
- /*
- * Mark the target command as having a reset pending,
- * then lock a lock so that the command cannot complete
- * while we're considering it. If the command is not
- * idle then count it; otherwise revoke the event.
- */
- c->reset_pending = dev;
- spin_lock_irqsave(&h->lock, flags); /* Implied MB */
- if (!hpsa_is_cmd_idle(c))
- atomic_inc(&dev->reset_cmds_out);
- else
- c->reset_pending = NULL;
- spin_unlock_irqrestore(&h->lock, flags);
- }
-
- cmd_free(h, c);
- }
-
- rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
- if (!rc)
+ rc = hpsa_send_reset(h, dev, reset_type, reply_queue);
+ if (!rc) {
+ /* incremented by sending the reset request */
+ atomic_dec(&dev->commands_outstanding);
wait_event(h->event_sync_wait_queue,
- atomic_read(&dev->reset_cmds_out) == 0 ||
+ atomic_read(&dev->commands_outstanding) <= 0 ||
lockup_detected(h));
+ }
if (unlikely(lockup_detected(h))) {
dev_warn(&h->pdev->dev,
@@ -3188,10 +3173,8 @@ static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
rc = -ENODEV;
}
- if (unlikely(rc))
- atomic_set(&dev->reset_cmds_out, 0);
- else
- rc = wait_for_device_to_become_ready(h, scsi3addr, 0);
+ if (!rc)
+ rc = wait_for_device_to_become_ready(h, dev->scsi3addr, 0);
mutex_unlock(&h->reset_mutex);
return rc;
@@ -4820,6 +4803,9 @@ static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
c->phys_disk = dev;
+ if (dev->in_reset)
+ return -1;
+
return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
}
@@ -5010,6 +4996,11 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
} else
cp->sg_count = (u8) use_sg;
+ if (phys_disk->in_reset) {
+ cmd->result = DID_RESET << 16;
+ return -1;
+ }
+
enqueue_cmd_and_start_io(h, c);
return 0;
}
@@ -5027,6 +5018,9 @@ static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
if (!c->scsi_cmd->device->hostdata)
return -1;
+ if (phys_disk->in_reset)
+ return -1;
+
/* Try to honor the device's queue depth */
if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
phys_disk->queue_depth) {
@@ -5110,6 +5104,9 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
if (!dev)
return -1;
+ if (dev->in_reset)
+ return -1;
+
/* check for valid opcode, get LBA and block count */
switch (cmd->cmnd[0]) {
case WRITE_6:
@@ -5414,13 +5411,13 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
*/
static int hpsa_ciss_submit(struct ctlr_info *h,
struct CommandList *c, struct scsi_cmnd *cmd,
- unsigned char scsi3addr[])
+ struct hpsa_scsi_dev_t *dev)
{
cmd->host_scribble = (unsigned char *) c;
c->cmd_type = CMD_SCSI;
c->scsi_cmd = cmd;
c->Header.ReplyQueue = 0; /* unused in simple mode */
- memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
+ memcpy(&c->Header.LUN.LunAddrBytes[0], &dev->scsi3addr[0], 8);
c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
/* Fill in the request block... */
@@ -5471,6 +5468,12 @@ static int hpsa_ciss_submit(struct ctlr_info *h,
hpsa_cmd_resolve_and_free(h, c);
return SCSI_MLQUEUE_HOST_BUSY;
}
+
+ if (dev->in_reset) {
+ hpsa_cmd_resolve_and_free(h, c);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
enqueue_cmd_and_start_io(h, c);
/* the cmd'll come back via intr handler in complete_scsi_command() */
return 0;
@@ -5522,8 +5525,7 @@ static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
}
static int hpsa_ioaccel_submit(struct ctlr_info *h,
- struct CommandList *c, struct scsi_cmnd *cmd,
- unsigned char *scsi3addr)
+ struct CommandList *c, struct scsi_cmnd *cmd)
{
struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
int rc = IO_ACCEL_INELIGIBLE;
@@ -5531,6 +5533,12 @@ static int hpsa_ioaccel_submit(struct ctlr_info *h,
if (!dev)
return SCSI_MLQUEUE_HOST_BUSY;
+ if (dev->in_reset)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ if (hpsa_simple_mode)
+ return IO_ACCEL_INELIGIBLE;
+
cmd->host_scribble = (unsigned char *) c;
if (dev->offload_enabled) {
@@ -5563,8 +5571,12 @@ static void hpsa_command_resubmit_worker(struct work_struct *work)
cmd->result = DID_NO_CONNECT << 16;
return hpsa_cmd_free_and_done(c->h, c, cmd);
}
- if (c->reset_pending)
+
+ if (dev->in_reset) {
+ cmd->result = DID_RESET << 16;
return hpsa_cmd_free_and_done(c->h, c, cmd);
+ }
+
if (c->cmd_type == CMD_IOACCEL2) {
struct ctlr_info *h = c->h;
struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
@@ -5572,7 +5584,7 @@ static void hpsa_command_resubmit_worker(struct work_struct *work)
if (c2->error_data.serv_response ==
IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
- rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
+ rc = hpsa_ioaccel_submit(h, c, cmd);
if (rc == 0)
return;
if (rc == SCSI_MLQUEUE_HOST_BUSY) {
@@ -5588,7 +5600,7 @@ static void hpsa_command_resubmit_worker(struct work_struct *work)
}
}
hpsa_cmd_partial_init(c->h, c->cmdindex, c);
- if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
+ if (hpsa_ciss_submit(c->h, c, cmd, dev)) {
/*
* If we get here, it means dma mapping failed. Try
* again via scsi mid layer, which will then get
@@ -5607,7 +5619,6 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
struct ctlr_info *h;
struct hpsa_scsi_dev_t *dev;
- unsigned char scsi3addr[8];
struct CommandList *c;
int rc = 0;
@@ -5629,14 +5640,18 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
return 0;
}
- memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
-
if (unlikely(lockup_detected(h))) {
cmd->result = DID_NO_CONNECT << 16;
cmd->scsi_done(cmd);
return 0;
}
+
+ if (dev->in_reset)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+
c = cmd_tagged_alloc(h, cmd);
+ if (c == NULL)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
/*
* Call alternate submit routine for I/O accelerated commands.
@@ -5645,7 +5660,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
if (likely(cmd->retries == 0 &&
!blk_rq_is_passthrough(cmd->request) &&
h->acciopath_status)) {
- rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
+ rc = hpsa_ioaccel_submit(h, c, cmd);
if (rc == 0)
return 0;
if (rc == SCSI_MLQUEUE_HOST_BUSY) {
@@ -5653,7 +5668,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
return SCSI_MLQUEUE_HOST_BUSY;
}
}
- return hpsa_ciss_submit(h, c, cmd, scsi3addr);
+ return hpsa_ciss_submit(h, c, cmd, dev);
}
static void hpsa_scan_complete(struct ctlr_info *h)
@@ -5935,8 +5950,9 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h,
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
int rc = SUCCESS;
+ int i;
struct ctlr_info *h;
- struct hpsa_scsi_dev_t *dev;
+ struct hpsa_scsi_dev_t *dev = NULL;
u8 reset_type;
char msg[48];
unsigned long flags;
@@ -6002,9 +6018,19 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
+ /*
+ * wait to see if any commands will complete before sending reset
+ */
+ dev->in_reset = true; /* block any new cmds from OS for this device */
+ for (i = 0; i < 10; i++) {
+ if (atomic_read(&dev->commands_outstanding) > 0)
+ msleep(1000);
+ else
+ break;
+ }
+
/* send a reset to the SCSI LUN which the command was sent to */
- rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
- DEFAULT_REPLY_QUEUE);
+ rc = hpsa_do_reset(h, dev, reset_type, DEFAULT_REPLY_QUEUE);
if (rc == 0)
rc = SUCCESS;
else
@@ -6018,6 +6044,8 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
return_reset_status:
spin_lock_irqsave(&h->reset_lock, flags);
h->reset_in_progress = 0;
+ if (dev)
+ dev->in_reset = false;
spin_unlock_irqrestore(&h->reset_lock, flags);
return rc;
}
@@ -6043,7 +6071,6 @@ static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
BUG();
}
- atomic_inc(&c->refcount);
if (unlikely(!hpsa_is_cmd_idle(c))) {
/*
* We expect that the SCSI layer will hand us a unique tag
@@ -6051,14 +6078,20 @@ static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
* two requests...because if the selected command isn't idle
* then someone is going to be very disappointed.
*/
- dev_err(&h->pdev->dev,
- "tag collision (tag=%d) in cmd_tagged_alloc().\n",
- idx);
- if (c->scsi_cmd != NULL)
- scsi_print_command(c->scsi_cmd);
- scsi_print_command(scmd);
+ if (idx != h->last_collision_tag) { /* Print once per tag */
+ dev_warn(&h->pdev->dev,
+ "%s: tag collision (tag=%d)\n", __func__, idx);
+ if (c->scsi_cmd != NULL)
+ scsi_print_command(c->scsi_cmd);
+ if (scmd)
+ scsi_print_command(scmd);
+ h->last_collision_tag = idx;
+ }
+ return NULL;
}
+ atomic_inc(&c->refcount);
+
hpsa_cmd_partial_init(h, idx, c);
return c;
}
@@ -6126,6 +6159,7 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
break; /* it's ours now. */
}
hpsa_cmd_partial_init(h, i, c);
+ c->device = NULL;
return c;
}
@@ -6579,8 +6613,7 @@ static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
}
}
-static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
- u8 reset_type)
+static void hpsa_send_host_reset(struct ctlr_info *h, u8 reset_type)
{
struct CommandList *c;
@@ -7983,10 +8016,15 @@ clean_up:
static void hpsa_free_irqs(struct ctlr_info *h)
{
int i;
+ int irq_vector = 0;
+
+ if (hpsa_simple_mode)
+ irq_vector = h->intr_mode;
if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
/* Single reply queue, only one irq to free */
- free_irq(pci_irq_vector(h->pdev, 0), &h->q[h->intr_mode]);
+ free_irq(pci_irq_vector(h->pdev, irq_vector),
+ &h->q[h->intr_mode]);
h->q[h->intr_mode] = 0;
return;
}
@@ -8005,6 +8043,10 @@ static int hpsa_request_irqs(struct ctlr_info *h,
irqreturn_t (*intxhandler)(int, void *))
{
int rc, i;
+ int irq_vector = 0;
+
+ if (hpsa_simple_mode)
+ irq_vector = h->intr_mode;
/*
* initialize h->q[x] = x so that interrupt handlers know which
@@ -8040,14 +8082,14 @@ static int hpsa_request_irqs(struct ctlr_info *h,
if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
sprintf(h->intrname[0], "%s-msi%s", h->devname,
h->msix_vectors ? "x" : "");
- rc = request_irq(pci_irq_vector(h->pdev, 0),
+ rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
msixhandler, 0,
h->intrname[0],
&h->q[h->intr_mode]);
} else {
sprintf(h->intrname[h->intr_mode],
"%s-intx", h->devname);
- rc = request_irq(pci_irq_vector(h->pdev, 0),
+ rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
intxhandler, IRQF_SHARED,
h->intrname[0],
&h->q[h->intr_mode]);
@@ -8055,7 +8097,7 @@ static int hpsa_request_irqs(struct ctlr_info *h,
}
if (rc) {
dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
- pci_irq_vector(h->pdev, 0), h->devname);
+ pci_irq_vector(h->pdev, irq_vector), h->devname);
hpsa_free_irqs(h);
return -ENODEV;
}
@@ -8065,7 +8107,7 @@ static int hpsa_request_irqs(struct ctlr_info *h,
static int hpsa_kdump_soft_reset(struct ctlr_info *h)
{
int rc;
- hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
+ hpsa_send_host_reset(h, HPSA_RESET_TYPE_CONTROLLER);
dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
@@ -8121,6 +8163,11 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
destroy_workqueue(h->rescan_ctlr_wq);
h->rescan_ctlr_wq = NULL;
}
+ if (h->monitor_ctlr_wq) {
+ destroy_workqueue(h->monitor_ctlr_wq);
+ h->monitor_ctlr_wq = NULL;
+ }
+
kfree(h); /* init_one 1 */
}
@@ -8456,8 +8503,8 @@ static void hpsa_event_monitor_worker(struct work_struct *work)
spin_lock_irqsave(&h->lock, flags);
if (!h->remove_in_progress)
- schedule_delayed_work(&h->event_monitor_work,
- HPSA_EVENT_MONITOR_INTERVAL);
+ queue_delayed_work(h->monitor_ctlr_wq, &h->event_monitor_work,
+ HPSA_EVENT_MONITOR_INTERVAL);
spin_unlock_irqrestore(&h->lock, flags);
}
@@ -8502,7 +8549,7 @@ static void hpsa_monitor_ctlr_worker(struct work_struct *work)
spin_lock_irqsave(&h->lock, flags);
if (!h->remove_in_progress)
- schedule_delayed_work(&h->monitor_ctlr_work,
+ queue_delayed_work(h->monitor_ctlr_wq, &h->monitor_ctlr_work,
h->heartbeat_sample_interval);
spin_unlock_irqrestore(&h->lock, flags);
}
@@ -8670,6 +8717,12 @@ reinit_after_soft_reset:
goto clean7; /* aer/h */
}
+ h->monitor_ctlr_wq = hpsa_create_controller_wq(h, "monitor");
+ if (!h->monitor_ctlr_wq) {
+ rc = -ENOMEM;
+ goto clean7;
+ }
+
/*
* At this point, the controller is ready to take commands.
* Now, if reset_devices and the hard reset didn't work, try
@@ -8799,6 +8852,10 @@ clean1: /* wq/aer/h */
destroy_workqueue(h->rescan_ctlr_wq);
h->rescan_ctlr_wq = NULL;
}
+ if (h->monitor_ctlr_wq) {
+ destroy_workqueue(h->monitor_ctlr_wq);
+ h->monitor_ctlr_wq = NULL;
+ }
kfree(h);
return rc;
}
@@ -8946,6 +9003,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
cancel_delayed_work_sync(&h->event_monitor_work);
destroy_workqueue(h->rescan_ctlr_wq);
destroy_workqueue(h->resubmit_wq);
+ destroy_workqueue(h->monitor_ctlr_wq);
hpsa_delete_sas_host(h);