author		Linus Torvalds <torvalds@linux-foundation.org>	2017-05-04 22:19:44 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-04 22:19:44 +0300
commit		8d5e72dfdf0fa29a21143fd72746c6f43295ce9f (patch)
tree		cd51765801a1ad27a6db13809e00085b2677d351 /drivers/scsi/ipr.c
parent		2bd80401743568ced7d303b008ae5298ce77e695 (diff)
parent		e7731da36f107e87b0ea137265ebcc991972e14c (diff)
download	linux-8d5e72dfdf0fa29a21143fd72746c6f43295ce9f.tar.xz
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This update includes the usual round of major driver updates
  (hisi_sas, ufs, fnic, cxlflash, be2iscsi, ipr, stex).

  There's also the usual amount of cosmetic and spelling stuff"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (155 commits)
  scsi: qla4xxx: fix spelling mistake: "Tempalate" -> "Template"
  scsi: stex: make S6flag static
  scsi: mac_esp: fix to pass correct device identity to free_irq()
  scsi: aacraid: pci_alloc_consistent() failures on ARM64
  scsi: ufs: make ufshcd_get_lists_status() register operation obvious
  scsi: ufs: use MASK_EE_STATUS
  scsi: mac_esp: Replace bogus memory barrier with spinlock
  scsi: fcoe: make fcoe_e_d_tov and fcoe_r_a_tov static
  scsi: sd_zbc: Do not write lock zones for reset
  scsi: sd_zbc: Remove superfluous assignments
  scsi: sd: sd_zbc: Rename sd_zbc_setup_write_cmnd
  scsi: Improve scsi_get_sense_info_fld
  scsi: sd: Cleanup sd_done sense data handling
  scsi: sd: Improve sd_completed_bytes
  scsi: sd: Fix function descriptions
  scsi: mpt3sas: remove redundant wmb
  scsi: mpt: Move scsi_remove_host() out of mptscsih_remove_host()
  scsi: sg: reset 'res_in_use' after unlinking reserved array
  scsi: mvumi: remove code handling zero scsi_sg_count(scmd) case
  scsi: fusion: fix spelling mistake: "Persistancy" -> "Persistency"
  ...
Diffstat (limited to 'drivers/scsi/ipr.c')
-rw-r--r--	drivers/scsi/ipr.c	259
1 file changed, 198 insertions(+), 61 deletions(-)
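
A recurring pattern in the ipr diff below is splitting a completion handler into an unlocked __-prefixed body and a thin wrapper that takes the per-HRRQ lock (for example __ipr_scsi_eh_done() versus ipr_scsi_eh_done()), while also signalling eh_comp before the command is returned to the free queue. The following is a minimal, hypothetical sketch of that idiom; the structure and function names are illustrative stand-ins, not the real ipr definitions.

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/completion.h>

/* Simplified stand-ins for the ipr HRRQ and command structures. */
struct demo_hrrq {
	spinlock_t _lock;
	struct list_head free_q;
};

struct demo_cmnd {
	struct list_head queue;
	struct completion *eh_comp;	/* set by an ipr_wait_for_ops()-style waiter */
	struct demo_hrrq *hrrq;
};

/* Body variant: the caller already holds hrrq->_lock. */
static void __demo_eh_done(struct demo_cmnd *cmd)
{
	/* Wake any error-handling waiter before recycling the command. */
	if (cmd->eh_comp)
		complete(cmd->eh_comp);
	list_add_tail(&cmd->queue, &cmd->hrrq->free_q);
}

/* Wrapper variant: for call paths that do not already hold the HRRQ lock. */
static void demo_eh_done(struct demo_cmnd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->hrrq->_lock, flags);
	__demo_eh_done(cmd);
	spin_unlock_irqrestore(&cmd->hrrq->_lock, flags);
}
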
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 5d5e272fd815..b0c68d24db01 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -820,7 +820,7 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
}
/**
- * ipr_sata_eh_done - done function for aborted SATA commands
+ * __ipr_sata_eh_done - done function for aborted SATA commands
* @ipr_cmd: ipr command struct
*
* This function is invoked for ops generated to SATA
@@ -829,19 +829,41 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
* Return value:
* none
**/
-static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
+static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
struct ata_queued_cmd *qc = ipr_cmd->qc;
struct ipr_sata_port *sata_port = qc->ap->private_data;
qc->err_mask |= AC_ERR_OTHER;
sata_port->ioasa.status |= ATA_BUSY;
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
ata_qc_complete(qc);
+ if (ipr_cmd->eh_comp)
+ complete(ipr_cmd->eh_comp);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
- * ipr_scsi_eh_done - mid-layer done function for aborted ops
+ * ipr_sata_eh_done - done function for aborted SATA commands
+ * @ipr_cmd: ipr command struct
+ *
+ * This function is invoked for ops generated to SATA
+ * devices which are being aborted.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
+ unsigned long hrrq_flags;
+
+ spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
+ __ipr_sata_eh_done(ipr_cmd);
+ spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
+}
+
+/**
+ * __ipr_scsi_eh_done - mid-layer done function for aborted ops
* @ipr_cmd: ipr command struct
*
* This function is invoked by the interrupt handler for
@@ -850,7 +872,7 @@ static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
* Return value:
* none
**/
-static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
+static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
@@ -864,6 +886,26 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
}
/**
+ * ipr_scsi_eh_done - mid-layer done function for aborted ops
+ * @ipr_cmd: ipr command struct
+ *
+ * This function is invoked by the interrupt handler for
+ * ops generated by the SCSI mid-layer which are being aborted.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
+{
+ unsigned long hrrq_flags;
+ struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
+
+ spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
+ __ipr_scsi_eh_done(ipr_cmd);
+ spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
+}
+
+/**
* ipr_fail_all_ops - Fails all outstanding ops.
* @ioa_cfg: ioa config struct
*
@@ -890,9 +932,9 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
cpu_to_be32(IPR_DRIVER_ILID);
if (ipr_cmd->scsi_cmd)
- ipr_cmd->done = ipr_scsi_eh_done;
+ ipr_cmd->done = __ipr_scsi_eh_done;
else if (ipr_cmd->qc)
- ipr_cmd->done = ipr_sata_eh_done;
+ ipr_cmd->done = __ipr_sata_eh_done;
ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
IPR_IOASC_IOA_WAS_RESET);
@@ -5006,6 +5048,42 @@ static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
}
/**
+ * ipr_cmnd_is_free - Check if a command is free or not
+ * @ipr_cmd ipr command struct
+ *
+ * Returns:
+ * true / false
+ **/
+static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_cmnd *loop_cmd;
+
+ list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
+ if (loop_cmd == ipr_cmd)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ipr_match_res - Match function for specified resource entry
+ * @ipr_cmd: ipr command struct
+ * @resource: resource entry to match
+ *
+ * Returns:
+ * 1 if command matches sdev / 0 if command does not match sdev
+ **/
+static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
+{
+ struct ipr_resource_entry *res = resource;
+
+ if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
+ return 1;
+ return 0;
+}
+
+/**
* ipr_wait_for_ops - Wait for matching commands to complete
* @ipr_cmd: ipr command struct
* @device: device to match (sdev)
@@ -5018,7 +5096,7 @@ static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
int (*match)(struct ipr_cmnd *, void *))
{
struct ipr_cmnd *ipr_cmd;
- int wait;
+ int wait, i;
unsigned long flags;
struct ipr_hrr_queue *hrrq;
signed long timeout = IPR_ABORT_TASK_TIMEOUT;
@@ -5030,10 +5108,13 @@ static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
for_each_hrrq(hrrq, ioa_cfg) {
spin_lock_irqsave(hrrq->lock, flags);
- list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
- if (match(ipr_cmd, device)) {
- ipr_cmd->eh_comp = &comp;
- wait++;
+ for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
+ ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
+ if (!ipr_cmnd_is_free(ipr_cmd)) {
+ if (match(ipr_cmd, device)) {
+ ipr_cmd->eh_comp = &comp;
+ wait++;
+ }
}
}
spin_unlock_irqrestore(hrrq->lock, flags);
@@ -5047,10 +5128,13 @@ static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
for_each_hrrq(hrrq, ioa_cfg) {
spin_lock_irqsave(hrrq->lock, flags);
- list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
- if (match(ipr_cmd, device)) {
- ipr_cmd->eh_comp = NULL;
- wait++;
+ for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
+ ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
+ if (!ipr_cmnd_is_free(ipr_cmd)) {
+ if (match(ipr_cmd, device)) {
+ ipr_cmd->eh_comp = NULL;
+ wait++;
+ }
}
}
spin_unlock_irqrestore(hrrq->lock, flags);
@@ -5179,7 +5263,7 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
struct ipr_resource_entry *res;
unsigned long lock_flags = 0;
- int rc = -ENXIO;
+ int rc = -ENXIO, ret;
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -5193,9 +5277,19 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
if (res) {
rc = ipr_device_reset(ioa_cfg, res);
*classes = res->ata_class;
- }
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
+ if (ret != SUCCESS) {
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+ }
+ } else
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
LEAVE;
return rc;
}
@@ -5217,16 +5311,13 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
struct ipr_ioa_cfg *ioa_cfg;
struct ipr_resource_entry *res;
struct ata_port *ap;
- int rc = 0;
+ int rc = 0, i;
struct ipr_hrr_queue *hrrq;
ENTER;
ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
res = scsi_cmd->device->hostdata;
- if (!res)
- return FAILED;
-
/*
* If we are currently going through reset/reload, return failed. This will force the
* mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
@@ -5239,14 +5330,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
for_each_hrrq(hrrq, ioa_cfg) {
spin_lock(&hrrq->_lock);
- list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
+ ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
+
if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
- if (ipr_cmd->scsi_cmd)
- ipr_cmd->done = ipr_scsi_eh_done;
- if (ipr_cmd->qc)
- ipr_cmd->done = ipr_sata_eh_done;
- if (ipr_cmd->qc &&
- !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
+ if (!ipr_cmd->qc)
+ continue;
+ if (ipr_cmnd_is_free(ipr_cmd))
+ continue;
+
+ ipr_cmd->done = ipr_sata_eh_done;
+ if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
}
@@ -5262,19 +5356,6 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
spin_unlock_irq(scsi_cmd->device->host->host_lock);
ata_std_error_handler(ap);
spin_lock_irq(scsi_cmd->device->host->host_lock);
-
- for_each_hrrq(hrrq, ioa_cfg) {
- spin_lock(&hrrq->_lock);
- list_for_each_entry(ipr_cmd,
- &hrrq->hrrq_pending_q, queue) {
- if (ipr_cmd->ioarcb.res_handle ==
- res->res_handle) {
- rc = -EIO;
- break;
- }
- }
- spin_unlock(&hrrq->_lock);
- }
} else
rc = ipr_device_reset(ioa_cfg, res);
res->resetting_device = 0;
@@ -5288,15 +5369,24 @@ static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
int rc;
struct ipr_ioa_cfg *ioa_cfg;
+ struct ipr_resource_entry *res;
ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
+ res = cmd->device->hostdata;
+
+ if (!res)
+ return FAILED;
spin_lock_irq(cmd->device->host->host_lock);
rc = __ipr_eh_dev_reset(cmd);
spin_unlock_irq(cmd->device->host->host_lock);
- if (rc == SUCCESS)
- rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
+ if (rc == SUCCESS) {
+ if (ipr_is_gata(res) && res->sata_port)
+ rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
+ else
+ rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
+ }
return rc;
}
@@ -5393,7 +5483,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
struct ipr_resource_entry *res;
struct ipr_cmd_pkt *cmd_pkt;
u32 ioasc, int_reg;
- int op_found = 0;
+ int i, op_found = 0;
struct ipr_hrr_queue *hrrq;
ENTER;
@@ -5422,11 +5512,12 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
for_each_hrrq(hrrq, ioa_cfg) {
spin_lock(&hrrq->_lock);
- list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
- if (ipr_cmd->scsi_cmd == scsi_cmd) {
- ipr_cmd->done = ipr_scsi_eh_done;
- op_found = 1;
- break;
+ for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
+ if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
+ if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
+ op_found = 1;
+ break;
+ }
}
}
spin_unlock(&hrrq->_lock);
@@ -5917,7 +6008,7 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
}
/**
- * ipr_erp_done - Process completion of ERP for a device
+ * __ipr_erp_done - Process completion of ERP for a device
* @ipr_cmd: ipr command struct
*
* This function copies the sense buffer into the scsi_cmd
@@ -5926,7 +6017,7 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
* Return value:
* nothing
**/
-static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
+static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
@@ -5947,8 +6038,30 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
res->in_erp = 0;
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
+ if (ipr_cmd->eh_comp)
+ complete(ipr_cmd->eh_comp);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+}
+
+/**
+ * ipr_erp_done - Process completion of ERP for a device
+ * @ipr_cmd: ipr command struct
+ *
+ * This function copies the sense buffer into the scsi_cmd
+ * struct and pushes the scsi_done function.
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
+ unsigned long hrrq_flags;
+
+ spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
+ __ipr_erp_done(ipr_cmd);
+ spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}
/**
@@ -5983,7 +6096,7 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
}
/**
- * ipr_erp_request_sense - Send request sense to a device
+ * __ipr_erp_request_sense - Send request sense to a device
* @ipr_cmd: ipr command struct
*
* This function sends a request sense to a device as a result
@@ -5992,13 +6105,13 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
* Return value:
* nothing
**/
-static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
+static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
- ipr_erp_done(ipr_cmd);
+ __ipr_erp_done(ipr_cmd);
return;
}
@@ -6019,6 +6132,26 @@ static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
}
/**
+ * ipr_erp_request_sense - Send request sense to a device
+ * @ipr_cmd: ipr command struct
+ *
+ * This function sends a request sense to a device as a result
+ * of a check condition.
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
+ unsigned long hrrq_flags;
+
+ spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
+ __ipr_erp_request_sense(ipr_cmd);
+ spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
+}
+
+/**
* ipr_erp_cancel_all - Send cancel all to a device
* @ipr_cmd: ipr command struct
*
@@ -6041,7 +6174,7 @@ static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
if (!scsi_cmd->device->simple_tags) {
- ipr_erp_request_sense(ipr_cmd);
+ __ipr_erp_request_sense(ipr_cmd);
return;
}
@@ -6261,7 +6394,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
if (!res) {
- ipr_scsi_eh_done(ipr_cmd);
+ __ipr_scsi_eh_done(ipr_cmd);
return;
}
@@ -6343,8 +6476,10 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
+ if (ipr_cmd->eh_comp)
+ complete(ipr_cmd->eh_comp);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
@@ -6370,8 +6505,10 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
scsi_dma_unmap(scsi_cmd);
spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
+ if (ipr_cmd->eh_comp)
+ complete(ipr_cmd->eh_comp);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
} else {
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);