Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/Kconfig | 2
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 2
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/Makefile | 18
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c | 1
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_hwi.c | 4
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 7
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_task.c | 12
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 4
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.c | 4
-rw-r--r--  drivers/scsi/bfa/bfa_fc.h | 2
-rw-r--r--  drivers/scsi/bfa/bfa_fcbuild.c | 2
-rw-r--r--  drivers/scsi/elx/libefc_sli/sli4.c | 8
-rw-r--r--  drivers/scsi/fcoe/fcoe_ctlr.c | 20
-rw-r--r--  drivers/scsi/fnic/fnic.h | 3
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c | 53
-rw-r--r--  drivers/scsi/gvp11.c | 5
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 16
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 9
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 14
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 27
-rw-r--r--  drivers/scsi/hosts.c | 4
-rw-r--r--  drivers/scsi/isci/host.h | 2
-rw-r--r--  drivers/scsi/isci/init.c | 9
-rw-r--r--  drivers/scsi/isci/phy.c | 2
-rw-r--r--  drivers/scsi/isci/request.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 21
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 10
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_host_smp.c | 4
-rw-r--r--  drivers/scsi/libsas/sas_init.c | 16
-rw-r--r--  drivers/scsi/libsas/sas_internal.h | 7
-rw-r--r--  drivers/scsi/libsas/sas_phy.c | 8
-rw-r--r--  drivers/scsi/libsas/sas_port.c | 8
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 54
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 23
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 136
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 20
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 70
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 77
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 53
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 94
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 16
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 2
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h | 2
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_ioc.h | 1
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_transport.h | 2
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr.h | 23
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_fw.c | 37
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_os.c | 155
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2.h | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 50
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 3
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 7
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c | 9
-rw-r--r--  drivers/scsi/mvumi.c | 4
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.c | 12
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.h | 2
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 5
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.h | 2
-rw-r--r--  drivers/scsi/pm8001/pm80xx_hwi.c | 27
-rw-r--r--  drivers/scsi/pm8001/pm80xx_hwi.h | 2
-rw-r--r--  drivers/scsi/pmcraid.c | 3
-rw-r--r--  drivers/scsi/ppa.c | 84
-rw-r--r--  drivers/scsi/ppa.h | 4
-rw-r--r--  drivers/scsi/qedf/qedf.h | 1
-rw-r--r--  drivers/scsi/qedf/qedf_dbg.h | 2
-rw-r--r--  drivers/scsi/qedf/qedf_debugfs.c | 35
-rw-r--r--  drivers/scsi/qedi/qedi_gbl.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 66
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c | 10
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 25
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 251
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 57
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 322
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 171
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.c | 405
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.h | 17
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 86
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 14
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 6
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 15
-rw-r--r--  drivers/scsi/qlogicpti.c | 4
-rw-r--r--  drivers/scsi/scsi_debugfs.c | 26
-rw-r--r--  drivers/scsi/scsi_lib.c | 2
-rw-r--r--  drivers/scsi/scsi_priv.h | 3
-rw-r--r--  drivers/scsi/scsi_scan.c | 4
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 4
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 80
-rw-r--r--  drivers/scsi/sd.c | 66
-rw-r--r--  drivers/scsi/smartpqi/smartpqi.h | 16
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 256
-rw-r--r--  drivers/scsi/st.c | 2
-rw-r--r--  drivers/scsi/storvsc_drv.c | 9
-rw-r--r--  drivers/scsi/sun_esp.c | 3
-rw-r--r--  drivers/scsi/virtio_scsi.c | 2
-rw-r--r--  drivers/scsi/xen-scsifront.c | 6
106 files changed, 2371 insertions(+), 926 deletions(-)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4962ce989113..695a57d894cd 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -836,7 +836,7 @@ config SCSI_IMM
config SCSI_IZIP_EPP16
bool "ppa/imm option - Use slow (but safe) EPP-16"
- depends on SCSI_PPA || SCSI_IMM
+ depends on SCSI_IMM
help
EPP (Enhanced Parallel Port) is a standard for parallel ports which
allows them to act as expansion buses that can handle up to 64
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 3f062e4013ab..013a9a334972 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1451,7 +1451,7 @@ retry_next:
#endif
break;
}
- scsi_rescan_device(&device->sdev_gendev);
+ scsi_rescan_device(device);
break;
default:
diff --git a/drivers/scsi/aic7xxx/aicasm/Makefile b/drivers/scsi/aic7xxx/aicasm/Makefile
index 243adb0a38d1..a3f2357a3f08 100644
--- a/drivers/scsi/aic7xxx/aicasm/Makefile
+++ b/drivers/scsi/aic7xxx/aicasm/Makefile
@@ -61,23 +61,11 @@ $(OUTDIR)/aicdb.h:
clean:
rm -f $(clean-files)
-# Create a dependency chain in generated files
-# to avoid concurrent invocations of the single
-# rule that builds them all.
-$(OUTDIR)/aicasm_gram.c: $(OUTDIR)/aicasm_gram.h
$(OUTDIR)/aicasm_gram.c $(OUTDIR)/aicasm_gram.h: aicasm_gram.y
- $(YACC) $(YFLAGS) -b $(<:.y=) $<
- mv $(<:.y=).tab.c $(OUTDIR)/$(<:.y=.c)
- mv $(<:.y=).tab.h $(OUTDIR)/$(<:.y=.h)
-
-# Create a dependency chain in generated files
-# to avoid concurrent invocations of the single
-# rule that builds them all.
-$(OUTDIR)/aicasm_macro_gram.c: $(OUTDIR)/aicasm_macro_gram.h
+ $(YACC) $(YFLAGS) -b $(<:.y=) $< -o $(OUTDIR)/$(<:.y=.c)
+
$(OUTDIR)/aicasm_macro_gram.c $(OUTDIR)/aicasm_macro_gram.h: aicasm_macro_gram.y
- $(YACC) $(YFLAGS) -b $(<:.y=) -p mm $<
- mv $(<:.y=).tab.c $(OUTDIR)/$(<:.y=.c)
- mv $(<:.y=).tab.h $(OUTDIR)/$(<:.y=.h)
+ $(YACC) $(YFLAGS) -b $(<:.y=) -p mm $< -o $(OUTDIR)/$(<:.y=.c)
$(OUTDIR)/aicasm_scan.c: aicasm_scan.l
$(LEX) $(LFLAGS) -o $@ $<
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
index 975fcfcc0d8f..2b44eb5702eb 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
@@ -52,6 +52,7 @@
#include <stdlib.h>
#include <string.h>
#include <sysexits.h>
+#include <ctype.h>
#include "aicasm_symbol.h"
#include "aicasm.h"
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 3dd110143471..9dda296c0152 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -28,7 +28,7 @@ static int asd_get_user_sas_addr(struct asd_ha_struct *asd_ha)
if (asd_ha->hw_prof.sas_addr[0])
return 0;
- return sas_request_addr(asd_ha->sas_ha.core.shost,
+ return sas_request_addr(asd_ha->sas_ha.shost,
asd_ha->hw_prof.sas_addr);
}
@@ -72,10 +72,8 @@ static int asd_init_phy(struct asd_phy *phy)
struct asd_sas_phy *sas_phy = &phy->sas_phy;
sas_phy->enabled = 1;
- sas_phy->class = SAS;
sas_phy->iproto = SAS_PROTOCOL_ALL;
sas_phy->tproto = 0;
- sas_phy->type = PHY_TYPE_PHYSICAL;
sas_phy->role = PHY_ROLE_INITIATOR;
sas_phy->oob_mode = OOB_NOT_CONNECTED;
sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index f7f81f6c3fbf..8a3340d8d7ad 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -667,7 +667,6 @@ static int asd_register_sas_ha(struct asd_ha_struct *asd_ha)
}
asd_ha->sas_ha.sas_ha_name = (char *) asd_ha->name;
- asd_ha->sas_ha.lldd_module = THIS_MODULE;
asd_ha->sas_ha.sas_addr = &asd_ha->hw_prof.sas_addr[0];
for (i = 0; i < ASD_MAX_PHYS; i++) {
@@ -688,8 +687,8 @@ static int asd_unregister_sas_ha(struct asd_ha_struct *asd_ha)
err = sas_unregister_ha(&asd_ha->sas_ha);
- sas_remove_host(asd_ha->sas_ha.core.shost);
- scsi_host_put(asd_ha->sas_ha.core.shost);
+ sas_remove_host(asd_ha->sas_ha.shost);
+ scsi_host_put(asd_ha->sas_ha.shost);
kfree(asd_ha->sas_ha.sas_phy);
kfree(asd_ha->sas_ha.sas_port);
@@ -739,7 +738,7 @@ static int asd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
asd_printk("found %s, device %s\n", asd_ha->name, pci_name(dev));
SHOST_TO_SAS_HA(shost) = &asd_ha->sas_ha;
- asd_ha->sas_ha.core.shost = shost;
+ asd_ha->sas_ha.shost = shost;
shost->transportt = aic94xx_transport_template;
shost->max_id = ~0;
shost->max_lun = ~0;
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 7f0208300110..4bfd03724ad6 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -388,14 +388,9 @@ static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task,
flags |= data_dir_flags[task->data_dir];
scb->ata_task.ata_flags = flags;
- scb->ata_task.retry_count = task->ata_task.retry_count;
+ scb->ata_task.retry_count = 0;
- flags = 0;
- if (task->ata_task.set_affil_pol)
- flags |= SET_AFFIL_POLICY;
- if (task->ata_task.stp_affil_pol)
- flags |= STP_AFFIL_POLICY;
- scb->ata_task.flags = flags;
+ scb->ata_task.flags = 0;
}
ascb->tasklet_complete = asd_task_tasklet_complete;
@@ -485,9 +480,6 @@ static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task,
scb->ssp_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);
memcpy(scb->ssp_task.ssp_cmd.lun, task->ssp_task.LUN, 8);
- if (task->ssp_task.enable_first_burst)
- scb->ssp_task.ssp_cmd.efb_prio_attr |= EFB_MASK;
- scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_prio << 3);
scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 2cd12c7f06c6..a66221c3b72f 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1715,14 +1715,14 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
arcmsr_flush_adapter_cache(acb);
}
-static int arcmsr_module_init(void)
+static int __init arcmsr_module_init(void)
{
int error = 0;
error = pci_register_driver(&arcmsr_pci_driver);
return error;
}
-static void arcmsr_module_exit(void)
+static void __exit arcmsr_module_exit(void)
{
pci_unregister_driver(&arcmsr_pci_driver);
}
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 8aeaddc93b16..8d374ae863ba 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -450,6 +450,10 @@ int beiscsi_iface_set_param(struct Scsi_Host *shost,
}
nla_for_each_attr(attrib, data, dt_len, rm_len) {
+ /* ignore nla_type as it is never used */
+ if (nla_len(attrib) < sizeof(*iface_param))
+ return -EINVAL;
+
iface_param = nla_data(attrib);
if (iface_param->param_type != ISCSI_NET_PARAM)
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index a12d693065ce..1091aa428533 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -800,7 +800,7 @@ struct fc_rscn_pl_s {
u8 command;
u8 pagelen;
__be16 payldlen;
- struct fc_rscn_event_s event[1];
+ struct fc_rscn_event_s event[];
};
/*
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index 773c84af784c..52303e8c716d 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -1051,7 +1051,7 @@ fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn,
rscn->event[0].format = FC_RSCN_FORMAT_PORTID;
rscn->event[0].portid = s_id;
- return sizeof(struct fc_rscn_pl_s);
+ return struct_size(rscn, event, 1);
}
u16
diff --git a/drivers/scsi/elx/libefc_sli/sli4.c b/drivers/scsi/elx/libefc_sli/sli4.c
index 8f96049f62dd..5e7fb110bc3f 100644
--- a/drivers/scsi/elx/libefc_sli/sli4.c
+++ b/drivers/scsi/elx/libefc_sli/sli4.c
@@ -2317,12 +2317,8 @@ sli_xmit_bls_rsp64_wqe(struct sli4 *sli, void *buf,
SLI4_GENERIC_CONTEXT_VPI << SLI4_BLS_RSP_WQE_CT_SHFT;
bls->context_tag = cpu_to_le16(params->vpi);
- if (params->s_id != U32_MAX)
- bls->local_n_port_id_dword |=
- cpu_to_le32(params->s_id & 0x00ffffff);
- else
- bls->local_n_port_id_dword |=
- cpu_to_le32(params->s_id & 0x00ffffff);
+ bls->local_n_port_id_dword |=
+ cpu_to_le32(params->s_id & 0x00ffffff);
dw_ridflags = (dw_ridflags & ~SLI4_BLS_RSP_RID) |
(params->d_id & SLI4_BLS_RSP_RID);
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 5c8d1ba3f8f3..19eee108db02 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -319,16 +319,17 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *sel;
struct fcoe_fcf *fcf;
+ unsigned long flags;
mutex_lock(&fip->ctlr_mutex);
- spin_lock_bh(&fip->ctlr_lock);
+ spin_lock_irqsave(&fip->ctlr_lock, flags);
kfree_skb(fip->flogi_req);
fip->flogi_req = NULL;
list_for_each_entry(fcf, &fip->fcfs, list)
fcf->flogi_sent = 0;
- spin_unlock_bh(&fip->ctlr_lock);
+ spin_unlock_irqrestore(&fip->ctlr_lock, flags);
sel = fip->sel_fcf;
if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr))
@@ -699,6 +700,7 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
{
struct fc_frame *fp;
struct fc_frame_header *fh;
+ unsigned long flags;
u16 old_xid;
u8 op;
u8 mac[ETH_ALEN];
@@ -732,11 +734,11 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
op = FIP_DT_FLOGI;
if (fip->mode == FIP_MODE_VN2VN)
break;
- spin_lock_bh(&fip->ctlr_lock);
+ spin_lock_irqsave(&fip->ctlr_lock, flags);
kfree_skb(fip->flogi_req);
fip->flogi_req = skb;
fip->flogi_req_send = 1;
- spin_unlock_bh(&fip->ctlr_lock);
+ spin_unlock_irqrestore(&fip->ctlr_lock, flags);
schedule_work(&fip->timer_work);
return -EINPROGRESS;
case ELS_FDISC:
@@ -1705,10 +1707,11 @@ static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip)
static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *fcf;
+ unsigned long flags;
int error;
mutex_lock(&fip->ctlr_mutex);
- spin_lock_bh(&fip->ctlr_lock);
+ spin_lock_irqsave(&fip->ctlr_lock, flags);
LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n");
fcf = fcoe_ctlr_select(fip);
if (!fcf || fcf->flogi_sent) {
@@ -1719,7 +1722,7 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
fcoe_ctlr_solicit(fip, NULL);
error = fcoe_ctlr_flogi_send_locked(fip);
}
- spin_unlock_bh(&fip->ctlr_lock);
+ spin_unlock_irqrestore(&fip->ctlr_lock, flags);
mutex_unlock(&fip->ctlr_mutex);
return error;
}
@@ -1736,8 +1739,9 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *fcf;
+ unsigned long flags;
- spin_lock_bh(&fip->ctlr_lock);
+ spin_lock_irqsave(&fip->ctlr_lock, flags);
fcf = fip->sel_fcf;
if (!fcf || !fip->flogi_req_send)
goto unlock;
@@ -1764,7 +1768,7 @@ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
} else /* XXX */
LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n");
unlock:
- spin_unlock_bh(&fip->ctlr_lock);
+ spin_unlock_irqrestore(&fip->ctlr_lock, flags);
}
/**
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index e51e92f932fa..93c68931a593 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -27,7 +27,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.6.0.55"
+#define DRV_VERSION "1.6.0.56"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
@@ -236,6 +236,7 @@ struct fnic {
unsigned int wq_count;
unsigned int cq_count;
+ struct mutex sgreset_mutex;
struct dentry *fnic_stats_debugfs_host;
struct dentry *fnic_stats_debugfs_file;
struct dentry *fnic_reset_debugfs_file;
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index be89ce96df46..9761b2c9db48 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -2168,39 +2168,6 @@ clean_pending_aborts_end:
}
/*
- * fnic_scsi_host_start_tag
- * Allocates tagid from host's tag list
- **/
-static inline int
-fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
-{
- struct request *rq = scsi_cmd_to_rq(sc);
- struct request_queue *q = rq->q;
- struct request *dummy;
-
- dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
- if (IS_ERR(dummy))
- return SCSI_NO_TAG;
-
- rq->tag = dummy->tag;
- sc->host_scribble = (unsigned char *)dummy;
-
- return dummy->tag;
-}
-
-/*
- * fnic_scsi_host_end_tag
- * frees tag allocated by fnic_scsi_host_start_tag.
- **/
-static inline void
-fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
-{
- struct request *dummy = (struct request *)sc->host_scribble;
-
- blk_mq_free_request(dummy);
-}
-
-/*
* SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
* fail to get aborted. It calls driver's eh_device_reset with a SCSI command
* on the LUN.
@@ -2222,7 +2189,6 @@ int fnic_device_reset(struct scsi_cmnd *sc)
struct reset_stats *reset_stats;
int tag = rq->tag;
DECLARE_COMPLETION_ONSTACK(tm_done);
- int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/
bool new_sc = 0;
/* Wait for rport to unblock */
@@ -2252,17 +2218,17 @@ int fnic_device_reset(struct scsi_cmnd *sc)
}
fnic_priv(sc)->flags = FNIC_DEVICE_RESET;
- /* Allocate tag if not present */
if (unlikely(tag < 0)) {
/*
- * Really should fix the midlayer to pass in a proper
- * request for ioctls...
+ * For device reset issued through sg3utils, we let
+ * only one LUN_RESET to go through and use a special
+ * tag equal to max_tag_id so that we don't have to allocate
+ * or free it. It won't interact with tags
+ * allocated by mid layer.
*/
- tag = fnic_scsi_host_start_tag(fnic, sc);
- if (unlikely(tag == SCSI_NO_TAG))
- goto fnic_device_reset_end;
- tag_gen_flag = 1;
+ mutex_lock(&fnic->sgreset_mutex);
+ tag = fnic->fnic_max_tag_id;
new_sc = 1;
}
io_lock = fnic_io_lock_hash(fnic, sc);
@@ -2434,9 +2400,8 @@ fnic_device_reset_end:
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
fnic_flags_and_state(sc));
- /* free tag if it is allocated */
- if (unlikely(tag_gen_flag))
- fnic_scsi_host_end_tag(fnic, sc);
+ if (new_sc)
+ mutex_unlock(&fnic->sgreset_mutex);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Returning from device reset %s\n",
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index d2eddad099a2..0420bfe9bd42 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -50,11 +50,6 @@ static irqreturn_t gvp11_intr(int irq, void *data)
static int gvp11_xfer_mask = 0;
-void gvp11_setup(char *str, int *ints)
-{
- gvp11_xfer_mask = ints[1];
-}
-
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 8f22ece957bd..9472b9743aef 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -787,7 +787,7 @@ static int hisi_sas_init_device(struct domain_device *device)
* However we don't need to issue a hard reset here for these
* reasons:
* a. When probing the device, libsas/libata already issues a
- * hard reset in sas_probe_sata() -> ata_sas_async_probe().
+ * hard reset in sas_probe_sata() -> ata_port_probe().
* Note that in hisi_sas_debug_I_T_nexus_reset() we take care
* to issue a hard reset by checking the dev status (== INIT).
* b. When resetting the controller, this is simply unnecessary.
@@ -1018,10 +1018,8 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
- sas_phy->class = SAS;
sas_phy->iproto = SAS_PROTOCOL_ALL;
sas_phy->tproto = 0;
- sas_phy->type = PHY_TYPE_PHYSICAL;
sas_phy->role = PHY_ROLE_INITIATOR;
sas_phy->oob_mode = OOB_NOT_CONNECTED;
sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
@@ -1065,23 +1063,18 @@ EXPORT_SYMBOL_GPL(hisi_sas_phy_enable);
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
- struct sas_ha_struct *sas_ha = sas_phy->ha;
- struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
struct hisi_sas_phy *phy = sas_phy->lldd_phy;
struct asd_sas_port *sas_port = sas_phy->port;
struct hisi_sas_port *port;
- unsigned long flags;
if (!sas_port)
return;
port = to_hisi_sas_port(sas_port);
- spin_lock_irqsave(&hisi_hba->lock, flags);
port->port_attached = 1;
port->id = phy->port_id;
phy->port = port;
sas_port->lldd_port = port;
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
}
static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
@@ -2519,10 +2512,9 @@ int hisi_sas_probe(struct platform_device *pdev,
sha->sas_ha_name = DRV_NAME;
sha->dev = hisi_hba->dev;
- sha->lldd_module = THIS_MODULE;
sha->sas_addr = &hisi_hba->sas_addr[0];
sha->num_phys = hisi_hba->n_phy;
- sha->core.shost = hisi_hba->shost;
+ sha->shost = hisi_hba->shost;
for (i = 0; i < hisi_hba->n_phy; i++) {
sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
@@ -2564,12 +2556,12 @@ void hisi_sas_remove(struct platform_device *pdev)
{
struct sas_ha_struct *sha = platform_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
- struct Scsi_Host *shost = sha->core.shost;
+ struct Scsi_Host *shost = sha->shost;
del_timer_sync(&hisi_hba->timer);
sas_unregister_ha(sha);
- sas_remove_host(sha->core.shost);
+ sas_remove_host(shost);
hisi_sas_free(hisi_hba);
scsi_host_put(shost);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 94fbbceddc2e..3c555579f9a1 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -960,7 +960,7 @@ static void prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
struct sas_tmf_task *tmf = slot->tmf;
int has_data = 0, priority = !!tmf;
- u8 *buf_cmd, fburst = 0;
+ u8 *buf_cmd;
u32 dw1, dw2;
/* create header */
@@ -1018,16 +1018,11 @@ static void prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
sizeof(struct ssp_frame_hdr);
- if (task->ssp_task.enable_first_burst) {
- fburst = (1 << 7);
- dw2 |= 1 << CMD_HDR_FIRST_BURST_OFF;
- }
hdr->dw2 = cpu_to_le32(dw2);
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
if (!tmf) {
- buf_cmd[9] = fburst | task->ssp_task.task_attr |
- (task->ssp_task.task_prio << 3);
+ buf_cmd[9] = task->ssp_task.task_attr;
memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
} else {
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 87d8e408ccd1..73b378837da7 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1798,8 +1798,7 @@ static void prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
if (!tmf) {
- buf_cmd[9] = task->ssp_task.task_attr |
- (task->ssp_task.task_prio << 3);
+ buf_cmd[9] = task->ssp_task.task_attr;
memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
} else {
@@ -2026,6 +2025,11 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
u16 dma_tx_err_type = le16_to_cpu(err_record->dma_tx_err_type);
u16 sipc_rx_err_type = le16_to_cpu(err_record->sipc_rx_err_type);
u32 dma_rx_err_type = le32_to_cpu(err_record->dma_rx_err_type);
+ struct hisi_sas_complete_v2_hdr *complete_queue =
+ hisi_hba->complete_hdr[slot->cmplt_queue];
+ struct hisi_sas_complete_v2_hdr *complete_hdr =
+ &complete_queue[slot->cmplt_queue_slot];
+ u32 dw0 = le32_to_cpu(complete_hdr->dw0);
int error = -1;
if (err_phase == 1) {
@@ -2310,7 +2314,8 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
break;
}
}
- hisi_sas_sata_done(task, slot);
+ if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)
+ hisi_sas_sata_done(task, slot);
}
break;
default:
@@ -2443,7 +2448,8 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
{
ts->stat = SAS_SAM_STAT_GOOD;
- hisi_sas_sata_done(task, slot);
+ if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)
+ hisi_sas_sata_done(task, slot);
break;
}
default:
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 20e1607c6282..bbb64ee6afd7 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -1326,7 +1326,7 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
if (!tmf) {
- buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3);
+ buf_cmd[9] = ssp_task->task_attr;
memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
} else {
buf_cmd[10] = tmf->tmf;
@@ -2257,7 +2257,8 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
}
- hisi_sas_sata_done(task, slot);
+ if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)
+ hisi_sas_sata_done(task, slot);
break;
case SAS_PROTOCOL_SMP:
ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
@@ -2384,7 +2385,8 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
ts->stat = SAS_SAM_STAT_GOOD;
- hisi_sas_sata_done(task, slot);
+ if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)
+ hisi_sas_sata_done(task, slot);
break;
default:
ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
@@ -3104,21 +3106,25 @@ static const struct hisi_sas_debugfs_reg debugfs_ras_reg = {
static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
{
- set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
-
- hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
+ struct Scsi_Host *shost = hisi_hba->shost;
+ scsi_block_requests(shost);
wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000);
+ set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
hisi_sas_sync_cqs(hisi_hba);
+ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
}
static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba)
{
+ struct Scsi_Host *shost = hisi_hba->shost;
+
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
(u32)((1ULL << hisi_hba->queue_count) - 1));
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ scsi_unblock_requests(shost);
}
static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba,
@@ -4576,7 +4582,7 @@ static int debugfs_fifo_data_v3_hw_show(struct seq_file *s, void *p)
debugfs_read_fifo_data_v3_hw(phy);
debugfs_show_row_32_v3_hw(s, 0, HISI_SAS_FIFO_DATA_DW_SIZE * 4,
- phy->fifo.rd_data);
+ (__le32 *)phy->fifo.rd_data);
return 0;
}
@@ -4950,7 +4956,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sha->sas_phy = arr_phy;
sha->sas_port = arr_port;
- sha->core.shost = shost;
+ sha->shost = shost;
sha->lldd_ha = hisi_hba;
shost->transportt = hisi_sas_stt;
@@ -4967,7 +4973,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sha->sas_ha_name = DRV_NAME;
sha->dev = dev;
- sha->lldd_module = THIS_MODULE;
sha->sas_addr = &hisi_hba->sas_addr[0];
sha->num_phys = hisi_hba->n_phy;
@@ -5055,14 +5060,14 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
struct device *dev = &pdev->dev;
struct sas_ha_struct *sha = dev_get_drvdata(dev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
- struct Scsi_Host *shost = sha->core.shost;
+ struct Scsi_Host *shost = sha->shost;
pm_runtime_get_noresume(dev);
del_timer_sync(&hisi_hba->timer);
sas_unregister_ha(sha);
flush_workqueue(hisi_hba->wq);
- sas_remove_host(sha->core.shost);
+ sas_remove_host(shost);
hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
hisi_sas_free(hisi_hba);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 198edf03f929..d7f51b84f3c7 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -537,7 +537,7 @@ EXPORT_SYMBOL(scsi_host_alloc);
static int __scsi_host_match(struct device *dev, const void *data)
{
struct Scsi_Host *p;
- const unsigned short *hostnum = data;
+ const unsigned int *hostnum = data;
p = class_to_shost(dev);
return p->host_no == *hostnum;
@@ -554,7 +554,7 @@ static int __scsi_host_match(struct device *dev, const void *data)
* that scsi_host_get() took. The put_device() below dropped
* the reference from class_find_device().
**/
-struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
+struct Scsi_Host *scsi_host_lookup(unsigned int hostnum)
{
struct device *cdev;
struct Scsi_Host *shost = NULL;
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 6bc3f022630a..52388374cf31 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -306,7 +306,7 @@ static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
static inline struct Scsi_Host *to_shost(struct isci_host *ihost)
{
- return ihost->sas_ha.core.shost;
+ return ihost->sas_ha.shost;
}
#define for_each_isci_host(id, ihost, pdev) \
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index ac1e04b86d8f..6277162a028b 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -250,7 +250,6 @@ static int isci_register_sas_ha(struct isci_host *isci_host)
return -ENOMEM;
sas_ha->sas_ha_name = DRV_NAME;
- sas_ha->lldd_module = THIS_MODULE;
sas_ha->sas_addr = &isci_host->phys[0].sas_addr[0];
for (i = 0; i < SCI_MAX_PHYS; i++) {
@@ -264,9 +263,7 @@ static int isci_register_sas_ha(struct isci_host *isci_host)
sas_ha->strict_wide_ports = 1;
- sas_register_ha(sas_ha);
-
- return 0;
+ return sas_register_ha(sas_ha);
}
static void isci_unregister(struct isci_host *isci_host)
@@ -575,7 +572,7 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
goto err_shost;
SHOST_TO_SAS_HA(shost) = &ihost->sas_ha;
- ihost->sas_ha.core.shost = shost;
+ ihost->sas_ha.shost = shost;
shost->transportt = isci_transport_template;
shost->max_id = ~0;
@@ -730,7 +727,7 @@ static int isci_resume(struct device *dev)
sas_prep_resume_ha(&ihost->sas_ha);
isci_host_init(ihost);
- isci_host_start(ihost->sas_ha.core.shost);
+ isci_host_start(ihost->sas_ha.shost);
wait_for_start(ihost);
sas_resume_ha(&ihost->sas_ha);
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index aa8787343e83..743a3c64b0da 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -1404,10 +1404,8 @@ void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
iphy->sas_phy.ha = &ihost->sas_ha;
iphy->sas_phy.lldd_phy = iphy;
iphy->sas_phy.enabled = 1;
- iphy->sas_phy.class = SAS;
iphy->sas_phy.iproto = SAS_PROTOCOL_ALL;
iphy->sas_phy.tproto = 0;
- iphy->sas_phy.type = PHY_TYPE_PHYSICAL;
iphy->sas_phy.role = PHY_ROLE_INITIATOR;
iphy->sas_phy.oob_mode = OOB_NOT_CONNECTED;
iphy->sas_phy.linkrate = SAS_LINK_RATE_UNKNOWN;
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 6370cdbfba08..a7b3243b471d 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -180,7 +180,7 @@ static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
cmd_iu->_r_a = 0;
cmd_iu->_r_b = 0;
cmd_iu->en_fburst = 0; /* unsupported */
- cmd_iu->task_prio = task->ssp_task.task_prio;
+ cmd_iu->task_prio = 0;
cmd_iu->task_attr = task->ssp_task.task_attr;
cmd_iu->_r_c = 0;
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 77714a495cbb..12e2653846e3 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -162,7 +162,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
struct domain_device *dev = ap->private_data;
struct sas_ha_struct *sas_ha = dev->port->ha;
- struct Scsi_Host *host = sas_ha->core.shost;
+ struct Scsi_Host *host = sas_ha->shost;
struct sas_internal *i = to_sas_internal(host->transportt);
/* TODO: we should try to remove that unlock */
@@ -201,12 +201,14 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
task->data_dir = qc->dma_dir;
}
task->scatter = qc->sg;
- task->ata_task.retry_count = 1;
qc->lldd_task = task;
task->ata_task.use_ncq = ata_is_ncq(qc->tf.protocol);
task->ata_task.dma_xfer = ata_is_dma(qc->tf.protocol);
+ if (qc->flags & ATA_QCFLAG_RESULT_TF)
+ task->ata_task.return_fis_on_success = 1;
+
if (qc->scsicmd)
ASSIGN_SAS_TASK(qc->scsicmd, task);
@@ -235,7 +237,7 @@ static void sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
static struct sas_internal *dev_to_sas_internal(struct domain_device *dev)
{
- return to_sas_internal(dev->port->ha->core.shost->transportt);
+ return to_sas_internal(dev->port->ha->shost->transportt);
}
static int sas_get_ata_command_set(struct domain_device *dev)
@@ -565,8 +567,6 @@ static struct ata_port_operations sas_sata_ops = {
.qc_prep = ata_noop_qc_prep,
.qc_issue = sas_ata_qc_issue,
.qc_fill_rtf = sas_ata_qc_fill_rtf,
- .port_start = ata_sas_port_start,
- .port_stop = ata_sas_port_stop,
.set_dmamode = sas_ata_set_dmamode,
.sched_eh = sas_ata_sched_eh,
.end_eh = sas_ata_end_eh,
@@ -584,7 +584,7 @@ static struct ata_port_info sata_port_info = {
int sas_ata_init(struct domain_device *found_dev)
{
struct sas_ha_struct *ha = found_dev->port->ha;
- struct Scsi_Host *shost = ha->core.shost;
+ struct Scsi_Host *shost = ha->shost;
struct ata_host *ata_host;
struct ata_port *ap;
int rc;
@@ -607,9 +607,6 @@ int sas_ata_init(struct domain_device *found_dev)
ap->private_data = found_dev;
ap->cbl = ATA_CBL_SATA;
ap->scsi_host = shost;
- rc = ata_sas_port_init(ap);
- if (rc)
- goto destroy_port;
rc = ata_sas_tport_add(ata_host->dev, ap);
if (rc)
@@ -621,7 +618,7 @@ int sas_ata_init(struct domain_device *found_dev)
return 0;
destroy_port:
- ata_sas_port_destroy(ap);
+ kfree(ap);
free_host:
ata_host_put(ata_host);
return rc;
@@ -655,7 +652,7 @@ void sas_probe_sata(struct asd_sas_port *port)
if (!dev_is_sata(dev))
continue;
- ata_sas_async_probe(dev->sata_dev.ap);
+ ata_port_probe(dev->sata_dev.ap);
}
mutex_unlock(&port->ha->disco_mutex);
@@ -822,7 +819,7 @@ static void async_sas_ata_eh(void *data, async_cookie_t cookie)
struct sas_ha_struct *ha = dev->port->ha;
sas_ata_printk(KERN_DEBUG, dev, "dev error handler\n");
- ata_scsi_port_error_handler(ha->core.shost, ap);
+ ata_scsi_port_error_handler(ha->shost, ap);
sas_put_device(dev);
}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 8c6afe724944..ff7b63b10aeb 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -170,7 +170,7 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
{
int res = 0;
struct sas_ha_struct *sas_ha = dev->port->ha;
- struct Scsi_Host *shost = sas_ha->core.shost;
+ struct Scsi_Host *shost = sas_ha->shost;
struct sas_internal *i = to_sas_internal(shost->transportt);
if (!i->dft->lldd_dev_found)
@@ -192,7 +192,7 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
void sas_notify_lldd_dev_gone(struct domain_device *dev)
{
struct sas_ha_struct *sas_ha = dev->port->ha;
- struct Scsi_Host *shost = sas_ha->core.shost;
+ struct Scsi_Host *shost = sas_ha->shost;
struct sas_internal *i = to_sas_internal(shost->transportt);
if (!i->dft->lldd_dev_gone)
@@ -234,7 +234,7 @@ static void sas_suspend_devices(struct work_struct *work)
struct domain_device *dev;
struct sas_discovery_event *ev = to_sas_discovery_event(work);
struct asd_sas_port *port = ev->port;
- struct Scsi_Host *shost = port->ha->core.shost;
+ struct Scsi_Host *shost = port->ha->shost;
struct sas_internal *si = to_sas_internal(shost->transportt);
clear_bit(DISCE_SUSPEND, &port->disc.pending);
@@ -301,7 +301,7 @@ void sas_free_device(struct kref *kref)
if (dev_is_sata(dev) && dev->sata_dev.ap) {
ata_sas_tport_delete(dev->sata_dev.ap);
- ata_sas_port_destroy(dev->sata_dev.ap);
+ kfree(dev->sata_dev.ap);
ata_host_put(dev->sata_dev.ata_host);
dev->sata_dev.ata_host = NULL;
dev->sata_dev.ap = NULL;
@@ -373,7 +373,7 @@ static bool sas_abort_cmd(struct request *req, void *data)
static void sas_abort_device_scsi_cmds(struct domain_device *dev)
{
struct sas_ha_struct *sas_ha = dev->port->ha;
- struct Scsi_Host *shost = sas_ha->core.shost;
+ struct Scsi_Host *shost = sas_ha->shost;
if (dev_is_expander(dev->dev_type))
return;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index adcac57aaee6..a2204674b680 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -37,7 +37,7 @@ static int smp_execute_task_sg(struct domain_device *dev,
int res, retry;
struct sas_task *task = NULL;
struct sas_internal *i =
- to_sas_internal(dev->port->ha->core.shost->transportt);
+ to_sas_internal(dev->port->ha->shost->transportt);
struct sas_ha_struct *ha = dev->port->ha;
pm_runtime_get_sync(ha->dev);
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index 32cdc969b736..2ecb8535634c 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -114,7 +114,7 @@ static int sas_host_smp_write_gpio(struct sas_ha_struct *sas_ha, u8 *resp_data,
u8 reg_type, u8 reg_index, u8 reg_count,
u8 *req_data)
{
- struct sas_internal *i = to_sas_internal(sas_ha->core.shost->transportt);
+ struct sas_internal *i = to_sas_internal(sas_ha->shost->transportt);
int written;
if (i->dft->lldd_write_gpio == NULL) {
@@ -182,7 +182,7 @@ static void sas_phy_control(struct sas_ha_struct *sas_ha, u8 phy_id,
enum sas_linkrate max, u8 *resp_data)
{
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
struct sas_phy_linkrates rates;
struct asd_sas_phy *asd_phy;
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index f2c05ebeb72f..8586dc79f2a0 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -183,7 +183,7 @@ static int sas_get_linkerrors(struct sas_phy *phy)
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
return i->dft->lldd_control_phy(asd_phy, PHY_FUNC_GET_EVENTS, NULL);
}
@@ -232,7 +232,7 @@ static int transport_sas_phy_reset(struct sas_phy *phy, int hard_reset)
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
if (!hard_reset && sas_try_ata_reset(asd_phy) == 0)
return 0;
@@ -266,7 +266,7 @@ int sas_phy_enable(struct sas_phy *phy, int enable)
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
if (enable)
ret = transport_sas_phy_reset(phy, 0);
@@ -303,7 +303,7 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset)
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
ret = i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
} else {
@@ -339,7 +339,7 @@ int sas_set_phy_speed(struct sas_phy *phy,
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
ret = i->dft->lldd_control_phy(asd_phy, PHY_FUNC_SET_LINK_RATE,
rates);
@@ -438,7 +438,7 @@ static void _sas_resume_ha(struct sas_ha_struct *ha, bool drain)
/* all phys are back up or timed out, turn on i/o so we can
* flush out disks that did not return
*/
- scsi_unblock_requests(ha->core.shost);
+ scsi_unblock_requests(ha->shost);
if (drain)
sas_drain_work(ha);
clear_bit(SAS_HA_RESUMING, &ha->state);
@@ -468,7 +468,7 @@ void sas_suspend_ha(struct sas_ha_struct *ha)
int i;
sas_disable_events(ha);
- scsi_block_requests(ha->core.shost);
+ scsi_block_requests(ha->shost);
for (i = 0; i < ha->num_phys; i++) {
struct asd_sas_port *port = ha->sas_port[i];
@@ -641,7 +641,7 @@ struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy,
struct asd_sas_event *event;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
event = kmem_cache_zalloc(sas_event_cache, gfp_flags);
if (!event)
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 6f593fa69b58..a6dc7dc07fce 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -41,13 +41,7 @@ struct sas_phy_data {
void sas_scsi_recover_host(struct Scsi_Host *shost);
-int sas_show_class(enum sas_class class, char *buf);
-int sas_show_proto(enum sas_protocol proto, char *buf);
-int sas_show_linkrate(enum sas_linkrate linkrate, char *buf);
-int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf);
-
int sas_register_phys(struct sas_ha_struct *sas_ha);
-void sas_unregister_phys(struct sas_ha_struct *sas_ha);
struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy, gfp_t gfp_flags);
void sas_free_event(struct asd_sas_event *event);
@@ -91,7 +85,6 @@ int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
u8 *sas_addr, enum sas_device_type *type);
int sas_try_ata_reset(struct asd_sas_phy *phy);
-void sas_hae_reset(struct work_struct *work);
void sas_free_device(struct kref *kref);
void sas_destruct_devices(struct asd_sas_port *port);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index a0d592d11dfb..57494ac97076 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -38,7 +38,7 @@ static void sas_phye_oob_error(struct work_struct *work)
struct sas_ha_struct *sas_ha = phy->ha;
struct asd_sas_port *port = phy->port;
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
sas_deform_port(phy, 1);
@@ -66,7 +66,7 @@ static void sas_phye_spinup_hold(struct work_struct *work)
struct asd_sas_phy *phy = ev->phy;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
phy->error = 0;
i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
@@ -95,7 +95,7 @@ static void sas_phye_shutdown(struct work_struct *work)
struct asd_sas_phy *phy = ev->phy;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
if (phy->enabled) {
int ret;
@@ -131,7 +131,7 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
spin_lock_init(&phy->sas_prim_lock);
phy->frame_rcvd_size = 0;
- phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev, i);
+ phy->phy = sas_phy_alloc(&sas_ha->shost->shost_gendev, i);
if (!phy->phy)
return -ENOMEM;
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 11599c0e3fc3..e3f2ed913419 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -28,7 +28,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
struct domain_device *dev, *n;
struct asd_sas_port *port = phy->port;
struct sas_ha_struct *sas_ha = phy->ha;
- struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt);
+ struct sas_internal *si = to_sas_internal(sas_ha->shost->transportt);
if (si->dft->lldd_port_formed)
si->dft->lldd_port_formed(phy);
@@ -83,7 +83,6 @@ static void sas_form_port_add_phy(struct asd_sas_port *port,
memcpy(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE);
if (*(u64 *)port->attached_sas_addr == 0) {
- port->class = phy->class;
memcpy(port->attached_sas_addr, phy->attached_sas_addr,
SAS_ADDR_SIZE);
port->iproto = phy->iproto;
@@ -109,7 +108,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
struct asd_sas_port *port = phy->port;
struct domain_device *port_dev = NULL;
struct sas_internal *si =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
unsigned long flags;
if (port) {
@@ -212,7 +211,7 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
struct sas_ha_struct *sas_ha = phy->ha;
struct asd_sas_port *port = phy->port;
struct sas_internal *si =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
struct domain_device *dev;
unsigned long flags;
@@ -249,7 +248,6 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
INIT_LIST_HEAD(&port->phy_list);
memset(port->sas_addr, 0, SAS_ADDR_SIZE);
memset(port->attached_sas_addr, 0, SAS_ADDR_SIZE);
- port->class = 0;
port->iproto = 0;
port->tproto = 0;
port->oob_mode = 0;
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 94c5f14f3c16..9047cfcd1072 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -142,7 +142,6 @@ static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
task->dev = dev;
task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */
- task->ssp_task.retry_count = 1;
int_to_scsilun(cmd->device->lun, &lun);
memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
@@ -279,7 +278,7 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
unsigned long flags;
int i, res;
struct sas_internal *si =
- to_sas_internal(task->dev->port->ha->core.shost->transportt);
+ to_sas_internal(task->dev->port->ha->shost->transportt);
for (i = 0; i < 5; i++) {
pr_notice("%s: aborting task 0x%p\n", __func__, task);
@@ -327,7 +326,7 @@ static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
int res = TMF_RESP_FUNC_FAILED;
struct scsi_lun lun;
struct sas_internal *i =
- to_sas_internal(dev->port->ha->core.shost->transportt);
+ to_sas_internal(dev->port->ha->shost->transportt);
int_to_scsilun(cmd->device->lun, &lun);
@@ -355,7 +354,7 @@ static int sas_recover_I_T(struct domain_device *dev)
{
int res = TMF_RESP_FUNC_FAILED;
struct sas_internal *i =
- to_sas_internal(dev->port->ha->core.shost->transportt);
+ to_sas_internal(dev->port->ha->shost->transportt);
pr_notice("I_T nexus reset for dev %016llx\n",
SAS_ADDR(dev->sas_addr));
@@ -387,37 +386,7 @@ struct sas_phy *sas_get_local_phy(struct domain_device *dev)
}
EXPORT_SYMBOL_GPL(sas_get_local_phy);
-static void sas_wait_eh(struct domain_device *dev)
-{
- struct sas_ha_struct *ha = dev->port->ha;
- DEFINE_WAIT(wait);
-
- if (dev_is_sata(dev)) {
- ata_port_wait_eh(dev->sata_dev.ap);
- return;
- }
- retry:
- spin_lock_irq(&ha->lock);
-
- while (test_bit(SAS_DEV_EH_PENDING, &dev->state)) {
- prepare_to_wait(&ha->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&ha->lock);
- schedule();
- spin_lock_irq(&ha->lock);
- }
- finish_wait(&ha->eh_wait_q, &wait);
-
- spin_unlock_irq(&ha->lock);
-
- /* make sure SCSI EH is complete */
- if (scsi_host_in_recovery(ha->core.shost)) {
- msleep(10);
- goto retry;
- }
-}
-
-static int sas_queue_reset(struct domain_device *dev, int reset_type,
- u64 lun, int wait)
+static int sas_queue_reset(struct domain_device *dev, int reset_type, u64 lun)
{
struct sas_ha_struct *ha = dev->port->ha;
int scheduled = 0, tries = 100;
@@ -425,8 +394,6 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type,
/* ata: promote lun reset to bus reset */
if (dev_is_sata(dev)) {
sas_ata_schedule_reset(dev);
- if (wait)
- sas_ata_wait_eh(dev);
return SUCCESS;
}
@@ -440,13 +407,10 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type,
set_bit(SAS_DEV_EH_PENDING, &dev->state);
set_bit(reset_type, &dev->state);
int_to_scsilun(lun, &dev->ssp_dev.reset_lun);
- scsi_schedule_eh(ha->core.shost);
+ scsi_schedule_eh(ha->shost);
}
spin_unlock_irq(&ha->lock);
- if (wait)
- sas_wait_eh(dev);
-
if (scheduled)
return SUCCESS;
}
@@ -499,7 +463,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
struct sas_internal *i = to_sas_internal(host->transportt);
if (current != host->ehandler)
- return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun, 0);
+ return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun);
int_to_scsilun(cmd->device->lun, &lun);
@@ -522,7 +486,7 @@ int sas_eh_target_reset_handler(struct scsi_cmnd *cmd)
struct sas_internal *i = to_sas_internal(host->transportt);
if (current != host->ehandler)
- return sas_queue_reset(dev, SAS_DEV_RESET, 0, 0);
+ return sas_queue_reset(dev, SAS_DEV_RESET, 0);
if (!i->dft->lldd_I_T_nexus_reset)
return FAILED;
@@ -925,7 +889,7 @@ static int sas_execute_internal_abort(struct domain_device *device,
unsigned int qid, void *data)
{
struct sas_ha_struct *ha = device->port->ha;
- struct sas_internal *i = to_sas_internal(ha->core.shost->transportt);
+ struct sas_internal *i = to_sas_internal(ha->shost->transportt);
struct sas_task *task = NULL;
int res, retry;
@@ -1015,7 +979,7 @@ int sas_execute_tmf(struct domain_device *device, void *parameter,
{
struct sas_task *task;
struct sas_internal *i =
- to_sas_internal(device->port->ha->core.shost->transportt);
+ to_sas_internal(device->port->ha->shost->transportt);
int res, retry;
for (retry = 0; retry < TASK_RETRY; retry++) {
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 9a8963684369..af15f7a22d25 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -309,7 +309,6 @@ struct lpfc_hba;
#define LPFC_VMID_TIMER 300 /* timer interval in seconds */
#define LPFC_MAX_VMID_SIZE 256
-#define LPFC_COMPRESS_VMID_SIZE 16
union lpfc_vmid_io_tag {
u32 app_id; /* App Id vmid */
@@ -667,7 +666,7 @@ struct lpfc_vport {
uint32_t cfg_first_burst_size;
uint32_t dev_loss_tmo_changed;
/* VMID parameters */
- u8 lpfc_vmid_host_uuid[LPFC_COMPRESS_VMID_SIZE];
+ u8 lpfc_vmid_host_uuid[16];
u32 max_vmid; /* maximum VMIDs allowed per port */
u32 cur_vmid_cnt; /* Current VMID count */
#define LPFC_MIN_VMID 4
@@ -872,6 +871,7 @@ enum lpfc_irq_chann_mode {
enum lpfc_hba_bit_flags {
FABRIC_COMANDS_BLOCKED,
HBA_PCI_ERR,
+ MBX_TMO_ERR,
};
struct lpfc_hba {
@@ -1709,6 +1709,25 @@ lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
return cpu_it;
}
/**
+ * lpfc_next_present_cpu - Finds next present CPU after n
+ * @n: the cpu prior to search
+ *
+ * Note: If no next present cpu, then fallback to first present cpu.
+ *
+ **/
+static inline unsigned int lpfc_next_present_cpu(int n)
+{
+ unsigned int cpu;
+
+ cpu = cpumask_next(n, cpu_present_mask);
+
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_first(cpu_present_mask);
+
+ return cpu;
+}
+
+/**
* lpfc_sli4_mod_hba_eq_delay - update EQ delay
* @phba: Pointer to HBA context object.
* @q: The Event Queue to update.
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 21c7ecd3ede5..b1c9107d3408 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2127,11 +2127,12 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
uint32_t *mrpi, uint32_t *arpi,
uint32_t *mvpi, uint32_t *avpi)
{
- struct lpfc_mbx_read_config *rd_config;
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *pmb;
int rc = 0;
- uint32_t max_vpi;
+ struct lpfc_sli4_hba *sli4_hba;
+ struct lpfc_max_cfg_param *max_cfg_param;
+ u16 rsrc_ext_cnt, rsrc_ext_size, max_vpi;
/*
* prevent udev from issuing mailbox commands until the port is
@@ -2167,31 +2168,65 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
}
if (phba->sli_rev == LPFC_SLI_REV4) {
- rd_config = &pmboxq->u.mqe.un.rd_config;
- if (mrpi)
- *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
- if (arpi)
- *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
- phba->sli4_hba.max_cfg_param.rpi_used;
- if (mxri)
- *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
- if (axri)
- *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
- phba->sli4_hba.max_cfg_param.xri_used;
+ sli4_hba = &phba->sli4_hba;
+ max_cfg_param = &sli4_hba->max_cfg_param;
+
+ /* Normally, extents are not used */
+ if (!phba->sli4_hba.extents_in_use) {
+ if (mrpi)
+ *mrpi = max_cfg_param->max_rpi;
+ if (mxri)
+ *mxri = max_cfg_param->max_xri;
+ if (mvpi) {
+ max_vpi = max_cfg_param->max_vpi;
+
+ /* Limit the max we support */
+ if (max_vpi > LPFC_MAX_VPI)
+ max_vpi = LPFC_MAX_VPI;
+ *mvpi = max_vpi;
+ }
+ } else { /* Extents in use */
+ if (mrpi) {
+ if (lpfc_sli4_get_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_RPI,
+ &rsrc_ext_cnt,
+ &rsrc_ext_size)) {
+ rc = 0;
+ goto free_pmboxq;
+ }
+
+ *mrpi = rsrc_ext_cnt * rsrc_ext_size;
+ }
- /* Account for differences with SLI-3. Get vpi count from
- * mailbox data and subtract one for max vpi value.
- */
- max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
- (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
+ if (mxri) {
+ if (lpfc_sli4_get_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_XRI,
+ &rsrc_ext_cnt,
+ &rsrc_ext_size)) {
+ rc = 0;
+ goto free_pmboxq;
+ }
- /* Limit the max we support */
- if (max_vpi > LPFC_MAX_VPI)
- max_vpi = LPFC_MAX_VPI;
- if (mvpi)
- *mvpi = max_vpi;
- if (avpi)
- *avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used;
+ *mxri = rsrc_ext_cnt * rsrc_ext_size;
+ }
+
+ if (mvpi) {
+ if (lpfc_sli4_get_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_VPI,
+ &rsrc_ext_cnt,
+ &rsrc_ext_size)) {
+ rc = 0;
+ goto free_pmboxq;
+ }
+
+ max_vpi = rsrc_ext_cnt * rsrc_ext_size;
+
+ /* Limit the max we support */
+ if (max_vpi > LPFC_MAX_VPI)
+ max_vpi = LPFC_MAX_VPI;
+ *mvpi = max_vpi;
+ }
+ }
} else {
if (mrpi)
*mrpi = pmb->un.varRdConfig.max_rpi;
@@ -2212,8 +2247,12 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
}
}
+ /* Success */
+ rc = 1;
+
+free_pmboxq:
mempool_free(pmboxq, phba->mbox_mem_pool);
- return 1;
+ return rc;
}
/**
@@ -2265,10 +2304,19 @@ lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- uint32_t cnt, acnt;
+ struct lpfc_sli4_hba *sli4_hba;
+ struct lpfc_max_cfg_param *max_cfg_param;
+ u32 cnt = 0, acnt = 0;
- if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
- return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ sli4_hba = &phba->sli4_hba;
+ max_cfg_param = &sli4_hba->max_cfg_param;
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ max_cfg_param->rpi_used);
+ } else {
+ if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
+ return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ }
return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}
@@ -2321,10 +2369,19 @@ lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- uint32_t cnt, acnt;
+ struct lpfc_sli4_hba *sli4_hba;
+ struct lpfc_max_cfg_param *max_cfg_param;
+ u32 cnt = 0, acnt = 0;
- if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
- return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ sli4_hba = &phba->sli4_hba;
+ max_cfg_param = &sli4_hba->max_cfg_param;
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ max_cfg_param->xri_used);
+ } else {
+ if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
+ return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ }
return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}
@@ -2377,10 +2434,19 @@ lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- uint32_t cnt, acnt;
+ struct lpfc_sli4_hba *sli4_hba;
+ struct lpfc_max_cfg_param *max_cfg_param;
+ u32 cnt = 0, acnt = 0;
- if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
- return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ sli4_hba = &phba->sli4_hba;
+ max_cfg_param = &sli4_hba->max_cfg_param;
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ max_cfg_param->vpi_used);
+ } else {
+ if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
+ return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ }
return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 474834f313a7..baae1f8279e0 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1557,7 +1557,8 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK)
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY | LOG_NODE,
"3064 Setting ndlp x%px, DID x%06x "
"with FC4 x%08x, Data: x%08x x%08x "
"%d\n",
@@ -1568,14 +1569,21 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE &&
ndlp->nlp_fc4_type) {
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
-
- lpfc_nlp_set_state(vport, ndlp,
- NLP_STE_PRLI_ISSUE);
- lpfc_issue_els_prli(vport, ndlp, 0);
+ /* This is a fabric topology so if discovery
+ * started with an unsolicited PLOGI, don't
+ * send a PRLI. Targets don't issue PLOGI or
+ * PRLI when acting as a target. Likely this is
+ * an initiator function.
+ */
+ if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_PRLI_ISSUE);
+ lpfc_issue_els_prli(vport, ndlp, 0);
+ }
} else if (!ndlp->nlp_fc4_type) {
/* If fc4 type is still unknown, then LOGO */
lpfc_printf_vlog(vport, KERN_INFO,
- LOG_DISCOVERY,
+ LOG_DISCOVERY | LOG_NODE,
"6443 Sending LOGO ndlp x%px,"
"DID x%06x with fc4_type: "
"x%08x, state: %d\n",
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 2bad9954c355..54e47f268235 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1041,7 +1041,7 @@ stop_rr_fcf_flogi:
!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
lpfc_nlp_put(ndlp);
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
"0150 FLOGI failure Status:x%x/x%x "
"xri x%x TMO:x%x refcnt %d\n",
ulp_status, ulp_word4, cmdiocb->sli4_xritag,
@@ -1091,7 +1091,6 @@ stop_rr_fcf_flogi:
if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
lpfc_issue_reg_vfi(vport);
- lpfc_nlp_put(ndlp);
goto out;
}
goto flogifail;
@@ -1332,7 +1331,8 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (phba->cfg_vmid_priority_tagging) {
sp->cmn.priority_tagging = 1;
/* lpfc_vmid_host_uuid is combination of wwpn and wwnn */
- if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) {
+ if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0,
+ sizeof(vport->lpfc_vmid_host_uuid))) {
memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn,
sizeof(phba->wwpn));
memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn,
@@ -2377,10 +2377,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* PRLI failed */
lpfc_printf_vlog(vport, mode, loglevel,
"2754 PRLI failure DID:%06X Status:x%x/x%x, "
- "data: x%x x%x\n",
+ "data: x%x x%x x%x\n",
ndlp->nlp_DID, ulp_status,
ulp_word4, ndlp->nlp_state,
- ndlp->fc4_prli_sent);
+ ndlp->fc4_prli_sent, ndlp->nlp_flag);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
@@ -2391,14 +2391,16 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* mismatch typically caused by an RSCN. Skip any
* processing to allow recovery.
*/
- if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
- ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) {
+ if ((ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
+ ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) ||
+ (ndlp->nlp_state == NLP_STE_NPR_NODE &&
+ ndlp->nlp_flag & NLP_DELAY_TMO)) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "2784 PRLI cmpl: state mismatch "
+ "2784 PRLI cmpl: Allow Node recovery "
"DID x%06x nstate x%x nflag x%x\n",
ndlp->nlp_DID, ndlp->nlp_state,
ndlp->nlp_flag);
- goto out;
+ goto out;
}
/*
@@ -6166,11 +6168,25 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
npr->TaskRetryIdReq = 1;
}
npr->acceptRspCode = PRLI_REQ_EXECUTED;
- npr->estabImagePair = 1;
+
+ /* Set image pair for complementary pairs only. */
+ if (ndlp->nlp_type & NLP_FCP_TARGET)
+ npr->estabImagePair = 1;
+ else
+ npr->estabImagePair = 0;
npr->readXferRdyDis = 1;
npr->ConfmComplAllowed = 1;
npr->prliType = PRLI_FCP_TYPE;
npr->initiatorFunc = 1;
+
+ /* Xmit PRLI ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_NODE | LOG_DISCOVERY,
+ "6014 FCP issue PRLI ACC imgpair %d "
+ "retry %d task %d\n",
+ npr->estabImagePair,
+ npr->Retry, npr->TaskRetryIdReq);
+
} else if (prli_fc4_req == PRLI_NVME_TYPE) {
/* Respond with an NVME PRLI Type */
npr_nvme = (struct lpfc_nvme_prli *) pcmd;
@@ -9588,11 +9604,13 @@ void
lpfc_els_flush_cmd(struct lpfc_vport *vport)
{
LIST_HEAD(abort_list);
+ LIST_HEAD(cancel_list);
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *tmp_iocb, *piocb;
u32 ulp_command;
unsigned long iflags = 0;
+ bool mbx_tmo_err;
lpfc_fabric_abort_vport(vport);
@@ -9614,15 +9632,16 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock);
+ mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags);
/* First we need to issue aborts to outstanding cmds on txcmpl */
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
- if (piocb->cmd_flag & LPFC_IO_LIBDFC)
+ if (piocb->cmd_flag & LPFC_IO_LIBDFC && !mbx_tmo_err)
continue;
if (piocb->vport != vport)
continue;
- if (piocb->cmd_flag & LPFC_DRIVER_ABORTED)
+ if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err)
continue;
/* On the ELS ring we can have ELS_REQUESTs or
@@ -9641,8 +9660,8 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
*/
if (phba->link_state == LPFC_LINK_DOWN)
piocb->cmd_cmpl = lpfc_cmpl_els_link_down;
- }
- if (ulp_command == CMD_GEN_REQUEST64_CR)
+ } else if (ulp_command == CMD_GEN_REQUEST64_CR ||
+ mbx_tmo_err)
list_add_tail(&piocb->dlist, &abort_list);
}
@@ -9654,11 +9673,19 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
spin_lock_irqsave(&phba->hbalock, iflags);
list_del_init(&piocb->dlist);
- lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
+ if (mbx_tmo_err)
+ list_move_tail(&piocb->list, &cancel_list);
+ else
+ lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
+
spin_unlock_irqrestore(&phba->hbalock, iflags);
}
- /* Make sure HBA is alive */
- lpfc_issue_hb_tmo(phba);
+ if (!list_empty(&cancel_list))
+ lpfc_sli_cancel_iocbs(phba, &cancel_list, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_ABORTED);
+ else
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
if (!list_empty(&abort_list))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -12331,9 +12358,10 @@ lpfc_vmid_uvem(struct lpfc_vport *vport,
elsiocb->vmid_tag.vmid_context = vmid_context;
pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
- if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid))
+ if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0,
+ sizeof(vport->lpfc_vmid_host_uuid)))
memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid,
- LPFC_COMPRESS_VMID_SIZE);
+ sizeof(vport->lpfc_vmid_host_uuid));
*((u32 *)(pcmd)) = ELS_CMD_UVEM;
len = (u32 *)(pcmd + 4);
@@ -12343,13 +12371,13 @@ lpfc_vmid_uvem(struct lpfc_vport *vport,
vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG);
vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE);
memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid,
- LPFC_COMPRESS_VMID_SIZE);
+ sizeof(vem_id_desc->vem_id));
inst_desc = (struct instantiated_ve_desc *)(pcmd + 32);
inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE);
memcpy(inst_desc->global_vem_id, vmid->host_vmid,
- LPFC_COMPRESS_VMID_SIZE);
+ sizeof(inst_desc->global_vem_id));
bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID);
bf_set(lpfc_instantiated_local_id, inst_desc,
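
Both uuid_is_null() call sites above are replaced with memchr_inv(buf, 0, len), which returns NULL only when every byte in the buffer equals the given value; that matches the intended test ("is this 16-byte host UUID still unset?") without casting a plain byte array to uuid_t. A hedged user-space equivalent of that check:

#include <stdio.h>
#include <string.h>

/* Like the kernel's memchr_inv(): NULL iff every byte of p[0..n) equals c. */
static const void *memchr_inv_sketch(const void *p, int c, size_t n)
{
	const unsigned char *s = p;
	size_t i;

	for (i = 0; i < n; i++)
		if (s[i] != (unsigned char)c)
			return s + i;		/* first mismatching byte */
	return NULL;				/* buffer is entirely 'c' */
}

int main(void)
{
	unsigned char host_uuid[16] = { 0 };	/* illustrative 16-byte UUID buffer */

	if (!memchr_inv_sketch(host_uuid, 0, sizeof(host_uuid)))
		printf("UUID unset, seed it from wwpn/wwnn\n");

	host_uuid[3] = 0xaa;
	if (memchr_inv_sketch(host_uuid, 0, sizeof(host_uuid)))
		printf("UUID already populated\n");
	return 0;
}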
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index fdd7f69d87ef..51afb60859eb 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -169,29 +169,44 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"3181 dev_loss_callbk x%06x, rport x%px flg x%x "
- "load_flag x%x refcnt %d state %d xpt x%x\n",
+ "load_flag x%x refcnt %u state %d xpt x%x\n",
ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
vport->load_flag, kref_read(&ndlp->kref),
ndlp->nlp_state, ndlp->fc4_xpt_flags);
- /* Don't schedule a worker thread event if the vport is going down.
- * The teardown process cleans up the node via lpfc_drop_node.
- */
+ /* Don't schedule a worker thread event if the vport is going down. */
if (vport->load_flag & FC_UNLOADING) {
- ((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
+ spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->rport = NULL;
- ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
- /* clear the NLP_XPT_REGD if the node is not registered
- * with nvme-fc
+ /* The scsi_transport is done with the rport so lpfc cannot
+ * call to unregister. Remove the scsi transport reference
+ * and clean up the SCSI transport node details.
*/
- if (ndlp->fc4_xpt_flags == NLP_XPT_REGD)
- ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
+ if (ndlp->fc4_xpt_flags & (NLP_XPT_REGD | SCSI_XPT_REGD)) {
+ ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
+
+ /* NVME transport-registered rports need the
+ * NLP_XPT_REGD flag to complete an unregister.
+ */
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
+ ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_nlp_put(ndlp);
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ }
- /* Remove the node reference from remote_port_add now.
- * The driver will not call remote_port_delete.
+ /* Only 1 thread can drop the initial node reference. If
+ * another thread has set NLP_DROPPED, this thread is done.
*/
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->nlp_flag & NLP_DROPPED)) {
+ ndlp->nlp_flag |= NLP_DROPPED;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_nlp_put(ndlp);
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ }
+
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
return;
}
@@ -4686,7 +4701,8 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_lock_irqsave(&ndlp->lock, iflags);
if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
spin_unlock_irqrestore(&ndlp->lock, iflags);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_NODE | LOG_DISCOVERY,
"0999 %s Not regd: ndlp x%px rport x%px DID "
"x%x FLG x%x XPT x%x\n",
__func__, ndlp, ndlp->rport, ndlp->nlp_DID,
@@ -4702,9 +4718,10 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
vport->phba->nport_event_cnt++;
lpfc_unregister_remote_port(ndlp);
} else if (!ndlp->rport) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_NODE | LOG_DISCOVERY,
"1999 %s NDLP in devloss x%px DID x%x FLG x%x"
- " XPT x%x refcnt %d\n",
+ " XPT x%x refcnt %u\n",
__func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->fc4_xpt_flags,
kref_read(&ndlp->kref));
@@ -4954,22 +4971,29 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
/*
* Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
- * be used if we wish to issue the "last" lpfc_nlp_put() to remove
- * the ndlp from the vport. The ndlp marked as UNUSED on the list
- * until ALL other outstanding threads have completed. We check
- * that the ndlp not already in the UNUSED state before we proceed.
+ * be used when lpfc wants to remove the "last" lpfc_nlp_put() to
+ * release the ndlp from the vport when conditions are correct.
*/
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
return;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
- ndlp->nlp_flag |= NLP_DROPPED;
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
lpfc_cleanup_vports_rrqs(vport, ndlp);
lpfc_unreg_rpi(vport, ndlp);
}
- lpfc_nlp_put(ndlp);
- return;
+ /* NLP_DROPPED means another thread already removed the initial
+ * reference from lpfc_nlp_init. If set, don't drop it again and
+ * introduce an imbalance.
+ */
+ spin_lock_irq(&ndlp->lock);
+ if (!(ndlp->nlp_flag & NLP_DROPPED)) {
+ ndlp->nlp_flag |= NLP_DROPPED;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_nlp_put(ndlp);
+ return;
+ }
+ spin_unlock_irq(&ndlp->lock);
}
/*
@@ -5757,8 +5781,11 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
(NLP_FCP_TARGET | NLP_NVME_TARGET)))
return NULL;
- lpfc_disc_state_machine(vport, ndlp, NULL,
- NLP_EVT_DEVICE_RECOVERY);
+ if (ndlp->nlp_state > NLP_STE_UNUSED_NODE &&
+ ndlp->nlp_state < NLP_STE_PRLI_ISSUE) {
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RECOVERY);
+ }
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
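
lpfc_dev_loss_tmo_callbk and lpfc_drop_node above both gate the final reference release on a test-and-set of NLP_DROPPED under ndlp->lock, so exactly one thread ever drops the initial node reference taken at lpfc_nlp_init() time. A minimal pthread sketch of that drop-once pattern (the flag, lock and refcount below are illustrative stand-ins, not the driver's types):

#include <pthread.h>
#include <stdio.h>

struct node {
	pthread_mutex_t lock;
	int dropped;		/* stands in for the NLP_DROPPED flag */
	int refcnt;		/* stands in for kref_read(&ndlp->kref) */
};

static void node_put(struct node *n)
{
	n->refcnt--;		/* illustrative; the driver uses kref_put() */
}

/* Only the caller that wins the flag drops the initial reference. */
static void drop_initial_ref_once(struct node *n)
{
	pthread_mutex_lock(&n->lock);
	if (!n->dropped) {
		n->dropped = 1;
		pthread_mutex_unlock(&n->lock);
		node_put(n);
		return;
	}
	pthread_mutex_unlock(&n->lock);
}

int main(void)
{
	struct node n = { PTHREAD_MUTEX_INITIALIZER, 0, 2 };

	drop_initial_ref_once(&n);	/* drops: refcnt 2 -> 1 */
	drop_initial_ref_once(&n);	/* no-op: flag already set */
	printf("refcnt=%d dropped=%d\n", n.refcnt, n.dropped);
	return 0;
}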
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index aaea3e31944d..2108b4cb7815 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -764,6 +764,8 @@ typedef struct _PRLI { /* Structure is in Big Endian format */
#define PRLI_PREDEF_CONFIG 0x5
#define PRLI_PARTIAL_SUCCESS 0x6
#define PRLI_INVALID_PAGE_CNT 0x7
+#define PRLI_INV_SRV_PARM 0x8
+
uint8_t word0Reserved3; /* FC Parm Word 0, bit 0:7 */
uint32_t origProcAssoc; /* FC Parm Word 1, bit 0:31 */
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 3221a934066b..9e59c050103d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2123,7 +2123,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
en_rn_msg = false;
} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3144 Port Down: Debug Dump\n");
else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
@@ -7550,6 +7550,8 @@ lpfc_disable_pci_dev(struct lpfc_hba *phba)
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
+ int rc = 0;
+
/* If resets are disabled then set error state and return. */
if (!phba->cfg_enable_hba_reset) {
phba->link_state = LPFC_HBA_ERROR;
@@ -7560,13 +7562,25 @@ lpfc_reset_hba(struct lpfc_hba *phba)
if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
lpfc_offline_prep(phba, LPFC_MBX_WAIT);
} else {
+ if (test_bit(MBX_TMO_ERR, &phba->bit_flags)) {
+ /* Perform a PCI function reset to start from clean */
+ rc = lpfc_pci_function_reset(phba);
+ lpfc_els_flush_all_cmd(phba);
+ }
lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
lpfc_sli_flush_io_rings(phba);
}
lpfc_offline(phba);
- lpfc_sli_brdrestart(phba);
- lpfc_online(phba);
- lpfc_unblock_mgmt_io(phba);
+ clear_bit(MBX_TMO_ERR, &phba->bit_flags);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "8888 PCI function reset failed rc %x\n",
+ rc);
+ } else {
+ lpfc_sli_brdrestart(phba);
+ lpfc_online(phba);
+ lpfc_unblock_mgmt_io(phba);
+ }
}
/**
@@ -12498,10 +12512,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
(new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
(new_cpup->phys_id == cpup->phys_id))
goto found_same;
- new_cpu = cpumask_next(
- new_cpu, cpu_present_mask);
- if (new_cpu >= nr_cpu_ids)
- new_cpu = first_cpu;
+ new_cpu = lpfc_next_present_cpu(new_cpu);
}
/* At this point, we leave the CPU as unassigned */
continue;
@@ -12513,9 +12524,7 @@ found_same:
* chance of having multiple unassigned CPU entries
* selecting the same IRQ.
*/
- start_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (start_cpu >= nr_cpu_ids)
- start_cpu = first_cpu;
+ start_cpu = lpfc_next_present_cpu(new_cpu);
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3337 Set Affinity: CPU %d "
@@ -12548,10 +12557,7 @@ found_same:
if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
(new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
goto found_any;
- new_cpu = cpumask_next(
- new_cpu, cpu_present_mask);
- if (new_cpu >= nr_cpu_ids)
- new_cpu = first_cpu;
+ new_cpu = lpfc_next_present_cpu(new_cpu);
}
/* We should never leave an entry unassigned */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -12567,9 +12573,7 @@ found_any:
* chance of having multiple unassigned CPU entries
* selecting the same IRQ.
*/
- start_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (start_cpu >= nr_cpu_ids)
- start_cpu = first_cpu;
+ start_cpu = lpfc_next_present_cpu(new_cpu);
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3338 Set Affinity: CPU %d "
@@ -12640,9 +12644,7 @@ found_any:
new_cpup->core_id == cpup->core_id) {
goto found_hdwq;
}
- new_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (new_cpu >= nr_cpu_ids)
- new_cpu = first_cpu;
+ new_cpu = lpfc_next_present_cpu(new_cpu);
}
/* If we can't match both phys_id and core_id,
@@ -12654,10 +12656,7 @@ found_any:
if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
new_cpup->phys_id == cpup->phys_id)
goto found_hdwq;
-
- new_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (new_cpu >= nr_cpu_ids)
- new_cpu = first_cpu;
+ new_cpu = lpfc_next_present_cpu(new_cpu);
}
/* Otherwise just round robin on cfg_hdw_queue */
@@ -12666,9 +12665,7 @@ found_any:
goto logit;
found_hdwq:
/* We found an available entry, copy the IRQ info */
- start_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (start_cpu >= nr_cpu_ids)
- start_cpu = first_cpu;
+ start_cpu = lpfc_next_present_cpu(new_cpu);
cpup->hdwq = new_cpup->hdwq;
logit:
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
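
Every open-coded cpumask_next()-then-wrap-to-first_cpu pair above collapses into lpfc_next_present_cpu(). The helper itself is introduced elsewhere in this series (the diffstat shows lpfc.h changing); judging only from the code it replaces, its expected shape is roughly the sketch below, which is an assumption rather than the series' actual definition:

#include <linux/cpumask.h>

/* Sketch of the presumed helper: advance within cpu_present_mask and wrap. */
static inline unsigned int lpfc_next_present_cpu(unsigned int n)
{
	unsigned int cpu = cpumask_next(n, cpu_present_mask);

	if (cpu >= nr_cpu_ids)			/* walked off the end: wrap around */
		cpu = cpumask_first(cpu_present_mask);

	return cpu;
}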
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index b86ff9fcdf0c..1eb7f7e60bba 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -879,23 +879,34 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_unlock_irq(shost->host_lock);
lpfc_retry_pport_discovery(phba);
}
- } else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
- ((ndlp->nlp_type & NLP_FCP_TARGET) ||
- (ndlp->nlp_type & NLP_NVME_TARGET) ||
- (vport->fc_flag & FC_PT2PT))) ||
- (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
- /* Only try to re-login if this is NOT a Fabric Node
- * AND the remote NPORT is a FCP/NVME Target or we
- * are in pt2pt mode. NLP_STE_ADISC_ISSUE is a special
- * case for LOGO as a response to ADISC behavior.
- */
- mod_timer(&ndlp->nlp_delayfunc,
- jiffies + msecs_to_jiffies(1000 * 1));
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(&ndlp->lock);
-
- ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_ELS | LOG_DISCOVERY,
+ "3203 LOGO recover nport x%06x state x%x "
+ "ntype x%x fc_flag x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_state,
+ ndlp->nlp_type, vport->fc_flag);
+
+ /* Special cases for rports that recover post LOGO. */
+ if ((!(ndlp->nlp_type == NLP_FABRIC) &&
+ (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) ||
+ vport->fc_flag & FC_PT2PT)) ||
+ (ndlp->nlp_state >= NLP_STE_ADISC_ISSUE ||
+ ndlp->nlp_state <= NLP_STE_PRLI_ISSUE)) {
+ mod_timer(&ndlp->nlp_delayfunc,
+ jiffies + msecs_to_jiffies(1000 * 1));
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(&ndlp->lock);
+ ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_ELS | LOG_DISCOVERY,
+ "3204 Start nlpdelay on DID x%06x "
+ "nflag x%x lastels x%x ref cnt %u",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_last_elscmd,
+ kref_read(&ndlp->kref));
+ }
}
out:
/* Unregister from backend, could have been skipped due to ADISC */
@@ -1854,7 +1865,6 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
LPFC_MBOXQ_t *mb;
LPFC_MBOXQ_t *nextmb;
- struct lpfc_nodelist *ns_ndlp;
cmdiocb = (struct lpfc_iocbq *) arg;
@@ -1882,13 +1892,6 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
}
spin_unlock_irq(&phba->hbalock);
- /* software abort if any GID_FT is outstanding */
- if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
- ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (ns_ndlp)
- lpfc_els_abort(phba, ns_ndlp);
- }
-
lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
@@ -2148,6 +2151,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_nvme_prli *nvpr;
void *temp_ptr;
u32 ulp_status;
+ bool acc_imode_sps = false;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->rsp_iocb;
@@ -2182,22 +2186,32 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
goto out_err;
}
- if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
- (npr->prliType == PRLI_FCP_TYPE)) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6028 FCP NPR PRLI Cmpl Init %d Target %d\n",
- npr->initiatorFunc,
- npr->targetFunc);
- if (npr->initiatorFunc)
- ndlp->nlp_type |= NLP_FCP_INITIATOR;
- if (npr->targetFunc) {
- ndlp->nlp_type |= NLP_FCP_TARGET;
- if (npr->writeXferRdyDis)
- ndlp->nlp_flag |= NLP_FIRSTBURST;
+ if (npr && npr->prliType == PRLI_FCP_TYPE) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_NODE | LOG_DISCOVERY,
+ "6028 FCP NPR PRLI Cmpl Init %d Target %d "
+ "EIP %d AccCode x%x\n",
+ npr->initiatorFunc, npr->targetFunc,
+ npr->estabImagePair, npr->acceptRspCode);
+
+ if (npr->acceptRspCode == PRLI_INV_SRV_PARM) {
+ /* Strict initiators don't establish an image pair. */
+ if (npr->initiatorFunc && !npr->targetFunc &&
+ !npr->estabImagePair)
+ acc_imode_sps = true;
}
- if (npr->Retry)
- ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+ if (npr->acceptRspCode == PRLI_REQ_EXECUTED || acc_imode_sps) {
+ if (npr->initiatorFunc)
+ ndlp->nlp_type |= NLP_FCP_INITIATOR;
+ if (npr->targetFunc) {
+ ndlp->nlp_type |= NLP_FCP_TARGET;
+ if (npr->writeXferRdyDis)
+ ndlp->nlp_flag |= NLP_FIRSTBURST;
+ }
+ if (npr->Retry)
+ ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+ }
} else if (nvpr &&
(bf_get_be32(prli_acc_rsp_code, nvpr) ==
PRLI_REQ_EXECUTED) &&
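
The reworked FCP branch above accepts a PRLI response in two cases: the usual PRLI_REQ_EXECUTED, or the new PRLI_INV_SRV_PARM code when the responder is a pure initiator that did not establish an image pair, which is how a strict initiator legitimately answers a PRLI. A small truth-table sketch of that decision (the 0x8 value comes from the lpfc_hw.h hunk above; treating PRLI_REQ_EXECUTED as 0x1 is an assumption made here for illustration):

#include <stdbool.h>
#include <stdio.h>

#define PRLI_REQ_EXECUTED 0x1	/* assumed value, illustration only */
#define PRLI_INV_SRV_PARM 0x8	/* added by the lpfc_hw.h hunk above */

/* Accept when executed, or when a strict initiator (initiator-only, no
 * image pair) answered with "invalid service parameters". */
static bool prli_rsp_acceptable(unsigned int acc, bool init_fn, bool tgt_fn,
				bool estab_image_pair)
{
	bool strict_initiator_sps = acc == PRLI_INV_SRV_PARM &&
				    init_fn && !tgt_fn && !estab_image_pair;

	return acc == PRLI_REQ_EXECUTED || strict_initiator_sps;
}

int main(void)
{
	printf("%d\n", prli_rsp_acceptable(PRLI_REQ_EXECUTED, true, true, true));   /* 1 */
	printf("%d\n", prli_rsp_acceptable(PRLI_INV_SRV_PARM, true, false, false)); /* 1 */
	printf("%d\n", prli_rsp_acceptable(PRLI_INV_SRV_PARM, true, true, true));   /* 0 */
	return 0;
}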
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 8db7cb99903d..39acbcb7ec66 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1864,7 +1864,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_fcpreq_priv *freqpriv;
unsigned long flags;
int ret_val;
- struct nvme_fc_cmd_iu *cp;
/* Validate pointers. LLDD fault handling with transport does
* have timing races.
@@ -1988,16 +1987,10 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
return;
}
- /*
- * Get Command Id from cmd to plug into response. This
- * code is not needed in the next NVME Transport drop.
- */
- cp = (struct nvme_fc_cmd_iu *)lpfc_nbuf->nvmeCmd->cmdaddr;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
"6138 Transport Abort NVME Request Issued for "
- "ox_id x%x nvme opcode x%x nvme cmd_id x%x\n",
- nvmereq_wqe->sli4_xritag, cp->sqe.common.opcode,
- cp->sqe.common.command_id);
+ "ox_id x%x\n",
+ nvmereq_wqe->sli4_xritag);
return;
out_unlock:
@@ -2510,8 +2503,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"6031 RemotePort Registration failed "
- "err: %d, DID x%06x\n",
- ret, ndlp->nlp_DID);
+ "err: %d, DID x%06x ref %u\n",
+ ret, ndlp->nlp_DID, kref_read(&ndlp->kref));
+ lpfc_nlp_put(ndlp);
}
return ret;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index dff4584d338b..425328d9c2d8 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1620,10 +1620,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
cpu = cpumask_first(cpu_present_mask);
continue;
}
- cpu = cpumask_next(cpu, cpu_present_mask);
- if (cpu == nr_cpu_ids)
- cpu = cpumask_first(cpu_present_mask);
-
+ cpu = lpfc_next_present_cpu(cpu);
}
for_each_present_cpu(i) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 58d10f8f75a7..4dfadf254a72 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -3935,6 +3935,8 @@ void lpfc_poll_eratt(struct timer_list *t)
uint64_t sli_intr, cnt;
phba = from_timer(phba, t, eratt_poll);
+ if (!(phba->hba_flag & HBA_SETUP))
+ return;
/* Here we will also keep track of interrupts per sec of the hba */
sli_intr = phba->sli.slistat.sli_intr;
@@ -7693,7 +7695,9 @@ lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
spin_unlock_irq(&phba->hbalock);
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "3161 Failure to post sgl to port.\n");
+ "3161 Failure to post sgl to port,status %x "
+ "blkcnt %d totalcnt %d postcnt %d\n",
+ status, block_cnt, total_cnt, post_cnt);
return -EIO;
}
@@ -8478,6 +8482,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
}
}
+ phba->hba_flag &= ~HBA_SETUP;
lpfc_sli4_dip(phba);
@@ -9282,6 +9287,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
* would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
* it to fail all outstanding SCSI IO.
*/
+ set_bit(MBX_TMO_ERR, &phba->bit_flags);
spin_lock_irq(&phba->pport->work_port_lock);
phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
spin_unlock_irq(&phba->pport->work_port_lock);
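
The new MBX_TMO_ERR bit threads through three files in this series: lpfc_mbox_timeout_handler() above sets it, lpfc_reset_hba() in the lpfc_init.c hunk tests it to decide on a PCI function reset (and clears it afterwards), and lpfc_els_flush_cmd() in the lpfc_els.c hunk tests it to cancel outstanding ELS commands locally instead of issuing aborts to a wedged mailbox. A condensed user-space sketch of that flag flow, with a plain boolean standing in for the phba->bit_flags bit:

#include <stdbool.h>
#include <stdio.h>

static bool mbx_tmo_err;	/* stand-in for test/set/clear_bit(MBX_TMO_ERR, ...) */

static void mbox_timeout_handler(void)
{
	mbx_tmo_err = true;	/* producer: mailbox command timed out */
}

static void els_flush_cmd(void)
{
	if (mbx_tmo_err)
		printf("move ELS IOCBs to a cancel list, no aborts issued\n");
	else
		printf("abort ELS IOCBs via lpfc_sli_issue_abort_iotag()\n");
}

static void reset_hba(void)
{
	if (mbx_tmo_err)
		printf("PCI function reset before offline/online\n");
	mbx_tmo_err = false;	/* one-shot: cleared once the reset path has run */
}

int main(void)
{
	mbox_timeout_handler();
	els_flush_cmd();
	reset_hba();
	return 0;
}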
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 6f35491aed0f..13a547277f97 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.2.0.13"
+#define LPFC_DRIVER_VERSION "14.2.0.14"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index ef2b6380e19a..bc867da650b6 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -438,7 +438,7 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
// set up PCI related soft state and other pre-known parameters
- adapter->unique_id = pdev->bus->number << 8 | pdev->devfn;
+ adapter->unique_id = pci_dev_id(pdev);
adapter->irq = pdev->irq;
adapter->pdev = pdev;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 050eed8e2684..b9d46dcb5210 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -7518,7 +7518,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
*/
instance->pdev = pdev;
instance->host = host;
- instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
+ instance->unique_id = pci_dev_id(pdev);
instance->init_id = MEGASAS_DEFAULT_INIT_ID;
megasas_set_adapter_type(instance);
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
index 2fc196499c89..35f81af40f51 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -1482,7 +1482,7 @@ struct mpi3_security_page0 {
#define MPI3_SECURITY1_KEY_RECORD_MAX 1
#endif
#ifndef MPI3_SECURITY1_PAD_MAX
-#define MPI3_SECURITY1_PAD_MAX 1
+#define MPI3_SECURITY1_PAD_MAX 4
#endif
union mpi3_security1_key_data {
__le32 dword[128];
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
index f5e9c2309ce6..1e4a60fc655f 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -600,6 +600,7 @@ struct mpi3_event_data_pcie_error_threshold {
__le16 threshold_count;
__le16 attached_dev_handle;
__le16 reserved12;
+ __le32 reserved14;
};
#define MPI3_EVENT_PCI_ERROR_RC_THRESHOLD_EXCEEDED (0x00)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
index 441cfc2c7f09..1e0a3dcaf723 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
@@ -18,7 +18,7 @@ union mpi3_version_union {
#define MPI3_VERSION_MAJOR (3)
#define MPI3_VERSION_MINOR (0)
-#define MPI3_VERSION_UNIT (27)
+#define MPI3_VERSION_UNIT (28)
#define MPI3_VERSION_DEV (0)
#define MPI3_DEVHANDLE_INVALID (0xffff)
struct mpi3_sysif_oper_queue_indexes {
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 0afb687402e1..ae98d15c30b1 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -55,8 +55,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
-#define MPI3MR_DRIVER_VERSION "8.4.1.0.0"
-#define MPI3MR_DRIVER_RELDATE "16-March-2023"
+#define MPI3MR_DRIVER_VERSION "8.5.0.0.0"
+#define MPI3MR_DRIVER_RELDATE "24-July-2023"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@@ -66,11 +66,12 @@ extern atomic64_t event_counter;
#define MPI3MR_NAME_LENGTH 32
#define IOCNAME "%s: "
-#define MPI3MR_MAX_SECTORS 2048
+#define MPI3MR_DEFAULT_MAX_IO_SIZE (1 * 1024 * 1024)
/* Definitions for internal SGL and Chain SGL buffers */
#define MPI3MR_PAGE_SIZE_4K 4096
-#define MPI3MR_SG_DEPTH (MPI3MR_PAGE_SIZE_4K / sizeof(struct mpi3_sge_common))
+#define MPI3MR_DEFAULT_SGL_ENTRIES 256
+#define MPI3MR_MAX_SGL_ENTRIES 2048
/* Definitions for MAX values for shost */
#define MPI3MR_MAX_CMDS_LUN 128
@@ -206,6 +207,9 @@ extern atomic64_t event_counter;
*/
#define MPI3MR_MAX_APP_XFER_SECTORS (2048 + 512)
+#define MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS 256
+#define MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS 2048
+
/**
* struct mpi3mr_nvme_pt_sge - Structure to store SGEs for NVMe
* Encapsulated commands.
@@ -323,6 +327,7 @@ struct mpi3mr_ioc_facts {
u16 max_perids;
u16 max_pds;
u16 max_sasexpanders;
+ u32 max_data_length;
u16 max_sasinitiators;
u16 max_enclosures;
u16 max_pcie_switches;
@@ -676,6 +681,7 @@ enum mpi3mr_dev_state {
* @io_unit_port: IO Unit port ID
* @non_stl: Is this device not to be attached with SAS TL
* @io_throttle_enabled: I/O throttling needed or not
+ * @wslen: Write same max length
* @q_depth: Device specific Queue Depth
* @wwid: World wide ID
* @enclosure_logical_id: Enclosure logical identifier
@@ -698,6 +704,7 @@ struct mpi3mr_tgt_dev {
u8 io_unit_port;
u8 non_stl;
u8 io_throttle_enabled;
+ u16 wslen;
u16 q_depth;
u64 wwid;
u64 enclosure_logical_id;
@@ -751,6 +758,8 @@ static inline void mpi3mr_tgtdev_put(struct mpi3mr_tgt_dev *s)
* @dev_removed: Device removed in the Firmware
* @dev_removedelay: Device is waiting to be removed in FW
* @dev_type: Device type
+ * @dev_nvme_dif: Device is NVMe DIF enabled
+ * @wslen: Write same max length
* @io_throttle_enabled: I/O throttling needed or not
* @io_divert: Flag indicates io divert is on or off for the dev
* @throttle_group: Pointer to throttle group info
@@ -767,6 +776,8 @@ struct mpi3mr_stgt_priv_data {
u8 dev_removed;
u8 dev_removedelay;
u8 dev_type;
+ u8 dev_nvme_dif;
+ u16 wslen;
u8 io_throttle_enabled;
u8 io_divert;
struct mpi3mr_throttle_group_info *throttle_group;
@@ -782,12 +793,14 @@ struct mpi3mr_stgt_priv_data {
* @ncq_prio_enable: NCQ priority enable for SATA device
* @pend_count: Counter to track pending I/Os during error
* handling
+ * @wslen: Write same max length
*/
struct mpi3mr_sdev_priv_data {
struct mpi3mr_stgt_priv_data *tgt_priv_data;
u32 lun_id;
u8 ncq_prio_enable;
u32 pend_count;
+ u16 wslen;
};
/**
@@ -959,6 +972,7 @@ struct scmd_priv {
* @stop_drv_processing: Stop all command processing
* @device_refresh_on: Don't process the events until devices are refreshed
* @max_host_ios: Maximum host I/O count
+ * @max_sgl_entries: Max SGL entries per I/O
* @chain_buf_count: Chain buffer count
* @chain_buf_pool: Chain buffer pool
* @chain_sgl_list: Chain SGL list
@@ -1129,6 +1143,7 @@ struct mpi3mr_ioc {
u16 max_host_ios;
spinlock_t tgtdev_lock;
struct list_head tgtdev_list;
+ u16 max_sgl_entries;
u32 chain_buf_count;
struct dma_pool *chain_buf_pool;
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 5fa07d6ee5b8..f039f1d98647 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -1163,6 +1163,12 @@ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
return -EPERM;
}
+ if (mrioc->shost->max_sectors != (mrioc->facts.max_data_length / 512))
+ ioc_err(mrioc, "Warning: The maximum data transfer length\n"
+ "\tchanged after reset: previous(%d), new(%d),\n"
+ "the driver cannot change this at run time\n",
+ mrioc->shost->max_sectors * 512, mrioc->facts.max_data_length);
+
if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED))
ioc_err(mrioc,
@@ -2343,8 +2349,8 @@ static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
mrioc->init_cmds.is_waiting = 0;
if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_TSU_TIMEOUT);
retval = -1;
goto out_unlock;
}
@@ -2856,6 +2862,7 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
le16_to_cpu(facts_data->max_pcie_switches);
mrioc->facts.max_sasexpanders =
le16_to_cpu(facts_data->max_sas_expanders);
+ mrioc->facts.max_data_length = le16_to_cpu(facts_data->max_data_length);
mrioc->facts.max_sasinitiators =
le16_to_cpu(facts_data->max_sas_initiators);
mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
@@ -2893,13 +2900,18 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
mrioc->facts.io_throttle_high =
le16_to_cpu(facts_data->io_throttle_high);
+ if (mrioc->facts.max_data_length ==
+ MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED)
+ mrioc->facts.max_data_length = MPI3MR_DEFAULT_MAX_IO_SIZE;
+ else
+ mrioc->facts.max_data_length *= MPI3MR_PAGE_SIZE_4K;
/* Store in 512b block count */
if (mrioc->facts.io_throttle_data_length)
mrioc->io_throttle_data_length =
(mrioc->facts.io_throttle_data_length * 2 * 4);
else
/* set the length to 1MB + 1K to disable throttle */
- mrioc->io_throttle_data_length = MPI3MR_MAX_SECTORS + 2;
+ mrioc->io_throttle_data_length = (mrioc->facts.max_data_length / 512) + 2;
mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);
@@ -2914,9 +2926,9 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
mrioc->facts.sge_mod_shift);
- ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
+ ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x max_data_len (%d)\n",
mrioc->facts.dma_mask, (facts_flags &
- MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));
+ MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK), mrioc->facts.max_data_length);
ioc_info(mrioc,
"max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
@@ -3359,8 +3371,8 @@ int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1);
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_EVTACK_TIMEOUT);
retval = -1;
goto out_unlock;
}
@@ -3414,7 +3426,14 @@ static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
if (!mrioc->chain_sgl_list)
goto out_failed;
- sz = MPI3MR_PAGE_SIZE_4K;
+ if (mrioc->max_sgl_entries > (mrioc->facts.max_data_length /
+ MPI3MR_PAGE_SIZE_4K))
+ mrioc->max_sgl_entries = mrioc->facts.max_data_length /
+ MPI3MR_PAGE_SIZE_4K;
+ sz = mrioc->max_sgl_entries * sizeof(struct mpi3_sge_common);
+ ioc_info(mrioc, "number of sgl entries=%d chain buffer size=%dKB\n",
+ mrioc->max_sgl_entries, sz/1024);
+
mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
&mrioc->pdev->dev, sz, 16, 0);
if (!mrioc->chain_buf_pool) {
@@ -3813,7 +3832,7 @@ retry_init:
}
mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
-
+ mrioc->shost->max_sectors = mrioc->facts.max_data_length / 512;
mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
atomic_set(&mrioc->pend_large_data_sz, 0);
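
A worked example of the unit handling in the hunks above: IOCFacts reports max_data_length in 4 KiB units (or 0 for "not reported", which falls back to the 1 MiB default), the driver converts it to bytes, derives shost->max_sectors and the throttle-off length in 512-byte sectors, and caps max_sgl_entries so the chain buffer never describes more 4 KiB pages than the largest possible transfer. A small sketch of that arithmetic (the 16-byte SGE size is an assumption about struct mpi3_sge_common):

#include <stdio.h>

#define PAGE_4K              4096u
#define DEFAULT_MAX_IO_SIZE  (1u * 1024 * 1024)	/* mirrors MPI3MR_DEFAULT_MAX_IO_SIZE */
#define SGE_SIZE             16u		/* assumed sizeof(struct mpi3_sge_common) */

int main(void)
{
	unsigned int reported_4k_units = 512;	/* hypothetical IOCFacts MaxDataLength */
	unsigned int max_data_len, max_sgl_entries = 2048;

	/* 0 means "not reported": fall back to the 1 MiB default. */
	max_data_len = reported_4k_units ? reported_4k_units * PAGE_4K
					 : DEFAULT_MAX_IO_SIZE;

	printf("max_sectors       = %u\n", max_data_len / 512);	/* 512-byte sectors */
	printf("throttle disable  = %u\n", max_data_len / 512 + 2);

	/* No point keeping more SGL entries than 4 KiB pages in the biggest I/O. */
	if (max_sgl_entries > max_data_len / PAGE_4K)
		max_sgl_entries = max_data_len / PAGE_4K;
	printf("chain buffer size = %u bytes\n", max_sgl_entries * SGE_SIZE);
	return 0;
}

With a reported value of 512 (a 2 MiB limit), this prints 4096 sectors, a throttle-off length of 4098 and a 512-entry, 8 KiB chain buffer, matching the ioc_info() line added in mpi3mr_alloc_chain_bufs().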
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index d627355303d7..89ba015c5d7e 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -33,6 +33,12 @@ static int logging_level;
module_param(logging_level, int, 0);
MODULE_PARM_DESC(logging_level,
" bits for enabling additional logging info (default=0)");
+static int max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
+module_param(max_sgl_entries, int, 0444);
+MODULE_PARM_DESC(max_sgl_entries,
+ "Preferred max number of SG entries to be used for a single I/O\n"
+ "The actual value will be determined by the driver\n"
+ "(Minimum=256, Maximum=2048, default=256)");
/* Forward declarations*/
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
@@ -424,6 +430,7 @@ void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
tgt_priv->io_throttle_enabled = 0;
tgt_priv->io_divert = 0;
tgt_priv->throttle_group = NULL;
+ tgt_priv->wslen = 0;
if (tgtdev->host_exposed)
atomic_set(&tgt_priv->block_io, 1);
}
@@ -1034,6 +1041,19 @@ mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
{
struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+ dprint_reset(mrioc, "refresh target devices: check for removals\n");
+ list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
+ list) {
+ if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) &&
+ tgtdev->host_exposed && tgtdev->starget &&
+ tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ tgt_priv->dev_removed = 1;
+ atomic_set(&tgt_priv->block_io, 0);
+ }
+ }
list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
list) {
@@ -1102,6 +1122,18 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
tgtdev->io_throttle_enabled =
(flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;
+ switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) {
+ case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB:
+ tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS;
+ break;
+ case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB:
+ tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS;
+ break;
+ case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT:
+ default:
+ tgtdev->wslen = 0;
+ break;
+ }
if (tgtdev->starget && tgtdev->starget->hostdata) {
scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
@@ -1113,6 +1145,7 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
tgtdev->io_throttle_enabled;
if (is_added == true)
atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ scsi_tgt_priv_data->wslen = tgtdev->wslen;
}
switch (dev_pg0->access_status) {
@@ -3413,7 +3446,7 @@ static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
scsi_bufflen(scmd));
return -ENOMEM;
}
- if (sges_left > MPI3MR_SG_DEPTH) {
+ if (sges_left > mrioc->max_sgl_entries) {
sdev_printk(KERN_ERR, scmd->device,
"scsi_dma_map returned unsupported sge count %d!\n",
sges_left);
@@ -3934,6 +3967,48 @@ void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
}
/**
+ * mpi3mr_setup_divert_ws - Setup Divert IO flag for write same
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ * @scsiio_req: MPI3 SCSI IO request
+ * @scsiio_flags: Pointer to MPI3 SCSI IO Flags
+ * @wslen: write same max length
+ *
+ * Gets values of unmap, ndob and number of blocks from write
+ * same scsi io and based on these values it sets divert IO flag
+ * and reason for diverting IO to firmware.
+ *
+ * Return: Nothing
+ */
+static inline void mpi3mr_setup_divert_ws(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req,
+ u32 *scsiio_flags, u16 wslen)
+{
+ u8 unmap = 0, ndob = 0;
+ u8 opcode = scmd->cmnd[0];
+ u32 num_blocks = 0;
+ u16 sa = (scmd->cmnd[8] << 8) | (scmd->cmnd[9]);
+
+ if (opcode == WRITE_SAME_16) {
+ unmap = scmd->cmnd[1] & 0x08;
+ ndob = scmd->cmnd[1] & 0x01;
+ num_blocks = get_unaligned_be32(scmd->cmnd + 10);
+ } else if ((opcode == VARIABLE_LENGTH_CMD) && (sa == WRITE_SAME_32)) {
+ unmap = scmd->cmnd[10] & 0x08;
+ ndob = scmd->cmnd[10] & 0x01;
+ num_blocks = get_unaligned_be32(scmd->cmnd + 28);
+ } else
+ return;
+
+ if ((unmap) && (ndob) && (num_blocks > wslen)) {
+ scsiio_req->msg_flags |=
+ MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
+ *scsiio_flags |=
+ MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE;
+ }
+}
+
+/**
* mpi3mr_eh_host_reset - Host reset error handling callback
* @scmd: SCSI command reference
*
@@ -4430,7 +4505,6 @@ static int mpi3mr_target_alloc(struct scsi_target *starget)
unsigned long flags;
int retval = 0;
struct sas_rphy *rphy = NULL;
- bool update_stgt_priv_data = false;
scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
if (!scsi_tgt_priv_data)
@@ -4439,39 +4513,50 @@ static int mpi3mr_target_alloc(struct scsi_target *starget)
starget->hostdata = scsi_tgt_priv_data;
spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
-
if (starget->channel == mrioc->scsi_device_channel) {
tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
- if (tgt_dev && !tgt_dev->is_hidden)
- update_stgt_priv_data = true;
- else
+ if (tgt_dev && !tgt_dev->is_hidden) {
+ scsi_tgt_priv_data->starget = starget;
+ scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
+ scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
+ scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
+ scsi_tgt_priv_data->tgt_dev = tgt_dev;
+ tgt_dev->starget = starget;
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ retval = 0;
+ if ((tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
+ ((tgt_dev->dev_spec.pcie_inf.dev_info &
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
+ ((tgt_dev->dev_spec.pcie_inf.dev_info &
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK) !=
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0))
+ scsi_tgt_priv_data->dev_nvme_dif = 1;
+ scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
+ scsi_tgt_priv_data->wslen = tgt_dev->wslen;
+ if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
+ scsi_tgt_priv_data->throttle_group = tgt_dev->dev_spec.vd_inf.tg;
+ } else
retval = -ENXIO;
} else if (mrioc->sas_transport_enabled && !starget->channel) {
rphy = dev_to_rphy(starget->dev.parent);
tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
rphy->identify.sas_address, rphy);
if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl &&
- (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA))
- update_stgt_priv_data = true;
- else
+ (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
+ scsi_tgt_priv_data->starget = starget;
+ scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
+ scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
+ scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
+ scsi_tgt_priv_data->tgt_dev = tgt_dev;
+ scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
+ scsi_tgt_priv_data->wslen = tgt_dev->wslen;
+ tgt_dev->starget = starget;
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ retval = 0;
+ } else
retval = -ENXIO;
}
-
- if (update_stgt_priv_data) {
- scsi_tgt_priv_data->starget = starget;
- scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
- scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
- scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
- scsi_tgt_priv_data->tgt_dev = tgt_dev;
- tgt_dev->starget = starget;
- atomic_set(&scsi_tgt_priv_data->block_io, 0);
- retval = 0;
- scsi_tgt_priv_data->io_throttle_enabled =
- tgt_dev->io_throttle_enabled;
- if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
- scsi_tgt_priv_data->throttle_group =
- tgt_dev->dev_spec.vd_inf.tg;
- }
spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
return retval;
@@ -4732,6 +4817,10 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
mpi3mr_setup_eedp(mrioc, scmd, scsiio_req);
+ if (stgt_priv_data->wslen)
+ mpi3mr_setup_divert_ws(mrioc, scmd, scsiio_req, &scsiio_flags,
+ stgt_priv_data->wslen);
+
memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len);
scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd));
scsiio_req->dev_handle = cpu_to_le16(dev_handle);
@@ -4818,10 +4907,10 @@ static const struct scsi_host_template mpi3mr_driver_template = {
.no_write_same = 1,
.can_queue = 1,
.this_id = -1,
- .sg_tablesize = MPI3MR_SG_DEPTH,
+ .sg_tablesize = MPI3MR_DEFAULT_SGL_ENTRIES,
/* max xfer supported is 1M (2K in 512 byte sized sectors)
*/
- .max_sectors = 2048,
+ .max_sectors = (MPI3MR_DEFAULT_MAX_IO_SIZE / 512),
.cmd_per_lun = MPI3MR_MAX_CMDS_LUN,
.max_segment_size = 0xffffffff,
.track_queue_depth = 1,
@@ -5004,6 +5093,16 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mrioc->pdev = pdev;
mrioc->stop_bsgs = 1;
+ mrioc->max_sgl_entries = max_sgl_entries;
+ if (max_sgl_entries > MPI3MR_MAX_SGL_ENTRIES)
+ mrioc->max_sgl_entries = MPI3MR_MAX_SGL_ENTRIES;
+ else if (max_sgl_entries < MPI3MR_DEFAULT_SGL_ENTRIES)
+ mrioc->max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
+ else {
+ mrioc->max_sgl_entries /= MPI3MR_DEFAULT_SGL_ENTRIES;
+ mrioc->max_sgl_entries *= MPI3MR_DEFAULT_SGL_ENTRIES;
+ }
+
/* init shost parameters */
shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
shost->max_lun = -1;
@@ -5068,7 +5167,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->nr_maps = 3;
shost->can_queue = mrioc->max_host_ios;
- shost->sg_tablesize = MPI3MR_SG_DEPTH;
+ shost->sg_tablesize = mrioc->max_sgl_entries;
shost->max_id = mrioc->facts.max_perids + 1;
retval = scsi_add_host(shost, &pdev->dev);
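
The probe-time handling of the new max_sgl_entries module parameter clamps the request to the [256, 2048] window and rounds it down to a multiple of 256 before it is fed to sg_tablesize, which is what the parameter description means by "the actual value will be determined by the driver". A user-space sketch of that clamp-and-round step:

#include <stdio.h>

#define DEFAULT_SGL_ENTRIES 256		/* mirrors MPI3MR_DEFAULT_SGL_ENTRIES */
#define MAX_SGL_ENTRIES     2048	/* mirrors MPI3MR_MAX_SGL_ENTRIES */

/* Clamp to [256, 2048], then round down to a multiple of 256. */
static int clamp_sgl_entries(int requested)
{
	if (requested > MAX_SGL_ENTRIES)
		return MAX_SGL_ENTRIES;
	if (requested < DEFAULT_SGL_ENTRIES)
		return DEFAULT_SGL_ENTRIES;
	return (requested / DEFAULT_SGL_ENTRIES) * DEFAULT_SGL_ENTRIES;
}

int main(void)
{
	int samples[] = { 100, 256, 700, 1024, 5000 };
	unsigned int i;

	/* prints: 100->256, 256->256, 700->512, 1024->1024, 5000->2048 */
	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%4d -> %d\n", samples[i], clamp_sgl_entries(samples[i]));
	return 0;
}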
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index ed3923f8db4f..6de35b32223c 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -199,7 +199,7 @@
*
*****************************************************************************/
-typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS {
+typedef struct _MPI2_SYSTEM_INTERFACE_REGS {
U32 Doorbell; /*0x00 */
U32 WriteSequence; /*0x04 */
U32 HostDiagnostic; /*0x08 */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 53f5492579cb..61a32bf00747 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -138,6 +138,9 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
static void
_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
+static u32
+_base_readl_ext_retry(const void __iomem *addr);
+
/**
* mpt3sas_base_check_cmd_timeout - Function
* to check timeout and command termination due
@@ -201,7 +204,7 @@ module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
* while reading the system interface register.
*/
static inline u32
-_base_readl_aero(const volatile void __iomem *addr)
+_base_readl_aero(const void __iomem *addr)
{
u32 i = 0, ret_val;
@@ -213,8 +216,22 @@ _base_readl_aero(const volatile void __iomem *addr)
return ret_val;
}
+static u32
+_base_readl_ext_retry(const void __iomem *addr)
+{
+ u32 i, ret_val;
+
+ for (i = 0 ; i < 30 ; i++) {
+ ret_val = readl(addr);
+ if (ret_val == 0)
+ continue;
+ }
+
+ return ret_val;
+}
+
static inline u32
-_base_readl(const volatile void __iomem *addr)
+_base_readl(const void __iomem *addr)
{
return readl(addr);
}
@@ -940,7 +957,7 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
dump_stack();
- doorbell = ioc->base_readl(&ioc->chip->Doorbell);
+ doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
mpt3sas_print_fault_code(ioc, doorbell &
MPI2_DOORBELL_DATA_MASK);
@@ -6686,7 +6703,7 @@ mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
{
u32 s, sc;
- s = ioc->base_readl(&ioc->chip->Doorbell);
+ s = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
sc = s & MPI2_IOC_STATE_MASK;
return cooked ? sc : s;
}
@@ -6831,7 +6848,7 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
__func__, count, timeout));
return 0;
} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
- doorbell = ioc->base_readl(&ioc->chip->Doorbell);
+ doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
if ((doorbell & MPI2_IOC_STATE_MASK) ==
MPI2_IOC_STATE_FAULT) {
mpt3sas_print_fault_code(ioc, doorbell);
@@ -6871,7 +6888,7 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
count = 0;
cntdn = 1000 * timeout;
do {
- doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
+ doorbell_reg = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
dhsprintk(ioc,
ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
@@ -7019,7 +7036,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
__le32 *mfp;
/* make sure doorbell is not in use */
- if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
+ if ((ioc->base_readl_ext_retry(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
return -EFAULT;
}
@@ -7068,7 +7085,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
}
/* read the first two 16-bits, it gives the total length of the reply */
- reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
+ reply[0] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
if ((_base_wait_for_doorbell_int(ioc, 5))) {
@@ -7076,7 +7093,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
__LINE__);
return -EFAULT;
}
- reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
+ reply[1] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
@@ -7087,10 +7104,10 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
return -EFAULT;
}
if (i >= reply_bytes/2) /* overflow case */
- ioc->base_readl(&ioc->chip->Doorbell);
+ ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
else
reply[i] = le16_to_cpu(
- ioc->base_readl(&ioc->chip->Doorbell)
+ ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
}
@@ -7949,7 +7966,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
goto out;
}
- host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
+ host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
drsprintk(ioc,
ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
count, host_diagnostic));
@@ -7969,7 +7986,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
for (count = 0; count < (300000000 /
MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
- host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
+ host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
if (host_diagnostic == 0xFFFFFFFF) {
ioc_info(ioc,
@@ -8359,10 +8376,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->rdpq_array_enable_assigned = 0;
ioc->use_32bit_dma = false;
ioc->dma_mask = 64;
- if (ioc->is_aero_ioc)
+ if (ioc->is_aero_ioc) {
ioc->base_readl = &_base_readl_aero;
- else
+ ioc->base_readl_ext_retry = &_base_readl_ext_retry;
+ } else {
ioc->base_readl = &_base_readl;
+ ioc->base_readl_ext_retry = &_base_readl;
+ }
r = mpt3sas_base_map_resources(ioc);
if (r)
goto out_free_resources;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 05364aa15ecd..1be0850ca17a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -994,7 +994,7 @@ typedef void (*NVME_BUILD_PRP)(struct MPT3SAS_ADAPTER *ioc, u16 smid,
typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 funcdep);
typedef void (*PUT_SMID_DEFAULT) (struct MPT3SAS_ADAPTER *ioc, u16 smid);
-typedef u32 (*BASE_READ_REG) (const volatile void __iomem *addr);
+typedef u32 (*BASE_READ_REG) (const void __iomem *addr);
/*
* To get high iops reply queue's msix index when high iops mode is enabled
* else get the msix index of general reply queues.
@@ -1618,6 +1618,7 @@ struct MPT3SAS_ADAPTER {
u8 diag_trigger_active;
u8 atomic_desc_capable;
BASE_READ_REG base_readl;
+ BASE_READ_REG base_readl_ext_retry;
struct SL_WH_MASTER_TRIGGER_T diag_trigger_master;
struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 49e2a5e7ce54..43ebb331e216 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -84,10 +84,8 @@ static void mvs_phy_init(struct mvs_info *mvi, int phy_id)
phy->port = NULL;
timer_setup(&phy->timer, NULL, 0);
sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
- sas_phy->class = SAS;
sas_phy->iproto = SAS_PROTOCOL_ALL;
sas_phy->tproto = 0;
- sas_phy->type = PHY_TYPE_PHYSICAL;
sas_phy->role = PHY_ROLE_INITIATOR;
sas_phy->oob_mode = OOB_NOT_CONNECTED;
sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
@@ -416,7 +414,7 @@ static int mvs_prep_sas_ha_init(struct Scsi_Host *shost,
sha->sas_phy = arr_phy;
sha->sas_port = arr_port;
- sha->core.shost = shost;
+ sha->shost = shost;
sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
if (!sha->lldd_ha)
@@ -458,7 +456,6 @@ static void mvs_post_sas_ha_init(struct Scsi_Host *shost,
sha->sas_ha_name = DRV_NAME;
sha->dev = mvi->dev;
- sha->lldd_module = THIS_MODULE;
sha->sas_addr = &mvi->sas_addr[0];
sha->num_phys = nr_core * chip_info->n_phy;
@@ -473,7 +470,7 @@ static void mvs_post_sas_ha_init(struct Scsi_Host *shost,
shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG);
shost->can_queue = can_queue;
mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE;
- sha->core.shost = mvi->shost;
+ sha->shost = mvi->shost;
}
static void mvs_init_sas_add(struct mvs_info *mvi)
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 9978c424214c..1444b1f1c4c8 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -564,7 +564,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
void *buf_prd;
struct ssp_frame_hdr *ssp_hdr;
void *buf_tmp;
- u8 *buf_cmd, *buf_oaf, fburst = 0;
+ u8 *buf_cmd, *buf_oaf;
dma_addr_t buf_tmp_dma;
u32 flags;
u32 resp_len, req_len, i, tag = tei->tag;
@@ -582,10 +582,6 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
(phy_mask << TXQ_PHY_SHIFT));
flags = MCH_RETRY;
- if (task->ssp_task.enable_first_burst) {
- flags |= MCH_FBURST;
- fburst = (1 << 7);
- }
if (is_tmf)
flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
else
@@ -667,8 +663,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
if (ssp_hdr->frame_type != SSP_TASK) {
- buf_cmd[9] = fburst | task->ssp_task.task_attr |
- (task->ssp_task.task_prio << 3);
+ buf_cmd[9] = task->ssp_task.task_attr;
memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
} else{
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 73aa7059b556..d9d366ec17dc 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -1500,7 +1500,7 @@ static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
if (sdev) {
- scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_rescan_device(sdev);
scsi_device_put(sdev);
}
}
@@ -2490,7 +2490,7 @@ static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
mhba->pdev = pdev;
mhba->shost = host;
- mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;
+ mhba->unique_id = pci_dev_id(pdev);
ret = mvumi_init_fw(mhba);
if (ret)
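
The megaraid and mvumi hunks above swap the open-coded "bus->number << 8 | devfn" for pci_dev_id(pdev), which wraps PCI_DEVID() and yields the same 16-bit value: bus number in the high byte, device/function in the low byte. A tiny sketch of the equivalence:

#include <stdint.h>
#include <stdio.h>

/* What pci_dev_id() boils down to: PCI_DEVID(bus, devfn). */
static uint16_t pci_devid(uint8_t bus, uint8_t devfn)
{
	return (uint16_t)((bus << 8) | devfn);
}

int main(void)
{
	uint8_t bus = 0x3b, devfn = (0x00 << 3) | 0x1;	/* device 0, function 1 */

	/* Same value the removed "pdev->bus->number << 8 | pdev->devfn" computed. */
	printf("unique_id = 0x%04x\n", pci_devid(bus, devfn));
	return 0;
}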
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 73cd25f30ca5..33053db5a713 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -4053,9 +4053,6 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
ssp_cmd.tag = cpu_to_le32(tag);
- if (task->ssp_task.enable_first_burst)
- ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
- ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
@@ -4095,7 +4092,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
u32 hdr_tag, ncg_tag = 0;
u64 phys_addr;
u32 ATAP = 0x0;
- u32 dir;
+ u32 dir, retfis = 0;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
memset(&sata_cmd, 0, sizeof(sata_cmd));
@@ -4124,8 +4121,11 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
sata_cmd.tag = cpu_to_le32(tag);
sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);
- sata_cmd.ncqtag_atap_dir_m =
- cpu_to_le32(((ncg_tag & 0xff)<<16)|((ATAP & 0x3f) << 10) | dir);
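+	/* retfis (bit 24) asks the firmware to return the received FIS
+	 * even on successful completion (task->ata_task.return_fis_on_success).
+	 */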
+ if (task->ata_task.return_fis_on_success)
+ retfis = 1;
+ sata_cmd.retfis_ncqtag_atap_dir_m =
+ cpu_to_le32((retfis << 24) | ((ncg_tag & 0xff) << 16) |
+ ((ATAP & 0x3f) << 10) | dir);
sata_cmd.sata_fis = task->ata_task.fis;
if (likely(!task->ata_task.device_control_reg_update))
sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index 961d0465b923..fc2127dcb58d 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -515,7 +515,7 @@ struct sata_start_req {
__le32 tag;
__le32 device_id;
__le32 data_len;
- __le32 ncqtag_atap_dir_m;
+ __le32 retfis_ncqtag_atap_dir_m;
struct host_to_dev_fis sata_fis;
u32 reserved1;
u32 reserved2;
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 4995e1ef4e0e..5e5ce1e74c3b 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -162,10 +162,8 @@ static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
- sas_phy->class = SAS;
sas_phy->iproto = SAS_PROTOCOL_ALL;
sas_phy->tproto = 0;
- sas_phy->type = PHY_TYPE_PHYSICAL;
sas_phy->role = PHY_ROLE_INITIATOR;
sas_phy->oob_mode = OOB_NOT_CONNECTED;
sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
@@ -654,10 +652,9 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
sha->sas_ha_name = DRV_NAME;
sha->dev = pm8001_ha->dev;
sha->strict_wide_ports = 1;
- sha->lldd_module = THIS_MODULE;
sha->sas_addr = &pm8001_ha->sas_addr[0];
sha->num_phys = chip_info->n_phy;
- sha->core.shost = shost;
+ sha->shost = shost;
}
/**
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 953572fc0d9e..2fadd353f1c1 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -702,8 +702,6 @@ int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb);
int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb);
int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
-struct sas_task *pm8001_alloc_task(void);
-void pm8001_free_task(struct sas_task *task);
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag);
struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
u32 device_id);
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 39a12ee94a72..f6857632dc7c 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -4316,9 +4316,6 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
ssp_cmd.tag = cpu_to_le32(tag);
- if (task->ssp_task.enable_first_burst)
- ssp_cmd.ssp_iu.efb_prio_attr = 0x80;
- ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
@@ -4457,7 +4454,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
u64 phys_addr, end_addr;
u32 end_addr_high, end_addr_low;
u32 ATAP = 0x0;
- u32 dir;
+ u32 dir, retfis = 0;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
memset(&sata_cmd, 0, sizeof(sata_cmd));
@@ -4487,7 +4484,8 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
sata_cmd.tag = cpu_to_le32(tag);
sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);
-
+ if (task->ata_task.return_fis_on_success)
+ retfis = 1;
sata_cmd.sata_fis = task->ata_task.fis;
if (likely(!task->ata_task.device_control_reg_update))
sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */
@@ -4500,12 +4498,10 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
"Encryption enabled.Sending Encrypt SATA cmd 0x%x\n",
sata_cmd.sata_fis.command);
opc = OPC_INB_SATA_DIF_ENC_IO;
-
- /* set encryption bit */
- sata_cmd.ncqtag_atap_dir_m_dad =
- cpu_to_le32(((ncg_tag & 0xff)<<16)|
- ((ATAP & 0x3f) << 10) | 0x20 | dir);
- /* dad (bit 0-1) is 0 */
+ /* set encryption bit; dad (bits 0-1) is 0 */
+ sata_cmd.retfis_ncqtag_atap_dir_m_dad =
+ cpu_to_le32((retfis << 24) | ((ncg_tag & 0xff) << 16) |
+ ((ATAP & 0x3f) << 10) | 0x20 | dir);
/* fill in PRD (scatter/gather) table, if any */
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter,
@@ -4568,11 +4564,10 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
pm8001_dbg(pm8001_ha, IO,
"Sending Normal SATA command 0x%x inb %x\n",
sata_cmd.sata_fis.command, q_index);
- /* dad (bit 0-1) is 0 */
- sata_cmd.ncqtag_atap_dir_m_dad =
- cpu_to_le32(((ncg_tag & 0xff)<<16) |
- ((ATAP & 0x3f) << 10) | dir);
-
+ /* dad (bits 0-1) is 0 */
+ sata_cmd.retfis_ncqtag_atap_dir_m_dad =
+ cpu_to_le32((retfis << 24) | ((ncg_tag & 0xff) << 16) |
+ ((ATAP & 0x3f) << 10) | dir);
/* fill in PRD (scatter/gather) table, if any */
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter,
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index acf6e3005b84..eb8fd37b2066 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -731,7 +731,7 @@ struct sata_start_req {
__le32 tag;
__le32 device_id;
__le32 data_len;
- __le32 ncqtag_atap_dir_m_dad;
+ __le32 retfis_ncqtag_atap_dir_m_dad;
struct host_to_dev_fis sata_fis;
u32 reserved1;
u32 reserved2; /* dword 11. rsvd for normal I/O. */
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 9415a4819470..50dc30051f22 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3584,8 +3584,7 @@ static ssize_t pmcraid_show_adapter_id(
struct Scsi_Host *shost = class_to_shost(dev);
struct pmcraid_instance *pinstance =
(struct pmcraid_instance *)shost->hostdata;
- u32 adapter_id = (pinstance->pdev->bus->number << 8) |
- pinstance->pdev->devfn;
+ u32 adapter_id = pci_dev_id(pinstance->pdev);
u32 aen_group = pmcraid_event_family.id;
return snprintf(buf, PAGE_SIZE,
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 909c49541984..19f0b93fa3d8 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -45,6 +45,11 @@ typedef struct {
#include "ppa.h"
+static unsigned int mode = PPA_AUTODETECT;
+module_param(mode, uint, 0644);
+MODULE_PARM_DESC(mode, "Transfer mode (0 = Autodetect, 1 = SPP 4-bit, "
+ "2 = SPP 8-bit, 3 = EPP 8-bit, 4 = EPP 16-bit, 5 = EPP 32-bit");
+
static struct scsi_pointer *ppa_scsi_pointer(struct scsi_cmnd *cmd)
{
return scsi_cmd_priv(cmd);
@@ -157,7 +162,7 @@ static int ppa_show_info(struct seq_file *m, struct Scsi_Host *host)
return 0;
}
-static int device_check(ppa_struct *dev);
+static int device_check(ppa_struct *dev, bool autodetect);
#if PPA_DEBUG > 0
#define ppa_fail(x,y) printk("ppa: ppa_fail(%i) from %s at line %d\n",\
@@ -302,13 +307,10 @@ static int ppa_out(ppa_struct *dev, char *buffer, int len)
case PPA_EPP_8:
epp_reset(ppb);
w_ctr(ppb, 0x4);
-#ifdef CONFIG_SCSI_IZIP_EPP16
- if (!(((long) buffer | len) & 0x01))
- outsw(ppb + 4, buffer, len >> 1);
-#else
- if (!(((long) buffer | len) & 0x03))
+		if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x03))
outsl(ppb + 4, buffer, len >> 2);
-#endif
+		else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x01))
+ outsw(ppb + 4, buffer, len >> 1);
else
outsb(ppb + 4, buffer, len);
w_ctr(ppb, 0xc);
@@ -355,13 +357,10 @@ static int ppa_in(ppa_struct *dev, char *buffer, int len)
case PPA_EPP_8:
epp_reset(ppb);
w_ctr(ppb, 0x24);
-#ifdef CONFIG_SCSI_IZIP_EPP16
- if (!(((long) buffer | len) & 0x01))
- insw(ppb + 4, buffer, len >> 1);
-#else
- if (!(((long) buffer | len) & 0x03))
+ if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x03))
insl(ppb + 4, buffer, len >> 2);
-#endif
+ else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x01))
+ insw(ppb + 4, buffer, len >> 1);
else
insb(ppb + 4, buffer, len);
w_ctr(ppb, 0x2c);
@@ -469,6 +468,27 @@ static int ppa_init(ppa_struct *dev)
{
int retv;
unsigned short ppb = dev->base;
+ bool autodetect = dev->mode == PPA_AUTODETECT;
+
+ if (autodetect) {
+ int modes = dev->dev->port->modes;
+ int ppb_hi = dev->dev->port->base_hi;
+
+ /* Mode detection works up the chain of speed
+ * This avoids a nasty if-then-else-if-... tree
+ */
+ dev->mode = PPA_NIBBLE;
+
+ if (modes & PARPORT_MODE_TRISTATE)
+ dev->mode = PPA_PS2;
+
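+		/* w_ecr() programs the ECP Extended Control Register:
+		 * 0x20 selects byte (PS/2) mode, 0x80 selects EPP mode.
+		 */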
+ if (modes & PARPORT_MODE_ECP) {
+ w_ecr(ppb_hi, 0x20);
+ dev->mode = PPA_PS2;
+ }
+ if ((modes & PARPORT_MODE_EPP) && (modes & PARPORT_MODE_ECP))
+ w_ecr(ppb_hi, 0x80);
+ }
ppa_disconnect(dev);
ppa_connect(dev, CONNECT_NORMAL);
@@ -492,7 +512,7 @@ static int ppa_init(ppa_struct *dev)
if (retv)
return -EIO;
- return device_check(dev);
+ return device_check(dev, autodetect);
}
static inline int ppa_send_command(struct scsi_cmnd *cmd)
@@ -637,7 +657,7 @@ static void ppa_interrupt(struct work_struct *work)
case DID_OK:
break;
case DID_NO_CONNECT:
- printk(KERN_DEBUG "ppa: no device at SCSI ID %i\n", cmd->device->target);
+ printk(KERN_DEBUG "ppa: no device at SCSI ID %i\n", scmd_id(cmd));
break;
case DID_BUS_BUSY:
printk(KERN_DEBUG "ppa: BUS BUSY - EPP timeout detected\n");
@@ -883,7 +903,7 @@ static int ppa_reset(struct scsi_cmnd *cmd)
return SUCCESS;
}
-static int device_check(ppa_struct *dev)
+static int device_check(ppa_struct *dev, bool autodetect)
{
/* This routine looks for a device and then attempts to use EPP
to send a command. If all goes as planned then EPP is available. */
@@ -895,8 +915,8 @@ static int device_check(ppa_struct *dev)
old_mode = dev->mode;
for (loop = 0; loop < 8; loop++) {
/* Attempt to use EPP for Test Unit Ready */
- if ((ppb & 0x0007) == 0x0000)
- dev->mode = PPA_EPP_32;
+ if (autodetect && (ppb & 0x0007) == 0x0000)
+ dev->mode = PPA_EPP_8;
second_pass:
ppa_connect(dev, CONNECT_EPP_MAYBE);
@@ -924,7 +944,7 @@ second_pass:
udelay(1000);
ppa_disconnect(dev);
udelay(1000);
- if (dev->mode == PPA_EPP_32) {
+ if (dev->mode != old_mode) {
dev->mode = old_mode;
goto second_pass;
}
@@ -947,7 +967,7 @@ second_pass:
udelay(1000);
ppa_disconnect(dev);
udelay(1000);
- if (dev->mode == PPA_EPP_32) {
+ if (dev->mode != old_mode) {
dev->mode = old_mode;
goto second_pass;
}
@@ -1026,7 +1046,6 @@ static int __ppa_attach(struct parport *pb)
DEFINE_WAIT(wait);
ppa_struct *dev, *temp;
int ports;
- int modes, ppb, ppb_hi;
int err = -ENOMEM;
struct pardev_cb ppa_cb;
@@ -1034,7 +1053,7 @@ static int __ppa_attach(struct parport *pb)
if (!dev)
return -ENOMEM;
dev->base = -1;
- dev->mode = PPA_AUTODETECT;
+ dev->mode = mode < PPA_UNKNOWN ? mode : PPA_AUTODETECT;
dev->recon_tmo = PPA_RECON_TMO;
init_waitqueue_head(&waiting);
temp = find_parent();
@@ -1069,25 +1088,8 @@ static int __ppa_attach(struct parport *pb)
}
dev->waiting = NULL;
finish_wait(&waiting, &wait);
- ppb = dev->base = dev->dev->port->base;
- ppb_hi = dev->dev->port->base_hi;
- w_ctr(ppb, 0x0c);
- modes = dev->dev->port->modes;
-
- /* Mode detection works up the chain of speed
- * This avoids a nasty if-then-else-if-... tree
- */
- dev->mode = PPA_NIBBLE;
-
- if (modes & PARPORT_MODE_TRISTATE)
- dev->mode = PPA_PS2;
-
- if (modes & PARPORT_MODE_ECP) {
- w_ecr(ppb_hi, 0x20);
- dev->mode = PPA_PS2;
- }
- if ((modes & PARPORT_MODE_EPP) && (modes & PARPORT_MODE_ECP))
- w_ecr(ppb_hi, 0x80);
+ dev->base = dev->dev->port->base;
+ w_ctr(dev->base, 0x0c);
/* Done configuration */
diff --git a/drivers/scsi/ppa.h b/drivers/scsi/ppa.h
index 6a1f8a2d70eb..098bcf7b9eb4 100644
--- a/drivers/scsi/ppa.h
+++ b/drivers/scsi/ppa.h
@@ -107,11 +107,7 @@ static char *PPA_MODE_STRING[] =
"PS/2",
"EPP 8 bit",
"EPP 16 bit",
-#ifdef CONFIG_SCSI_IZIP_EPP16
- "EPP 16 bit",
-#else
"EPP 32 bit",
-#endif
"Unknown"};
/* other options */
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index c5c0bbdafc4e..1619cc33034f 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -548,7 +548,6 @@ extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data);
extern void qedf_wq_grcdump(struct work_struct *work);
void qedf_stag_change_work(struct work_struct *work);
void qedf_ctx_soft_reset(struct fc_lport *lport);
-extern void qedf_board_disable_work(struct work_struct *work);
extern void qedf_schedule_hw_err_handler(void *dev,
enum qed_hw_err_type err_type);
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index f4d81127239e..5ec2b817c694 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -59,6 +59,8 @@ extern uint qedf_debug;
#define QEDF_LOG_NOTICE 0x40000000 /* Notice logs */
#define QEDF_LOG_WARN 0x80000000 /* Warning logs */
+#define QEDF_DEBUGFS_LOG_LEN (2 * PAGE_SIZE)
+
/* Debug context structure */
struct qedf_dbg_ctx {
unsigned int host_no;
diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
index a3ed681c8ce3..451fd236bfd0 100644
--- a/drivers/scsi/qedf/qedf_debugfs.c
+++ b/drivers/scsi/qedf/qedf_debugfs.c
@@ -8,6 +8,7 @@
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/module.h>
+#include <linux/vmalloc.h>
#include "qedf.h"
#include "qedf_dbg.h"
@@ -98,7 +99,9 @@ static ssize_t
qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count,
loff_t *ppos)
{
+ ssize_t ret;
size_t cnt = 0;
+ char *cbuf;
int id;
struct qedf_fastpath *fp = NULL;
struct qedf_dbg_ctx *qedf_dbg =
@@ -108,19 +111,25 @@ qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count,
QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
- cnt = sprintf(buffer, "\nFastpath I/O completions\n\n");
+ cbuf = vmalloc(QEDF_DEBUGFS_LOG_LEN);
+ if (!cbuf)
+ return 0;
+
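+	/* Build the report in a scratch buffer; simple_read_from_buffer()
+	 * below handles the copy to user space and the *ppos update.
+	 */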
+ cnt += scnprintf(cbuf + cnt, QEDF_DEBUGFS_LOG_LEN - cnt, "\nFastpath I/O completions\n\n");
for (id = 0; id < qedf->num_queues; id++) {
fp = &(qedf->fp_array[id]);
if (fp->sb_id == QEDF_SB_ID_NULL)
continue;
- cnt += sprintf((buffer + cnt), "#%d: %lu\n", id,
- fp->completions);
+ cnt += scnprintf(cbuf + cnt, QEDF_DEBUGFS_LOG_LEN - cnt,
+ "#%d: %lu\n", id, fp->completions);
}
- cnt = min_t(int, count, cnt - *ppos);
- *ppos += cnt;
- return cnt;
+ ret = simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
+
+ vfree(cbuf);
+
+ return ret;
}
static ssize_t
@@ -138,15 +147,14 @@ qedf_dbg_debug_cmd_read(struct file *filp, char __user *buffer, size_t count,
loff_t *ppos)
{
int cnt;
+ char cbuf[32];
struct qedf_dbg_ctx *qedf_dbg =
(struct qedf_dbg_ctx *)filp->private_data;
QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "debug mask=0x%x\n", qedf_debug);
- cnt = sprintf(buffer, "debug mask = 0x%x\n", qedf_debug);
+ cnt = scnprintf(cbuf, sizeof(cbuf), "debug mask = 0x%x\n", qedf_debug);
- cnt = min_t(int, count, cnt - *ppos);
- *ppos += cnt;
- return cnt;
+ return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
}
static ssize_t
@@ -185,18 +193,17 @@ qedf_dbg_stop_io_on_error_cmd_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
int cnt;
+ char cbuf[7];
struct qedf_dbg_ctx *qedf_dbg =
(struct qedf_dbg_ctx *)filp->private_data;
struct qedf_ctx *qedf = container_of(qedf_dbg,
struct qedf_ctx, dbg_ctx);
QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
- cnt = sprintf(buffer, "%s\n",
+ cnt = scnprintf(cbuf, sizeof(cbuf), "%s\n",
qedf->stop_io_on_error ? "true" : "false");
- cnt = min_t(int, count, cnt - *ppos);
- *ppos += cnt;
- return cnt;
+ return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
}
static ssize_t
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index 0e316cc24b19..772218445a56 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -67,8 +67,6 @@ void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id);
u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl);
void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id);
-int qedi_create_sysfs_ctx_attr(struct qedi_ctx *qedi);
-void qedi_remove_sysfs_ctx_attr(struct qedi_ctx *qedi);
void qedi_clearsq(struct qedi_ctx *qedi,
struct qedi_conn *qedi_conn,
struct iscsi_task *task);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index b00222459607..44449c70a375 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -3093,8 +3093,6 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_user, vha, 0x7082,
"Registered for DIF/DIX type 1 and 3 protection.\n");
- if (ql2xenabledif == 1)
- prot = SHOST_DIX_TYPE0_PROTECTION;
scsi_host_set_prot(vha->host,
prot | SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index d7e8454304ce..691ef827a5ab 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -12,13 +12,12 @@
* ----------------------------------------------------------------------
* | Module Init and Probe | 0x0199 | |
* | Mailbox commands | 0x1206 | 0x11a5-0x11ff |
- * | Device Discovery | 0x2134 | 0x210e-0x2115 |
- * | | | 0x211c-0x2128 |
- * | | | 0x212c-0x2134 |
+ * | Device Discovery | 0x2134 | 0x2112-0x2115 |
+ * | | | 0x2127-0x2128 |
* | Queue Command and IO tracing | 0x3074 | 0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
- * | | | 0x302d,0x3033 |
+ * | | | 0x302e,0x3033 |
* | | | 0x3036,0x3038 |
* | | | 0x303a |
* | DPC Thread | 0x4023 | 0x4002,0x4013 |
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 70482b55d240..54f0a412226f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -368,6 +368,7 @@ ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
#define ql_dbg_tgt_dif 0x00000800 /* Target mode dif */
#define ql_dbg_edif 0x00000400 /* edif and purex debug */
+#define ql_dbg_unsol 0x00000100 /* Unsolicited path debug */
extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
uint32_t, void **);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 4ae38305c15a..deb642607deb 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -346,6 +346,12 @@ struct name_list_extended {
u8 sent;
};
+struct qla_nvme_fc_rjt {
+ struct fcnvme_ls_rjt *c;
+ dma_addr_t cdma;
+ u16 size;
+};
+
struct els_reject {
struct fc_els_ls_rjt *c;
dma_addr_t cdma;
@@ -466,6 +472,7 @@ static inline be_id_t port_id_to_be_id(port_id_t port_id)
}
struct tmf_arg {
+ struct list_head tmf_elem;
struct qla_qpair *qpair;
struct fc_port *fcport;
struct scsi_qla_host *vha;
@@ -502,6 +509,20 @@ struct ct_arg {
port_id_t id;
};
+struct qla_nvme_lsrjt_pt_arg {
+ struct fc_port *fcport;
+ u8 opcode;
+ u8 vp_idx;
+ u8 reason;
+ u8 explanation;
+ __le16 nport_handle;
+ u16 control_flags;
+ __le16 ox_id;
+ __le32 xchg_address;
+ u32 tx_byte_count, rx_byte_count;
+ dma_addr_t tx_addr, rx_addr;
+};
+
/*
* SRB extensions.
*/
@@ -610,13 +631,16 @@ struct srb_iocb {
void *desc;
/* These are only used with ls4 requests */
- int cmd_len;
- int rsp_len;
+ __le32 cmd_len;
+ __le32 rsp_len;
dma_addr_t cmd_dma;
dma_addr_t rsp_dma;
enum nvmefc_fcp_datadir dir;
uint32_t dl;
uint32_t timeout_sec;
+ __le32 exchange_address;
+ __le16 nport_handle;
+ __le16 ox_id;
struct list_head entry;
} nvme;
struct {
@@ -706,6 +730,10 @@ typedef struct srb {
struct fc_port *fcport;
struct scsi_qla_host *vha;
unsigned int start_timer:1;
+ unsigned int abort:1;
+ unsigned int aborted:1;
+ unsigned int completed:1;
+ unsigned int unsol_rsp:1;
uint32_t handle;
uint16_t flags;
@@ -2541,7 +2569,7 @@ enum rscn_addr_format {
typedef struct fc_port {
struct list_head list;
struct scsi_qla_host *vha;
- struct list_head tmf_pending;
+ struct list_head unsol_ctx_head;
unsigned int conf_compl_supported:1;
unsigned int deleted:2;
@@ -2562,9 +2590,6 @@ typedef struct fc_port {
unsigned int do_prli_nvme:1;
uint8_t nvme_flag;
- uint8_t active_tmf;
-#define MAX_ACTIVE_TMF 8
-
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
port_id_t d_id;
@@ -3745,6 +3770,16 @@ struct qla_fw_resources {
u16 pad;
};
+struct qla_fw_res {
+ u16 iocb_total;
+ u16 iocb_limit;
+ atomic_t iocb_used;
+
+ u16 exch_total;
+ u16 exch_limit;
+ atomic_t exch_used;
+};
+
#define QLA_IOCB_PCT_LIMIT 95
struct qla_buf_pool {
@@ -3790,6 +3825,12 @@ struct qla_qpair {
uint16_t id; /* qp number used with FW */
uint16_t vp_idx; /* vport ID */
+
+ uint16_t dsd_inuse;
+ uint16_t dsd_avail;
+ struct list_head dsd_list;
+#define NUM_DSD_CHAIN 4096
+
mempool_t *srb_mempool;
struct pci_dev *pdev;
@@ -4387,7 +4428,6 @@ struct qla_hw_data {
uint8_t aen_mbx_count;
atomic_t num_pend_mbx_stage1;
atomic_t num_pend_mbx_stage2;
- atomic_t num_pend_mbx_stage3;
uint16_t frame_payload_size;
uint32_t login_retry_count;
@@ -4656,6 +4696,8 @@ struct qla_hw_data {
uint32_t flt_region_aux_img_status_sec;
};
uint8_t active_image;
+ uint8_t active_tmf;
+#define MAX_ACTIVE_TMF 8
/* Needed for BEACON */
uint16_t beacon_blink_led;
@@ -4670,6 +4712,8 @@ struct qla_hw_data {
struct qla_msix_entry *msix_entries;
+ struct list_head tmf_pending;
+ struct list_head tmf_active;
struct list_head vp_list; /* list of VP */
unsigned long vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) /
sizeof(unsigned long)];
@@ -4713,11 +4757,6 @@ struct qla_hw_data {
struct fw_blob *hablob;
struct qla82xx_legacy_intr_set nx_legacy_intr;
- uint16_t gbl_dsd_inuse;
- uint16_t gbl_dsd_avail;
- struct list_head gbl_dsd_list;
-#define NUM_DSD_CHAIN 4096
-
uint8_t fw_type;
uint32_t file_prd_off; /* File firmware product offset */
@@ -4799,6 +4838,8 @@ struct qla_hw_data {
struct els_reject elsrej;
u8 edif_post_stop_cnt_down;
struct qla_vp_map *vp_map;
+ struct qla_nvme_fc_rjt lsrjt;
+ struct qla_fw_res fwres ____cacheline_aligned;
};
#define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES))
@@ -4831,6 +4872,7 @@ struct active_regions {
* is variable) starting at "iocb".
*/
struct purex_item {
+ void *purls_context;
struct list_head list;
struct scsi_qla_host *vha;
void (*process_item)(struct scsi_qla_host *vha,
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 1925cc6897b6..f060e593685d 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -276,6 +276,16 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
seq_printf(s, "estimate exchange used[%d] high water limit [%d] n",
exch_used, ha->base_qpair->fwres.exch_limit);
+
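+	/* With ql2xenforce_iocb_limit == 2 the driver also tracks
+	 * adapter-wide usage in ha->fwres; report those counters too.
+	 */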
+ if (ql2xenforce_iocb_limit == 2) {
+ iocbs_used = atomic_read(&ha->fwres.iocb_used);
+ exch_used = atomic_read(&ha->fwres.exch_used);
+ seq_printf(s, " estimate iocb2 used [%d] high water limit [%d]\n",
+ iocbs_used, ha->fwres.iocb_limit);
+
+ seq_printf(s, " estimate exchange2 used[%d] high water limit [%d] \n",
+ exch_used, ha->fwres.exch_limit);
+ }
}
return 0;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index ba7831f24734..09cb9413670a 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -48,8 +48,6 @@ extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *, bool);
extern void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha,
struct els_plogi *els_plogi);
-extern void qla2x00_update_fcports(scsi_qla_host_t *);
-
extern int qla2x00_abort_isp(scsi_qla_host_t *);
extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *);
extern void qla2x00_quiesce_io(scsi_qla_host_t *);
@@ -143,6 +141,7 @@ void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess);
void qla_edif_clear_appdata(struct scsi_qla_host *vha,
struct fc_port *fcport);
const char *sc_to_str(uint16_t cmd);
+void qla_adjust_iocb_limit(scsi_qla_host_t *vha);
/*
* Global Data in qla_os.c source file.
@@ -205,8 +204,6 @@ extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
-extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *,
- fc_port_t *, uint16_t *);
extern int qla2x00_set_exlogins_buffer(struct scsi_qla_host *);
extern void qla2x00_free_exlogin_buffer(struct qla_hw_data *);
extern int qla2x00_set_exchoffld_buffer(struct scsi_qla_host *);
@@ -216,7 +213,6 @@ extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
extern struct scsi_qla_host *qla2x00_create_host(const struct scsi_host_template *,
struct qla_hw_data *);
-extern void qla2x00_free_host(struct scsi_qla_host *);
extern void qla2x00_relogin(struct scsi_qla_host *);
extern void qla2x00_do_work(struct scsi_qla_host *);
extern void qla2x00_free_fcports(struct scsi_qla_host *);
@@ -238,13 +234,10 @@ extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
-extern void qla_eeh_work(struct work_struct *);
extern void qla2x00_sp_compl(srb_t *sp, int);
extern void qla2xxx_qpair_sp_free_dma(srb_t *sp);
extern void qla2xxx_qpair_sp_compl(srb_t *sp, int);
extern void qla24xx_sched_upd_fcport(fc_port_t *);
-void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
- uint16_t *);
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
@@ -610,7 +603,11 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id);
fc_port_t *qla2x00_find_fcport_by_loopid(scsi_qla_host_t *, uint16_t);
fc_port_t *qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *, u8 *, u8);
fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8);
-void __qla_consume_iocb(struct scsi_qla_host *vha, void **pkt, struct rsp_que **rsp);
+void qla24xx_queue_purex_item(scsi_qla_host_t *, struct purex_item *,
+ void (*process_item)(struct scsi_qla_host *,
+ struct purex_item *));
+void __qla_consume_iocb(struct scsi_qla_host *, void **, struct rsp_que **);
+void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp);
/*
* Global Function Prototypes in qla_sup.c source file.
@@ -673,9 +670,11 @@ extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *);
extern int qla2x00_mailbox_passthru(struct bsg_job *bsg_job);
-int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha, void **pkt,
- struct rsp_que **rsp, u8 *buf, u32 buf_len);
-
+int qla2x00_sys_ld_info(struct bsg_job *bsg_job);
+int __qla_copy_purex_to_buffer(struct scsi_qla_host *, void **,
+ struct rsp_que **, u8 *, u32);
+struct purex_item *qla27xx_copy_multiple_pkt(struct scsi_qla_host *vha,
+ void **pkt, struct rsp_que **rsp, bool is_purls, bool byte_order);
int qla_mailbox_passthru(scsi_qla_host_t *vha, uint16_t *mbx_in,
uint16_t *mbx_out);
@@ -728,7 +727,6 @@ int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *);
int qla2x00_mgmt_svr_login(scsi_qla_host_t *);
-void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea);
int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool);
int qla24xx_async_gpnft(scsi_qla_host_t *, u8, srb_t *);
void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *);
@@ -851,7 +849,6 @@ extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
/* Interrupt related */
extern irqreturn_t qla82xx_intr_handler(int, void *);
-extern irqreturn_t qla82xx_msi_handler(int, void *);
extern irqreturn_t qla82xx_msix_default(int, void *);
extern irqreturn_t qla82xx_msix_rsp_q(int, void *);
extern void qla82xx_enable_intrs(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 367fba27fe69..a314cfc5b263 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -508,6 +508,7 @@ static
void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
struct fc_port *fcport = ea->fcport;
+ unsigned long flags;
ql_dbg(ql_dbg_disc, vha, 0x20d2,
"%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
@@ -522,9 +523,15 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
ql_dbg(ql_dbg_disc, vha, 0x2066,
"%s %8phC: adisc fail: post delete\n",
__func__, ea->fcport->port_name);
+
+ spin_lock_irqsave(&vha->work_lock, flags);
/* deleted = 0 & logout_on_delete = force fw cleanup */
- fcport->deleted = 0;
+ if (fcport->deleted == QLA_SESS_DELETED)
+ fcport->deleted = 0;
+
fcport->logout_on_delete = 1;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
qlt_schedule_sess_for_deletion(ea->fcport);
return;
}
@@ -1134,7 +1141,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
u16 *mb;
if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
- return rval;
+ goto done;
ql_dbg(ql_dbg_disc, vha, 0x20d9,
"Async-gnlist WWPN %8phC \n", fcport->port_name);
@@ -1188,8 +1195,9 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
done_free_sp:
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ fcport->flags &= ~(FCF_ASYNC_SENT);
done:
- fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
+ fcport->flags &= ~(FCF_ASYNC_ACTIVE);
return rval;
}
@@ -1446,7 +1454,6 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
ea->fcport->login_gen++;
- ea->fcport->deleted = 0;
ea->fcport->logout_on_delete = 1;
if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
@@ -1996,12 +2003,11 @@ qla2x00_tmf_iocb_timeout(void *data)
int rc, h;
unsigned long flags;
- if (sp->type == SRB_MARKER) {
- complete(&tmf->u.tmf.comp);
- return;
- }
+ if (sp->type == SRB_MARKER)
+ rc = QLA_FUNCTION_FAILED;
+ else
+ rc = qla24xx_async_abort_cmd(sp, false);
- rc = qla24xx_async_abort_cmd(sp, false);
if (rc) {
spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
@@ -2032,10 +2038,14 @@ static void qla_marker_sp_done(srb_t *sp, int res)
complete(&tmf->u.tmf.comp);
}
-#define START_SP_W_RETRIES(_sp, _rval) \
+#define START_SP_W_RETRIES(_sp, _rval, _chip_gen, _login_gen) \
{\
int cnt = 5; \
do { \
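+		/* give up if the chip was reset or the session logged in again
+		 * since the caller sampled the generation counters */ \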
+ if (_chip_gen != sp->vha->hw->chip_reset || _login_gen != sp->fcport->login_gen) {\
+ _rval = EINVAL; \
+ break; \
+ } \
_rval = qla2x00_start_sp(_sp); \
if (_rval == EAGAIN) \
msleep(1); \
@@ -2058,6 +2068,7 @@ qla26xx_marker(struct tmf_arg *arg)
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
fc_port_t *fcport = arg->fcport;
+ u32 chip_gen, login_gen;
if (TMF_NOT_READY(arg->fcport)) {
ql_dbg(ql_dbg_taskm, vha, 0x8039,
@@ -2067,6 +2078,9 @@ qla26xx_marker(struct tmf_arg *arg)
return QLA_SUSPENDED;
}
+ chip_gen = vha->hw->chip_reset;
+ login_gen = fcport->login_gen;
+
/* ref: INIT */
sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
if (!sp)
@@ -2084,7 +2098,7 @@ qla26xx_marker(struct tmf_arg *arg)
tm_iocb->u.tmf.loop_id = fcport->loop_id;
tm_iocb->u.tmf.vp_index = vha->vp_idx;
- START_SP_W_RETRIES(sp, rval);
+ START_SP_W_RETRIES(sp, rval, chip_gen, login_gen);
ql_dbg(ql_dbg_taskm, vha, 0x8006,
"Async-marker hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
@@ -2123,6 +2137,17 @@ static void qla2x00_tmf_sp_done(srb_t *sp, int res)
complete(&tmf->u.tmf.comp);
}
+static int qla_tmf_wait(struct tmf_arg *arg)
+{
+ /* there are only 2 types of error handling that reaches here, lun or target reset */
+ if (arg->flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET | TCF_CLEAR_TASK_SET))
+ return qla2x00_eh_wait_for_pending_commands(arg->vha,
+ arg->fcport->d_id.b24, arg->lun, WAIT_LUN);
+ else
+ return qla2x00_eh_wait_for_pending_commands(arg->vha,
+ arg->fcport->d_id.b24, arg->lun, WAIT_TARGET);
+}
+
static int
__qla2x00_async_tm_cmd(struct tmf_arg *arg)
{
@@ -2130,8 +2155,9 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
struct srb_iocb *tm_iocb;
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
-
fc_port_t *fcport = arg->fcport;
+ u32 chip_gen, login_gen;
+ u64 jif;
if (TMF_NOT_READY(arg->fcport)) {
ql_dbg(ql_dbg_taskm, vha, 0x8032,
@@ -2141,6 +2167,9 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
return QLA_SUSPENDED;
}
+ chip_gen = vha->hw->chip_reset;
+ login_gen = fcport->login_gen;
+
/* ref: INIT */
sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
if (!sp)
@@ -2158,7 +2187,7 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
tm_iocb->u.tmf.flags = arg->flags;
tm_iocb->u.tmf.lun = arg->lun;
- START_SP_W_RETRIES(sp, rval);
+ START_SP_W_RETRIES(sp, rval, chip_gen, login_gen);
ql_dbg(ql_dbg_taskm, vha, 0x802f,
"Async-tmf hdl=%x loop-id=%x portid=%06x ctrl=%x lun=%lld qp=%d rval=%x.\n",
@@ -2176,8 +2205,26 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
"TM IOCB failed (%x).\n", rval);
}
- if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw))
- rval = qla26xx_marker(arg);
+ if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
+ jif = jiffies;
+ if (qla_tmf_wait(arg)) {
+ ql_log(ql_log_info, vha, 0x803e,
+ "Waited %u ms Nexus=%ld:%06x:%llu.\n",
+ jiffies_to_msecs(jiffies - jif), vha->host_no,
+ fcport->d_id.b24, arg->lun);
+ }
+
+ if (chip_gen == vha->hw->chip_reset && login_gen == fcport->login_gen) {
+ rval = qla26xx_marker(arg);
+ } else {
+ ql_log(ql_log_info, vha, 0x803e,
+ "Skip Marker due to disruption. Nexus=%ld:%06x:%llu.\n",
+ vha->host_no, fcport->d_id.b24, arg->lun);
+ rval = QLA_FUNCTION_FAILED;
+ }
+ }
+ if (tm_iocb->u.tmf.data)
+ rval = tm_iocb->u.tmf.data;
done_free_sp:
/* ref: INIT */
@@ -2186,30 +2233,42 @@ done:
return rval;
}
-static void qla_put_tmf(fc_port_t *fcport)
+static void qla_put_tmf(struct tmf_arg *arg)
{
- struct scsi_qla_host *vha = fcport->vha;
+ struct scsi_qla_host *vha = arg->vha;
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- fcport->active_tmf--;
+ ha->active_tmf--;
+ list_del(&arg->tmf_elem);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
static
-int qla_get_tmf(fc_port_t *fcport)
+int qla_get_tmf(struct tmf_arg *arg)
{
- struct scsi_qla_host *vha = fcport->vha;
+ struct scsi_qla_host *vha = arg->vha;
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
+ fc_port_t *fcport = arg->fcport;
int rc = 0;
- LIST_HEAD(tmf_elem);
+ struct tmf_arg *t;
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- list_add_tail(&tmf_elem, &fcport->tmf_pending);
+ list_for_each_entry(t, &ha->tmf_active, tmf_elem) {
+ if (t->fcport == arg->fcport && t->lun == arg->lun) {
+ /* reject duplicate TMF */
+ ql_log(ql_log_warn, vha, 0x802c,
+ "found duplicate TMF. Nexus=%ld:%06x:%llu.\n",
+ vha->host_no, fcport->d_id.b24, arg->lun);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ return -EINVAL;
+ }
+ }
- while (fcport->active_tmf >= MAX_ACTIVE_TMF) {
+ list_add_tail(&arg->tmf_elem, &ha->tmf_pending);
+ while (ha->active_tmf >= MAX_ACTIVE_TMF) {
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
msleep(1);
@@ -2221,15 +2280,17 @@ int qla_get_tmf(fc_port_t *fcport)
rc = EIO;
break;
}
- if (fcport->active_tmf < MAX_ACTIVE_TMF &&
- list_is_first(&tmf_elem, &fcport->tmf_pending))
+ if (ha->active_tmf < MAX_ACTIVE_TMF &&
+ list_is_first(&arg->tmf_elem, &ha->tmf_pending))
break;
}
- list_del(&tmf_elem);
+ list_del(&arg->tmf_elem);
- if (!rc)
- fcport->active_tmf++;
+ if (!rc) {
+ ha->active_tmf++;
+ list_add_tail(&arg->tmf_elem, &ha->tmf_active);
+ }
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
@@ -2241,9 +2302,8 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun,
uint32_t tag)
{
struct scsi_qla_host *vha = fcport->vha;
- struct qla_qpair *qpair;
struct tmf_arg a;
- int i, rval = QLA_SUCCESS;
+ int rval = QLA_SUCCESS;
if (TMF_NOT_READY(fcport))
return QLA_SUSPENDED;
@@ -2251,47 +2311,22 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun,
a.vha = fcport->vha;
a.fcport = fcport;
a.lun = lun;
+ a.flags = flags;
+ INIT_LIST_HEAD(&a.tmf_elem);
+
if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET|TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) {
a.modifier = MK_SYNC_ID_LUN;
-
- if (qla_get_tmf(fcport))
- return QLA_FUNCTION_FAILED;
} else {
a.modifier = MK_SYNC_ID;
}
- if (vha->hw->mqenable) {
- for (i = 0; i < vha->hw->num_qpairs; i++) {
- qpair = vha->hw->queue_pair_map[i];
- if (!qpair)
- continue;
-
- if (TMF_NOT_READY(fcport)) {
- ql_log(ql_log_warn, vha, 0x8026,
- "Unable to send TM due to disruption.\n");
- rval = QLA_SUSPENDED;
- break;
- }
-
- a.qpair = qpair;
- a.flags = flags|TCF_NOTMCMD_TO_TARGET;
- rval = __qla2x00_async_tm_cmd(&a);
- if (rval)
- break;
- }
- }
-
- if (rval)
- goto bailout;
+ if (qla_get_tmf(&a))
+ return QLA_FUNCTION_FAILED;
a.qpair = vha->hw->base_qpair;
- a.flags = flags;
rval = __qla2x00_async_tm_cmd(&a);
-bailout:
- if (a.modifier == MK_SYNC_ID_LUN)
- qla_put_tmf(fcport);
-
+ qla_put_tmf(&a);
return rval;
}
@@ -4147,39 +4182,61 @@ out:
return ha->flags.lr_detected;
}
-void qla_init_iocb_limit(scsi_qla_host_t *vha)
+static void __qla_adjust_iocb_limit(struct qla_qpair *qpair)
{
- u16 i, num_qps;
- u32 limit;
- struct qla_hw_data *ha = vha->hw;
+ u8 num_qps;
+ u16 limit;
+ struct qla_hw_data *ha = qpair->vha->hw;
num_qps = ha->num_qpairs + 1;
limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
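+	/* cap per-queue-pair usage at QLA_IOCB_PCT_LIMIT (95%) of the
+	 * firmware-reported IOCB and exchange totals */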
- ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
- ha->base_qpair->fwres.iocbs_limit = limit;
- ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
- ha->base_qpair->fwres.iocbs_used = 0;
+ qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
+ qpair->fwres.iocbs_limit = limit;
+ qpair->fwres.iocbs_qp_limit = limit / num_qps;
+
+ qpair->fwres.exch_total = ha->orig_fw_xcb_count;
+ qpair->fwres.exch_limit = (ha->orig_fw_xcb_count *
+ QLA_IOCB_PCT_LIMIT) / 100;
+}
- ha->base_qpair->fwres.exch_total = ha->orig_fw_xcb_count;
- ha->base_qpair->fwres.exch_limit = (ha->orig_fw_xcb_count *
- QLA_IOCB_PCT_LIMIT) / 100;
+void qla_init_iocb_limit(scsi_qla_host_t *vha)
+{
+ u8 i;
+ struct qla_hw_data *ha = vha->hw;
+
+ __qla_adjust_iocb_limit(ha->base_qpair);
+ ha->base_qpair->fwres.iocbs_used = 0;
ha->base_qpair->fwres.exch_used = 0;
for (i = 0; i < ha->max_qpairs; i++) {
if (ha->queue_pair_map[i]) {
- ha->queue_pair_map[i]->fwres.iocbs_total =
- ha->orig_fw_iocb_count;
- ha->queue_pair_map[i]->fwres.iocbs_limit = limit;
- ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
- limit / num_qps;
+ __qla_adjust_iocb_limit(ha->queue_pair_map[i]);
ha->queue_pair_map[i]->fwres.iocbs_used = 0;
- ha->queue_pair_map[i]->fwres.exch_total = ha->orig_fw_xcb_count;
- ha->queue_pair_map[i]->fwres.exch_limit =
- (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
ha->queue_pair_map[i]->fwres.exch_used = 0;
}
}
+
+ ha->fwres.iocb_total = ha->orig_fw_iocb_count;
+ ha->fwres.iocb_limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
+ ha->fwres.exch_total = ha->orig_fw_xcb_count;
+ ha->fwres.exch_limit = (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
+
+ atomic_set(&ha->fwres.iocb_used, 0);
+ atomic_set(&ha->fwres.exch_used, 0);
+}
+
+void qla_adjust_iocb_limit(scsi_qla_host_t *vha)
+{
+ u8 i;
+ struct qla_hw_data *ha = vha->hw;
+
+ __qla_adjust_iocb_limit(ha->base_qpair);
+
+ for (i = 0; i < ha->max_qpairs; i++) {
+ if (ha->queue_pair_map[i])
+ __qla_adjust_iocb_limit(ha->queue_pair_map[i]);
+ }
}
/**
@@ -4777,15 +4834,16 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
if (ha->flags.edif_enabled)
mid_init_cb->init_cb.frame_payload_size = cpu_to_le16(ELS_MAX_PAYLOAD);
+ QLA_FW_STARTED(ha);
rval = qla2x00_init_firmware(vha, ha->init_cb_size);
next_check:
if (rval) {
+ QLA_FW_STOPPED(ha);
ql_log(ql_log_fatal, vha, 0x00d2,
"Init Firmware **** FAILED ****.\n");
} else {
ql_dbg(ql_dbg_init, vha, 0x00d3,
"Init Firmware -- success.\n");
- QLA_FW_STARTED(ha);
vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
}
@@ -5506,7 +5564,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
INIT_LIST_HEAD(&fcport->gnl_entry);
INIT_LIST_HEAD(&fcport->list);
- INIT_LIST_HEAD(&fcport->tmf_pending);
+ INIT_LIST_HEAD(&fcport->unsol_ctx_head);
INIT_LIST_HEAD(&fcport->sess_cmd_list);
spin_lock_init(&fcport->sess_cmd_lock);
@@ -5549,7 +5607,7 @@ static void qla_get_login_template(scsi_qla_host_t *vha)
__be32 *q;
memset(ha->init_cb, 0, ha->init_cb_size);
- sz = min_t(int, sizeof(struct fc_els_csp), ha->init_cb_size);
+ sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size);
rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
ha->init_cb, sz);
if (rval != QLA_SUCCESS) {
@@ -6090,6 +6148,8 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
+ unsigned long flags;
+
if (IS_SW_RESV_ADDR(fcport->d_id))
return;
@@ -6099,7 +6159,11 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
fcport->login_retry = vha->hw->login_retry_count;
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+
+ spin_lock_irqsave(&vha->work_lock, flags);
fcport->deleted = 0;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
if (vha->hw->current_topology == ISP_CFG_NL)
fcport->logout_on_delete = 0;
else
@@ -7337,14 +7401,15 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
}
/* purge MBox commands */
- if (atomic_read(&ha->num_pend_mbx_stage3)) {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) {
clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
complete(&ha->mbx_intr_comp);
}
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
i = 0;
- while (atomic_read(&ha->num_pend_mbx_stage3) ||
- atomic_read(&ha->num_pend_mbx_stage2) ||
+ while (atomic_read(&ha->num_pend_mbx_stage2) ||
atomic_read(&ha->num_pend_mbx_stage1)) {
msleep(20);
i++;
@@ -9590,6 +9655,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
qpair->vp_idx = vp_idx;
qpair->fw_started = ha->flags.fw_started;
INIT_LIST_HEAD(&qpair->hints_list);
+ INIT_LIST_HEAD(&qpair->dsd_list);
qpair->chip_reset = ha->base_qpair->chip_reset;
qpair->enable_class_2 = ha->base_qpair->enable_class_2;
qpair->enable_explicit_conf =
@@ -9718,6 +9784,19 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
if (ret != QLA_SUCCESS)
goto fail;
+ if (!list_empty(&qpair->dsd_list)) {
+ struct dsd_dma *dsd_ptr, *tdsd_ptr;
+
+ /* clean up allocated prev pool */
+ list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
+ &qpair->dsd_list, list) {
+ dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
+ dsd_ptr->dsd_list_dma);
+ list_del(&dsd_ptr->list);
+ kfree(dsd_ptr);
+ }
+ }
+
mutex_lock(&ha->mq_lock);
ha->queue_pair_map[qpair->id] = NULL;
clear_bit(qpair->id, ha->qpair_qid_map);
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 0167e85ba058..0556969f6dc1 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -386,6 +386,7 @@ enum {
RESOURCE_IOCB = BIT_0,
RESOURCE_EXCH = BIT_1, /* exchange */
RESOURCE_FORCE = BIT_2,
+ RESOURCE_HA = BIT_3,
};
static inline int
@@ -393,7 +394,7 @@ qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
u16 iocbs_used, i;
u16 exch_used;
- struct qla_hw_data *ha = qp->vha->hw;
+ struct qla_hw_data *ha = qp->hw;
if (!ql2xenforce_iocb_limit) {
iores->res_type = RESOURCE_NONE;
@@ -428,15 +429,69 @@ qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
return -ENOSPC;
}
}
+
+ if (ql2xenforce_iocb_limit == 2) {
+ if ((iores->iocb_cnt + atomic_read(&ha->fwres.iocb_used)) >=
+ ha->fwres.iocb_limit) {
+ iores->res_type = RESOURCE_NONE;
+ return -ENOSPC;
+ }
+
+ if (iores->res_type & RESOURCE_EXCH) {
+ if ((iores->exch_cnt + atomic_read(&ha->fwres.exch_used)) >=
+ ha->fwres.exch_limit) {
+ iores->res_type = RESOURCE_NONE;
+ return -ENOSPC;
+ }
+ }
+ }
+
force:
qp->fwres.iocbs_used += iores->iocb_cnt;
qp->fwres.exch_used += iores->exch_cnt;
+ if (ql2xenforce_iocb_limit == 2) {
+ atomic_add(iores->iocb_cnt, &ha->fwres.iocb_used);
+ atomic_add(iores->exch_cnt, &ha->fwres.exch_used);
+ iores->res_type |= RESOURCE_HA;
+ }
return 0;
}
+/*
+ * Decrement @v by @amount, clamping the result at zero.
+ * @v: pointer of type atomic_t
+ * @amount: amount to decrement from v
+ */
+static void qla_atomic_dtz(atomic_t *v, int amount)
+{
+ int c, old, dec;
+
+ c = atomic_read(v);
+ for (;;) {
+ dec = c - amount;
+ if (unlikely(dec < 0))
+ dec = 0;
+
+ old = atomic_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+}
+
static inline void
qla_put_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
+ struct qla_hw_data *ha = qp->hw;
+
+ if (iores->res_type & RESOURCE_HA) {
+ if (iores->res_type & RESOURCE_IOCB)
+ qla_atomic_dtz(&ha->fwres.iocb_used, iores->iocb_cnt);
+
+ if (iores->res_type & RESOURCE_EXCH)
+ qla_atomic_dtz(&ha->fwres.exch_used, iores->exch_cnt);
+ }
+
if (iores->res_type & RESOURCE_IOCB) {
if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
qp->fwres.iocbs_used -= iores->iocb_cnt;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 730d8609276c..df90169f8244 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -11,6 +11,7 @@
#include <scsi/scsi_tcq.h>
+static int qla_start_scsi_type6(srb_t *sp);
/**
* qla2x00_get_cmd_direction() - Determine control_flag data direction.
* @sp: SCSI command
@@ -590,8 +591,6 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
uint16_t tot_dsds)
{
struct dsd64 *cur_dsd = NULL, *next_dsd;
- scsi_qla_host_t *vha;
- struct qla_hw_data *ha;
struct scsi_cmnd *cmd;
struct scatterlist *cur_seg;
uint8_t avail_dsds;
@@ -613,9 +612,6 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
return 0;
}
- vha = sp->vha;
- ha = vha->hw;
-
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
@@ -636,14 +632,13 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
tot_dsds -= avail_dsds;
dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
- dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
- struct dsd_dma, list);
+ dsd_ptr = list_first_entry(&qpair->dsd_list, struct dsd_dma, list);
next_dsd = dsd_ptr->dsd_addr;
list_del(&dsd_ptr->list);
- ha->gbl_dsd_avail--;
+ qpair->dsd_avail--;
list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
ctx->dsd_use_cnt++;
- ha->gbl_dsd_inuse++;
+ qpair->dsd_inuse++;
if (first_iocb) {
first_iocb = 0;
@@ -1722,6 +1717,8 @@ qla24xx_dif_start_scsi(srb_t *sp)
if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
if (cmd->cmd_len <= 16)
return qla24xx_start_scsi(sp);
+ else
+ return qla_start_scsi_type6(sp);
}
/* Setup device pointers. */
@@ -2101,6 +2098,8 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
if (cmd->cmd_len <= 16)
return qla2xxx_start_scsi_mq(sp);
+ else
+ return qla_start_scsi_type6(sp);
}
spin_lock_irqsave(&qpair->qp_lock, flags);
@@ -3368,6 +3367,7 @@ qla82xx_start_scsi(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
+ struct qla_qpair *qpair = sp->qpair;
/* Setup device pointers. */
reg = &ha->iobase->isp82;
@@ -3416,18 +3416,18 @@ qla82xx_start_scsi(srb_t *sp)
uint16_t i;
more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
- if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
+ if ((more_dsd_lists + qpair->dsd_inuse) >= NUM_DSD_CHAIN) {
ql_dbg(ql_dbg_io, vha, 0x300d,
"Num of DSD list %d is than %d for cmd=%p.\n",
- more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
+ more_dsd_lists + qpair->dsd_inuse, NUM_DSD_CHAIN,
cmd);
goto queuing_error;
}
- if (more_dsd_lists <= ha->gbl_dsd_avail)
+ if (more_dsd_lists <= qpair->dsd_avail)
goto sufficient_dsds;
else
- more_dsd_lists -= ha->gbl_dsd_avail;
+ more_dsd_lists -= qpair->dsd_avail;
for (i = 0; i < more_dsd_lists; i++) {
dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
@@ -3447,8 +3447,8 @@ qla82xx_start_scsi(srb_t *sp)
"for cmd=%p.\n", cmd);
goto queuing_error;
}
- list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
- ha->gbl_dsd_avail++;
+ list_add_tail(&dsd_ptr->list, &qpair->dsd_list);
+ qpair->dsd_avail++;
}
sufficient_dsds:
@@ -3767,21 +3767,28 @@ qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
nvme = &sp->u.iocb_cmd;
cmd_pkt->entry_type = PT_LS4_REQUEST;
cmd_pkt->entry_count = 1;
- cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
-
cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
- cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+ if (sp->unsol_rsp) {
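+		/* responding to an unsolicited LS: reuse the exchange address and
+		 * N_Port handle from the received request; no receive buffer needed */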
+ cmd_pkt->control_flags =
+ cpu_to_le16(CF_LS4_RESPONDER << CF_LS4_SHIFT);
+ cmd_pkt->nport_handle = nvme->u.nvme.nport_handle;
+ cmd_pkt->exchange_address = nvme->u.nvme.exchange_address;
+ } else {
+ cmd_pkt->control_flags =
+ cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->rx_dseg_count = cpu_to_le16(1);
+ cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
+ cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len;
+ put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
+ }
+
cmd_pkt->tx_dseg_count = cpu_to_le16(1);
- cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
- cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
+ cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
+ cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
-
- cmd_pkt->rx_dseg_count = cpu_to_le16(1);
- cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
- cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
- put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
}
static void
@@ -3882,6 +3889,7 @@ qla_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk)
{
mrk->entry_type = MARKER_TYPE;
mrk->modifier = sp->u.iocb_cmd.u.tmf.modifier;
+ mrk->handle = make_handle(sp->qpair->req->id, sp->handle);
if (sp->u.iocb_cmd.u.tmf.modifier != MK_SYNC_ALL) {
mrk->nport_handle = cpu_to_le16(sp->u.iocb_cmd.u.tmf.loop_id);
int_to_scsilun(sp->u.iocb_cmd.u.tmf.lun, (struct scsi_lun *)&mrk->lun);
@@ -4197,3 +4205,267 @@ queuing_error:
return rval;
}
+
+/**
+ * qla_start_scsi_type6() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+static int
+qla_start_scsi_type6(srb_t *sp)
+{
+ int nseg;
+ unsigned long flags;
+ uint32_t *clr_ptr;
+ uint32_t handle;
+ struct cmd_type_6 *cmd_pkt;
+ uint16_t cnt;
+ uint16_t req_cnt;
+ uint16_t tot_dsds;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qpair = sp->qpair;
+ uint16_t more_dsd_lists = 0;
+ struct dsd_dma *dsd_ptr;
+ uint16_t i;
+ __be32 *fcp_dl;
+ uint8_t additional_cdb_len;
+ struct ct6_dsd *ctx;
+
+ /* Acquire qpair specific lock */
+ spin_lock_irqsave(&qpair->qp_lock, flags);
+
+ /* Setup qpair pointers */
+ req = qpair->req;
+ rsp = qpair->rsp;
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+ return QLA_FUNCTION_FAILED;
+ }
+ vha->marker_needed = 0;
+ }
+
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0)
+ goto queuing_error;
+
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ } else {
+ nseg = 0;
+ }
+
+ tot_dsds = nseg;
+
+	/* even though the driver only needs one Type 6 IOCB, the firmware still
+	 * converts DSDs into Continuation IOCBs */
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+ sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
+ sp->iores.exch_cnt = 1;
+ sp->iores.iocb_cnt = req_cnt;
+
+ if (qla_get_fw_resources(sp->qpair, &sp->iores))
+ goto queuing_error;
+
+ more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
+ if ((more_dsd_lists + qpair->dsd_inuse) >= NUM_DSD_CHAIN) {
+ ql_dbg(ql_dbg_io, vha, 0x3028,
+ "Num of DSD list %d is than %d for cmd=%p.\n",
+ more_dsd_lists + qpair->dsd_inuse, NUM_DSD_CHAIN, cmd);
+ goto queuing_error;
+ }
+
+ if (more_dsd_lists <= qpair->dsd_avail)
+ goto sufficient_dsds;
+ else
+ more_dsd_lists -= qpair->dsd_avail;
+
+ for (i = 0; i < more_dsd_lists; i++) {
+ dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
+ if (!dsd_ptr) {
+ ql_log(ql_log_fatal, vha, 0x3029,
+ "Failed to allocate memory for dsd_dma for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+ INIT_LIST_HEAD(&dsd_ptr->list);
+
+ dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
+ GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
+ if (!dsd_ptr->dsd_addr) {
+ kfree(dsd_ptr);
+ ql_log(ql_log_fatal, vha, 0x302a,
+ "Failed to allocate memory for dsd_addr for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+ list_add_tail(&dsd_ptr->list, &qpair->dsd_list);
+ qpair->dsd_avail++;
+ }
+
+sufficient_dsds:
+ req_cnt = 1;
+
+ if (req->cnt < (req_cnt + 2)) {
+ if (IS_SHADOW_REG_CAPABLE(ha)) {
+ cnt = *req->out_ptr;
+ } else {
+ cnt = (uint16_t)rd_reg_dword_relaxed(req->req_q_out);
+ if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+ goto queuing_error;
+ }
+
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length - (req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+ }
+
+ ctx = &sp->u.scmd.ct6_ctx;
+
+ memset(ctx, 0, sizeof(struct ct6_dsd));
+ ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
+ GFP_ATOMIC, &ctx->fcp_cmnd_dma);
+ if (!ctx->fcp_cmnd) {
+ ql_log(ql_log_fatal, vha, 0x3031,
+ "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+
+ /* Initialize the DSD list and dma handle */
+ INIT_LIST_HEAD(&ctx->dsd_list);
+ ctx->dsd_use_cnt = 0;
+
+ if (cmd->cmd_len > 16) {
+ additional_cdb_len = cmd->cmd_len - 16;
+ if (cmd->cmd_len % 4 ||
+ cmd->cmd_len > QLA_CDB_BUF_SIZE) {
+ /*
+			 * SCSI commands longer than 16 bytes must be a
+			 * multiple of 4 and must not exceed QLA_CDB_BUF_SIZE.
+ */
+ ql_log(ql_log_warn, vha, 0x3033,
+ "scsi cmd len %d not multiple of 4 for cmd=%p.\n",
+ cmd->cmd_len, cmd);
+ goto queuing_error_fcp_cmnd;
+ }
+ ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+ } else {
+ additional_cdb_len = 0;
+ ctx->fcp_cmnd_len = 12 + 16 + 4;
+ }
+
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+
+ cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
+ cmd_pkt->handle = make_handle(req->id, handle);
+
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number */
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->vha->vp_idx;
+
+ /* Build IOCB segments */
+ qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds);
+
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ /* build FCP_CMND IU */
+ int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
+ ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 1;
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 2;
+
+ /* Populate the FCP_PRIO. */
+ if (ha->flags.fcp_prio_enabled)
+ ctx->fcp_cmnd->task_attribute |=
+ sp->fcport->fcp_prio << 3;
+
+ memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+
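+	/* the 4-byte FCP_DL (data length) field follows the CDB in the
+	 * FCP_CMND IU, hence fcp_cmnd_len = 12 + CDB length + 4 above */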
+ fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
+ additional_cdb_len);
+ *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
+
+ cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
+ put_unaligned_le64(ctx->fcp_cmnd_dma,
+ &cmd_pkt->fcp_cmnd_dseg_address);
+
+ sp->flags |= SRB_FCP_CMND_DMA_VALID;
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+
+ wmb();
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else {
+ req->ring_ptr++;
+ }
+
+ sp->qpair->cmd_cnt++;
+ sp->flags |= SRB_DMA_VALID;
+
+ /* Set chip new ring index. */
+ wrt_reg_dword(req->req_q_in, req->ring_index);
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+ return QLA_SUCCESS;
+
+queuing_error_fcp_cmnd:
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
+
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ qla_put_fw_resources(sp->qpair, &sp->iores);
+
+ if (sp->u.scmd.crc_ctx) {
+ mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
+ sp->u.scmd.crc_ctx = NULL;
+ }
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+ return QLA_FUNCTION_FAILED;
+}
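
The Command Type 6 path above sizes the FCP_CMND IU as 12 fixed header bytes plus the CDB plus a 4-byte FCP_DL, and folds the data direction into the low bits of additional_cdb_len. The following is a minimal standalone model of that arithmetic, not driver code; CDB_BUF_SIZE and the function name are stand-ins for the driver's QLA_CDB_BUF_SIZE and inline logic.

#include <stdio.h>

#define CDB_BUF_SIZE	256	/* stand-in for QLA_CDB_BUF_SIZE */

/* 12 fixed FCP_CMND bytes precede the CDB; 4 bytes of FCP_DL follow it. */
static int fcp_cmnd_sizing(unsigned int cmd_len, int is_write, int is_read,
			   unsigned int *iu_len, unsigned int *additional_cdb_len)
{
	if (cmd_len > 16) {
		/* long CDBs must be a multiple of 4 and fit the buffer */
		if (cmd_len % 4 || cmd_len > CDB_BUF_SIZE)
			return -1;
		*additional_cdb_len = cmd_len - 16;
		*iu_len = 12 + cmd_len + 4;
	} else {
		*additional_cdb_len = 0;
		*iu_len = 12 + 16 + 4;		/* fixed 16-byte CDB slot */
	}
	if (is_write)				/* direction bits, as above */
		*additional_cdb_len |= 1;
	else if (is_read)
		*additional_cdb_len |= 2;
	return 0;
}

int main(void)
{
	unsigned int iu_len, acl;

	if (!fcp_cmnd_sizing(32, 1, 0, &iu_len, &acl))
		printf("32-byte CDB: fcp_cmnd_len=%u additional_cdb_len=0x%02x\n",
		       iu_len, acl);
	return 0;
}

For a 32-byte write CDB this yields fcp_cmnd_len = 48 and additional_cdb_len = 0x11, matching the arithmetic in the hunk above.
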
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 656700f79325..e98788191897 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -56,6 +56,22 @@ const char *const port_state_str[] = {
[FCS_ONLINE] = "ONLINE"
};
+#define SFP_DISABLE_LASER_INITIATED 0x15 /* Sub code of 8070 AEN */
+#define SFP_ENABLE_LASER_INITIATED 0x16 /* Sub code of 8070 AEN */
+
+static inline void display_Laser_info(scsi_qla_host_t *vha,
+					    u16 mb1, u16 mb2, u16 mb3)
+{
+ if (mb1 == SFP_DISABLE_LASER_INITIATED)
+ ql_log(ql_log_warn, vha, 0xf0a2,
+ "SFP temperature (%d C) reached/exceeded the threshold (%d C). Laser is disabled.\n",
+ mb3, mb2);
+ if (mb1 == SFP_ENABLE_LASER_INITIATED)
+ ql_log(ql_log_warn, vha, 0xf0a3,
+ "SFP temperature (%d C) reached normal operating level. Laser is enabled.\n",
+ mb3);
+}
+
static void
qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
{
@@ -823,6 +839,135 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
}
}
+/**
+ * qla27xx_copy_multiple_pkt() - Copy a purex/purls packet that can
+ * span multiple IOCBs into a single purex_item.
+ * @vha: SCSI driver HA context
+ * @pkt: ELS packet
+ * @rsp: Response queue
+ * @is_purls: true for an Unsolicited Received FC-NVMe LS rsp IOCB,
+ *	false for an Unsolicited Received ELS IOCB
+ * @byte_order: true to byte-swap the IOCB payload
+ */
+struct purex_item *
+qla27xx_copy_multiple_pkt(struct scsi_qla_host *vha, void **pkt,
+ struct rsp_que **rsp, bool is_purls,
+ bool byte_order)
+{
+ struct purex_entry_24xx *purex = NULL;
+ struct pt_ls4_rx_unsol *purls = NULL;
+ struct rsp_que *rsp_q = *rsp;
+ sts_cont_entry_t *new_pkt;
+ uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
+ uint16_t buffer_copy_offset = 0, payload_size = 0;
+ uint16_t entry_count, entry_count_remaining;
+ struct purex_item *item;
+ void *iocb_pkt = NULL;
+
+ if (is_purls) {
+ purls = *pkt;
+ total_bytes = (le16_to_cpu(purls->frame_size) & 0x0FFF) -
+ PURX_ELS_HEADER_SIZE;
+ entry_count = entry_count_remaining = purls->entry_count;
+ payload_size = sizeof(purls->payload);
+ } else {
+ purex = *pkt;
+ total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) -
+ PURX_ELS_HEADER_SIZE;
+ entry_count = entry_count_remaining = purex->entry_count;
+ payload_size = sizeof(purex->els_frame_payload);
+ }
+
+ pending_bytes = total_bytes;
+ no_bytes = (pending_bytes > payload_size) ? payload_size :
+ pending_bytes;
+ ql_dbg(ql_dbg_async, vha, 0x509a,
+ "%s LS, frame_size 0x%x, entry count %d\n",
+ (is_purls ? "PURLS" : "FPIN"), total_bytes, entry_count);
+
+ item = qla24xx_alloc_purex_item(vha, total_bytes);
+ if (!item)
+ return item;
+
+ iocb_pkt = &item->iocb;
+
+ if (is_purls)
+ memcpy(iocb_pkt, &purls->payload[0], no_bytes);
+ else
+ memcpy(iocb_pkt, &purex->els_frame_payload[0], no_bytes);
+ buffer_copy_offset += no_bytes;
+ pending_bytes -= no_bytes;
+ --entry_count_remaining;
+
+ if (is_purls)
+ ((response_t *)purls)->signature = RESPONSE_PROCESSED;
+ else
+ ((response_t *)purex)->signature = RESPONSE_PROCESSED;
+ wmb();
+
+ do {
+ while ((total_bytes > 0) && (entry_count_remaining > 0)) {
+ if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) {
+ ql_dbg(ql_dbg_async, vha, 0x5084,
+ "Ran out of IOCBs, partial data 0x%x\n",
+ buffer_copy_offset);
+ cpu_relax();
+ continue;
+ }
+
+ new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
+ *pkt = new_pkt;
+
+ if (new_pkt->entry_type != STATUS_CONT_TYPE) {
+ ql_log(ql_log_warn, vha, 0x507a,
+ "Unexpected IOCB type, partial data 0x%x\n",
+ buffer_copy_offset);
+ break;
+ }
+
+ rsp_q->ring_index++;
+ if (rsp_q->ring_index == rsp_q->length) {
+ rsp_q->ring_index = 0;
+ rsp_q->ring_ptr = rsp_q->ring;
+ } else {
+ rsp_q->ring_ptr++;
+ }
+ no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
+ sizeof(new_pkt->data) : pending_bytes;
+ if ((buffer_copy_offset + no_bytes) <= total_bytes) {
+ memcpy(((uint8_t *)iocb_pkt + buffer_copy_offset),
+ new_pkt->data, no_bytes);
+ buffer_copy_offset += no_bytes;
+ pending_bytes -= no_bytes;
+ --entry_count_remaining;
+ } else {
+ ql_log(ql_log_warn, vha, 0x5044,
+ "Attempt to copy more that we got, optimizing..%x\n",
+ buffer_copy_offset);
+ memcpy(((uint8_t *)iocb_pkt + buffer_copy_offset),
+ new_pkt->data,
+ total_bytes - buffer_copy_offset);
+ }
+
+ ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
+ wmb();
+ }
+
+ if (pending_bytes != 0 || entry_count_remaining != 0) {
+ ql_log(ql_log_fatal, vha, 0x508b,
+ "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n",
+ total_bytes, entry_count_remaining);
+ qla24xx_free_purex_item(item);
+ return NULL;
+ }
+ } while (entry_count_remaining > 0);
+
+ if (byte_order)
+ host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes);
+
+ return item;
+}
+
int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
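
qla27xx_copy_multiple_pkt() above reassembles a frame whose payload spans the first IOCB plus STATUS_CONT_TYPE continuation entries: it copies min(remaining, per-entry payload) from each entry into one buffer and drops the frame if bytes or entries run short. A minimal standalone model of that loop follows; the per-entry payload sizes and names are assumptions, not the driver's structures, and the response-ring bookkeeping (RESPONSE_PROCESSED marking, ring-index advance) is left out.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FIRST_PAYLOAD	20	/* bytes in the first entry (assumed) */
#define CONT_PAYLOAD	60	/* bytes per continuation entry (assumed) */

static size_t reassemble(uint8_t *dst, size_t total_bytes,
			 const uint8_t *first, size_t nr_cont,
			 uint8_t cont[][CONT_PAYLOAD])
{
	size_t copied = 0, chunk;

	/* the first entry carries the start of the frame */
	chunk = total_bytes < FIRST_PAYLOAD ? total_bytes : FIRST_PAYLOAD;
	memcpy(dst, first, chunk);
	copied += chunk;

	/* continuation entries carry the rest, capped at the frame size */
	for (size_t i = 0; i < nr_cont && copied < total_bytes; i++) {
		chunk = total_bytes - copied;
		if (chunk > CONT_PAYLOAD)
			chunk = CONT_PAYLOAD;
		memcpy(dst + copied, cont[i], chunk);
		copied += chunk;
	}
	return copied;	/* caller drops the frame if copied != total_bytes */
}

int main(void)
{
	uint8_t first[FIRST_PAYLOAD] = { 0 };
	uint8_t cont[2][CONT_PAYLOAD] = { { 0 } };
	uint8_t frame[FIRST_PAYLOAD + 2 * CONT_PAYLOAD];

	printf("reassembled %zu of 100 bytes\n",
	       reassemble(frame, 100, first, 2, cont));
	return 0;
}
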
@@ -958,7 +1103,7 @@ initialize_purex_header:
return item;
}
-static void
+void
qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt,
void (*process_item)(struct scsi_qla_host *vha,
struct purex_item *pkt))
@@ -1121,8 +1266,12 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
unsigned long flags;
fc_port_t *fcport = NULL;
- if (!vha->hw->flags.fw_started)
+ if (!vha->hw->flags.fw_started) {
+ ql_log(ql_log_warn, vha, 0x50ff,
+ "Dropping AEN - %04x %04x %04x %04x.\n",
+ mb[0], mb[1], mb[2], mb[3]);
return;
+ }
/* Setup to process RIO completion. */
handle_cnt = 0;
@@ -1794,6 +1943,8 @@ global_port_update:
break;
case MBA_TEMPERATURE_ALERT:
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ display_Laser_info(vha, mb[1], mb[2], mb[3]);
ql_dbg(ql_dbg_async, vha, 0x505e,
"TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
break;
@@ -2539,7 +2690,6 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
case CS_PORT_BUSY:
case CS_INCOMPLETE:
case CS_PORT_UNAVAILABLE:
- case CS_TIMEOUT:
case CS_RESET:
if (atomic_read(&fcport->state) == FCS_ONLINE) {
ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
@@ -3808,6 +3958,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
struct purex_entry_24xx *purex_entry;
struct purex_item *pure_item;
+ struct pt_ls4_rx_unsol *p;
u16 rsp_in = 0, cur_ring_index;
int is_shadow_hba;
@@ -3980,7 +4131,19 @@ process_err:
qla28xx_sa_update_iocb_entry(vha, rsp->req,
(struct sa_update_28xx *)pkt);
break;
-
+ case PT_LS4_UNSOL:
+ p = (void *)pkt;
+ if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) {
+ rsp->ring_ptr = (response_t *)pkt;
+ rsp->ring_index = cur_ring_index;
+
+ ql_dbg(ql_dbg_init, vha, 0x2124,
+ "Defer processing UNSOL LS req opcode %#x...\n",
+ p->payload[0]);
+ return;
+ }
+ qla2xxx_process_purls_iocb((void **)&pkt, &rsp);
+ break;
default:
/* Type Not Supported. */
ql_dbg(ql_dbg_async, vha, 0x5042,
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 254fd4c64262..21ec32b4fb28 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -273,7 +273,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
wait_time = jiffies;
- atomic_inc(&ha->num_pend_mbx_stage3);
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
ql_dbg(ql_dbg_mbx, vha, 0x117a,
@@ -290,7 +289,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
atomic_dec(&ha->num_pend_mbx_stage2);
- atomic_dec(&ha->num_pend_mbx_stage3);
rval = QLA_ABORTED;
goto premature_exit;
}
@@ -302,11 +300,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
atomic_dec(&ha->num_pend_mbx_stage2);
- atomic_dec(&ha->num_pend_mbx_stage3);
rval = QLA_ABORTED;
goto premature_exit;
}
- atomic_dec(&ha->num_pend_mbx_stage3);
if (time_after(jiffies, wait_time + 5 * HZ))
ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
@@ -2213,6 +2209,9 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
"Entered %s.\n", __func__);
+ if (!ha->flags.fw_started)
+ return QLA_FUNCTION_FAILED;
+
mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
mcp->out_mb = MBX_0;
if (IS_FWI2_CAPABLE(vha->hw))
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 86e85f2f4782..db753d712991 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -12,6 +12,26 @@
#include <linux/blk-mq.h>
static struct nvme_fc_port_template qla_nvme_fc_transport;
+static int qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha,
+ struct qla_qpair *qp,
+ struct qla_nvme_lsrjt_pt_arg *a,
+ bool is_xchg_terminate);
+
+struct qla_nvme_unsol_ctx {
+ struct list_head elem;
+ struct scsi_qla_host *vha;
+ struct fc_port *fcport;
+ struct srb *sp;
+ struct nvmefc_ls_rsp lsrsp;
+ struct nvmefc_ls_rsp *fd_rsp;
+ struct work_struct lsrsp_work;
+ struct work_struct abort_work;
+ __le32 exchange_address;
+ __le16 nport_handle;
+ __le16 ox_id;
+ int comp_status;
+ spinlock_t cmd_lock;
+};
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
@@ -132,6 +152,7 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
"Failed to allocate qpair\n");
return -EINVAL;
}
+ qla_adjust_iocb_limit(vha);
}
*handle = qpair;
@@ -215,6 +236,55 @@ static void qla_nvme_sp_ls_done(srb_t *sp, int res)
schedule_work(&priv->ls_work);
}
+static void qla_nvme_release_lsrsp_cmd_kref(struct kref *kref)
+{
+ struct srb *sp = container_of(kref, struct srb, cmd_kref);
+ struct qla_nvme_unsol_ctx *uctx = sp->priv;
+ struct nvmefc_ls_rsp *fd_rsp;
+ unsigned long flags;
+
+ if (!uctx) {
+ qla2x00_rel_sp(sp);
+ return;
+ }
+
+ spin_lock_irqsave(&uctx->cmd_lock, flags);
+ uctx->sp = NULL;
+ sp->priv = NULL;
+ spin_unlock_irqrestore(&uctx->cmd_lock, flags);
+
+ fd_rsp = uctx->fd_rsp;
+
+ list_del(&uctx->elem);
+
+ fd_rsp->done(fd_rsp);
+ kfree(uctx);
+ qla2x00_rel_sp(sp);
+}
+
+static void qla_nvme_lsrsp_complete(struct work_struct *work)
+{
+ struct qla_nvme_unsol_ctx *uctx =
+ container_of(work, struct qla_nvme_unsol_ctx, lsrsp_work);
+
+ kref_put(&uctx->sp->cmd_kref, qla_nvme_release_lsrsp_cmd_kref);
+}
+
+static void qla_nvme_sp_lsrsp_done(srb_t *sp, int res)
+{
+ struct qla_nvme_unsol_ctx *uctx = sp->priv;
+
+ if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
+ return;
+
+ if (res)
+ res = -EINVAL;
+
+ uctx->comp_status = res;
+ INIT_WORK(&uctx->lsrsp_work, qla_nvme_lsrsp_complete);
+ schedule_work(&uctx->lsrsp_work);
+}
+
/* it assumed that QPair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
@@ -287,6 +357,92 @@ out:
kref_put(&sp->cmd_kref, sp->put_fn);
}
+static int qla_nvme_xmt_ls_rsp(struct nvme_fc_local_port *lport,
+ struct nvme_fc_remote_port *rport,
+ struct nvmefc_ls_rsp *fd_resp)
+{
+ struct qla_nvme_unsol_ctx *uctx = container_of(fd_resp,
+ struct qla_nvme_unsol_ctx, lsrsp);
+ struct qla_nvme_rport *qla_rport = rport->private;
+ fc_port_t *fcport = qla_rport->fcport;
+ struct scsi_qla_host *vha = uctx->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_nvme_lsrjt_pt_arg a;
+ struct srb_iocb *nvme;
+ srb_t *sp;
+ int rval = QLA_FUNCTION_FAILED;
+ uint8_t cnt = 0;
+
+ if (!fcport || fcport->deleted)
+ goto out;
+
+ if (!ha->flags.fw_started)
+ goto out;
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto out;
+
+ sp->type = SRB_NVME_LS;
+ sp->name = "nvme_ls";
+ sp->done = qla_nvme_sp_lsrsp_done;
+ sp->put_fn = qla_nvme_release_lsrsp_cmd_kref;
+ sp->priv = (void *)uctx;
+ sp->unsol_rsp = 1;
+ uctx->sp = sp;
+ spin_lock_init(&uctx->cmd_lock);
+ nvme = &sp->u.iocb_cmd;
+ uctx->fd_rsp = fd_resp;
+ nvme->u.nvme.desc = fd_resp;
+ nvme->u.nvme.dir = 0;
+ nvme->u.nvme.dl = 0;
+ nvme->u.nvme.timeout_sec = 0;
+ nvme->u.nvme.cmd_dma = fd_resp->rspdma;
+ nvme->u.nvme.cmd_len = fd_resp->rsplen;
+ nvme->u.nvme.rsp_len = 0;
+ nvme->u.nvme.rsp_dma = 0;
+ nvme->u.nvme.exchange_address = uctx->exchange_address;
+ nvme->u.nvme.nport_handle = uctx->nport_handle;
+ nvme->u.nvme.ox_id = uctx->ox_id;
+ dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
+ le32_to_cpu(fd_resp->rsplen), DMA_TO_DEVICE);
+
+ ql_dbg(ql_dbg_unsol, vha, 0x2122,
+ "Unsol lsreq portid=%06x %8phC exchange_address 0x%x ox_id 0x%x hdl 0x%x\n",
+ fcport->d_id.b24, fcport->port_name, uctx->exchange_address,
+ uctx->ox_id, uctx->nport_handle);
+retry:
+ rval = qla2x00_start_sp(sp);
+ switch (rval) {
+ case QLA_SUCCESS:
+ break;
+ case EAGAIN:
+ msleep(PURLS_MSLEEP_INTERVAL);
+ cnt++;
+ if (cnt < PURLS_RETRY_COUNT)
+ goto retry;
+
+ fallthrough;
+ default:
+		ql_log(ql_log_warn, vha, 0x2123,
+ "Failed to xmit Unsol ls response = %d\n", rval);
+ rval = -EIO;
+ qla2x00_rel_sp(sp);
+ goto out;
+ }
+
+ return 0;
+out:
+ memset((void *)&a, 0, sizeof(a));
+ a.vp_idx = vha->vp_idx;
+ a.nport_handle = uctx->nport_handle;
+ a.xchg_address = uctx->exchange_address;
+ qla_nvme_ls_reject_iocb(vha, ha->base_qpair, &a, true);
+ kfree(uctx);
+ return rval;
+}
+
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
@@ -354,7 +510,7 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
nvme->u.nvme.timeout_sec = fd->timeout;
nvme->u.nvme.cmd_dma = fd->rqstdma;
dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
- fd->rqstlen, DMA_TO_DEVICE);
+ le32_to_cpu(fd->rqstlen), DMA_TO_DEVICE);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
@@ -667,7 +823,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
rval = qla2x00_start_nvme_mq(sp);
if (rval != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x212d,
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x212d,
"qla2x00_start_nvme_mq failed = %d\n", rval);
sp->priv = NULL;
priv->sp = NULL;
@@ -719,6 +875,7 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = {
.ls_abort = qla_nvme_ls_abort,
.fcp_io = qla_nvme_post_cmd,
.fcp_abort = qla_nvme_fcp_abort,
+ .xmt_ls_rsp = qla_nvme_xmt_ls_rsp,
.map_queues = qla_nvme_map_queues,
.max_hw_queues = DEF_NVME_HW_QUEUES,
.max_sgl_segments = 1024,
@@ -923,3 +1080,247 @@ inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
return;
kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
}
+
+static void qla_nvme_fc_format_rjt(void *buf, u8 ls_cmd, u8 reason,
+ u8 explanation, u8 vendor)
+{
+ struct fcnvme_ls_rjt *rjt = buf;
+
+ rjt->w0.ls_cmd = FCNVME_LSDESC_RQST;
+ rjt->desc_list_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt));
+ rjt->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
+ rjt->rqst.desc_len =
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
+ rjt->rqst.w0.ls_cmd = ls_cmd;
+ rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
+ rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
+ rjt->rjt.reason_code = reason;
+ rjt->rjt.reason_explanation = explanation;
+ rjt->rjt.vendor = vendor;
+}
+
+static void qla_nvme_lsrjt_pt_iocb(struct scsi_qla_host *vha,
+ struct pt_ls4_request *lsrjt_iocb,
+ struct qla_nvme_lsrjt_pt_arg *a)
+{
+ lsrjt_iocb->entry_type = PT_LS4_REQUEST;
+ lsrjt_iocb->entry_count = 1;
+ lsrjt_iocb->sys_define = 0;
+ lsrjt_iocb->entry_status = 0;
+ lsrjt_iocb->handle = QLA_SKIP_HANDLE;
+ lsrjt_iocb->nport_handle = a->nport_handle;
+ lsrjt_iocb->exchange_address = a->xchg_address;
+ lsrjt_iocb->vp_index = a->vp_idx;
+
+ lsrjt_iocb->control_flags = cpu_to_le16(a->control_flags);
+
+ put_unaligned_le64(a->tx_addr, &lsrjt_iocb->dsd[0].address);
+ lsrjt_iocb->dsd[0].length = cpu_to_le32(a->tx_byte_count);
+ lsrjt_iocb->tx_dseg_count = cpu_to_le16(1);
+ lsrjt_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count);
+
+ put_unaligned_le64(a->rx_addr, &lsrjt_iocb->dsd[1].address);
+ lsrjt_iocb->dsd[1].length = 0;
+ lsrjt_iocb->rx_dseg_count = 0;
+ lsrjt_iocb->rx_byte_count = 0;
+}
+
+static int
+qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha, struct qla_qpair *qp,
+ struct qla_nvme_lsrjt_pt_arg *a, bool is_xchg_terminate)
+{
+ struct pt_ls4_request *lsrjt_iocb;
+
+ lsrjt_iocb = __qla2x00_alloc_iocbs(qp, NULL);
+ if (!lsrjt_iocb) {
+ ql_log(ql_log_warn, vha, 0x210e,
+ "qla2x00_alloc_iocbs failed.\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ if (!is_xchg_terminate) {
+ qla_nvme_fc_format_rjt((void *)vha->hw->lsrjt.c, a->opcode,
+ a->reason, a->explanation, 0);
+
+ a->tx_byte_count = sizeof(struct fcnvme_ls_rjt);
+ a->tx_addr = vha->hw->lsrjt.cdma;
+ a->control_flags = CF_LS4_RESPONDER << CF_LS4_SHIFT;
+
+ ql_dbg(ql_dbg_unsol, vha, 0x211f,
+ "Sending nvme fc ls reject ox_id %04x op %04x\n",
+ a->ox_id, a->opcode);
+ ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x210f,
+ vha->hw->lsrjt.c, sizeof(*vha->hw->lsrjt.c));
+ } else {
+ a->tx_byte_count = 0;
+ a->control_flags = CF_LS4_RESPONDER_TERM << CF_LS4_SHIFT;
+ ql_dbg(ql_dbg_unsol, vha, 0x2110,
+ "Terminate nvme ls xchg 0x%x\n", a->xchg_address);
+ }
+
+ qla_nvme_lsrjt_pt_iocb(vha, lsrjt_iocb, a);
+ /* flush iocb to mem before notifying hw doorbell */
+ wmb();
+ qla2x00_start_iocbs(vha, qp->req);
+ return 0;
+}
+
+/*
+ * qla2xxx_process_purls_pkt() - Pass an unsolicited received FC-NVMe
+ * Link Service packet up to nvme_fc_rcv_ls_req(). The LLDD must
+ * provide memory for the response buffer, which is used to reference
+ * the exchange corresponding to the LS when the LS response is
+ * issued. The LLDD frees the response buffer in
+ * lport->ops->xmt_ls_rsp().
+ *
+ * @vha: SCSI qla host
+ * @item: ptr to purex_item
+ */
+static void
+qla2xxx_process_purls_pkt(struct scsi_qla_host *vha, struct purex_item *item)
+{
+ struct qla_nvme_unsol_ctx *uctx = item->purls_context;
+ struct qla_nvme_lsrjt_pt_arg a;
+ int ret = 1;
+
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ ret = nvme_fc_rcv_ls_req(uctx->fcport->nvme_remote_port, &uctx->lsrsp,
+ &item->iocb, item->size);
+#endif
+ if (ret) {
+ ql_dbg(ql_dbg_unsol, vha, 0x2125, "NVMe transport ls_req failed\n");
+ memset((void *)&a, 0, sizeof(a));
+ a.vp_idx = vha->vp_idx;
+ a.nport_handle = uctx->nport_handle;
+ a.xchg_address = uctx->exchange_address;
+ qla_nvme_ls_reject_iocb(vha, vha->hw->base_qpair, &a, true);
+ list_del(&uctx->elem);
+ kfree(uctx);
+ }
+}
+
+static scsi_qla_host_t *
+qla2xxx_get_vha_from_vp_idx(struct qla_hw_data *ha, uint16_t vp_index)
+{
+ scsi_qla_host_t *base_vha, *vha, *tvp;
+ unsigned long flags;
+
+ base_vha = pci_get_drvdata(ha->pdev);
+
+ if (!vp_index && !ha->num_vhosts)
+ return base_vha;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) {
+ if (vha->vp_idx == vp_index) {
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ return vha;
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return NULL;
+}
+
+void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp)
+{
+ struct nvme_fc_remote_port *rport;
+ struct qla_nvme_rport *qla_rport;
+ struct qla_nvme_lsrjt_pt_arg a;
+ struct pt_ls4_rx_unsol *p = *pkt;
+ struct qla_nvme_unsol_ctx *uctx;
+ struct rsp_que *rsp_q = *rsp;
+ struct qla_hw_data *ha;
+ scsi_qla_host_t *vha;
+ fc_port_t *fcport = NULL;
+ struct purex_item *item;
+ port_id_t d_id = {0};
+ port_id_t id = {0};
+ u8 *opcode;
+ bool xmt_reject = false;
+
+ ha = rsp_q->hw;
+
+ vha = qla2xxx_get_vha_from_vp_idx(ha, p->vp_index);
+ if (!vha) {
+ ql_log(ql_log_warn, NULL, 0x2110, "Invalid vp index %d\n", p->vp_index);
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ memset((void *)&a, 0, sizeof(a));
+ opcode = (u8 *)&p->payload[0];
+ a.opcode = opcode[3];
+ a.vp_idx = p->vp_index;
+ a.nport_handle = p->nport_handle;
+ a.ox_id = p->ox_id;
+ a.xchg_address = p->exchange_address;
+
+ id.b.domain = p->s_id.domain;
+ id.b.area = p->s_id.area;
+ id.b.al_pa = p->s_id.al_pa;
+ d_id.b.domain = p->d_id[2];
+ d_id.b.area = p->d_id[1];
+ d_id.b.al_pa = p->d_id[0];
+
+ fcport = qla2x00_find_fcport_by_nportid(vha, &id, 0);
+ if (!fcport) {
+ ql_dbg(ql_dbg_unsol, vha, 0x211e,
+ "Failed to find sid=%06x did=%06x\n",
+ id.b24, d_id.b24);
+ a.reason = FCNVME_RJT_RC_INV_ASSOC;
+ a.explanation = FCNVME_RJT_EXP_NONE;
+ xmt_reject = true;
+ goto out;
+ }
+ rport = fcport->nvme_remote_port;
+ qla_rport = rport->private;
+
+ item = qla27xx_copy_multiple_pkt(vha, pkt, rsp, true, false);
+ if (!item) {
+ a.reason = FCNVME_RJT_RC_LOGIC;
+ a.explanation = FCNVME_RJT_EXP_NONE;
+ xmt_reject = true;
+ goto out;
+ }
+
+ uctx = kzalloc(sizeof(*uctx), GFP_ATOMIC);
+ if (!uctx) {
+		ql_log(ql_log_info, vha, 0x2126, "Failed to allocate memory\n");
+ a.reason = FCNVME_RJT_RC_LOGIC;
+ a.explanation = FCNVME_RJT_EXP_NONE;
+ xmt_reject = true;
+ kfree(item);
+ goto out;
+ }
+
+ uctx->vha = vha;
+ uctx->fcport = fcport;
+ uctx->exchange_address = p->exchange_address;
+ uctx->nport_handle = p->nport_handle;
+ uctx->ox_id = p->ox_id;
+ qla_rport->uctx = uctx;
+ INIT_LIST_HEAD(&uctx->elem);
+ list_add_tail(&uctx->elem, &fcport->unsol_ctx_head);
+ item->purls_context = (void *)uctx;
+
+ ql_dbg(ql_dbg_unsol, vha, 0x2121,
+ "PURLS OP[%01x] size %d xchg addr 0x%x portid %06x\n",
+ item->iocb.iocb[3], item->size, uctx->exchange_address,
+ fcport->d_id.b24);
+ /* +48 0 1 2 3 4 5 6 7 8 9 A B C D E F
+ * ----- -----------------------------------------------
+ * 0000: 00 00 00 05 28 00 00 00 07 00 00 00 08 00 00 00
+ * 0010: ab ec 0f cc 00 00 8d 7d 05 00 00 00 10 00 00 00
+ * 0020: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ */
+ ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x2120,
+ &item->iocb, item->size);
+
+ qla24xx_queue_purex_item(vha, item, qla2xxx_process_purls_pkt);
+out:
+ if (xmt_reject) {
+ qla_nvme_ls_reject_iocb(vha, (*rsp)->qpair, &a, false);
+ __qla_consume_iocb(vha, pkt, rsp);
+ }
+}
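
The comment above qla2xxx_process_purls_pkt() describes an ownership contract: the driver allocates a per-LS context carrying the exchange identity, hands a response descriptor to the NVMe-FC transport, and releases the context only after the transport calls back through xmt_ls_rsp(). A minimal standalone model of that contract, with invented names and none of the kref/work machinery the driver actually uses:

#include <stdio.h>
#include <stdlib.h>

struct unsol_ctx {
	unsigned int exchange_address;	/* identifies the FC exchange */
	unsigned int nport_handle;
};

/* receive path: allocate a context describing the exchange, hand it up */
static struct unsol_ctx *rcv_unsol_ls(unsigned int xchg, unsigned int handle)
{
	struct unsol_ctx *ctx = malloc(sizeof(*ctx));

	if (!ctx)
		return NULL;	/* the driver sends an LS reject instead */
	ctx->exchange_address = xchg;
	ctx->nport_handle = handle;
	return ctx;
}

/* response path ("xmt_ls_rsp"): reply on the saved exchange, then free */
static void xmt_ls_rsp(struct unsol_ctx *ctx)
{
	printf("LS response on exchange 0x%x, handle 0x%x\n",
	       ctx->exchange_address, ctx->nport_handle);
	free(ctx);		/* context lives until the response goes out */
}

int main(void)
{
	struct unsol_ctx *ctx = rcv_unsol_ls(0x1234, 0x06);

	if (ctx)
		xmt_ls_rsp(ctx);
	return 0;
}
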
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index d299478371b2..a253ac55171b 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -21,6 +21,7 @@
#define Q2T_NVME_NUM_TAGS 2048
#define QLA_MAX_FC_SEGMENTS 64
+struct qla_nvme_unsol_ctx;
struct scsi_qla_host;
struct qla_hw_data;
struct req_que;
@@ -37,6 +38,7 @@ struct nvme_private {
struct qla_nvme_rport {
struct fc_port *fcport;
+ struct qla_nvme_unsol_ctx *uctx;
};
#define COMMAND_NVME 0x88 /* Command Type FC-NVMe IOCB */
@@ -75,6 +77,9 @@ struct cmd_nvme {
struct dsd64 nvme_dsd;
};
+#define PURLS_MSLEEP_INTERVAL 1
+#define PURLS_RETRY_COUNT 5
+
#define PT_LS4_REQUEST 0x89 /* Link Service pass-through IOCB (request) */
struct pt_ls4_request {
uint8_t entry_type;
@@ -118,21 +123,19 @@ struct pt_ls4_rx_unsol {
__le32 exchange_address;
uint8_t d_id[3];
uint8_t r_ctl;
- be_id_t s_id;
+ le_id_t s_id;
uint8_t cs_ctl;
uint8_t f_ctl[3];
uint8_t type;
__le16 seq_cnt;
uint8_t df_ctl;
uint8_t seq_id;
- __le16 rx_id;
- __le16 ox_id;
- __le32 param;
- __le32 desc0;
+ __le16 rx_id;
+ __le16 ox_id;
+ __le32 desc0;
#define PT_LS4_PAYLOAD_OFFSET 0x2c
#define PT_LS4_FIRST_PACKET_LEN 20
- __le32 desc_len;
- __le32 payload[3];
+ __le32 payload[5];
};
/*
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 6dc80c8ddf79..5d1bdc15b75c 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -857,7 +857,9 @@ struct fcp_cmnd {
uint8_t task_attribute;
uint8_t task_management;
uint8_t additional_cdb_len;
- uint8_t cdb[260]; /* 256 for CDB len and 4 for FCP_DL */
+#define QLA_CDB_BUF_SIZE 256
+#define QLA_FCP_DL_SIZE 4
+ uint8_t cdb[QLA_CDB_BUF_SIZE + QLA_FCP_DL_SIZE]; /* 256 for CDB len and 4 for FCP_DL */
};
struct dsd_dma {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 877e4f446709..50db08265c51 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -44,10 +44,11 @@ module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
"Set this to take full dump on MPI hang.");
-int ql2xenforce_iocb_limit = 1;
+int ql2xenforce_iocb_limit = 2;
module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xenforce_iocb_limit,
- "Enforce IOCB throttling, to avoid FW congestion. (default: 1)");
+ "Enforce IOCB throttling, to avoid FW congestion. (default: 2) "
+ "1: track usage per queue, 2: track usage per adapter");
/*
* CT6 CTX allocation cache
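
The new ql2xenforce_iocb_limit default (2) moves IOCB throttling from per-queue to adapter-wide accounting, per the parameter description above. A minimal standalone model of the two granularities, with invented names rather than the driver's firmware-resource accounting:

#include <stdbool.h>

struct queue_budget   { int used, limit; };
struct adapter_budget { int used, limit; };

/* mode 1: per-queue budget; mode 2: adapter-wide budget; else no limit */
static bool claim_iocbs(int mode, struct queue_budget *q,
			struct adapter_budget *a, int want)
{
	if (mode == 1) {
		if (q->used + want > q->limit)
			return false;
		q->used += want;
	} else if (mode == 2) {
		if (a->used + want > a->limit)
			return false;
		a->used += want;
	}
	return true;
}

int main(void)
{
	struct queue_budget q = { 0, 8 };
	struct adapter_budget a = { 0, 32 };

	return claim_iocbs(2, &q, &a, 4) ? 0 : 1;
}
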
@@ -432,6 +433,7 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
ha->base_qpair->srb_mempool = ha->srb_mempool;
INIT_LIST_HEAD(&ha->base_qpair->hints_list);
+ INIT_LIST_HEAD(&ha->base_qpair->dsd_list);
ha->base_qpair->enable_class_2 = ql2xenableclass2;
/* init qpair to this cpu. Will adjust at run time. */
qla_cpu_update(rsp->qpair, raw_smp_processor_id());
@@ -750,9 +752,9 @@ void qla2x00_sp_free_dma(srb_t *sp)
dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
ctx1->fcp_cmnd_dma);
- list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
- ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
- ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+ list_splice(&ctx1->dsd_list, &sp->qpair->dsd_list);
+ sp->qpair->dsd_inuse -= ctx1->dsd_use_cnt;
+ sp->qpair->dsd_avail += ctx1->dsd_use_cnt;
}
if (sp->flags & SRB_GOT_BUF)
@@ -836,9 +838,9 @@ void qla2xxx_qpair_sp_free_dma(srb_t *sp)
dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
ctx1->fcp_cmnd_dma);
- list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
- ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
- ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+ list_splice(&ctx1->dsd_list, &sp->qpair->dsd_list);
+ sp->qpair->dsd_inuse -= ctx1->dsd_use_cnt;
+ sp->qpair->dsd_avail += ctx1->dsd_use_cnt;
sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
}
@@ -1488,8 +1490,9 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
goto eh_reset_failed;
}
err = 3;
- if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
- sdev->lun, WAIT_LUN) != QLA_SUCCESS) {
+ if (qla2x00_eh_wait_for_pending_commands(vha, fcport->d_id.b24,
+ cmd->device->lun,
+ WAIT_LUN) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800d,
"wait for pending cmds failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
@@ -1555,8 +1558,8 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
goto eh_reset_failed;
}
err = 3;
- if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
- 0, WAIT_TARGET) != QLA_SUCCESS) {
+ if (qla2x00_eh_wait_for_pending_commands(vha, fcport->d_id.b24, 0,
+ WAIT_TARGET) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800d,
"wait for pending cmds failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
@@ -3006,9 +3009,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->max_exchg = FW_MAX_EXCHANGES_CNT;
atomic_set(&ha->num_pend_mbx_stage1, 0);
atomic_set(&ha->num_pend_mbx_stage2, 0);
- atomic_set(&ha->num_pend_mbx_stage3, 0);
atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD);
ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD;
+ INIT_LIST_HEAD(&ha->tmf_pending);
+ INIT_LIST_HEAD(&ha->tmf_active);
/* Assign ISP specific operations. */
if (IS_QLA2100(ha)) {
@@ -3285,6 +3289,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_id = ha->max_fibre_devices;
host->cmd_per_lun = 3;
host->unique_id = host->host_no;
+
+ if (ql2xenabledif && ql2xenabledif != 2) {
+ ql_log(ql_log_warn, base_vha, 0x302d,
+ "Invalid value for ql2xenabledif, resetting it to default (2)\n");
+ ql2xenabledif = 2;
+ }
+
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
host->max_cmd_len = 32;
else
@@ -3521,8 +3532,6 @@ skip_dpc:
base_vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_init, base_vha, 0x00f1,
"Registering for DIF/DIX type 1 and 3 protection.\n");
- if (ql2xenabledif == 1)
- prot = SHOST_DIX_TYPE0_PROTECTION;
if (ql2xprotmask)
scsi_host_set_prot(host, ql2xprotmask);
else
@@ -4399,7 +4408,6 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
"sf_init_cb=%p.\n", ha->sf_init_cb);
}
- INIT_LIST_HEAD(&ha->gbl_dsd_list);
/* Get consistent memory allocated for Async Port-Database. */
if (!IS_FWI2_CAPABLE(ha)) {
@@ -4454,8 +4462,9 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16;
ha->elsrej.c = dma_alloc_coherent(&ha->pdev->dev,
- ha->elsrej.size, &ha->elsrej.cdma, GFP_KERNEL);
-
+ ha->elsrej.size,
+ &ha->elsrej.cdma,
+ GFP_KERNEL);
if (!ha->elsrej.c) {
ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff,
"Alloc failed for els reject cmd.\n");
@@ -4464,8 +4473,21 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->elsrej.c->er_cmd = ELS_LS_RJT;
ha->elsrej.c->er_reason = ELS_RJT_LOGIC;
ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA;
+
+ ha->lsrjt.size = sizeof(struct fcnvme_ls_rjt);
+ ha->lsrjt.c = dma_alloc_coherent(&ha->pdev->dev, ha->lsrjt.size,
+ &ha->lsrjt.cdma, GFP_KERNEL);
+ if (!ha->lsrjt.c) {
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff,
+ "Alloc failed for nvme fc reject cmd.\n");
+ goto fail_lsrjt;
+ }
+
return 0;
+fail_lsrjt:
+ dma_free_coherent(&ha->pdev->dev, ha->elsrej.size,
+ ha->elsrej.c, ha->elsrej.cdma);
fail_elsrej:
dma_pool_destroy(ha->purex_dma_pool);
fail_flt:
@@ -4931,18 +4953,16 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->gid_list = NULL;
ha->gid_list_dma = 0;
- if (IS_QLA82XX(ha)) {
- if (!list_empty(&ha->gbl_dsd_list)) {
- struct dsd_dma *dsd_ptr, *tdsd_ptr;
-
- /* clean up allocated prev pool */
- list_for_each_entry_safe(dsd_ptr,
- tdsd_ptr, &ha->gbl_dsd_list, list) {
- dma_pool_free(ha->dl_dma_pool,
- dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
- list_del(&dsd_ptr->list);
- kfree(dsd_ptr);
- }
+ if (!list_empty(&ha->base_qpair->dsd_list)) {
+ struct dsd_dma *dsd_ptr, *tdsd_ptr;
+
+ /* clean up allocated prev pool */
+ list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
+ &ha->base_qpair->dsd_list, list) {
+ dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
+ dsd_ptr->dsd_list_dma);
+ list_del(&dsd_ptr->list);
+ kfree(dsd_ptr);
}
}
@@ -4997,6 +5017,12 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->elsrej.c = NULL;
}
+ if (ha->lsrjt.c) {
+ dma_free_coherent(&ha->pdev->dev, ha->lsrjt.size, ha->lsrjt.c,
+ ha->lsrjt.cdma);
+ ha->lsrjt.c = NULL;
+ }
+
ha->init_cb = NULL;
ha->init_cb_dma = 0;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 5258b07687a9..2b815a9928ea 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1068,10 +1068,6 @@ void qlt_free_session_done(struct work_struct *work)
(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
}
- spin_lock_irqsave(&vha->work_lock, flags);
- sess->flags &= ~FCF_ASYNC_SENT;
- spin_unlock_irqrestore(&vha->work_lock, flags);
-
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (sess->se_sess) {
sess->se_sess = NULL;
@@ -1081,7 +1077,6 @@ void qlt_free_session_done(struct work_struct *work)
qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
- sess->deleted = QLA_SESS_DELETED;
if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
vha->fcport_count--;
@@ -1133,10 +1128,15 @@ void qlt_free_session_done(struct work_struct *work)
sess->explicit_logout = 0;
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- sess->free_pending = 0;
qla2x00_dfs_remove_rport(vha, sess);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ sess->flags &= ~FCF_ASYNC_SENT;
+ sess->deleted = QLA_SESS_DELETED;
+ sess->free_pending = 0;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
ql_dbg(ql_dbg_disc, vha, 0xf001,
"Unregistration of sess %p %8phC finished fcp_cnt %d\n",
sess, sess->port_name, vha->fcport_count);
@@ -1185,12 +1185,12 @@ void qlt_unreg_sess(struct fc_port *sess)
* management from being sent.
*/
sess->flags |= FCF_ASYNC_SENT;
+ sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
spin_unlock_irqrestore(&sess->vha->work_lock, flags);
if (sess->se_sess)
vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
- sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
sess->last_rscn_gen = sess->rscn_gen;
sess->last_login_gen = sess->login_gen;
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index e3771923b0d7..d903563e969e 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.08.400-k"
+#define QLA2XXX_VERSION "10.02.09.100-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
-#define QLA_DRIVER_PATCH_VER 8
-#define QLA_DRIVER_BETA_VER 400
+#define QLA_DRIVER_PATCH_VER 9
+#define QLA_DRIVER_BETA_VER 100
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index b2a3988e1e15..675332e49a7b 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -968,6 +968,11 @@ static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
memset(&chap_rec, 0, sizeof(chap_rec));
nla_for_each_attr(attr, data, len, rem) {
+ if (nla_len(attr) < sizeof(*param_info)) {
+ rc = -EINVAL;
+ goto exit_set_chap;
+ }
+
param_info = nla_data(attr);
switch (param_info->param) {
@@ -2750,6 +2755,11 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
}
nla_for_each_attr(attr, data, len, rem) {
+ if (nla_len(attr) < sizeof(*iface_param)) {
+ rval = -EINVAL;
+ goto exit_init_fw_cb;
+ }
+
iface_param = nla_data(attr);
if (iface_param->param_type == ISCSI_NET_PARAM) {
@@ -8104,6 +8114,11 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
memset((void *)&chap_tbl, 0, sizeof(chap_tbl));
nla_for_each_attr(attr, data, len, rem) {
+ if (nla_len(attr) < sizeof(*fnode_param)) {
+ rc = -EINVAL;
+ goto exit_set_param;
+ }
+
fnode_param = nla_data(attr);
switch (fnode_param->param) {
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 1e8fbd457248..3b95f7a6216f 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -28,7 +28,7 @@
#include <linux/jiffies.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/firmware.h>
#include <linux/pgtable.h>
@@ -843,7 +843,7 @@ static int qpti_map_queues(struct qlogicpti *qpti)
return 0;
}
-const char *qlogicpti_info(struct Scsi_Host *host)
+static const char *qlogicpti_info(struct Scsi_Host *host)
{
static char buf[80];
struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index 217b70c678c3..f795848b316c 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -3,6 +3,7 @@
#include <linux/seq_file.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_host.h>
#include "scsi_debugfs.h"
#define SCSI_CMD_FLAG_NAME(name)[const_ilog2(SCMD_##name)] = #name
@@ -33,14 +34,33 @@ static int scsi_flags_show(struct seq_file *m, const unsigned long flags,
void scsi_show_rq(struct seq_file *m, struct request *rq)
{
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq), *cmd2;
+ struct Scsi_Host *shost = cmd->device->host;
int alloc_ms = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc);
int timeout_ms = jiffies_to_msecs(rq->timeout);
+ const char *list_info = NULL;
char buf[80] = "(?)";
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry(cmd2, &shost->eh_abort_list, eh_entry) {
+ if (cmd == cmd2) {
+ list_info = "on eh_abort_list";
+ goto unlock;
+ }
+ }
+ list_for_each_entry(cmd2, &shost->eh_cmd_q, eh_entry) {
+ if (cmd == cmd2) {
+ list_info = "on eh_cmd_q";
+ goto unlock;
+ }
+ }
+unlock:
+ spin_unlock_irq(shost->host_lock);
+
__scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len);
- seq_printf(m, ", .cmd=%s, .retries=%d, .result = %#x, .flags=", buf,
- cmd->retries, cmd->result);
+ seq_printf(m, ", .cmd=%s, .retries=%d, .allowed=%d, .result = %#x, %s%s.flags=",
+ buf, cmd->retries, cmd->allowed, cmd->result,
+ list_info ? : "", list_info ? ", " : "");
scsi_flags_show(m, cmd->flags, scsi_cmd_flags,
ARRAY_SIZE(scsi_cmd_flags));
seq_printf(m, ", .timeout=%d.%03d, allocated %d.%03d s ago",
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 59176946ab56..c2f647a7c1b0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2454,7 +2454,7 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
envp[idx++] = "SDEV_MEDIA_CHANGE=1";
break;
case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
- scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_rescan_device(sdev);
envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
break;
case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index f42388ecb024..3f0dfb97db6b 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -103,7 +103,6 @@ bool scsi_noretry_cmd(struct scsi_cmnd *scmd);
void scsi_eh_done(struct scsi_cmnd *scmd);
/* scsi_lib.c */
-extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
extern void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd);
extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
@@ -138,7 +137,6 @@ extern int scsi_complete_async_scans(void);
extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
unsigned int, u64, enum scsi_scan_mode);
extern void scsi_forget_host(struct Scsi_Host *);
-extern void scsi_rescan_device(struct device *);
/* scsi_sysctl.c */
#ifdef CONFIG_SYSCTL
@@ -155,7 +153,6 @@ extern int scsi_sysfs_add_host(struct Scsi_Host *);
extern int scsi_sysfs_register(void);
extern void scsi_sysfs_unregister(void);
extern void scsi_sysfs_device_initialize(struct scsi_device *);
-extern int scsi_sysfs_target_initialize(struct scsi_device *);
extern struct scsi_transport_template blank_transport_template;
extern void __scsi_remove_device(struct scsi_device *);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index aa13feb17c62..52014b2d39e1 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1619,9 +1619,9 @@ int scsi_add_device(struct Scsi_Host *host, uint channel,
}
EXPORT_SYMBOL(scsi_add_device);
-void scsi_rescan_device(struct device *dev)
+void scsi_rescan_device(struct scsi_device *sdev)
{
- struct scsi_device *sdev = to_scsi_device(dev);
+ struct device *dev = &sdev->sdev_gendev;
device_lock(dev);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 60317676e45f..24f6eefb6803 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -747,7 +747,7 @@ static ssize_t
store_rescan_field (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- scsi_rescan_device(dev);
+ scsi_rescan_device(to_scsi_device(dev));
return count;
}
static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field);
@@ -840,7 +840,7 @@ store_state_field(struct device *dev, struct device_attribute *attr,
* waiting for pending I/O to finish.
*/
blk_mq_run_hw_queues(sdev->request_queue, true);
- scsi_rescan_device(dev);
+ scsi_rescan_device(sdev);
}
return ret == 0 ? count : -EINVAL;
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index e527ece12453..3075b2ddf7a6 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -3014,14 +3014,15 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev
}
static int
-iscsi_if_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+iscsi_if_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen)
{
char *data = (char*)ev + sizeof(*ev);
struct iscsi_cls_conn *conn;
struct iscsi_cls_session *session;
int err = 0, value = 0, state;
- if (ev->u.set_param.len > PAGE_SIZE)
+ if (ev->u.set_param.len > rlen ||
+ ev->u.set_param.len > PAGE_SIZE)
return -EINVAL;
session = iscsi_session_lookup(ev->u.set_param.sid);
@@ -3029,6 +3030,10 @@ iscsi_if_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
if (!conn || !session)
return -EINVAL;
+	/* Data is treated as a NUL-terminated string; check its length. */
+ if (strlen(data) > ev->u.set_param.len)
+ return -EINVAL;
+
switch (ev->u.set_param.param) {
case ISCSI_PARAM_SESS_RECOVERY_TMO:
sscanf(data, "%d", &value);
@@ -3118,7 +3123,7 @@ put_ep:
static int
iscsi_if_transport_ep(struct iscsi_transport *transport,
- struct iscsi_uevent *ev, int msg_type)
+ struct iscsi_uevent *ev, int msg_type, u32 rlen)
{
struct iscsi_endpoint *ep;
int rc = 0;
@@ -3126,7 +3131,10 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
switch (msg_type) {
case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
- rc = iscsi_if_ep_connect(transport, ev, msg_type);
+ if (rlen < sizeof(struct sockaddr))
+ rc = -EINVAL;
+ else
+ rc = iscsi_if_ep_connect(transport, ev, msg_type);
break;
case ISCSI_UEVENT_TRANSPORT_EP_POLL:
if (!transport->ep_poll)
@@ -3150,12 +3158,15 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
static int
iscsi_tgt_dscvr(struct iscsi_transport *transport,
- struct iscsi_uevent *ev)
+ struct iscsi_uevent *ev, u32 rlen)
{
struct Scsi_Host *shost;
struct sockaddr *dst_addr;
int err;
+ if (rlen < sizeof(*dst_addr))
+ return -EINVAL;
+
if (!transport->tgt_dscvr)
return -EINVAL;
@@ -3176,7 +3187,7 @@ iscsi_tgt_dscvr(struct iscsi_transport *transport,
static int
iscsi_set_host_param(struct iscsi_transport *transport,
- struct iscsi_uevent *ev)
+ struct iscsi_uevent *ev, u32 rlen)
{
char *data = (char*)ev + sizeof(*ev);
struct Scsi_Host *shost;
@@ -3185,7 +3196,8 @@ iscsi_set_host_param(struct iscsi_transport *transport,
if (!transport->set_host_param)
return -ENOSYS;
- if (ev->u.set_host_param.len > PAGE_SIZE)
+ if (ev->u.set_host_param.len > rlen ||
+ ev->u.set_host_param.len > PAGE_SIZE)
return -EINVAL;
shost = scsi_host_lookup(ev->u.set_host_param.host_no);
@@ -3195,6 +3207,10 @@ iscsi_set_host_param(struct iscsi_transport *transport,
return -ENODEV;
}
+ /* see similar check in iscsi_if_set_param() */
+ if (strlen(data) > ev->u.set_host_param.len)
+ return -EINVAL;
+
err = transport->set_host_param(shost, ev->u.set_host_param.param,
data, ev->u.set_host_param.len);
scsi_host_put(shost);
@@ -3202,12 +3218,15 @@ iscsi_set_host_param(struct iscsi_transport *transport,
}
static int
-iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen)
{
struct Scsi_Host *shost;
struct iscsi_path *params;
int err;
+ if (rlen < sizeof(*params))
+ return -EINVAL;
+
if (!transport->set_path)
return -ENOSYS;
@@ -3267,12 +3286,15 @@ iscsi_set_iface_params(struct iscsi_transport *transport,
}
static int
-iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen)
{
struct Scsi_Host *shost;
struct sockaddr *dst_addr;
int err;
+ if (rlen < sizeof(*dst_addr))
+ return -EINVAL;
+
if (!transport->send_ping)
return -ENOSYS;
@@ -3770,13 +3792,12 @@ exit_host_stats:
}
static int iscsi_if_transport_conn(struct iscsi_transport *transport,
- struct nlmsghdr *nlh)
+ struct nlmsghdr *nlh, u32 pdu_len)
{
struct iscsi_uevent *ev = nlmsg_data(nlh);
struct iscsi_cls_session *session;
struct iscsi_cls_conn *conn = NULL;
struct iscsi_endpoint *ep;
- uint32_t pdu_len;
int err = 0;
switch (nlh->nlmsg_type) {
@@ -3861,8 +3882,6 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
break;
case ISCSI_UEVENT_SEND_PDU:
- pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
-
if ((ev->u.send_pdu.hdr_size > pdu_len) ||
(ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) {
err = -EINVAL;
@@ -3892,6 +3911,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
struct iscsi_internal *priv;
struct iscsi_cls_session *session;
struct iscsi_endpoint *ep = NULL;
+ u32 rlen;
if (!netlink_capable(skb, CAP_SYS_ADMIN))
return -EPERM;
@@ -3911,6 +3931,13 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
portid = NETLINK_CB(skb).portid;
+ /*
+	 * Even though the remaining payload may not be a nlattr (it may be
+	 * an address or something else), calculate the remaining length
+	 * here to simplify the length checks that follow.
+ */
+ rlen = nlmsg_attrlen(nlh, sizeof(*ev));
+
switch (nlh->nlmsg_type) {
case ISCSI_UEVENT_CREATE_SESSION:
err = iscsi_if_create_session(priv, ep, ev,
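
The comment in the hunk above explains why the payload length remaining after the iscsi_uevent header is computed once with nlmsg_attrlen() and passed to every handler: each handler can then reject an undersized payload before interpreting it. A minimal standalone model of that discipline, using plain structs and memcpy rather than the netlink API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct event_hdr { uint32_t type; uint32_t len; };
struct addr_blob { uint8_t bytes[16]; };	/* stand-in for a sockaddr */

static int handle_ping(const uint8_t *payload, uint32_t rlen)
{
	struct addr_blob addr;

	if (rlen < sizeof(addr))		/* reject truncated payloads */
		return -1;
	memcpy(&addr, payload, sizeof(addr));
	return 0;
}

static int dispatch(const uint8_t *msg, uint32_t msg_len)
{
	uint32_t rlen;

	if (msg_len < sizeof(struct event_hdr))
		return -1;
	/* computed once, then handed to every handler, as in the patch */
	rlen = msg_len - sizeof(struct event_hdr);
	return handle_ping(msg + sizeof(struct event_hdr), rlen);
}

int main(void)
{
	uint8_t msg[sizeof(struct event_hdr) + 8] = { 0 };

	printf("8-byte payload for a 16-byte address -> %d\n",
	       dispatch(msg, sizeof(msg)));
	return 0;
}
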
@@ -3967,7 +3994,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
err = -EINVAL;
break;
case ISCSI_UEVENT_SET_PARAM:
- err = iscsi_if_set_param(transport, ev);
+ err = iscsi_if_set_param(transport, ev, rlen);
break;
case ISCSI_UEVENT_CREATE_CONN:
case ISCSI_UEVENT_DESTROY_CONN:
@@ -3975,7 +4002,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
case ISCSI_UEVENT_START_CONN:
case ISCSI_UEVENT_BIND_CONN:
case ISCSI_UEVENT_SEND_PDU:
- err = iscsi_if_transport_conn(transport, nlh);
+ err = iscsi_if_transport_conn(transport, nlh, rlen);
break;
case ISCSI_UEVENT_GET_STATS:
err = iscsi_if_get_stats(transport, nlh);
@@ -3984,23 +4011,22 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
case ISCSI_UEVENT_TRANSPORT_EP_POLL:
case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
- err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type);
+ err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type, rlen);
break;
case ISCSI_UEVENT_TGT_DSCVR:
- err = iscsi_tgt_dscvr(transport, ev);
+ err = iscsi_tgt_dscvr(transport, ev, rlen);
break;
case ISCSI_UEVENT_SET_HOST_PARAM:
- err = iscsi_set_host_param(transport, ev);
+ err = iscsi_set_host_param(transport, ev, rlen);
break;
case ISCSI_UEVENT_PATH_UPDATE:
- err = iscsi_set_path(transport, ev);
+ err = iscsi_set_path(transport, ev, rlen);
break;
case ISCSI_UEVENT_SET_IFACE_PARAMS:
- err = iscsi_set_iface_params(transport, ev,
- nlmsg_attrlen(nlh, sizeof(*ev)));
+ err = iscsi_set_iface_params(transport, ev, rlen);
break;
case ISCSI_UEVENT_PING:
- err = iscsi_send_ping(transport, ev);
+ err = iscsi_send_ping(transport, ev, rlen);
break;
case ISCSI_UEVENT_GET_CHAP:
err = iscsi_get_chap(transport, nlh);
@@ -4009,13 +4035,10 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
err = iscsi_delete_chap(transport, ev);
break;
case ISCSI_UEVENT_SET_FLASHNODE_PARAMS:
- err = iscsi_set_flashnode_param(transport, ev,
- nlmsg_attrlen(nlh,
- sizeof(*ev)));
+ err = iscsi_set_flashnode_param(transport, ev, rlen);
break;
case ISCSI_UEVENT_NEW_FLASHNODE:
- err = iscsi_new_flashnode(transport, ev,
- nlmsg_attrlen(nlh, sizeof(*ev)));
+ err = iscsi_new_flashnode(transport, ev, rlen);
break;
case ISCSI_UEVENT_DEL_FLASHNODE:
err = iscsi_del_flashnode(transport, ev);
@@ -4030,8 +4053,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
err = iscsi_logout_flashnode_sid(transport, ev);
break;
case ISCSI_UEVENT_SET_CHAP:
- err = iscsi_set_chap(transport, ev,
- nlmsg_attrlen(nlh, sizeof(*ev)));
+ err = iscsi_set_chap(transport, ev, rlen);
break;
case ISCSI_UEVENT_GET_HOST_STATS:
err = iscsi_get_host_stats(transport, nlh);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3c668cfb146d..c92a317ba547 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -104,19 +104,7 @@ static void sd_config_discard(struct scsi_disk *, unsigned int);
static void sd_config_write_same(struct scsi_disk *);
static int sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
-static int sd_probe(struct device *);
-static int sd_remove(struct device *);
static void sd_shutdown(struct device *);
-static int sd_suspend_system(struct device *);
-static int sd_suspend_runtime(struct device *);
-static int sd_resume_system(struct device *);
-static int sd_resume_runtime(struct device *);
-static void sd_rescan(struct device *);
-static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
-static void sd_uninit_command(struct scsi_cmnd *SCpnt);
-static int sd_done(struct scsi_cmnd *);
-static void sd_eh_reset(struct scsi_cmnd *);
-static int sd_eh_action(struct scsi_cmnd *, int);
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
static void scsi_disk_release(struct device *cdev);
@@ -592,33 +580,6 @@ static struct class sd_disk_class = {
.dev_groups = sd_disk_groups,
};
-static const struct dev_pm_ops sd_pm_ops = {
- .suspend = sd_suspend_system,
- .resume = sd_resume_system,
- .poweroff = sd_suspend_system,
- .restore = sd_resume_system,
- .runtime_suspend = sd_suspend_runtime,
- .runtime_resume = sd_resume_runtime,
-};
-
-static struct scsi_driver sd_template = {
- .gendrv = {
- .name = "sd",
- .owner = THIS_MODULE,
- .probe = sd_probe,
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .remove = sd_remove,
- .shutdown = sd_shutdown,
- .pm = &sd_pm_ops,
- },
- .rescan = sd_rescan,
- .init_command = sd_init_command,
- .uninit_command = sd_uninit_command,
- .done = sd_done,
- .eh_action = sd_eh_action,
- .eh_reset = sd_eh_reset,
-};
-
/*
* Don't request a new module, as that could deadlock in multipath
* environment.
@@ -3929,6 +3890,33 @@ static int sd_resume_runtime(struct device *dev)
return sd_resume(dev);
}
+static const struct dev_pm_ops sd_pm_ops = {
+ .suspend = sd_suspend_system,
+ .resume = sd_resume_system,
+ .poweroff = sd_suspend_system,
+ .restore = sd_resume_system,
+ .runtime_suspend = sd_suspend_runtime,
+ .runtime_resume = sd_resume_runtime,
+};
+
+static struct scsi_driver sd_template = {
+ .gendrv = {
+ .name = "sd",
+ .owner = THIS_MODULE,
+ .probe = sd_probe,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .remove = sd_remove,
+ .shutdown = sd_shutdown,
+ .pm = &sd_pm_ops,
+ },
+ .rescan = sd_rescan,
+ .init_command = sd_init_command,
+ .uninit_command = sd_uninit_command,
+ .done = sd_done,
+ .eh_action = sd_eh_action,
+ .eh_reset = sd_eh_reset,
+};
+
/**
* init_sd - entry point for this driver (both when built in or when
* a module).
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index e392eaf5b2bf..041940183516 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -710,7 +710,7 @@ typedef u32 pqi_index_t;
#define SOP_TMF_COMPLETE 0x0
#define SOP_TMF_REJECTED 0x4
#define SOP_TMF_FUNCTION_SUCCEEDED 0x8
-#define SOP_RC_INCORRECT_LOGICAL_UNIT 0x9
+#define SOP_TMF_INCORRECT_LOGICAL_UNIT 0x9
/* additional CDB bytes usage field codes */
#define SOP_ADDITIONAL_CDB_BYTES_0 0 /* 16-byte CDB */
@@ -1085,7 +1085,16 @@ struct pqi_stream_data {
u32 last_accessed;
};
-#define PQI_MAX_LUNS_PER_DEVICE 256
+#define PQI_MAX_LUNS_PER_DEVICE 256
+
+struct pqi_tmf_work {
+ struct work_struct work_struct;
+ struct scsi_cmnd *scmd;
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_scsi_dev *device;
+ u8 lun;
+ u8 scsi_opcode;
+};
struct pqi_scsi_dev {
int devtype; /* as reported by INQUIRY command */
@@ -1111,6 +1120,7 @@ struct pqi_scsi_dev {
u8 erase_in_progress : 1;
bool aio_enabled; /* only valid for physical disks */
bool in_remove;
+ bool in_reset[PQI_MAX_LUNS_PER_DEVICE];
bool device_offline;
u8 vendor[8]; /* bytes 8-15 of inquiry data */
u8 model[16]; /* bytes 16-31 of inquiry data */
@@ -1149,6 +1159,8 @@ struct pqi_scsi_dev {
struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE];
unsigned int raid_bypass_cnt;
+
+ struct pqi_tmf_work tmf_work[PQI_MAX_LUNS_PER_DEVICE];
};
/* VPD inquiry pages */
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 6aaaa7ebca37..9a58df9312fa 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.22-040"
+#define DRIVER_VERSION "2.1.24-046"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 22
-#define DRIVER_REVISION 40
+#define DRIVER_RELEASE 24
+#define DRIVER_REVISION 46
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -48,6 +48,8 @@
#define PQI_POST_RESET_DELAY_SECS 5
#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10
+#define PQI_NO_COMPLETION ((void *)-1)
+
MODULE_AUTHOR("Microchip");
MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
DRIVER_VERSION);
@@ -96,6 +98,7 @@ static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
+static void pqi_tmf_worker(struct work_struct *work);
/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
@@ -455,6 +458,21 @@ static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
return device->in_remove;
}
+static inline void pqi_device_reset_start(struct pqi_scsi_dev *device, u8 lun)
+{
+ device->in_reset[lun] = true;
+}
+
+static inline void pqi_device_reset_done(struct pqi_scsi_dev *device, u8 lun)
+{
+ device->in_reset[lun] = false;
+}
+
+static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device, u8 lun)
+{
+ return device->in_reset[lun];
+}
+
static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
int index;
@@ -2137,6 +2155,15 @@ static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
return device->sdev != NULL;
}
+static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device)
+{
+ unsigned int lun;
+ struct pqi_tmf_work *tmf_work;
+
+ for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++)
+ INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker);
+}
+
static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
@@ -2217,6 +2244,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
list_add_tail(&device->add_list_entry, &add_list);
/* To prevent this device structure from being freed later. */
device->keep_device = true;
+ pqi_init_device_tmf_work(device);
}
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -2257,7 +2285,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
device->advertised_queue_depth = device->queue_depth;
scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
if (device->rescan) {
- scsi_rescan_device(&device->sdev->sdev_gendev);
+ scsi_rescan_device(device->sdev);
device->rescan = false;
}
}
@@ -3330,7 +3358,7 @@ static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_inf
case SOP_TMF_REJECTED:
rc = -EAGAIN;
break;
- case SOP_RC_INCORRECT_LOGICAL_UNIT:
+ case SOP_TMF_INCORRECT_LOGICAL_UNIT:
rc = -ENODEV;
break;
default:
@@ -5628,7 +5656,6 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
int rc;
struct pqi_io_request *io_request;
struct pqi_aio_path_request *request;
- struct pqi_scsi_dev *device;
io_request = pqi_alloc_io_request(ctrl_info, scmd);
if (!io_request)
@@ -5648,9 +5675,8 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
request->command_priority = io_high_prio;
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
- device = scmd->device->hostdata;
- if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
- put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number);
+ if (!raid_bypass && ctrl_info->multi_lun_device_supported)
+ put_unaligned_le64(scmd->device->lun << 8, &request->lun_number);
if (cdb_length > sizeof(request->cdb))
cdb_length = sizeof(request->cdb);
request->cdb_length = cdb_length;
@@ -5850,6 +5876,7 @@ static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
{
struct pqi_scsi_dev *device;
+ struct completion *wait;
if (!scmd->device) {
set_host_byte(scmd, DID_NO_CONNECT);
@@ -5863,6 +5890,10 @@ void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
}
atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
+
+ wait = (struct completion *)xchg(&scmd->host_scribble, NULL);
+ if (wait != PQI_NO_COMPLETION)
+ complete(wait);
}
static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
@@ -5948,6 +5979,9 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
u16 hw_queue;
struct pqi_queue_group *queue_group;
bool raid_bypassed;
+ u8 lun;
+
+ scmd->host_scribble = PQI_NO_COMPLETION;
device = scmd->device->hostdata;
@@ -5957,7 +5991,9 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
return 0;
}
- atomic_inc(&device->scsi_cmds_outstanding[scmd->device->lun]);
+ lun = (u8)scmd->device->lun;
+
+ atomic_inc(&device->scsi_cmds_outstanding[lun]);
ctrl_info = shost_to_hba(shost);
@@ -5967,7 +6003,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
return 0;
}
- if (pqi_ctrl_blocked(ctrl_info)) {
+ if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device, lun)) {
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
@@ -6002,8 +6038,10 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
}
out:
- if (rc)
- atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
+ if (rc) {
+ scmd->host_scribble = NULL;
+ atomic_dec(&device->scsi_cmds_outstanding[lun]);
+ }
return rc;
}
@@ -6097,7 +6135,7 @@ static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
}
static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+ struct pqi_scsi_dev *device, u8 lun)
{
unsigned int i;
unsigned int path;
@@ -6127,6 +6165,9 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
if (scsi_device != device)
continue;
+ if ((u8)scmd->device->lun != lun)
+ continue;
+
list_del(&io_request->request_list_entry);
set_host_byte(scmd, DID_RESET);
pqi_free_io_request(io_request);
@@ -6224,15 +6265,13 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
#define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30
-static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
+static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
{
int rc;
struct pqi_io_request *io_request;
DECLARE_COMPLETION_ONSTACK(wait);
struct pqi_task_management_request *request;
- struct pqi_scsi_dev *device;
- device = scmd->device->hostdata;
io_request = pqi_alloc_io_request(ctrl_info, NULL);
io_request->io_complete_callback = pqi_lun_reset_complete;
io_request->context = &wait;
@@ -6247,7 +6286,7 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd
memcpy(request->lun_number, device->scsi3addr,
sizeof(request->lun_number));
if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
- request->ml_device_lun_number = (u8)scmd->device->lun;
+ request->ml_device_lun_number = lun;
request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
if (ctrl_info->tmf_iu_timeout_supported)
put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
@@ -6255,7 +6294,7 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd
pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
io_request);
- rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, (u8)scmd->device->lun, &wait);
+ rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, lun, &wait);
if (rc == 0)
rc = io_request->status;
@@ -6269,18 +6308,16 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd
#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000)
#define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)
-static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
+static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
{
int reset_rc;
int wait_rc;
unsigned int retries;
unsigned long timeout_msecs;
- struct pqi_scsi_dev *device;
- device = scmd->device->hostdata;
for (retries = 0;;) {
- reset_rc = pqi_lun_reset(ctrl_info, scmd);
- if (reset_rc == 0 || reset_rc == -ENODEV || ++retries > PQI_LUN_RESET_RETRIES)
+ reset_rc = pqi_lun_reset(ctrl_info, device, lun);
+ if (reset_rc == 0 || reset_rc == -ENODEV || reset_rc == -ENXIO || ++retries > PQI_LUN_RESET_RETRIES)
break;
msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
}
@@ -6288,60 +6325,51 @@ static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct sc
timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
- wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, scmd->device->lun, timeout_msecs);
+ wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, timeout_msecs);
if (wait_rc && reset_rc == 0)
reset_rc = wait_rc;
return reset_rc == 0 ? SUCCESS : FAILED;
}
-static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
+static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
{
int rc;
- struct pqi_scsi_dev *device;
- device = scmd->device->hostdata;
pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
- pqi_fail_io_queued_for_device(ctrl_info, device);
+ pqi_fail_io_queued_for_device(ctrl_info, device, lun);
rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
+ pqi_device_reset_start(device, lun);
+ pqi_ctrl_unblock_requests(ctrl_info);
if (rc)
rc = FAILED;
else
- rc = pqi_lun_reset_with_retries(ctrl_info, scmd);
- pqi_ctrl_unblock_requests(ctrl_info);
+ rc = pqi_lun_reset_with_retries(ctrl_info, device, lun);
+ pqi_device_reset_done(device, lun);
return rc;
}
-static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
+static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode)
{
int rc;
- struct Scsi_Host *shost;
- struct pqi_ctrl_info *ctrl_info;
- struct pqi_scsi_dev *device;
-
- shost = scmd->device->host;
- ctrl_info = shost_to_hba(shost);
- device = scmd->device->hostdata;
mutex_lock(&ctrl_info->lun_reset_mutex);
dev_err(&ctrl_info->pci_dev->dev,
- "resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
- shost->host_no,
- device->bus, device->target, (u32)scmd->device->lun,
- scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
+ "resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode);
pqi_check_ctrl_health(ctrl_info);
if (pqi_ctrl_offline(ctrl_info))
rc = FAILED;
else
- rc = pqi_device_reset(ctrl_info, scmd);
+ rc = pqi_device_reset(ctrl_info, device, lun);
dev_err(&ctrl_info->pci_dev->dev,
- "reset of scsi %d:%d:%d:%d: %s\n",
- shost->host_no, device->bus, device->target, (u32)scmd->device->lun,
+ "reset of scsi %d:%d:%d:%u: %s\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target, lun,
rc == SUCCESS ? "SUCCESS" : "FAILED");
mutex_unlock(&ctrl_info->lun_reset_mutex);
@@ -6349,6 +6377,77 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
return rc;
}
+static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
+{
+ struct Scsi_Host *shost;
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_scsi_dev *device;
+ u8 scsi_opcode;
+
+ shost = scmd->device->host;
+ ctrl_info = shost_to_hba(shost);
+ device = scmd->device->hostdata;
+ scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
+
+ return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode);
+}
+
+static void pqi_tmf_worker(struct work_struct *work)
+{
+ struct pqi_tmf_work *tmf_work;
+ struct scsi_cmnd *scmd;
+
+ tmf_work = container_of(work, struct pqi_tmf_work, work_struct);
+ scmd = (struct scsi_cmnd *)xchg(&tmf_work->scmd, NULL);
+
+ pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scsi_opcode);
+}
+
+static int pqi_eh_abort_handler(struct scsi_cmnd *scmd)
+{
+ struct Scsi_Host *shost;
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_scsi_dev *device;
+ struct pqi_tmf_work *tmf_work;
+ DECLARE_COMPLETION_ONSTACK(wait);
+
+ shost = scmd->device->host;
+ ctrl_info = shost_to_hba(shost);
+ device = scmd->device->hostdata;
+
+ dev_err(&ctrl_info->pci_dev->dev,
+ "attempting TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p\n",
+ shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
+
+ if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d for SCSI cmd at %p already completed\n",
+ shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
+ scmd->result = DID_RESET << 16;
+ goto out;
+ }
+
+ tmf_work = &device->tmf_work[scmd->device->lun];
+
+ if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) {
+ tmf_work->ctrl_info = ctrl_info;
+ tmf_work->device = device;
+ tmf_work->lun = (u8)scmd->device->lun;
+ tmf_work->scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
+ schedule_work(&tmf_work->work_struct);
+ }
+
+ wait_for_completion(&wait);
+
+ dev_err(&ctrl_info->pci_dev->dev,
+ "TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p: SUCCESS\n",
+ shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
+
+out:
+
+ return SUCCESS;
+}
+
static int pqi_slave_alloc(struct scsi_device *sdev)
{
struct pqi_scsi_dev *device;
@@ -6470,21 +6569,21 @@ static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *ar
struct pci_dev *pci_dev;
u32 subsystem_vendor;
u32 subsystem_device;
- cciss_pci_info_struct pciinfo;
+ cciss_pci_info_struct pci_info;
if (!arg)
return -EINVAL;
pci_dev = ctrl_info->pci_dev;
- pciinfo.domain = pci_domain_nr(pci_dev->bus);
- pciinfo.bus = pci_dev->bus->number;
- pciinfo.dev_fn = pci_dev->devfn;
+ pci_info.domain = pci_domain_nr(pci_dev->bus);
+ pci_info.bus = pci_dev->bus->number;
+ pci_info.dev_fn = pci_dev->devfn;
subsystem_vendor = pci_dev->subsystem_vendor;
subsystem_device = pci_dev->subsystem_device;
- pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
+ pci_info.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
- if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
+ if (copy_to_user(arg, &pci_info, sizeof(pci_info)))
return -EFAULT;
return 0;
@@ -7362,6 +7461,7 @@ static const struct scsi_host_template pqi_driver_template = {
.scan_finished = pqi_scan_finished,
.this_id = -1,
.eh_device_reset_handler = pqi_eh_device_reset_handler,
+ .eh_abort_handler = pqi_eh_abort_handler,
.ioctl = pqi_ioctl,
.slave_alloc = pqi_slave_alloc,
.slave_configure = pqi_slave_configure,
@@ -8904,6 +9004,52 @@ static void pqi_ctrl_offline_worker(struct work_struct *work)
pqi_take_ctrl_offline_deferred(ctrl_info);
}
+static char *pqi_ctrl_shutdown_reason_to_string(enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
+{
+ char *string;
+
+ switch (ctrl_shutdown_reason) {
+ case PQI_IQ_NOT_DRAINED_TIMEOUT:
+ string = "inbound queue not drained timeout";
+ break;
+ case PQI_LUN_RESET_TIMEOUT:
+ string = "LUN reset timeout";
+ break;
+ case PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT:
+ string = "I/O pending timeout after LUN reset";
+ break;
+ case PQI_NO_HEARTBEAT:
+ string = "no controller heartbeat detected";
+ break;
+ case PQI_FIRMWARE_KERNEL_NOT_UP:
+ string = "firmware kernel not ready";
+ break;
+ case PQI_OFA_RESPONSE_TIMEOUT:
+ string = "OFA response timeout";
+ break;
+ case PQI_INVALID_REQ_ID:
+ string = "invalid request ID";
+ break;
+ case PQI_UNMATCHED_REQ_ID:
+ string = "unmatched request ID";
+ break;
+ case PQI_IO_PI_OUT_OF_RANGE:
+ string = "I/O queue producer index out of range";
+ break;
+ case PQI_EVENT_PI_OUT_OF_RANGE:
+ string = "event queue producer index out of range";
+ break;
+ case PQI_UNEXPECTED_IU_TYPE:
+ string = "unexpected IU type";
+ break;
+ default:
+ string = "unknown reason";
+ break;
+ }
+
+ return string;
+}
+
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{
@@ -8916,7 +9062,9 @@ static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
if (!pqi_disable_ctrl_shutdown)
sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
pci_disable_device(ctrl_info->pci_dev);
- dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
+ dev_err(&ctrl_info->pci_dev->dev,
+ "controller offline: reason code 0x%x (%s)\n",
+ ctrl_shutdown_reason, pqi_ctrl_shutdown_reason_to_string(ctrl_shutdown_reason));
schedule_work(&ctrl_info->ctrl_offline_work);
}
@@ -9062,7 +9210,7 @@ static void pqi_shutdown(struct pci_dev *pci_dev)
rc = pqi_flush_cache(ctrl_info, shutdown_event);
if (rc)
dev_err(&pci_dev->dev,
- "unable to flush controller cache\n");
+ "unable to flush controller cache during shutdown\n");
pqi_crash_if_pending_command(ctrl_info);
pqi_reset(ctrl_info);
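
The smartpqi hunks above hand a completion from the new abort handler to the I/O completion path through scmd->host_scribble, using cmpxchg()/xchg() to decide which side owns it (PQI_NO_COMPLETION means "still in flight", NULL means "already done"). Below is a minimal userspace sketch of that handoff only, with C11 atomics and a pthread condition variable standing in for the kernel's cmpxchg()/xchg() and completions; fake_cmd, fake_abort and fake_complete are invented names, not driver code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NO_COMPLETION ((void *)-1)          /* mirrors PQI_NO_COMPLETION */

struct fake_cmd {
	_Atomic(void *) scribble;           /* stands in for scmd->host_scribble */
};

struct waiter {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

/* Completion path: take whatever is parked in ->scribble and wake it. */
static void fake_complete(struct fake_cmd *cmd)
{
	struct waiter *w = atomic_exchange(&cmd->scribble, NULL);

	if (w != NULL && (void *)w != NO_COMPLETION) {
		pthread_mutex_lock(&w->lock);
		w->done = 1;
		pthread_cond_signal(&w->cond);
		pthread_mutex_unlock(&w->lock);
	}
}

/* Abort path: park a waiter only if the command is still outstanding. */
static int fake_abort(struct fake_cmd *cmd)
{
	struct waiter w = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };
	void *expected = NO_COMPLETION;

	if (!atomic_compare_exchange_strong(&cmd->scribble, &expected, (void *)&w))
		return 0;                   /* already completed, nothing to wait for */

	fake_complete(cmd);                 /* the driver schedules a TMF worker here instead */

	pthread_mutex_lock(&w.lock);
	while (!w.done)
		pthread_cond_wait(&w.cond, &w.lock);
	pthread_mutex_unlock(&w.lock);
	return 1;
}

int main(void)
{
	struct fake_cmd cmd = { .scribble = NO_COMPLETION };

	printf("first abort waited: %d\n", fake_abort(&cmd));   /* 1: command was in flight */
	printf("second abort waited: %d\n", fake_abort(&cmd));  /* 0: already completed */
	return 0;
}
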
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 14d7981ddcdd..338aa8c42968 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -414,6 +414,8 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
if (cmdstatp->have_sense &&
cmdstatp->sense_hdr.asc == 0 && cmdstatp->sense_hdr.ascq == 0x17)
STp->cleaning_req = 1; /* ASC and ASCQ => cleaning requested */
+ if (cmdstatp->have_sense && scode == UNIT_ATTENTION && cmdstatp->sense_hdr.asc == 0x29)
+ STp->pos_unknown = 1; /* ASC => power on / reset */
STp->pos_unknown |= STp->device->was_reset;
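
The st.c change above marks the tape position unknown when a command returns UNIT ATTENTION with ASC 0x29 ("power on, reset, or bus device reset occurred"). The following standalone sketch only illustrates where those fields sit in fixed-format sense data; parse_fixed_sense and the sample buffer are made up for the example and are not driver code.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define UNIT_ATTENTION 0x06

struct parsed_sense {
	uint8_t sense_key;
	uint8_t asc;
	uint8_t ascq;
};

/* Fixed-format sense (response codes 0x70/0x71): key in byte 2, ASC/ASCQ in bytes 12/13. */
static int parse_fixed_sense(const uint8_t *buf, size_t len, struct parsed_sense *out)
{
	uint8_t rc;

	if (len < 14)
		return -1;
	rc = buf[0] & 0x7f;
	if (rc != 0x70 && rc != 0x71)   /* fixed format only */
		return -1;
	out->sense_key = buf[2] & 0x0f;
	out->asc = buf[12];
	out->ascq = buf[13];
	return 0;
}

int main(void)
{
	/* Fabricated example: UNIT ATTENTION with ASC 0x29 (power on / reset). */
	uint8_t sense[18] = { 0x70, 0, UNIT_ATTENTION, 0, 0, 0, 0, 10,
			      0, 0, 0, 0, 0x29, 0x00 };
	struct parsed_sense p;

	if (parse_fixed_sense(sense, sizeof(sense), &p) == 0 &&
	    p.sense_key == UNIT_ATTENTION && p.asc == 0x29)
		printf("would set pos_unknown\n");
	return 0;
}
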
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 047ffaf7d42a..a95936b18f69 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -316,6 +316,9 @@ enum storvsc_request_type {
#define SRB_STATUS_ABORTED 0x02
#define SRB_STATUS_ERROR 0x04
#define SRB_STATUS_INVALID_REQUEST 0x06
+#define SRB_STATUS_TIMEOUT 0x09
+#define SRB_STATUS_SELECTION_TIMEOUT 0x0A
+#define SRB_STATUS_BUS_RESET 0x0E
#define SRB_STATUS_DATA_OVERRUN 0x12
#define SRB_STATUS_INVALID_LUN 0x20
#define SRB_STATUS_INTERNAL_ERROR 0x30
@@ -472,7 +475,7 @@ static void storvsc_device_scan(struct work_struct *work)
sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
if (!sdev)
goto done;
- scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_rescan_device(sdev);
scsi_device_put(sdev);
done:
@@ -981,6 +984,10 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
case SRB_STATUS_ABORTED:
case SRB_STATUS_INVALID_REQUEST:
case SRB_STATUS_INTERNAL_ERROR:
+ case SRB_STATUS_TIMEOUT:
+ case SRB_STATUS_SELECTION_TIMEOUT:
+ case SRB_STATUS_BUS_RESET:
+ case SRB_STATUS_DATA_OVERRUN:
if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) {
/* Check for capacity change */
if ((asc == 0x2a) && (ascq == 0x9)) {
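
The storvsc hunks above add three SRB status codes and route them (plus DATA_OVERRUN) through the same error handling as the existing cases. The small decoder below is only a reading aid for such status values, not driver code; the 0x80 autosense flag value is an assumption not shown in this diff (the driver strips flag bits like it before comparing).

#include <stdio.h>

#define SRB_STATUS_ABORTED            0x02
#define SRB_STATUS_ERROR              0x04
#define SRB_STATUS_INVALID_REQUEST    0x06
#define SRB_STATUS_TIMEOUT            0x09
#define SRB_STATUS_SELECTION_TIMEOUT  0x0A
#define SRB_STATUS_BUS_RESET          0x0E
#define SRB_STATUS_DATA_OVERRUN       0x12
#define SRB_STATUS_AUTOSENSE_VALID    0x80  /* assumed flag bit, not part of the diff */

static const char *srb_status_name(unsigned int status)
{
	switch (status & ~SRB_STATUS_AUTOSENSE_VALID) { /* drop the sense flag */
	case SRB_STATUS_ABORTED:           return "aborted";
	case SRB_STATUS_ERROR:             return "error";
	case SRB_STATUS_INVALID_REQUEST:   return "invalid request";
	case SRB_STATUS_TIMEOUT:           return "timeout";
	case SRB_STATUS_SELECTION_TIMEOUT: return "selection timeout";
	case SRB_STATUS_BUS_RESET:         return "bus reset";
	case SRB_STATUS_DATA_OVERRUN:      return "data overrun";
	default:                           return "unhandled";
	}
}

int main(void)
{
	printf("0x8A -> %s\n", srb_status_name(0x8A)); /* selection timeout with autosense */
	return 0;
}
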
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index d06e933191a2..afa9d02a33ec 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -12,7 +12,8 @@
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <asm/irq.h>
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index bd5633667d01..9d1bdcdc1331 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -325,7 +325,7 @@ static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
/* Handle "Parameters changed", "Mode parameters changed", and
"Capacity data has changed". */
if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
- scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_rescan_device(sdev);
scsi_device_put(sdev);
}
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index caae61aa2afe..9ec55ddc1204 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -743,7 +743,7 @@ static int scsifront_sdev_configure(struct scsi_device *sdev)
if (info->host_active == STATE_ERROR)
return -EIO;
- if (info && current == info->curr) {
+ if (current == info->curr) {
err = xenbus_printf(XBT_NIL, info->dev->nodename,
info->dev_state_path, "%d", XenbusStateConnected);
if (err) {
@@ -761,7 +761,7 @@ static void scsifront_sdev_destroy(struct scsi_device *sdev)
struct vscsifrnt_info *info = shost_priv(sdev->host);
int err;
- if (info && current == info->curr) {
+ if (current == info->curr) {
err = xenbus_printf(XBT_NIL, info->dev->nodename,
info->dev_state_path, "%d", XenbusStateClosed);
if (err)
@@ -903,7 +903,7 @@ static int scsifront_probe(struct xenbus_device *dev,
xenbus_dev_fatal(dev, err, "fail to allocate scsi host");
return err;
}
- info = (struct vscsifrnt_info *)host->hostdata;
+ info = shost_priv(host);
dev_set_drvdata(&dev->dev, info);
info->dev = dev;