author    Linus Torvalds <torvalds@linux-foundation.org>  2016-12-14 21:49:33 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-12-14 21:49:33 +0300
commit    a829a8445f09036404060f4d6489cb13433f4304
tree      60067e1425239a9f372c10100ede39691c3d612b /drivers
parent    84b6079134420f4635f23c2088a3892057b23bb0
parent    f5b893c947151d424a4ab55ea3a8544b81974b31
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This update includes the usual round of major driver updates (ncr5380,
  lpfc, hisi_sas, megaraid_sas, ufs, ibmvscsis, mpt3sas). There's also
  an assortment of minor fixes, mostly in error legs or other not very
  user visible stuff. The major change is the pci_alloc_irq_vectors
  replacement for the old pci_msix_.. calls; this effectively makes IRQ
  mapping generic for the drivers and allows blk_mq to use the
  information"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (256 commits)
  scsi: qla4xxx: switch to pci_alloc_irq_vectors
  scsi: hisi_sas: support deferred probe for v2 hw
  scsi: megaraid_sas: switch to pci_alloc_irq_vectors
  scsi: scsi_devinfo: remove synchronous ALUA for NETAPP devices
  scsi: be2iscsi: set errno on error path
  scsi: be2iscsi: set errno on error path
  scsi: hpsa: fallback to use legacy REPORT PHYS command
  scsi: scsi_dh_alua: Fix RCU annotations
  scsi: hpsa: use %phN for short hex dumps
  scsi: hisi_sas: fix free'ing in probe and remove
  scsi: isci: switch to pci_alloc_irq_vectors
  scsi: ipr: Fix runaway IRQs when falling back from MSI to LSI
  scsi: dpt_i2o: double free on error path
  scsi: cxlflash: Migrate scsi command pointer to AFU command
  scsi: cxlflash: Migrate IOARRIN specific routines to function pointers
  scsi: cxlflash: Cleanup queuecommand()
  scsi: cxlflash: Cleanup send_tmf()
  scsi: cxlflash: Remove AFU command lock
  scsi: cxlflash: Wait for active AFU commands to timeout upon tear down
  scsi: cxlflash: Remove private command pool
  ...
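As context for the conversions listed above, here is a minimal sketch of the pci_alloc_irq_vectors() pattern the pull message describes. It is not taken from any driver in this merge; the "foo" names, vector count, and flags are invented for illustration, and each driver picks its own limits and handlers.

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    static irqreturn_t foo_isr(int irq, void *data)
    {
    	return IRQ_HANDLED;	/* placeholder handler */
    }

    static int foo_setup_irqs(struct pci_dev *pdev)
    {
    	int nvec, i, ret;

    	/* Request 1..16 vectors, falling back MSI-X -> MSI -> legacy INTx. */
    	nvec = pci_alloc_irq_vectors(pdev, 1, 16,
    			PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
    	if (nvec < 0)
    		return nvec;

    	for (i = 0; i < nvec; i++) {
    		/* pci_irq_vector() maps a vector index to a Linux IRQ number. */
    		ret = request_irq(pci_irq_vector(pdev, i), foo_isr, 0,
    				  "foo", pdev);
    		if (ret)
    			goto out_free;
    	}
    	return 0;

    out_free:
    	while (--i >= 0)
    		free_irq(pci_irq_vector(pdev, i), pdev);
    	pci_free_irq_vectors(pdev);
    	return ret;
    }

Because the core tracks the allocated vectors, this one call replaces the driver-private pci_enable_msix_range()/pci_enable_msi() fallback chains, which is what lets blk_mq derive its queue-to-IRQ mapping generically.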
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/block/cciss_scsi.c | 72
-rw-r--r-- drivers/message/fusion/mptbase.c | 28
-rw-r--r-- drivers/message/fusion/mptscsih.c | 11
-rw-r--r-- drivers/phy/phy-qcom-ufs-i.h | 7
-rw-r--r-- drivers/phy/phy-qcom-ufs-qmp-14nm.c | 72
-rw-r--r-- drivers/phy/phy-qcom-ufs-qmp-20nm.c | 65
-rw-r--r-- drivers/phy/phy-qcom-ufs.c | 273
-rw-r--r-- drivers/s390/scsi/zfcp_ext.h | 4
-rw-r--r-- drivers/s390/scsi/zfcp_fc.c | 36
-rw-r--r-- drivers/scsi/Kconfig | 35
-rw-r--r-- drivers/scsi/Makefile | 1
-rw-r--r-- drivers/scsi/NCR5380.c | 137
-rw-r--r-- drivers/scsi/NCR5380.h | 87
-rw-r--r-- drivers/scsi/aacraid/aacraid.h | 1
-rw-r--r-- drivers/scsi/aacraid/comminit.c | 10
-rw-r--r-- drivers/scsi/aacraid/commsup.c | 25
-rw-r--r-- drivers/scsi/aacraid/linit.c | 20
-rw-r--r-- drivers/scsi/advansys.c | 3
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_hwi.c | 5
-rw-r--r-- drivers/scsi/arcmsr/arcmsr.h | 5
-rw-r--r-- drivers/scsi/arcmsr/arcmsr_hba.c | 82
-rw-r--r-- drivers/scsi/arm/cumana_1.c | 98
-rw-r--r-- drivers/scsi/arm/oak.c | 34
-rw-r--r-- drivers/scsi/atari_scsi.c | 77
-rw-r--r-- drivers/scsi/be2iscsi/be_main.c | 8
-rw-r--r-- drivers/scsi/bfa/bfa_ioc.h | 30
-rw-r--r-- drivers/scsi/bfa/bfad_bsg.c | 62
-rw-r--r-- drivers/scsi/bfa/bfad_im.h | 4
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 1
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_tgt.c | 3
-rw-r--r-- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 2
-rw-r--r-- drivers/scsi/cxlflash/common.h | 39
-rw-r--r-- drivers/scsi/cxlflash/lunmgt.c | 6
-rw-r--r-- drivers/scsi/cxlflash/main.c | 410
-rw-r--r-- drivers/scsi/cxlflash/sislite.h | 2
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_alua.c | 16
-rw-r--r-- drivers/scsi/dmx3191d.c | 33
-rw-r--r-- drivers/scsi/dpt_i2o.c | 7
-rw-r--r-- drivers/scsi/fcoe/fcoe.c | 25
-rw-r--r-- drivers/scsi/fcoe/fcoe_ctlr.c | 157
-rw-r--r-- drivers/scsi/fcoe/fcoe_sysfs.c | 83
-rw-r--r-- drivers/scsi/fnic/fnic_scsi.c | 30
-rw-r--r-- drivers/scsi/fnic/fnic_trace.c | 4
-rw-r--r-- drivers/scsi/fnic/fnic_trace.h | 2
-rw-r--r-- drivers/scsi/fnic/vnic_dev.c | 10
-rw-r--r-- drivers/scsi/g_NCR5380.c | 296
-rw-r--r-- drivers/scsi/g_NCR5380.h | 32
-rw-r--r-- drivers/scsi/g_NCR5380_mmio.c | 10
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas.h | 11
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_main.c | 67
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 79
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 556
-rw-r--r-- drivers/scsi/hpsa.c | 252
-rw-r--r-- drivers/scsi/hpsa.h | 6
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvfc.c | 40
-rw-r--r-- drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 900
-rw-r--r-- drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h | 5
-rw-r--r-- drivers/scsi/ipr.c | 174
-rw-r--r-- drivers/scsi/ipr.h | 7
-rw-r--r-- drivers/scsi/ips.c | 13
-rw-r--r-- drivers/scsi/isci/host.h | 1
-rw-r--r-- drivers/scsi/isci/init.c | 23
-rw-r--r-- drivers/scsi/isci/probe_roms.c | 1
-rw-r--r-- drivers/scsi/isci/remote_node_context.c | 7
-rw-r--r-- drivers/scsi/isci/request.c | 2
-rw-r--r-- drivers/scsi/libfc/fc_disc.c | 61
-rw-r--r-- drivers/scsi/libfc/fc_elsct.c | 2
-rw-r--r-- drivers/scsi/libfc/fc_exch.c | 256
-rw-r--r-- drivers/scsi/libfc/fc_fcp.c | 235
-rw-r--r-- drivers/scsi/libfc/fc_libfc.c | 2
-rw-r--r-- drivers/scsi/libfc/fc_lport.c | 126
-rw-r--r-- drivers/scsi/libfc/fc_rport.c | 561
-rw-r--r-- drivers/scsi/lpfc/lpfc.h | 6
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 160
-rw-r--r-- drivers/scsi/lpfc/lpfc_bsg.c | 422
-rw-r--r-- drivers/scsi/lpfc/lpfc_bsg.h | 10
-rw-r--r-- drivers/scsi/lpfc/lpfc_crtn.h | 6
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw4.h | 18
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 116
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 56
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 41
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r-- drivers/scsi/mac_scsi.c | 83
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas.h | 8
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 136
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fp.c | 6
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fusion.c | 23
-rw-r--r-- drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h | 7
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.c | 186
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.h | 39
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_config.c | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_ctl.c | 69
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_ctl.h | 1
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 129
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_transport.c | 8
-rw-r--r-- drivers/scsi/mvsas/mv_94xx.c | 2
-rw-r--r-- drivers/scsi/pmcraid.c | 10
-rw-r--r-- drivers/scsi/qla2xxx/qla_bsg.c | 449
-rw-r--r-- drivers/scsi/qla2xxx/qla_def.h | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_gbl.h | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_iocb.c | 13
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 52
-rw-r--r-- drivers/scsi/qla2xxx/qla_mr.c | 15
-rw-r--r-- drivers/scsi/qla4xxx/ql4_def.h | 18
-rw-r--r-- drivers/scsi/qla4xxx/ql4_glbl.h | 1
-rw-r--r-- drivers/scsi/qla4xxx/ql4_isr.c | 27
-rw-r--r-- drivers/scsi/qla4xxx/ql4_mbx.c | 5
-rw-r--r-- drivers/scsi/qla4xxx/ql4_nx.c | 97
-rw-r--r-- drivers/scsi/qla4xxx/ql4_os.c | 15
-rw-r--r-- drivers/scsi/scsi_devinfo.c | 2
-rw-r--r-- drivers/scsi/scsi_lib.c | 51
-rw-r--r-- drivers/scsi/scsi_transport_fc.c | 455
-rw-r--r-- drivers/scsi/scsi_transport_srp.c | 52
-rw-r--r-- drivers/scsi/sd.c | 4
-rw-r--r-- drivers/scsi/smartpqi/smartpqi.h | 2
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_init.c | 102
-rw-r--r-- drivers/scsi/storvsc_drv.c | 4
-rw-r--r-- drivers/scsi/sun3_scsi.c | 80
-rw-r--r-- drivers/scsi/ufs/ufs-qcom.c | 39
-rw-r--r-- drivers/scsi/ufs/ufs.h | 5
-rw-r--r-- drivers/scsi/ufs/ufs_quirks.h | 9
-rw-r--r-- drivers/scsi/ufs/ufshcd-pci.c | 2
-rw-r--r-- drivers/scsi/ufs/ufshcd-pltfrm.c | 5
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 482
-rw-r--r-- drivers/scsi/ufs/ufshcd.h | 46
-rw-r--r-- drivers/scsi/ufs/ufshci.h | 3
-rw-r--r-- drivers/scsi/ufs/unipro.h | 4
-rw-r--r-- drivers/target/tcm_fc/tfc_cmd.c | 20
-rw-r--r-- drivers/target/tcm_fc/tfc_io.c | 4
130 files changed, 5249 insertions, 4215 deletions
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 1537302e56e3..a18de9d727b0 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -260,43 +260,6 @@ scsi_cmd_stack_free(ctlr_info_t *h)
}
#if 0
-static int xmargin=8;
-static int amargin=60;
-
-static void
-print_bytes (unsigned char *c, int len, int hex, int ascii)
-{
-
- int i;
- unsigned char *x;
-
- if (hex)
- {
- x = c;
- for (i=0;i<len;i++)
- {
- if ((i % xmargin) == 0 && i>0) printk("\n");
- if ((i % xmargin) == 0) printk("0x%04x:", i);
- printk(" %02x", *x);
- x++;
- }
- printk("\n");
- }
- if (ascii)
- {
- x = c;
- for (i=0;i<len;i++)
- {
- if ((i % amargin) == 0 && i>0) printk("\n");
- if ((i % amargin) == 0) printk("0x%04x:", i);
- if (*x > 26 && *x < 128) printk("%c", *x);
- else printk(".");
- x++;
- }
- printk("\n");
- }
-}
-
static void
print_cmd(CommandList_struct *cp)
{
@@ -305,30 +268,13 @@ print_cmd(CommandList_struct *cp)
printk("sgtot:%d\n", cp->Header.SGTotal);
printk("Tag:0x%08x/0x%08x\n", cp->Header.Tag.upper,
cp->Header.Tag.lower);
- printk("LUN:0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- cp->Header.LUN.LunAddrBytes[0],
- cp->Header.LUN.LunAddrBytes[1],
- cp->Header.LUN.LunAddrBytes[2],
- cp->Header.LUN.LunAddrBytes[3],
- cp->Header.LUN.LunAddrBytes[4],
- cp->Header.LUN.LunAddrBytes[5],
- cp->Header.LUN.LunAddrBytes[6],
- cp->Header.LUN.LunAddrBytes[7]);
+ printk("LUN:0x%8phN\n", cp->Header.LUN.LunAddrBytes);
printk("CDBLen:%d\n", cp->Request.CDBLen);
printk("Type:%d\n",cp->Request.Type.Type);
printk("Attr:%d\n",cp->Request.Type.Attribute);
printk(" Dir:%d\n",cp->Request.Type.Direction);
printk("Timeout:%d\n",cp->Request.Timeout);
- printk( "CDB: %02x %02x %02x %02x %02x %02x %02x %02x"
- " %02x %02x %02x %02x %02x %02x %02x %02x\n",
- cp->Request.CDB[0], cp->Request.CDB[1],
- cp->Request.CDB[2], cp->Request.CDB[3],
- cp->Request.CDB[4], cp->Request.CDB[5],
- cp->Request.CDB[6], cp->Request.CDB[7],
- cp->Request.CDB[8], cp->Request.CDB[9],
- cp->Request.CDB[10], cp->Request.CDB[11],
- cp->Request.CDB[12], cp->Request.CDB[13],
- cp->Request.CDB[14], cp->Request.CDB[15]),
+ printk("CDB: %16ph\n", cp->Request.CDB);
printk("edesc.Addr: 0x%08x/0%08x, Len = %d\n",
cp->ErrDesc.Addr.upper, cp->ErrDesc.Addr.lower,
cp->ErrDesc.Len);
@@ -340,9 +286,7 @@ print_cmd(CommandList_struct *cp)
printk("offense size:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_size);
printk("offense byte:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_num);
printk("offense value:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
-
}
-
#endif
static int
@@ -782,8 +726,10 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
"reported\n", c);
break;
case CMD_INVALID: {
- /* print_bytes(c, sizeof(*c), 1, 0);
- print_cmd(c); */
+ /*
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, c, sizeof(*c), false);
+ print_cmd(c);
+ */
/* We get CMD_INVALID if you address a non-existent tape drive instead
of a selection timeout (no response). You will see this if you yank
out a tape drive, then try to access it. This is kind of a shame
@@ -985,8 +931,10 @@ cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c)
dev_warn(&h->pdev->dev,
"%p is reported invalid (probably means "
"target device no longer present)\n", c);
- /* print_bytes((unsigned char *) c, sizeof(*c), 1, 0);
- print_cmd(c); */
+ /*
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, c, sizeof(*c), false);
+ print_cmd(c);
+ */
}
break;
case CMD_PROTOCOL_ERR:
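For reference, a hedged illustration of the printk hex-dump helpers the cciss patch switches to: "%16ph" prints 16 bytes space-separated, "%8phN" prints 8 bytes with no separator, and print_hex_dump() emits an offset-prefixed dump. The buffers and values below are invented; only the format specifiers and the print_hex_dump() call mirror the change above.

    #include <linux/kernel.h>
    #include <linux/printk.h>

    static void demo_hex_dump(void)
    {
    	u8 cdb[16] = { 0x2a, 0x00, 0x12, 0x34 };	/* rest zero-filled */
    	u8 lun[8] = { 0x01, 0x02 };

    	/* %16ph: space-separated -> "2a 00 12 34 00 00 ..." */
    	printk(KERN_INFO "CDB: %16ph\n", cdb);

    	/* %8phN: no separator -> "0102000000000000" */
    	printk(KERN_INFO "LUN: 0x%8phN\n", lun);

    	/* Offset-prefixed hex dump, 16 bytes per row, no ASCII column. */
    	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
    		       cdb, sizeof(cdb), false);
    }

One specifier replaces the per-byte "%02x" argument lists, which is the whole point of the "%phN for short hex dumps" commit in this series.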
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 89c7ed16b4df..1e73064b0fb2 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -2585,10 +2585,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
(void) GetLanConfigPages(ioc);
a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
- "LanAddr = %02X:%02X:%02X"
- ":%02X:%02X:%02X\n",
- ioc->name, a[5], a[4],
- a[3], a[2], a[1], a[0]));
+ "LanAddr = %pMR\n", ioc->name, a));
}
break;
@@ -2868,21 +2865,21 @@ MptDisplayIocCapabilities(MPT_ADAPTER *ioc)
printk(KERN_INFO "%s: ", ioc->name);
if (ioc->prod_name)
- printk("%s: ", ioc->prod_name);
- printk("Capabilities={");
+ pr_cont("%s: ", ioc->prod_name);
+ pr_cont("Capabilities={");
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
- printk("Initiator");
+ pr_cont("Initiator");
i++;
}
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
- printk("%sTarget", i ? "," : "");
+ pr_cont("%sTarget", i ? "," : "");
i++;
}
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
- printk("%sLAN", i ? "," : "");
+ pr_cont("%sLAN", i ? "," : "");
i++;
}
@@ -2891,12 +2888,12 @@ MptDisplayIocCapabilities(MPT_ADAPTER *ioc)
* This would probably evoke more questions than it's worth
*/
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
- printk("%sLogBusAddr", i ? "," : "");
+ pr_cont("%sLogBusAddr", i ? "," : "");
i++;
}
#endif
- printk("}\n");
+ pr_cont("}\n");
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -6783,8 +6780,7 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
if (ioc->bus_type == FC) {
if (ioc->pfacts[p].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
- seq_printf(m, " LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
- a[5], a[4], a[3], a[2], a[1], a[0]);
+ seq_printf(m, " LanAddr = %pMR\n", a);
}
seq_printf(m, " WWN = %08X%08X:%08X%08X\n",
ioc->fc_port_page0[p].WWNN.High,
@@ -6861,8 +6857,7 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh
if (showlan && (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
- y += sprintf(buffer+len+y, ", LanAddr=%02X:%02X:%02X:%02X:%02X:%02X",
- a[5], a[4], a[3], a[2], a[1], a[0]);
+ y += sprintf(buffer+len+y, ", LanAddr=%pMR", a);
}
y += sprintf(buffer+len+y, ", IRQ=%d", ioc->pci_irq);
@@ -6896,8 +6891,7 @@ static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int
if (showlan && (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
- seq_printf(m, ", LanAddr=%02X:%02X:%02X:%02X:%02X:%02X",
- a[5], a[4], a[3], a[2], a[1], a[0]);
+ seq_printf(m, ", LanAddr=%pMR", a);
}
seq_printf(m, ", IRQ=%d", ioc->pci_irq);
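Two printk idioms are adopted in the mptbase changes above, sketched here with invented values: pr_cont() continues the current console line without emitting a new log-level prefix, and "%pMR" prints a 6-byte address in reversed byte order, which is why the old code indexed a[5]..a[0] by hand.

    #include <linux/printk.h>

    static void demo_cont_and_mac(void)
    {
    	u8 a[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

    	/* Build one line piecewise without repeating the prefix. */
    	printk(KERN_INFO "Capabilities={");
    	pr_cont("Initiator");
    	pr_cont("%sLAN", ",");
    	pr_cont("}\n");

    	/* %pMR: reversed byte order -> "55:44:33:22:11:00". */
    	printk(KERN_INFO "LanAddr = %pMR\n", a);
    }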
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 6c9fc11efb87..08a807d6a44f 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1366,15 +1366,10 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt)
/* Default to untagged. Once a target structure has been allocated,
* use the Inquiry data to determine if device supports tagged.
*/
- if ((vdevice->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)
- && (SCpnt->device->tagged_supported)) {
+ if ((vdevice->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES) &&
+ SCpnt->device->tagged_supported)
scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ;
- if (SCpnt->request && SCpnt->request->ioprio) {
- if (((SCpnt->request->ioprio & 0x7) == 1) ||
- !(SCpnt->request->ioprio & 0x7))
- scsictl |= MPI_SCSIIO_CONTROL_HEADOFQ;
- }
- } else
+ else
scsictl = scsidir | MPI_SCSIIO_CONTROL_UNTAGGED;
diff --git a/drivers/phy/phy-qcom-ufs-i.h b/drivers/phy/phy-qcom-ufs-i.h
index 2bd5ce43a724..d505d98cf5f8 100644
--- a/drivers/phy/phy-qcom-ufs-i.h
+++ b/drivers/phy/phy-qcom-ufs-i.h
@@ -141,11 +141,8 @@ struct ufs_qcom_phy_specific_ops {
struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy);
int ufs_qcom_phy_power_on(struct phy *generic_phy);
int ufs_qcom_phy_power_off(struct phy *generic_phy);
-int ufs_qcom_phy_exit(struct phy *generic_phy);
-int ufs_qcom_phy_init_clks(struct phy *generic_phy,
- struct ufs_qcom_phy *phy_common);
-int ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
- struct ufs_qcom_phy *phy_common);
+int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common);
+int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common);
int ufs_qcom_phy_remove(struct phy *generic_phy,
struct ufs_qcom_phy *ufs_qcom_phy);
struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
diff --git a/drivers/phy/phy-qcom-ufs-qmp-14nm.c b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
index 6ee51490f786..c71c84734916 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-14nm.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
@@ -44,30 +44,12 @@ void ufs_qcom_phy_qmp_14nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
static int ufs_qcom_phy_qmp_14nm_init(struct phy *generic_phy)
{
- struct ufs_qcom_phy_qmp_14nm *phy = phy_get_drvdata(generic_phy);
- struct ufs_qcom_phy *phy_common = &phy->common_cfg;
- int err;
-
- err = ufs_qcom_phy_init_clks(generic_phy, phy_common);
- if (err) {
- dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
- __func__, err);
- goto out;
- }
-
- err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common);
- if (err) {
- dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
- __func__, err);
- goto out;
- }
- phy_common->vdda_phy.max_uV = UFS_PHY_VDDA_PHY_UV;
- phy_common->vdda_phy.min_uV = UFS_PHY_VDDA_PHY_UV;
-
- ufs_qcom_phy_qmp_14nm_advertise_quirks(phy_common);
+ return 0;
+}
-out:
- return err;
+static int ufs_qcom_phy_qmp_14nm_exit(struct phy *generic_phy)
+{
+ return 0;
}
static
@@ -117,7 +99,7 @@ static int ufs_qcom_phy_qmp_14nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
static const struct phy_ops ufs_qcom_phy_qmp_14nm_phy_ops = {
.init = ufs_qcom_phy_qmp_14nm_init,
- .exit = ufs_qcom_phy_exit,
+ .exit = ufs_qcom_phy_qmp_14nm_exit,
.power_on = ufs_qcom_phy_power_on,
.power_off = ufs_qcom_phy_power_off,
.owner = THIS_MODULE,
@@ -136,6 +118,7 @@ static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct phy *generic_phy;
struct ufs_qcom_phy_qmp_14nm *phy;
+ struct ufs_qcom_phy *phy_common;
int err = 0;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
@@ -143,8 +126,9 @@ static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
err = -ENOMEM;
goto out;
}
+ phy_common = &phy->common_cfg;
- generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+ generic_phy = ufs_qcom_phy_generic_probe(pdev, phy_common,
&ufs_qcom_phy_qmp_14nm_phy_ops, &phy_14nm_ops);
if (!generic_phy) {
@@ -154,39 +138,43 @@ static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
goto out;
}
- phy_set_drvdata(generic_phy, phy);
+ err = ufs_qcom_phy_init_clks(phy_common);
+ if (err) {
+ dev_err(phy_common->dev,
+ "%s: ufs_qcom_phy_init_clks() failed %d\n",
+ __func__, err);
+ goto out;
+ }
- strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
- sizeof(phy->common_cfg.name));
+ err = ufs_qcom_phy_init_vregulators(phy_common);
+ if (err) {
+ dev_err(phy_common->dev,
+ "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+ __func__, err);
+ goto out;
+ }
+ phy_common->vdda_phy.max_uV = UFS_PHY_VDDA_PHY_UV;
+ phy_common->vdda_phy.min_uV = UFS_PHY_VDDA_PHY_UV;
-out:
- return err;
-}
+ ufs_qcom_phy_qmp_14nm_advertise_quirks(phy_common);
-static int ufs_qcom_phy_qmp_14nm_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct phy *generic_phy = to_phy(dev);
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
- int err = 0;
+ phy_set_drvdata(generic_phy, phy);
- err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
- if (err)
- dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
- __func__, err);
+ strlcpy(phy_common->name, UFS_PHY_NAME, sizeof(phy_common->name));
+out:
return err;
}
static const struct of_device_id ufs_qcom_phy_qmp_14nm_of_match[] = {
{.compatible = "qcom,ufs-phy-qmp-14nm"},
+ {.compatible = "qcom,msm8996-ufs-phy-qmp-14nm"},
{},
};
MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_14nm_of_match);
static struct platform_driver ufs_qcom_phy_qmp_14nm_driver = {
.probe = ufs_qcom_phy_qmp_14nm_probe,
- .remove = ufs_qcom_phy_qmp_14nm_remove,
.driver = {
.of_match_table = ufs_qcom_phy_qmp_14nm_of_match,
.name = "ufs_qcom_phy_qmp_14nm",
diff --git a/drivers/phy/phy-qcom-ufs-qmp-20nm.c b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
index 770087ab05e2..1a26a64e06d3 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-20nm.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
@@ -63,28 +63,12 @@ void ufs_qcom_phy_qmp_20nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
static int ufs_qcom_phy_qmp_20nm_init(struct phy *generic_phy)
{
- struct ufs_qcom_phy_qmp_20nm *phy = phy_get_drvdata(generic_phy);
- struct ufs_qcom_phy *phy_common = &phy->common_cfg;
- int err = 0;
-
- err = ufs_qcom_phy_init_clks(generic_phy, phy_common);
- if (err) {
- dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
- __func__, err);
- goto out;
- }
-
- err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common);
- if (err) {
- dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
- __func__, err);
- goto out;
- }
-
- ufs_qcom_phy_qmp_20nm_advertise_quirks(phy_common);
+ return 0;
+}
-out:
- return err;
+static int ufs_qcom_phy_qmp_20nm_exit(struct phy *generic_phy)
+{
+ return 0;
}
static
@@ -173,7 +157,7 @@ static int ufs_qcom_phy_qmp_20nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
static const struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = {
.init = ufs_qcom_phy_qmp_20nm_init,
- .exit = ufs_qcom_phy_exit,
+ .exit = ufs_qcom_phy_qmp_20nm_exit,
.power_on = ufs_qcom_phy_power_on,
.power_off = ufs_qcom_phy_power_off,
.owner = THIS_MODULE,
@@ -192,6 +176,7 @@ static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct phy *generic_phy;
struct ufs_qcom_phy_qmp_20nm *phy;
+ struct ufs_qcom_phy *phy_common;
int err = 0;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
@@ -199,8 +184,9 @@ static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
err = -ENOMEM;
goto out;
}
+ phy_common = &phy->common_cfg;
- generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+ generic_phy = ufs_qcom_phy_generic_probe(pdev, phy_common,
&ufs_qcom_phy_qmp_20nm_phy_ops, &phy_20nm_ops);
if (!generic_phy) {
@@ -210,27 +196,27 @@ static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
goto out;
}
- phy_set_drvdata(generic_phy, phy);
+ err = ufs_qcom_phy_init_clks(phy_common);
+ if (err) {
+ dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
+ __func__, err);
+ goto out;
+ }
- strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
- sizeof(phy->common_cfg.name));
+ err = ufs_qcom_phy_init_vregulators(phy_common);
+ if (err) {
+ dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+ __func__, err);
+ goto out;
+ }
-out:
- return err;
-}
+ ufs_qcom_phy_qmp_20nm_advertise_quirks(phy_common);
-static int ufs_qcom_phy_qmp_20nm_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct phy *generic_phy = to_phy(dev);
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
- int err = 0;
+ phy_set_drvdata(generic_phy, phy);
- err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
- if (err)
- dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
- __func__, err);
+ strlcpy(phy_common->name, UFS_PHY_NAME, sizeof(phy_common->name));
+out:
return err;
}
@@ -242,7 +228,6 @@ MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_20nm_of_match);
static struct platform_driver ufs_qcom_phy_qmp_20nm_driver = {
.probe = ufs_qcom_phy_qmp_20nm_probe,
- .remove = ufs_qcom_phy_qmp_20nm_remove,
.driver = {
.of_match_table = ufs_qcom_phy_qmp_20nm_of_match,
.name = "ufs_qcom_phy_qmp_20nm",
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
index 18a5b495ad65..c69568b8543d 100644
--- a/drivers/phy/phy-qcom-ufs.c
+++ b/drivers/phy/phy-qcom-ufs.c
@@ -22,13 +22,6 @@
#define VDDP_REF_CLK_MIN_UV 1200000
#define VDDP_REF_CLK_MAX_UV 1200000
-static int __ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
- const char *, bool);
-static int ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
- const char *);
-static int ufs_qcom_phy_base_init(struct platform_device *pdev,
- struct ufs_qcom_phy *phy_common);
-
int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
struct ufs_qcom_phy_calibration *tbl_A,
int tbl_size_A,
@@ -75,45 +68,6 @@ out:
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate);
-struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
- struct ufs_qcom_phy *common_cfg,
- const struct phy_ops *ufs_qcom_phy_gen_ops,
- struct ufs_qcom_phy_specific_ops *phy_spec_ops)
-{
- int err;
- struct device *dev = &pdev->dev;
- struct phy *generic_phy = NULL;
- struct phy_provider *phy_provider;
-
- err = ufs_qcom_phy_base_init(pdev, common_cfg);
- if (err) {
- dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
- goto out;
- }
-
- phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(phy_provider)) {
- err = PTR_ERR(phy_provider);
- dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
- goto out;
- }
-
- generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
- if (IS_ERR(generic_phy)) {
- err = PTR_ERR(generic_phy);
- dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
- generic_phy = NULL;
- goto out;
- }
-
- common_cfg->phy_spec_ops = phy_spec_ops;
- common_cfg->dev = dev;
-
-out:
- return generic_phy;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);
-
/*
* This assumes the embedded phy structure inside generic_phy is of type
* struct ufs_qcom_phy. In order to function properly it's crucial
@@ -154,13 +108,50 @@ int ufs_qcom_phy_base_init(struct platform_device *pdev,
return 0;
}
-static int __ufs_qcom_phy_clk_get(struct phy *phy,
+struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
+ struct ufs_qcom_phy *common_cfg,
+ const struct phy_ops *ufs_qcom_phy_gen_ops,
+ struct ufs_qcom_phy_specific_ops *phy_spec_ops)
+{
+ int err;
+ struct device *dev = &pdev->dev;
+ struct phy *generic_phy = NULL;
+ struct phy_provider *phy_provider;
+
+ err = ufs_qcom_phy_base_init(pdev, common_cfg);
+ if (err) {
+ dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
+ goto out;
+ }
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider)) {
+ err = PTR_ERR(phy_provider);
+ dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
+ goto out;
+ }
+
+ generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
+ if (IS_ERR(generic_phy)) {
+ err = PTR_ERR(generic_phy);
+ dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
+ generic_phy = NULL;
+ goto out;
+ }
+
+ common_cfg->phy_spec_ops = phy_spec_ops;
+ common_cfg->dev = dev;
+
+out:
+ return generic_phy;
+}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);
+
+static int __ufs_qcom_phy_clk_get(struct device *dev,
const char *name, struct clk **clk_out, bool err_print)
{
struct clk *clk;
int err = 0;
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
- struct device *dev = ufs_qcom_phy->dev;
clk = devm_clk_get(dev, name);
if (IS_ERR(clk)) {
@@ -174,42 +165,44 @@ static int __ufs_qcom_phy_clk_get(struct phy *phy,
return err;
}
-static
-int ufs_qcom_phy_clk_get(struct phy *phy,
+static int ufs_qcom_phy_clk_get(struct device *dev,
const char *name, struct clk **clk_out)
{
- return __ufs_qcom_phy_clk_get(phy, name, clk_out, true);
+ return __ufs_qcom_phy_clk_get(dev, name, clk_out, true);
}
-int
-ufs_qcom_phy_init_clks(struct phy *generic_phy,
- struct ufs_qcom_phy *phy_common)
+int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common)
{
int err;
- err = ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
+ if (of_device_is_compatible(phy_common->dev->of_node,
+ "qcom,msm8996-ufs-phy-qmp-14nm"))
+ goto skip_txrx_clk;
+
+ err = ufs_qcom_phy_clk_get(phy_common->dev, "tx_iface_clk",
&phy_common->tx_iface_clk);
if (err)
goto out;
- err = ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
+ err = ufs_qcom_phy_clk_get(phy_common->dev, "rx_iface_clk",
&phy_common->rx_iface_clk);
if (err)
goto out;
- err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk_src",
+ err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_src",
&phy_common->ref_clk_src);
if (err)
goto out;
+skip_txrx_clk:
/*
* "ref_clk_parent" is optional hence don't abort init if it's not
* found.
*/
- __ufs_qcom_phy_clk_get(generic_phy, "ref_clk_parent",
+ __ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_parent",
&phy_common->ref_clk_parent, false);
- err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk",
+ err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk",
&phy_common->ref_clk);
out:
@@ -217,41 +210,14 @@ out:
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_clks);
-int
-ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
- struct ufs_qcom_phy *phy_common)
-{
- int err;
-
- err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_pll,
- "vdda-pll");
- if (err)
- goto out;
-
- err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_phy,
- "vdda-phy");
-
- if (err)
- goto out;
-
- /* vddp-ref-clk-* properties are optional */
- __ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vddp_ref_clk,
- "vddp-ref-clk", true);
-out:
- return err;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators);
-
-static int __ufs_qcom_phy_init_vreg(struct phy *phy,
+static int __ufs_qcom_phy_init_vreg(struct device *dev,
struct ufs_qcom_phy_vreg *vreg, const char *name, bool optional)
{
int err = 0;
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
- struct device *dev = ufs_qcom_phy->dev;
char prop_name[MAX_PROP_NAME];
- vreg->name = kstrdup(name, GFP_KERNEL);
+ vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
if (!vreg->name) {
err = -ENOMEM;
goto out;
@@ -304,14 +270,36 @@ out:
return err;
}
-static int ufs_qcom_phy_init_vreg(struct phy *phy,
+static int ufs_qcom_phy_init_vreg(struct device *dev,
struct ufs_qcom_phy_vreg *vreg, const char *name)
{
- return __ufs_qcom_phy_init_vreg(phy, vreg, name, false);
+ return __ufs_qcom_phy_init_vreg(dev, vreg, name, false);
}
-static
-int ufs_qcom_phy_cfg_vreg(struct phy *phy,
+int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common)
+{
+ int err;
+
+ err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_pll,
+ "vdda-pll");
+ if (err)
+ goto out;
+
+ err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_phy,
+ "vdda-phy");
+
+ if (err)
+ goto out;
+
+ /* vddp-ref-clk-* properties are optional */
+ __ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vddp_ref_clk,
+ "vddp-ref-clk", true);
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators);
+
+static int ufs_qcom_phy_cfg_vreg(struct device *dev,
struct ufs_qcom_phy_vreg *vreg, bool on)
{
int ret = 0;
@@ -319,10 +307,6 @@ int ufs_qcom_phy_cfg_vreg(struct phy *phy,
const char *name = vreg->name;
int min_uV;
int uA_load;
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
- struct device *dev = ufs_qcom_phy->dev;
-
- BUG_ON(!vreg);
if (regulator_count_voltages(reg) > 0) {
min_uV = on ? vreg->min_uV : 0;
@@ -350,18 +334,15 @@ out:
return ret;
}
-static
-int ufs_qcom_phy_enable_vreg(struct phy *phy,
+static int ufs_qcom_phy_enable_vreg(struct device *dev,
struct ufs_qcom_phy_vreg *vreg)
{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
- struct device *dev = ufs_qcom_phy->dev;
int ret = 0;
if (!vreg || vreg->enabled)
goto out;
- ret = ufs_qcom_phy_cfg_vreg(phy, vreg, true);
+ ret = ufs_qcom_phy_cfg_vreg(dev, vreg, true);
if (ret) {
dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n",
__func__, ret);
@@ -380,10 +361,9 @@ out:
return ret;
}
-int ufs_qcom_phy_enable_ref_clk(struct phy *generic_phy)
+static int ufs_qcom_phy_enable_ref_clk(struct ufs_qcom_phy *phy)
{
int ret = 0;
- struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
if (phy->is_ref_clk_enabled)
goto out;
@@ -430,14 +410,10 @@ out_disable_src:
out:
return ret;
}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_ref_clk);
-static
-int ufs_qcom_phy_disable_vreg(struct phy *phy,
+static int ufs_qcom_phy_disable_vreg(struct device *dev,
struct ufs_qcom_phy_vreg *vreg)
{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
- struct device *dev = ufs_qcom_phy->dev;
int ret = 0;
if (!vreg || !vreg->enabled || vreg->is_always_on)
@@ -447,7 +423,7 @@ int ufs_qcom_phy_disable_vreg(struct phy *phy,
if (!ret) {
/* ignore errors on applying disable config */
- ufs_qcom_phy_cfg_vreg(phy, vreg, false);
+ ufs_qcom_phy_cfg_vreg(dev, vreg, false);
vreg->enabled = false;
} else {
dev_err(dev, "%s: %s disable failed, err=%d\n",
@@ -457,10 +433,8 @@ out:
return ret;
}
-void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
+static void ufs_qcom_phy_disable_ref_clk(struct ufs_qcom_phy *phy)
{
- struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
-
if (phy->is_ref_clk_enabled) {
clk_disable_unprepare(phy->ref_clk);
/*
@@ -473,7 +447,6 @@ void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
phy->is_ref_clk_enabled = false;
}
}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk);
#define UFS_REF_CLK_EN (1 << 5)
@@ -526,9 +499,8 @@ void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk);
/* Turn ON M-PHY RMMI interface clocks */
-int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
+static int ufs_qcom_phy_enable_iface_clk(struct ufs_qcom_phy *phy)
{
- struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
int ret = 0;
if (phy->is_iface_clk_enabled)
@@ -552,20 +524,16 @@ int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
out:
return ret;
}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_iface_clk);
/* Turn OFF M-PHY RMMI interface clocks */
-void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
+void ufs_qcom_phy_disable_iface_clk(struct ufs_qcom_phy *phy)
{
- struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
-
if (phy->is_iface_clk_enabled) {
clk_disable_unprepare(phy->tx_iface_clk);
clk_disable_unprepare(phy->rx_iface_clk);
phy->is_iface_clk_enabled = false;
}
}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_iface_clk);
int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
{
@@ -634,29 +602,6 @@ int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy);
-int ufs_qcom_phy_remove(struct phy *generic_phy,
- struct ufs_qcom_phy *ufs_qcom_phy)
-{
- phy_power_off(generic_phy);
-
- kfree(ufs_qcom_phy->vdda_pll.name);
- kfree(ufs_qcom_phy->vdda_phy.name);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_remove);
-
-int ufs_qcom_phy_exit(struct phy *generic_phy)
-{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
-
- if (ufs_qcom_phy->is_powered_on)
- phy_power_off(generic_phy);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_exit);
-
int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
{
struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
@@ -678,7 +623,10 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy)
struct device *dev = phy_common->dev;
int err;
- err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_phy);
+ if (phy_common->is_powered_on)
+ return 0;
+
+ err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_phy);
if (err) {
dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
__func__, err);
@@ -688,23 +636,30 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy)
phy_common->phy_spec_ops->power_control(phy_common, true);
/* vdda_pll also enables ref clock LDOs so enable it first */
- err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_pll);
+ err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_pll);
if (err) {
dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
__func__, err);
goto out_disable_phy;
}
- err = ufs_qcom_phy_enable_ref_clk(generic_phy);
+ err = ufs_qcom_phy_enable_iface_clk(phy_common);
if (err) {
- dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
+ dev_err(dev, "%s enable phy iface clock failed, err=%d\n",
__func__, err);
goto out_disable_pll;
}
+ err = ufs_qcom_phy_enable_ref_clk(phy_common);
+ if (err) {
+ dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
+ __func__, err);
+ goto out_disable_iface_clk;
+ }
+
/* enable device PHY ref_clk pad rail */
if (phy_common->vddp_ref_clk.reg) {
- err = ufs_qcom_phy_enable_vreg(generic_phy,
+ err = ufs_qcom_phy_enable_vreg(dev,
&phy_common->vddp_ref_clk);
if (err) {
dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
@@ -717,11 +672,13 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy)
goto out;
out_disable_ref_clk:
- ufs_qcom_phy_disable_ref_clk(generic_phy);
+ ufs_qcom_phy_disable_ref_clk(phy_common);
+out_disable_iface_clk:
+ ufs_qcom_phy_disable_iface_clk(phy_common);
out_disable_pll:
- ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
+ ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_pll);
out_disable_phy:
- ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
+ ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_phy);
out:
return err;
}
@@ -731,15 +688,19 @@ int ufs_qcom_phy_power_off(struct phy *generic_phy)
{
struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
+ if (!phy_common->is_powered_on)
+ return 0;
+
phy_common->phy_spec_ops->power_control(phy_common, false);
if (phy_common->vddp_ref_clk.reg)
- ufs_qcom_phy_disable_vreg(generic_phy,
+ ufs_qcom_phy_disable_vreg(phy_common->dev,
&phy_common->vddp_ref_clk);
- ufs_qcom_phy_disable_ref_clk(generic_phy);
+ ufs_qcom_phy_disable_ref_clk(phy_common);
+ ufs_qcom_phy_disable_iface_clk(phy_common);
- ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
- ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
+ ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_pll);
+ ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_phy);
phy_common->is_powered_on = false;
return 0;
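The net effect of the phy-qcom-ufs refactoring above is that resource lifetime is tied to the device. A minimal sketch, assuming the same struct ufs_qcom_phy_vreg as the file, of how the devm_kstrdup() conversion removes the need for the deleted ufs_qcom_phy_remove()/kfree() pairing; the function name is hypothetical.

    #include <linux/device.h>
    #include <linux/slab.h>

    static int demo_init_vreg_name(struct device *dev,
    			       struct ufs_qcom_phy_vreg *vreg,
    			       const char *name)
    {
    	/* devres frees this automatically on unbind; no .remove kfree(). */
    	vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
    	if (!vreg->name)
    		return -ENOMEM;
    	return 0;
    }

This is also why the two QMP platform drivers above could drop their .remove callbacks entirely.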
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index c8fed9fa1cca..968a0ab4b398 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -84,8 +84,8 @@ extern void zfcp_fc_link_test_work(struct work_struct *);
extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *);
extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
-extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
-extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
+extern int zfcp_fc_exec_bsg_job(struct bsg_job *);
+extern int zfcp_fc_timeout_bsg_job(struct bsg_job *);
extern void zfcp_fc_sym_name_update(struct work_struct *);
extern unsigned int zfcp_fc_port_scan_backoff(void);
extern void zfcp_fc_conditional_port_scan(struct zfcp_adapter *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 237688af179b..7331eea67435 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/utsname.h>
#include <linux/random.h>
+#include <linux/bsg-lib.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include "zfcp_ext.h"
@@ -885,26 +886,30 @@ out_free:
static void zfcp_fc_ct_els_job_handler(void *data)
{
- struct fc_bsg_job *job = data;
+ struct bsg_job *job = data;
struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
struct fc_bsg_reply *jr = job->reply;
jr->reply_payload_rcv_len = job->reply_payload.payload_len;
jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
jr->result = zfcp_ct_els->status ? -EIO : 0;
- job->job_done(job);
+ bsg_job_done(job, jr->result, jr->reply_payload_rcv_len);
}
-static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
+static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct bsg_job *job)
{
u32 preamble_word1;
u8 gs_type;
struct zfcp_adapter *adapter;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_rport *rport = fc_bsg_to_rport(job);
+ struct Scsi_Host *shost;
- preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
+ preamble_word1 = bsg_request->rqst_data.r_ct.preamble_word1;
gs_type = (preamble_word1 & 0xff000000) >> 24;
- adapter = (struct zfcp_adapter *) job->shost->hostdata[0];
+ shost = rport ? rport_to_shost(rport) : fc_bsg_to_shost(job);
+ adapter = (struct zfcp_adapter *) shost->hostdata[0];
switch (gs_type) {
case FC_FST_ALIAS:
@@ -924,7 +929,7 @@ static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
static void zfcp_fc_ct_job_handler(void *data)
{
- struct fc_bsg_job *job = data;
+ struct bsg_job *job = data;
struct zfcp_fc_wka_port *wka_port;
wka_port = zfcp_fc_job_wka_port(job);
@@ -933,11 +938,12 @@ static void zfcp_fc_ct_job_handler(void *data)
zfcp_fc_ct_els_job_handler(data);
}
-static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
+static int zfcp_fc_exec_els_job(struct bsg_job *job,
struct zfcp_adapter *adapter)
{
struct zfcp_fsf_ct_els *els = job->dd_data;
- struct fc_rport *rport = job->rport;
+ struct fc_rport *rport = fc_bsg_to_rport(job);
+ struct fc_bsg_request *bsg_request = job->request;
struct zfcp_port *port;
u32 d_id;
@@ -949,13 +955,13 @@ static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
d_id = port->d_id;
put_device(&port->dev);
} else
- d_id = ntoh24(job->request->rqst_data.h_els.port_id);
+ d_id = ntoh24(bsg_request->rqst_data.h_els.port_id);
els->handler = zfcp_fc_ct_els_job_handler;
return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ);
}
-static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
+static int zfcp_fc_exec_ct_job(struct bsg_job *job,
struct zfcp_adapter *adapter)
{
int ret;
@@ -978,13 +984,15 @@ static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
return ret;
}
-int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
+int zfcp_fc_exec_bsg_job(struct bsg_job *job)
{
struct Scsi_Host *shost;
struct zfcp_adapter *adapter;
struct zfcp_fsf_ct_els *ct_els = job->dd_data;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_rport *rport = fc_bsg_to_rport(job);
- shost = job->rport ? rport_to_shost(job->rport) : job->shost;
+ shost = rport ? rport_to_shost(rport) : fc_bsg_to_shost(job);
adapter = (struct zfcp_adapter *)shost->hostdata[0];
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
@@ -994,7 +1002,7 @@ int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
ct_els->resp = job->reply_payload.sg_list;
ct_els->handler_data = job;
- switch (job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_RPT_ELS:
case FC_BSG_HST_ELS_NOLOGIN:
return zfcp_fc_exec_els_job(job, adapter);
@@ -1006,7 +1014,7 @@ int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
}
}
-int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *job)
+int zfcp_fc_timeout_bsg_job(struct bsg_job *job)
{
/* hardware tracks timeout, reset bsg timeout to not interfere */
return -EAGAIN;
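To summarize the API shift in the zfcp changes: FC passthrough handlers now take the generic struct bsg_job, recover the FC-specific request/reply through job->request and job->reply, locate the host via the fc_bsg_to_rport()/fc_bsg_to_shost() helpers, and complete via bsg_job_done(). A hedged sketch composed only from the helpers visible in the diff; the function name and status parameter are invented, and error handling is elided.

    #include <linux/bsg-lib.h>
    #include <scsi/scsi_transport_fc.h>

    static void demo_complete_bsg(struct bsg_job *job, int status)
    {
    	struct fc_bsg_request *bsg_request = job->request;
    	struct fc_bsg_reply *bsg_reply = job->reply;
    	struct fc_rport *rport = fc_bsg_to_rport(job);
    	struct Scsi_Host *shost = rport ? rport_to_shost(rport)
    					: fc_bsg_to_shost(job);

    	(void)bsg_request->msgcode;	/* FC_BSG_RPT_ELS, FC_BSG_HST_CT, ... */
    	(void)shost;

    	bsg_reply->reply_payload_rcv_len = job->reply_payload.payload_len;
    	bsg_reply->result = status ? -EIO : 0;
    	bsg_job_done(job, bsg_reply->result,
    		     bsg_reply->reply_payload_rcv_len);
    }

The same pattern recurs in the lpfc, qla2xxx, bfa, and libfc bsg changes counted in the diffstat above.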
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 3e2bdb90813c..dfa93347c752 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -263,6 +263,7 @@ config SCSI_SPI_ATTRS
config SCSI_FC_ATTRS
tristate "FiberChannel Transport Attributes"
depends on SCSI && NET
+ select BLK_DEV_BSGLIB
select SCSI_NETLINK
help
If you wish to export transport-specific information about
@@ -743,40 +744,18 @@ config SCSI_ISCI
control unit found in the Intel(R) C600 series chipset.
config SCSI_GENERIC_NCR5380
- tristate "Generic NCR5380/53c400 SCSI PIO support"
- depends on ISA && SCSI
+ tristate "Generic NCR5380/53c400 SCSI ISA card support"
+ depends on ISA && SCSI && HAS_IOPORT_MAP
select SCSI_SPI_ATTRS
---help---
- This is a driver for the old NCR 53c80 series of SCSI controllers
- on boards using PIO. Most boards such as the Trantor T130 fit this
- category, along with a large number of ISA 8bit controllers shipped
- for free with SCSI scanners. If you have a PAS16, T128 or DMX3191
- you should select the specific driver for that card rather than
- generic 5380 support.
-
- It is explained in section 3.8 of the SCSI-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. If it doesn't work out
- of the box, you may have to change some settings in
- <file:drivers/scsi/g_NCR5380.h>.
+ This is a driver for old ISA card SCSI controllers based on a
+ NCR 5380, 53C80, 53C400, 53C400A, or DTC 436 device.
+ Most boards such as the Trantor T130 fit this category, as do
+ various 8-bit and 16-bit ISA cards bundled with SCSI scanners.
To compile this driver as a module, choose M here: the
module will be called g_NCR5380.
-config SCSI_GENERIC_NCR5380_MMIO
- tristate "Generic NCR5380/53c400 SCSI MMIO support"
- depends on ISA && SCSI
- select SCSI_SPI_ATTRS
- ---help---
- This is a driver for the old NCR 53c80 series of SCSI controllers
- on boards using memory mapped I/O.
- It is explained in section 3.8 of the SCSI-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. If it doesn't work out
- of the box, you may have to change some settings in
- <file:drivers/scsi/g_NCR5380.h>.
-
- To compile this driver as a module, choose M here: the
- module will be called g_NCR5380_mmio.
-
config SCSI_IPS
tristate "IBM ServeRAID support"
depends on PCI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1520596f54a6..a2d03957cbe2 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -74,7 +74,6 @@ obj-$(CONFIG_SCSI_ISCI) += isci/
obj-$(CONFIG_SCSI_IPS) += ips.o
obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o
-obj-$(CONFIG_SCSI_GENERIC_NCR5380_MMIO) += g_NCR5380_mmio.o
obj-$(CONFIG_SCSI_NCR53C406A) += NCR53c406a.o
obj-$(CONFIG_SCSI_NCR_D700) += 53c700.o NCR_D700.o
obj-$(CONFIG_SCSI_NCR_Q720) += NCR_Q720_mod.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 790babc5ef66..d849ffa378b1 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -121,9 +121,10 @@
*
* Either real DMA *or* pseudo DMA may be implemented
*
- * NCR5380_dma_write_setup(instance, src, count) - initialize
- * NCR5380_dma_read_setup(instance, dst, count) - initialize
- * NCR5380_dma_residual(instance); - residual count
+ * NCR5380_dma_xfer_len - determine size of DMA/PDMA transfer
+ * NCR5380_dma_send_setup - execute DMA/PDMA from memory to 5380
+ * NCR5380_dma_recv_setup - execute DMA/PDMA from 5380 to memory
+ * NCR5380_dma_residual - residual byte count
*
* The generic driver is initialized by calling NCR5380_init(instance),
* after setting the appropriate host specific fields and ID. If the
@@ -178,7 +179,7 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd)
/**
* NCR5380_poll_politely2 - wait for two chip register values
- * @instance: controller to poll
+ * @hostdata: host private data
* @reg1: 5380 register to poll
* @bit1: Bitmask to check
* @val1: Expected value
@@ -195,18 +196,14 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd)
* Returns 0 if either or both event(s) occurred otherwise -ETIMEDOUT.
*/
-static int NCR5380_poll_politely2(struct Scsi_Host *instance,
- int reg1, int bit1, int val1,
- int reg2, int bit2, int val2, int wait)
+static int NCR5380_poll_politely2(struct NCR5380_hostdata *hostdata,
+ unsigned int reg1, u8 bit1, u8 val1,
+ unsigned int reg2, u8 bit2, u8 val2,
+ unsigned long wait)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ unsigned long n = hostdata->poll_loops;
unsigned long deadline = jiffies + wait;
- unsigned long n;
- /* Busy-wait for up to 10 ms */
- n = min(10000U, jiffies_to_usecs(wait));
- n *= hostdata->accesses_per_ms;
- n /= 2000;
do {
if ((NCR5380_read(reg1) & bit1) == val1)
return 0;
@@ -288,6 +285,7 @@ mrs[] = {
static void NCR5380_print(struct Scsi_Host *instance)
{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char status, data, basr, mr, icr, i;
data = NCR5380_read(CURRENT_SCSI_DATA_REG);
@@ -337,6 +335,7 @@ static struct {
static void NCR5380_print_phase(struct Scsi_Host *instance)
{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char status;
int i;
@@ -441,14 +440,14 @@ static void prepare_info(struct Scsi_Host *instance)
struct NCR5380_hostdata *hostdata = shost_priv(instance);
snprintf(hostdata->info, sizeof(hostdata->info),
- "%s, io_port 0x%lx, n_io_port %d, "
- "base 0x%lx, irq %d, "
+ "%s, irq %d, "
+ "io_port 0x%lx, base 0x%lx, "
"can_queue %d, cmd_per_lun %d, "
"sg_tablesize %d, this_id %d, "
"flags { %s%s%s}, "
"options { %s} ",
- instance->hostt->name, instance->io_port, instance->n_io_port,
- instance->base, instance->irq,
+ instance->hostt->name, instance->irq,
+ hostdata->io_port, hostdata->base,
instance->can_queue, instance->cmd_per_lun,
instance->sg_tablesize, instance->this_id,
hostdata->flags & FLAG_DMA_FIXUP ? "DMA_FIXUP " : "",
@@ -482,6 +481,7 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
struct NCR5380_hostdata *hostdata = shost_priv(instance);
int i;
unsigned long deadline;
+ unsigned long accesses_per_ms;
instance->max_lun = 7;
@@ -530,7 +530,8 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
++i;
cpu_relax();
} while (time_is_after_jiffies(deadline));
- hostdata->accesses_per_ms = i / 256;
+ accesses_per_ms = i / 256;
+ hostdata->poll_loops = NCR5380_REG_POLL_TIME * accesses_per_ms / 2;
return 0;
}
@@ -560,7 +561,7 @@ static int NCR5380_maybe_reset_bus(struct Scsi_Host *instance)
case 3:
case 5:
shost_printk(KERN_ERR, instance, "SCSI bus busy, waiting up to five seconds\n");
- NCR5380_poll_politely(instance,
+ NCR5380_poll_politely(hostdata,
STATUS_REG, SR_BSY, 0, 5 * HZ);
break;
case 2:
@@ -871,7 +872,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
NCR5380_read(RESET_PARITY_INTERRUPT_REG);
- transferred = hostdata->dma_len - NCR5380_dma_residual(instance);
+ transferred = hostdata->dma_len - NCR5380_dma_residual(hostdata);
hostdata->dma_len = 0;
data = (unsigned char **)&hostdata->connected->SCp.ptr;
@@ -994,7 +995,7 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
}
handled = 1;
} else {
- shost_printk(KERN_NOTICE, instance, "interrupt without IRQ bit\n");
+ dsprintk(NDEBUG_INTR, instance, "interrupt without IRQ bit\n");
#ifdef SUN3_SCSI_VME
dregs->csr |= CSR_DMA_ENABLE;
#endif
@@ -1075,7 +1076,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
*/
spin_unlock_irq(&hostdata->lock);
- err = NCR5380_poll_politely2(instance, MODE_REG, MR_ARBITRATE, 0,
+ err = NCR5380_poll_politely2(hostdata, MODE_REG, MR_ARBITRATE, 0,
INITIATOR_COMMAND_REG, ICR_ARBITRATION_PROGRESS,
ICR_ARBITRATION_PROGRESS, HZ);
spin_lock_irq(&hostdata->lock);
@@ -1201,7 +1202,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
* selection.
*/
- err = NCR5380_poll_politely(instance, STATUS_REG, SR_BSY, SR_BSY,
+ err = NCR5380_poll_politely(hostdata, STATUS_REG, SR_BSY, SR_BSY,
msecs_to_jiffies(250));
if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
@@ -1247,7 +1248,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
/* Wait for start of REQ/ACK handshake */
- err = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ);
+ err = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ);
spin_lock_irq(&hostdata->lock);
if (err < 0) {
shost_printk(KERN_ERR, instance, "select: REQ timeout\n");
@@ -1318,6 +1319,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
unsigned char *phase, int *count,
unsigned char **data)
{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char p = *phase, tmp;
int c = *count;
unsigned char *d = *data;
@@ -1336,7 +1338,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
* valid
*/
- if (NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0)
+ if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0)
break;
dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n");
@@ -1381,7 +1383,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
}
- if (NCR5380_poll_politely(instance,
+ if (NCR5380_poll_politely(hostdata,
STATUS_REG, SR_REQ, 0, 5 * HZ) < 0)
break;
@@ -1440,6 +1442,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
static void do_reset(struct Scsi_Host *instance)
{
+ struct NCR5380_hostdata __maybe_unused *hostdata = shost_priv(instance);
unsigned long flags;
local_irq_save(flags);
@@ -1462,6 +1465,7 @@ static void do_reset(struct Scsi_Host *instance)
static int do_abort(struct Scsi_Host *instance)
{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char *msgptr, phase, tmp;
int len;
int rc;
@@ -1479,7 +1483,7 @@ static int do_abort(struct Scsi_Host *instance)
* the target sees, so we just handshake.
*/
- rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
+ rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
if (rc < 0)
goto timeout;
@@ -1490,7 +1494,7 @@ static int do_abort(struct Scsi_Host *instance)
if (tmp != PHASE_MSGOUT) {
NCR5380_write(INITIATOR_COMMAND_REG,
ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
- rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 3 * HZ);
+ rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0, 3 * HZ);
if (rc < 0)
goto timeout;
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
@@ -1575,9 +1579,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
* starting the NCR. This is also the cleaner way for the TT.
*/
if (p & SR_IO)
- result = NCR5380_dma_recv_setup(instance, d, c);
+ result = NCR5380_dma_recv_setup(hostdata, d, c);
else
- result = NCR5380_dma_send_setup(instance, d, c);
+ result = NCR5380_dma_send_setup(hostdata, d, c);
}
/*
@@ -1609,9 +1613,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
* NCR access, else the DMA setup gets trashed!
*/
if (p & SR_IO)
- result = NCR5380_dma_recv_setup(instance, d, c);
+ result = NCR5380_dma_recv_setup(hostdata, d, c);
else
- result = NCR5380_dma_send_setup(instance, d, c);
+ result = NCR5380_dma_send_setup(hostdata, d, c);
}
/* On failure, NCR5380_dma_xxxx_setup() returns a negative int. */
@@ -1678,12 +1682,12 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
* byte.
*/
- if (NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
+ if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_DRQ, BASR_DRQ, HZ) < 0) {
result = -1;
shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n");
}
- if (NCR5380_poll_politely(instance, STATUS_REG,
+ if (NCR5380_poll_politely(hostdata, STATUS_REG,
SR_REQ, 0, HZ) < 0) {
result = -1;
shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n");
@@ -1694,7 +1698,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
* Wait for the last byte to be sent. If REQ is being asserted for
* the byte we're interested, we'll ACK it and it will go false.
*/
- if (NCR5380_poll_politely2(instance,
+ if (NCR5380_poll_politely2(hostdata,
BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ,
BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, HZ) < 0) {
result = -1;
@@ -1751,22 +1755,26 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
}
#ifdef CONFIG_SUN3
- if (phase == PHASE_CMDOUT) {
- void *d;
- unsigned long count;
+ if (phase == PHASE_CMDOUT &&
+ sun3_dma_setup_done != cmd) {
+ int count;
if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
- count = cmd->SCp.buffer->length;
- d = sg_virt(cmd->SCp.buffer);
- } else {
- count = cmd->SCp.this_residual;
- d = cmd->SCp.ptr;
+ ++cmd->SCp.buffer;
+ --cmd->SCp.buffers_residual;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
}
- if (sun3_dma_setup_done != cmd &&
- sun3scsi_dma_xfer_len(count, cmd) > 0) {
- sun3scsi_dma_setup(instance, d, count,
- rq_data_dir(cmd->request));
+ count = sun3scsi_dma_xfer_len(hostdata, cmd);
+
+ if (count > 0) {
+ if (rq_data_dir(cmd->request))
+ sun3scsi_dma_send_setup(hostdata,
+ cmd->SCp.ptr, count);
+ else
+ sun3scsi_dma_recv_setup(hostdata,
+ cmd->SCp.ptr, count);
sun3_dma_setup_done = cmd;
}
#ifdef SUN3_SCSI_VME
@@ -1827,7 +1835,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
transfersize = 0;
if (!cmd->device->borken)
- transfersize = NCR5380_dma_xfer_len(instance, cmd, phase);
+ transfersize = NCR5380_dma_xfer_len(hostdata, cmd);
if (transfersize > 0) {
len = transfersize;
@@ -2073,7 +2081,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
} /* switch(phase) */
} else {
spin_unlock_irq(&hostdata->lock);
- NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ);
+ NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ);
spin_lock_irq(&hostdata->lock);
}
}
@@ -2119,7 +2127,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
*/
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
- if (NCR5380_poll_politely(instance,
+ if (NCR5380_poll_politely(hostdata,
STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) {
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
return;
@@ -2130,7 +2138,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
* Wait for target to go into MSGIN.
*/
- if (NCR5380_poll_politely(instance,
+ if (NCR5380_poll_politely(hostdata,
STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) {
do_abort(instance);
return;
@@ -2204,22 +2212,25 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
}
#ifdef CONFIG_SUN3
- {
- void *d;
- unsigned long count;
+ if (sun3_dma_setup_done != tmp) {
+ int count;
if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) {
- count = tmp->SCp.buffer->length;
- d = sg_virt(tmp->SCp.buffer);
- } else {
- count = tmp->SCp.this_residual;
- d = tmp->SCp.ptr;
+ ++tmp->SCp.buffer;
+ --tmp->SCp.buffers_residual;
+ tmp->SCp.this_residual = tmp->SCp.buffer->length;
+ tmp->SCp.ptr = sg_virt(tmp->SCp.buffer);
}
- if (sun3_dma_setup_done != tmp &&
- sun3scsi_dma_xfer_len(count, tmp) > 0) {
- sun3scsi_dma_setup(instance, d, count,
- rq_data_dir(tmp->request));
+ count = sun3scsi_dma_xfer_len(hostdata, tmp);
+
+ if (count > 0) {
+ if (rq_data_dir(tmp->request))
+ sun3scsi_dma_send_setup(hostdata,
+ tmp->SCp.ptr, count);
+ else
+ sun3scsi_dma_recv_setup(hostdata,
+ tmp->SCp.ptr, count);
sun3_dma_setup_done = tmp;
}
}
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 965d92339455..3c6ce5434449 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -219,27 +219,32 @@
#define FLAG_TOSHIBA_DELAY 128 /* Allow for borken CD-ROMs */
struct NCR5380_hostdata {
- NCR5380_implementation_fields; /* implementation specific */
- struct Scsi_Host *host; /* Host backpointer */
- unsigned char id_mask, id_higher_mask; /* 1 << id, all bits greater */
- unsigned char busy[8]; /* index = target, bit = lun */
- int dma_len; /* requested length of DMA */
- unsigned char last_message; /* last message OUT */
- struct scsi_cmnd *connected; /* currently connected cmnd */
- struct scsi_cmnd *selecting; /* cmnd to be connected */
- struct list_head unissued; /* waiting to be issued */
- struct list_head autosense; /* priority issue queue */
- struct list_head disconnected; /* waiting for reconnect */
- spinlock_t lock; /* protects this struct */
- int flags;
- struct scsi_eh_save ses;
- struct scsi_cmnd *sensing;
+ NCR5380_implementation_fields; /* Board-specific data */
+ u8 __iomem *io; /* Remapped 5380 address */
+ u8 __iomem *pdma_io; /* Remapped PDMA address */
+ unsigned long poll_loops; /* Register polling limit */
+ spinlock_t lock; /* Protects this struct */
+ struct scsi_cmnd *connected; /* Currently connected cmnd */
+ struct list_head disconnected; /* Waiting for reconnect */
+ struct Scsi_Host *host; /* SCSI host backpointer */
+ struct workqueue_struct *work_q; /* SCSI host work queue */
+ struct work_struct main_task; /* Work item for main loop */
+ int flags; /* Board-specific quirks */
+ int dma_len; /* Requested length of DMA */
+ int read_overruns; /* Transfer size reduction for DMA erratum */
+ unsigned long io_port; /* Device IO port */
+ unsigned long base; /* Device base address */
+ struct list_head unissued; /* Waiting to be issued */
+ struct scsi_cmnd *selecting; /* Cmnd to be connected */
+ struct list_head autosense; /* Priority cmnd queue */
+ struct scsi_cmnd *sensing; /* Cmnd needing autosense */
+ struct scsi_eh_save ses; /* Cmnd state saved for EH */
+ unsigned char busy[8]; /* Index = target, bit = lun */
+ unsigned char id_mask; /* 1 << Host ID */
+ unsigned char id_higher_mask; /* All bits above id_mask */
+ unsigned char last_message; /* Last Message Out */
+ unsigned long region_size; /* Size of address/port range */
char info[256];
- int read_overruns; /* number of bytes to cut from a
- * transfer to handle chip overruns */
- struct work_struct main_task;
- struct workqueue_struct *work_q;
- unsigned long accesses_per_ms; /* chip register accesses per ms */
};
#ifdef __KERNEL__
@@ -252,6 +257,9 @@ struct NCR5380_cmd {
#define NCR5380_PIO_CHUNK_SIZE 256
+/* Time limit (ms) to poll registers when IRQs are disabled, e.g. during PDMA */
+#define NCR5380_REG_POLL_TIME 15
+
static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr)
{
return ((struct scsi_cmnd *)ncmd_ptr) - 1;
@@ -294,14 +302,45 @@ static void NCR5380_reselect(struct Scsi_Host *instance);
static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
-static int NCR5380_poll_politely2(struct Scsi_Host *, int, int, int, int, int, int, int);
+static int NCR5380_poll_politely2(struct NCR5380_hostdata *,
+ unsigned int, u8, u8,
+ unsigned int, u8, u8, unsigned long);
-static inline int NCR5380_poll_politely(struct Scsi_Host *instance,
- int reg, int bit, int val, int wait)
+static inline int NCR5380_poll_politely(struct NCR5380_hostdata *hostdata,
+ unsigned int reg, u8 bit, u8 val,
+ unsigned long wait)
{
- return NCR5380_poll_politely2(instance, reg, bit, val,
+ if ((NCR5380_read(reg) & bit) == val)
+ return 0;
+
+ return NCR5380_poll_politely2(hostdata, reg, bit, val,
reg, bit, val, wait);
}
+static int NCR5380_dma_xfer_len(struct NCR5380_hostdata *,
+ struct scsi_cmnd *);
+static int NCR5380_dma_send_setup(struct NCR5380_hostdata *,
+ unsigned char *, int);
+static int NCR5380_dma_recv_setup(struct NCR5380_hostdata *,
+ unsigned char *, int);
+static int NCR5380_dma_residual(struct NCR5380_hostdata *);
+
+static inline int NCR5380_dma_xfer_none(struct NCR5380_hostdata *hostdata,
+ struct scsi_cmnd *cmd)
+{
+ return 0;
+}
+
+static inline int NCR5380_dma_setup_none(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return 0;
+}
+
+static inline int NCR5380_dma_residual_none(struct NCR5380_hostdata *hostdata)
+{
+ return 0;
+}
+
#endif /* __KERNEL__ */
#endif /* NCR5380_H */
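With the core now passing struct NCR5380_hostdata everywhere, a board driver's
glue shrinks to a few macros plus the shared no-op helpers declared above. A
minimal sketch for a hypothetical PIO-only card (names and register layout are
illustrative; hostdata->io is assumed to have been mapped by the board's probe
routine):

	#define NCR5380_read(reg)         readb(hostdata->io + (reg))
	#define NCR5380_write(reg, value) writeb(value, hostdata->io + (reg))
	#define NCR5380_dma_xfer_len      NCR5380_dma_xfer_none
	#define NCR5380_dma_recv_setup    NCR5380_dma_setup_none
	#define NCR5380_dma_send_setup    NCR5380_dma_setup_none
	#define NCR5380_dma_residual      NCR5380_dma_residual_none
	#include "NCR5380.h"

The oak.c hunks further down follow exactly this shape, swapping in the card's
PDMA routines for the recv/send hooks.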
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 969c312de1be..f059c14efa0c 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1246,7 +1246,6 @@ struct aac_dev
u32 max_msix; /* max. MSI-X vectors */
u32 vector_cap; /* MSI-X vector capab.*/
int msi_enabled; /* MSI/MSI-X enabled */
- struct msix_entry msixentry[AAC_MAX_MSIX];
struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */
u8 adapter_shutdown;
u32 handle_pci_error;
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 341ea327ae79..4f56b1003cc7 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -378,16 +378,12 @@ void aac_define_int_mode(struct aac_dev *dev)
if (msi_count > AAC_MAX_MSIX)
msi_count = AAC_MAX_MSIX;
- for (i = 0; i < msi_count; i++)
- dev->msixentry[i].entry = i;
-
if (msi_count > 1 &&
pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) {
min_msix = 2;
- i = pci_enable_msix_range(dev->pdev,
- dev->msixentry,
- min_msix,
- msi_count);
+ i = pci_alloc_irq_vectors(dev->pdev,
+ min_msix, msi_count,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
if (i > 0) {
dev->msi_enabled = 1;
msi_count = i;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 0aeecec1f5ea..9e7551fe4b19 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2043,30 +2043,22 @@ int aac_acquire_irq(struct aac_dev *dev)
int i;
int j;
int ret = 0;
- int cpu;
- cpu = cpumask_first(cpu_online_mask);
if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
for (i = 0; i < dev->max_msix; i++) {
dev->aac_msix[i].vector_no = i;
dev->aac_msix[i].dev = dev;
- if (request_irq(dev->msixentry[i].vector,
+ if (request_irq(pci_irq_vector(dev->pdev, i),
dev->a_ops.adapter_intr,
0, "aacraid", &(dev->aac_msix[i]))) {
printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
dev->name, dev->id, i);
for (j = 0 ; j < i ; j++)
- free_irq(dev->msixentry[j].vector,
+ free_irq(pci_irq_vector(dev->pdev, j),
&(dev->aac_msix[j]));
pci_disable_msix(dev->pdev);
ret = -1;
}
- if (irq_set_affinity_hint(dev->msixentry[i].vector,
- get_cpu_mask(cpu))) {
- printk(KERN_ERR "%s%d: Failed to set IRQ affinity for cpu %d\n",
- dev->name, dev->id, cpu);
- }
- cpu = cpumask_next(cpu, cpu_online_mask);
}
} else {
dev->aac_msix[0].vector_no = 0;
@@ -2096,16 +2088,9 @@ void aac_free_irq(struct aac_dev *dev)
dev->pdev->device == PMC_DEVICE_S8 ||
dev->pdev->device == PMC_DEVICE_S9) {
if (dev->max_msix > 1) {
- for (i = 0; i < dev->max_msix; i++) {
- if (irq_set_affinity_hint(
- dev->msixentry[i].vector, NULL)) {
- printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
- dev->name, dev->id, cpu);
- }
- cpu = cpumask_next(cpu, cpu_online_mask);
- free_irq(dev->msixentry[i].vector,
- &(dev->aac_msix[i]));
- }
+ for (i = 0; i < dev->max_msix; i++)
+ free_irq(pci_irq_vector(dev->pdev, i),
+ &(dev->aac_msix[i]));
} else {
free_irq(dev->pdev->irq, &(dev->aac_msix[0]));
}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 79871f3519ff..e4f3e22fcbd9 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1071,7 +1071,6 @@ static struct scsi_host_template aac_driver_template = {
static void __aac_shutdown(struct aac_dev * aac)
{
int i;
- int cpu;
aac_send_shutdown(aac);
@@ -1087,24 +1086,13 @@ static void __aac_shutdown(struct aac_dev * aac)
kthread_stop(aac->thread);
}
aac_adapter_disable_int(aac);
- cpu = cpumask_first(cpu_online_mask);
if (aac->pdev->device == PMC_DEVICE_S6 ||
aac->pdev->device == PMC_DEVICE_S7 ||
aac->pdev->device == PMC_DEVICE_S8 ||
aac->pdev->device == PMC_DEVICE_S9) {
if (aac->max_msix > 1) {
for (i = 0; i < aac->max_msix; i++) {
- if (irq_set_affinity_hint(
- aac->msixentry[i].vector,
- NULL)) {
- printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
- aac->name,
- aac->id,
- cpu);
- }
- cpu = cpumask_next(cpu,
- cpu_online_mask);
- free_irq(aac->msixentry[i].vector,
+ free_irq(pci_irq_vector(aac->pdev, i),
&(aac->aac_msix[i]));
}
} else {
@@ -1350,7 +1338,7 @@ static void aac_release_resources(struct aac_dev *aac)
aac->pdev->device == PMC_DEVICE_S9) {
if (aac->max_msix > 1) {
for (i = 0; i < aac->max_msix; i++)
- free_irq(aac->msixentry[i].vector,
+ free_irq(pci_irq_vector(aac->pdev, i),
&(aac->aac_msix[i]));
} else {
free_irq(aac->pdev->irq, &(aac->aac_msix[0]));
@@ -1396,13 +1384,13 @@ static int aac_acquire_resources(struct aac_dev *dev)
dev->aac_msix[i].vector_no = i;
dev->aac_msix[i].dev = dev;
- if (request_irq(dev->msixentry[i].vector,
+ if (request_irq(pci_irq_vector(dev->pdev, i),
dev->a_ops.adapter_intr,
0, "aacraid", &(dev->aac_msix[i]))) {
printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
name, instance, i);
for (j = 0 ; j < i ; j++)
- free_irq(dev->msixentry[j].vector,
+ free_irq(pci_irq_vector(dev->pdev, j),
&(dev->aac_msix[j]));
pci_disable_msix(dev->pdev);
goto error_iounmap;
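All of the aacraid call sites converted above share the canonical
pci_alloc_irq_vectors() shape: allocate a vector range once, address vectors
with pci_irq_vector(), and unwind with pci_free_irq_vectors(). A self-contained
sketch of that pattern, with hypothetical my_isr()/MY_MAX_VECS stand-ins that
are not part of this patch:

	#include <linux/interrupt.h>
	#include <linux/pci.h>

	#define MY_MAX_VECS 8

	static irqreturn_t my_isr(int irq, void *ctx)
	{
		return IRQ_HANDLED;
	}

	static int my_setup_irqs(struct pci_dev *pdev, void *ctx)
	{
		int nvec, i, ret;

		/* Falls back from MSI-X to MSI to INTx as needed. */
		nvec = pci_alloc_irq_vectors(pdev, 1, MY_MAX_VECS,
					     PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
		if (nvec < 0)
			return nvec;

		for (i = 0; i < nvec; i++) {
			ret = request_irq(pci_irq_vector(pdev, i), my_isr,
					  0, "mydrv", ctx);
			if (ret)
				goto out_free;
		}
		return 0;

	out_free:
		while (--i >= 0)
			free_irq(pci_irq_vector(pdev, i), ctx);
		pci_free_irq_vectors(pdev);
		return ret;
	}

The PCI_IRQ_AFFINITY flag also spreads vectors across CPUs, which is what lets
the explicit irq_set_affinity_hint() bookkeeping above be deleted.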
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index febbd83e2ecd..81dd0927246b 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -11030,6 +11030,9 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
ASC_DBG(2, "AdvInitGetConfig()\n");
ret = AdvInitGetConfig(pdev, shost) ? -ENODEV : 0;
+#else
+ share_irq = 0;
+ ret = -ENODEV;
#endif /* CONFIG_PCI */
}
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 7c713f797535..f2671a8fa7e3 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -228,8 +228,11 @@ static int asd_init_scbs(struct asd_ha_struct *asd_ha)
bitmap_bytes = (asd_ha->seq.tc_index_bitmap_bits+7)/8;
bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
asd_ha->seq.tc_index_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
- if (!asd_ha->seq.tc_index_bitmap)
+ if (!asd_ha->seq.tc_index_bitmap) {
+ kfree(asd_ha->seq.tc_index_array);
+ asd_ha->seq.tc_index_array = NULL;
return -ENOMEM;
+ }
spin_lock_init(&seq->tc_index_lock);
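The rule the aic94xx fix applies: when a later allocation in a multi-step init
fails, free the earlier allocation and clear its pointer so an outer cleanup
path cannot double-free it. A generic sketch under that assumption (struct and
field names are hypothetical):

	#include <linux/bitops.h>
	#include <linux/slab.h>

	struct seq_state {
		void **index_array;
		unsigned long *bitmap;
	};

	static int seq_state_init(struct seq_state *s, size_t n)
	{
		s->index_array = kcalloc(n, sizeof(*s->index_array), GFP_KERNEL);
		if (!s->index_array)
			return -ENOMEM;

		s->bitmap = kzalloc(BITS_TO_LONGS(n) * sizeof(long), GFP_KERNEL);
		if (!s->bitmap) {
			kfree(s->index_array);
			s->index_array = NULL;	/* later kfree() is a no-op */
			return -ENOMEM;
		}
		return 0;
	}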
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index cf99f8cf4cdd..a254b32eba39 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -629,7 +629,6 @@ struct AdapterControlBlock
struct pci_dev * pdev;
struct Scsi_Host * host;
unsigned long vir2phy_offset;
- struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS];
/* Offset is used in making arc cdb physical to virtual calculations */
uint32_t outbound_int_enable;
uint32_t cdb_phyaddr_hi32;
@@ -671,8 +670,6 @@ struct AdapterControlBlock
/* iop init */
#define ACB_F_ABORT 0x0200
#define ACB_F_FIRMWARE_TRAP 0x0400
- #define ACB_F_MSI_ENABLED 0x1000
- #define ACB_F_MSIX_ENABLED 0x2000
struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
/* used for memory free */
struct list_head ccb_free_list;
@@ -725,7 +722,7 @@ struct AdapterControlBlock
atomic_t rq_map_token;
atomic_t ante_token_value;
uint32_t maxOutstanding;
- int msix_vector_count;
+ int vector_count;
};/* HW_DEVICE_EXTENSION */
/*
*******************************************************************************
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index f0cfb0451757..9e45749d55ed 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -720,51 +720,39 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
static int
arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
{
- int i, j, r;
- struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS];
-
- for (i = 0; i < ARCMST_NUM_MSIX_VECTORS; i++)
- entries[i].entry = i;
- r = pci_enable_msix_range(pdev, entries, 1, ARCMST_NUM_MSIX_VECTORS);
- if (r < 0)
- goto msi_int;
- acb->msix_vector_count = r;
- for (i = 0; i < r; i++) {
- if (request_irq(entries[i].vector,
- arcmsr_do_interrupt, 0, "arcmsr", acb)) {
+ unsigned long flags;
+ int nvec, i;
+
+ nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
+ PCI_IRQ_MSIX);
+ if (nvec > 0) {
+ pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
+ flags = 0;
+ } else {
+ nvec = pci_alloc_irq_vectors(pdev, 1, 1,
+ PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ if (nvec < 1)
+ return FAILED;
+
+ flags = IRQF_SHARED;
+ }
+
+ acb->vector_count = nvec;
+ for (i = 0; i < nvec; i++) {
+ if (request_irq(pci_irq_vector(pdev, i), arcmsr_do_interrupt,
+ flags, "arcmsr", acb)) {
pr_warn("arcmsr%d: request_irq =%d failed!\n",
- acb->host->host_no, entries[i].vector);
- for (j = 0 ; j < i ; j++)
- free_irq(entries[j].vector, acb);
- pci_disable_msix(pdev);
- goto msi_int;
+ acb->host->host_no, pci_irq_vector(pdev, i));
+ goto out_free_irq;
}
- acb->entries[i] = entries[i];
- }
- acb->acb_flags |= ACB_F_MSIX_ENABLED;
- pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
- return SUCCESS;
-msi_int:
- if (pci_enable_msi_exact(pdev, 1) < 0)
- goto legacy_int;
- if (request_irq(pdev->irq, arcmsr_do_interrupt,
- IRQF_SHARED, "arcmsr", acb)) {
- pr_warn("arcmsr%d: request_irq =%d failed!\n",
- acb->host->host_no, pdev->irq);
- pci_disable_msi(pdev);
- goto legacy_int;
- }
- acb->acb_flags |= ACB_F_MSI_ENABLED;
- pr_info("arcmsr%d: msi enabled\n", acb->host->host_no);
- return SUCCESS;
-legacy_int:
- if (request_irq(pdev->irq, arcmsr_do_interrupt,
- IRQF_SHARED, "arcmsr", acb)) {
- pr_warn("arcmsr%d: request_irq = %d failed!\n",
- acb->host->host_no, pdev->irq);
- return FAILED;
}
+
return SUCCESS;
+out_free_irq:
+ while (--i >= 0)
+ free_irq(pci_irq_vector(pdev, i), acb);
+ pci_free_irq_vectors(pdev);
+ return FAILED;
}
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -886,15 +874,9 @@ static void arcmsr_free_irq(struct pci_dev *pdev,
{
int i;
- if (acb->acb_flags & ACB_F_MSI_ENABLED) {
- free_irq(pdev->irq, acb);
- pci_disable_msi(pdev);
- } else if (acb->acb_flags & ACB_F_MSIX_ENABLED) {
- for (i = 0; i < acb->msix_vector_count; i++)
- free_irq(acb->entries[i].vector, acb);
- pci_disable_msix(pdev);
- } else
- free_irq(pdev->irq, acb);
+ for (i = 0; i < acb->vector_count; i++)
+ free_irq(pci_irq_vector(pdev, i), acb);
+ pci_free_irq_vectors(pdev);
}
static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index 8e9cfe8f22f5..a87b99c7fb9a 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -14,49 +14,48 @@
#include <scsi/scsi_host.h>
#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata)
-#define NCR5380_read(reg) cumanascsi_read(instance, reg)
-#define NCR5380_write(reg, value) cumanascsi_write(instance, reg, value)
+#define NCR5380_read(reg) cumanascsi_read(hostdata, reg)
+#define NCR5380_write(reg, value) cumanascsi_write(hostdata, reg, value)
-#define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize)
+#define NCR5380_dma_xfer_len cumanascsi_dma_xfer_len
#define NCR5380_dma_recv_setup cumanascsi_pread
#define NCR5380_dma_send_setup cumanascsi_pwrite
-#define NCR5380_dma_residual(instance) (0)
+#define NCR5380_dma_residual NCR5380_dma_residual_none
#define NCR5380_intr cumanascsi_intr
#define NCR5380_queue_command cumanascsi_queue_command
#define NCR5380_info cumanascsi_info
#define NCR5380_implementation_fields \
- unsigned ctrl; \
- void __iomem *base; \
- void __iomem *dma
+ unsigned ctrl
-#include "../NCR5380.h"
+struct NCR5380_hostdata;
+static u8 cumanascsi_read(struct NCR5380_hostdata *, unsigned int);
+static void cumanascsi_write(struct NCR5380_hostdata *, unsigned int, u8);
-void cumanascsi_setup(char *str, int *ints)
-{
-}
+#include "../NCR5380.h"
#define CTRL 0x16fc
#define STAT 0x2004
#define L(v) (((v)<<16)|((v) & 0x0000ffff))
#define H(v) (((v)>>16)|((v) & 0xffff0000))
-static inline int cumanascsi_pwrite(struct Scsi_Host *host,
+static inline int cumanascsi_pwrite(struct NCR5380_hostdata *hostdata,
unsigned char *addr, int len)
{
unsigned long *laddr;
- void __iomem *dma = priv(host)->dma + 0x2000;
+ u8 __iomem *base = hostdata->io;
+ u8 __iomem *dma = hostdata->pdma_io + 0x2000;
if(!len) return 0;
- writeb(0x02, priv(host)->base + CTRL);
+ writeb(0x02, base + CTRL);
laddr = (unsigned long *)addr;
while(len >= 32)
{
unsigned int status;
unsigned long v;
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(!(status & 0x40))
@@ -75,12 +74,12 @@ static inline int cumanascsi_pwrite(struct Scsi_Host *host,
}
addr = (unsigned char *)laddr;
- writeb(0x12, priv(host)->base + CTRL);
+ writeb(0x12, base + CTRL);
while(len > 0)
{
unsigned int status;
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
@@ -90,7 +89,7 @@ static inline int cumanascsi_pwrite(struct Scsi_Host *host,
break;
}
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
@@ -101,27 +100,28 @@ static inline int cumanascsi_pwrite(struct Scsi_Host *host,
}
}
end:
- writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL);
+ writeb(hostdata->ctrl | 0x40, base + CTRL);
if (len)
return -1;
return 0;
}
-static inline int cumanascsi_pread(struct Scsi_Host *host,
+static inline int cumanascsi_pread(struct NCR5380_hostdata *hostdata,
unsigned char *addr, int len)
{
unsigned long *laddr;
- void __iomem *dma = priv(host)->dma + 0x2000;
+ u8 __iomem *base = hostdata->io;
+ u8 __iomem *dma = hostdata->pdma_io + 0x2000;
if(!len) return 0;
- writeb(0x00, priv(host)->base + CTRL);
+ writeb(0x00, base + CTRL);
laddr = (unsigned long *)addr;
while(len >= 32)
{
unsigned int status;
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(!(status & 0x40))
@@ -140,12 +140,12 @@ static inline int cumanascsi_pread(struct Scsi_Host *host,
}
addr = (unsigned char *)laddr;
- writeb(0x10, priv(host)->base + CTRL);
+ writeb(0x10, base + CTRL);
while(len > 0)
{
unsigned int status;
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
@@ -155,7 +155,7 @@ static inline int cumanascsi_pread(struct Scsi_Host *host,
break;
}
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
@@ -166,37 +166,45 @@ static inline int cumanascsi_pread(struct Scsi_Host *host,
}
}
end:
- writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL);
+ writeb(hostdata->ctrl | 0x40, base + CTRL);
if (len)
return -1;
return 0;
}
-static unsigned char cumanascsi_read(struct Scsi_Host *host, unsigned int reg)
+static int cumanascsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
+ struct scsi_cmnd *cmd)
+{
+ return cmd->transfersize;
+}
+
+static u8 cumanascsi_read(struct NCR5380_hostdata *hostdata,
+ unsigned int reg)
{
- void __iomem *base = priv(host)->base;
- unsigned char val;
+ u8 __iomem *base = hostdata->io;
+ u8 val;
writeb(0, base + CTRL);
val = readb(base + 0x2100 + (reg << 2));
- priv(host)->ctrl = 0x40;
+ hostdata->ctrl = 0x40;
writeb(0x40, base + CTRL);
return val;
}
-static void cumanascsi_write(struct Scsi_Host *host, unsigned int reg, unsigned int value)
+static void cumanascsi_write(struct NCR5380_hostdata *hostdata,
+ unsigned int reg, u8 value)
{
- void __iomem *base = priv(host)->base;
+ u8 __iomem *base = hostdata->io;
writeb(0, base + CTRL);
writeb(value, base + 0x2100 + (reg << 2));
- priv(host)->ctrl = 0x40;
+ hostdata->ctrl = 0x40;
writeb(0x40, base + CTRL);
}
@@ -235,11 +243,11 @@ static int cumanascsi1_probe(struct expansion_card *ec,
goto out_release;
}
- priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_IOCSLOW),
- ecard_resource_len(ec, ECARD_RES_IOCSLOW));
- priv(host)->dma = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
- ecard_resource_len(ec, ECARD_RES_MEMC));
- if (!priv(host)->base || !priv(host)->dma) {
+ priv(host)->io = ioremap(ecard_resource_start(ec, ECARD_RES_IOCSLOW),
+ ecard_resource_len(ec, ECARD_RES_IOCSLOW));
+ priv(host)->pdma_io = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
+ ecard_resource_len(ec, ECARD_RES_MEMC));
+ if (!priv(host)->io || !priv(host)->pdma_io) {
ret = -ENOMEM;
goto out_unmap;
}
@@ -253,7 +261,7 @@ static int cumanascsi1_probe(struct expansion_card *ec,
NCR5380_maybe_reset_bus(host);
priv(host)->ctrl = 0;
- writeb(0, priv(host)->base + CTRL);
+ writeb(0, priv(host)->io + CTRL);
ret = request_irq(host->irq, cumanascsi_intr, 0,
"CumanaSCSI-1", host);
@@ -275,8 +283,8 @@ static int cumanascsi1_probe(struct expansion_card *ec,
out_exit:
NCR5380_exit(host);
out_unmap:
- iounmap(priv(host)->base);
- iounmap(priv(host)->dma);
+ iounmap(priv(host)->io);
+ iounmap(priv(host)->pdma_io);
scsi_host_put(host);
out_release:
ecard_release_resources(ec);
@@ -287,15 +295,17 @@ static int cumanascsi1_probe(struct expansion_card *ec,
static void cumanascsi1_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
+ void __iomem *base = priv(host)->io;
+ void __iomem *dma = priv(host)->pdma_io;
ecard_set_drvdata(ec, NULL);
scsi_remove_host(host);
free_irq(host->irq, host);
NCR5380_exit(host);
- iounmap(priv(host)->base);
- iounmap(priv(host)->dma);
scsi_host_put(host);
+ iounmap(base);
+ iounmap(dma);
ecard_release_resources(ec);
}
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index a396024a3cae..6be6666534d4 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -16,21 +16,18 @@
#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata)
-#define NCR5380_read(reg) \
- readb(priv(instance)->base + ((reg) << 2))
-#define NCR5380_write(reg, value) \
- writeb(value, priv(instance)->base + ((reg) << 2))
+#define NCR5380_read(reg) readb(hostdata->io + ((reg) << 2))
+#define NCR5380_write(reg, value) writeb(value, hostdata->io + ((reg) << 2))
-#define NCR5380_dma_xfer_len(instance, cmd, phase) (0)
+#define NCR5380_dma_xfer_len NCR5380_dma_xfer_none
#define NCR5380_dma_recv_setup oakscsi_pread
#define NCR5380_dma_send_setup oakscsi_pwrite
-#define NCR5380_dma_residual(instance) (0)
+#define NCR5380_dma_residual NCR5380_dma_residual_none
#define NCR5380_queue_command oakscsi_queue_command
#define NCR5380_info oakscsi_info
-#define NCR5380_implementation_fields \
- void __iomem *base
+#define NCR5380_implementation_fields /* none */
#include "../NCR5380.h"
@@ -40,10 +37,10 @@
#define STAT ((128 + 16) << 2)
#define DATA ((128 + 8) << 2)
-static inline int oakscsi_pwrite(struct Scsi_Host *instance,
+static inline int oakscsi_pwrite(struct NCR5380_hostdata *hostdata,
unsigned char *addr, int len)
{
- void __iomem *base = priv(instance)->base;
+ u8 __iomem *base = hostdata->io;
printk("writing %p len %d\n",addr, len);
@@ -55,10 +52,11 @@ printk("writing %p len %d\n",addr, len);
return 0;
}
-static inline int oakscsi_pread(struct Scsi_Host *instance,
+static inline int oakscsi_pread(struct NCR5380_hostdata *hostdata,
unsigned char *addr, int len)
{
- void __iomem *base = priv(instance)->base;
+ u8 __iomem *base = hostdata->io;
+
printk("reading %p len %d\n", addr, len);
while(len > 0)
{
@@ -133,15 +131,14 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
goto release;
}
- priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
- ecard_resource_len(ec, ECARD_RES_MEMC));
- if (!priv(host)->base) {
+ priv(host)->io = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
+ ecard_resource_len(ec, ECARD_RES_MEMC));
+ if (!priv(host)->io) {
ret = -ENOMEM;
goto unreg;
}
host->irq = NO_IRQ;
- host->n_io_port = 255;
ret = NCR5380_init(host, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP);
if (ret)
@@ -159,7 +156,7 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
out_exit:
NCR5380_exit(host);
out_unmap:
- iounmap(priv(host)->base);
+ iounmap(priv(host)->io);
unreg:
scsi_host_put(host);
release:
@@ -171,13 +168,14 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
static void oakscsi_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
+ void __iomem *base = priv(host)->io;
ecard_set_drvdata(ec, NULL);
scsi_remove_host(host);
NCR5380_exit(host);
- iounmap(priv(host)->base);
scsi_host_put(host);
+ iounmap(base);
ecard_release_resources(ec);
}
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index a59ad94ea52b..105b35393ce9 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -57,6 +57,9 @@
#define NCR5380_implementation_fields /* none */
+static u8 (*atari_scsi_reg_read)(unsigned int);
+static void (*atari_scsi_reg_write)(unsigned int, u8);
+
#define NCR5380_read(reg) atari_scsi_reg_read(reg)
#define NCR5380_write(reg, value) atari_scsi_reg_write(reg, value)
@@ -64,14 +67,10 @@
#define NCR5380_abort atari_scsi_abort
#define NCR5380_info atari_scsi_info
-#define NCR5380_dma_recv_setup(instance, data, count) \
- atari_scsi_dma_setup(instance, data, count, 0)
-#define NCR5380_dma_send_setup(instance, data, count) \
- atari_scsi_dma_setup(instance, data, count, 1)
-#define NCR5380_dma_residual(instance) \
- atari_scsi_dma_residual(instance)
-#define NCR5380_dma_xfer_len(instance, cmd, phase) \
- atari_dma_xfer_len(cmd->SCp.this_residual, cmd, !((phase) & SR_IO))
+#define NCR5380_dma_xfer_len atari_scsi_dma_xfer_len
+#define NCR5380_dma_recv_setup atari_scsi_dma_recv_setup
+#define NCR5380_dma_send_setup atari_scsi_dma_send_setup
+#define NCR5380_dma_residual atari_scsi_dma_residual
#define NCR5380_acquire_dma_irq(instance) falcon_get_lock(instance)
#define NCR5380_release_dma_irq(instance) falcon_release_lock()
@@ -126,9 +125,6 @@ static inline unsigned long SCSI_DMA_GETADR(void)
static void atari_scsi_fetch_restbytes(void);
-static unsigned char (*atari_scsi_reg_read)(unsigned char reg);
-static void (*atari_scsi_reg_write)(unsigned char reg, unsigned char value);
-
static unsigned long atari_dma_residual, atari_dma_startaddr;
static short atari_dma_active;
/* pointer to the dribble buffer */
@@ -457,15 +453,14 @@ static int __init atari_scsi_setup(char *str)
__setup("atascsi=", atari_scsi_setup);
#endif /* !MODULE */
-
-static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
+static unsigned long atari_scsi_dma_setup(struct NCR5380_hostdata *hostdata,
void *data, unsigned long count,
int dir)
{
unsigned long addr = virt_to_phys(data);
- dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, "
- "dir = %d\n", instance->host_no, data, addr, count, dir);
+ dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, dir = %d\n",
+ hostdata->host->host_no, data, addr, count, dir);
if (!IS_A_TT() && !STRAM_ADDR(addr)) {
/* If we have a non-DMAable address on a Falcon, use the dribble
@@ -522,8 +517,19 @@ static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
return count;
}
+static inline int atari_scsi_dma_recv_setup(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return atari_scsi_dma_setup(hostdata, data, count, 0);
+}
+
+static inline int atari_scsi_dma_send_setup(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return atari_scsi_dma_setup(hostdata, data, count, 1);
+}
-static long atari_scsi_dma_residual(struct Scsi_Host *instance)
+static int atari_scsi_dma_residual(struct NCR5380_hostdata *hostdata)
{
return atari_dma_residual;
}
@@ -564,10 +570,11 @@ static int falcon_classify_cmd(struct scsi_cmnd *cmd)
* the overrun problem, so this question is academic :-)
*/
-static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
- struct scsi_cmnd *cmd, int write_flag)
+static int atari_scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
+ struct scsi_cmnd *cmd)
{
- unsigned long possible_len, limit;
+ int wanted_len = cmd->SCp.this_residual;
+ int possible_len, limit;
if (wanted_len < DMA_MIN_SIZE)
return 0;
@@ -604,7 +611,7 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
* use the dribble buffer and thus can do only STRAM_BUFFER_SIZE bytes.
*/
- if (write_flag) {
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
/* Write operation can always use the DMA, but the transfer size must
* be rounded up to the next multiple of 512 (atari_dma_setup() does
* this).
@@ -644,8 +651,8 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
possible_len = limit;
if (possible_len != wanted_len)
- dprintk(NDEBUG_DMA, "Sorry, must cut DMA transfer size to %ld bytes "
- "instead of %ld\n", possible_len, wanted_len);
+ dprintk(NDEBUG_DMA, "DMA transfer now %d bytes instead of %d\n",
+ possible_len, wanted_len);
return possible_len;
}
@@ -658,26 +665,38 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
* NCR5380_write call these functions via function pointers.
*/
-static unsigned char atari_scsi_tt_reg_read(unsigned char reg)
+static u8 atari_scsi_tt_reg_read(unsigned int reg)
{
return tt_scsi_regp[reg * 2];
}
-static void atari_scsi_tt_reg_write(unsigned char reg, unsigned char value)
+static void atari_scsi_tt_reg_write(unsigned int reg, u8 value)
{
tt_scsi_regp[reg * 2] = value;
}
-static unsigned char atari_scsi_falcon_reg_read(unsigned char reg)
+static u8 atari_scsi_falcon_reg_read(unsigned int reg)
{
- dma_wd.dma_mode_status= (u_short)(0x88 + reg);
- return (u_char)dma_wd.fdc_acces_seccount;
+ unsigned long flags;
+ u8 result;
+
+ reg += 0x88;
+ local_irq_save(flags);
+ dma_wd.dma_mode_status = (u_short)reg;
+ result = (u8)dma_wd.fdc_acces_seccount;
+ local_irq_restore(flags);
+ return result;
}
-static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value)
+static void atari_scsi_falcon_reg_write(unsigned int reg, u8 value)
{
- dma_wd.dma_mode_status = (u_short)(0x88 + reg);
+ unsigned long flags;
+
+ reg += 0x88;
+ local_irq_save(flags);
+ dma_wd.dma_mode_status = (u_short)reg;
dma_wd.fdc_acces_seccount = (u_short)value;
+ local_irq_restore(flags);
}
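The new Falcon accessors are more than a type change: the 5380 there is
reached through a select-then-transfer register pair, so both steps must be
atomic against interrupts. The idiom in generic form (a sketch; the register
pointers and the 0x88 base are illustrative of this layout, not a general
rule):

	#include <linux/irqflags.h>
	#include <linux/types.h>

	static u8 indexed_reg_read(volatile u16 *mode, volatile u16 *data,
				   unsigned int reg)
	{
		unsigned long flags;
		u8 val;

		local_irq_save(flags);
		*mode = 0x88 + reg;	/* select the 5380 register... */
		val = (u8)*data;	/* ...then read it, uninterrupted */
		local_irq_restore(flags);

		return val;
	}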
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index d9239c2d49b1..b5112d6d7e73 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -3049,8 +3049,10 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
eq_vaddress = pci_alloc_consistent(phba->pcidev,
num_eq_pages * PAGE_SIZE,
&paddr);
- if (!eq_vaddress)
+ if (!eq_vaddress) {
+ ret = -ENOMEM;
goto create_eq_error;
+ }
mem->va = eq_vaddress;
ret = be_fill_queue(eq, phba->params.num_eq_entries,
@@ -3113,8 +3115,10 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
cq_vaddress = pci_alloc_consistent(phba->pcidev,
num_cq_pages * PAGE_SIZE,
&paddr);
- if (!cq_vaddress)
+ if (!cq_vaddress) {
+ ret = -ENOMEM;
goto create_cq_error;
+ }
ret = be_fill_queue(cq, phba->params.num_cq_entries,
sizeof(struct sol_cqe), cq_vaddress);
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 713745da44c6..0f9fab770339 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -111,20 +111,24 @@ struct bfa_meminfo_s {
struct bfa_mem_kva_s kva_info;
};
-/* BFA memory segment setup macros */
-#define bfa_mem_dma_setup(_meminfo, _dm_ptr, _seg_sz) do { \
- ((bfa_mem_dma_t *)(_dm_ptr))->mem_len = (_seg_sz); \
- if (_seg_sz) \
- list_add_tail(&((bfa_mem_dma_t *)_dm_ptr)->qe, \
- &(_meminfo)->dma_info.qe); \
-} while (0)
+/* BFA memory segment setup helpers */
+static inline void bfa_mem_dma_setup(struct bfa_meminfo_s *meminfo,
+ struct bfa_mem_dma_s *dm_ptr,
+ size_t seg_sz)
+{
+ dm_ptr->mem_len = seg_sz;
+ if (seg_sz)
+ list_add_tail(&dm_ptr->qe, &meminfo->dma_info.qe);
+}
-#define bfa_mem_kva_setup(_meminfo, _kva_ptr, _seg_sz) do { \
- ((bfa_mem_kva_t *)(_kva_ptr))->mem_len = (_seg_sz); \
- if (_seg_sz) \
- list_add_tail(&((bfa_mem_kva_t *)_kva_ptr)->qe, \
- &(_meminfo)->kva_info.qe); \
-} while (0)
+static inline void bfa_mem_kva_setup(struct bfa_meminfo_s *meminfo,
+ struct bfa_mem_kva_s *kva_ptr,
+ size_t seg_sz)
+{
+ kva_ptr->mem_len = seg_sz;
+ if (seg_sz)
+ list_add_tail(&kva_ptr->qe, &meminfo->kva_info.qe);
+}
/* BFA dma memory segments iterator */
#define bfa_mem_dma_sptr(_mod, _i) (&(_mod)->dma_seg[(_i)])
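Beyond style, the macro-to-inline conversion buys type checking: the old
macros cast whatever they were handed, while the inline helpers take typed
pointers. A small userspace model of the difference (types are simplified
stand-ins for the bfa segment structures):

	#include <stddef.h>
	#include <stdio.h>

	struct dma_seg { size_t mem_len; };
	struct kva_seg { size_t mem_len; };

	static inline void dma_setup(struct dma_seg *s, size_t sz)
	{
		s->mem_len = sz;	/* list bookkeeping elided here */
	}

	int main(void)
	{
		struct dma_seg d;

		dma_setup(&d, 64);	/* fine */
		/* struct kva_seg k; dma_setup(&k, 64);
		 * -- now draws a compiler diagnostic instead of
		 * silently compiling through a cast */
		printf("%zu\n", d.mem_len);
		return 0;
	}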
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index d1ad0208dfe7..a9a00169ad91 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3130,11 +3130,12 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
}
static int
-bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
+bfad_im_bsg_vendor_request(struct bsg_job *job)
{
- uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
- struct bfad_im_port_s *im_port =
- (struct bfad_im_port_s *) job->shost->hostdata[0];
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
+ struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
struct bfad_s *bfad = im_port->bfad;
struct request_queue *request_q = job->req->q;
void *payload_kbuf;
@@ -3175,18 +3176,19 @@ bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
/* Fill the BSG job reply data */
job->reply_len = job->reply_payload.payload_len;
- job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
- job->reply->result = rc;
+ bsg_reply->reply_payload_rcv_len = job->reply_payload.payload_len;
+ bsg_reply->result = rc;
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
error:
/* free the command buffer */
kfree(payload_kbuf);
out:
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->reply_len = sizeof(uint32_t);
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
return rc;
}
@@ -3312,7 +3314,7 @@ bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
}
int
-bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
+bfad_fcxp_bsg_send(struct bsg_job *job, struct bfad_fcxp *drv_fcxp,
bfa_bsg_fcpt_t *bsg_fcpt)
{
struct bfa_fcxp_s *hal_fcxp;
@@ -3352,28 +3354,29 @@ bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
}
int
-bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
+bfad_im_bsg_els_ct_request(struct bsg_job *job)
{
struct bfa_bsg_data *bsg_data;
- struct bfad_im_port_s *im_port =
- (struct bfad_im_port_s *) job->shost->hostdata[0];
+ struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
struct bfad_s *bfad = im_port->bfad;
bfa_bsg_fcpt_t *bsg_fcpt;
struct bfad_fcxp *drv_fcxp;
struct bfa_fcs_lport_s *fcs_port;
struct bfa_fcs_rport_s *fcs_rport;
- uint32_t command_type = job->request->msgcode;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ uint32_t command_type = bsg_request->msgcode;
unsigned long flags;
struct bfad_buf_info *rsp_buf_info;
void *req_kbuf = NULL, *rsp_kbuf = NULL;
int rc = -EINVAL;
job->reply_len = sizeof(uint32_t); /* At least uint32_t reply_len */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* Get the payload passed in from userspace */
- bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
- sizeof(struct fc_bsg_request));
+ bsg_data = (struct bfa_bsg_data *) (((char *)bsg_request) +
+ sizeof(struct fc_bsg_request));
if (bsg_data == NULL)
goto out;
@@ -3517,13 +3520,13 @@ bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
/* fill the job->reply data */
if (drv_fcxp->req_status == BFA_STATUS_OK) {
job->reply_len = drv_fcxp->rsp_len;
- job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
- job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ bsg_reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
+ bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
} else {
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sizeof(struct fc_bsg_ctels_reply);
job->reply_len = sizeof(uint32_t);
- job->reply->reply_data.ctels_reply.status =
+ bsg_reply->reply_data.ctels_reply.status =
FC_CTELS_STATUS_REJECT;
}
@@ -3549,20 +3552,23 @@ out_free_mem:
kfree(bsg_fcpt);
kfree(drv_fcxp);
out:
- job->reply->result = rc;
+ bsg_reply->result = rc;
if (rc == BFA_STATUS_OK)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
int
-bfad_im_bsg_request(struct fc_bsg_job *job)
+bfad_im_bsg_request(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
uint32_t rc = BFA_STATUS_OK;
- switch (job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_HST_VENDOR:
/* Process BSG HST Vendor requests */
rc = bfad_im_bsg_vendor_request(job);
@@ -3575,8 +3581,8 @@ bfad_im_bsg_request(struct fc_bsg_job *job)
rc = bfad_im_bsg_els_ct_request(job);
break;
default:
- job->reply->result = rc = -EINVAL;
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->result = rc = -EINVAL;
+ bsg_reply->reply_payload_rcv_len = 0;
break;
}
@@ -3584,7 +3590,7 @@ bfad_im_bsg_request(struct fc_bsg_job *job)
}
int
-bfad_im_bsg_timeout(struct fc_bsg_job *job)
+bfad_im_bsg_timeout(struct bsg_job *job)
{
/* Don't complete the BSG job request - return -EAGAIN
* to reset bsg job timeout : for ELS/CT pass thru we
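The fc_bsg_job to bsg_job conversion throughout bfad_bsg.c follows one recipe:
take the request/reply pointers from the job, fill the reply, and complete
with bsg_job_done() only on success. A minimal sketch of a handler in that
style (my_bsg_request is hypothetical; error paths set the reply and return
without completing, matching the conversion above):

	#include <linux/bsg-lib.h>
	#include <linux/errno.h>
	#include <scsi/scsi_bsg_fc.h>

	static int my_bsg_request(struct bsg_job *job)
	{
		struct fc_bsg_request *bsg_request = job->request;
		struct fc_bsg_reply *bsg_reply = job->reply;

		if (bsg_request->msgcode != FC_BSG_HST_VENDOR) {
			bsg_reply->result = -EINVAL;
			bsg_reply->reply_payload_rcv_len = 0;
			return -EINVAL;
		}

		/* ... process the vendor command ... */

		bsg_reply->result = 0;
		bsg_reply->reply_payload_rcv_len =
			job->reply_payload.payload_len;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
		return 0;
	}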
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 836fdc221edd..c81ec2a77ef5 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -166,8 +166,8 @@ extern struct device_attribute *bfad_im_vport_attrs[];
irqreturn_t bfad_intx(int irq, void *dev_id);
-int bfad_im_bsg_request(struct fc_bsg_job *job);
-int bfad_im_bsg_timeout(struct fc_bsg_job *job);
+int bfad_im_bsg_request(struct bsg_job *job);
+int bfad_im_bsg_timeout(struct bsg_job *job);
/*
* Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f9ddb6156f14..0990130821fa 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -970,7 +970,6 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)
sizeof(struct libfc_function_template));
fc_elsct_init(lport);
fc_exch_init(lport);
- fc_rport_init(lport);
fc_disc_init(lport);
fc_disc_config(lport, lport);
return 0;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 08ec318afb99..739bfb62aff6 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -80,7 +80,6 @@ static void bnx2fc_offload_session(struct fcoe_port *port,
struct bnx2fc_rport *tgt,
struct fc_rport_priv *rdata)
{
- struct fc_lport *lport = rdata->local_port;
struct fc_rport *rport = rdata->rport;
struct bnx2fc_interface *interface = port->priv;
struct bnx2fc_hba *hba = interface->hba;
@@ -160,7 +159,7 @@ ofld_err:
tgt_init_err:
if (tgt->fcoe_conn_id != -1)
bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
- lport->tt.rport_logoff(rdata);
+ fc_rport_logoff(rdata);
}
void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 4655a9f9dcea..9e6f647ff1c1 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1411,7 +1411,7 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
if (csk->atid < 0) {
pr_err("%s, NO atid available.\n", ndev->name);
- return -EINVAL;
+ goto rel_resource_without_clip;
}
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 6e6815545a71..0e9de5d62da2 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -19,6 +19,7 @@
#include <linux/rwsem.h>
#include <linux/types.h>
#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
extern const struct file_operations cxlflash_cxl_fops;
@@ -62,11 +63,6 @@ static inline void check_sizes(void)
/* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */
#define CMD_BUFSIZE SIZE_4K
-/* flags in IOA status area for host use */
-#define B_DONE 0x01
-#define B_ERROR 0x02 /* set with B_DONE */
-#define B_TIMEOUT 0x04 /* set with B_DONE & B_ERROR */
-
enum cxlflash_lr_state {
LINK_RESET_INVALID,
LINK_RESET_REQUIRED,
@@ -132,12 +128,9 @@ struct cxlflash_cfg {
struct afu_cmd {
struct sisl_ioarcb rcb; /* IOARCB (cache line aligned) */
struct sisl_ioasa sa; /* IOASA must follow IOARCB */
- spinlock_t slock;
- struct completion cevent;
- char *buf; /* per command buffer */
struct afu *parent;
- int slot;
- atomic_t free;
+ struct scsi_cmnd *scp;
+ struct completion cevent;
u8 cmd_tmf:1;
@@ -147,19 +140,31 @@ struct afu_cmd {
*/
} __aligned(cache_line_size());
+static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *sc)
+{
+ return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd));
+}
+
+static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc)
+{
+ struct afu_cmd *afuc = sc_to_afuc(sc);
+
+ memset(afuc, 0, sizeof(*afuc));
+ return afuc;
+}
+
struct afu {
/* Stuff requiring alignment go first. */
u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */
- /*
- * Command & data for AFU commands.
- */
- struct afu_cmd cmd[CXLFLASH_NUM_CMDS];
/* Beware of alignment till here. Preferably introduce new
* fields after this point
*/
+ int (*send_cmd)(struct afu *, struct afu_cmd *);
+ void (*context_reset)(struct afu_cmd *);
+
/* AFU HW */
struct cxl_ioctl_start_work work;
struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */
@@ -173,10 +178,10 @@ struct afu {
u64 *hrrq_end;
u64 *hrrq_curr;
bool toggle;
- bool read_room;
- atomic64_t room;
+ atomic_t cmds_active; /* Number of currently active AFU commands */
+ s64 room;
+ spinlock_t rrin_slock; /* Lock for rrin queuing and cmd_room updates */
u64 hb;
- u32 cmd_couts; /* Number of command checkouts */
u32 internal_lun; /* User-desired LUN mode for this AFU */
char version[16];
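The sc_to_afuc()/sc_to_afucz() helpers rely on the SCSI midlayer allocating
cmd_size extra bytes behind every scsi_cmnd, which is what makes the private
command pool removable. The general pattern, as a sketch with hypothetical
my_cmd/my_queuecommand names:

	#include <linux/string.h>
	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_host.h>

	struct my_cmd {
		u32 flags;
	};

	static int my_queuecommand(struct Scsi_Host *host,
				   struct scsi_cmnd *scp)
	{
		struct my_cmd *cmd = scsi_cmd_priv(scp);	/* midlayer-owned */

		memset(cmd, 0, sizeof(*cmd));	/* no pool checkout needed */
		/* ... build and submit the command ... */
		return 0;
	}

	static struct scsi_host_template my_template = {
		.queuecommand	= my_queuecommand,
		.cmd_size	= sizeof(struct my_cmd),
	};

cxlflash additionally runs the pointer through PTR_ALIGN() because struct
afu_cmd is cache-line aligned.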
diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c
index a0923cade6f3..6c318db90c85 100644
--- a/drivers/scsi/cxlflash/lunmgt.c
+++ b/drivers/scsi/cxlflash/lunmgt.c
@@ -254,8 +254,14 @@ int cxlflash_manage_lun(struct scsi_device *sdev,
if (lli->parent->mode != MODE_NONE)
rc = -EBUSY;
else {
+ /*
+ * Clean up local LUN for this port and reset table
+ * tracking when no more references exist.
+ */
sdev->hostdata = NULL;
lli->port_sel &= ~CHAN2PORT(chan);
+ if (lli->port_sel == 0U)
+ lli->in_table = false;
}
}
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index b301655f91cd..b17ebf6d0a7e 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -35,67 +35,6 @@ MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
/**
- * cmd_checkout() - checks out an AFU command
- * @afu: AFU to checkout from.
- *
- * Commands are checked out in a round-robin fashion. Note that since
- * the command pool is larger than the hardware queue, the majority of
- * times we will only loop once or twice before getting a command. The
- * buffer and CDB within the command are initialized (zeroed) prior to
- * returning.
- *
- * Return: The checked out command or NULL when command pool is empty.
- */
-static struct afu_cmd *cmd_checkout(struct afu *afu)
-{
- int k, dec = CXLFLASH_NUM_CMDS;
- struct afu_cmd *cmd;
-
- while (dec--) {
- k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));
-
- cmd = &afu->cmd[k];
-
- if (!atomic_dec_if_positive(&cmd->free)) {
- pr_devel("%s: returning found index=%d cmd=%p\n",
- __func__, cmd->slot, cmd);
- memset(cmd->buf, 0, CMD_BUFSIZE);
- memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
- return cmd;
- }
- }
-
- return NULL;
-}
-
-/**
- * cmd_checkin() - checks in an AFU command
- * @cmd: AFU command to checkin.
- *
- * Safe to pass commands that have already been checked in. Several
- * internal tracking fields are reset as part of the checkin. Note
- * that these are intentionally reset prior to toggling the free bit
- * to avoid clobbering values in the event that the command is checked
- * out right away.
- */
-static void cmd_checkin(struct afu_cmd *cmd)
-{
- cmd->rcb.scp = NULL;
- cmd->rcb.timeout = 0;
- cmd->sa.ioasc = 0;
- cmd->cmd_tmf = false;
- cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */
-
- if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
- pr_err("%s: Freeing cmd (%d) that is not in use!\n",
- __func__, cmd->slot);
- return;
- }
-
- pr_devel("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
-}
-
-/**
* process_cmd_err() - command error handler
* @cmd: AFU command that experienced the error.
* @scp: SCSI command associated with the AFU command in error.
@@ -212,7 +151,7 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
*
* Prepares and submits command that has either completed or timed out to
* the SCSI stack. Checks AFU command back into command pool for non-internal
- * (rcb.scp populated) commands.
+ * (cmd->scp populated) commands.
*/
static void cmd_complete(struct afu_cmd *cmd)
{
@@ -222,19 +161,14 @@ static void cmd_complete(struct afu_cmd *cmd)
struct cxlflash_cfg *cfg = afu->parent;
bool cmd_is_tmf;
- spin_lock_irqsave(&cmd->slock, lock_flags);
- cmd->sa.host_use_b[0] |= B_DONE;
- spin_unlock_irqrestore(&cmd->slock, lock_flags);
-
- if (cmd->rcb.scp) {
- scp = cmd->rcb.scp;
+ if (cmd->scp) {
+ scp = cmd->scp;
if (unlikely(cmd->sa.ioasc))
process_cmd_err(cmd, scp);
else
scp->result = (DID_OK << 16);
cmd_is_tmf = cmd->cmd_tmf;
- cmd_checkin(cmd); /* Don't use cmd after here */
pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
"ioasc=%d\n", __func__, scp, scp->result,
@@ -254,49 +188,19 @@ static void cmd_complete(struct afu_cmd *cmd)
}
/**
- * context_reset() - timeout handler for AFU commands
+ * context_reset_ioarrin() - reset command owner context via IOARRIN register
* @cmd: AFU command that timed out.
- *
- * Sends a reset to the AFU.
*/
-static void context_reset(struct afu_cmd *cmd)
+static void context_reset_ioarrin(struct afu_cmd *cmd)
{
int nretry = 0;
u64 rrin = 0x1;
- u64 room = 0;
struct afu *afu = cmd->parent;
- ulong lock_flags;
+ struct cxlflash_cfg *cfg = afu->parent;
+ struct device *dev = &cfg->dev->dev;
pr_debug("%s: cmd=%p\n", __func__, cmd);
- spin_lock_irqsave(&cmd->slock, lock_flags);
-
- /* Already completed? */
- if (cmd->sa.host_use_b[0] & B_DONE) {
- spin_unlock_irqrestore(&cmd->slock, lock_flags);
- return;
- }
-
- cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
- spin_unlock_irqrestore(&cmd->slock, lock_flags);
-
- /*
- * We really want to send this reset at all costs, so spread
- * out wait time on successive retries for available room.
- */
- do {
- room = readq_be(&afu->host_map->cmd_room);
- atomic64_set(&afu->room, room);
- if (room)
- goto write_rrin;
- udelay(1 << nretry);
- } while (nretry++ < MC_ROOM_RETRY_CNT);
-
- pr_err("%s: no cmd_room to send reset\n", __func__);
- return;
-
-write_rrin:
- nretry = 0;
writeq_be(rrin, &afu->host_map->ioarrin);
do {
rrin = readq_be(&afu->host_map->ioarrin);
@@ -305,93 +209,81 @@ write_rrin:
/* Double delay each time */
udelay(1 << nretry);
} while (nretry++ < MC_ROOM_RETRY_CNT);
+
+ dev_dbg(dev, "%s: returning rrin=0x%016llX nretry=%d\n",
+ __func__, rrin, nretry);
}
/**
- * send_cmd() - sends an AFU command
+ * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
* @afu: AFU associated with the host.
* @cmd: AFU command to send.
*
* Return:
* 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
*/
-static int send_cmd(struct afu *afu, struct afu_cmd *cmd)
+static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
- int nretry = 0;
int rc = 0;
- u64 room;
- long newval;
+ s64 room;
+ ulong lock_flags;
/*
- * This routine is used by critical users such an AFU sync and to
- * send a task management function (TMF). Thus we want to retry a
- * bit before returning an error. To avoid the performance penalty
- * of MMIO, we spread the update of 'room' over multiple commands.
+ * To avoid the performance penalty of MMIO, spread the update of
+ * 'room' over multiple commands.
*/
-retry:
- newval = atomic64_dec_if_positive(&afu->room);
- if (!newval) {
- do {
- room = readq_be(&afu->host_map->cmd_room);
- atomic64_set(&afu->room, room);
- if (room)
- goto write_ioarrin;
- udelay(1 << nretry);
- } while (nretry++ < MC_ROOM_RETRY_CNT);
-
- dev_err(dev, "%s: no cmd_room to send 0x%X\n",
- __func__, cmd->rcb.cdb[0]);
-
- goto no_room;
- } else if (unlikely(newval < 0)) {
- /* This should be rare. i.e. Only if two threads race and
- * decrement before the MMIO read is done. In this case
- * just benefit from the other thread having updated
- * afu->room.
- */
- if (nretry++ < MC_ROOM_RETRY_CNT) {
- udelay(1 << nretry);
- goto retry;
+ spin_lock_irqsave(&afu->rrin_slock, lock_flags);
+ if (--afu->room < 0) {
+ room = readq_be(&afu->host_map->cmd_room);
+ if (room <= 0) {
+ dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
+ "0x%02X, room=0x%016llX\n",
+ __func__, cmd->rcb.cdb[0], room);
+ afu->room = 0;
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
}
-
- goto no_room;
+ afu->room = room - 1;
}
-write_ioarrin:
writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
out:
+ spin_unlock_irqrestore(&afu->rrin_slock, lock_flags);
pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
return rc;
-
-no_room:
- afu->read_room = true;
- kref_get(&cfg->afu->mapcount);
- schedule_work(&cfg->work_q);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
}
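send_cmd_ioarrin() trades the old lock-free atomic scheme for a plain cached
counter under a spinlock: the expensive cmd_room MMIO read happens only when
the cached credit runs dry. The same idea in generic form (a sketch;
my_hw_credits() stands in for the MMIO read):

	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct credit_gate {
		spinlock_t lock;
		s64 cached;		/* credits known to be available */
	};

	/* Returns true when the caller may submit one command. */
	static bool credit_take(struct credit_gate *g,
				s64 (*my_hw_credits)(void))
	{
		unsigned long flags;
		bool ok = true;

		spin_lock_irqsave(&g->lock, flags);
		if (--g->cached < 0) {
			s64 room = my_hw_credits();	/* expensive */

			if (room <= 0) {
				g->cached = 0;
				ok = false;
			} else {
				g->cached = room - 1;
			}
		}
		spin_unlock_irqrestore(&g->lock, flags);

		return ok;
	}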
/**
* wait_resp() - polls for a response or timeout to a sent AFU command
* @afu: AFU associated with the host.
* @cmd: AFU command that was sent.
+ *
+ * Return:
+ * 0 on success, -1 on timeout/error
*/
-static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
+static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
+ int rc = 0;
ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
- if (!timeout)
- context_reset(cmd);
+ if (!timeout) {
+ afu->context_reset(cmd);
+ rc = -1;
+ }
- if (unlikely(cmd->sa.ioasc != 0))
+ if (unlikely(cmd->sa.ioasc != 0)) {
pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
"scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
cmd->sa.rc.fc_rc);
+ rc = -1;
+ }
+
+ return rc;
}
/**
@@ -405,24 +297,15 @@ static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
*/
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
- struct afu_cmd *cmd;
-
u32 port_sel = scp->device->channel + 1;
- short lflag = 0;
struct Scsi_Host *host = scp->device->host;
struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+ struct afu_cmd *cmd = sc_to_afucz(scp);
struct device *dev = &cfg->dev->dev;
ulong lock_flags;
int rc = 0;
ulong to;
- cmd = cmd_checkout(afu);
- if (unlikely(!cmd)) {
- dev_err(dev, "%s: could not get a free command\n", __func__);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
-
/* When Task Management Function is active do not send another */
spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
if (cfg->tmf_active)
@@ -430,28 +313,23 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
!cfg->tmf_active,
cfg->tmf_slock);
cfg->tmf_active = true;
- cmd->cmd_tmf = true;
spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
+ cmd->scp = scp;
+ cmd->parent = afu;
+ cmd->cmd_tmf = true;
+
cmd->rcb.ctx_id = afu->ctx_hndl;
+ cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
cmd->rcb.port_sel = port_sel;
cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
-
- lflag = SISL_REQ_FLAGS_TMF_CMD;
-
cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
- SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
-
- /* Stash the scp in the reserved field, for reuse during interrupt */
- cmd->rcb.scp = scp;
-
- /* Copy the CDB from the cmd passed in */
+ SISL_REQ_FLAGS_SUP_UNDERRUN |
+ SISL_REQ_FLAGS_TMF_CMD);
memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
- /* Send the command */
- rc = send_cmd(afu, cmd);
+ rc = afu->send_cmd(afu, cmd);
if (unlikely(rc)) {
- cmd_checkin(cmd);
spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
cfg->tmf_active = false;
spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
@@ -507,12 +385,12 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
- struct afu_cmd *cmd;
+ struct afu_cmd *cmd = sc_to_afucz(scp);
+ struct scatterlist *sg = scsi_sglist(scp);
u32 port_sel = scp->device->channel + 1;
- int nseg, i, ncount;
- struct scatterlist *sg;
+ u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
ulong lock_flags;
- short lflag = 0;
+ int nseg = 0;
int rc = 0;
int kref_got = 0;
@@ -552,55 +430,38 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
break;
}
- cmd = cmd_checkout(afu);
- if (unlikely(!cmd)) {
- dev_err(dev, "%s: could not get a free command\n", __func__);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
-
kref_get(&cfg->afu->mapcount);
kref_got = 1;
+ if (likely(sg)) {
+ nseg = scsi_dma_map(scp);
+ if (unlikely(nseg < 0)) {
+ dev_err(dev, "%s: Fail DMA map!\n", __func__);
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ cmd->rcb.data_len = sg_dma_len(sg);
+ cmd->rcb.data_ea = sg_dma_address(sg);
+ }
+
+ cmd->scp = scp;
+ cmd->parent = afu;
+
cmd->rcb.ctx_id = afu->ctx_hndl;
+ cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
cmd->rcb.port_sel = port_sel;
cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
if (scp->sc_data_direction == DMA_TO_DEVICE)
- lflag = SISL_REQ_FLAGS_HOST_WRITE;
- else
- lflag = SISL_REQ_FLAGS_HOST_READ;
-
- cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
- SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
-
- /* Stash the scp in the reserved field, for reuse during interrupt */
- cmd->rcb.scp = scp;
-
- nseg = scsi_dma_map(scp);
- if (unlikely(nseg < 0)) {
- dev_err(dev, "%s: Fail DMA map! nseg=%d\n",
- __func__, nseg);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
+ req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
- ncount = scsi_sg_count(scp);
- scsi_for_each_sg(scp, sg, ncount, i) {
- cmd->rcb.data_len = sg_dma_len(sg);
- cmd->rcb.data_ea = sg_dma_address(sg);
- }
-
- /* Copy the CDB from the scsi_cmnd passed in */
+ cmd->rcb.req_flags = req_flags;
memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
- /* Send the command */
- rc = send_cmd(afu, cmd);
- if (unlikely(rc)) {
- cmd_checkin(cmd);
+ rc = afu->send_cmd(afu, cmd);
+ if (unlikely(rc))
scsi_dma_unmap(scp);
- }
-
out:
if (kref_got)
kref_put(&afu->mapcount, afu_unmap);
@@ -628,17 +489,9 @@ static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
*/
static void free_mem(struct cxlflash_cfg *cfg)
{
- int i;
- char *buf = NULL;
struct afu *afu = cfg->afu;
if (cfg->afu) {
- for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
- buf = afu->cmd[i].buf;
- if (!((u64)buf & (PAGE_SIZE - 1)))
- free_page((ulong)buf);
- }
-
free_pages((ulong)afu, get_order(sizeof(struct afu)));
cfg->afu = NULL;
}
@@ -650,30 +503,16 @@ static void free_mem(struct cxlflash_cfg *cfg)
*
* Safe to call with AFU in a partially allocated/initialized state.
*
- * Cleans up all state associated with the command queue, and unmaps
+ * Waits for any active internal AFU commands to time out and then unmaps
* the MMIO space.
- *
- * - complete() will take care of commands we initiated (they'll be checked
- * in as part of the cleanup that occurs after the completion)
- *
- * - cmd_checkin() will take care of entries that we did not initiate and that
- * have not (and will not) complete because they are sitting on a [now stale]
- * hardware queue
*/
static void stop_afu(struct cxlflash_cfg *cfg)
{
- int i;
struct afu *afu = cfg->afu;
- struct afu_cmd *cmd;
if (likely(afu)) {
- for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
- cmd = &afu->cmd[i];
- complete(&cmd->cevent);
- if (!atomic_read(&cmd->free))
- cmd_checkin(cmd);
- }
-
+ while (atomic_read(&afu->cmds_active))
+ ssleep(1);
if (likely(afu->afu_map)) {
cxl_psa_unmap((void __iomem *)afu->afu_map);
afu->afu_map = NULL;
@@ -886,8 +725,6 @@ static void cxlflash_remove(struct pci_dev *pdev)
static int alloc_mem(struct cxlflash_cfg *cfg)
{
int rc = 0;
- int i;
- char *buf = NULL;
struct device *dev = &cfg->dev->dev;
/* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
@@ -901,25 +738,6 @@ static int alloc_mem(struct cxlflash_cfg *cfg)
}
cfg->afu->parent = cfg;
cfg->afu->afu_map = NULL;
-
- for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
- if (!((u64)buf & (PAGE_SIZE - 1))) {
- buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
- if (unlikely(!buf)) {
- dev_err(dev,
- "%s: Allocate command buffers fail!\n",
- __func__);
- rc = -ENOMEM;
- free_mem(cfg);
- goto out;
- }
- }
-
- cfg->afu->cmd[i].buf = buf;
- atomic_set(&cfg->afu->cmd[i].free, 1);
- cfg->afu->cmd[i].slot = i;
- }
-
out:
return rc;
}
@@ -1549,13 +1367,6 @@ static void init_pcr(struct cxlflash_cfg *cfg)
/* Program the Endian Control for the master context */
writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
-
- /* Initialize cmd fields that never change */
- for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
- afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
- afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
- afu->cmd[i].rcb.rrq = 0x0;
- }
}
/**
@@ -1644,19 +1455,8 @@ out:
static int start_afu(struct cxlflash_cfg *cfg)
{
struct afu *afu = cfg->afu;
- struct afu_cmd *cmd;
-
- int i = 0;
int rc = 0;
- for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
- cmd = &afu->cmd[i];
-
- init_completion(&cmd->cevent);
- spin_lock_init(&cmd->slock);
- cmd->parent = afu;
- }
-
init_pcr(cfg);
/* After an AFU reset, RRQ entries are stale, clear them */
@@ -1829,6 +1629,9 @@ static int init_afu(struct cxlflash_cfg *cfg)
goto err2;
}
+ afu->send_cmd = send_cmd_ioarrin;
+ afu->context_reset = context_reset_ioarrin;
+
pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__,
afu->version, afu->interface_version);
@@ -1840,7 +1643,8 @@ static int init_afu(struct cxlflash_cfg *cfg)
}
afu_err_intr_init(cfg->afu);
- atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
+ spin_lock_init(&afu->rrin_slock);
+ afu->room = readq_be(&afu->host_map->cmd_room);
/* Restore the LUN mappings */
cxlflash_restore_luntable(cfg);
@@ -1884,8 +1688,8 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
struct afu_cmd *cmd = NULL;
+ char *buf = NULL;
int rc = 0;
- int retry_cnt = 0;
static DEFINE_MUTEX(sync_active);
if (cfg->state != STATE_NORMAL) {
@@ -1894,27 +1698,23 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
}
mutex_lock(&sync_active);
-retry:
- cmd = cmd_checkout(afu);
- if (unlikely(!cmd)) {
- retry_cnt++;
- udelay(1000 * retry_cnt);
- if (retry_cnt < MC_RETRY_CNT)
- goto retry;
- dev_err(dev, "%s: could not get a free command\n", __func__);
+ atomic_inc(&afu->cmds_active);
+ buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
+ if (unlikely(!buf)) {
+ dev_err(dev, "%s: no memory for command\n", __func__);
rc = -1;
goto out;
}
- pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
+ cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
+ init_completion(&cmd->cevent);
+ cmd->parent = afu;
- memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
+ pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
- cmd->rcb.port_sel = 0x0; /* NA */
- cmd->rcb.lun_id = 0x0; /* NA */
- cmd->rcb.data_len = 0x0;
- cmd->rcb.data_ea = 0x0;
+ cmd->rcb.ctx_id = afu->ctx_hndl;
+ cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */
@@ -1924,20 +1724,17 @@ retry:
*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);
- rc = send_cmd(afu, cmd);
+ rc = afu->send_cmd(afu, cmd);
if (unlikely(rc))
goto out;
- wait_resp(afu, cmd);
-
- /* Set on timeout */
- if (unlikely((cmd->sa.ioasc != 0) ||
- (cmd->sa.host_use_b[0] & B_ERROR)))
+ rc = wait_resp(afu, cmd);
+ if (unlikely(rc))
rc = -1;
out:
+ atomic_dec(&afu->cmds_active);
mutex_unlock(&sync_active);
- if (cmd)
- cmd_checkin(cmd);
+ kfree(buf);
pr_debug("%s: returning rc=%d\n", __func__, rc);
return rc;
}
@@ -2376,8 +2173,9 @@ static struct scsi_host_template driver_template = {
.change_queue_depth = cxlflash_change_queue_depth,
.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
.can_queue = CXLFLASH_MAX_CMDS,
+ .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
.this_id = -1,
- .sg_tablesize = SG_NONE, /* No scatter gather support */
+	.sg_tablesize = 1,	/* single scatter-gather list entry */
.max_sectors = CXLFLASH_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = cxlflash_host_attrs,
@@ -2412,7 +2210,6 @@ MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
* Handles the following events:
* - Link reset which cannot be performed in interrupt context due to
* blocking up to a few seconds
- * - Read AFU command room
* - Rescan the host
*/
static void cxlflash_worker_thread(struct work_struct *work)
@@ -2449,11 +2246,6 @@ static void cxlflash_worker_thread(struct work_struct *work)
cfg->lr_state = LINK_RESET_COMPLETE;
}
- if (afu->read_room) {
- atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
- afu->read_room = false;
- }
-
spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
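[Editor's note] The cxlflash hunks above replace the driver's private command pool with two mechanisms: .cmd_size in the host template makes the SCSI midlayer reserve an afu_cmd alongside every scsi_cmnd (hence sc_to_afucz(scp) in queuecommand), while cxlflash_afu_sync(), which has no scsi_cmnd to piggyback on, allocates a transient command. A minimal sketch of the alignment idiom it uses — over-allocate by (alignment - 1), round the object pointer up, and always free the original pointer:

	struct afu_cmd *cmd;
	char *buf;

	/* Room for one afu_cmd at any alignment within the buffer */
	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
	if (unlikely(!buf))
		return -ENOMEM;
	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));

	/* ... fill cmd->rcb and send ... */

	kfree(buf);	/* never kfree(cmd): it may point past the start */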
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
index 347fc1671975..1a2d09c148b3 100644
--- a/drivers/scsi/cxlflash/sislite.h
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -72,7 +72,7 @@ struct sisl_ioarcb {
u16 timeout; /* in units specified by req_flags */
u32 rsvd1;
u8 cdb[16]; /* must be in big endian */
- struct scsi_cmnd *scp;
+ u64 reserved; /* Reserved area */
} __packed;
struct sisl_rc {
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index db03c49e2350..d704752b6332 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -95,7 +95,7 @@ struct alua_port_group {
struct alua_dh_data {
struct list_head node;
- struct alua_port_group *pg;
+ struct alua_port_group __rcu *pg;
int group_id;
spinlock_t pg_lock;
struct scsi_device *sdev;
@@ -371,7 +371,7 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
/* Check for existing port group references */
spin_lock(&h->pg_lock);
- old_pg = h->pg;
+ old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
if (old_pg != pg) {
/* port group has changed. Update to new port group */
if (h->pg) {
@@ -390,7 +390,9 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
list_add_rcu(&h->node, &pg->dh_list);
spin_unlock_irqrestore(&pg->lock, flags);
- alua_rtpg_queue(h->pg, sdev, NULL, true);
+ alua_rtpg_queue(rcu_dereference_protected(h->pg,
+ lockdep_is_held(&h->pg_lock)),
+ sdev, NULL, true);
spin_unlock(&h->pg_lock);
if (old_pg)
@@ -942,7 +944,7 @@ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
static int alua_set_params(struct scsi_device *sdev, const char *params)
{
struct alua_dh_data *h = sdev->handler_data;
- struct alua_port_group __rcu *pg = NULL;
+ struct alua_port_group *pg = NULL;
unsigned int optimize = 0, argc;
const char *p = params;
int result = SCSI_DH_OK;
@@ -989,7 +991,7 @@ static int alua_activate(struct scsi_device *sdev,
struct alua_dh_data *h = sdev->handler_data;
int err = SCSI_DH_OK;
struct alua_queue_data *qdata;
- struct alua_port_group __rcu *pg;
+ struct alua_port_group *pg;
qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
if (!qdata) {
@@ -1053,7 +1055,7 @@ static void alua_check(struct scsi_device *sdev, bool force)
static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
{
struct alua_dh_data *h = sdev->handler_data;
- struct alua_port_group __rcu *pg;
+ struct alua_port_group *pg;
unsigned char state = SCSI_ACCESS_STATE_OPTIMAL;
int ret = BLKPREP_OK;
@@ -1123,7 +1125,7 @@ static void alua_bus_detach(struct scsi_device *sdev)
struct alua_port_group *pg;
spin_lock(&h->pg_lock);
- pg = h->pg;
+ pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
rcu_assign_pointer(h->pg, NULL);
h->sdev = NULL;
spin_unlock(&h->pg_lock);
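[Editor's note] The __rcu annotation added to alua_dh_data->pg is what sparse checks; the remaining hunks make the accesses match it. Writers holding pg_lock use rcu_dereference_protected() with a lockdep expression, readers use rcu_dereference() under rcu_read_lock(), and the plain local variables drop the __rcu marker since the annotation belongs on the shared pointer, not on copies. A condensed sketch of the pattern:

	/* Update side: pg_lock serializes writers */
	spin_lock(&h->pg_lock);
	old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
	rcu_assign_pointer(h->pg, new_pg);
	spin_unlock(&h->pg_lock);

	/* Read side: no lock, but must stay inside the RCU section */
	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (pg)
		group_id = pg->group_id;
	rcu_read_unlock();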
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index 9b5a457d4bca..6af3394d051d 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -34,13 +34,13 @@
* Definitions for the generic 5380 driver.
*/
-#define NCR5380_read(reg) inb(instance->io_port + reg)
-#define NCR5380_write(reg, value) outb(value, instance->io_port + reg)
+#define NCR5380_read(reg) inb(hostdata->base + (reg))
+#define NCR5380_write(reg, value) outb(value, hostdata->base + (reg))
-#define NCR5380_dma_xfer_len(instance, cmd, phase) (0)
-#define NCR5380_dma_recv_setup(instance, dst, len) (0)
-#define NCR5380_dma_send_setup(instance, src, len) (0)
-#define NCR5380_dma_residual(instance) (0)
+#define NCR5380_dma_xfer_len NCR5380_dma_xfer_none
+#define NCR5380_dma_recv_setup NCR5380_dma_setup_none
+#define NCR5380_dma_send_setup NCR5380_dma_setup_none
+#define NCR5380_dma_residual NCR5380_dma_residual_none
#define NCR5380_implementation_fields /* none */
@@ -71,6 +71,7 @@ static int dmx3191d_probe_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct Scsi_Host *shost;
+ struct NCR5380_hostdata *hostdata;
unsigned long io;
int error = -ENODEV;
@@ -88,7 +89,9 @@ static int dmx3191d_probe_one(struct pci_dev *pdev,
sizeof(struct NCR5380_hostdata));
if (!shost)
goto out_release_region;
- shost->io_port = io;
+
+ hostdata = shost_priv(shost);
+ hostdata->base = io;
/* This card does not seem to raise an interrupt on pdev->irq.
* Steam-powered SCSI controllers run without an IRQ anyway.
@@ -125,7 +128,8 @@ out_host_put:
static void dmx3191d_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
- unsigned long io = shost->io_port;
+ struct NCR5380_hostdata *hostdata = shost_priv(shost);
+ unsigned long io = hostdata->base;
scsi_remove_host(shost);
@@ -149,18 +153,7 @@ static struct pci_driver dmx3191d_pci_driver = {
.remove = dmx3191d_remove_one,
};
-static int __init dmx3191d_init(void)
-{
- return pci_register_driver(&dmx3191d_pci_driver);
-}
-
-static void __exit dmx3191d_exit(void)
-{
- pci_unregister_driver(&dmx3191d_pci_driver);
-}
-
-module_init(dmx3191d_init);
-module_exit(dmx3191d_exit);
+module_pci_driver(dmx3191d_pci_driver);
MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>");
MODULE_DESCRIPTION("Domex DMX3191D SCSI driver");
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 21c8d210c456..27c0dce22e72 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -651,7 +651,6 @@ static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
}
spin_unlock_irqrestore(pHba->host->host_lock, flags);
if (i >= nr) {
- kfree (reply);
printk(KERN_WARNING"%s: Too many outstanding "
"ioctl commands\n", pHba->name);
return (u32)-1;
@@ -1754,8 +1753,10 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
sg_offset = (msg[0]>>4)&0xf;
msg[2] = 0x40000000; // IOCTL context
msg[3] = adpt_ioctl_to_context(pHba, reply);
- if (msg[3] == (u32)-1)
+ if (msg[3] == (u32)-1) {
+ kfree(reply);
return -EBUSY;
+ }
memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
if(sg_offset) {
@@ -3350,7 +3351,7 @@ static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
if (opblk_va == NULL) {
dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
resblk_va, resblk_pa);
- printk(KERN_CRIT "%s: query operatio failed; Out of memory.\n",
+ printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
pHba->name);
return -ENOMEM;
}
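[Editor's note] The dpt_i2o change fixes the double free by making buffer ownership unambiguous: adpt_ioctl_to_context() merely consumes reply, so it no longer frees it on overflow; the allocating caller frees on that error path instead. The general shape, with the allocation size illustrative:

	reply = kzalloc(REPLY_FRAME_SIZE * 4, GFP_KERNEL);	/* size illustrative */
	if (!reply)
		return -ENOMEM;

	msg[3] = adpt_ioctl_to_context(pHba, reply);	/* never frees reply */
	if (msg[3] == (u32)-1) {
		kfree(reply);	/* exactly one free, owned by the caller */
		return -EBUSY;
	}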
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 9bd41a35a78a..59150cad0353 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -63,6 +63,14 @@ unsigned int fcoe_debug_logging;
module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+unsigned int fcoe_e_d_tov = 2 * 1000;
+module_param_named(e_d_tov, fcoe_e_d_tov, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(e_d_tov, "E_D_TOV in ms, default 2000");
+
+unsigned int fcoe_r_a_tov = 2 * 2 * 1000;
+module_param_named(r_a_tov, fcoe_r_a_tov, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(r_a_tov, "R_A_TOV in ms, default 4000");
+
static DEFINE_MUTEX(fcoe_config_mutex);
static struct workqueue_struct *fcoe_wq;
@@ -582,7 +590,8 @@ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
* Use default VLAN for FIP VLAN discovery protocol
*/
frame = (struct fip_frame *)skb->data;
- if (frame->fip.fip_op == ntohs(FIP_OP_VLAN) &&
+ if (ntohs(frame->eth.h_proto) == ETH_P_FIP &&
+ ntohs(frame->fip.fip_op) == FIP_OP_VLAN &&
fcoe->realdev != fcoe->netdev)
skb->dev = fcoe->realdev;
else
@@ -633,8 +642,8 @@ static int fcoe_lport_config(struct fc_lport *lport)
lport->qfull = 0;
lport->max_retry_count = 3;
lport->max_rport_retry_count = 3;
- lport->e_d_tov = 2 * 1000; /* FC-FS default */
- lport->r_a_tov = 2 * 2 * 1000;
+ lport->e_d_tov = fcoe_e_d_tov;
+ lport->r_a_tov = fcoe_r_a_tov;
lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
lport->does_npiv = 1;
@@ -2160,11 +2169,13 @@ static bool fcoe_match(struct net_device *netdev)
*/
static void fcoe_dcb_create(struct fcoe_interface *fcoe)
{
+ int ctlr_prio = TC_PRIO_BESTEFFORT;
+ int fcoe_prio = TC_PRIO_INTERACTIVE;
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
#ifdef CONFIG_DCB
int dcbx;
u8 fup, up;
struct net_device *netdev = fcoe->realdev;
- struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
struct dcb_app app = {
.priority = 0,
.protocol = ETH_P_FCOE
@@ -2186,10 +2197,12 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
fup = dcb_getapp(netdev, &app);
}
- fcoe->priority = ffs(up) ? ffs(up) - 1 : 0;
- ctlr->priority = ffs(fup) ? ffs(fup) - 1 : fcoe->priority;
+ fcoe_prio = ffs(up) ? ffs(up) - 1 : 0;
+ ctlr_prio = ffs(fup) ? ffs(fup) - 1 : fcoe_prio;
}
#endif
+ fcoe->priority = fcoe_prio;
+ ctlr->priority = ctlr_prio;
}
enum fcoe_create_link_state {
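[Editor's note] With e_d_tov and r_a_tov exposed as writable module parameters, the FC-FS defaults become tunables; note they are copied into the lport only at configure time, so a runtime write to /sys/module/fcoe/parameters/e_d_tov affects instances created afterwards. The sampling point, per the hunk above:

	lport->e_d_tov = fcoe_e_d_tov;	/* ms; was hard-coded 2 * 1000 */
	lport->r_a_tov = fcoe_r_a_tov;	/* ms; was hard-coded 2 * 2 * 1000 */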
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index dcf36537a767..cea57e27e713 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -801,6 +801,8 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
return -EINPROGRESS;
drop:
kfree_skb(skb);
+ LIBFCOE_FIP_DBG(fip, "drop els_send op %u d_id %x\n",
+ op, ntoh24(fh->fh_d_id));
return -EINVAL;
}
EXPORT_SYMBOL(fcoe_ctlr_els_send);
@@ -1316,7 +1318,7 @@ drop:
* The overall length has already been checked.
*/
static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
- struct fip_header *fh)
+ struct sk_buff *skb)
{
struct fip_desc *desc;
struct fip_mac_desc *mp;
@@ -1331,14 +1333,18 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
int num_vlink_desc;
int reset_phys_port = 0;
struct fip_vn_desc **vlink_desc_arr = NULL;
+ struct fip_header *fh = (struct fip_header *)skb->data;
+ struct ethhdr *eh = eth_hdr(skb);
LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
- if (!fcf || !lport->port_id) {
+ if (!fcf) {
/*
* We are yet to select best FCF, but we got CVL in the
* meantime. reset the ctlr and let it rediscover the FCF
*/
+ LIBFCOE_FIP_DBG(fip, "Resetting fcoe_ctlr as FCF has not been "
+ "selected yet\n");
mutex_lock(&fip->ctlr_mutex);
fcoe_ctlr_reset(fip);
mutex_unlock(&fip->ctlr_mutex);
@@ -1346,6 +1352,31 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
}
/*
+ * If we've selected an FCF check that the CVL is from there to avoid
+ * processing CVLs from an unexpected source. If it is from an
+ * unexpected source drop it on the floor.
+ */
+ if (!ether_addr_equal(eh->h_source, fcf->fcf_mac)) {
+ LIBFCOE_FIP_DBG(fip, "Dropping CVL due to source address "
+ "mismatch with FCF src=%pM\n", eh->h_source);
+ return;
+ }
+
+ /*
+ * If we haven't logged into the fabric but receive a CVL we should
+ * reset everything and go back to solicitation.
+ */
+ if (!lport->port_id) {
+ LIBFCOE_FIP_DBG(fip, "lport not logged in, resoliciting\n");
+ mutex_lock(&fip->ctlr_mutex);
+ fcoe_ctlr_reset(fip);
+ mutex_unlock(&fip->ctlr_mutex);
+ fc_lport_reset(fip->lp);
+ fcoe_ctlr_solicit(fip, NULL);
+ return;
+ }
+
+ /*
* mask of required descriptors. Validating each one clears its bit.
*/
desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME);
@@ -1576,7 +1607,7 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
if (op == FIP_OP_DISC && sub == FIP_SC_ADV)
fcoe_ctlr_recv_adv(fip, skb);
else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK)
- fcoe_ctlr_recv_clr_vlink(fip, fiph);
+ fcoe_ctlr_recv_clr_vlink(fip, skb);
kfree_skb(skb);
return 0;
drop:
@@ -2122,7 +2153,7 @@ static void fcoe_ctlr_vn_rport_callback(struct fc_lport *lport,
LIBFCOE_FIP_DBG(fip,
"rport FLOGI limited port_id %6.6x\n",
rdata->ids.port_id);
- lport->tt.rport_logoff(rdata);
+ fc_rport_logoff(rdata);
}
break;
default:
@@ -2145,9 +2176,15 @@ static void fcoe_ctlr_disc_stop_locked(struct fc_lport *lport)
{
struct fc_rport_priv *rdata;
+ rcu_read_lock();
+ list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
+ if (kref_get_unless_zero(&rdata->kref)) {
+ fc_rport_logoff(rdata);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
+ }
+ rcu_read_unlock();
mutex_lock(&lport->disc.disc_mutex);
- list_for_each_entry_rcu(rdata, &lport->disc.rports, peers)
- lport->tt.rport_logoff(rdata);
lport->disc.disc_callback = NULL;
mutex_unlock(&lport->disc.disc_mutex);
}
@@ -2178,7 +2215,7 @@ static void fcoe_ctlr_disc_stop(struct fc_lport *lport)
static void fcoe_ctlr_disc_stop_final(struct fc_lport *lport)
{
fcoe_ctlr_disc_stop(lport);
- lport->tt.rport_flush_queue();
+ fc_rport_flush_queue();
synchronize_rcu();
}
@@ -2393,6 +2430,8 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
switch (fip->state) {
case FIP_ST_VNMP_CLAIM:
case FIP_ST_VNMP_UP:
+ LIBFCOE_FIP_DBG(fip, "vn_probe_req: send reply, state %x\n",
+ fip->state);
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP,
frport->enode_mac, 0);
break;
@@ -2407,15 +2446,21 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
*/
if (fip->lp->wwpn > rdata->ids.port_name &&
!(frport->flags & FIP_FL_REC_OR_P2P)) {
+ LIBFCOE_FIP_DBG(fip, "vn_probe_req: "
+ "port_id collision\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP,
frport->enode_mac, 0);
break;
}
/* fall through */
case FIP_ST_VNMP_START:
+ LIBFCOE_FIP_DBG(fip, "vn_probe_req: "
+ "restart VN2VN negotiation\n");
fcoe_ctlr_vn_restart(fip);
break;
default:
+ LIBFCOE_FIP_DBG(fip, "vn_probe_req: ignore state %x\n",
+ fip->state);
break;
}
}
@@ -2437,9 +2482,12 @@ static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip,
case FIP_ST_VNMP_PROBE1:
case FIP_ST_VNMP_PROBE2:
case FIP_ST_VNMP_CLAIM:
+ LIBFCOE_FIP_DBG(fip, "vn_probe_reply: restart state %x\n",
+ fip->state);
fcoe_ctlr_vn_restart(fip);
break;
case FIP_ST_VNMP_UP:
+ LIBFCOE_FIP_DBG(fip, "vn_probe_reply: send claim notify\n");
fcoe_ctlr_vn_send_claim(fip);
break;
default:
@@ -2467,26 +2515,33 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
return;
mutex_lock(&lport->disc.disc_mutex);
- rdata = lport->tt.rport_create(lport, port_id);
+ rdata = fc_rport_create(lport, port_id);
if (!rdata) {
mutex_unlock(&lport->disc.disc_mutex);
return;
}
+ mutex_lock(&rdata->rp_mutex);
+ mutex_unlock(&lport->disc.disc_mutex);
rdata->ops = &fcoe_ctlr_vn_rport_ops;
rdata->disc_id = lport->disc.disc_id;
ids = &rdata->ids;
if ((ids->port_name != -1 && ids->port_name != new->ids.port_name) ||
- (ids->node_name != -1 && ids->node_name != new->ids.node_name))
- lport->tt.rport_logoff(rdata);
+ (ids->node_name != -1 && ids->node_name != new->ids.node_name)) {
+ mutex_unlock(&rdata->rp_mutex);
+ LIBFCOE_FIP_DBG(fip, "vn_add rport logoff %6.6x\n", port_id);
+ fc_rport_logoff(rdata);
+ mutex_lock(&rdata->rp_mutex);
+ }
ids->port_name = new->ids.port_name;
ids->node_name = new->ids.node_name;
- mutex_unlock(&lport->disc.disc_mutex);
+ mutex_unlock(&rdata->rp_mutex);
frport = fcoe_ctlr_rport(rdata);
- LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s\n",
- port_id, frport->fcoe_len ? "old" : "new");
+ LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s state %d\n",
+ port_id, frport->fcoe_len ? "old" : "new",
+ rdata->rp_state);
*frport = *fcoe_ctlr_rport(new);
frport->time = 0;
}
@@ -2506,12 +2561,12 @@ static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac)
struct fcoe_rport *frport;
int ret = -1;
- rdata = lport->tt.rport_lookup(lport, port_id);
+ rdata = fc_rport_lookup(lport, port_id);
if (rdata) {
frport = fcoe_ctlr_rport(rdata);
memcpy(mac, frport->enode_mac, ETH_ALEN);
ret = 0;
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
return ret;
}
@@ -2529,6 +2584,7 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
struct fcoe_rport *frport = fcoe_ctlr_rport(new);
if (frport->flags & FIP_FL_REC_OR_P2P) {
+ LIBFCOE_FIP_DBG(fip, "send probe req for P2P/REC\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
return;
}
@@ -2536,25 +2592,37 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
case FIP_ST_VNMP_START:
case FIP_ST_VNMP_PROBE1:
case FIP_ST_VNMP_PROBE2:
- if (new->ids.port_id == fip->port_id)
+ if (new->ids.port_id == fip->port_id) {
+ LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+ "restart, state %d\n",
+ fip->state);
fcoe_ctlr_vn_restart(fip);
+ }
break;
case FIP_ST_VNMP_CLAIM:
case FIP_ST_VNMP_UP:
if (new->ids.port_id == fip->port_id) {
if (new->ids.port_name > fip->lp->wwpn) {
+ LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+ "restart, port_id collision\n");
fcoe_ctlr_vn_restart(fip);
break;
}
+ LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+ "send claim notify\n");
fcoe_ctlr_vn_send_claim(fip);
break;
}
+ LIBFCOE_FIP_DBG(fip, "vn_claim_notify: send reply to %x\n",
+ new->ids.port_id);
fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, frport->enode_mac,
min((u32)frport->fcoe_len,
fcoe_ctlr_fcoe_size(fip)));
fcoe_ctlr_vn_add(fip, new);
break;
default:
+ LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+ "ignoring claim from %x\n", new->ids.port_id);
break;
}
}
@@ -2591,19 +2659,26 @@ static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip,
frport = fcoe_ctlr_rport(new);
if (frport->flags & FIP_FL_REC_OR_P2P) {
+ LIBFCOE_FIP_DBG(fip, "p2p beacon while in vn2vn mode\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
return;
}
- rdata = lport->tt.rport_lookup(lport, new->ids.port_id);
+ rdata = fc_rport_lookup(lport, new->ids.port_id);
if (rdata) {
if (rdata->ids.node_name == new->ids.node_name &&
rdata->ids.port_name == new->ids.port_name) {
frport = fcoe_ctlr_rport(rdata);
- if (!frport->time && fip->state == FIP_ST_VNMP_UP)
- lport->tt.rport_login(rdata);
+ LIBFCOE_FIP_DBG(fip, "beacon from rport %x\n",
+ rdata->ids.port_id);
+ if (!frport->time && fip->state == FIP_ST_VNMP_UP) {
+ LIBFCOE_FIP_DBG(fip, "beacon expired "
+ "for rport %x\n",
+ rdata->ids.port_id);
+ fc_rport_login(rdata);
+ }
frport->time = jiffies;
}
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
return;
}
if (fip->state != FIP_ST_VNMP_UP)
@@ -2638,11 +2713,15 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip)
unsigned long deadline;
next_time = jiffies + msecs_to_jiffies(FIP_VN_BEACON_INT * 10);
- mutex_lock(&lport->disc.disc_mutex);
+ rcu_read_lock();
list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
+ if (!kref_get_unless_zero(&rdata->kref))
+ continue;
frport = fcoe_ctlr_rport(rdata);
- if (!frport->time)
+ if (!frport->time) {
+ kref_put(&rdata->kref, fc_rport_destroy);
continue;
+ }
deadline = frport->time +
msecs_to_jiffies(FIP_VN_BEACON_INT * 25 / 10);
if (time_after_eq(jiffies, deadline)) {
@@ -2650,11 +2729,12 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip)
LIBFCOE_FIP_DBG(fip,
"port %16.16llx fc_id %6.6x beacon expired\n",
rdata->ids.port_name, rdata->ids.port_id);
- lport->tt.rport_logoff(rdata);
+ fc_rport_logoff(rdata);
} else if (time_before(deadline, next_time))
next_time = deadline;
+ kref_put(&rdata->kref, fc_rport_destroy);
}
- mutex_unlock(&lport->disc.disc_mutex);
+ rcu_read_unlock();
return next_time;
}
@@ -2674,11 +2754,21 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
struct fc_rport_priv rdata;
struct fcoe_rport frport;
} buf;
- int rc;
+ int rc, vlan_id = 0;
fiph = (struct fip_header *)skb->data;
sub = fiph->fip_subcode;
+ if (fip->lp->vlan)
+ vlan_id = skb_vlan_tag_get_id(skb);
+
+ if (vlan_id && vlan_id != fip->lp->vlan) {
+ LIBFCOE_FIP_DBG(fip, "vn_recv drop frame sub %x vlan %d\n",
+ sub, vlan_id);
+ rc = -EAGAIN;
+ goto drop;
+ }
+
rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
if (rc) {
LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
@@ -2941,7 +3031,7 @@ static void fcoe_ctlr_disc_recv(struct fc_lport *lport, struct fc_frame *fp)
rjt_data.reason = ELS_RJT_UNSUP;
rjt_data.explan = ELS_EXPL_NONE;
- lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
fc_frame_free(fp);
}
@@ -2991,12 +3081,17 @@ static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip)
mutex_lock(&disc->disc_mutex);
callback = disc->pending ? disc->disc_callback : NULL;
disc->pending = 0;
+ mutex_unlock(&disc->disc_mutex);
+ rcu_read_lock();
list_for_each_entry_rcu(rdata, &disc->rports, peers) {
+ if (!kref_get_unless_zero(&rdata->kref))
+ continue;
frport = fcoe_ctlr_rport(rdata);
if (frport->time)
- lport->tt.rport_login(rdata);
+ fc_rport_login(rdata);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
- mutex_unlock(&disc->disc_mutex);
+ rcu_read_unlock();
if (callback)
callback(lport, DISC_EV_SUCCESS);
}
@@ -3015,11 +3110,13 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
switch (fip->state) {
case FIP_ST_VNMP_START:
fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE1);
+ LIBFCOE_FIP_DBG(fip, "vn_timeout: send 1st probe request\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
next_time = jiffies + msecs_to_jiffies(FIP_VN_PROBE_WAIT);
break;
case FIP_ST_VNMP_PROBE1:
fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE2);
+ LIBFCOE_FIP_DBG(fip, "vn_timeout: send 2nd probe request\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
break;
@@ -3030,6 +3127,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
hton24(mac + 3, new_port_id);
fcoe_ctlr_map_dest(fip);
fip->update_mac(fip->lp, mac);
+ LIBFCOE_FIP_DBG(fip, "vn_timeout: send claim notify\n");
fcoe_ctlr_vn_send_claim(fip);
next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
break;
@@ -3041,6 +3139,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
next_time = fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT);
if (time_after_eq(jiffies, next_time)) {
fcoe_ctlr_set_state(fip, FIP_ST_VNMP_UP);
+ LIBFCOE_FIP_DBG(fip, "vn_timeout: send vn2vn beacon\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON,
fcoe_all_vn2vn, 0);
next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
@@ -3051,6 +3150,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
case FIP_ST_VNMP_UP:
next_time = fcoe_ctlr_vn_age(fip);
if (time_after_eq(jiffies, fip->port_ka_time)) {
+ LIBFCOE_FIP_DBG(fip, "vn_timeout: send vn2vn beacon\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON,
fcoe_all_vn2vn, 0);
fip->port_ka_time = jiffies +
@@ -3135,7 +3235,6 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,
fc_exch_init(lport);
fc_elsct_init(lport);
fc_lport_init(lport);
- fc_rport_init(lport);
fc_disc_init(lport);
fcoe_ctlr_mode_set(lport, fip, fip->mode);
return 0;
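[Editor's note] Several hunks above convert rport-list walks from holding disc_mutex to RCU. RCU only guarantees the rdata memory outlives the read-side section, not that the object is still in use, so each walk pins the entry with kref_get_unless_zero() before calling into it. The recurring pattern:

	rcu_read_lock();
	list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
		if (!kref_get_unless_zero(&rdata->kref))
			continue;	/* refcount already hit zero: skip */
		fc_rport_logoff(rdata);	/* safe: we hold our own reference */
		kref_put(&rdata->kref, fc_rport_destroy);
	}
	rcu_read_unlock();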
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 0675fd128734..9cf3d56296ab 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -335,16 +335,24 @@ static ssize_t store_ctlr_enabled(struct device *dev,
const char *buf, size_t count)
{
struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ bool enabled;
int rc;
+ if (*buf == '1')
+ enabled = true;
+ else if (*buf == '0')
+ enabled = false;
+ else
+ return -EINVAL;
+
switch (ctlr->enabled) {
case FCOE_CTLR_ENABLED:
- if (*buf == '1')
+ if (enabled)
return count;
ctlr->enabled = FCOE_CTLR_DISABLED;
break;
case FCOE_CTLR_DISABLED:
- if (*buf == '0')
+ if (!enabled)
return count;
ctlr->enabled = FCOE_CTLR_ENABLED;
break;
@@ -424,6 +432,75 @@ static FCOE_DEVICE_ATTR(ctlr, fip_vlan_responder, S_IRUGO | S_IWUSR,
store_ctlr_fip_resp);
static ssize_t
+fcoe_ctlr_var_store(u32 *var, const char *buf, size_t count)
+{
+ int err;
+ unsigned long v;
+
+ err = kstrtoul(buf, 10, &v);
+ if (err || v > UINT_MAX)
+ return -EINVAL;
+
+ *var = v;
+
+ return count;
+}
+
+static ssize_t store_ctlr_r_a_tov(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+ if (ctlr_dev->enabled == FCOE_CTLR_ENABLED)
+ return -EBUSY;
+ if (ctlr_dev->enabled == FCOE_CTLR_DISABLED)
+ return fcoe_ctlr_var_store(&ctlr->lp->r_a_tov, buf, count);
+ return -ENOTSUPP;
+}
+
+static ssize_t show_ctlr_r_a_tov(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+ return sprintf(buf, "%d\n", ctlr->lp->r_a_tov);
+}
+
+static FCOE_DEVICE_ATTR(ctlr, r_a_tov, S_IRUGO | S_IWUSR,
+ show_ctlr_r_a_tov, store_ctlr_r_a_tov);
+
+static ssize_t store_ctlr_e_d_tov(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+ if (ctlr_dev->enabled == FCOE_CTLR_ENABLED)
+ return -EBUSY;
+ if (ctlr_dev->enabled == FCOE_CTLR_DISABLED)
+ return fcoe_ctlr_var_store(&ctlr->lp->e_d_tov, buf, count);
+ return -ENOTSUPP;
+}
+
+static ssize_t show_ctlr_e_d_tov(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+ return sprintf(buf, "%d\n", ctlr->lp->e_d_tov);
+}
+
+static FCOE_DEVICE_ATTR(ctlr, e_d_tov, S_IRUGO | S_IWUSR,
+ show_ctlr_e_d_tov, store_ctlr_e_d_tov);
+
+static ssize_t
store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -507,6 +584,8 @@ static struct attribute_group fcoe_ctlr_lesb_attr_group = {
static struct attribute *fcoe_ctlr_attrs[] = {
&device_attr_fcoe_ctlr_fip_vlan_responder.attr,
&device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
+ &device_attr_fcoe_ctlr_r_a_tov.attr,
+ &device_attr_fcoe_ctlr_e_d_tov.attr,
&device_attr_fcoe_ctlr_enabled.attr,
&device_attr_fcoe_ctlr_mode.attr,
NULL,
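[Editor's note] fcoe_ctlr_var_store() centralizes validation for the two new timeout attributes; kstrtoul() is strict, so trailing garbage and overflow both fail, and the store paths gate on controller state. The resulting contract, condensed:

	/*  -EINVAL   not a base-10 integer, or value > UINT_MAX
	 *  -EBUSY    controller enabled: timeouts fixed while the link is live
	 *  -ENOTSUPP controller neither enabled nor disabled
	 *  count     value accepted
	 */
	err = kstrtoul(buf, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;
	*var = v;
	return count;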
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index d9fd2f841585..2544a37ece0a 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -441,30 +441,38 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
unsigned long ptr;
spinlock_t *io_lock = NULL;
int io_lock_acquired = 0;
+ struct fc_rport_libfc_priv *rp;
if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
return SCSI_MLQUEUE_HOST_BUSY;
rport = starget_to_rport(scsi_target(sc->device));
+ if (!rport) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "returning DID_NO_CONNECT for IO as rport is NULL\n");
+ sc->result = DID_NO_CONNECT << 16;
+ done(sc);
+ return 0;
+ }
+
ret = fc_remote_port_chkready(rport);
if (ret) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "rport is not ready\n");
atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
sc->result = ret;
done(sc);
return 0;
}
- if (rport) {
- struct fc_rport_libfc_priv *rp = rport->dd_data;
-
- if (!rp || rp->rp_state != RPORT_ST_READY) {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ rp = rport->dd_data;
+ if (!rp || rp->rp_state != RPORT_ST_READY) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"returning DID_NO_CONNECT for IO as rport is removed\n");
- atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
- sc->result = DID_NO_CONNECT<<16;
- done(sc);
- return 0;
- }
+ atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
+ sc->result = DID_NO_CONNECT<<16;
+ done(sc);
+ return 0;
}
if (lp->state != LPORT_ST_READY || !(lp->link_up))
@@ -2543,7 +2551,7 @@ int fnic_reset(struct Scsi_Host *shost)
* Reset local port, this will clean up libFC exchanges,
* reset remote port sessions, and if link is up, begin flogi
*/
- ret = lp->tt.lport_reset(lp);
+ ret = fc_lport_reset(lp);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Returning from fnic reset %s\n",
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 4e15c4bf0795..5a5fa01576b7 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -613,7 +613,7 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
fc_trace_entries.rd_idx = 0;
}
- fc_buf->time_stamp = CURRENT_TIME;
+ ktime_get_real_ts64(&fc_buf->time_stamp);
fc_buf->host_no = host_no;
fc_buf->frame_type = frame_type;
@@ -740,7 +740,7 @@ void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
len = *orig_len;
- time_to_tm(tdata->time_stamp.tv_sec, 0, &tm);
+ time64_to_tm(tdata->time_stamp.tv_sec, 0, &tm);
fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t";
len += snprintf(fnic_dbgfs_prt->buffer + len,
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
index a8aa0578fcb0..e375d0c2eaaf 100644
--- a/drivers/scsi/fnic/fnic_trace.h
+++ b/drivers/scsi/fnic/fnic_trace.h
@@ -72,7 +72,7 @@ struct fnic_trace_data {
typedef struct fnic_trace_data fnic_trace_data_t;
struct fc_trace_hdr {
- struct timespec time_stamp;
+ struct timespec64 time_stamp;
u32 host_no;
u8 frame_type;
u8 frame_len;
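[Editor's note] The fnic trace conversion is a y2038 fix: struct timespec and CURRENT_TIME carry 32-bit seconds on 32-bit architectures, while timespec64 plus ktime_get_real_ts64() do not overflow in 2038. A minimal sketch of the replacement pair:

	struct timespec64 ts;
	struct tm tm;

	ktime_get_real_ts64(&ts);		/* wall clock, 64-bit seconds */
	time64_to_tm(ts.tv_sec, 0, &tm);	/* offset 0 == UTC */
	pr_debug("%02d:%02d:%02d.%09lu\n",
		 tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);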
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 9795d6f3e197..ba69d6112fa1 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -499,10 +499,7 @@ void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
if (err)
- printk(KERN_ERR
- "Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
- err);
+ pr_err("Can't add addr [%pM], %d\n", addr, err);
}
void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
@@ -517,10 +514,7 @@ void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
if (err)
- printk(KERN_ERR
- "Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
- err);
+ pr_err("Can't del addr [%pM], %d\n", addr, err);
}
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
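[Editor's note] %pM is the kernel's pointer extension for printing a 6-byte MAC address as colon-separated hex, replacing six %02x conversions and their argument list; pr_err() also supplies the KERN_ERR level implicitly. For example:

	u8 addr[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	pr_err("Can't add addr [%pM], %d\n", addr, err);
	/* -> "Can't add addr [00:11:22:33:44:55], -22" (err illustrative) */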
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index cbf010324c18..de5147a8c959 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -64,9 +64,9 @@ static int card[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
module_param_array(card, int, NULL, 0);
MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC3181E, 4=HP C2502)");
+MODULE_ALIAS("g_NCR5380_mmio");
MODULE_LICENSE("GPL");
-#ifndef SCSI_G_NCR5380_MEM
/*
* Configure I/O address of 53C400A or DTC436 by writing magic numbers
* to ports 0x779 and 0x379.
@@ -88,40 +88,35 @@ static void magic_configure(int idx, u8 irq, u8 magic[])
cfg = 0x80 | idx | (irq << 4);
outb(cfg, 0x379);
}
-#endif
+
+static unsigned int ncr_53c400a_ports[] = {
+ 0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
+};
+static unsigned int dtc_3181e_ports[] = {
+ 0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0
+};
+static u8 ncr_53c400a_magic[] = { /* 53C400A & DTC436 */
+ 0x59, 0xb9, 0xc5, 0xae, 0xa6
+};
+static u8 hp_c2502_magic[] = { /* HP C2502 */
+ 0x0f, 0x22, 0xf0, 0x20, 0x80
+};
static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
struct device *pdev, int base, int irq, int board)
{
- unsigned int *ports;
+ bool is_pmio = base <= 0xffff;
+ int ret;
+ int flags = 0;
+ unsigned int *ports = NULL;
u8 *magic = NULL;
-#ifndef SCSI_G_NCR5380_MEM
int i;
int port_idx = -1;
unsigned long region_size;
-#endif
- static unsigned int ncr_53c400a_ports[] = {
- 0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
- };
- static unsigned int dtc_3181e_ports[] = {
- 0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0
- };
- static u8 ncr_53c400a_magic[] = { /* 53C400A & DTC436 */
- 0x59, 0xb9, 0xc5, 0xae, 0xa6
- };
- static u8 hp_c2502_magic[] = { /* HP C2502 */
- 0x0f, 0x22, 0xf0, 0x20, 0x80
- };
- int flags, ret;
struct Scsi_Host *instance;
struct NCR5380_hostdata *hostdata;
-#ifdef SCSI_G_NCR5380_MEM
- void __iomem *iomem;
- resource_size_t iomem_size;
-#endif
+ u8 __iomem *iomem;
- ports = NULL;
- flags = 0;
switch (board) {
case BOARD_NCR5380:
flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP;
@@ -140,8 +135,7 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
break;
}
-#ifndef SCSI_G_NCR5380_MEM
- if (ports && magic) {
+ if (is_pmio && ports && magic) {
/* wakeup sequence for the NCR53C400A and DTC3181E */
/* Disable the adapter and look for a free io port */
@@ -170,84 +164,89 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
if (ports[i]) {
/* At this point we have our region reserved */
magic_configure(i, 0, magic); /* no IRQ yet */
- outb(0xc0, ports[i] + 9);
- if (inb(ports[i] + 9) != 0x80) {
+ base = ports[i];
+ outb(0xc0, base + 9);
+ if (inb(base + 9) != 0x80) {
ret = -ENODEV;
goto out_release;
}
- base = ports[i];
port_idx = i;
} else
return -EINVAL;
- }
- else
- {
+ } else if (is_pmio) {
/* NCR5380 - no configuration, just grab */
region_size = 8;
if (!base || !request_region(base, region_size, "ncr5380"))
return -EBUSY;
+ } else { /* MMIO */
+ region_size = NCR53C400_region_size;
+ if (!request_mem_region(base, region_size, "ncr5380"))
+ return -EBUSY;
}
-#else
- iomem_size = NCR53C400_region_size;
- if (!request_mem_region(base, iomem_size, "ncr5380"))
- return -EBUSY;
- iomem = ioremap(base, iomem_size);
+
+ if (is_pmio)
+ iomem = ioport_map(base, region_size);
+ else
+ iomem = ioremap(base, region_size);
+
if (!iomem) {
- release_mem_region(base, iomem_size);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_release;
}
-#endif
+
instance = scsi_host_alloc(tpnt, sizeof(struct NCR5380_hostdata));
if (instance == NULL) {
ret = -ENOMEM;
- goto out_release;
+ goto out_unmap;
}
hostdata = shost_priv(instance);
-#ifndef SCSI_G_NCR5380_MEM
- instance->io_port = base;
- instance->n_io_port = region_size;
- hostdata->io_width = 1; /* 8-bit PDMA by default */
-
- /*
- * On NCR53C400 boards, NCR5380 registers are mapped 8 past
- * the base address.
- */
- switch (board) {
- case BOARD_NCR53C400:
- instance->io_port += 8;
- hostdata->c400_ctl_status = 0;
- hostdata->c400_blk_cnt = 1;
- hostdata->c400_host_buf = 4;
- break;
- case BOARD_DTC3181E:
- hostdata->io_width = 2; /* 16-bit PDMA */
- /* fall through */
- case BOARD_NCR53C400A:
- case BOARD_HP_C2502:
- hostdata->c400_ctl_status = 9;
- hostdata->c400_blk_cnt = 10;
- hostdata->c400_host_buf = 8;
- break;
- }
-#else
- instance->base = base;
- hostdata->iomem = iomem;
- hostdata->iomem_size = iomem_size;
- switch (board) {
- case BOARD_NCR53C400:
- hostdata->c400_ctl_status = 0x100;
- hostdata->c400_blk_cnt = 0x101;
- hostdata->c400_host_buf = 0x104;
- break;
- case BOARD_DTC3181E:
- case BOARD_NCR53C400A:
- case BOARD_HP_C2502:
- pr_err(DRV_MODULE_NAME ": unknown register offsets\n");
- ret = -EINVAL;
- goto out_unregister;
+ hostdata->io = iomem;
+ hostdata->region_size = region_size;
+
+ if (is_pmio) {
+ hostdata->io_port = base;
+ hostdata->io_width = 1; /* 8-bit PDMA by default */
+ hostdata->offset = 0;
+
+ /*
+ * On NCR53C400 boards, NCR5380 registers are mapped 8 past
+ * the base address.
+ */
+ switch (board) {
+ case BOARD_NCR53C400:
+ hostdata->io_port += 8;
+ hostdata->c400_ctl_status = 0;
+ hostdata->c400_blk_cnt = 1;
+ hostdata->c400_host_buf = 4;
+ break;
+ case BOARD_DTC3181E:
+ hostdata->io_width = 2; /* 16-bit PDMA */
+ /* fall through */
+ case BOARD_NCR53C400A:
+ case BOARD_HP_C2502:
+ hostdata->c400_ctl_status = 9;
+ hostdata->c400_blk_cnt = 10;
+ hostdata->c400_host_buf = 8;
+ break;
+ }
+ } else {
+ hostdata->base = base;
+ hostdata->offset = NCR53C400_mem_base;
+ switch (board) {
+ case BOARD_NCR53C400:
+ hostdata->c400_ctl_status = 0x100;
+ hostdata->c400_blk_cnt = 0x101;
+ hostdata->c400_host_buf = 0x104;
+ break;
+ case BOARD_DTC3181E:
+ case BOARD_NCR53C400A:
+ case BOARD_HP_C2502:
+ pr_err(DRV_MODULE_NAME ": unknown register offsets\n");
+ ret = -EINVAL;
+ goto out_unregister;
+ }
}
-#endif
ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP);
if (ret)
@@ -273,11 +272,9 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
instance->irq = NO_IRQ;
if (instance->irq != NO_IRQ) {
-#ifndef SCSI_G_NCR5380_MEM
/* set IRQ for HP C2502 */
if (board == BOARD_HP_C2502)
magic_configure(port_idx, instance->irq, magic);
-#endif
if (request_irq(instance->irq, generic_NCR5380_intr,
0, "NCR5380", instance)) {
printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
@@ -303,38 +300,39 @@ out_free_irq:
NCR5380_exit(instance);
out_unregister:
scsi_host_put(instance);
-out_release:
-#ifndef SCSI_G_NCR5380_MEM
- release_region(base, region_size);
-#else
+out_unmap:
iounmap(iomem);
- release_mem_region(base, iomem_size);
-#endif
+out_release:
+ if (is_pmio)
+ release_region(base, region_size);
+ else
+ release_mem_region(base, region_size);
return ret;
}
static void generic_NCR5380_release_resources(struct Scsi_Host *instance)
{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ void __iomem *iomem = hostdata->io;
+ unsigned long io_port = hostdata->io_port;
+ unsigned long base = hostdata->base;
+ unsigned long region_size = hostdata->region_size;
+
scsi_remove_host(instance);
if (instance->irq != NO_IRQ)
free_irq(instance->irq, instance);
NCR5380_exit(instance);
-#ifndef SCSI_G_NCR5380_MEM
- release_region(instance->io_port, instance->n_io_port);
-#else
- {
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
- iounmap(hostdata->iomem);
- release_mem_region(instance->base, hostdata->iomem_size);
- }
-#endif
scsi_host_put(instance);
+ iounmap(iomem);
+ if (io_port)
+ release_region(io_port, region_size);
+ else
+ release_mem_region(base, region_size);
}
/**
* generic_NCR5380_pread - pseudo DMA read
- * @instance: adapter to read from
+ * @hostdata: scsi host private data
* @dst: buffer to read into
* @len: buffer length
*
@@ -342,10 +340,9 @@ static void generic_NCR5380_release_resources(struct Scsi_Host *instance)
* controller
*/
-static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
+static inline int generic_NCR5380_pread(struct NCR5380_hostdata *hostdata,
unsigned char *dst, int len)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
int blocks = len / 128;
int start = 0;
@@ -361,18 +358,16 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
; /* FIXME - no timeout */
-#ifndef SCSI_G_NCR5380_MEM
- if (hostdata->io_width == 2)
- insw(instance->io_port + hostdata->c400_host_buf,
+ if (hostdata->io_port && hostdata->io_width == 2)
+ insw(hostdata->io_port + hostdata->c400_host_buf,
dst + start, 64);
- else
- insb(instance->io_port + hostdata->c400_host_buf,
+ else if (hostdata->io_port)
+ insb(hostdata->io_port + hostdata->c400_host_buf,
dst + start, 128);
-#else
- /* implies SCSI_G_NCR5380_MEM */
- memcpy_fromio(dst + start,
- hostdata->iomem + NCR53C400_host_buffer, 128);
-#endif
+ else
+ memcpy_fromio(dst + start,
+ hostdata->io + NCR53C400_host_buffer, 128);
+
start += 128;
blocks--;
}
@@ -381,18 +376,16 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
; /* FIXME - no timeout */
-#ifndef SCSI_G_NCR5380_MEM
- if (hostdata->io_width == 2)
- insw(instance->io_port + hostdata->c400_host_buf,
+ if (hostdata->io_port && hostdata->io_width == 2)
+ insw(hostdata->io_port + hostdata->c400_host_buf,
dst + start, 64);
- else
- insb(instance->io_port + hostdata->c400_host_buf,
+ else if (hostdata->io_port)
+ insb(hostdata->io_port + hostdata->c400_host_buf,
dst + start, 128);
-#else
- /* implies SCSI_G_NCR5380_MEM */
- memcpy_fromio(dst + start,
- hostdata->iomem + NCR53C400_host_buffer, 128);
-#endif
+ else
+ memcpy_fromio(dst + start,
+ hostdata->io + NCR53C400_host_buffer, 128);
+
start += 128;
blocks--;
}
@@ -412,7 +405,7 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
/**
* generic_NCR5380_pwrite - pseudo DMA write
- * @instance: adapter to read from
+ * @hostdata: scsi host private data
* @src: buffer to write from
* @len: buffer length
*
@@ -420,10 +413,9 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
* controller
*/
-static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
+static inline int generic_NCR5380_pwrite(struct NCR5380_hostdata *hostdata,
unsigned char *src, int len)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
int blocks = len / 128;
int start = 0;
@@ -439,18 +431,17 @@ static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
break;
while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
; // FIXME - timeout
-#ifndef SCSI_G_NCR5380_MEM
- if (hostdata->io_width == 2)
- outsw(instance->io_port + hostdata->c400_host_buf,
+
+ if (hostdata->io_port && hostdata->io_width == 2)
+ outsw(hostdata->io_port + hostdata->c400_host_buf,
src + start, 64);
- else
- outsb(instance->io_port + hostdata->c400_host_buf,
+ else if (hostdata->io_port)
+ outsb(hostdata->io_port + hostdata->c400_host_buf,
src + start, 128);
-#else
- /* implies SCSI_G_NCR5380_MEM */
- memcpy_toio(hostdata->iomem + NCR53C400_host_buffer,
- src + start, 128);
-#endif
+ else
+ memcpy_toio(hostdata->io + NCR53C400_host_buffer,
+ src + start, 128);
+
start += 128;
blocks--;
}
@@ -458,18 +449,16 @@ static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
; // FIXME - no timeout
-#ifndef SCSI_G_NCR5380_MEM
- if (hostdata->io_width == 2)
- outsw(instance->io_port + hostdata->c400_host_buf,
+ if (hostdata->io_port && hostdata->io_width == 2)
+ outsw(hostdata->io_port + hostdata->c400_host_buf,
src + start, 64);
- else
- outsb(instance->io_port + hostdata->c400_host_buf,
+ else if (hostdata->io_port)
+ outsb(hostdata->io_port + hostdata->c400_host_buf,
src + start, 128);
-#else
- /* implies SCSI_G_NCR5380_MEM */
- memcpy_toio(hostdata->iomem + NCR53C400_host_buffer,
- src + start, 128);
-#endif
+ else
+ memcpy_toio(hostdata->io + NCR53C400_host_buffer,
+ src + start, 128);
+
start += 128;
blocks--;
}
@@ -489,10 +478,9 @@ static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
return 0;
}
-static int generic_NCR5380_dma_xfer_len(struct Scsi_Host *instance,
+static int generic_NCR5380_dma_xfer_len(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
int transfersize = cmd->transfersize;
if (hostdata->flags & FLAG_NO_PSEUDO_DMA)
@@ -566,7 +554,7 @@ static struct isa_driver generic_NCR5380_isa_driver = {
},
};
-#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+#ifdef CONFIG_PNP
static struct pnp_device_id generic_NCR5380_pnp_ids[] = {
{ .id = "DTC436e", .driver_data = BOARD_DTC3181E },
{ .id = "" }
@@ -600,7 +588,7 @@ static struct pnp_driver generic_NCR5380_pnp_driver = {
.probe = generic_NCR5380_pnp_probe,
.remove = generic_NCR5380_pnp_remove,
};
-#endif /* !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) */
+#endif /* defined(CONFIG_PNP) */
static int pnp_registered, isa_registered;
@@ -624,7 +612,7 @@ static int __init generic_NCR5380_init(void)
card[0] = BOARD_HP_C2502;
}
-#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+#ifdef CONFIG_PNP
if (!pnp_register_driver(&generic_NCR5380_pnp_driver))
pnp_registered = 1;
#endif
@@ -637,7 +625,7 @@ static int __init generic_NCR5380_init(void)
static void __exit generic_NCR5380_exit(void)
{
-#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+#ifdef CONFIG_PNP
if (pnp_registered)
pnp_unregister_driver(&generic_NCR5380_pnp_driver);
#endif
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index b175b9234458..3ce5b65ccb00 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -14,49 +14,28 @@
#ifndef GENERIC_NCR5380_H
#define GENERIC_NCR5380_H
-#ifndef SCSI_G_NCR5380_MEM
#define DRV_MODULE_NAME "g_NCR5380"
#define NCR5380_read(reg) \
- inb(instance->io_port + (reg))
+ ioread8(hostdata->io + hostdata->offset + (reg))
#define NCR5380_write(reg, value) \
- outb(value, instance->io_port + (reg))
+ iowrite8(value, hostdata->io + hostdata->offset + (reg))
#define NCR5380_implementation_fields \
+ int offset; \
int c400_ctl_status; \
int c400_blk_cnt; \
int c400_host_buf; \
int io_width;
-#else
-/* therefore SCSI_G_NCR5380_MEM */
-#define DRV_MODULE_NAME "g_NCR5380_mmio"
-
#define NCR53C400_mem_base 0x3880
#define NCR53C400_host_buffer 0x3900
#define NCR53C400_region_size 0x3a00
-#define NCR5380_read(reg) \
- readb(((struct NCR5380_hostdata *)shost_priv(instance))->iomem + \
- NCR53C400_mem_base + (reg))
-#define NCR5380_write(reg, value) \
- writeb(value, ((struct NCR5380_hostdata *)shost_priv(instance))->iomem + \
- NCR53C400_mem_base + (reg))
-
-#define NCR5380_implementation_fields \
- void __iomem *iomem; \
- resource_size_t iomem_size; \
- int c400_ctl_status; \
- int c400_blk_cnt; \
- int c400_host_buf;
-
-#endif
-
-#define NCR5380_dma_xfer_len(instance, cmd, phase) \
- generic_NCR5380_dma_xfer_len(instance, cmd)
+#define NCR5380_dma_xfer_len generic_NCR5380_dma_xfer_len
#define NCR5380_dma_recv_setup generic_NCR5380_pread
#define NCR5380_dma_send_setup generic_NCR5380_pwrite
-#define NCR5380_dma_residual(instance) (0)
+#define NCR5380_dma_residual NCR5380_dma_residual_none
#define NCR5380_intr generic_NCR5380_intr
#define NCR5380_queue_command generic_NCR5380_queue_command
@@ -73,4 +52,3 @@
#define BOARD_HP_C2502 4
#endif /* GENERIC_NCR5380_H */
-
diff --git a/drivers/scsi/g_NCR5380_mmio.c b/drivers/scsi/g_NCR5380_mmio.c
deleted file mode 100644
index 8cdde71ba0c8..000000000000
--- a/drivers/scsi/g_NCR5380_mmio.c
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * There is probably a nicer way to do this but this one makes
- * pretty obvious what is happening. We rebuild the same file with
- * different options for mmio versus pio.
- */
-
-#define SCSI_G_NCR5380_MEM
-
-#include "g_NCR5380.c"
-
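[Editor's note] Folding g_NCR5380_mmio.c into g_NCR5380.c works because ioread8()/iowrite8() accept cookies from both ioport_map() and ioremap(), so one register accessor covers PIO and MMIO cards; hostdata->io plus hostdata->offset then locates the 5380 register window. The core of the unification, roughly:

	u8 __iomem *io;

	if (is_pmio)
		io = ioport_map(base, region_size);	/* wrap an I/O port range */
	else
		io = ioremap(base, region_size);	/* map MMIO */
	if (!io)
		return -ENOMEM;

	/* One accessor pair serves both bus types: */
	val = ioread8(io + offset + reg);
	iowrite8(val, io + offset + reg);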
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 72c98522bd26..c0cd505a9ef7 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -13,6 +13,7 @@
#define _HISI_SAS_H_
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -110,7 +111,7 @@ struct hisi_sas_device {
struct domain_device *sas_device;
u64 attached_phy;
u64 device_id;
- u64 running_req;
+ atomic64_t running_req;
u8 dev_status;
};
@@ -149,7 +150,8 @@ struct hisi_sas_hw {
struct domain_device *device);
struct hisi_sas_device *(*alloc_dev)(struct domain_device *device);
void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no);
- int (*get_free_slot)(struct hisi_hba *hisi_hba, int *q, int *s);
+ int (*get_free_slot)(struct hisi_hba *hisi_hba, u32 dev_id,
+ int *q, int *s);
void (*start_delivery)(struct hisi_hba *hisi_hba);
int (*prep_ssp)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot, int is_tmf,
@@ -166,6 +168,9 @@ struct hisi_sas_hw {
void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no);
void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no);
void (*phy_hard_reset)(struct hisi_hba *hisi_hba, int phy_no);
+ void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no,
+ struct sas_phy_linkrates *linkrates);
+ enum sas_linkrate (*phy_get_max_linkrate)(void);
void (*free_device)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *dev);
int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
@@ -183,6 +188,7 @@ struct hisi_hba {
u32 ctrl_reset_reg;
u32 ctrl_reset_sts_reg;
u32 ctrl_clock_ena_reg;
+ u32 refclk_frequency_mhz;
u8 sas_addr[SAS_ADDR_SIZE];
int n_phy;
@@ -205,7 +211,6 @@ struct hisi_hba {
struct hisi_sas_port port[HISI_SAS_MAX_PHYS];
int queue_count;
- int queue;
struct hisi_sas_slot *slot_prep;
struct dma_pool *sge_page_pool;
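[Editor's note] Turning running_req into an atomic64_t closes the race of plain ++/-- on a u64 from concurrent submission and completion paths, with no extra locking. The accessors involved:

	atomic64_set(&sas_dev->running_req, 0);		/* device init */
	atomic64_inc(&sas_dev->running_req);		/* task prep */
	atomic64_dec(&sas_dev->running_req);		/* task done/abort */
	if (atomic64_read(&sas_dev->running_req))	/* e.g. busy check */
		;	/* device still has I/O in flight */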
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 2f872f784e10..d50e9cfefd24 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -162,8 +162,8 @@ out:
hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
if (task->task_done)
task->task_done(task);
- if (sas_dev && sas_dev->running_req)
- sas_dev->running_req--;
+ if (sas_dev)
+ atomic64_dec(&sas_dev->running_req);
}
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
@@ -232,8 +232,8 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
if (rc)
goto err_out;
- rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue,
- &dlvry_queue_slot);
+ rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id,
+ &dlvry_queue, &dlvry_queue_slot);
if (rc)
goto err_out_tag;
@@ -303,7 +303,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
hisi_hba->slot_prep = slot;
- sas_dev->running_req++;
+ atomic64_inc(&sas_dev->running_req);
++(*pass);
return 0;
@@ -369,9 +369,14 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
struct sas_phy *sphy = sas_phy->phy;
sphy->negotiated_linkrate = sas_phy->linkrate;
- sphy->minimum_linkrate = phy->minimum_linkrate;
sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
- sphy->maximum_linkrate = phy->maximum_linkrate;
+ sphy->maximum_linkrate_hw =
+ hisi_hba->hw->phy_get_max_linkrate();
+ if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
+ sphy->minimum_linkrate = phy->minimum_linkrate;
+
+ if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
+ sphy->maximum_linkrate = phy->maximum_linkrate;
}
if (phy->phy_type & PORT_TYPE_SAS) {
@@ -537,7 +542,7 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
struct hisi_sas_phy *phy = sas_phy->lldd_phy;
struct asd_sas_port *sas_port = sas_phy->port;
- struct hisi_sas_port *port = &hisi_hba->port[sas_phy->id];
+ struct hisi_sas_port *port = &hisi_hba->port[phy->port_id];
unsigned long flags;
if (!sas_port)
@@ -645,6 +650,9 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
break;
case PHY_FUNC_SET_LINK_RATE:
+ hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
+ break;
+
case PHY_FUNC_RELEASE_SPINUP_HOLD:
default:
return -EOPNOTSUPP;
@@ -764,7 +772,8 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
task = NULL;
}
ex_err:
- WARN_ON(retry == TASK_RETRY);
+ if (retry == TASK_RETRY)
+ dev_warn(dev, "abort tmf: executing internal task failed!\n");
sas_free_task(task);
return res;
}
@@ -960,6 +969,9 @@ static int hisi_sas_query_task(struct sas_task *task)
case TMF_RESP_FUNC_FAILED:
case TMF_RESP_FUNC_COMPLETE:
break;
+ default:
+ rc = TMF_RESP_FUNC_FAILED;
+ break;
}
}
return rc;
@@ -987,8 +999,8 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
if (rc)
goto err_out;
- rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue,
- &dlvry_queue_slot);
+ rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id,
+ &dlvry_queue, &dlvry_queue_slot);
if (rc)
goto err_out_tag;
@@ -1023,7 +1035,8 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
hisi_hba->slot_prep = slot;
- sas_dev->running_req++;
+ atomic64_inc(&sas_dev->running_req);
+
/* send abort command to our chip */
hisi_hba->hw->start_delivery(hisi_hba);
@@ -1396,10 +1409,13 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
struct hisi_hba *hisi_hba;
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
+ struct clk *refclk;
shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba));
- if (!shost)
- goto err_out;
+ if (!shost) {
+ dev_err(dev, "scsi host alloc failed\n");
+ return NULL;
+ }
hisi_hba = shost_priv(shost);
hisi_hba->hw = hw;
@@ -1432,6 +1448,12 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
goto err_out;
}
+ refclk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(refclk))
+ dev_info(dev, "no ref clk property\n");
+ else
+ hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
+
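
devm_clk_get() with a NULL id fetches the device's reference clock from the firmware node; the driver only records its rate and never prepares or enables the clock here. clk_get_rate() returns Hz, so the divide by 1,000,000 yields MHz, which init_reg_v2_hw() later compares against. A condensed sketch of the same lookup:

	struct clk *refclk = devm_clk_get(dev, NULL);

	if (!IS_ERR(refclk))
		hisi_hba->refclk_frequency_mhz =
			clk_get_rate(refclk) / 1000000;	/* 66000000 Hz -> 66 */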
if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy))
goto err_out;
@@ -1457,6 +1479,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
return shost;
err_out:
+ kfree(shost);
dev_err(dev, "shost alloc failed\n");
return NULL;
}
@@ -1483,10 +1506,8 @@ int hisi_sas_probe(struct platform_device *pdev,
int rc, phy_nr, port_nr, i;
shost = hisi_sas_shost_alloc(pdev, hw);
- if (!shost) {
- rc = -ENOMEM;
- goto err_out_ha;
- }
+ if (!shost)
+ return -ENOMEM;
sha = SHOST_TO_SAS_HA(shost);
hisi_hba = shost_priv(shost);
@@ -1496,12 +1517,13 @@ int hisi_sas_probe(struct platform_device *pdev,
arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
- if (!arr_phy || !arr_port)
- return -ENOMEM;
+ if (!arr_phy || !arr_port) {
+ rc = -ENOMEM;
+ goto err_out_ha;
+ }
sha->sas_phy = arr_phy;
sha->sas_port = arr_port;
- sha->core.shost = shost;
sha->lldd_ha = hisi_hba;
shost->transportt = hisi_sas_stt;
@@ -1546,6 +1568,7 @@ int hisi_sas_probe(struct platform_device *pdev,
err_out_register_ha:
scsi_remove_host(shost);
err_out_ha:
+ hisi_sas_free(hisi_hba);
kfree(shost);
return rc;
}
@@ -1555,12 +1578,14 @@ int hisi_sas_remove(struct platform_device *pdev)
{
struct sas_ha_struct *sha = platform_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct Scsi_Host *shost = sha->core.shost;
scsi_remove_host(sha->core.shost);
sas_unregister_ha(sha);
sas_remove_host(sha->core.shost);
hisi_sas_free(hisi_hba);
+ kfree(shost);
return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index c0ac49d8bc8d..8a1be0ba8a22 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -843,6 +843,49 @@ static void sl_notify_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
}
+static enum sas_linkrate phy_get_max_linkrate_v1_hw(void)
+{
+ return SAS_LINK_RATE_6_0_GBPS;
+}
+
+static void phy_set_linkrate_v1_hw(struct hisi_hba *hisi_hba, int phy_no,
+ struct sas_phy_linkrates *r)
+{
+ u32 prog_phy_link_rate =
+ hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ int i;
+ enum sas_linkrate min, max;
+ u32 rate_mask = 0;
+
+ if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+ max = sas_phy->phy->maximum_linkrate;
+ min = r->minimum_linkrate;
+ } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+ max = r->maximum_linkrate;
+ min = sas_phy->phy->minimum_linkrate;
+ } else
+ return;
+
+ sas_phy->phy->maximum_linkrate = max;
+ sas_phy->phy->minimum_linkrate = min;
+
+ min -= SAS_LINK_RATE_1_5_GBPS;
+ max -= SAS_LINK_RATE_1_5_GBPS;
+
+ for (i = 0; i <= max; i++)
+ rate_mask |= 1 << (i * 2);
+
+ prog_phy_link_rate &= ~0xff;
+ prog_phy_link_rate |= rate_mask;
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
+ prog_phy_link_rate);
+
+ phy_hard_reset_v1_hw(hisi_hba, phy_no);
+}
+
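
The loop above encodes the allowed rates into the low byte of PROG_PHY_LINK_RATE, one enable bit per rate at the even bit positions, after rebasing the sas_linkrate enum so that 1.5 Gbit/s is index 0. Worked example (enum values from include/scsi/sas.h, where SAS_LINK_RATE_1_5_GBPS == 8 and SAS_LINK_RATE_6_0_GBPS == 10):

	min = SAS_LINK_RATE_1_5_GBPS - SAS_LINK_RATE_1_5_GBPS;	/* 0 */
	max = SAS_LINK_RATE_6_0_GBPS - SAS_LINK_RATE_1_5_GBPS;	/* 2 */
	/* i = 0, 1, 2  ->  rate_mask = BIT(0) | BIT(2) | BIT(4) = 0x15 */

Note that the mask always starts at rate index 0, so the stored minimum_linkrate is reported back to libsas but is not actually enforced by the register programming here.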
static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
{
int i, bitmap = 0;
@@ -862,29 +905,23 @@ static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
* The call path to this function, up to writing the write
* queue pointer, must be safe from interruption.
*/
-static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, int *q, int *s)
+static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, u32 dev_id,
+ int *q, int *s)
{
struct device *dev = &hisi_hba->pdev->dev;
struct hisi_sas_dq *dq;
u32 r, w;
- int queue = hisi_hba->queue;
-
- while (1) {
- dq = &hisi_hba->dq[queue];
- w = dq->wr_point;
- r = hisi_sas_read32_relaxed(hisi_hba,
- DLVRY_Q_0_RD_PTR + (queue * 0x14));
- if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
- queue = (queue + 1) % hisi_hba->queue_count;
- if (queue == hisi_hba->queue) {
- dev_warn(dev, "could not find free slot\n");
- return -EAGAIN;
- }
- continue;
- }
- break;
+ int queue = dev_id % hisi_hba->queue_count;
+
+ dq = &hisi_hba->dq[queue];
+ w = dq->wr_point;
+ r = hisi_sas_read32_relaxed(hisi_hba,
+ DLVRY_Q_0_RD_PTR + (queue * 0x14));
+ if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
+ dev_warn(dev, "could not find free slot\n");
+ return -EAGAIN;
}
- hisi_hba->queue = (queue + 1) % hisi_hba->queue_count;
+
*q = queue;
*s = w;
return 0;
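
With this change the delivery queue is a pure function of the device id instead of a global round-robin cursor (the deleted hisi_hba->queue), so all commands for one device land on the same queue and the cross-queue scan disappears; if that single queue is full, the function now simply returns -EAGAIN and the request is retried. The mapping and the full test reduce to:

	int queue = dev_id % hisi_hba->queue_count;
	/* full when the write pointer is one slot behind the read pointer */
	if (r == (w + 1) % HISI_SAS_QUEUE_SLOTS)
		return -EAGAIN;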
@@ -1372,8 +1409,8 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
}
out:
- if (sas_dev && sas_dev->running_req)
- sas_dev->running_req--;
+ if (sas_dev)
+ atomic64_dec(&sas_dev->running_req);
hisi_sas_slot_task_free(hisi_hba, task, slot);
sts = ts->stat;
@@ -1824,6 +1861,8 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
.phy_enable = enable_phy_v1_hw,
.phy_disable = disable_phy_v1_hw,
.phy_hard_reset = phy_hard_reset_v1_hw,
+ .phy_set_linkrate = phy_set_linkrate_v1_hw,
+ .phy_get_max_linkrate = phy_get_max_linkrate_v1_hw,
.get_wideport_bitmap = get_wideport_bitmap_v1_hw,
.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V1_HW,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr),
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 9825a3f49f53..b934aec1eebb 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -55,10 +55,44 @@
#define HGC_DFX_CFG2 0xc0
#define HGC_IOMB_PROC1_STATUS 0x104
#define CFG_1US_TIMER_TRSH 0xcc
+#define HGC_LM_DFX_STATUS2 0x128
+#define HGC_LM_DFX_STATUS2_IOSTLIST_OFF 0
+#define HGC_LM_DFX_STATUS2_IOSTLIST_MSK (0xfff << \
+ HGC_LM_DFX_STATUS2_IOSTLIST_OFF)
+#define HGC_LM_DFX_STATUS2_ITCTLIST_OFF 12
+#define HGC_LM_DFX_STATUS2_ITCTLIST_MSK (0x7ff << \
+ HGC_LM_DFX_STATUS2_ITCTLIST_OFF)
+#define HGC_CQE_ECC_ADDR 0x13c
+#define HGC_CQE_ECC_1B_ADDR_OFF 0
+#define HGC_CQE_ECC_1B_ADDR_MSK (0x3f << HGC_CQE_ECC_1B_ADDR_OFF)
+#define HGC_CQE_ECC_MB_ADDR_OFF 8
+#define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF)
+#define HGC_IOST_ECC_ADDR 0x140
+#define HGC_IOST_ECC_1B_ADDR_OFF 0
+#define HGC_IOST_ECC_1B_ADDR_MSK (0x3ff << HGC_IOST_ECC_1B_ADDR_OFF)
+#define HGC_IOST_ECC_MB_ADDR_OFF 16
+#define HGC_IOST_ECC_MB_ADDR_MSK (0x3ff << HGC_IOST_ECC_MB_ADDR_OFF)
+#define HGC_DQE_ECC_ADDR 0x144
+#define HGC_DQE_ECC_1B_ADDR_OFF 0
+#define HGC_DQE_ECC_1B_ADDR_MSK (0xfff << HGC_DQE_ECC_1B_ADDR_OFF)
+#define HGC_DQE_ECC_MB_ADDR_OFF 16
+#define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF)
#define HGC_INVLD_DQE_INFO 0x148
#define HGC_INVLD_DQE_INFO_FB_CH0_OFF 9
#define HGC_INVLD_DQE_INFO_FB_CH0_MSK (0x1 << HGC_INVLD_DQE_INFO_FB_CH0_OFF)
#define HGC_INVLD_DQE_INFO_FB_CH3_OFF 18
+#define HGC_ITCT_ECC_ADDR 0x150
+#define HGC_ITCT_ECC_1B_ADDR_OFF 0
+#define HGC_ITCT_ECC_1B_ADDR_MSK (0x3ff << \
+ HGC_ITCT_ECC_1B_ADDR_OFF)
+#define HGC_ITCT_ECC_MB_ADDR_OFF 16
+#define HGC_ITCT_ECC_MB_ADDR_MSK (0x3ff << \
+ HGC_ITCT_ECC_MB_ADDR_OFF)
+#define HGC_AXI_FIFO_ERR_INFO 0x154
+#define AXI_ERR_INFO_OFF 0
+#define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF)
+#define FIFO_ERR_INFO_OFF 8
+#define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF)
#define INT_COAL_EN 0x19c
#define OQ_INT_COAL_TIME 0x1a0
#define OQ_INT_COAL_CNT 0x1a4
@@ -73,13 +107,41 @@
#define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
#define ENT_INT_SRC2 0x1bc
#define ENT_INT_SRC3 0x1c0
+#define ENT_INT_SRC3_WP_DEPTH_OFF 8
+#define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9
+#define ENT_INT_SRC3_RP_DEPTH_OFF 10
+#define ENT_INT_SRC3_AXI_OFF 11
+#define ENT_INT_SRC3_FIFO_OFF 12
+#define ENT_INT_SRC3_LM_OFF 14
#define ENT_INT_SRC3_ITC_INT_OFF 15
#define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF)
+#define ENT_INT_SRC3_ABT_OFF 16
#define ENT_INT_SRC_MSK1 0x1c4
#define ENT_INT_SRC_MSK2 0x1c8
#define ENT_INT_SRC_MSK3 0x1cc
#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31
#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
+#define SAS_ECC_INTR 0x1e8
+#define SAS_ECC_INTR_DQE_ECC_1B_OFF 0
+#define SAS_ECC_INTR_DQE_ECC_MB_OFF 1
+#define SAS_ECC_INTR_IOST_ECC_1B_OFF 2
+#define SAS_ECC_INTR_IOST_ECC_MB_OFF 3
+#define SAS_ECC_INTR_ITCT_ECC_MB_OFF 4
+#define SAS_ECC_INTR_ITCT_ECC_1B_OFF 5
+#define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF 6
+#define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF 7
+#define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF 8
+#define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF 9
+#define SAS_ECC_INTR_CQE_ECC_1B_OFF 10
+#define SAS_ECC_INTR_CQE_ECC_MB_OFF 11
+#define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF 12
+#define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF 13
+#define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF 14
+#define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF 15
+#define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF 16
+#define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF 17
+#define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF 18
+#define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 19
#define SAS_ECC_INTR_MSK 0x1ec
#define HGC_ERR_STAT_EN 0x238
#define DLVRY_Q_0_BASE_ADDR_LO 0x260
@@ -94,7 +156,20 @@
#define COMPL_Q_0_DEPTH 0x4e8
#define COMPL_Q_0_WR_PTR 0x4ec
#define COMPL_Q_0_RD_PTR 0x4f0
-
+#define HGC_RXM_DFX_STATUS14 0xae8
+#define HGC_RXM_DFX_STATUS14_MEM0_OFF 0
+#define HGC_RXM_DFX_STATUS14_MEM0_MSK (0x1ff << \
+ HGC_RXM_DFX_STATUS14_MEM0_OFF)
+#define HGC_RXM_DFX_STATUS14_MEM1_OFF 9
+#define HGC_RXM_DFX_STATUS14_MEM1_MSK (0x1ff << \
+ HGC_RXM_DFX_STATUS14_MEM1_OFF)
+#define HGC_RXM_DFX_STATUS14_MEM2_OFF 18
+#define HGC_RXM_DFX_STATUS14_MEM2_MSK (0x1ff << \
+ HGC_RXM_DFX_STATUS14_MEM2_OFF)
+#define HGC_RXM_DFX_STATUS15 0xaec
+#define HGC_RXM_DFX_STATUS15_MEM3_OFF 0
+#define HGC_RXM_DFX_STATUS15_MEM3_MSK (0x1ff << \
+ HGC_RXM_DFX_STATUS15_MEM3_OFF)
/* phy registers need init */
#define PORT_BASE (0x2000)
@@ -119,6 +194,9 @@
#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
#define SL_CONTROL_CTA_OFF 17
#define SL_CONTROL_CTA_MSK (0x1 << SL_CONTROL_CTA_OFF)
+#define RX_PRIMS_STATUS (PORT_BASE + 0x98)
+#define RX_BCAST_CHG_OFF 1
+#define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF)
#define TX_ID_DWORD0 (PORT_BASE + 0x9c)
#define TX_ID_DWORD1 (PORT_BASE + 0xa0)
#define TX_ID_DWORD2 (PORT_BASE + 0xa4)
@@ -267,6 +345,8 @@
#define ITCT_HDR_RTOLT_OFF 48
#define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF)
+#define HISI_SAS_FATAL_INT_NR 2
+
struct hisi_sas_complete_v2_hdr {
__le32 dw0;
__le32 dw1;
@@ -659,8 +739,6 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
qw0 &= ~(1 << ITCT_HDR_VALID_OFF);
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
ENT_INT_SRC3_ITC_INT_MSK);
- hisi_hba->devices[dev_id].dev_type = SAS_PHY_UNUSED;
- hisi_hba->devices[dev_id].dev_status = HISI_SAS_DEV_NORMAL;
/* clear the itct */
hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
@@ -808,7 +886,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffffffe);
- hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfffff3c0);
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30);
for (i = 0; i < hisi_hba->queue_count; i++)
hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
@@ -824,7 +902,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x10);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
@@ -836,7 +914,9 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0);
- hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694);
+ if (hisi_hba->refclk_frequency_mhz == 66)
+ hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694);
+ /* otherwise leave PHY_CTRL at its power-on default */
}
for (i = 0; i < hisi_hba->queue_count; i++) {
@@ -980,6 +1060,49 @@ static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
}
+static enum sas_linkrate phy_get_max_linkrate_v2_hw(void)
+{
+ return SAS_LINK_RATE_12_0_GBPS;
+}
+
+static void phy_set_linkrate_v2_hw(struct hisi_hba *hisi_hba, int phy_no,
+ struct sas_phy_linkrates *r)
+{
+ u32 prog_phy_link_rate =
+ hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ int i;
+ enum sas_linkrate min, max;
+ u32 rate_mask = 0;
+
+ if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+ max = sas_phy->phy->maximum_linkrate;
+ min = r->minimum_linkrate;
+ } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+ max = r->maximum_linkrate;
+ min = sas_phy->phy->minimum_linkrate;
+ } else
+ return;
+
+ sas_phy->phy->maximum_linkrate = max;
+ sas_phy->phy->minimum_linkrate = min;
+
+ min -= SAS_LINK_RATE_1_5_GBPS;
+ max -= SAS_LINK_RATE_1_5_GBPS;
+
+ for (i = 0; i <= max; i++)
+ rate_mask |= 1 << (i * 2);
+
+ prog_phy_link_rate &= ~0xff;
+ prog_phy_link_rate |= rate_mask;
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
+ prog_phy_link_rate);
+
+ phy_hard_reset_v2_hw(hisi_hba, phy_no);
+}
+
static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
{
int i, bitmap = 0;
@@ -1010,29 +1133,24 @@ static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
* The call path to this function, up to writing the write
* queue pointer, must be safe from interruption.
*/
-static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, int *q, int *s)
+static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, u32 dev_id,
+ int *q, int *s)
{
struct device *dev = &hisi_hba->pdev->dev;
struct hisi_sas_dq *dq;
u32 r, w;
- int queue = hisi_hba->queue;
-
- while (1) {
- dq = &hisi_hba->dq[queue];
- w = dq->wr_point;
- r = hisi_sas_read32_relaxed(hisi_hba,
- DLVRY_Q_0_RD_PTR + (queue * 0x14));
- if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
- queue = (queue + 1) % hisi_hba->queue_count;
- if (queue == hisi_hba->queue) {
- dev_warn(dev, "could not find free slot\n");
- return -EAGAIN;
- }
- continue;
- }
- break;
+ int queue = dev_id % hisi_hba->queue_count;
+
+ dq = &hisi_hba->dq[queue];
+ w = dq->wr_point;
+ r = hisi_sas_read32_relaxed(hisi_hba,
+ DLVRY_Q_0_RD_PTR + (queue * 0x14));
+ if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
+ dev_warn(dev, "full queue=%d r=%d w=%d\n\n",
+ queue, r, w);
+ return -EAGAIN;
}
- hisi_hba->queue = (queue + 1) % hisi_hba->queue_count;
+
*q = queue;
*s = w;
return 0;
@@ -1653,8 +1771,8 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
}
out:
- if (sas_dev && sas_dev->running_req)
- sas_dev->running_req--;
+ if (sas_dev)
+ atomic64_dec(&sas_dev->running_req);
hisi_sas_slot_task_free(hisi_hba, task, slot);
sts = ts->stat;
@@ -1675,6 +1793,7 @@ static u8 get_ata_protocol(u8 cmd, int direction)
case ATA_CMD_NCQ_NON_DATA:
return SATA_PROTOCOL_FPDMA;
+ case ATA_CMD_DOWNLOAD_MICRO:
case ATA_CMD_ID_ATA:
case ATA_CMD_PMP_READ:
case ATA_CMD_READ_LOG_EXT:
@@ -1686,18 +1805,27 @@ static u8 get_ata_protocol(u8 cmd, int direction)
case ATA_CMD_PIO_WRITE_EXT:
return SATA_PROTOCOL_PIO;
+ case ATA_CMD_DSM:
+ case ATA_CMD_DOWNLOAD_MICRO_DMA:
+ case ATA_CMD_PMP_READ_DMA:
+ case ATA_CMD_PMP_WRITE_DMA:
case ATA_CMD_READ:
case ATA_CMD_READ_EXT:
case ATA_CMD_READ_LOG_DMA_EXT:
+ case ATA_CMD_READ_STREAM_DMA_EXT:
+ case ATA_CMD_TRUSTED_RCV_DMA:
+ case ATA_CMD_TRUSTED_SND_DMA:
case ATA_CMD_WRITE:
case ATA_CMD_WRITE_EXT:
+ case ATA_CMD_WRITE_FUA_EXT:
case ATA_CMD_WRITE_QUEUED:
case ATA_CMD_WRITE_LOG_DMA_EXT:
+ case ATA_CMD_WRITE_STREAM_DMA_EXT:
return SATA_PROTOCOL_DMA;
- case ATA_CMD_DOWNLOAD_MICRO:
- case ATA_CMD_DEV_RESET:
case ATA_CMD_CHK_POWER:
+ case ATA_CMD_DEV_RESET:
+ case ATA_CMD_EDD:
case ATA_CMD_FLUSH:
case ATA_CMD_FLUSH_EXT:
case ATA_CMD_VERIFY:
@@ -1970,9 +2098,12 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+ u32 bcast_status;
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
+ if (bcast_status & RX_BCAST_CHG_MSK)
+ sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_RX_BCST_ACK_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
@@ -2005,8 +2136,9 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
if (irq_value1) {
if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
CHL_INT1_DMAC_TX_ECC_ERR_MSK))
- panic("%s: DMAC RX/TX ecc bad error! (0x%x)",
- dev_name(dev), irq_value1);
+ panic("%s: DMAC RX/TX ecc bad error!\
+ (0x%x)",
+ dev_name(dev), irq_value1);
hisi_sas_phy_write32(hisi_hba, phy_no,
CHL_INT1, irq_value1);
@@ -2037,6 +2169,318 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
return IRQ_HANDLED;
}
+static void
+one_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value)
+{
+ struct device *dev = &hisi_hba->pdev->dev;
+ u32 reg_val;
+
+ if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
+ dev_warn(dev, "hgc_dqe_acc1b_intr found: \
+ Ram address is 0x%08X\n",
+ (reg_val & HGC_DQE_ECC_1B_ADDR_MSK) >>
+ HGC_DQE_ECC_1B_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR);
+ dev_warn(dev, "hgc_iost_acc1b_intr found: \
+ Ram address is 0x%08X\n",
+ (reg_val & HGC_IOST_ECC_1B_ADDR_MSK) >>
+ HGC_IOST_ECC_1B_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR);
+ dev_warn(dev, "hgc_itct_acc1b_intr found: \
+ Ram address is 0x%08X\n",
+ (reg_val & HGC_ITCT_ECC_1B_ADDR_MSK) >>
+ HGC_ITCT_ECC_1B_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
+ dev_warn(dev, "hgc_iostl_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >>
+ HGC_LM_DFX_STATUS2_IOSTLIST_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
+ dev_warn(dev, "hgc_itctl_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >>
+ HGC_LM_DFX_STATUS2_ITCTLIST_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR);
+ dev_warn(dev, "hgc_cqe_acc1b_intr found: \
+ Ram address is 0x%08X\n",
+ (reg_val & HGC_CQE_ECC_1B_ADDR_MSK) >>
+ HGC_CQE_ECC_1B_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ dev_warn(dev, "rxm_mem0_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM0_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ dev_warn(dev, "rxm_mem1_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM1_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ dev_warn(dev, "rxm_mem2_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM2_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15);
+ dev_warn(dev, "rxm_mem3_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >>
+ HGC_RXM_DFX_STATUS15_MEM3_OFF);
+ }
+
+}
+
+static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba,
+ u32 irq_value)
+{
+ u32 reg_val;
+ struct device *dev = &hisi_hba->pdev->dev;
+
+ if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
+ panic("%s: hgc_dqe_accbad_intr (0x%x) found: \
+ Ram address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_DQE_ECC_MB_ADDR_MSK) >>
+ HGC_DQE_ECC_MB_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR);
+ panic("%s: hgc_iost_accbad_intr (0x%x) found: \
+ Ram address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_IOST_ECC_MB_ADDR_MSK) >>
+ HGC_IOST_ECC_MB_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR);
+ panic("%s: hgc_itct_accbad_intr (0x%x) found: \
+ Ram address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_ITCT_ECC_MB_ADDR_MSK) >>
+ HGC_ITCT_ECC_MB_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
+ panic("%s: hgc_iostl_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >>
+ HGC_LM_DFX_STATUS2_IOSTLIST_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
+ panic("%s: hgc_itctl_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >>
+ HGC_LM_DFX_STATUS2_ITCTLIST_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR);
+ panic("%s: hgc_cqe_accbad_intr (0x%x) found: \
+ Ram address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_CQE_ECC_MB_ADDR_MSK) >>
+ HGC_CQE_ECC_MB_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ panic("%s: rxm_mem0_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM0_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ panic("%s: rxm_mem1_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM1_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ panic("%s: rxm_mem2_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM2_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15);
+ panic("%s: rxm_mem3_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >>
+ HGC_RXM_DFX_STATUS15_MEM3_OFF);
+ }
+
+}
+
+static irqreturn_t fatal_ecc_int_v2_hw(int irq_no, void *p)
+{
+ struct hisi_hba *hisi_hba = p;
+ u32 irq_value, irq_msk;
+
+ irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK);
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk | 0xffffffff);
+
+ irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR);
+ if (irq_value) {
+ one_bit_ecc_error_process_v2_hw(hisi_hba, irq_value);
+ multi_bit_ecc_error_process_v2_hw(hisi_hba, irq_value);
+ }
+
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value);
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk);
+
+ return IRQ_HANDLED;
+}
+
+#define AXI_ERR_NR 8
+static const char axi_err_info[AXI_ERR_NR][32] = {
+ "IOST_AXI_W_ERR",
+ "IOST_AXI_R_ERR",
+ "ITCT_AXI_W_ERR",
+ "ITCT_AXI_R_ERR",
+ "SATA_AXI_W_ERR",
+ "SATA_AXI_R_ERR",
+ "DQE_AXI_R_ERR",
+ "CQE_AXI_W_ERR"
+};
+
+#define FIFO_ERR_NR 5
+static const char fifo_err_info[FIFO_ERR_NR][32] = {
+ "CQE_WINFO_FIFO",
+ "CQE_MSG_FIFIO",
+ "GETDQE_FIFO",
+ "CMDP_FIFO",
+ "AWTCTRL_FIFO"
+};
+
+static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
+{
+ struct hisi_hba *hisi_hba = p;
+ u32 irq_value, irq_msk, err_value;
+ struct device *dev = &hisi_hba->pdev->dev;
+
+ irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0xfffffffe);
+
+ irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+ if (irq_value) {
+ if (irq_value & BIT(ENT_INT_SRC3_WP_DEPTH_OFF)) {
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 << ENT_INT_SRC3_WP_DEPTH_OFF);
+ panic("%s: write pointer and depth error (0x%x) \
+ found!\n",
+ dev_name(dev), irq_value);
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF)) {
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 <<
+ ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF);
+ panic("%s: iptt no match slot error (0x%x) found!\n",
+ dev_name(dev), irq_value);
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_RP_DEPTH_OFF))
+ panic("%s: read pointer and depth error (0x%x) \
+ found!\n",
+ dev_name(dev), irq_value);
+
+ if (irq_value & BIT(ENT_INT_SRC3_AXI_OFF)) {
+ int i;
+
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 << ENT_INT_SRC3_AXI_OFF);
+ err_value = hisi_sas_read32(hisi_hba,
+ HGC_AXI_FIFO_ERR_INFO);
+
+ for (i = 0; i < AXI_ERR_NR; i++) {
+ if (err_value & BIT(i))
+ panic("%s: %s (0x%x) found!\n",
+ dev_name(dev),
+ axi_err_info[i], irq_value);
+ }
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_FIFO_OFF)) {
+ int i;
+
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 << ENT_INT_SRC3_FIFO_OFF);
+ err_value = hisi_sas_read32(hisi_hba,
+ HGC_AXI_FIFO_ERR_INFO);
+
+ for (i = 0; i < FIFO_ERR_NR; i++) {
+ if (err_value & BIT(AXI_ERR_NR + i))
+ panic("%s: %s (0x%x) found!\n",
+ dev_name(dev),
+ fifo_err_info[i], irq_value);
+ }
+
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_LM_OFF)) {
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 << ENT_INT_SRC3_LM_OFF);
+ panic("%s: LM add/fetch list error (0x%x) found!\n",
+ dev_name(dev), irq_value);
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_ABT_OFF)) {
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 << ENT_INT_SRC3_ABT_OFF);
+ panic("%s: SAS_HGC_ABT fetch LM list error (0x%x) found!\n",
+ dev_name(dev), irq_value);
+ }
+ }
+
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk);
+
+ return IRQ_HANDLED;
+}
+
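
HGC_AXI_FIFO_ERR_INFO packs both decode tables into a single register: bits 0-7 index axi_err_info[] and bits 8-12 index fifo_err_info[], which is why the FIFO branch tests BIT(AXI_ERR_NR + i). Two worked decodes of err_value:

	/* err_value = 0x004: bit 2 -> axi_err_info[2]  -> "ITCT_AXI_W_ERR" */
	/* err_value = 0x100: bit 8 -> fifo_err_info[0] -> "CQE_WINFO_FIFO" */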
static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
{
struct hisi_sas_cq *cq = p;
@@ -2136,6 +2580,16 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
goto end;
}
+ /* check ERR bit of Status Register */
+ if (fis->status & ATA_ERR) {
+ dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no,
+ fis->status);
+ disable_phy_v2_hw(hisi_hba, phy_no);
+ enable_phy_v2_hw(hisi_hba, phy_no);
+ res = IRQ_NONE;
+ goto end;
+ }
+
if (unlikely(phy_no == 8)) {
u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
@@ -2190,6 +2644,11 @@ static irq_handler_t phy_interrupts[HISI_SAS_PHY_INT_NR] = {
int_chnl_int_v2_hw,
};
+static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = {
+ fatal_ecc_int_v2_hw,
+ fatal_axi_int_v2_hw
+};
+
/**
* There is a limitation in the hip06 chipset that we need
* to map in all mbigen interrupts, even if they are not used.
@@ -2245,6 +2704,26 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
}
}
+ for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++) {
+ int idx = i;
+
+ irq = irq_map[idx + 81];
+ if (!irq) {
+ dev_err(dev, "irq init: fail map fatal interrupt %d\n",
+ idx);
+ return -ENOENT;
+ }
+
+ rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0,
+ DRV_NAME " fatal", hisi_hba);
+ if (rc) {
+ dev_err(dev,
+ "irq init: could not request fatal interrupt %d, rc=%d\n",
+ irq, rc);
+ return -ENOENT;
+ }
+ }
+
for (i = 0; i < hisi_hba->queue_count; i++) {
int idx = i + 96; /* First cq interrupt is irq96 */
@@ -2303,12 +2782,26 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.phy_enable = enable_phy_v2_hw,
.phy_disable = disable_phy_v2_hw,
.phy_hard_reset = phy_hard_reset_v2_hw,
+ .phy_set_linkrate = phy_set_linkrate_v2_hw,
+ .phy_get_max_linkrate = phy_get_max_linkrate_v2_hw,
.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
};
static int hisi_sas_v2_probe(struct platform_device *pdev)
{
+ /*
+ * Check if we should defer the probe before we probe the
+ * upper layer, as it's hard to defer later on.
+ */
+ int ret = platform_get_irq(pdev, 0);
+
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "cannot obtain irq\n");
+ return ret;
+ }
+
return hisi_sas_probe(pdev, &hisi_sas_v2_hw);
}
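
platform_get_irq() returns -EPROBE_DEFER while the interrupt parent (mbigen on these SoCs) has not probed yet, and returning that code makes the driver core requeue this probe; doing the check before hisi_sas_probe() avoids tearing down a half-built SCSI host later. The same idiom in a hypothetical minimal consumer:

	static int foo_probe(struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)
			return irq;	/* -EPROBE_DEFER: core retries later */

		/* resources are ready; safe to register upper layers */
		return 0;
	}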
@@ -2319,6 +2812,7 @@ static int hisi_sas_v2_remove(struct platform_device *pdev)
static const struct of_device_id sas_v2_of_match[] = {
{ .compatible = "hisilicon,hip06-sas-v2",},
+ { .compatible = "hisilicon,hip07-sas-v2",},
{},
};
MODULE_DEVICE_TABLE(of, sas_v2_of_match);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index a1d6ab76a514..691a09316952 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -276,6 +276,9 @@ static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
+static int wait_for_device_to_become_ready(struct ctlr_info *h,
+ unsigned char lunaddr[],
+ int reply_queue);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
@@ -700,9 +703,7 @@ static ssize_t lunid_show(struct device *dev,
}
memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
spin_unlock_irqrestore(&h->lock, flags);
- return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- lunid[0], lunid[1], lunid[2], lunid[3],
- lunid[4], lunid[5], lunid[6], lunid[7]);
+ return snprintf(buf, 20, "0x%8phN\n", lunid);
}
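
%phN is the kernel's hex-dump printk extension with no byte separator, so "%8phN" consumes one pointer argument and prints exactly 8 bytes as 16 contiguous hex digits, replacing the eight hand-written %02x conversions and their eight arguments. A minimal sketch:

	u8 lunid[8] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x11, 0x22, 0x33 };

	pr_info("0x%8phN\n", lunid);	/* prints "0xdeadbeef00112233" */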
static ssize_t unique_id_show(struct device *dev,
@@ -864,6 +865,16 @@ static ssize_t path_info_show(struct device *dev,
return output_len;
}
+static ssize_t host_show_ctlr_num(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+
+ h = shost_to_hba(shost);
+ return snprintf(buf, 20, "%d\n", h->ctlr);
+}
+
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
@@ -887,6 +898,8 @@ static DEVICE_ATTR(resettable, S_IRUGO,
host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
host_show_lockup_detected, NULL);
+static DEVICE_ATTR(ctlr_num, S_IRUGO,
+ host_show_ctlr_num, NULL);
static struct device_attribute *hpsa_sdev_attrs[] = {
&dev_attr_raid_level,
@@ -907,6 +920,7 @@ static struct device_attribute *hpsa_shost_attrs[] = {
&dev_attr_hp_ssd_smart_path_status,
&dev_attr_raid_offload_debug,
&dev_attr_lockup_detected,
+ &dev_attr_ctlr_num,
NULL,
};
@@ -1001,7 +1015,7 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
{
if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
- if (unlikely(!h->msix_vector))
+ if (unlikely(!h->msix_vectors))
return;
if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
c->Header.ReplyQueue =
@@ -2541,7 +2555,7 @@ static void complete_scsi_command(struct CommandList *cp)
if ((unlikely(hpsa_is_pending_event(cp)))) {
if (cp->reset_pending)
- return hpsa_cmd_resolve_and_free(h, cp);
+ return hpsa_cmd_free_and_done(h, cp, cmd);
if (cp->abort_pending)
return hpsa_cmd_abort_and_free(h, cp, cmd);
}
@@ -2824,14 +2838,8 @@ static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
const u8 *cdb = c->Request.CDB;
const u8 *lun = c->Header.LUN.LunAddrBytes;
- dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
- " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- txt, lun[0], lun[1], lun[2], lun[3],
- lun[4], lun[5], lun[6], lun[7],
- cdb[0], cdb[1], cdb[2], cdb[3],
- cdb[4], cdb[5], cdb[6], cdb[7],
- cdb[8], cdb[9], cdb[10], cdb[11],
- cdb[12], cdb[13], cdb[14], cdb[15]);
+ dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
+ txt, lun, cdb);
}
static void hpsa_scsi_interpret_error(struct ctlr_info *h,
@@ -3080,6 +3088,8 @@ static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
if (unlikely(rc))
atomic_set(&dev->reset_cmds_out, 0);
+ else
+ wait_for_device_to_become_ready(h, scsi3addr, 0);
mutex_unlock(&h->reset_mutex);
return rc;
@@ -3623,8 +3633,32 @@ out:
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
struct ReportExtendedLUNdata *buf, int bufsize)
{
- return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
- HPSA_REPORT_PHYS_EXTENDED);
+ int rc;
+ struct ReportLUNdata *lbuf;
+
+ rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
+ HPSA_REPORT_PHYS_EXTENDED);
+ if (!rc || !hpsa_allow_any)
+ return rc;
+
+ /* REPORT PHYS EXTENDED is not supported */
+ lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
+ if (!lbuf)
+ return -ENOMEM;
+
+ rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
+ if (!rc) {
+ int i;
+ u32 nphys;
+
+ /* Copy ReportLUNdata header */
+ memcpy(buf, lbuf, 8);
+ nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
+ for (i = 0; i < nphys; i++)
+ memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
+ }
+ kfree(lbuf);
+ return rc;
}
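
The fallback path issues the non-extended report (options 0) and repacks it into the extended buffer: the first 8 bytes are the common ReportLUNdata header, then each legacy entry is an 8-byte LUN copied into the extended record's lunid field. LUNListLength is a big-endian byte count, so dividing by the 8-byte entry size yields the device count:

	nphys = be32_to_cpu(*(__be32 *)lbuf->LUNListLength) / 8;
	/* e.g. LUNListLength 0x00000040 -> 64 bytes -> 8 physical devices */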
static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
@@ -5488,7 +5522,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
dev = cmd->device->hostdata;
if (!dev) {
- cmd->result = NOT_READY << 16; /* host byte */
+ cmd->result = DID_NO_CONNECT << 16;
cmd->scsi_done(cmd);
return 0;
}
@@ -5569,6 +5603,14 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
if (unlikely(lockup_detected(h)))
return hpsa_scan_complete(h);
+ /*
+ * Do the scan after a reset completion
+ */
+ if (h->reset_in_progress) {
+ h->drv_req_rescan = 1;
+ return;
+ }
+
hpsa_update_scsi_devices(h);
hpsa_scan_complete(h);
@@ -5624,7 +5666,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
sh->sg_tablesize = h->maxsgentries;
sh->transportt = hpsa_sas_transport_template;
sh->hostdata[0] = (unsigned long) h;
- sh->irq = h->intr[h->intr_mode];
+ sh->irq = pci_irq_vector(h->pdev, 0);
sh->unique_id = sh->irq;
h->scsi_host = sh;
@@ -5999,11 +6041,9 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
if (h->raid_offload_debug > 0)
dev_info(&h->pdev->dev,
- "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ "scsi %d:%d:%d:%d %s scsi3addr 0x%8phN\n",
h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
- "Reset as abort",
- scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
- scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
+ "Reset as abort", scsi3addr);
if (!dev->offload_enabled) {
dev_warn(&h->pdev->dev,
@@ -6020,32 +6060,28 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
/* send the reset */
if (h->raid_offload_debug > 0)
dev_info(&h->pdev->dev,
- "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- psa[0], psa[1], psa[2], psa[3],
- psa[4], psa[5], psa[6], psa[7]);
+ "Reset as abort: Resetting physical device at scsi3addr 0x%8phN\n",
+ psa);
rc = hpsa_do_reset(h, dev, psa, HPSA_PHYS_TARGET_RESET, reply_queue);
if (rc != 0) {
dev_warn(&h->pdev->dev,
- "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- psa[0], psa[1], psa[2], psa[3],
- psa[4], psa[5], psa[6], psa[7]);
+ "Reset as abort: Failed on physical device at scsi3addr 0x%8phN\n",
+ psa);
return rc; /* failed to reset */
}
/* wait for device to recover */
if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
dev_warn(&h->pdev->dev,
- "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- psa[0], psa[1], psa[2], psa[3],
- psa[4], psa[5], psa[6], psa[7]);
+ "Reset as abort: Failed: Device never recovered from reset: 0x%8phN\n",
+ psa);
return -1; /* failed to recover */
}
/* device recovered */
dev_info(&h->pdev->dev,
- "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- psa[0], psa[1], psa[2], psa[3],
- psa[4], psa[5], psa[6], psa[7]);
+ "Reset as abort: Device recovered from reset: scsi3addr 0x%8phN\n",
+ psa);
return rc; /* success */
}
@@ -6663,8 +6699,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
return -EINVAL;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
- ioc = (BIG_IOCTL_Command_struct *)
- kmalloc(sizeof(*ioc), GFP_KERNEL);
+ ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
if (!ioc) {
status = -ENOMEM;
goto cleanup1;
@@ -7658,67 +7693,41 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
{
- if (h->msix_vector) {
- if (h->pdev->msix_enabled)
- pci_disable_msix(h->pdev);
- h->msix_vector = 0;
- } else if (h->msi_vector) {
- if (h->pdev->msi_enabled)
- pci_disable_msi(h->pdev);
- h->msi_vector = 0;
- }
+ pci_free_irq_vectors(h->pdev);
+ h->msix_vectors = 0;
}
/* If MSI/MSI-X is supported by the kernel we will try to enable it on
* controllers that are capable. If not, we use legacy INTx mode.
*/
-static void hpsa_interrupt_mode(struct ctlr_info *h)
+static int hpsa_interrupt_mode(struct ctlr_info *h)
{
-#ifdef CONFIG_PCI_MSI
- int err, i;
- struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
-
- for (i = 0; i < MAX_REPLY_QUEUES; i++) {
- hpsa_msix_entries[i].vector = 0;
- hpsa_msix_entries[i].entry = i;
- }
+ unsigned int flags = PCI_IRQ_LEGACY;
+ int ret;
/* Some boards advertise MSI but don't really support it */
- if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
- (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
- goto default_int_mode;
- if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
- dev_info(&h->pdev->dev, "MSI-X capable controller\n");
- h->msix_vector = MAX_REPLY_QUEUES;
- if (h->msix_vector > num_online_cpus())
- h->msix_vector = num_online_cpus();
- err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
- 1, h->msix_vector);
- if (err < 0) {
- dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
- h->msix_vector = 0;
- goto single_msi_mode;
- } else if (err < h->msix_vector) {
- dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
- "available\n", err);
+ switch (h->board_id) {
+ case 0x40700E11:
+ case 0x40800E11:
+ case 0x40820E11:
+ case 0x40830E11:
+ break;
+ default:
+ ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ if (ret > 0) {
+ h->msix_vectors = ret;
+ return 0;
}
- h->msix_vector = err;
- for (i = 0; i < h->msix_vector; i++)
- h->intr[i] = hpsa_msix_entries[i].vector;
- return;
- }
-single_msi_mode:
- if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
- dev_info(&h->pdev->dev, "MSI capable controller\n");
- if (!pci_enable_msi(h->pdev))
- h->msi_vector = 1;
- else
- dev_warn(&h->pdev->dev, "MSI init failed\n");
+
+ flags |= PCI_IRQ_MSI;
+ break;
}
-default_int_mode:
-#endif /* CONFIG_PCI_MSI */
- /* if we get here we're going to use the default interrupt mode */
- h->intr[h->intr_mode] = h->pdev->irq;
+
+ ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
+ if (ret < 0)
+ return ret;
+ return 0;
}
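
This is the pci_alloc_irq_vectors() conversion pattern referenced throughout this series: one call tries a range of MSI-X vectors (PCI_IRQ_AFFINITY also spreads them across CPUs), and on failure a second call falls back to a single MSI or legacy INTx vector; pci_irq_vector(pdev, i) then replaces the driver's private vector table. A condensed sketch with hypothetical names (MAX_QUEUES, handler, name):

	int i, nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, MAX_QUEUES,
				     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (nvec < 0)
		nvec = pci_alloc_irq_vectors(pdev, 1, 1,
					     PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++)
		request_irq(pci_irq_vector(pdev, i), handler, 0, name, dev);
	/* teardown order: free_irq() each, then pci_free_irq_vectors(pdev) */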
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
@@ -8074,7 +8083,9 @@ static int hpsa_pci_init(struct ctlr_info *h)
pci_set_master(h->pdev);
- hpsa_interrupt_mode(h);
+ err = hpsa_interrupt_mode(h);
+ if (err)
+ goto clean1;
err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
if (err)
goto clean2; /* intmode+region, pci */
@@ -8110,6 +8121,7 @@ clean3: /* vaddr, intmode+region, pci */
h->vaddr = NULL;
clean2: /* intmode+region, pci */
hpsa_disable_interrupt_mode(h);
+clean1:
/*
* call pci_disable_device before pci_release_regions per
* Documentation/PCI/pci.txt
@@ -8243,34 +8255,20 @@ clean_up:
return -ENOMEM;
}
-static void hpsa_irq_affinity_hints(struct ctlr_info *h)
-{
- int i, cpu;
-
- cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < h->msix_vector; i++) {
- irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
- cpu = cpumask_next(cpu, cpu_online_mask);
- }
-}
-
/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
static void hpsa_free_irqs(struct ctlr_info *h)
{
int i;
- if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
+ if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
/* Single reply queue, only one irq to free */
- i = h->intr_mode;
- irq_set_affinity_hint(h->intr[i], NULL);
- free_irq(h->intr[i], &h->q[i]);
- h->q[i] = 0;
+ free_irq(pci_irq_vector(h->pdev, 0), &h->q[h->intr_mode]);
+ h->q[h->intr_mode] = 0;
return;
}
- for (i = 0; i < h->msix_vector; i++) {
- irq_set_affinity_hint(h->intr[i], NULL);
- free_irq(h->intr[i], &h->q[i]);
+ for (i = 0; i < h->msix_vectors; i++) {
+ free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
h->q[i] = 0;
}
for (; i < MAX_REPLY_QUEUES; i++)
@@ -8291,11 +8289,11 @@ static int hpsa_request_irqs(struct ctlr_info *h,
for (i = 0; i < MAX_REPLY_QUEUES; i++)
h->q[i] = (u8) i;
- if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
+ if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
/* If performant mode and MSI-X, use multiple reply queues */
- for (i = 0; i < h->msix_vector; i++) {
+ for (i = 0; i < h->msix_vectors; i++) {
sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
- rc = request_irq(h->intr[i], msixhandler,
+ rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
0, h->intrname[i],
&h->q[i]);
if (rc) {
@@ -8303,9 +8301,9 @@ static int hpsa_request_irqs(struct ctlr_info *h,
dev_err(&h->pdev->dev,
"failed to get irq %d for %s\n",
- h->intr[i], h->devname);
+ pci_irq_vector(h->pdev, i), h->devname);
for (j = 0; j < i; j++) {
- free_irq(h->intr[j], &h->q[j]);
+ free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
h->q[j] = 0;
}
for (; j < MAX_REPLY_QUEUES; j++)
@@ -8313,33 +8311,27 @@ static int hpsa_request_irqs(struct ctlr_info *h,
return rc;
}
}
- hpsa_irq_affinity_hints(h);
} else {
/* Use single reply pool */
- if (h->msix_vector > 0 || h->msi_vector) {
- if (h->msix_vector)
- sprintf(h->intrname[h->intr_mode],
- "%s-msix", h->devname);
- else
- sprintf(h->intrname[h->intr_mode],
- "%s-msi", h->devname);
- rc = request_irq(h->intr[h->intr_mode],
+ if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
+ sprintf(h->intrname[0], "%s-msi%s", h->devname,
+ h->msix_vectors ? "x" : "");
+ rc = request_irq(pci_irq_vector(h->pdev, 0),
msixhandler, 0,
- h->intrname[h->intr_mode],
+ h->intrname[0],
&h->q[h->intr_mode]);
} else {
sprintf(h->intrname[h->intr_mode],
"%s-intx", h->devname);
- rc = request_irq(h->intr[h->intr_mode],
+ rc = request_irq(pci_irq_vector(h->pdev, 0),
intxhandler, IRQF_SHARED,
- h->intrname[h->intr_mode],
+ h->intrname[0],
&h->q[h->intr_mode]);
}
- irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
}
if (rc) {
dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
- h->intr[h->intr_mode], h->devname);
+ pci_irq_vector(h->pdev, 0), h->devname);
hpsa_free_irqs(h);
return -ENODEV;
}
@@ -8640,6 +8632,14 @@ static void hpsa_rescan_ctlr_worker(struct work_struct *work)
if (h->remove_in_progress)
return;
+ /*
+ * Do the scan after the reset
+ */
+ if (h->reset_in_progress) {
+ h->drv_req_rescan = 1;
+ return;
+ }
+
if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
scsi_host_get(h->scsi_host);
hpsa_ack_ctlr_events(h);
@@ -9525,7 +9525,7 @@ static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
return rc;
}
- h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
+ h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
hpsa_get_max_perf_mode_cmds(h);
/* Performant mode ring buffer and supporting data structures */
h->reply_queue_size = h->max_commands * sizeof(u64);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 9ea162de80dc..64e98295b707 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -176,9 +176,7 @@ struct ctlr_info {
# define DOORBELL_INT 1
# define SIMPLE_MODE_INT 2
# define MEMQ_MODE_INT 3
- unsigned int intr[MAX_REPLY_QUEUES];
- unsigned int msix_vector;
- unsigned int msi_vector;
+ unsigned int msix_vectors;
int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
struct access_method access;
@@ -466,7 +464,7 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
unsigned long register_value = FIFO_EMPTY;
/* msi auto clears the interrupt pending bit. */
- if (unlikely(!(h->msi_vector || h->msix_vector))) {
+ if (unlikely(!(h->pdev->msi_enabled || h->msix_vectors))) {
/* flush the controller write of the reply queue by reading
* outbound doorbell status register.
*/
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 7e487c78279c..78b72c28a55d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -32,6 +32,7 @@
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/stringify.h>
+#include <linux/bsg-lib.h>
#include <asm/firmware.h>
#include <asm/irq.h>
#include <asm/vio.h>
@@ -1701,14 +1702,14 @@ static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
/**
* ibmvfc_bsg_timeout - Handle a BSG timeout
- * @job: struct fc_bsg_job that timed out
+ * @job: struct bsg_job that timed out
*
* Returns:
* 0 on success / other on failure
**/
-static int ibmvfc_bsg_timeout(struct fc_bsg_job *job)
+static int ibmvfc_bsg_timeout(struct bsg_job *job)
{
- struct ibmvfc_host *vhost = shost_priv(job->shost);
+ struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
unsigned long port_id = (unsigned long)job->dd_data;
struct ibmvfc_event *evt;
struct ibmvfc_tmf *tmf;
@@ -1814,41 +1815,43 @@ unlock_out:
/**
* ibmvfc_bsg_request - Handle a BSG request
- * @job: struct fc_bsg_job to be executed
+ * @job: struct bsg_job to be executed
*
* Returns:
* 0 on success / other on failure
**/
-static int ibmvfc_bsg_request(struct fc_bsg_job *job)
+static int ibmvfc_bsg_request(struct bsg_job *job)
{
- struct ibmvfc_host *vhost = shost_priv(job->shost);
- struct fc_rport *rport = job->rport;
+ struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
+ struct fc_rport *rport = fc_bsg_to_rport(job);
struct ibmvfc_passthru_mad *mad;
struct ibmvfc_event *evt;
union ibmvfc_iu rsp_iu;
unsigned long flags, port_id = -1;
- unsigned int code = job->request->msgcode;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ unsigned int code = bsg_request->msgcode;
int rc = 0, req_seg, rsp_seg, issue_login = 0;
u32 fc_flags, rsp_len;
ENTER;
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (rport)
port_id = rport->port_id;
switch (code) {
case FC_BSG_HST_ELS_NOLOGIN:
- port_id = (job->request->rqst_data.h_els.port_id[0] << 16) |
- (job->request->rqst_data.h_els.port_id[1] << 8) |
- job->request->rqst_data.h_els.port_id[2];
+ port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
+ (bsg_request->rqst_data.h_els.port_id[1] << 8) |
+ bsg_request->rqst_data.h_els.port_id[2];
case FC_BSG_RPT_ELS:
fc_flags = IBMVFC_FC_ELS;
break;
case FC_BSG_HST_CT:
issue_login = 1;
- port_id = (job->request->rqst_data.h_ct.port_id[0] << 16) |
- (job->request->rqst_data.h_ct.port_id[1] << 8) |
- job->request->rqst_data.h_ct.port_id[2];
+ port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
+ (bsg_request->rqst_data.h_ct.port_id[1] << 8) |
+ bsg_request->rqst_data.h_ct.port_id[2];
case FC_BSG_RPT_CT:
fc_flags = IBMVFC_FC_CT_IU;
break;
@@ -1937,13 +1940,14 @@ static int ibmvfc_bsg_request(struct fc_bsg_job *job)
if (rsp_iu.passthru.common.status)
rc = -EIO;
else
- job->reply->reply_payload_rcv_len = rsp_len;
+ bsg_reply->reply_payload_rcv_len = rsp_len;
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_free_event(evt);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
rc = 0;
out:
dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
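
The conversion above is the generic fc_bsg_job -> bsg_job migration: the raw request and reply now sit behind job->request and job->reply as struct fc_bsg_request / fc_bsg_reply, the host and rport come from the fc_bsg_to_shost()/fc_bsg_to_rport() accessors, and completion goes through bsg_job_done() instead of the old job->job_done callback. A skeletal converted handler, with foo_bsg_request as a hypothetical name:

	#include <linux/bsg-lib.h>
	#include <scsi/scsi_transport_fc.h>

	static int foo_bsg_request(struct bsg_job *job)
	{
		struct Scsi_Host *shost = fc_bsg_to_shost(job);
		struct fc_bsg_request *bsg_request = job->request;
		struct fc_bsg_reply *bsg_reply = job->reply;

		bsg_reply->reply_payload_rcv_len = 0;
		/* ... service bsg_request->msgcode against shost ... */
		bsg_reply->result = 0;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
		return 0;
	}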
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 642b739ad0da..c9fa3565c671 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -22,7 +22,7 @@
*
****************************************************************************/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
@@ -81,7 +81,7 @@ static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
}
} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
if (se_cmd->data_direction == DMA_TO_DEVICE) {
- /* residual data from an overflow write */
+ /* residual data from an overflow write */
rsp->flags = SRP_RSP_FLAG_DOOVER;
rsp->data_out_res_cnt = cpu_to_be32(residual_count);
} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
@@ -101,7 +101,7 @@ static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
* and the function returns TRUE.
*
* EXECUTION ENVIRONMENT:
- * Interrupt or Process environment
+ * Interrupt or Process environment
*/
static bool connection_broken(struct scsi_info *vscsi)
{
@@ -324,7 +324,7 @@ static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask,
}
/**
- * ibmvscsis_send_init_message() - send initialize message to the client
+ * ibmvscsis_send_init_message() - send initialize message to the client
* @vscsi: Pointer to our adapter structure
* @format: Which Init Message format to send
*
@@ -382,13 +382,13 @@ static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
vscsi->cmd_q.base_addr);
if (crq) {
*format = (uint)(crq->format);
- rc = ERROR;
+ rc = ERROR;
crq->valid = INVALIDATE_CMD_RESP_EL;
dma_rmb();
}
} else {
*format = (uint)(crq->format);
- rc = ERROR;
+ rc = ERROR;
crq->valid = INVALIDATE_CMD_RESP_EL;
dma_rmb();
}
@@ -397,166 +397,6 @@ static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
}
/**
- * ibmvscsis_establish_new_q() - Establish new CRQ queue
- * @vscsi: Pointer to our adapter structure
- * @new_state: New state being established after resetting the queue
- *
- * Must be called with interrupt lock held.
- */
-static long ibmvscsis_establish_new_q(struct scsi_info *vscsi, uint new_state)
-{
- long rc = ADAPT_SUCCESS;
- uint format;
-
- vscsi->flags &= PRESERVE_FLAG_FIELDS;
- vscsi->rsp_q_timer.timer_pops = 0;
- vscsi->debit = 0;
- vscsi->credit = 0;
-
- rc = vio_enable_interrupts(vscsi->dma_dev);
- if (rc) {
- pr_warn("reset_queue: failed to enable interrupts, rc %ld\n",
- rc);
- return rc;
- }
-
- rc = ibmvscsis_check_init_msg(vscsi, &format);
- if (rc) {
- dev_err(&vscsi->dev, "reset_queue: check_init_msg failed, rc %ld\n",
- rc);
- return rc;
- }
-
- if (format == UNUSED_FORMAT && new_state == WAIT_CONNECTION) {
- rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
- switch (rc) {
- case H_SUCCESS:
- case H_DROPPED:
- case H_CLOSED:
- rc = ADAPT_SUCCESS;
- break;
-
- case H_PARAMETER:
- case H_HARDWARE:
- break;
-
- default:
- vscsi->state = UNDEFINED;
- rc = H_HARDWARE;
- break;
- }
- }
-
- return rc;
-}
-
-/**
- * ibmvscsis_reset_queue() - Reset CRQ Queue
- * @vscsi: Pointer to our adapter structure
- * @new_state: New state to establish after resetting the queue
- *
- * This function calls h_free_q and then calls h_reg_q and does all
- * of the bookkeeping to get us back to where we can communicate.
- *
- * Actually, we don't always call h_free_crq. A problem was discovered
- * where one partition would close and reopen his queue, which would
- * cause his partner to get a transport event, which would cause him to
- * close and reopen his queue, which would cause the original partition
- * to get a transport event, etc., etc. To prevent this, we don't
- * actually close our queue if the client initiated the reset, (i.e.
- * either we got a transport event or we have detected that the client's
- * queue is gone)
- *
- * EXECUTION ENVIRONMENT:
- * Process environment, called with interrupt lock held
- */
-static void ibmvscsis_reset_queue(struct scsi_info *vscsi, uint new_state)
-{
- int bytes;
- long rc = ADAPT_SUCCESS;
-
- pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
-
- /* don't reset, the client did it for us */
- if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
- vscsi->flags &= PRESERVE_FLAG_FIELDS;
- vscsi->rsp_q_timer.timer_pops = 0;
- vscsi->debit = 0;
- vscsi->credit = 0;
- vscsi->state = new_state;
- vio_enable_interrupts(vscsi->dma_dev);
- } else {
- rc = ibmvscsis_free_command_q(vscsi);
- if (rc == ADAPT_SUCCESS) {
- vscsi->state = new_state;
-
- bytes = vscsi->cmd_q.size * PAGE_SIZE;
- rc = h_reg_crq(vscsi->dds.unit_id,
- vscsi->cmd_q.crq_token, bytes);
- if (rc == H_CLOSED || rc == H_SUCCESS) {
- rc = ibmvscsis_establish_new_q(vscsi,
- new_state);
- }
-
- if (rc != ADAPT_SUCCESS) {
- pr_debug("reset_queue: reg_crq rc %ld\n", rc);
-
- vscsi->state = ERR_DISCONNECTED;
- vscsi->flags |= RESPONSE_Q_DOWN;
- ibmvscsis_free_command_q(vscsi);
- }
- } else {
- vscsi->state = ERR_DISCONNECTED;
- vscsi->flags |= RESPONSE_Q_DOWN;
- }
- }
-}
-
-/**
- * ibmvscsis_free_cmd_resources() - Free command resources
- * @vscsi: Pointer to our adapter structure
- * @cmd: Command which is not longer in use
- *
- * Must be called with interrupt lock held.
- */
-static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
- struct ibmvscsis_cmd *cmd)
-{
- struct iu_entry *iue = cmd->iue;
-
- switch (cmd->type) {
- case TASK_MANAGEMENT:
- case SCSI_CDB:
- /*
- * When the queue goes down this value is cleared, so it
- * cannot be cleared in this general purpose function.
- */
- if (vscsi->debit)
- vscsi->debit -= 1;
- break;
- case ADAPTER_MAD:
- vscsi->flags &= ~PROCESSING_MAD;
- break;
- case UNSET_TYPE:
- break;
- default:
- dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
- cmd->type);
- break;
- }
-
- cmd->iue = NULL;
- list_add_tail(&cmd->list, &vscsi->free_cmd);
- srp_iu_put(iue);
-
- if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
- list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
- vscsi->flags &= ~WAIT_FOR_IDLE;
- complete(&vscsi->wait_idle);
- }
-}
-
-/**
* ibmvscsis_disconnect() - Helper function to disconnect
* @work: Pointer to work_struct, gives access to our adapter structure
*
@@ -575,7 +415,6 @@ static void ibmvscsis_disconnect(struct work_struct *work)
proc_work);
u16 new_state;
bool wait_idle = false;
- long rc = ADAPT_SUCCESS;
spin_lock_bh(&vscsi->intr_lock);
new_state = vscsi->new_state;
@@ -589,7 +428,7 @@ static void ibmvscsis_disconnect(struct work_struct *work)
* should transition to the new state
*/
switch (vscsi->state) {
- /* Should never be called while in this state. */
+ /* Should never be called while in this state. */
case NO_QUEUE:
/*
* Can never transition from this state;
@@ -628,30 +467,24 @@ static void ibmvscsis_disconnect(struct work_struct *work)
vscsi->state = new_state;
break;
- /*
- * If this is a transition into an error state.
- * a client is attempting to establish a connection
- * and has violated the RPA protocol.
- * There can be nothing pending on the adapter although
- * there can be requests in the command queue.
- */
case WAIT_ENABLED:
- case PART_UP_WAIT_ENAB:
switch (new_state) {
- case ERR_DISCONNECT:
- vscsi->flags |= RESPONSE_Q_DOWN;
+ case UNCONFIGURING:
vscsi->state = new_state;
+ vscsi->flags |= RESPONSE_Q_DOWN;
vscsi->flags &= ~(SCHEDULE_DISCONNECT |
DISCONNECT_SCHEDULED);
- ibmvscsis_free_command_q(vscsi);
- break;
- case ERR_DISCONNECT_RECONNECT:
- ibmvscsis_reset_queue(vscsi, WAIT_ENABLED);
+ dma_rmb();
+ if (vscsi->flags & CFG_SLEEPING) {
+ vscsi->flags &= ~CFG_SLEEPING;
+ complete(&vscsi->unconfig);
+ }
break;
/* should never happen */
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
case WAIT_IDLE:
- rc = ERROR;
dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
vscsi->state);
break;
@@ -660,6 +493,13 @@ static void ibmvscsis_disconnect(struct work_struct *work)
case WAIT_IDLE:
switch (new_state) {
+ case UNCONFIGURING:
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ vscsi->state = new_state;
+ vscsi->flags &= ~(SCHEDULE_DISCONNECT |
+ DISCONNECT_SCHEDULED);
+ ibmvscsis_free_command_q(vscsi);
+ break;
case ERR_DISCONNECT:
case ERR_DISCONNECT_RECONNECT:
vscsi->state = new_state;
@@ -788,7 +628,6 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
break;
case WAIT_ENABLED:
- case PART_UP_WAIT_ENAB:
case WAIT_IDLE:
case WAIT_CONNECTION:
case CONNECTED:
@@ -806,6 +645,310 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
}
/**
+ * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
+{
+ long rc = ADAPT_SUCCESS;
+
+ switch (vscsi->state) {
+ case NO_QUEUE:
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ case ERR_DISCONNECTED:
+ case UNCONFIGURING:
+ case UNDEFINED:
+ rc = ERROR;
+ break;
+
+ case WAIT_CONNECTION:
+ vscsi->state = CONNECTED;
+ break;
+
+ case WAIT_IDLE:
+ case SRP_PROCESSING:
+ case CONNECTED:
+ case WAIT_ENABLED:
+ default:
+ rc = ERROR;
+ dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
+ vscsi->state);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_handle_init_msg() - Respond to an Init Message
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
+{
+ long rc = ADAPT_SUCCESS;
+
+ switch (vscsi->state) {
+ case WAIT_CONNECTION:
+ rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
+ switch (rc) {
+ case H_SUCCESS:
+ vscsi->state = CONNECTED;
+ break;
+
+ case H_PARAMETER:
+ dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
+ break;
+
+ case H_DROPPED:
+ dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+ rc);
+ rc = ERROR;
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT, 0);
+ break;
+
+ case H_CLOSED:
+ pr_warn("init_msg: failed to send, rc %ld\n", rc);
+ rc = 0;
+ break;
+ }
+ break;
+
+ case UNDEFINED:
+ rc = ERROR;
+ break;
+
+ case UNCONFIGURING:
+ break;
+
+ case WAIT_ENABLED:
+ case CONNECTED:
+ case SRP_PROCESSING:
+ case WAIT_IDLE:
+ case NO_QUEUE:
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ case ERR_DISCONNECTED:
+ default:
+ rc = ERROR;
+ dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
+ vscsi->state);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_init_msg() - Respond to an init message
+ * @vscsi: Pointer to our adapter structure
+ * @crq: Pointer to CRQ element containing the Init Message
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, interrupt lock held
+ */
+static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
+{
+ long rc = ADAPT_SUCCESS;
+
+ pr_debug("init_msg: state 0x%hx\n", vscsi->state);
+
+ rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
+ (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
+ 0);
+ if (rc == H_SUCCESS) {
+ vscsi->client_data.partition_number =
+ be64_to_cpu(*(u64 *)vscsi->map_buf);
+ pr_debug("init_msg, part num %d\n",
+ vscsi->client_data.partition_number);
+ } else {
+ pr_debug("init_msg h_vioctl rc %ld\n", rc);
+ rc = ADAPT_SUCCESS;
+ }
+
+ if (crq->format == INIT_MSG) {
+ rc = ibmvscsis_handle_init_msg(vscsi);
+ } else if (crq->format == INIT_COMPLETE_MSG) {
+ rc = ibmvscsis_handle_init_compl_msg(vscsi);
+ } else {
+ rc = ERROR;
+ dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
+ (uint)crq->format);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ }
+
+ return rc;
+}
+
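+Taken together, the handlers above implement a symmetric CRQ handshake:
+whichever side opens its queue first sends INIT_MSG, the partner answers
+with INIT_COMPLETE_MSG, and both sides then treat the connection as up.
+A minimal sketch of that protocol, assuming hypothetical send_msg() and
+state variables rather than the driver's real entry points:
+
+	switch (crq->format) {
+	case INIT_MSG:			/* partner opened its queue first */
+		send_msg(INIT_COMPLETE_MSG);
+		state = CONNECTED;
+		break;
+	case INIT_COMPLETE_MSG:		/* partner acknowledged our INIT_MSG */
+		state = CONNECTED;
+		break;
+	}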
+/**
+ * ibmvscsis_establish_new_q() - Establish new CRQ queue
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
+{
+ long rc = ADAPT_SUCCESS;
+ uint format;
+
+ vscsi->flags &= PRESERVE_FLAG_FIELDS;
+ vscsi->rsp_q_timer.timer_pops = 0;
+ vscsi->debit = 0;
+ vscsi->credit = 0;
+
+ rc = vio_enable_interrupts(vscsi->dma_dev);
+ if (rc) {
+ pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n",
+ rc);
+ return rc;
+ }
+
+ rc = ibmvscsis_check_init_msg(vscsi, &format);
+ if (rc) {
+ dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n",
+ rc);
+ return rc;
+ }
+
+ if (format == UNUSED_FORMAT) {
+ rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
+ switch (rc) {
+ case H_SUCCESS:
+ case H_DROPPED:
+ case H_CLOSED:
+ rc = ADAPT_SUCCESS;
+ break;
+
+ case H_PARAMETER:
+ case H_HARDWARE:
+ break;
+
+ default:
+ vscsi->state = UNDEFINED;
+ rc = H_HARDWARE;
+ break;
+ }
+ } else if (format == INIT_MSG) {
+ rc = ibmvscsis_handle_init_msg(vscsi);
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_reset_queue() - Reset CRQ Queue
+ * @vscsi: Pointer to our adapter structure
+ *
+ * This function calls h_free_q and then calls h_reg_q and does all
+ * of the bookkeeping to get us back to where we can communicate.
+ *
+ * Actually, we don't always call h_free_crq. A problem was discovered
+ * where one partition would close and reopen his queue, which would
+ * cause his partner to get a transport event, which would cause him to
+ * close and reopen his queue, which would cause the original partition
+ * to get a transport event, etc., etc. To prevent this, we don't
+ * actually close our queue if the client initiated the reset, (i.e.
+ * either we got a transport event or we have detected that the client's
+ * queue is gone)
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process environment, called with interrupt lock held
+ */
+static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
+{
+ int bytes;
+ long rc = ADAPT_SUCCESS;
+
+ pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
+
+ /* don't reset, the client did it for us */
+ if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
+ vscsi->flags &= PRESERVE_FLAG_FIELDS;
+ vscsi->rsp_q_timer.timer_pops = 0;
+ vscsi->debit = 0;
+ vscsi->credit = 0;
+ vscsi->state = WAIT_CONNECTION;
+ vio_enable_interrupts(vscsi->dma_dev);
+ } else {
+ rc = ibmvscsis_free_command_q(vscsi);
+ if (rc == ADAPT_SUCCESS) {
+ vscsi->state = WAIT_CONNECTION;
+
+ bytes = vscsi->cmd_q.size * PAGE_SIZE;
+ rc = h_reg_crq(vscsi->dds.unit_id,
+ vscsi->cmd_q.crq_token, bytes);
+ if (rc == H_CLOSED || rc == H_SUCCESS) {
+ rc = ibmvscsis_establish_new_q(vscsi);
+ }
+
+ if (rc != ADAPT_SUCCESS) {
+ pr_debug("reset_queue: reg_crq rc %ld\n", rc);
+
+ vscsi->state = ERR_DISCONNECTED;
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ ibmvscsis_free_command_q(vscsi);
+ }
+ } else {
+ vscsi->state = ERR_DISCONNECTED;
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ }
+ }
+}
+
+/**
+ * ibmvscsis_free_cmd_resources() - Free command resources
+ * @vscsi: Pointer to our adapter structure
+ * @cmd: Command which is no longer in use
+ *
+ * Must be called with interrupt lock held.
+ */
+static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd)
+{
+ struct iu_entry *iue = cmd->iue;
+
+ switch (cmd->type) {
+ case TASK_MANAGEMENT:
+ case SCSI_CDB:
+ /*
+ * When the queue goes down this value is cleared, so it
+ * cannot be cleared in this general purpose function.
+ */
+ if (vscsi->debit)
+ vscsi->debit -= 1;
+ break;
+ case ADAPTER_MAD:
+ vscsi->flags &= ~PROCESSING_MAD;
+ break;
+ case UNSET_TYPE:
+ break;
+ default:
+ dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
+ cmd->type);
+ break;
+ }
+
+ cmd->iue = NULL;
+ list_add_tail(&cmd->list, &vscsi->free_cmd);
+ srp_iu_put(iue);
+
+ if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
+ list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
+ vscsi->flags &= ~WAIT_FOR_IDLE;
+ complete(&vscsi->wait_idle);
+ }
+}
+
+/**
* ibmvscsis_trans_event() - Handle a Transport Event
* @vscsi: Pointer to our adapter structure
* @crq: Pointer to CRQ entry containing the Transport Event
@@ -863,10 +1006,6 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
TRANS_EVENT));
break;
- case PART_UP_WAIT_ENAB:
- vscsi->state = WAIT_ENABLED;
- break;
-
case SRP_PROCESSING:
if ((vscsi->debit > 0) ||
!list_empty(&vscsi->schedule_q) ||
@@ -895,7 +1034,7 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
}
}
- rc = vscsi->flags & SCHEDULE_DISCONNECT;
+ rc = vscsi->flags & SCHEDULE_DISCONNECT;
pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
vscsi->flags, vscsi->state, rc);
@@ -1066,16 +1205,28 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
free_qs = true;
switch (vscsi->state) {
+ case UNCONFIGURING:
+ ibmvscsis_free_command_q(vscsi);
+ dma_rmb();
+ isync();
+ if (vscsi->flags & CFG_SLEEPING) {
+ vscsi->flags &= ~CFG_SLEEPING;
+ complete(&vscsi->unconfig);
+ }
+ break;
case ERR_DISCONNECT_RECONNECT:
- ibmvscsis_reset_queue(vscsi, WAIT_CONNECTION);
+ ibmvscsis_reset_queue(vscsi);
pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
break;
case ERR_DISCONNECT:
ibmvscsis_free_command_q(vscsi);
- vscsi->flags &= ~DISCONNECT_SCHEDULED;
+ vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED);
vscsi->flags |= RESPONSE_Q_DOWN;
- vscsi->state = ERR_DISCONNECTED;
+ if (vscsi->tport.enabled)
+ vscsi->state = ERR_DISCONNECTED;
+ else
+ vscsi->state = WAIT_ENABLED;
pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
vscsi->flags, vscsi->state);
break;
@@ -1220,7 +1371,7 @@ static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
* @iue: Information Unit containing the Adapter Info MAD request
*
* EXECUTION ENVIRONMENT:
- * Interrupt adpater lock is held
+ * Interrupt adapter lock is held
*/
static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
struct iu_entry *iue)
@@ -1620,8 +1771,8 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
be64_to_cpu(msg_hi),
be64_to_cpu(cmd->rsp.tag));
- pr_debug("send_messages: tag 0x%llx, rc %ld\n",
- be64_to_cpu(cmd->rsp.tag), rc);
+ pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
+ cmd, be64_to_cpu(cmd->rsp.tag), rc);
/* if all ok free up the command element resources */
if (rc == H_SUCCESS) {
@@ -1691,7 +1842,7 @@ static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
* @crq: Pointer to the CRQ entry containing the MAD request
*
* EXECUTION ENVIRONMENT:
- * Interrupt called with adapter lock held
+ * Interrupt, called with adapter lock held
*/
static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
{
@@ -1745,14 +1896,7 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
pr_debug("mad: type %d\n", be32_to_cpu(mad->type));
- if (be16_to_cpu(mad->length) < 0) {
- dev_err(&vscsi->dev, "mad: length is < 0\n");
- ibmvscsis_post_disconnect(vscsi,
- ERR_DISCONNECT_RECONNECT, 0);
- rc = SRP_VIOLATION;
- } else {
- rc = ibmvscsis_process_mad(vscsi, iue);
- }
+ rc = ibmvscsis_process_mad(vscsi, iue);
pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
rc);
@@ -1864,7 +2008,7 @@ static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi,
break;
case H_PERMISSION:
if (connection_broken(vscsi))
- flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
+ flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
rc);
ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
@@ -2187,156 +2331,6 @@ static long ibmvscsis_ping_response(struct scsi_info *vscsi)
}
/**
- * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
- * @vscsi: Pointer to our adapter structure
- *
- * Must be called with interrupt lock held.
- */
-static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
-{
- long rc = ADAPT_SUCCESS;
-
- switch (vscsi->state) {
- case NO_QUEUE:
- case ERR_DISCONNECT:
- case ERR_DISCONNECT_RECONNECT:
- case ERR_DISCONNECTED:
- case UNCONFIGURING:
- case UNDEFINED:
- rc = ERROR;
- break;
-
- case WAIT_CONNECTION:
- vscsi->state = CONNECTED;
- break;
-
- case WAIT_IDLE:
- case SRP_PROCESSING:
- case CONNECTED:
- case WAIT_ENABLED:
- case PART_UP_WAIT_ENAB:
- default:
- rc = ERROR;
- dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
- vscsi->state);
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
- break;
- }
-
- return rc;
-}
-
-/**
- * ibmvscsis_handle_init_msg() - Respond to an Init Message
- * @vscsi: Pointer to our adapter structure
- *
- * Must be called with interrupt lock held.
- */
-static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
-{
- long rc = ADAPT_SUCCESS;
-
- switch (vscsi->state) {
- case WAIT_ENABLED:
- vscsi->state = PART_UP_WAIT_ENAB;
- break;
-
- case WAIT_CONNECTION:
- rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
- switch (rc) {
- case H_SUCCESS:
- vscsi->state = CONNECTED;
- break;
-
- case H_PARAMETER:
- dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
- rc);
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
- break;
-
- case H_DROPPED:
- dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
- rc);
- rc = ERROR;
- ibmvscsis_post_disconnect(vscsi,
- ERR_DISCONNECT_RECONNECT, 0);
- break;
-
- case H_CLOSED:
- pr_warn("init_msg: failed to send, rc %ld\n", rc);
- rc = 0;
- break;
- }
- break;
-
- case UNDEFINED:
- rc = ERROR;
- break;
-
- case UNCONFIGURING:
- break;
-
- case PART_UP_WAIT_ENAB:
- case CONNECTED:
- case SRP_PROCESSING:
- case WAIT_IDLE:
- case NO_QUEUE:
- case ERR_DISCONNECT:
- case ERR_DISCONNECT_RECONNECT:
- case ERR_DISCONNECTED:
- default:
- rc = ERROR;
- dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
- vscsi->state);
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
- break;
- }
-
- return rc;
-}
-
-/**
- * ibmvscsis_init_msg() - Respond to an init message
- * @vscsi: Pointer to our adapter structure
- * @crq: Pointer to CRQ element containing the Init Message
- *
- * EXECUTION ENVIRONMENT:
- * Interrupt, interrupt lock held
- */
-static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
-{
- long rc = ADAPT_SUCCESS;
-
- pr_debug("init_msg: state 0x%hx\n", vscsi->state);
-
- rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
- (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
- 0);
- if (rc == H_SUCCESS) {
- vscsi->client_data.partition_number =
- be64_to_cpu(*(u64 *)vscsi->map_buf);
- pr_debug("init_msg, part num %d\n",
- vscsi->client_data.partition_number);
- } else {
- pr_debug("init_msg h_vioctl rc %ld\n", rc);
- rc = ADAPT_SUCCESS;
- }
-
- if (crq->format == INIT_MSG) {
- rc = ibmvscsis_handle_init_msg(vscsi);
- } else if (crq->format == INIT_COMPLETE_MSG) {
- rc = ibmvscsis_handle_init_compl_msg(vscsi);
- } else {
- rc = ERROR;
- dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
- (uint)crq->format);
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
- }
-
- return rc;
-}
-
-/**
* ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue.
* @vscsi: Pointer to our adapter structure
* @crq: Pointer to CRQ element containing the SRP request
@@ -2391,7 +2385,7 @@ static long ibmvscsis_parse_command(struct scsi_info *vscsi,
break;
case VALID_TRANS_EVENT:
- rc = ibmvscsis_trans_event(vscsi, crq);
+ rc = ibmvscsis_trans_event(vscsi, crq);
break;
case VALID_INIT_MSG:
@@ -2522,7 +2516,6 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
srp->tag);
goto fail;
- return;
}
cmd->rsp.sol_not = srp->sol_not;
@@ -2559,6 +2552,10 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
data_len, attr, dir, 0);
if (rc) {
dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
+ spin_lock_bh(&vscsi->intr_lock);
+ list_del(&cmd->list);
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ spin_unlock_bh(&vscsi->intr_lock);
goto fail;
}
return;
@@ -2638,6 +2635,9 @@ static void ibmvscsis_parse_task(struct scsi_info *vscsi,
if (rc) {
dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
rc);
+ spin_lock_bh(&vscsi->intr_lock);
+ list_del(&cmd->list);
+ spin_unlock_bh(&vscsi->intr_lock);
cmd->se_cmd.se_tmr_req->response =
TMR_FUNCTION_REJECTED;
}
@@ -2786,36 +2786,6 @@ static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
}
/**
- * ibmvscsis_check_q() - Helper function to Check Init Message Valid
- * @vscsi: Pointer to our adapter structure
- *
- * Checks if a initialize message was queued by the initiatior
- * while the timing window was open. This function is called from
- * probe after the CRQ is created and interrupts are enabled.
- * It would only be used by adapters who wait for some event before
- * completing the init handshake with the client. For ibmvscsi, this
- * event is waiting for the port to be enabled.
- *
- * EXECUTION ENVIRONMENT:
- * Process level only, interrupt lock held
- */
-static long ibmvscsis_check_q(struct scsi_info *vscsi)
-{
- uint format;
- long rc;
-
- rc = ibmvscsis_check_init_msg(vscsi, &format);
- if (rc)
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
- else if (format == UNUSED_FORMAT)
- vscsi->state = WAIT_ENABLED;
- else
- vscsi->state = PART_UP_WAIT_ENAB;
-
- return rc;
-}
-
-/**
* ibmvscsis_enable_change_state() - Set new state based on enabled status
* @vscsi: Pointer to our adapter structure
*
@@ -2826,77 +2796,19 @@ static long ibmvscsis_check_q(struct scsi_info *vscsi)
*/
static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
{
+ int bytes;
long rc = ADAPT_SUCCESS;
-handle_state_change:
- switch (vscsi->state) {
- case WAIT_ENABLED:
- rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
- switch (rc) {
- case H_SUCCESS:
- case H_DROPPED:
- case H_CLOSED:
- vscsi->state = WAIT_CONNECTION;
- rc = ADAPT_SUCCESS;
- break;
-
- case H_PARAMETER:
- break;
-
- case H_HARDWARE:
- break;
-
- default:
- vscsi->state = UNDEFINED;
- rc = H_HARDWARE;
- break;
- }
- break;
- case PART_UP_WAIT_ENAB:
- rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
- switch (rc) {
- case H_SUCCESS:
- vscsi->state = CONNECTED;
- rc = ADAPT_SUCCESS;
- break;
-
- case H_DROPPED:
- case H_CLOSED:
- vscsi->state = WAIT_ENABLED;
- goto handle_state_change;
-
- case H_PARAMETER:
- break;
-
- case H_HARDWARE:
- break;
-
- default:
- rc = H_HARDWARE;
- break;
- }
- break;
-
- case WAIT_CONNECTION:
- case WAIT_IDLE:
- case SRP_PROCESSING:
- case CONNECTED:
- rc = ADAPT_SUCCESS;
- break;
- /* should not be able to get here */
- case UNCONFIGURING:
- rc = ERROR;
- vscsi->state = UNDEFINED;
- break;
+ bytes = vscsi->cmd_q.size * PAGE_SIZE;
+ rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes);
+ if (rc == H_CLOSED || rc == H_SUCCESS) {
+ vscsi->state = WAIT_CONNECTION;
+ rc = ibmvscsis_establish_new_q(vscsi);
+ }
- /* driver should never allow this to happen */
- case ERR_DISCONNECT:
- case ERR_DISCONNECT_RECONNECT:
- default:
- dev_err(&vscsi->dev, "in invalid state %d during enable_change_state\n",
- vscsi->state);
- rc = ADAPT_SUCCESS;
- break;
+ if (rc != ADAPT_SUCCESS) {
+ vscsi->state = ERR_DISCONNECTED;
+ vscsi->flags |= RESPONSE_Q_DOWN;
}
return rc;
@@ -2916,7 +2828,6 @@ handle_state_change:
*/
static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
{
- long rc = 0;
int pages;
struct vio_dev *vdev = vscsi->dma_dev;
@@ -2940,22 +2851,7 @@ static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
return -ENOMEM;
}
- rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, PAGE_SIZE);
- if (rc) {
- if (rc == H_CLOSED) {
- vscsi->state = WAIT_ENABLED;
- rc = 0;
- } else {
- dma_unmap_single(&vdev->dev, vscsi->cmd_q.crq_token,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- free_page((unsigned long)vscsi->cmd_q.base_addr);
- rc = -ENODEV;
- }
- } else {
- vscsi->state = WAIT_ENABLED;
- }
-
- return rc;
+ return 0;
}
/**
@@ -3270,7 +3166,7 @@ static void ibmvscsis_handle_crq(unsigned long data)
/*
* if we are in a path where we are waiting for all pending commands
* to complete because we received a transport event and anything in
- * the command queue is for a new connection, do nothing
+ * the command queue is for a new connection, do nothing
*/
if (TARGET_STOP(vscsi)) {
vio_enable_interrupts(vscsi->dma_dev);
@@ -3314,7 +3210,7 @@ cmd_work:
* everything but transport events on the queue
*
* need to decrement the queue index so we can
- * look at the elment again
+ * look at the element again
*/
if (vscsi->cmd_q.index)
vscsi->cmd_q.index -= 1;
@@ -3378,7 +3274,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
INIT_LIST_HEAD(&vscsi->waiting_rsp);
INIT_LIST_HEAD(&vscsi->active_q);
- snprintf(vscsi->tport.tport_name, 256, "%s", dev_name(&vdev->dev));
+ snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
+ dev_name(&vdev->dev));
pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
@@ -3393,6 +3290,9 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
strncat(vscsi->eye, vdev->name, MAX_EYE);
vscsi->dds.unit_id = vdev->unit_address;
+ strncpy(vscsi->dds.partition_name, partition_name,
+ sizeof(vscsi->dds.partition_name));
+ vscsi->dds.partition_num = partition_number;
spin_lock_bh(&ibmvscsis_dev_lock);
list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
@@ -3469,6 +3369,7 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
(unsigned long)vscsi);
init_completion(&vscsi->wait_idle);
+ init_completion(&vscsi->unconfig);
snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
vscsi->work_q = create_workqueue(wq_name);
@@ -3485,31 +3386,12 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
goto destroy_WQ;
}
- spin_lock_bh(&vscsi->intr_lock);
- vio_enable_interrupts(vdev);
- if (rc) {
- dev_err(&vscsi->dev, "enabling interrupts failed, rc %d\n", rc);
- rc = -ENODEV;
- spin_unlock_bh(&vscsi->intr_lock);
- goto free_irq;
- }
-
- if (ibmvscsis_check_q(vscsi)) {
- rc = ERROR;
- dev_err(&vscsi->dev, "probe: check_q failed, rc %d\n", rc);
- spin_unlock_bh(&vscsi->intr_lock);
- goto disable_interrupt;
- }
- spin_unlock_bh(&vscsi->intr_lock);
+ vscsi->state = WAIT_ENABLED;
dev_set_drvdata(&vdev->dev, vscsi);
return 0;
-disable_interrupt:
- vio_disable_interrupts(vdev);
-free_irq:
- free_irq(vdev->irq, vscsi);
destroy_WQ:
destroy_workqueue(vscsi->work_q);
unmap_buf:
@@ -3543,10 +3425,11 @@ static int ibmvscsis_remove(struct vio_dev *vdev)
pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
- /*
- * TBD: Need to handle if there are commands on the waiting_rsp q
- * Actually, can there still be cmds outstanding to tcm?
- */
+ spin_lock_bh(&vscsi->intr_lock);
+ ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
+ vscsi->flags |= CFG_SLEEPING;
+ spin_unlock_bh(&vscsi->intr_lock);
+ wait_for_completion(&vscsi->unconfig);
vio_disable_interrupts(vdev);
free_irq(vdev->irq, vscsi);
@@ -3555,7 +3438,6 @@ static int ibmvscsis_remove(struct vio_dev *vdev)
DMA_BIDIRECTIONAL);
kfree(vscsi->map_buf);
tasklet_kill(&vscsi->work_task);
- ibmvscsis_unregister_command_q(vscsi);
ibmvscsis_destroy_command_q(vscsi);
ibmvscsis_freetimer(vscsi);
ibmvscsis_free_cmds(vscsi);
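The rewritten remove path relies on a small completion handshake with the
disconnect machinery: remove() posts an UNCONFIGURING disconnect, flags
itself as sleeping, and blocks until the drain path signals that the
adapter is idle. A hedged sketch of the pattern, with hypothetical names
(lock, flags, done):

	/* remover side */
	spin_lock_bh(&lock);
	flags |= CFG_SLEEPING;		/* tell the drain path someone is waiting */
	post_disconnect(UNCONFIGURING);
	spin_unlock_bh(&lock);
	wait_for_completion(&done);	/* sleep until the adapter is idle */

	/* drain side, run with the lock held once everything is quiesced */
	if (flags & CFG_SLEEPING) {
		flags &= ~CFG_SLEEPING;
		complete(&done);	/* wake the remover exactly once */
	}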
@@ -3609,7 +3491,7 @@ static int ibmvscsis_get_system_info(void)
num = of_get_property(rootdn, "ibm,partition-no", NULL);
if (num)
- partition_number = *num;
+ partition_number = of_read_number(num, 1);
of_node_put(rootdn);
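The of_read_number() change is an endianness fix, not a cleanup:
device-tree properties are big-endian cells, so dereferencing the raw
pointer returns byte-swapped data on little-endian hosts. A sketch of the
corrected access, assuming a hypothetical node np:

	const __be32 *prop = of_get_property(np, "ibm,partition-no", NULL);
	if (prop)
		partition_number = of_read_number(prop, 1); /* one cell, swapped as needed */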
@@ -3903,18 +3785,22 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
}
if (tmp) {
- tport->enabled = true;
spin_lock_bh(&vscsi->intr_lock);
+ tport->enabled = true;
lrc = ibmvscsis_enable_change_state(vscsi);
if (lrc)
pr_err("enable_change_state failed, rc %ld state %d\n",
lrc, vscsi->state);
spin_unlock_bh(&vscsi->intr_lock);
} else {
+ spin_lock_bh(&vscsi->intr_lock);
tport->enabled = false;
+ /* This simulates the server going down */
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
+ spin_unlock_bh(&vscsi->intr_lock);
}
- pr_debug("tpg_enable_store, state %d\n", vscsi->state);
+ pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state);
return count;
}
@@ -3983,10 +3869,10 @@ static struct attribute *ibmvscsis_dev_attrs[] = {
ATTRIBUTE_GROUPS(ibmvscsis_dev);
static struct class ibmvscsis_class = {
- .name = "ibmvscsis",
- .dev_release = ibmvscsis_dev_release,
- .class_attrs = ibmvscsis_class_attrs,
- .dev_groups = ibmvscsis_dev_groups,
+ .name = "ibmvscsis",
+ .dev_release = ibmvscsis_dev_release,
+ .class_attrs = ibmvscsis_class_attrs,
+ .dev_groups = ibmvscsis_dev_groups,
};
static struct vio_device_id ibmvscsis_device_table[] = {
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
index 981a0c992b6c..98b0ca79a5c5 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
@@ -204,8 +204,6 @@ struct scsi_info {
struct list_head waiting_rsp;
#define NO_QUEUE 0x00
#define WAIT_ENABLED 0X01
- /* driver has received an initialize command */
-#define PART_UP_WAIT_ENAB 0x02
#define WAIT_CONNECTION 0x04
/* have established a connection */
#define CONNECTED 0x08
@@ -259,6 +257,8 @@ struct scsi_info {
#define SCHEDULE_DISCONNECT 0x00400
/* disconnect handler is scheduled */
#define DISCONNECT_SCHEDULED 0x00800
+ /* remove function is sleeping */
+#define CFG_SLEEPING 0x01000
u32 flags;
/* adapter lock */
spinlock_t intr_lock;
@@ -287,6 +287,7 @@ struct scsi_info {
struct workqueue_struct *work_q;
struct completion wait_idle;
+ struct completion unconfig;
struct device dev;
struct vio_dev *dma_dev;
struct srp_target target;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 532474109624..835c59c777f2 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -186,16 +186,16 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
};
static const struct ipr_chip_t ipr_chip[] = {
- { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
- { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
+ { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
static int ipr_max_bus_speeds[] = {
@@ -9439,23 +9439,11 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
struct pci_dev *pdev = ioa_cfg->pdev;
+ int i;
- if (ioa_cfg->intr_flag == IPR_USE_MSI ||
- ioa_cfg->intr_flag == IPR_USE_MSIX) {
- int i;
- for (i = 0; i < ioa_cfg->nvectors; i++)
- free_irq(ioa_cfg->vectors_info[i].vec,
- &ioa_cfg->hrrq[i]);
- } else
- free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
-
- if (ioa_cfg->intr_flag == IPR_USE_MSI) {
- pci_disable_msi(pdev);
- ioa_cfg->intr_flag &= ~IPR_USE_MSI;
- } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
- pci_disable_msix(pdev);
- ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
- }
+ for (i = 0; i < ioa_cfg->nvectors; i++)
+ free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
+ pci_free_irq_vectors(pdev);
}
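This is the generic shape of the pci_alloc_irq_vectors() conversion; the
same structure recurs in the isci changes below. One allocation call
replaces the MSI-X/MSI/legacy ladder, pci_irq_vector() maps a vector
index to a Linux IRQ number, and teardown is symmetric. A hedged sketch
with hypothetical names (MAX_VECS, my_isr, priv):

	int i, nvec, rc;

	/* try MSI-X, then MSI, then fall back to a single legacy IRQ */
	nvec = pci_alloc_irq_vectors(pdev, 1, MAX_VECS,
				     PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;
	for (i = 0; i < nvec; i++)
		rc = request_irq(pci_irq_vector(pdev, i), my_isr, 0,
				 "my_drv", &priv[i]);

	/* teardown mirrors setup, whatever scheme was granted */
	for (i = 0; i < nvec; i++)
		free_irq(pci_irq_vector(pdev, i), &priv[i]);
	pci_free_irq_vectors(pdev);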
/**
@@ -9883,45 +9871,6 @@ static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
}
}
-static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
-{
- struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
- int i, vectors;
-
- for (i = 0; i < ARRAY_SIZE(entries); ++i)
- entries[i].entry = i;
-
- vectors = pci_enable_msix_range(ioa_cfg->pdev,
- entries, 1, ipr_number_of_msix);
- if (vectors < 0) {
- ipr_wait_for_pci_err_recovery(ioa_cfg);
- return vectors;
- }
-
- for (i = 0; i < vectors; i++)
- ioa_cfg->vectors_info[i].vec = entries[i].vector;
- ioa_cfg->nvectors = vectors;
-
- return 0;
-}
-
-static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
-{
- int i, vectors;
-
- vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
- if (vectors < 0) {
- ipr_wait_for_pci_err_recovery(ioa_cfg);
- return vectors;
- }
-
- for (i = 0; i < vectors; i++)
- ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
- ioa_cfg->nvectors = vectors;
-
- return 0;
-}
-
static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
{
int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
@@ -9934,19 +9883,20 @@ static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
}
}
-static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
+static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
+ struct pci_dev *pdev)
{
int i, rc;
for (i = 1; i < ioa_cfg->nvectors; i++) {
- rc = request_irq(ioa_cfg->vectors_info[i].vec,
+ rc = request_irq(pci_irq_vector(pdev, i),
ipr_isr_mhrrq,
0,
ioa_cfg->vectors_info[i].desc,
&ioa_cfg->hrrq[i]);
if (rc) {
while (--i >= 0)
- free_irq(ioa_cfg->vectors_info[i].vec,
+ free_irq(pci_irq_vector(pdev, i),
&ioa_cfg->hrrq[i]);
return rc;
}
@@ -9984,8 +9934,7 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
* ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
* @pdev: PCI device struct
*
- * Description: The return value from pci_enable_msi_range() can not always be
- * trusted. This routine sets up and initiates a test interrupt to determine
+ * Description: This routine sets up and initiates a test interrupt to determine
* if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
*
@@ -9997,6 +9946,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
int rc;
volatile u32 int_reg;
unsigned long lock_flags = 0;
+ int irq = pci_irq_vector(pdev, 0);
ENTER;
@@ -10008,15 +9958,12 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->intr_flag == IPR_USE_MSIX)
- rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
- else
- rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
+ rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
if (rc) {
- dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
+ dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
return rc;
} else if (ipr_debug)
- dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
+ dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
@@ -10033,10 +9980,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->intr_flag == IPR_USE_MSIX)
- free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
- else
- free_irq(pdev->irq, ioa_cfg);
+ free_irq(irq, ioa_cfg);
LEAVE;
@@ -10060,6 +10004,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
int rc = PCIBIOS_SUCCESSFUL;
volatile u32 mask, uproc, interrupts;
unsigned long lock_flags, driver_lock_flags;
+ unsigned int irq_flag;
ENTER;
@@ -10175,18 +10120,18 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
}
- if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
- ipr_enable_msix(ioa_cfg) == 0)
- ioa_cfg->intr_flag = IPR_USE_MSIX;
- else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
- ipr_enable_msi(ioa_cfg) == 0)
- ioa_cfg->intr_flag = IPR_USE_MSI;
- else {
- ioa_cfg->intr_flag = IPR_USE_LSI;
- ioa_cfg->clear_isr = 1;
- ioa_cfg->nvectors = 1;
- dev_info(&pdev->dev, "Cannot enable MSI.\n");
+ irq_flag = PCI_IRQ_LEGACY;
+ if (ioa_cfg->ipr_chip->has_msi)
+ irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
+ rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
+ if (rc < 0) {
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+ goto cleanup_nomem;
}
+ ioa_cfg->nvectors = rc;
+
+ if (!pdev->msi_enabled && !pdev->msix_enabled)
+ ioa_cfg->clear_isr = 1;
pci_set_master(pdev);
@@ -10199,33 +10144,23 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
}
}
- if (ioa_cfg->intr_flag == IPR_USE_MSI ||
- ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ if (pdev->msi_enabled || pdev->msix_enabled) {
rc = ipr_test_msi(ioa_cfg, pdev);
- if (rc == -EOPNOTSUPP) {
+ switch (rc) {
+ case 0:
+ dev_info(&pdev->dev,
+ "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
+ pdev->msix_enabled ? "-X" : "");
+ break;
+ case -EOPNOTSUPP:
ipr_wait_for_pci_err_recovery(ioa_cfg);
- if (ioa_cfg->intr_flag == IPR_USE_MSI) {
- ioa_cfg->intr_flag &= ~IPR_USE_MSI;
- pci_disable_msi(pdev);
- } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
- ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
- pci_disable_msix(pdev);
- }
+ pci_free_irq_vectors(pdev);
- ioa_cfg->intr_flag = IPR_USE_LSI;
ioa_cfg->nvectors = 1;
- }
- else if (rc)
+ ioa_cfg->clear_isr = 1;
+ break;
+ default:
goto out_msi_disable;
- else {
- if (ioa_cfg->intr_flag == IPR_USE_MSI)
- dev_info(&pdev->dev,
- "Request for %d MSIs succeeded with starting IRQ: %d\n",
- ioa_cfg->nvectors, pdev->irq);
- else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
- dev_info(&pdev->dev,
- "Request for %d MSIXs succeeded.",
- ioa_cfg->nvectors);
}
}
@@ -10273,15 +10208,13 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->intr_flag == IPR_USE_MSI
- || ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ if (pdev->msi_enabled || pdev->msix_enabled) {
name_msi_vectors(ioa_cfg);
- rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
- 0,
+ rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
ioa_cfg->vectors_info[0].desc,
&ioa_cfg->hrrq[0]);
if (!rc)
- rc = ipr_request_other_msi_irqs(ioa_cfg);
+ rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
} else {
rc = request_irq(pdev->irq, ipr_isr,
IRQF_SHARED,
@@ -10323,10 +10256,7 @@ cleanup_nolog:
ipr_free_mem(ioa_cfg);
out_msi_disable:
ipr_wait_for_pci_err_recovery(ioa_cfg);
- if (ioa_cfg->intr_flag == IPR_USE_MSI)
- pci_disable_msi(pdev);
- else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
- pci_disable_msix(pdev);
+ pci_free_irq_vectors(pdev);
cleanup_nomem:
iounmap(ipr_regs);
out_disable:
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 8995053d01b3..b7d2e98eb45b 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1413,10 +1413,7 @@ struct ipr_chip_cfg_t {
struct ipr_chip_t {
u16 vendor;
u16 device;
- u16 intr_type;
-#define IPR_USE_LSI 0x00
-#define IPR_USE_MSI 0x01
-#define IPR_USE_MSIX 0x02
+ bool has_msi;
u16 sis_type;
#define IPR_SIS32 0x00
#define IPR_SIS64 0x01
@@ -1593,11 +1590,9 @@ struct ipr_ioa_cfg {
struct ipr_cmnd **ipr_cmnd_list;
dma_addr_t *ipr_cmnd_list_dma;
- u16 intr_flag;
unsigned int nvectors;
struct {
- unsigned short vec;
char desc[22];
} vectors_info[IPR_MAX_MSIX_VECTORS];
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 02cb76fd4420..3419e1bcdff6 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -2241,9 +2241,6 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
uint8_t minor;
uint8_t subminor;
uint8_t *buffer;
- char hexDigits[] =
- { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C',
- 'D', 'E', 'F' };
METHOD_TRACE("ips_get_bios_version", 1);
@@ -2374,13 +2371,13 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
}
}
- ha->bios_version[0] = hexDigits[(major & 0xF0) >> 4];
+ ha->bios_version[0] = hex_asc_upper_hi(major);
ha->bios_version[1] = '.';
- ha->bios_version[2] = hexDigits[major & 0x0F];
- ha->bios_version[3] = hexDigits[subminor];
+ ha->bios_version[2] = hex_asc_upper_lo(major);
+ ha->bios_version[3] = hex_asc_upper_lo(subminor);
ha->bios_version[4] = '.';
- ha->bios_version[5] = hexDigits[(minor & 0xF0) >> 4];
- ha->bios_version[6] = hexDigits[minor & 0x0F];
+ ha->bios_version[5] = hex_asc_upper_hi(minor);
+ ha->bios_version[6] = hex_asc_upper_lo(minor);
ha->bios_version[7] = 0;
}
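hex_asc_upper_hi() and hex_asc_upper_lo() are the kernel's shared
uppercase-hex helpers, which is why the open-coded hexDigits table could
go. A quick illustration, assuming a hypothetical buf:

	u8 major = 0x4a;

	buf[0] = hex_asc_upper_hi(major);	/* high nibble -> '4' */
	buf[1] = hex_asc_upper_lo(major);	/* low nibble  -> 'A' */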
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 22a9bb1abae1..b3539928073c 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -295,7 +295,6 @@ enum sci_controller_states {
#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS)
struct isci_pci_info {
- struct msix_entry msix_entries[SCI_MAX_MSIX_INT];
struct isci_host *hosts[SCI_MAX_CONTROLLERS];
struct isci_orom *orom;
};
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 77128d680e3b..0b5b5db0d0f8 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -350,16 +350,12 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
*/
num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT;
- for (i = 0; i < num_msix; i++)
- pci_info->msix_entries[i].entry = i;
-
- err = pci_enable_msix_exact(pdev, pci_info->msix_entries, num_msix);
- if (err)
+ err = pci_alloc_irq_vectors(pdev, num_msix, num_msix, PCI_IRQ_MSIX);
+ if (err < 0)
goto intx;
for (i = 0; i < num_msix; i++) {
int id = i / SCI_NUM_MSI_X_INT;
- struct msix_entry *msix = &pci_info->msix_entries[i];
irq_handler_t isr;
ihost = pci_info->hosts[id];
@@ -369,8 +365,8 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
else
isr = isci_msix_isr;
- err = devm_request_irq(&pdev->dev, msix->vector, isr, 0,
- DRV_NAME"-msix", ihost);
+ err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
+ isr, 0, DRV_NAME"-msix", ihost);
if (!err)
continue;
@@ -378,18 +374,19 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
while (i--) {
id = i / SCI_NUM_MSI_X_INT;
ihost = pci_info->hosts[id];
- msix = &pci_info->msix_entries[i];
- devm_free_irq(&pdev->dev, msix->vector, ihost);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
+ ihost);
}
- pci_disable_msix(pdev);
+ pci_free_irq_vectors(pdev);
goto intx;
}
return 0;
intx:
for_each_isci_host(i, ihost, pdev) {
- err = devm_request_irq(&pdev->dev, pdev->irq, isci_intx_isr,
- IRQF_SHARED, DRV_NAME"-intx", ihost);
+ err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, 0),
+ isci_intx_isr, IRQF_SHARED, DRV_NAME"-intx",
+ ihost);
if (err)
break;
}
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
index 8ac646e5eddc..a2bbe46f8ccb 100644
--- a/drivers/scsi/isci/probe_roms.c
+++ b/drivers/scsi/isci/probe_roms.c
@@ -54,6 +54,7 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
len = pci_biosrom_size(pdev);
rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL);
if (!rom) {
+ pci_unmap_biosrom(oprom);
dev_warn(&pdev->dev,
"Unable to allocate memory for orom\n");
return NULL;
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 1910100638a2..e3f2a5359d71 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -66,6 +66,9 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
{
static const char * const strings[] = RNC_STATES;
+ if (state >= ARRAY_SIZE(strings))
+ return "UNKNOWN";
+
return strings[state];
}
#undef C
@@ -454,7 +457,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
* the device since it's being invalidated anyway */
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: SCIC Remote Node Context 0x%p was "
- "suspeneded by hardware while being "
+ "suspended by hardware while being "
"invalidated.\n", __func__, sci_rnc);
break;
default:
@@ -473,7 +476,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
* the device since it's being resumed anyway */
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: SCIC Remote Node Context 0x%p was "
- "suspeneded by hardware while being resumed.\n",
+ "suspended by hardware while being resumed.\n",
__func__, sci_rnc);
break;
default:
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index b709d2b20880..47f66e949745 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -2473,7 +2473,7 @@ static void isci_request_process_response_iu(
"%s: resp_iu = %p "
"resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
"resp_iu->response_data_len = %x, "
- "resp_iu->sense_data_len = %x\nrepsonse data: ",
+ "resp_iu->sense_data_len = %x\nresponse data: ",
__func__,
resp_iu,
resp_iu->status,
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 880a9068ca12..6103231104da 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -68,10 +68,14 @@ static void fc_disc_stop_rports(struct fc_disc *disc)
lport = fc_disc_lport(disc);
- mutex_lock(&disc->disc_mutex);
- list_for_each_entry_rcu(rdata, &disc->rports, peers)
- lport->tt.rport_logoff(rdata);
- mutex_unlock(&disc->disc_mutex);
+ rcu_read_lock();
+ list_for_each_entry_rcu(rdata, &disc->rports, peers) {
+ if (kref_get_unless_zero(&rdata->kref)) {
+ fc_rport_logoff(rdata);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
+ }
+ rcu_read_unlock();
}
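The rework above replaces the discovery mutex with the standard
RCU-plus-kref walk: the list may be traversed while entries are
concurrently unlinked, and kref_get_unless_zero() refuses any object
whose last reference is already gone. The general idiom, sketched with
hypothetical names:

	rcu_read_lock();
	list_for_each_entry_rcu(obj, &head, node) {
		if (!kref_get_unless_zero(&obj->kref))
			continue;		/* already on its way to being freed */
		use(obj);			/* reference pins obj; must not sleep here */
		kref_put(&obj->kref, obj_release);
	}
	rcu_read_unlock();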
/**
@@ -150,7 +154,7 @@ static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp)
break;
}
}
- lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+ fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
/*
* If not doing a complete rediscovery, do GPN_ID on
@@ -178,7 +182,7 @@ reject:
FC_DISC_DBG(disc, "Received a bad RSCN frame\n");
rjt_data.reason = ELS_RJT_LOGIC;
rjt_data.explan = ELS_EXPL_NONE;
- lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
fc_frame_free(fp);
}
@@ -289,15 +293,19 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
* Skip ports which were never discovered. These are the dNS port
* and ports which were created by PLOGI.
*/
+ rcu_read_lock();
list_for_each_entry_rcu(rdata, &disc->rports, peers) {
- if (!rdata->disc_id)
+ if (!kref_get_unless_zero(&rdata->kref))
continue;
- if (rdata->disc_id == disc->disc_id)
- lport->tt.rport_login(rdata);
- else
- lport->tt.rport_logoff(rdata);
+ if (rdata->disc_id) {
+ if (rdata->disc_id == disc->disc_id)
+ fc_rport_login(rdata);
+ else
+ fc_rport_logoff(rdata);
+ }
+ kref_put(&rdata->kref, fc_rport_destroy);
}
-
+ rcu_read_unlock();
mutex_unlock(&disc->disc_mutex);
disc->disc_callback(lport, event);
mutex_lock(&disc->disc_mutex);
@@ -446,7 +454,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
if (ids.port_id != lport->port_id &&
ids.port_name != lport->wwpn) {
- rdata = lport->tt.rport_create(lport, ids.port_id);
+ rdata = fc_rport_create(lport, ids.port_id);
if (rdata) {
rdata->ids.port_name = ids.port_name;
rdata->disc_id = disc->disc_id;
@@ -592,7 +600,6 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
lport = rdata->local_port;
disc = &lport->disc;
- mutex_lock(&disc->disc_mutex);
if (PTR_ERR(fp) == -FC_EX_CLOSED)
goto out;
if (IS_ERR(fp))
@@ -607,37 +614,41 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
goto redisc;
pn = (struct fc_ns_gid_pn *)(cp + 1);
port_name = get_unaligned_be64(&pn->fn_wwpn);
+ mutex_lock(&rdata->rp_mutex);
if (rdata->ids.port_name == -1)
rdata->ids.port_name = port_name;
else if (rdata->ids.port_name != port_name) {
FC_DISC_DBG(disc, "GPN_ID accepted. WWPN changed. "
"Port-id %6.6x wwpn %16.16llx\n",
rdata->ids.port_id, port_name);
- lport->tt.rport_logoff(rdata);
-
- new_rdata = lport->tt.rport_create(lport,
- rdata->ids.port_id);
+ mutex_unlock(&rdata->rp_mutex);
+ fc_rport_logoff(rdata);
+ mutex_lock(&lport->disc.disc_mutex);
+ new_rdata = fc_rport_create(lport, rdata->ids.port_id);
+ mutex_unlock(&lport->disc.disc_mutex);
if (new_rdata) {
new_rdata->disc_id = disc->disc_id;
- lport->tt.rport_login(new_rdata);
+ fc_rport_login(new_rdata);
}
goto out;
}
rdata->disc_id = disc->disc_id;
- lport->tt.rport_login(rdata);
+ mutex_unlock(&rdata->rp_mutex);
+ fc_rport_login(rdata);
} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
FC_DISC_DBG(disc, "GPN_ID rejected reason %x exp %x\n",
cp->ct_reason, cp->ct_explan);
- lport->tt.rport_logoff(rdata);
+ fc_rport_logoff(rdata);
} else {
FC_DISC_DBG(disc, "GPN_ID unexpected response code %x\n",
ntohs(cp->ct_cmd));
redisc:
+ mutex_lock(&disc->disc_mutex);
fc_disc_restart(disc);
+ mutex_unlock(&disc->disc_mutex);
}
out:
- mutex_unlock(&disc->disc_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -678,7 +689,7 @@ static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
{
struct fc_rport_priv *rdata;
- rdata = lport->tt.rport_create(lport, dp->port_id);
+ rdata = fc_rport_create(lport, dp->port_id);
if (!rdata)
return -ENOMEM;
rdata->disc_id = 0;
@@ -708,7 +719,7 @@ static void fc_disc_stop(struct fc_lport *lport)
static void fc_disc_stop_final(struct fc_lport *lport)
{
fc_disc_stop(lport);
- lport->tt.rport_flush_queue();
+ fc_rport_flush_queue();
}
/**
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index c2384d501470..6384a98048af 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -67,7 +67,7 @@ struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did,
fc_fill_fc_hdr(fp, r_ctl, did, lport->port_id, fh_type,
FC_FCTL_REQ, 0);
- return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
+ return fc_exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
}
EXPORT_SYMBOL(fc_elsct_send);
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 16ca31ad5ec0..42bcf7f3a0f9 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -94,6 +94,7 @@ struct fc_exch_pool {
struct fc_exch_mgr {
struct fc_exch_pool __percpu *pool;
mempool_t *ep_pool;
+ struct fc_lport *lport;
enum fc_class class;
struct kref kref;
u16 min_xid;
@@ -362,8 +363,10 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
fc_exch_hold(ep); /* hold for timer */
if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
- msecs_to_jiffies(timer_msec)))
+ msecs_to_jiffies(timer_msec))) {
+ FC_EXCH_DBG(ep, "Exchange already queued\n");
fc_exch_release(ep);
+ }
}
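The timer path above follows the usual hold-before-queue idiom: take a
reference for the pending work up front, and drop it only if
queue_delayed_work() reports that the work was already queued. Sketched
with hypothetical names:

	kref_get(&obj->kref);		/* reference owned by the pending work */
	if (!queue_delayed_work(wq, &obj->work, delay)) {
		pr_debug("work already queued\n");
		kref_put(&obj->kref, obj_release);	/* nothing new was queued */
	}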
/**
@@ -406,6 +409,8 @@ static int fc_exch_done_locked(struct fc_exch *ep)
return rc;
}
+static struct fc_exch fc_quarantine_exch;
+
/**
* fc_exch_ptr_get() - Return an exchange from an exchange pool
* @pool: Exchange Pool to get an exchange from
@@ -450,14 +455,17 @@ static void fc_exch_delete(struct fc_exch *ep)
/* update cache of free slot */
index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
- if (pool->left == FC_XID_UNKNOWN)
- pool->left = index;
- else if (pool->right == FC_XID_UNKNOWN)
- pool->right = index;
- else
- pool->next_index = index;
-
- fc_exch_ptr_set(pool, index, NULL);
+ if (!(ep->state & FC_EX_QUARANTINE)) {
+ if (pool->left == FC_XID_UNKNOWN)
+ pool->left = index;
+ else if (pool->right == FC_XID_UNKNOWN)
+ pool->right = index;
+ else
+ pool->next_index = index;
+ fc_exch_ptr_set(pool, index, NULL);
+ } else {
+ fc_exch_ptr_set(pool, index, &fc_quarantine_exch);
+ }
list_del(&ep->ex_list);
spin_unlock_bh(&pool->lock);
fc_exch_release(ep); /* drop hold for exch in mp */
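The quarantine logic retires an exchange ID instead of recycling it: a
freed-but-quarantined slot is filled with a sentinel pointer rather than
NULL, so the free-slot cache skips it and later lookups fail cleanly
until the xid is safe to reuse. In outline, with hypothetical names:

	static struct exch quarantine;		/* sentinel, never dereferenced */

	/* delete */
	if (ep->state & QUARANTINED)
		table[idx] = &quarantine;	/* keep the xid unusable */
	else
		table[idx] = NULL;		/* slot returns to the free cache */

	/* lookup */
	ep = table[idx];
	if (ep == &quarantine)
		ep = NULL;			/* known xid, but blocked */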
@@ -525,8 +533,7 @@ out:
* Note: The frame will be freed either by a direct call to fc_frame_free(fp)
* or indirectly by calling libfc_function_template.frame_send().
*/
-static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
- struct fc_frame *fp)
+int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp)
{
struct fc_exch *ep;
int error;
@@ -536,6 +543,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
spin_unlock_bh(&ep->ex_lock);
return error;
}
+EXPORT_SYMBOL(fc_seq_send);
/**
* fc_seq_alloc() - Allocate a sequence for a given exchange
@@ -577,7 +585,7 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
* for a given sequence/exchange pair
* @sp: The sequence/exchange to get a new exchange for
*/
-static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
+struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
{
struct fc_exch *ep = fc_seq_exch(sp);
@@ -587,16 +595,16 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
return sp;
}
+EXPORT_SYMBOL(fc_seq_start_next);
/*
* Set the response handler for the exchange associated with a sequence.
*
* Note: May sleep if invoked from outside a response handler.
*/
-static void fc_seq_set_resp(struct fc_seq *sp,
- void (*resp)(struct fc_seq *, struct fc_frame *,
- void *),
- void *arg)
+void fc_seq_set_resp(struct fc_seq *sp,
+ void (*resp)(struct fc_seq *, struct fc_frame *, void *),
+ void *arg)
{
struct fc_exch *ep = fc_seq_exch(sp);
DEFINE_WAIT(wait);
@@ -615,12 +623,20 @@ static void fc_seq_set_resp(struct fc_seq *sp,
ep->arg = arg;
spin_unlock_bh(&ep->ex_lock);
}
+EXPORT_SYMBOL(fc_seq_set_resp);
/**
* fc_exch_abort_locked() - Abort an exchange
* @ep: The exchange to be aborted
* @timer_msec: The period of time to wait before aborting
*
+ * Abort an exchange and sequence. Generally called because of a
+ * exchange timeout or an abort from the upper layer.
+ *
+ * A timer_msec can be specified for abort timeout, if non-zero
+ * timer_msec value is specified then exchange resp handler
+ * will be called with timeout error if no response to abort.
+ *
* Locking notes: Called with exch lock held
*
* Return value: 0 on success else error code
@@ -632,9 +648,13 @@ static int fc_exch_abort_locked(struct fc_exch *ep,
struct fc_frame *fp;
int error;
+ FC_EXCH_DBG(ep, "exch: abort, time %d msecs\n", timer_msec);
if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
- ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP))
+ ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
+ FC_EXCH_DBG(ep, "exch: already completed esb %x state %x\n",
+ ep->esb_stat, ep->state);
return -ENXIO;
+ }
/*
* Send the abort on a new sequence if possible.
@@ -680,8 +700,7 @@ static int fc_exch_abort_locked(struct fc_exch *ep,
*
* Return value: 0 on success else error code
*/
-static int fc_seq_exch_abort(const struct fc_seq *req_sp,
- unsigned int timer_msec)
+int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
{
struct fc_exch *ep;
int error;
@@ -758,7 +777,7 @@ static void fc_exch_timeout(struct work_struct *work)
u32 e_stat;
int rc = 1;
- FC_EXCH_DBG(ep, "Exchange timed out\n");
+ FC_EXCH_DBG(ep, "Exchange timed out state %x\n", ep->state);
spin_lock_bh(&ep->ex_lock);
if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
@@ -821,14 +840,18 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
/* peek cache of free slot */
if (pool->left != FC_XID_UNKNOWN) {
- index = pool->left;
- pool->left = FC_XID_UNKNOWN;
- goto hit;
+ if (!WARN_ON(fc_exch_ptr_get(pool, pool->left))) {
+ index = pool->left;
+ pool->left = FC_XID_UNKNOWN;
+ goto hit;
+ }
}
if (pool->right != FC_XID_UNKNOWN) {
- index = pool->right;
- pool->right = FC_XID_UNKNOWN;
- goto hit;
+ if (!WARN_ON(fc_exch_ptr_get(pool, pool->right))) {
+ index = pool->right;
+ pool->right = FC_XID_UNKNOWN;
+ goto hit;
+ }
}
index = pool->next_index;
@@ -888,14 +911,19 @@ err:
* EM is selected when a NULL match function pointer is encountered
* or when a call to a match function returns true.
*/
-static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
- struct fc_frame *fp)
+static struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
+ struct fc_frame *fp)
{
struct fc_exch_mgr_anchor *ema;
+ struct fc_exch *ep;
- list_for_each_entry(ema, &lport->ema_list, ema_list)
- if (!ema->match || ema->match(fp))
- return fc_exch_em_alloc(lport, ema->mp);
+ list_for_each_entry(ema, &lport->ema_list, ema_list) {
+ if (!ema->match || ema->match(fp)) {
+ ep = fc_exch_em_alloc(lport, ema->mp);
+ if (ep)
+ return ep;
+ }
+ }
return NULL;
}
@@ -906,14 +934,17 @@ static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
*/
static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
{
+ struct fc_lport *lport = mp->lport;
struct fc_exch_pool *pool;
struct fc_exch *ep = NULL;
u16 cpu = xid & fc_cpu_mask;
+ if (xid == FC_XID_UNKNOWN)
+ return NULL;
+
if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
- printk_ratelimited(KERN_ERR
- "libfc: lookup request for XID = %d, "
- "indicates invalid CPU %d\n", xid, cpu);
+ pr_err("host%u: lport %6.6x: xid %d invalid CPU %d\n:",
+ lport->host->host_no, lport->port_id, xid, cpu);
return NULL;
}
@@ -921,6 +952,10 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
pool = per_cpu_ptr(mp->pool, cpu);
spin_lock_bh(&pool->lock);
ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
+ if (ep == &fc_quarantine_exch) {
+ FC_LPORT_DBG(lport, "xid %x quarantined\n", xid);
+ ep = NULL;
+ }
if (ep) {
WARN_ON(ep->xid != xid);
fc_exch_hold(ep);
@@ -938,7 +973,7 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
*
* Note: May sleep if invoked from outside a response handler.
*/
-static void fc_exch_done(struct fc_seq *sp)
+void fc_exch_done(struct fc_seq *sp)
{
struct fc_exch *ep = fc_seq_exch(sp);
int rc;
@@ -951,6 +986,7 @@ static void fc_exch_done(struct fc_seq *sp)
if (!rc)
fc_exch_delete(ep);
}
+EXPORT_SYMBOL(fc_exch_done);
/**
* fc_exch_resp() - Allocate a new exchange for a response frame
@@ -1197,8 +1233,8 @@ static void fc_exch_set_addr(struct fc_exch *ep,
*
* The received frame is not freed.
*/
-static void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
- struct fc_seq_els_data *els_data)
+void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
+ struct fc_seq_els_data *els_data)
{
switch (els_cmd) {
case ELS_LS_RJT:
@@ -1217,6 +1253,7 @@ static void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd);
}
}
+EXPORT_SYMBOL_GPL(fc_seq_els_rsp_send);
/**
* fc_seq_send_last() - Send a sequence that is the last in the exchange
@@ -1258,8 +1295,10 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
*/
if (fc_sof_needs_ack(fr_sof(rx_fp))) {
fp = fc_frame_alloc(lport, 0);
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(ep, "Drop ACK request, out of memory\n");
return;
+ }
fh = fc_frame_header_get(fp);
fh->fh_r_ctl = FC_RCTL_ACK_1;
@@ -1312,13 +1351,18 @@ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
struct fc_frame_header *rx_fh;
struct fc_frame_header *fh;
struct fc_ba_rjt *rp;
+ struct fc_seq *sp;
struct fc_lport *lport;
unsigned int f_ctl;
lport = fr_dev(rx_fp);
+ sp = fr_seq(rx_fp);
fp = fc_frame_alloc(lport, sizeof(*rp));
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(fc_seq_exch(sp),
+ "Drop BA_RJT request, out of memory\n");
return;
+ }
fh = fc_frame_header_get(fp);
rx_fh = fc_frame_header_get(rx_fp);
@@ -1383,14 +1427,17 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
if (!ep)
goto reject;
+ FC_EXCH_DBG(ep, "exch: ABTS received\n");
fp = fc_frame_alloc(ep->lp, sizeof(*ap));
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(ep, "Drop ABTS request, out of memory\n");
goto free;
+ }
spin_lock_bh(&ep->ex_lock);
if (ep->esb_stat & ESB_ST_COMPLETE) {
spin_unlock_bh(&ep->ex_lock);
-
+ FC_EXCH_DBG(ep, "exch: ABTS rejected, exchange complete\n");
fc_frame_free(fp);
goto reject;
}
@@ -1433,7 +1480,7 @@ reject:
* A reference will be held on the exchange/sequence for the caller, which
* must call fc_seq_release().
*/
-static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
+struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_exch_mgr_anchor *ema;
@@ -1447,15 +1494,17 @@ static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
break;
return fr_seq(fp);
}
+EXPORT_SYMBOL(fc_seq_assign);
/**
* fc_seq_release() - Release the hold
* @sp: The sequence.
*/
-static void fc_seq_release(struct fc_seq *sp)
+void fc_seq_release(struct fc_seq *sp)
{
fc_exch_release(fc_seq_exch(sp));
}
+EXPORT_SYMBOL(fc_seq_release);
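
/*
 * A minimal sketch, assuming a hypothetical LLD receive path, of the
 * pairing required by the two exported helpers above: the reference
 * taken by fc_seq_assign() must be dropped with fc_seq_release() once
 * the caller is done with the sequence.
 */
static void example_recv_frame(struct fc_lport *lport, struct fc_frame *fp)
{
	struct fc_seq *sp;

	sp = fc_seq_assign(lport, fp);
	if (!sp) {
		fc_frame_free(fp);
		return;
	}
	/* ... consume the frame ... */
	fc_seq_release(sp);
}
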
/**
* fc_exch_recv_req() - Handler for an incoming request
@@ -1491,7 +1540,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
* The upper-level protocol may request one later, if needed.
*/
if (fh->fh_rx_id == htons(FC_XID_UNKNOWN))
- return lport->tt.lport_recv(lport, fp);
+ return fc_lport_recv(lport, fp);
reject = fc_seq_lookup_recip(lport, mp, fp);
if (reject == FC_RJT_NONE) {
@@ -1512,7 +1561,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
* first.
*/
if (!fc_invoke_resp(ep, sp, fp))
- lport->tt.lport_recv(lport, fp);
+ fc_lport_recv(lport, fp);
fc_exch_release(ep); /* release from lookup */
} else {
FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n",
@@ -1562,9 +1611,6 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
if (fc_sof_is_init(sof)) {
sp->ssb_stat |= SSB_ST_RESP;
sp->id = fh->fh_seq_id;
- } else if (sp->id != fh->fh_seq_id) {
- atomic_inc(&mp->stats.seq_not_found);
- goto rel;
}
f_ctl = ntoh24(fh->fh_f_ctl);
@@ -1761,7 +1807,10 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
fc_frame_free(fp);
break;
case FC_RCTL_BA_ABTS:
- fc_exch_recv_abts(ep, fp);
+ if (ep)
+ fc_exch_recv_abts(ep, fp);
+ else
+ fc_frame_free(fp);
break;
default: /* ignore junk */
fc_frame_free(fp);
@@ -1784,11 +1833,16 @@ static void fc_seq_ls_acc(struct fc_frame *rx_fp)
struct fc_lport *lport;
struct fc_els_ls_acc *acc;
struct fc_frame *fp;
+ struct fc_seq *sp;
lport = fr_dev(rx_fp);
+ sp = fr_seq(rx_fp);
fp = fc_frame_alloc(lport, sizeof(*acc));
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(fc_seq_exch(sp),
+ "exch: drop LS_ACC, out of memory\n");
return;
+ }
acc = fc_frame_payload_get(fp, sizeof(*acc));
memset(acc, 0, sizeof(*acc));
acc->la_cmd = ELS_LS_ACC;
@@ -1811,11 +1865,16 @@ static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
struct fc_lport *lport;
struct fc_els_ls_rjt *rjt;
struct fc_frame *fp;
+ struct fc_seq *sp;
lport = fr_dev(rx_fp);
+ sp = fr_seq(rx_fp);
fp = fc_frame_alloc(lport, sizeof(*rjt));
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(fc_seq_exch(sp),
+ "exch: drop LS_ACC, out of memory\n");
return;
+ }
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
memset(rjt, 0, sizeof(*rjt));
rjt->er_cmd = ELS_LS_RJT;
@@ -1960,8 +2019,7 @@ static void fc_exch_els_rec(struct fc_frame *rfp)
enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
enum fc_els_rjt_explan explan;
u32 sid;
- u16 rxid;
- u16 oxid;
+ u16 xid, rxid, oxid;
lport = fr_dev(rfp);
rp = fc_frame_payload_get(rfp, sizeof(*rp));
@@ -1972,18 +2030,35 @@ static void fc_exch_els_rec(struct fc_frame *rfp)
rxid = ntohs(rp->rec_rx_id);
oxid = ntohs(rp->rec_ox_id);
- ep = fc_exch_lookup(lport,
- sid == fc_host_port_id(lport->host) ? oxid : rxid);
explan = ELS_EXPL_OXID_RXID;
- if (!ep)
+ if (sid == fc_host_port_id(lport->host))
+ xid = oxid;
+ else
+ xid = rxid;
+ if (xid == FC_XID_UNKNOWN) {
+ FC_LPORT_DBG(lport,
+ "REC request from %x: invalid rxid %x oxid %x\n",
+ sid, rxid, oxid);
+ goto reject;
+ }
+ ep = fc_exch_lookup(lport, xid);
+ if (!ep) {
+ FC_LPORT_DBG(lport,
+ "REC request from %x: rxid %x oxid %x not found\n",
+ sid, rxid, oxid);
goto reject;
+ }
+ FC_EXCH_DBG(ep, "REC request from %x: rxid %x oxid %x\n",
+ sid, rxid, oxid);
if (ep->oid != sid || oxid != ep->oxid)
goto rel;
if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid)
goto rel;
fp = fc_frame_alloc(lport, sizeof(*acc));
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(ep, "Drop REC request, out of memory\n");
goto out;
+ }
acc = fc_frame_payload_get(fp, sizeof(*acc));
memset(acc, 0, sizeof(*acc));
@@ -2065,6 +2140,24 @@ cleanup:
* @arg: The argument to be passed to the response handler
* @timer_msec: The timeout period for the exchange
*
+ * The exchange response handler is set in this routine to the resp()
+ * function pointer. It can be called in two scenarios: if a timeout
+ * occurs or if a response frame is received for the exchange. The
+ * fc_frame pointer passed to the response handler will also indicate a
+ * timeout as an error using the IS_ERR related macros.
+ *
+ * The exchange destructor handler is also set in this routine.
+ * The destructor handler is invoked by the EM layer when the exchange
+ * is about to be freed; the caller can use it to free its own
+ * resources along with the exchange.
+ *
+ * The arg is passed back to the resp and destructor handlers.
+ *
+ * The timeout value (in msec) for an exchange is set if a non-zero
+ * timer_msec argument is specified. The timer is canceled when
+ * it fires or when the exchange is done. The exchange timeout handler
+ * is registered by the EM layer.
+ *
* The frame pointer with some of the header's fields must be
* filled before calling this routine, those fields are:
*
@@ -2075,14 +2168,13 @@ cleanup:
* - frame control
* - parameter or relative offset
*/
-static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
- struct fc_frame *fp,
- void (*resp)(struct fc_seq *,
- struct fc_frame *fp,
- void *arg),
- void (*destructor)(struct fc_seq *,
- void *),
- void *arg, u32 timer_msec)
+struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
+ struct fc_frame *fp,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *fp,
+ void *arg),
+ void (*destructor)(struct fc_seq *, void *),
+ void *arg, u32 timer_msec)
{
struct fc_exch *ep;
struct fc_seq *sp = NULL;
@@ -2101,7 +2193,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
ep->resp = resp;
ep->destructor = destructor;
ep->arg = arg;
- ep->r_a_tov = FC_DEF_R_A_TOV;
+ ep->r_a_tov = lport->r_a_tov;
ep->lp = lport;
sp = &ep->seq;
@@ -2135,6 +2227,7 @@ err:
fc_exch_delete(ep);
return NULL;
}
+EXPORT_SYMBOL(fc_exch_seq_send);
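
/*
 * A minimal sketch, with hypothetical example_* names, of the calling
 * contract documented above: the response handler receives either a
 * real frame or an ERR_PTR() encoded error (e.g. -FC_EX_TIMEOUT on
 * timer expiry), and the destructor runs when the exchange is freed.
 */
static void example_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
	if (IS_ERR(fp)) {
		pr_debug("exchange error %ld\n", PTR_ERR(fp));
		return;		/* timed out or closed; no frame to free */
	}
	/* ... process the response payload ... */
	fc_frame_free(fp);
}

static void example_destructor(struct fc_seq *sp, void *arg)
{
	kfree(arg);		/* caller context dies with the exchange */
}

/* typical call, with the frame header already filled in:
 *	fc_exch_seq_send(lport, fp, example_resp, example_destructor,
 *			 ctx, 2 * lport->r_a_tov);
 */
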
/**
* fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command
@@ -2176,6 +2269,7 @@ static void fc_exch_rrq(struct fc_exch *ep)
return;
retry:
+ FC_EXCH_DBG(ep, "exch: RRQ send failed\n");
spin_lock_bh(&ep->ex_lock);
if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
spin_unlock_bh(&ep->ex_lock);
@@ -2218,6 +2312,8 @@ static void fc_exch_els_rrq(struct fc_frame *fp)
if (!ep)
goto reject;
spin_lock_bh(&ep->ex_lock);
+ FC_EXCH_DBG(ep, "RRQ request from %x: xid %x rxid %x oxid %x\n",
+ sid, xid, ntohs(rp->rrq_rx_id), ntohs(rp->rrq_ox_id));
if (ep->oxid != ntohs(rp->rrq_ox_id))
goto unlock_reject;
if (ep->rxid != ntohs(rp->rrq_rx_id) &&
@@ -2385,6 +2481,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
return NULL;
mp->class = class;
+ mp->lport = lport;
/* adjust em exch xid range for offload */
mp->min_xid = min_xid;
@@ -2558,36 +2655,9 @@ EXPORT_SYMBOL(fc_exch_recv);
*/
int fc_exch_init(struct fc_lport *lport)
{
- if (!lport->tt.seq_start_next)
- lport->tt.seq_start_next = fc_seq_start_next;
-
- if (!lport->tt.seq_set_resp)
- lport->tt.seq_set_resp = fc_seq_set_resp;
-
- if (!lport->tt.exch_seq_send)
- lport->tt.exch_seq_send = fc_exch_seq_send;
-
- if (!lport->tt.seq_send)
- lport->tt.seq_send = fc_seq_send;
-
- if (!lport->tt.seq_els_rsp_send)
- lport->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
-
- if (!lport->tt.exch_done)
- lport->tt.exch_done = fc_exch_done;
-
if (!lport->tt.exch_mgr_reset)
lport->tt.exch_mgr_reset = fc_exch_mgr_reset;
- if (!lport->tt.seq_exch_abort)
- lport->tt.seq_exch_abort = fc_seq_exch_abort;
-
- if (!lport->tt.seq_assign)
- lport->tt.seq_assign = fc_seq_assign;
-
- if (!lport->tt.seq_release)
- lport->tt.seq_release = fc_seq_release;
-
return 0;
}
EXPORT_SYMBOL(fc_exch_init);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 5121272f28fd..0e67621477a8 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -122,6 +122,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
#define FC_HRD_ERROR 9
#define FC_CRC_ERROR 10
#define FC_TIMED_OUT 11
+#define FC_TRANS_RESET 12
/*
* Error recovery timeout values.
@@ -195,7 +196,7 @@ static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
* @seq: The sequence that the FCP packet is on (required by destructor API)
* @fsp: The FCP packet to be released
*
- * This routine is called by a destructor callback in the exch_seq_send()
+ * This routine is called by a destructor callback in the fc_exch_seq_send()
* routine of the libfc Transport Template. The 'struct fc_seq' is a required
* argument even though it is not used by this routine.
*
@@ -253,8 +254,21 @@ static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
*/
static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
{
- if (!(fsp->state & FC_SRB_COMPL))
+ if (!(fsp->state & FC_SRB_COMPL)) {
mod_timer(&fsp->timer, jiffies + delay);
+ fsp->timer_delay = delay;
+ }
+}
+
+static void fc_fcp_abort_done(struct fc_fcp_pkt *fsp)
+{
+ fsp->state |= FC_SRB_ABORTED;
+ fsp->state &= ~FC_SRB_ABORT_PENDING;
+
+ if (fsp->wait_for_comp)
+ complete(&fsp->tm_done);
+ else
+ fc_fcp_complete_locked(fsp);
}
/**
@@ -264,6 +278,8 @@ static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
*/
static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
{
+ int rc;
+
if (!fsp->seq_ptr)
return -EINVAL;
@@ -271,7 +287,16 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
put_cpu();
fsp->state |= FC_SRB_ABORT_PENDING;
- return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
+ rc = fc_seq_exch_abort(fsp->seq_ptr, 0);
+ /*
+ * fc_seq_exch_abort() might return -ENXIO if
+ * the sequence is already completed
+ */
+ if (rc == -ENXIO) {
+ fc_fcp_abort_done(fsp);
+ rc = 0;
+ }
+ return rc;
}
/**
@@ -283,16 +308,16 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
* fc_io_compl() will notify the SCSI-ml that the I/O is done.
* The SCSI-ml will retry the command.
*/
-static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
+static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp, int status_code)
{
if (fsp->seq_ptr) {
- fsp->lp->tt.exch_done(fsp->seq_ptr);
+ fc_exch_done(fsp->seq_ptr);
fsp->seq_ptr = NULL;
}
fsp->state &= ~FC_SRB_ABORT_PENDING;
fsp->io_status = 0;
- fsp->status_code = FC_ERROR;
+ fsp->status_code = status_code;
fc_fcp_complete_locked(fsp);
}
@@ -402,8 +427,6 @@ static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
if (!can_queue)
can_queue = 1;
lport->host->can_queue = can_queue;
- shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n"
- "Reducing can_queue to %d.\n", can_queue);
unlock:
spin_unlock_irqrestore(lport->host->host_lock, flags);
@@ -430,10 +453,29 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
put_cpu();
/* error case */
fc_fcp_can_queue_ramp_down(lport);
+ shost_printk(KERN_ERR, lport->host,
+ "libfc: Could not allocate frame, "
+ "reducing can_queue to %d.\n", lport->host->can_queue);
return NULL;
}
/**
+ * get_fsp_rec_tov() - Helper function to get REC_TOV
+ * @fsp: the FCP packet
+ *
+ * Returns rec tov in jiffies as max(rpriv->e_d_tov, FC_DEF_E_D_TOV) + 1 second
+ */
+static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp)
+{
+ struct fc_rport_libfc_priv *rpriv = fsp->rport->dd_data;
+ unsigned int e_d_tov = FC_DEF_E_D_TOV;
+
+ if (rpriv && rpriv->e_d_tov > e_d_tov)
+ e_d_tov = rpriv->e_d_tov;
+ return msecs_to_jiffies(e_d_tov) + HZ;
+}
+
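/*
 * Worked example, assuming libfc's default FC_DEF_E_D_TOV of 2000 msec
 * and no larger value negotiated for the rport:
 *	get_fsp_rec_tov() = msecs_to_jiffies(2000) + HZ
 * i.e. 2 s + 1 s, giving a 3 second REC timeout.
 */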
+/**
* fc_fcp_recv_data() - Handler for receiving SCSI-FCP data from a target
* @fsp: The FCP packet the data is on
* @fp: The data frame
@@ -536,8 +578,10 @@ crc_err:
* and completes the transfer, call the completion handler.
*/
if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
- fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
+ fsp->xfer_len == fsp->data_len - fsp->scsi_resid) {
+ FC_FCP_DBG(fsp, "complete out-of-order sequence\n");
fc_fcp_complete_locked(fsp);
+ }
return;
err:
fc_fcp_recovery(fsp, host_bcode);
@@ -609,7 +653,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
remaining = seq_blen;
fh_parm_offset = frame_offset = offset;
tlen = 0;
- seq = lport->tt.seq_start_next(seq);
+ seq = fc_seq_start_next(seq);
f_ctl = FC_FC_REL_OFF;
WARN_ON(!seq);
@@ -687,7 +731,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
/*
* send fragment using for a sequence.
*/
- error = lport->tt.seq_send(lport, seq, fp);
+ error = fc_seq_send(lport, seq, fp);
if (error) {
WARN_ON(1); /* send error should be rare */
return error;
@@ -727,15 +771,8 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
ba_done = 0;
}
- if (ba_done) {
- fsp->state |= FC_SRB_ABORTED;
- fsp->state &= ~FC_SRB_ABORT_PENDING;
-
- if (fsp->wait_for_comp)
- complete(&fsp->tm_done);
- else
- fc_fcp_complete_locked(fsp);
- }
+ if (ba_done)
+ fc_fcp_abort_done(fsp);
}
/**
@@ -764,8 +801,11 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
fh = fc_frame_header_get(fp);
r_ctl = fh->fh_r_ctl;
- if (lport->state != LPORT_ST_READY)
+ if (lport->state != LPORT_ST_READY) {
+ FC_FCP_DBG(fsp, "lport state %d, ignoring r_ctl %x\n",
+ lport->state, r_ctl);
goto out;
+ }
if (fc_fcp_lock_pkt(fsp))
goto out;
@@ -774,8 +814,10 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
goto unlock;
}
- if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING))
+ if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING)) {
+ FC_FCP_DBG(fsp, "command aborted, ignoring r_ctl %x\n", r_ctl);
goto unlock;
+ }
if (r_ctl == FC_RCTL_DD_DATA_DESC) {
/*
@@ -910,7 +952,16 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
* Wait at least one jiffy to see if it is delivered.
* If this expires without data, we may do SRR.
*/
- fc_fcp_timer_set(fsp, 2);
+ if (fsp->lp->qfull) {
+ FC_FCP_DBG(fsp, "tgt %6.6x queue busy retry\n",
+ fsp->rport->port_id);
+ return;
+ }
+ FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx data underrun "
+ "len %x, data len %x\n",
+ fsp->rport->port_id,
+ fsp->xfer_len, expected_len, fsp->data_len);
+ fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
return;
}
fsp->status_code = FC_DATA_OVRRUN;
@@ -959,8 +1010,11 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
if (fsp->cdb_status == SAM_STAT_GOOD &&
fsp->xfer_len < fsp->data_len && !fsp->io_status &&
(!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
- fsp->xfer_len < fsp->data_len - fsp->scsi_resid))
+ fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
+ FC_FCP_DBG(fsp, "data underrun, xfer %zx data %x\n",
+ fsp->xfer_len, fsp->data_len);
fsp->status_code = FC_DATA_UNDRUN;
+ }
}
seq = fsp->seq_ptr;
@@ -970,7 +1024,7 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
struct fc_frame *conf_frame;
struct fc_seq *csp;
- csp = lport->tt.seq_start_next(seq);
+ csp = fc_seq_start_next(seq);
conf_frame = fc_fcp_frame_alloc(fsp->lp, 0);
if (conf_frame) {
f_ctl = FC_FC_SEQ_INIT;
@@ -979,10 +1033,10 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
ep->did, ep->sid,
FC_TYPE_FCP, f_ctl, 0);
- lport->tt.seq_send(lport, csp, conf_frame);
+ fc_seq_send(lport, csp, conf_frame);
}
}
- lport->tt.exch_done(seq);
+ fc_exch_done(seq);
}
/*
* Some resets driven by SCSI are not I/Os and do not have
@@ -1000,10 +1054,8 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
*/
static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
{
- struct fc_lport *lport = fsp->lp;
-
if (fsp->seq_ptr) {
- lport->tt.exch_done(fsp->seq_ptr);
+ fc_exch_done(fsp->seq_ptr);
fsp->seq_ptr = NULL;
}
fsp->status_code = error;
@@ -1116,19 +1168,6 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
}
/**
- * get_fsp_rec_tov() - Helper function to get REC_TOV
- * @fsp: the FCP packet
- *
- * Returns rec tov in jiffies as rpriv->e_d_tov + 1 second
- */
-static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp)
-{
- struct fc_rport_libfc_priv *rpriv = fsp->rport->dd_data;
-
- return msecs_to_jiffies(rpriv->e_d_tov) + HZ;
-}
-
-/**
* fc_fcp_cmd_send() - Send a FCP command
* @lport: The local port to send the command on
* @fsp: The FCP packet the command is on
@@ -1165,8 +1204,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
rpriv->local_port->port_id, FC_TYPE_FCP,
FC_FCTL_REQ, 0);
- seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy,
- fsp, 0);
+ seq = fc_exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
if (!seq) {
rc = -1;
goto unlock;
@@ -1196,7 +1234,7 @@ static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
return;
if (error == -FC_EX_CLOSED) {
- fc_fcp_retry_cmd(fsp);
+ fc_fcp_retry_cmd(fsp, FC_ERROR);
goto unlock;
}
@@ -1222,8 +1260,16 @@ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
int rc = FAILED;
unsigned long ticks_left;
- if (fc_fcp_send_abort(fsp))
+ FC_FCP_DBG(fsp, "pkt abort state %x\n", fsp->state);
+ if (fc_fcp_send_abort(fsp)) {
+ FC_FCP_DBG(fsp, "failed to send abort\n");
return FAILED;
+ }
+
+ if (fsp->state & FC_SRB_ABORTED) {
+ FC_FCP_DBG(fsp, "target abort cmd completed\n");
+ return SUCCESS;
+ }
init_completion(&fsp->tm_done);
fsp->wait_for_comp = 1;
@@ -1301,7 +1347,7 @@ static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
spin_lock_bh(&fsp->scsi_pkt_lock);
if (fsp->seq_ptr) {
- lport->tt.exch_done(fsp->seq_ptr);
+ fc_exch_done(fsp->seq_ptr);
fsp->seq_ptr = NULL;
}
fsp->wait_for_comp = 0;
@@ -1355,7 +1401,7 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
if (fh->fh_type != FC_TYPE_BLS)
fc_fcp_resp(fsp, fp);
fsp->seq_ptr = NULL;
- fsp->lp->tt.exch_done(seq);
+ fc_exch_done(seq);
out_unlock:
fc_fcp_unlock_pkt(fsp);
out:
@@ -1394,6 +1440,15 @@ static void fc_fcp_timeout(unsigned long data)
if (fsp->cdb_cmd.fc_tm_flags)
goto unlock;
+ if (fsp->lp->qfull) {
+ FC_FCP_DBG(fsp, "fcp timeout, resetting timer delay %d\n",
+ fsp->timer_delay);
+ setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
+ fc_fcp_timer_set(fsp, fsp->timer_delay);
+ goto unlock;
+ }
+ FC_FCP_DBG(fsp, "fcp timeout, delay %d flags %x state %x\n",
+ fsp->timer_delay, rpriv->flags, fsp->state);
fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
@@ -1486,8 +1541,8 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
switch (rjt->er_reason) {
default:
- FC_FCP_DBG(fsp, "device %x unexpected REC reject "
- "reason %d expl %d\n",
+ FC_FCP_DBG(fsp,
+ "device %x invalid REC reject %d/%d\n",
fsp->rport->port_id, rjt->er_reason,
rjt->er_explan);
/* fall through */
@@ -1503,18 +1558,23 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
break;
case ELS_RJT_LOGIC:
case ELS_RJT_UNAB:
+ FC_FCP_DBG(fsp, "device %x REC reject %d/%d\n",
+ fsp->rport->port_id, rjt->er_reason,
+ rjt->er_explan);
/*
- * If no data transfer, the command frame got dropped
- * so we just retry. If data was transferred, we
- * lost the response but the target has no record,
- * so we abort and retry.
+ * If the response got lost or is stuck in the
+ * queue somewhere we have no idea if and when
+ * it will be received, so quarantine
+ * the xid and retry the command.
*/
- if (rjt->er_explan == ELS_EXPL_OXID_RXID &&
- fsp->xfer_len == 0) {
- fc_fcp_retry_cmd(fsp);
+ if (rjt->er_explan == ELS_EXPL_OXID_RXID) {
+ struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
+ ep->state |= FC_EX_QUARANTINE;
+ fsp->state |= FC_SRB_ABORTED;
+ fc_fcp_retry_cmd(fsp, FC_TRANS_RESET);
break;
}
- fc_fcp_recovery(fsp, FC_ERROR);
+ fc_fcp_recovery(fsp, FC_TRANS_RESET);
break;
}
} else if (opcode == ELS_LS_ACC) {
@@ -1608,7 +1668,9 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
switch (error) {
case -FC_EX_CLOSED:
- fc_fcp_retry_cmd(fsp);
+ FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange closed\n",
+ fsp, fsp->rport->port_id);
+ fc_fcp_retry_cmd(fsp, FC_ERROR);
break;
default:
@@ -1622,8 +1684,8 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
* Assume REC or LS_ACC was lost.
* The exchange manager will have aborted REC, so retry.
*/
- FC_FCP_DBG(fsp, "REC fid %6.6x error error %d retry %d/%d\n",
- fsp->rport->port_id, error, fsp->recov_retry,
+ FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange timeout retry %d/%d\n",
+ fsp, fsp->rport->port_id, fsp->recov_retry,
FC_MAX_RECOV_RETRY);
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_rec(fsp);
@@ -1642,6 +1704,7 @@ out:
*/
static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code)
{
+ FC_FCP_DBG(fsp, "start recovery code %x\n", code);
fsp->status_code = code;
fsp->cdb_status = 0;
fsp->io_status = 0;
@@ -1668,7 +1731,6 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
struct fc_seq *seq;
struct fcp_srr *srr;
struct fc_frame *fp;
- unsigned int rec_tov;
rport = fsp->rport;
rpriv = rport->dd_data;
@@ -1692,10 +1754,9 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
rpriv->local_port->port_id, FC_TYPE_FCP,
FC_FCTL_REQ, 0);
- rec_tov = get_fsp_rec_tov(fsp);
- seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp,
- fc_fcp_pkt_destroy,
- fsp, jiffies_to_msecs(rec_tov));
+ seq = fc_exch_seq_send(lport, fp, fc_fcp_srr_resp,
+ fc_fcp_pkt_destroy,
+ fsp, get_fsp_rec_tov(fsp));
if (!seq)
goto retry;
@@ -1706,7 +1767,7 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
fc_fcp_pkt_hold(fsp); /* hold for outstanding SRR */
return;
retry:
- fc_fcp_retry_cmd(fsp);
+ fc_fcp_retry_cmd(fsp, FC_TRANS_RESET);
}
/**
@@ -1730,9 +1791,9 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
fh = fc_frame_header_get(fp);
/*
- * BUG? fc_fcp_srr_error calls exch_done which would release
+ * BUG? fc_fcp_srr_error calls fc_exch_done which would release
* the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT,
- * then fc_exch_timeout would be sending an abort. The exch_done
+ * then fc_exch_timeout would be sending an abort. The fc_exch_done
* call by fc_fcp_srr_error would prevent fc_exch.c from seeing
* an abort response though.
*/
@@ -1753,7 +1814,7 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
}
fc_fcp_unlock_pkt(fsp);
out:
- fsp->lp->tt.exch_done(seq);
+ fc_exch_done(seq);
fc_frame_free(fp);
}
@@ -1768,20 +1829,22 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
goto out;
switch (PTR_ERR(fp)) {
case -FC_EX_TIMEOUT:
+ FC_FCP_DBG(fsp, "SRR timeout, retries %d\n", fsp->recov_retry);
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_rec(fsp);
else
fc_fcp_recovery(fsp, FC_TIMED_OUT);
break;
case -FC_EX_CLOSED: /* e.g., link failure */
+ FC_FCP_DBG(fsp, "SRR error, exchange closed\n");
/* fall through */
default:
- fc_fcp_retry_cmd(fsp);
+ fc_fcp_retry_cmd(fsp, FC_ERROR);
break;
}
fc_fcp_unlock_pkt(fsp);
out:
- fsp->lp->tt.exch_done(fsp->recov_seq);
+ fc_exch_done(fsp->recov_seq);
}
/**
@@ -1832,8 +1895,13 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
rpriv = rport->dd_data;
if (!fc_fcp_lport_queue_ready(lport)) {
- if (lport->qfull)
+ if (lport->qfull) {
fc_fcp_can_queue_ramp_down(lport);
+ shost_printk(KERN_ERR, lport->host,
+ "libfc: queue full, "
+ "reducing can_queue to %d.\n",
+ lport->host->can_queue);
+ }
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
@@ -1980,15 +2048,26 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
break;
case FC_CMD_ABORTED:
- FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
- "due to FC_CMD_ABORTED\n");
- sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
+ if (host_byte(sc_cmd->result) == DID_TIME_OUT)
+ FC_FCP_DBG(fsp, "Returning DID_TIME_OUT to scsi-ml "
+ "due to FC_CMD_ABORTED\n");
+ else {
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_CMD_ABORTED\n");
+ set_host_byte(sc_cmd, DID_ERROR);
+ }
+ sc_cmd->result |= fsp->io_status;
break;
case FC_CMD_RESET:
FC_FCP_DBG(fsp, "Returning DID_RESET to scsi-ml "
"due to FC_CMD_RESET\n");
sc_cmd->result = (DID_RESET << 16);
break;
+ case FC_TRANS_RESET:
+ FC_FCP_DBG(fsp, "Returning DID_SOFT_ERROR to scsi-ml "
+ "due to FC_TRANS_RESET\n");
+ sc_cmd->result = (DID_SOFT_ERROR << 16);
+ break;
case FC_HRD_ERROR:
FC_FCP_DBG(fsp, "Returning DID_NO_CONNECT to scsi-ml "
"due to FC_HRD_ERROR\n");
@@ -2142,7 +2221,7 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
fc_block_scsi_eh(sc_cmd);
- lport->tt.lport_reset(lport);
+ fc_lport_reset(lport);
wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies,
wait_tmo))
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index c11a638f32e6..d623d084b7ec 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -226,7 +226,7 @@ void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp,
sp = fr_seq(in_fp);
if (sp)
- fr_seq(fp) = fr_dev(in_fp)->tt.seq_start_next(sp);
+ fr_seq(fp) = fc_seq_start_next(sp);
fc_fill_hdr(fp, in_fp, r_ctl, FC_FCTL_RESP, 0, parm_offset);
}
EXPORT_SYMBOL(fc_fill_reply_hdr);
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 50c71678a156..919736a74ffa 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -149,7 +149,7 @@ static const char *fc_lport_state_names[] = {
* @offset: The offset into the response data
*/
struct fc_bsg_info {
- struct fc_bsg_job *job;
+ struct bsg_job *job;
struct fc_lport *lport;
u16 rsp_code;
struct scatterlist *sg;
@@ -200,7 +200,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
"in the DNS or FDMI state, it's in the "
"%d state", rdata->ids.port_id,
lport->state);
- lport->tt.rport_logoff(rdata);
+ fc_rport_logoff(rdata);
}
break;
case RPORT_EV_LOGO:
@@ -237,23 +237,26 @@ static const char *fc_lport_state(struct fc_lport *lport)
* @remote_fid: The FID of the ptp rport
* @remote_wwpn: The WWPN of the ptp rport
* @remote_wwnn: The WWNN of the ptp rport
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
*/
static void fc_lport_ptp_setup(struct fc_lport *lport,
u32 remote_fid, u64 remote_wwpn,
u64 remote_wwnn)
{
- mutex_lock(&lport->disc.disc_mutex);
if (lport->ptp_rdata) {
- lport->tt.rport_logoff(lport->ptp_rdata);
- kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
+ fc_rport_logoff(lport->ptp_rdata);
+ kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
}
- lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->ptp_rdata = fc_rport_create(lport, remote_fid);
kref_get(&lport->ptp_rdata->kref);
lport->ptp_rdata->ids.port_name = remote_wwpn;
lport->ptp_rdata->ids.node_name = remote_wwnn;
mutex_unlock(&lport->disc.disc_mutex);
- lport->tt.rport_login(lport->ptp_rdata);
+ fc_rport_login(lport->ptp_rdata);
fc_lport_enter_ready(lport);
}
@@ -409,7 +412,7 @@ static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
fc_lport_state(lport));
- lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+ fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
fc_frame_free(fp);
}
@@ -478,7 +481,7 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
if (!req) {
rjt_data.reason = ELS_RJT_LOGIC;
rjt_data.explan = ELS_EXPL_NONE;
- lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
} else {
fmt = req->rnid_fmt;
len = sizeof(*rp);
@@ -518,7 +521,7 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
*/
static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
{
- lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+ fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
fc_lport_enter_reset(lport);
fc_frame_free(fp);
}
@@ -620,9 +623,9 @@ int fc_fabric_logoff(struct fc_lport *lport)
lport->tt.disc_stop_final(lport);
mutex_lock(&lport->lp_mutex);
if (lport->dns_rdata)
- lport->tt.rport_logoff(lport->dns_rdata);
+ fc_rport_logoff(lport->dns_rdata);
mutex_unlock(&lport->lp_mutex);
- lport->tt.rport_flush_queue();
+ fc_rport_flush_queue();
mutex_lock(&lport->lp_mutex);
fc_lport_enter_logo(lport);
mutex_unlock(&lport->lp_mutex);
@@ -899,7 +902,7 @@ static void fc_lport_recv_els_req(struct fc_lport *lport,
/*
* Check opcode.
*/
- recv = lport->tt.rport_recv_req;
+ recv = fc_rport_recv_req;
switch (fc_frame_payload_op(fp)) {
case ELS_FLOGI:
if (!lport->point_to_multipoint)
@@ -941,15 +944,14 @@ struct fc4_prov fc_lport_els_prov = {
};
/**
- * fc_lport_recv_req() - The generic lport request handler
+ * fc_lport_recv() - The generic lport request handler
* @lport: The lport that received the request
* @fp: The frame the request is in
*
* Locking Note: This function should not be called with the lport
* lock held because it may grab the lock.
*/
-static void fc_lport_recv_req(struct fc_lport *lport,
- struct fc_frame *fp)
+void fc_lport_recv(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
struct fc_seq *sp = fr_seq(fp);
@@ -978,8 +980,9 @@ drop:
FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
fc_frame_free(fp);
if (sp)
- lport->tt.exch_done(sp);
+ fc_exch_done(sp);
}
+EXPORT_SYMBOL(fc_lport_recv);
/**
* fc_lport_reset() - Reset a local port
@@ -1007,12 +1010,14 @@ EXPORT_SYMBOL(fc_lport_reset);
*/
static void fc_lport_reset_locked(struct fc_lport *lport)
{
- if (lport->dns_rdata)
- lport->tt.rport_logoff(lport->dns_rdata);
+ if (lport->dns_rdata) {
+ fc_rport_logoff(lport->dns_rdata);
+ lport->dns_rdata = NULL;
+ }
if (lport->ptp_rdata) {
- lport->tt.rport_logoff(lport->ptp_rdata);
- kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
+ fc_rport_logoff(lport->ptp_rdata);
+ kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
lport->ptp_rdata = NULL;
}
@@ -1426,13 +1431,13 @@ static void fc_lport_enter_dns(struct fc_lport *lport)
fc_lport_state_enter(lport, LPORT_ST_DNS);
mutex_lock(&lport->disc.disc_mutex);
- rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
+ rdata = fc_rport_create(lport, FC_FID_DIR_SERV);
mutex_unlock(&lport->disc.disc_mutex);
if (!rdata)
goto err;
rdata->ops = &fc_lport_rport_ops;
- lport->tt.rport_login(rdata);
+ fc_rport_login(rdata);
return;
err:
@@ -1543,13 +1548,13 @@ static void fc_lport_enter_fdmi(struct fc_lport *lport)
fc_lport_state_enter(lport, LPORT_ST_FDMI);
mutex_lock(&lport->disc.disc_mutex);
- rdata = lport->tt.rport_create(lport, FC_FID_MGMT_SERV);
+ rdata = fc_rport_create(lport, FC_FID_MGMT_SERV);
mutex_unlock(&lport->disc.disc_mutex);
if (!rdata)
goto err;
rdata->ops = &fc_lport_rport_ops;
- lport->tt.rport_login(rdata);
+ fc_rport_login(rdata);
return;
err:
@@ -1772,7 +1777,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
if ((csp_flags & FC_SP_FT_FPORT) == 0) {
if (e_d_tov > lport->e_d_tov)
lport->e_d_tov = e_d_tov;
- lport->r_a_tov = 2 * e_d_tov;
+ lport->r_a_tov = 2 * lport->e_d_tov;
fc_lport_set_port_id(lport, did, fp);
printk(KERN_INFO "host%d: libfc: "
"Port (%6.6x) entered "
@@ -1784,8 +1789,10 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
get_unaligned_be64(
&flp->fl_wwnn));
} else {
- lport->e_d_tov = e_d_tov;
- lport->r_a_tov = r_a_tov;
+ if (e_d_tov > lport->e_d_tov)
+ lport->e_d_tov = e_d_tov;
+ if (r_a_tov > lport->r_a_tov)
+ lport->r_a_tov = r_a_tov;
fc_host_fabric_name(lport->host) =
get_unaligned_be64(&flp->fl_wwnn);
fc_lport_set_port_id(lport, did, fp);
@@ -1858,12 +1865,6 @@ EXPORT_SYMBOL(fc_lport_config);
*/
int fc_lport_init(struct fc_lport *lport)
{
- if (!lport->tt.lport_recv)
- lport->tt.lport_recv = fc_lport_recv_req;
-
- if (!lport->tt.lport_reset)
- lport->tt.lport_reset = fc_lport_reset;
-
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
fc_host_node_name(lport->host) = lport->wwnn;
fc_host_port_name(lport->host) = lport->wwpn;
@@ -1900,18 +1901,19 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
void *info_arg)
{
struct fc_bsg_info *info = info_arg;
- struct fc_bsg_job *job = info->job;
+ struct bsg_job *job = info->job;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct fc_lport *lport = info->lport;
struct fc_frame_header *fh;
size_t len;
void *buf;
if (IS_ERR(fp)) {
- job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
+ bsg_reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
-ECONNABORTED : -ETIMEDOUT;
job->reply_len = sizeof(uint32_t);
- job->state_flags |= FC_RQST_STATE_DONE;
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
kfree(info);
return;
}
@@ -1928,25 +1930,25 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
(unsigned short)fc_frame_payload_op(fp);
/* Save the reply status of the job */
- job->reply->reply_data.ctels_reply.status =
+ bsg_reply->reply_data.ctels_reply.status =
(cmd == info->rsp_code) ?
FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
}
- job->reply->reply_payload_rcv_len +=
+ bsg_reply->reply_payload_rcv_len +=
fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
&info->offset, NULL);
if (fr_eof(fp) == FC_EOF_T &&
(ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
(FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
- if (job->reply->reply_payload_rcv_len >
+ if (bsg_reply->reply_payload_rcv_len >
job->reply_payload.payload_len)
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
job->reply_payload.payload_len;
- job->reply->result = 0;
- job->state_flags |= FC_RQST_STATE_DONE;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
kfree(info);
}
fc_frame_free(fp);
@@ -1962,7 +1964,7 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
-static int fc_lport_els_request(struct fc_bsg_job *job,
+static int fc_lport_els_request(struct bsg_job *job,
struct fc_lport *lport,
u32 did, u32 tov)
{
@@ -2005,8 +2007,8 @@ static int fc_lport_els_request(struct fc_bsg_job *job,
info->nents = job->reply_payload.sg_cnt;
info->sg = job->reply_payload.sg_list;
- if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
- NULL, info, tov)) {
+ if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp,
+ NULL, info, tov)) {
kfree(info);
return -ECOMM;
}
@@ -2023,7 +2025,7 @@ static int fc_lport_els_request(struct fc_bsg_job *job,
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
-static int fc_lport_ct_request(struct fc_bsg_job *job,
+static int fc_lport_ct_request(struct bsg_job *job,
struct fc_lport *lport, u32 did, u32 tov)
{
struct fc_bsg_info *info;
@@ -2066,8 +2068,8 @@ static int fc_lport_ct_request(struct fc_bsg_job *job,
info->nents = job->reply_payload.sg_cnt;
info->sg = job->reply_payload.sg_list;
- if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
- NULL, info, tov)) {
+ if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp,
+ NULL, info, tov)) {
kfree(info);
return -ECOMM;
}
@@ -2079,25 +2081,27 @@ static int fc_lport_ct_request(struct fc_bsg_job *job,
* FC Passthrough requests
* @job: The BSG passthrough job
*/
-int fc_lport_bsg_request(struct fc_bsg_job *job)
+int fc_lport_bsg_request(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct request *rsp = job->req->next_rq;
- struct Scsi_Host *shost = job->shost;
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
struct fc_lport *lport = shost_priv(shost);
struct fc_rport *rport;
struct fc_rport_priv *rdata;
int rc = -EINVAL;
u32 did, tov;
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (rsp)
rsp->resid_len = job->reply_payload.payload_len;
mutex_lock(&lport->lp_mutex);
- switch (job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_RPT_ELS:
- rport = job->rport;
+ rport = fc_bsg_to_rport(job);
if (!rport)
break;
@@ -2107,7 +2111,7 @@ int fc_lport_bsg_request(struct fc_bsg_job *job)
break;
case FC_BSG_RPT_CT:
- rport = job->rport;
+ rport = fc_bsg_to_rport(job);
if (!rport)
break;
@@ -2117,25 +2121,25 @@ int fc_lport_bsg_request(struct fc_bsg_job *job)
break;
case FC_BSG_HST_CT:
- did = ntoh24(job->request->rqst_data.h_ct.port_id);
+ did = ntoh24(bsg_request->rqst_data.h_ct.port_id);
if (did == FC_FID_DIR_SERV) {
rdata = lport->dns_rdata;
if (!rdata)
break;
tov = rdata->e_d_tov;
} else {
- rdata = lport->tt.rport_lookup(lport, did);
+ rdata = fc_rport_lookup(lport, did);
if (!rdata)
break;
tov = rdata->e_d_tov;
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
rc = fc_lport_ct_request(job, lport, did, tov);
break;
case FC_BSG_HST_ELS_NOLOGIN:
- did = ntoh24(job->request->rqst_data.h_els.port_id);
+ did = ntoh24(bsg_request->rqst_data.h_els.port_id);
rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
break;
}
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 97aeaddd600d..c991f3b822f8 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -44,6 +44,19 @@
* path this potential over-use of the mutex is acceptable.
*/
+/*
+ * RPORT REFERENCE COUNTING
+ *
+ * A rport reference should be taken when:
+ * - an rport is allocated
+ * - a workqueue item is scheduled
+ * - an ELS request is send
+ * The reference should be dropped when:
+ * - the workqueue function has finished
+ * - the ELS response is handled
+ * - an rport is removed
+ */
+
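/*
 * A minimal sketch (hypothetical helper) of the rule above as applied
 * to deferred work: take a reference before scheduling, and drop it
 * immediately if the work was already pending, since only the one
 * queued instance will run and release it.
 */
static void example_schedule_retry(struct fc_rport_priv *rdata,
				   unsigned long delay)
{
	kref_get(&rdata->kref);
	if (!schedule_delayed_work(&rdata->retry_work, delay))
		kref_put(&rdata->kref, fc_rport_destroy);
}
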
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -74,8 +87,8 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_recv_prlo_req(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_recv_logo_req(struct fc_lport *, struct fc_frame *);
static void fc_rport_timeout(struct work_struct *);
-static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
-static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
+static void fc_rport_error(struct fc_rport_priv *, int);
+static void fc_rport_error_retry(struct fc_rport_priv *, int);
static void fc_rport_work(struct work_struct *);
static const char *fc_rport_state_names[] = {
@@ -98,8 +111,8 @@ static const char *fc_rport_state_names[] = {
* The reference count of the fc_rport_priv structure is
* increased by one.
*/
-static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
- u32 port_id)
+struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
+ u32 port_id)
{
struct fc_rport_priv *rdata = NULL, *tmp_rdata;
@@ -113,6 +126,7 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
rcu_read_unlock();
return rdata;
}
+EXPORT_SYMBOL(fc_rport_lookup);
/**
* fc_rport_create() - Create a new remote port
@@ -123,12 +137,11 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
*
* Locking note: must be called with the disc_mutex held.
*/
-static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
- u32 port_id)
+struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
{
struct fc_rport_priv *rdata;
- rdata = lport->tt.rport_lookup(lport, port_id);
+ rdata = fc_rport_lookup(lport, port_id);
if (rdata)
return rdata;
@@ -158,18 +171,20 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
}
return rdata;
}
+EXPORT_SYMBOL(fc_rport_create);
/**
* fc_rport_destroy() - Free a remote port after last reference is released
* @kref: The remote port's kref
*/
-static void fc_rport_destroy(struct kref *kref)
+void fc_rport_destroy(struct kref *kref)
{
struct fc_rport_priv *rdata;
rdata = container_of(kref, struct fc_rport_priv, kref);
kfree_rcu(rdata, rcu);
}
+EXPORT_SYMBOL(fc_rport_destroy);
/**
* fc_rport_state() - Return a string identifying the remote port's state
@@ -242,6 +257,8 @@ static void fc_rport_state_enter(struct fc_rport_priv *rdata,
/**
* fc_rport_work() - Handler for remote port events in the rport_event_queue
* @work: Handle to the remote port being dequeued
+ *
+ * Reference counting: drops kref on return
*/
static void fc_rport_work(struct work_struct *work)
{
@@ -272,12 +289,14 @@ static void fc_rport_work(struct work_struct *work)
kref_get(&rdata->kref);
mutex_unlock(&rdata->rp_mutex);
- if (!rport)
+ if (!rport) {
+ FC_RPORT_DBG(rdata, "No rport!\n");
rport = fc_remote_port_add(lport->host, 0, &ids);
+ }
if (!rport) {
FC_RPORT_DBG(rdata, "Failed to add the rport\n");
- lport->tt.rport_logoff(rdata);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ fc_rport_logoff(rdata);
+ kref_put(&rdata->kref, fc_rport_destroy);
return;
}
mutex_lock(&rdata->rp_mutex);
@@ -303,7 +322,7 @@ static void fc_rport_work(struct work_struct *work)
FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
rdata->lld_event_callback(lport, rdata, event);
}
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
break;
case RPORT_EV_FAILED:
@@ -329,7 +348,8 @@ static void fc_rport_work(struct work_struct *work)
FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
rdata->lld_event_callback(lport, rdata, event);
}
- cancel_delayed_work_sync(&rdata->retry_work);
+ if (cancel_delayed_work_sync(&rdata->retry_work))
+ kref_put(&rdata->kref, fc_rport_destroy);
/*
* Reset any outstanding exchanges before freeing rport.
@@ -351,7 +371,7 @@ static void fc_rport_work(struct work_struct *work)
if (port_id == FC_FID_DIR_SERV) {
rdata->event = RPORT_EV_NONE;
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
} else if ((rdata->flags & FC_RP_STARTED) &&
rdata->major_retries <
lport->max_rport_retry_count) {
@@ -362,17 +382,21 @@ static void fc_rport_work(struct work_struct *work)
mutex_unlock(&rdata->rp_mutex);
} else {
FC_RPORT_DBG(rdata, "work delete\n");
+ mutex_lock(&lport->disc.disc_mutex);
list_del_rcu(&rdata->peers);
+ mutex_unlock(&lport->disc.disc_mutex);
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
} else {
/*
* Re-open for events. Reissue READY event if ready.
*/
rdata->event = RPORT_EV_NONE;
- if (rdata->rp_state == RPORT_ST_READY)
+ if (rdata->rp_state == RPORT_ST_READY) {
+ FC_RPORT_DBG(rdata, "work reopen\n");
fc_rport_enter_ready(rdata);
+ }
mutex_unlock(&rdata->rp_mutex);
}
break;
@@ -381,12 +405,21 @@ static void fc_rport_work(struct work_struct *work)
mutex_unlock(&rdata->rp_mutex);
break;
}
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
* fc_rport_login() - Start the remote port login state machine
* @rdata: The remote port to be logged in to
*
+ * Initiates the RP state machine. It is called from the LP module.
+ * This function will issue the following commands to the N_Port
+ * identified by the FC ID provided.
+ *
+ * - PLOGI
+ * - PRLI
+ * - RTV
+ *
* Locking Note: Called without the rport lock held. This
* function will hold the rport lock, call an _enter_*
* function and then unlock the rport.
@@ -395,10 +428,16 @@ static void fc_rport_work(struct work_struct *work)
* If it appears we are already logged in, ADISC is used to verify
* the setup.
*/
-static int fc_rport_login(struct fc_rport_priv *rdata)
+int fc_rport_login(struct fc_rport_priv *rdata)
{
mutex_lock(&rdata->rp_mutex);
+ if (rdata->flags & FC_RP_STARTED) {
+ FC_RPORT_DBG(rdata, "port already started\n");
+ mutex_unlock(&rdata->rp_mutex);
+ return 0;
+ }
+
rdata->flags |= FC_RP_STARTED;
switch (rdata->rp_state) {
case RPORT_ST_READY:
@@ -408,15 +447,20 @@ static int fc_rport_login(struct fc_rport_priv *rdata)
case RPORT_ST_DELETE:
FC_RPORT_DBG(rdata, "Restart deleted port\n");
break;
- default:
+ case RPORT_ST_INIT:
FC_RPORT_DBG(rdata, "Login to port\n");
fc_rport_enter_flogi(rdata);
break;
+ default:
+ FC_RPORT_DBG(rdata, "Login in progress, state %s\n",
+ fc_rport_state(rdata));
+ break;
}
mutex_unlock(&rdata->rp_mutex);
return 0;
}
+EXPORT_SYMBOL(fc_rport_login);
/**
* fc_rport_enter_delete() - Schedule a remote port to be deleted
@@ -431,6 +475,8 @@ static int fc_rport_login(struct fc_rport_priv *rdata)
* Set the new event so that the old pending event will not occur.
* Since we have the mutex, even if fc_rport_work() is already started,
* it'll see the new event.
+ *
+ * Reference counting: does not modify kref
*/
static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
enum fc_rport_event event)
@@ -442,8 +488,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
fc_rport_state_enter(rdata, RPORT_ST_DELETE);
- if (rdata->event == RPORT_EV_NONE)
- queue_work(rport_event_queue, &rdata->event_work);
+ kref_get(&rdata->kref);
+ if (rdata->event == RPORT_EV_NONE &&
+ !queue_work(rport_event_queue, &rdata->event_work))
+ kref_put(&rdata->kref, fc_rport_destroy);
+
rdata->event = event;
}
@@ -455,7 +504,7 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
* function will hold the rport lock, call an _enter_*
* function and then unlock the rport.
*/
-static int fc_rport_logoff(struct fc_rport_priv *rdata)
+int fc_rport_logoff(struct fc_rport_priv *rdata)
{
struct fc_lport *lport = rdata->local_port;
u32 port_id = rdata->ids.port_id;
@@ -489,6 +538,7 @@ out:
mutex_unlock(&rdata->rp_mutex);
return 0;
}
+EXPORT_SYMBOL(fc_rport_logoff);
/**
* fc_rport_enter_ready() - Transition to the RPORT_ST_READY state
@@ -496,6 +546,8 @@ out:
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: schedules workqueue, does not modify kref
*/
static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
{
@@ -503,8 +555,11 @@ static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
FC_RPORT_DBG(rdata, "Port is Ready\n");
- if (rdata->event == RPORT_EV_NONE)
- queue_work(rport_event_queue, &rdata->event_work);
+ kref_get(&rdata->kref);
+ if (rdata->event == RPORT_EV_NONE &&
+ !queue_work(rport_event_queue, &rdata->event_work))
+ kref_put(&rdata->kref, fc_rport_destroy);
+
rdata->event = RPORT_EV_READY;
}
@@ -515,6 +570,8 @@ static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
* Locking Note: Called without the rport lock held. This
* function will hold the rport lock, call an _enter_*
* function and then unlock the rport.
+ *
+ * Reference counting: Drops kref on return.
*/
static void fc_rport_timeout(struct work_struct *work)
{
@@ -522,6 +579,7 @@ static void fc_rport_timeout(struct work_struct *work)
container_of(work, struct fc_rport_priv, retry_work.work);
mutex_lock(&rdata->rp_mutex);
+ FC_RPORT_DBG(rdata, "Port timeout, state %s\n", fc_rport_state(rdata));
switch (rdata->rp_state) {
case RPORT_ST_FLOGI:
@@ -547,23 +605,25 @@ static void fc_rport_timeout(struct work_struct *work)
}
mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
* fc_rport_error() - Error handler, called once retries have been exhausted
* @rdata: The remote port the error is happened on
- * @fp: The error code encapsulated in a frame pointer
+ * @err: The error code
*
* Locking Note: The rport lock is expected to be held before
* calling this routine
+ *
+ * Reference counting: does not modify kref
*/
-static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
+static void fc_rport_error(struct fc_rport_priv *rdata, int err)
{
struct fc_lport *lport = rdata->local_port;
- FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
- IS_ERR(fp) ? -PTR_ERR(fp) : 0,
- fc_rport_state(rdata), rdata->retries);
+ FC_RPORT_DBG(rdata, "Error %d in state %s, retries %d\n",
+ -err, fc_rport_state(rdata), rdata->retries);
switch (rdata->rp_state) {
case RPORT_ST_FLOGI:
@@ -595,36 +655,39 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
/**
* fc_rport_error_retry() - Handler for remote port state retries
* @rdata: The remote port whose state is to be retried
- * @fp: The error code encapsulated in a frame pointer
+ * @err: The error code
*
* If the error was an exchange timeout retry immediately,
* otherwise wait for E_D_TOV.
*
* Locking Note: The rport lock is expected to be held before
* calling this routine
+ *
+ * Reference counting: increments kref when scheduling retry_work
*/
-static void fc_rport_error_retry(struct fc_rport_priv *rdata,
- struct fc_frame *fp)
+static void fc_rport_error_retry(struct fc_rport_priv *rdata, int err)
{
- unsigned long delay = msecs_to_jiffies(FC_DEF_E_D_TOV);
+ unsigned long delay = msecs_to_jiffies(rdata->e_d_tov);
/* make sure this isn't an FC_EX_CLOSED error, never retry those */
- if (PTR_ERR(fp) == -FC_EX_CLOSED)
+ if (err == -FC_EX_CLOSED)
goto out;
if (rdata->retries < rdata->local_port->max_rport_retry_count) {
- FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
- PTR_ERR(fp), fc_rport_state(rdata));
+ FC_RPORT_DBG(rdata, "Error %d in state %s, retrying\n",
+ err, fc_rport_state(rdata));
rdata->retries++;
/* no additional delay on exchange timeouts */
- if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
+ if (err == -FC_EX_TIMEOUT)
delay = 0;
- schedule_delayed_work(&rdata->retry_work, delay);
+ kref_get(&rdata->kref);
+ if (!schedule_delayed_work(&rdata->retry_work, delay))
+ kref_put(&rdata->kref, fc_rport_destroy);
return;
}
out:
- fc_rport_error(rdata, fp);
+ fc_rport_error(rdata, err);
}
/**
@@ -684,8 +747,11 @@ static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_lport *lport = rdata->local_port;
struct fc_els_flogi *flogi;
unsigned int r_a_tov;
+ u8 opcode;
+ int err = 0;
- FC_RPORT_DBG(rdata, "Received a FLOGI %s\n", fc_els_resp_type(fp));
+ FC_RPORT_DBG(rdata, "Received a FLOGI %s\n",
+ IS_ERR(fp) ? "error" : fc_els_resp_type(fp));
if (fp == ERR_PTR(-FC_EX_CLOSED))
goto put;
@@ -701,18 +767,34 @@ static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
}
if (IS_ERR(fp)) {
- fc_rport_error(rdata, fp);
+ fc_rport_error(rdata, PTR_ERR(fp));
goto err;
}
-
- if (fc_frame_payload_op(fp) != ELS_LS_ACC)
+ opcode = fc_frame_payload_op(fp);
+ if (opcode == ELS_LS_RJT) {
+ struct fc_els_ls_rjt *rjt;
+
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ FC_RPORT_DBG(rdata, "FLOGI ELS rejected, reason %x expl %x\n",
+ rjt->er_reason, rjt->er_explan);
+ err = -FC_EX_ELS_RJT;
goto bad;
- if (fc_rport_login_complete(rdata, fp))
+ } else if (opcode != ELS_LS_ACC) {
+ FC_RPORT_DBG(rdata, "FLOGI ELS invalid opcode %x\n", opcode);
+ err = -FC_EX_ELS_RJT;
goto bad;
+ }
+ if (fc_rport_login_complete(rdata, fp)) {
+ FC_RPORT_DBG(rdata, "FLOGI failed, no login\n");
+ err = -FC_EX_INV_LOGIN;
+ goto bad;
+ }
flogi = fc_frame_payload_get(fp, sizeof(*flogi));
- if (!flogi)
+ if (!flogi) {
+ err = -FC_EX_ALLOC_ERR;
goto bad;
+ }
r_a_tov = ntohl(flogi->fl_csp.sp_r_a_tov);
if (r_a_tov > rdata->r_a_tov)
rdata->r_a_tov = r_a_tov;
@@ -726,11 +808,11 @@ out:
err:
mutex_unlock(&rdata->rp_mutex);
put:
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
return;
bad:
FC_RPORT_DBG(rdata, "Bad FLOGI response\n");
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, err);
goto out;
}
@@ -740,6 +822,8 @@ bad:
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
{
@@ -756,20 +840,23 @@ static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
if (!fp)
- return fc_rport_error_retry(rdata, fp);
+ return fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
+ kref_get(&rdata->kref);
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_FLOGI,
fc_rport_flogi_resp, rdata,
- 2 * lport->r_a_tov))
- fc_rport_error_retry(rdata, NULL);
- else
- kref_get(&rdata->kref);
+ 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
}
/**
* fc_rport_recv_flogi_req() - Handle Fabric Login (FLOGI) request in p-mp mode
* @lport: The local port that received the PLOGI request
* @rx_fp: The PLOGI request frame
+ *
+ * Reference counting: drops kref on return
*/
static void fc_rport_recv_flogi_req(struct fc_lport *lport,
struct fc_frame *rx_fp)
@@ -799,7 +886,7 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
goto reject;
}
- rdata = lport->tt.rport_lookup(lport, sid);
+ rdata = fc_rport_lookup(lport, sid);
if (!rdata) {
rjt_data.reason = ELS_RJT_FIP;
rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR;
@@ -824,8 +911,7 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
* RPORT wouldn't have been created and 'rport_lookup' would have
* failed anyway in that case.
*/
- if (lport->point_to_multipoint)
- break;
+ break;
case RPORT_ST_DELETE:
mutex_unlock(&rdata->rp_mutex);
rjt_data.reason = ELS_RJT_FIP;
@@ -867,20 +953,27 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
lport->tt.frame_send(lport, fp);
- if (rdata->ids.port_name < lport->wwpn)
- fc_rport_enter_plogi(rdata);
- else
- fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
+ /*
+ * Do not proceed with the state machine if our
+ * FLOGI has crossed with an FLOGI from the
+ * remote port; wait for the FLOGI response instead.
+ */
+ if (rdata->rp_state != RPORT_ST_FLOGI) {
+ if (rdata->ids.port_name < lport->wwpn)
+ fc_rport_enter_plogi(rdata);
+ else
+ fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
+ }
out:
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
fc_frame_free(rx_fp);
return;
reject_put:
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
reject:
- lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
fc_frame_free(rx_fp);
}
@@ -904,10 +997,13 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
u16 cssp_seq;
u8 op;
- mutex_lock(&rdata->rp_mutex);
-
FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp));
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ goto put;
+
+ mutex_lock(&rdata->rp_mutex);
+
if (rdata->rp_state != RPORT_ST_PLOGI) {
FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
"%s\n", fc_rport_state(rdata));
@@ -917,7 +1013,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
}
if (IS_ERR(fp)) {
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, PTR_ERR(fp));
goto err;
}
@@ -939,14 +1035,20 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
rdata->max_seq = csp_seq;
rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
fc_rport_enter_prli(rdata);
- } else
- fc_rport_error_retry(rdata, fp);
+ } else {
+ struct fc_els_ls_rjt *rjt;
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
+ rjt->er_reason, rjt->er_explan);
+ fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
+ }
out:
fc_frame_free(fp);
err:
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+put:
+ kref_put(&rdata->kref, fc_rport_destroy);
}
static bool
@@ -969,6 +1071,8 @@ fc_rport_compatible_roles(struct fc_lport *lport, struct fc_rport_priv *rdata)
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
{
@@ -990,17 +1094,18 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
if (!fp) {
FC_RPORT_DBG(rdata, "%s frame alloc failed\n", __func__);
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
return;
}
rdata->e_d_tov = lport->e_d_tov;
+ kref_get(&rdata->kref);
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
fc_rport_plogi_resp, rdata,
- 2 * lport->r_a_tov))
- fc_rport_error_retry(rdata, NULL);
- else
- kref_get(&rdata->kref);
+ 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
}
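The reference pattern introduced here, and repeated for PRLI, RTV, LOGO and ADISC below, is to take the kref before the asynchronous send and drop it again on synchronous failure, since the response handler that would normally drop it will never run. A stand-alone sketch of that discipline (illustrative types, not the kernel kref API):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj { int refs; };

static void obj_get(struct obj *o) { o->refs++; }

static void obj_put(struct obj *o)
{
	if (--o->refs == 0) {
		printf("destroyed\n");
		free(o);
	}
}

/* Pretend async sender; returns false on synchronous failure. */
static bool async_send(struct obj *o) { (void)o; return false; }

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	o->refs = 1;		/* caller's reference */
	obj_get(o);		/* reference owned by the completion callback */
	if (!async_send(o))
		obj_put(o);	/* send failed: the callback will never run */
	obj_put(o);		/* drop the caller's reference -> destroyed */
	return 0;
}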
/**
@@ -1022,16 +1127,20 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_els_spp spp;
} *pp;
struct fc_els_spp temp_spp;
+ struct fc_els_ls_rjt *rjt;
struct fc4_prov *prov;
u32 roles = FC_RPORT_ROLE_UNKNOWN;
u32 fcp_parm = 0;
u8 op;
- u8 resp_code = 0;
-
- mutex_lock(&rdata->rp_mutex);
+ enum fc_els_spp_resp resp_code;
FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp));
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ goto put;
+
+ mutex_lock(&rdata->rp_mutex);
+
if (rdata->rp_state != RPORT_ST_PRLI) {
FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
"%s\n", fc_rport_state(rdata));
@@ -1041,7 +1150,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
}
if (IS_ERR(fp)) {
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, PTR_ERR(fp));
goto err;
}
@@ -1055,14 +1164,14 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
goto out;
resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
- FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x\n",
- pp->spp.spp_flags);
+ FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x spp_type 0x%x\n",
+ pp->spp.spp_flags, pp->spp.spp_type);
rdata->spp_type = pp->spp.spp_type;
if (resp_code != FC_SPP_RESP_ACK) {
if (resp_code == FC_SPP_RESP_CONF)
- fc_rport_error(rdata, fp);
+ fc_rport_error(rdata, -FC_EX_SEQ_ERR);
else
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
goto out;
}
if (pp->prli.prli_spp_len < sizeof(pp->spp))
@@ -1074,13 +1183,25 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
if (fcp_parm & FCP_SPPF_CONF_COMPL)
rdata->flags |= FC_RP_FLAGS_CONF_REQ;
- prov = fc_passive_prov[FC_TYPE_FCP];
+ /*
+ * Call prli provider if we should act as a target
+ */
+ prov = fc_passive_prov[rdata->spp_type];
if (prov) {
memset(&temp_spp, 0, sizeof(temp_spp));
prov->prli(rdata, pp->prli.prli_spp_len,
&pp->spp, &temp_spp);
}
-
+ /*
+ * Check if the image pair could be established
+ */
+ if (rdata->spp_type != FC_TYPE_FCP ||
+ !(pp->spp.spp_flags & FC_SPP_EST_IMG_PAIR)) {
+ /*
+ * Nope; we can't use this port as a target.
+ */
+ fcp_parm &= ~FCP_SPPF_TARG_FCN;
+ }
rdata->supported_classes = FC_COS_CLASS3;
if (fcp_parm & FCP_SPPF_INIT_FCN)
roles |= FC_RPORT_ROLE_FCP_INITIATOR;
@@ -1091,15 +1212,18 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
fc_rport_enter_rtv(rdata);
} else {
- FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
- fc_rport_error_retry(rdata, fp);
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
+ rjt->er_reason, rjt->er_explan);
+ fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
}
out:
fc_frame_free(fp);
err:
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+put:
+ kref_put(&rdata->kref, fc_rport_destroy);
}
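Note that the image-pair check above gates only the target role: unless the responder's page is FCP and carries FC_SPP_EST_IMG_PAIR, the target bit is masked out of the service parameters before roles are derived. A compact model of that derivation, with illustrative stand-ins for the FCP_SPPF_* and FC_RPORT_ROLE_* constants:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for FCP_SPPF_* and FC_RPORT_ROLE_* values. */
#define SPPF_INIT_FCN	0x0020
#define SPPF_TARG_FCN	0x0010
#define ROLE_INITIATOR	0x01
#define ROLE_TARGET	0x02

static uint32_t prli_roles(uint32_t fcp_parm, int is_fcp, int est_img_pair)
{
	uint32_t roles = 0;

	if (!is_fcp || !est_img_pair)
		fcp_parm &= ~SPPF_TARG_FCN;	/* no image pair: not a target */
	if (fcp_parm & SPPF_INIT_FCN)
		roles |= ROLE_INITIATOR;
	if (fcp_parm & SPPF_TARG_FCN)
		roles |= ROLE_TARGET;
	return roles;
}

int main(void)
{
	/* target bit advertised but no image pair: only the initiator role */
	printf("0x%x\n", prli_roles(SPPF_INIT_FCN | SPPF_TARG_FCN, 1, 0));
	return 0;
}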
/**
@@ -1108,6 +1232,8 @@ err:
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
{
@@ -1128,6 +1254,15 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
return;
}
+ /*
+ * And if the local port does not support the initiator function
+ * there's no need to send a PRLI, either.
+ */
+ if (!(lport->service_params & FCP_SPPF_INIT_FCN)) {
+ fc_rport_enter_ready(rdata);
+ return;
+ }
+
FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
fc_rport_state(rdata));
@@ -1135,7 +1270,7 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(*pp));
if (!fp) {
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
return;
}
@@ -1151,15 +1286,16 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
fc_host_port_id(lport->host), FC_TYPE_ELS,
FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
- if (!lport->tt.exch_seq_send(lport, fp, fc_rport_prli_resp,
- NULL, rdata, 2 * lport->r_a_tov))
- fc_rport_error_retry(rdata, NULL);
- else
- kref_get(&rdata->kref);
+ kref_get(&rdata->kref);
+ if (!fc_exch_seq_send(lport, fp, fc_rport_prli_resp,
+ NULL, rdata, 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
}
/**
- * fc_rport_els_rtv_resp() - Handler for Request Timeout Value (RTV) responses
+ * fc_rport_rtv_resp() - Handler for Request Timeout Value (RTV) responses
* @sp: The sequence the RTV was on
* @fp: The RTV response frame
* @rdata_arg: The remote port that sent the RTV response
@@ -1176,10 +1312,13 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_rport_priv *rdata = rdata_arg;
u8 op;
- mutex_lock(&rdata->rp_mutex);
-
FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp));
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ goto put;
+
+ mutex_lock(&rdata->rp_mutex);
+
if (rdata->rp_state != RPORT_ST_RTV) {
FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
"%s\n", fc_rport_state(rdata));
@@ -1189,7 +1328,7 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
}
if (IS_ERR(fp)) {
- fc_rport_error(rdata, fp);
+ fc_rport_error(rdata, PTR_ERR(fp));
goto err;
}
@@ -1205,13 +1344,15 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
tov = ntohl(rtv->rtv_r_a_tov);
if (tov == 0)
tov = 1;
- rdata->r_a_tov = tov;
+ if (tov > rdata->r_a_tov)
+ rdata->r_a_tov = tov;
tov = ntohl(rtv->rtv_e_d_tov);
if (toq & FC_ELS_RTV_EDRES)
tov /= 1000000;
if (tov == 0)
tov = 1;
- rdata->e_d_tov = tov;
+ if (tov > rdata->e_d_tov)
+ rdata->e_d_tov = tov;
}
}
@@ -1221,7 +1362,8 @@ out:
fc_frame_free(fp);
err:
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+put:
+ kref_put(&rdata->kref, fc_rport_destroy);
}
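The behavioural change in the RTV response handler is that the peer may now only lengthen our timers: the advertised R_A_TOV/E_D_TOV is adopted only when it exceeds the current value, and an E_D_TOV given in nanosecond resolution (FC_ELS_RTV_EDRES) is first scaled to milliseconds. A small stand-alone sketch of that update rule under those assumptions:

#include <stdint.h>
#include <stdio.h>

static uint32_t update_tov(uint32_t cur_ms, uint32_t advertised, int ns_res)
{
	if (ns_res)
		advertised /= 1000000;	/* nanosecond resolution -> ms */
	if (advertised == 0)
		advertised = 1;		/* never allow a zero timeout */
	return advertised > cur_ms ? advertised : cur_ms;
}

int main(void)
{
	printf("%u\n", update_tov(10000, 2000, 0));	/* kept at 10000 */
	printf("%u\n", update_tov(10000, 20000, 0));	/* raised to 20000 */
	return 0;
}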
/**
@@ -1230,6 +1372,8 @@ err:
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
{
@@ -1243,16 +1387,52 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
if (!fp) {
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
return;
}
+ kref_get(&rdata->kref);
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
fc_rport_rtv_resp, rdata,
- 2 * lport->r_a_tov))
- fc_rport_error_retry(rdata, NULL);
- else
- kref_get(&rdata->kref);
+ 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
+}
+
+/**
+ * fc_rport_recv_rtv_req() - Handler for Read Timeout Value (RTV) requests
+ * @rdata: The remote port that sent the RTV request
+ * @in_fp: The RTV request frame
+ *
+ * Locking Note: Called with the lport and rport locks held.
+ */
+static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata,
+ struct fc_frame *in_fp)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+ struct fc_els_rtv_acc *rtv;
+ struct fc_seq_els_data rjt_data;
+
+ FC_RPORT_DBG(rdata, "Received RTV request\n");
+
+ fp = fc_frame_alloc(lport, sizeof(*rtv));
+ if (!fp) {
+ rjt_data.reason = ELS_RJT_UNAB;
+ rjt_data.explan = ELS_EXPL_INSUF_RES;
+ fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+ goto drop;
+ }
+ rtv = fc_frame_payload_get(fp, sizeof(*rtv));
+ rtv->rtv_cmd = ELS_LS_ACC;
+ rtv->rtv_r_a_tov = htonl(lport->r_a_tov);
+ rtv->rtv_e_d_tov = htonl(lport->e_d_tov);
+ rtv->rtv_toq = 0;
+ fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+drop:
+ fc_frame_free(in_fp);
}
/**
@@ -1262,15 +1442,16 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
- * @lport_arg: The local port
+ * @rdata_arg: The remote port that sent the LOGO response
*/
static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
- void *lport_arg)
+ void *rdata_arg)
{
- struct fc_lport *lport = lport_arg;
+ struct fc_rport_priv *rdata = rdata_arg;
+ struct fc_lport *lport = rdata->local_port;
FC_RPORT_ID_DBG(lport, fc_seq_exch(sp)->did,
"Received a LOGO %s\n", fc_els_resp_type(fp));
- if (IS_ERR(fp))
- return;
- fc_frame_free(fp);
+ if (!IS_ERR(fp))
+ fc_frame_free(fp);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -1279,6 +1460,8 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
{
@@ -1291,8 +1474,10 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
if (!fp)
return;
- (void)lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
- fc_rport_logo_resp, lport, 0);
+ kref_get(&rdata->kref);
+ if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
+ fc_rport_logo_resp, rdata, 0))
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -1312,10 +1497,13 @@ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_els_adisc *adisc;
u8 op;
- mutex_lock(&rdata->rp_mutex);
-
FC_RPORT_DBG(rdata, "Received a ADISC response\n");
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ goto put;
+
+ mutex_lock(&rdata->rp_mutex);
+
if (rdata->rp_state != RPORT_ST_ADISC) {
FC_RPORT_DBG(rdata, "Received a ADISC resp but in state %s\n",
fc_rport_state(rdata));
@@ -1325,7 +1513,7 @@ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
}
if (IS_ERR(fp)) {
- fc_rport_error(rdata, fp);
+ fc_rport_error(rdata, PTR_ERR(fp));
goto err;
}
@@ -1350,7 +1538,8 @@ out:
fc_frame_free(fp);
err:
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+put:
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -1359,6 +1548,8 @@ err:
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
{
@@ -1372,15 +1563,16 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc));
if (!fp) {
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
return;
}
+ kref_get(&rdata->kref);
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC,
fc_rport_adisc_resp, rdata,
- 2 * lport->r_a_tov))
- fc_rport_error_retry(rdata, NULL);
- else
- kref_get(&rdata->kref);
+ 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
}
/**
@@ -1404,7 +1596,7 @@ static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
if (!adisc) {
rjt_data.reason = ELS_RJT_PROT;
rjt_data.explan = ELS_EXPL_INV_LEN;
- lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
goto drop;
}
@@ -1480,7 +1672,7 @@ static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
goto out;
out_rjt:
- lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
out:
fc_frame_free(rx_fp);
}
@@ -1494,15 +1686,21 @@ out:
* The ELS opcode has already been validated by the caller.
*
* Locking Note: Called with the lport lock held.
+ *
+ * Reference counting: does not modify kref
*/
static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_rport_priv *rdata;
struct fc_seq_els_data els_data;
- rdata = lport->tt.rport_lookup(lport, fc_frame_sid(fp));
- if (!rdata)
+ rdata = fc_rport_lookup(lport, fc_frame_sid(fp));
+ if (!rdata) {
+ FC_RPORT_ID_DBG(lport, fc_frame_sid(fp),
+ "Received ELS 0x%02x from non-logged-in port\n",
+ fc_frame_payload_op(fp));
goto reject;
+ }
mutex_lock(&rdata->rp_mutex);
@@ -1512,9 +1710,21 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
case RPORT_ST_READY:
case RPORT_ST_ADISC:
break;
+ case RPORT_ST_PLOGI:
+ if (fc_frame_payload_op(fp) == ELS_PRLI) {
+ FC_RPORT_DBG(rdata, "Reject ELS PRLI "
+ "while in state %s\n",
+ fc_rport_state(rdata));
+ mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ goto busy;
+ }
default:
+ FC_RPORT_DBG(rdata,
+ "Reject ELS 0x%02x while in state %s\n",
+ fc_frame_payload_op(fp), fc_rport_state(rdata));
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
goto reject;
}
@@ -1529,30 +1739,41 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
fc_rport_recv_adisc_req(rdata, fp);
break;
case ELS_RRQ:
- lport->tt.seq_els_rsp_send(fp, ELS_RRQ, NULL);
+ fc_seq_els_rsp_send(fp, ELS_RRQ, NULL);
fc_frame_free(fp);
break;
case ELS_REC:
- lport->tt.seq_els_rsp_send(fp, ELS_REC, NULL);
+ fc_seq_els_rsp_send(fp, ELS_REC, NULL);
fc_frame_free(fp);
break;
case ELS_RLS:
fc_rport_recv_rls_req(rdata, fp);
break;
+ case ELS_RTV:
+ fc_rport_recv_rtv_req(rdata, fp);
+ break;
default:
fc_frame_free(fp); /* can't happen */
break;
}
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
return;
reject:
els_data.reason = ELS_RJT_UNAB;
els_data.explan = ELS_EXPL_PLOGI_REQD;
- lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+ fc_frame_free(fp);
+ return;
+
+busy:
+ els_data.reason = ELS_RJT_BUSY;
+ els_data.explan = ELS_EXPL_NONE;
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
fc_frame_free(fp);
+ return;
}
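The new busy path distinguishes two rejections: an ELS from a port that never logged in gets LS_RJT with "PLOGI required", while a PRLI that crosses our outstanding PLOGI gets a retryable LS_RJT/BUSY so the initiator retries once login completes. Sketching the gate as a pure function (state and verdict names are placeholders, not the libfc enums):

#include <stdio.h>

enum state   { ST_NONE, ST_PLOGI, ST_PRLI, ST_RTV, ST_READY, ST_ADISC };
enum verdict { ACCEPT, REJECT_PLOGI_REQD, REJECT_BUSY };

static enum verdict gate_els(enum state st, int is_prli)
{
	switch (st) {
	case ST_PRLI:
	case ST_RTV:
	case ST_READY:
	case ST_ADISC:
		return ACCEPT;
	case ST_PLOGI:
		if (is_prli)
			return REJECT_BUSY;	/* crossed with our PLOGI */
		/* fall through */
	default:
		return REJECT_PLOGI_REQD;
	}
}

int main(void)
{
	printf("%d %d\n", gate_els(ST_PLOGI, 1), gate_els(ST_NONE, 0));
	return 0;
}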
/**
@@ -1561,8 +1782,10 @@ reject:
* @fp: The request frame
*
* Locking Note: Called with the lport lock held.
+ *
+ * Reference counting: does not modify kref
*/
-static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_seq_els_data els_data;
@@ -1588,16 +1811,18 @@ static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
case ELS_RRQ:
case ELS_REC:
case ELS_RLS:
+ case ELS_RTV:
fc_rport_recv_els_req(lport, fp);
break;
default:
els_data.reason = ELS_RJT_UNSUP;
els_data.explan = ELS_EXPL_NONE;
- lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
fc_frame_free(fp);
break;
}
}
+EXPORT_SYMBOL(fc_rport_recv_req);
/**
* fc_rport_recv_plogi_req() - Handler for Port Login (PLOGI) requests
@@ -1605,6 +1830,8 @@ static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
* @rx_fp: The PLOGI request frame
*
* Locking Note: The rport lock is held before calling this function.
+ *
+ * Reference counting: increments kref on return
*/
static void fc_rport_recv_plogi_req(struct fc_lport *lport,
struct fc_frame *rx_fp)
@@ -1630,7 +1857,7 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport,
disc = &lport->disc;
mutex_lock(&disc->disc_mutex);
- rdata = lport->tt.rport_create(lport, sid);
+ rdata = fc_rport_create(lport, sid);
if (!rdata) {
mutex_unlock(&disc->disc_mutex);
rjt_data.reason = ELS_RJT_UNAB;
@@ -1718,7 +1945,7 @@ out:
return;
reject:
- lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
fc_frame_free(fp);
}
@@ -1744,7 +1971,6 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
unsigned int len;
unsigned int plen;
enum fc_els_spp_resp resp;
- enum fc_els_spp_resp passive;
struct fc_seq_els_data rjt_data;
struct fc4_prov *prov;
@@ -1794,15 +2020,21 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
resp = 0;
if (rspp->spp_type < FC_FC4_PROV_SIZE) {
+ enum fc_els_spp_resp active = 0, passive = 0;
+
prov = fc_active_prov[rspp->spp_type];
if (prov)
- resp = prov->prli(rdata, plen, rspp, spp);
+ active = prov->prli(rdata, plen, rspp, spp);
prov = fc_passive_prov[rspp->spp_type];
- if (prov) {
+ if (prov)
passive = prov->prli(rdata, plen, rspp, spp);
- if (!resp || passive == FC_SPP_RESP_ACK)
- resp = passive;
- }
+ if (!active || passive == FC_SPP_RESP_ACK)
+ resp = passive;
+ else
+ resp = active;
+ FC_RPORT_DBG(rdata, "PRLI rspp type %x "
+ "active %x passive %x\n",
+ rspp->spp_type, active, passive);
}
if (!resp) {
if (spp->spp_flags & FC_SPP_EST_IMG_PAIR)
@@ -1823,20 +2055,13 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
lport->tt.frame_send(lport, fp);
- switch (rdata->rp_state) {
- case RPORT_ST_PRLI:
- fc_rport_enter_ready(rdata);
- break;
- default:
- break;
- }
goto drop;
reject_len:
rjt_data.reason = ELS_RJT_PROT;
rjt_data.explan = ELS_EXPL_INV_LEN;
reject:
- lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
drop:
fc_frame_free(rx_fp);
}
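The provider arbitration above replaces the old single-result logic: both the active and the passive FC-4 provider see the page, and the passive verdict wins whenever the active provider abstained or the passive one ACKed. Expressed as a pure function (enum values stand in for the fc_els_spp_resp codes):

#include <stdio.h>

/* Placeholder values for the fc_els_spp_resp codes. */
enum spp_resp { RESP_NONE = 0, RESP_ACK = 1, RESP_CONF = 3 };

static enum spp_resp resolve_prli(enum spp_resp active, enum spp_resp passive)
{
	if (!active || passive == RESP_ACK)
		return passive;
	return active;
}

int main(void)
{
	printf("%d\n", resolve_prli(RESP_NONE, RESP_ACK));	/* -> 1 (ACK) */
	printf("%d\n", resolve_prli(RESP_CONF, RESP_NONE));	/* -> 3 */
	return 0;
}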
@@ -1907,7 +2132,7 @@ reject_len:
rjt_data.reason = ELS_RJT_PROT;
rjt_data.explan = ELS_EXPL_INV_LEN;
reject:
- lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
drop:
fc_frame_free(rx_fp);
}
@@ -1919,17 +2144,19 @@ drop:
*
* Locking Note: The rport lock is expected to be held before calling
* this function.
+ *
+ * Reference counting: drops kref on return
*/
static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_rport_priv *rdata;
u32 sid;
- lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+ fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
sid = fc_frame_sid(fp);
- rdata = lport->tt.rport_lookup(lport, sid);
+ rdata = fc_rport_lookup(lport, sid);
if (rdata) {
mutex_lock(&rdata->rp_mutex);
FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
@@ -1937,7 +2164,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
fc_rport_enter_delete(rdata, RPORT_EV_STOP);
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
} else
FC_RPORT_ID_DBG(lport, sid,
"Received LOGO from non-logged-in port\n");
@@ -1947,41 +2174,11 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
/**
* fc_rport_flush_queue() - Flush the rport_event_queue
*/
-static void fc_rport_flush_queue(void)
+void fc_rport_flush_queue(void)
{
flush_workqueue(rport_event_queue);
}
-
-/**
- * fc_rport_init() - Initialize the remote port layer for a local port
- * @lport: The local port to initialize the remote port layer for
- */
-int fc_rport_init(struct fc_lport *lport)
-{
- if (!lport->tt.rport_lookup)
- lport->tt.rport_lookup = fc_rport_lookup;
-
- if (!lport->tt.rport_create)
- lport->tt.rport_create = fc_rport_create;
-
- if (!lport->tt.rport_login)
- lport->tt.rport_login = fc_rport_login;
-
- if (!lport->tt.rport_logoff)
- lport->tt.rport_logoff = fc_rport_logoff;
-
- if (!lport->tt.rport_recv_req)
- lport->tt.rport_recv_req = fc_rport_recv_req;
-
- if (!lport->tt.rport_flush_queue)
- lport->tt.rport_flush_queue = fc_rport_flush_queue;
-
- if (!lport->tt.rport_destroy)
- lport->tt.rport_destroy = fc_rport_destroy;
-
- return 0;
-}
-EXPORT_SYMBOL(fc_rport_init);
+EXPORT_SYMBOL(fc_rport_flush_queue);
/**
* fc_rport_fcp_prli() - Handle incoming PRLI for the FCP initiator.
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index b484859464f6..8a20b4e86224 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -648,6 +648,10 @@ struct lpfc_hba {
#define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */
#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */
#define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */
+#define HBA_FORCED_LINK_SPEED 0x40000 /*
+ * Firmware supports Forced Link Speed
+ * capability
+ */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -746,6 +750,8 @@ struct lpfc_hba {
uint32_t cfg_oas_priority;
uint32_t cfg_XLanePriority;
uint32_t cfg_enable_bg;
+ uint32_t cfg_prot_mask;
+ uint32_t cfg_prot_guard;
uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
uint32_t cfg_aer_support;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index f1019908800e..c84775562c65 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2759,18 +2759,14 @@ LPFC_ATTR_R(enable_npiv, 1, 0, 1,
LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
"FCF Fast failover=1 Priority failover=2");
-int lpfc_enable_rrq = 2;
-module_param(lpfc_enable_rrq, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
-lpfc_param_show(enable_rrq);
/*
# lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
# 0x0 = disabled, XRI/OXID use not tracked.
# 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
# 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent.
*/
-lpfc_param_init(enable_rrq, 2, 0, 2);
-static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL);
+LPFC_ATTR_R(enable_rrq, 2, 0, 2,
+ "Enable RRQ functionality");
/*
# lpfc_suppress_link_up: Bring link up at initialization
@@ -2827,14 +2823,8 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
lpfc_txcmplq_hw_show, NULL);
-int lpfc_iocb_cnt = 2;
-module_param(lpfc_iocb_cnt, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_iocb_cnt,
+LPFC_ATTR_R(iocb_cnt, 2, 1, 5,
"Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
-lpfc_param_show(iocb_cnt);
-lpfc_param_init(iocb_cnt, 2, 1, 5);
-static DEVICE_ATTR(lpfc_iocb_cnt, S_IRUGO,
- lpfc_iocb_cnt_show, NULL);
/*
# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
@@ -2887,9 +2877,9 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
if (val != LPFC_DEF_DEVLOSS_TMO)
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0407 Ignoring nodev_tmo module "
- "parameter because devloss_tmo is "
- "set.\n");
+ "0407 Ignoring lpfc_nodev_tmo module "
+ "parameter because lpfc_devloss_tmo "
+ "is set.\n");
return 0;
}
@@ -2948,8 +2938,8 @@ lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
if (vport->dev_loss_tmo_changed ||
(lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0401 Ignoring change to nodev_tmo "
- "because devloss_tmo is set.\n");
+ "0401 Ignoring change to lpfc_nodev_tmo "
+ "because lpfc_devloss_tmo is set.\n");
return 0;
}
if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
@@ -2964,7 +2954,7 @@ lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
return 0;
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0403 lpfc_nodev_tmo attribute cannot be set to"
+ "0403 lpfc_nodev_tmo attribute cannot be set to "
"%d, allowed range is [%d, %d]\n",
val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
return -EINVAL;
@@ -3015,8 +3005,8 @@ lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0404 lpfc_devloss_tmo attribute cannot be set to"
- " %d, allowed range is [%d, %d]\n",
+ "0404 lpfc_devloss_tmo attribute cannot be set to "
+ "%d, allowed range is [%d, %d]\n",
val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
return -EINVAL;
}
@@ -3204,6 +3194,8 @@ LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
# Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
# Default value is 0.
*/
+LPFC_ATTR(topology, 0, 0, 6,
+ "Select Fibre Channel topology");
/**
* lpfc_topology_set - Set the adapters topology field
@@ -3281,11 +3273,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
phba->brd_no, val);
return -EINVAL;
}
-static int lpfc_topology = 0;
-module_param(lpfc_topology, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology");
+
lpfc_param_show(topology)
-lpfc_param_init(topology, 0, 0, 6)
static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
lpfc_topology_show, lpfc_topology_store);
@@ -3679,7 +3668,12 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
int nolip = 0;
const char *val_buf = buf;
int err;
- uint32_t prev_val;
+ uint32_t prev_val, if_type;
+
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_2 &&
+ phba->hba_flag & HBA_FORCED_LINK_SPEED)
+ return -EPERM;
if (!strncmp(buf, "nolip ", strlen("nolip "))) {
nolip = 1;
@@ -3789,6 +3783,9 @@ static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
# 1 = aer supported and enabled (default)
# Value range is [0,1]. Default value is 1.
*/
+LPFC_ATTR(aer_support, 1, 0, 1,
+ "Enable PCIe device AER support");
+lpfc_param_show(aer_support)
/**
* lpfc_aer_support_store - Set the adapter for aer support
@@ -3871,46 +3868,6 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
return rc;
}
-static int lpfc_aer_support = 1;
-module_param(lpfc_aer_support, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support");
-lpfc_param_show(aer_support)
-
-/**
- * lpfc_aer_support_init - Set the initial adapters aer support flag
- * @phba: lpfc_hba pointer.
- * @val: enable aer or disable aer flag.
- *
- * Description:
- * If val is in a valid range [0,1], then set the adapter's initial
- * cfg_aer_support field. It will be up to the driver's probe_one
- * routine to determine whether the device's AER support can be set
- * or not.
- *
- * Notes:
- * If the value is not in range log a kernel error message, and
- * choose the default value of setting AER support and return.
- *
- * Returns:
- * zero if val saved.
- * -EINVAL val out of range
- **/
-static int
-lpfc_aer_support_init(struct lpfc_hba *phba, int val)
-{
- if (val == 0 || val == 1) {
- phba->cfg_aer_support = val;
- return 0;
- }
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2712 lpfc_aer_support attribute value %d out "
- "of range, allowed values are 0|1, setting it "
- "to default value of 1\n", val);
- /* By default, try to enable AER on a device */
- phba->cfg_aer_support = 1;
- return -EINVAL;
-}
-
static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
lpfc_aer_support_show, lpfc_aer_support_store);
@@ -4055,39 +4012,10 @@ lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
return rc;
}
-static int lpfc_sriov_nr_virtfn = LPFC_DEF_VFN_PER_PFN;
-module_param(lpfc_sriov_nr_virtfn, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(lpfc_sriov_nr_virtfn, "Enable PCIe device SR-IOV virtual fn");
-lpfc_param_show(sriov_nr_virtfn)
-
-/**
- * lpfc_sriov_nr_virtfn_init - Set the initial sr-iov virtual function enable
- * @phba: lpfc_hba pointer.
- * @val: link speed value.
- *
- * Description:
- * If val is in a valid range [0,255], then set the adapter's initial
- * cfg_sriov_nr_virtfn field. If it's greater than the maximum, the maximum
- * number shall be used instead. It will be up to the driver's probe_one
- * routine to determine whether the device's SR-IOV is supported or not.
- *
- * Returns:
- * zero if val saved.
- * -EINVAL val out of range
- **/
-static int
-lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
-{
- if (val >= 0 && val <= LPFC_MAX_VFN_PER_PFN) {
- phba->cfg_sriov_nr_virtfn = val;
- return 0;
- }
+LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN,
+ "Enable PCIe device SR-IOV virtual fn");
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3017 Enabling %d virtual functions is not "
- "allowed.\n", val);
- return -EINVAL;
-}
+lpfc_param_show(sriov_nr_virtfn)
static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
@@ -4251,7 +4179,8 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3016 fcp_imax: %d out of range, using default\n", val);
+ "3016 lpfc_fcp_imax: %d out of range, using default\n",
+ val);
phba->cfg_fcp_imax = LPFC_DEF_IMAX;
return 0;
@@ -4401,8 +4330,8 @@ lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3326 fcp_cpu_map: %d out of range, using default\n",
- val);
+ "3326 lpfc_fcp_cpu_map: %d out of range, using "
+ "default\n", val);
phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
return 0;
@@ -4441,12 +4370,10 @@ LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
# to limit the I/O completion time to the parameter value.
# The value is set in milliseconds.
*/
-static int lpfc_max_scsicmpl_time;
-module_param(lpfc_max_scsicmpl_time, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_max_scsicmpl_time,
+LPFC_VPORT_ATTR(max_scsicmpl_time, 0, 0, 60000,
"Use command completion time to control queue depth");
+
lpfc_vport_param_show(max_scsicmpl_time);
-lpfc_vport_param_init(max_scsicmpl_time, 0, 0, 60000);
static int
lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
{
@@ -4691,12 +4618,15 @@ unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
# HBA supports DIX Type 1: Host to HBA Type 1 protection
#
*/
-unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION |
- SHOST_DIX_TYPE0_PROTECTION |
- SHOST_DIX_TYPE1_PROTECTION;
-
-module_param(lpfc_prot_mask, uint, S_IRUGO);
-MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
+LPFC_ATTR(prot_mask,
+ (SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE0_PROTECTION |
+ SHOST_DIX_TYPE1_PROTECTION),
+ 0,
+ (SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE0_PROTECTION |
+ SHOST_DIX_TYPE1_PROTECTION),
+ "T10-DIF host protection capabilities mask");
/*
# lpfc_prot_guard:
@@ -4706,9 +4636,9 @@ MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
# - Default will result in registering capabilities for all guard types
#
*/
-unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP;
-module_param(lpfc_prot_guard, byte, S_IRUGO);
-MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type");
+LPFC_ATTR(prot_guard,
+ SHOST_DIX_GUARD_IP, SHOST_DIX_GUARD_CRC, SHOST_DIX_GUARD_IP,
+ "T10-DIF host protection guard type");
/*
* Delay initial NPort discovery when Clean Address bit is cleared in
@@ -5828,6 +5758,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_oas_flags = 0;
phba->cfg_oas_priority = 0;
lpfc_enable_bg_init(phba, lpfc_enable_bg);
+ lpfc_prot_mask_init(phba, lpfc_prot_mask);
+ lpfc_prot_guard_init(phba, lpfc_prot_guard);
if (phba->sli_rev == LPFC_SLI_REV4)
phba->cfg_poll = 0;
else
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 05dcc2abd541..7dca4d6a8883 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
+#include <linux/bsg-lib.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -97,7 +98,7 @@ struct lpfc_bsg_menlo {
#define TYPE_MENLO 4
struct bsg_job_data {
uint32_t type;
- struct fc_bsg_job *set_job; /* job waiting for this iocb to finish */
+ struct bsg_job *set_job; /* job waiting for this iocb to finish */
union {
struct lpfc_bsg_event *evt;
struct lpfc_bsg_iocb iocb;
@@ -211,7 +212,7 @@ lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
static unsigned int
lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
- struct fc_bsg_buffer *bsg_buffers,
+ struct bsg_buffer *bsg_buffers,
unsigned int bytes_to_transfer, int to_buffers)
{
@@ -297,7 +298,8 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp, *rmp;
struct lpfc_nodelist *ndlp;
@@ -312,6 +314,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
+ bsg_reply = job->reply;
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
@@ -350,7 +353,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
}
} else {
rsp_size = rsp->un.genreq64.bdl.bdeSize;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
lpfc_bsg_copy_data(rmp, &job->reply_payload,
rsp_size, 0);
}
@@ -367,8 +370,9 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
/* Complete the job if the job is still active */
if (job) {
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
}
@@ -378,12 +382,13 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
* @job: fc_bsg_job to handle
**/
static int
-lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
+lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
- struct lpfc_rport_data *rdata = job->rport->dd_data;
+ struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
struct lpfc_nodelist *ndlp = rdata->pnode;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct ulp_bde64 *bpl = NULL;
uint32_t timeout;
struct lpfc_iocbq *cmdiocbq = NULL;
@@ -398,7 +403,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
int iocb_stat;
/* in case no data is transferred */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
@@ -542,7 +547,7 @@ no_ndlp:
kfree(dd_data);
no_dd_data:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
return rc;
}
@@ -570,7 +575,8 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
IOCB_t *rsp;
struct lpfc_nodelist *ndlp;
struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
@@ -588,6 +594,7 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
+ bsg_reply = job->reply;
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
@@ -609,17 +616,17 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
if (job) {
if (rsp->ulpStatus == IOSTAT_SUCCESS) {
rsp_size = rsp->un.elsreq64.bdl.bdeSize;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
prsp->virt,
rsp_size);
} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sizeof(struct fc_bsg_ctels_reply);
/* LS_RJT data returned in word 4 */
rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
- els_reply = &job->reply->reply_data.ctels_reply;
+ els_reply = &bsg_reply->reply_data.ctels_reply;
els_reply->status = FC_CTELS_STATUS_REJECT;
els_reply->rjt_data.action = rjt_data[3];
els_reply->rjt_data.reason_code = rjt_data[2];
@@ -637,8 +644,9 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
/* Complete the job if the job is still active */
if (job) {
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
}
@@ -648,12 +656,14 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
* @job: fc_bsg_job to handle
**/
static int
-lpfc_bsg_rport_els(struct fc_bsg_job *job)
+lpfc_bsg_rport_els(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
- struct lpfc_rport_data *rdata = job->rport->dd_data;
+ struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
struct lpfc_nodelist *ndlp = rdata->pnode;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
uint32_t elscmd;
uint32_t cmdsize;
struct lpfc_iocbq *cmdiocbq;
@@ -664,7 +674,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
int rc = 0;
/* in case no data is transferred */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* verify the els command is not greater than the
* maximum ELS transfer size.
@@ -684,7 +694,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
goto no_dd_data;
}
- elscmd = job->request->rqst_data.r_els.els_code;
+ elscmd = bsg_request->rqst_data.r_els.els_code;
cmdsize = job->request_payload.payload_len;
if (!lpfc_nlp_get(ndlp)) {
@@ -771,7 +781,7 @@ free_dd_data:
no_dd_data:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
return rc;
}
@@ -917,7 +927,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
struct lpfc_hbq_entry *hbqe;
struct lpfc_sli_ct_request *ct_req;
- struct fc_bsg_job *job = NULL;
+ struct bsg_job *job = NULL;
+ struct fc_bsg_reply *bsg_reply;
struct bsg_job_data *dd_data = NULL;
unsigned long flags;
int size = 0;
@@ -1120,13 +1131,15 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dd_data->set_job = NULL;
lpfc_bsg_event_unref(evt);
if (job) {
- job->reply->reply_payload_rcv_len = size;
+ bsg_reply = job->reply;
+ bsg_reply->reply_payload_rcv_len = size;
/* make error code available to userspace */
- job->reply->result = 0;
+ bsg_reply->result = 0;
job->dd_data = NULL;
/* complete the job back to userspace */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
spin_lock_irqsave(&phba->ct_ev_lock, flags);
}
}
@@ -1187,10 +1200,11 @@ lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
* @job: SET_EVENT fc_bsg_job
**/
static int
-lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
+lpfc_bsg_hba_set_event(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_request *bsg_request = job->request;
struct set_ct_event *event_req;
struct lpfc_bsg_event *evt;
int rc = 0;
@@ -1208,7 +1222,7 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
}
event_req = (struct set_ct_event *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
FC_REG_EVENT_MASK);
spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -1271,10 +1285,12 @@ job_error:
* @job: GET_EVENT fc_bsg_job
**/
static int
-lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
+lpfc_bsg_hba_get_event(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct get_ct_event *event_req;
struct get_ct_event_reply *event_reply;
struct lpfc_bsg_event *evt, *evt_next;
@@ -1292,10 +1308,10 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
}
event_req = (struct get_ct_event *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
event_reply = (struct get_ct_event_reply *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
if (evt->reg_id == event_req->ev_reg_id) {
@@ -1315,7 +1331,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
* an error indicating that there isn't any more
*/
if (evt_dat == NULL) {
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
rc = -ENOENT;
goto job_error;
}
@@ -1331,12 +1347,12 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
event_reply->type = evt_dat->type;
event_reply->immed_data = evt_dat->immed_dat;
if (evt_dat->len > 0)
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
evt_dat->data, evt_dat->len);
else
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (evt_dat) {
kfree(evt_dat->data);
@@ -1347,13 +1363,14 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
lpfc_bsg_event_unref(evt);
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
job->dd_data = NULL;
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
job_error:
job->dd_data = NULL;
- job->reply->result = rc;
+ bsg_reply->result = rc;
return rc;
}
@@ -1380,7 +1397,8 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp;
struct lpfc_nodelist *ndlp;
@@ -1411,6 +1429,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
/* Copy the completed job data or set the error status */
if (job) {
+ bsg_reply = job->reply;
if (rsp->ulpStatus) {
if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
@@ -1428,7 +1447,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
rc = -EACCES;
}
} else {
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
}
}
@@ -1442,8 +1461,9 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
/* Complete the job if the job is still active */
if (job) {
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
}
@@ -1457,7 +1477,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
* @num_entry: Number of entries in the bde.
**/
static int
-lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
+lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
int num_entry)
{
@@ -1603,12 +1623,14 @@ no_dd_data:
* @job: SEND_MGMT_RESP fc_bsg_job
**/
static int
-lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
+lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
struct ulp_bde64 *bpl;
struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
int bpl_entries;
@@ -1618,7 +1640,7 @@ lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
int rc = 0;
/* in case no data is transferred */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
rc = -ERANGE;
@@ -1664,7 +1686,7 @@ send_mgmt_rsp_free_bmp:
kfree(bmp);
send_mgmt_rsp_exit:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
return rc;
}
@@ -1760,8 +1782,10 @@ lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
* All of this is done in-line.
*/
static int
-lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct diag_mode_set *loopback_mode;
uint32_t link_flags;
uint32_t timeout;
@@ -1771,7 +1795,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
int rc = 0;
/* no data to return, just the return code */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len < sizeof(struct fc_bsg_request) +
sizeof(struct diag_mode_set)) {
@@ -1791,7 +1815,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
/* bring the link to diagnostic mode */
loopback_mode = (struct diag_mode_set *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
link_flags = loopback_mode->type;
timeout = loopback_mode->timeout * 100;
@@ -1864,10 +1888,11 @@ loopback_mode_exit:
job_error:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -2015,14 +2040,16 @@ lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
* loopback mode in order to perform a diagnostic loopback test.
*/
static int
-lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct diag_mode_set *loopback_mode;
uint32_t link_flags, timeout;
int i, rc = 0;
/* no data to return, just the return code */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len < sizeof(struct fc_bsg_request) +
sizeof(struct diag_mode_set)) {
@@ -2054,7 +2081,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3129 Bring link to diagnostic state.\n");
loopback_mode = (struct diag_mode_set *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
link_flags = loopback_mode->type;
timeout = loopback_mode->timeout * 100;
@@ -2151,10 +2178,11 @@ loopback_mode_exit:
job_error:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -2166,17 +2194,17 @@ job_error:
* command from the user to proper driver action routines.
*/
static int
-lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
+lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
{
struct Scsi_Host *shost;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
int rc;
- shost = job->shost;
+ shost = fc_bsg_to_shost(job);
if (!shost)
return -ENODEV;
- vport = (struct lpfc_vport *)job->shost->hostdata;
+ vport = shost_priv(shost);
if (!vport)
return -ENODEV;
phba = vport->phba;
@@ -2202,8 +2230,10 @@ lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
* command from the user to proper driver action routines.
*/
static int
-lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
+lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct Scsi_Host *shost;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
@@ -2211,10 +2241,10 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
uint32_t timeout;
int rc, i;
- shost = job->shost;
+ shost = fc_bsg_to_shost(job);
if (!shost)
return -ENODEV;
- vport = (struct lpfc_vport *)job->shost->hostdata;
+ vport = shost_priv(shost);
if (!vport)
return -ENODEV;
phba = vport->phba;
@@ -2232,7 +2262,7 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
phba->link_flag &= ~LS_LOOPBACK_MODE;
spin_unlock_irq(&phba->hbalock);
loopback_mode_end_cmd = (struct diag_mode_set *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
timeout = loopback_mode_end_cmd->timeout * 100;
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
@@ -2263,10 +2293,11 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
loopback_mode_end_exit:
/* make return code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -2278,8 +2309,10 @@ loopback_mode_end_exit:
* application.
*/
static int
-lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
+lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct Scsi_Host *shost;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
@@ -2292,12 +2325,12 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
struct diag_status *diag_status_reply;
int mbxstatus, rc = 0;
- shost = job->shost;
+ shost = fc_bsg_to_shost(job);
if (!shost) {
rc = -ENODEV;
goto job_error;
}
- vport = (struct lpfc_vport *)job->shost->hostdata;
+ vport = shost_priv(shost);
if (!vport) {
rc = -ENODEV;
goto job_error;
@@ -2335,7 +2368,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
goto job_error;
link_diag_test_cmd = (struct sli4_link_diag *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
@@ -2385,7 +2418,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
}
diag_status_reply = (struct diag_status *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
if (job->reply_len <
sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
@@ -2413,10 +2446,11 @@ link_diag_test_exit:
job_error:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -2982,9 +3016,10 @@ err_post_rxbufs_exit:
* of loopback mode.
**/
static int
-lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
+lpfc_bsg_diag_loopback_run(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_hba *phba = vport->phba;
struct lpfc_bsg_event *evt;
struct event_data *evdat;
@@ -3012,7 +3047,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
uint32_t total_mem;
/* in case no data is returned, return just the return code */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
@@ -3237,11 +3272,11 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
rc = IOCB_SUCCESS;
/* skip over elx loopback header */
rx_databuf += ELX_LOOPBACK_HEADER_SZ;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
rx_databuf, size);
- job->reply->reply_payload_rcv_len = size;
+ bsg_reply->reply_payload_rcv_len = size;
}
}
@@ -3271,11 +3306,12 @@ err_loopback_test_exit:
loopback_test_exit:
kfree(dataout);
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
/* complete the job back to userspace if no error */
if (rc == IOCB_SUCCESS)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -3284,9 +3320,10 @@ loopback_test_exit:
* @job: GET_DFC_REV fc_bsg_job
**/
static int
-lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
+lpfc_bsg_get_dfc_rev(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_hba *phba = vport->phba;
struct get_mgmt_rev_reply *event_reply;
int rc = 0;
@@ -3301,7 +3338,7 @@ lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
}
event_reply = (struct get_mgmt_rev_reply *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
if (job->reply_len <
sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
@@ -3315,9 +3352,10 @@ lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
job_error:
- job->reply->result = rc;
+ bsg_reply->result = rc;
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -3336,7 +3374,8 @@ static void
lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
+ struct bsg_job *job;
uint32_t size;
unsigned long flags;
uint8_t *pmb, *pmb_buf;
@@ -3364,8 +3403,9 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
/* Copy the mailbox data to the job if it is still active */
if (job) {
+ bsg_reply = job->reply;
size = job->reply_payload.payload_len;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pmb_buf, size);
@@ -3379,8 +3419,9 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
/* Complete the job if the job is still active */
if (job) {
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
}
@@ -3510,11 +3551,12 @@ lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
* This routine handles BSG job completion for mailbox commands with
* multiple external buffers.
**/
-static struct fc_bsg_job *
+static struct bsg_job *
lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
uint8_t *pmb, *pmb_buf;
unsigned long flags;
uint32_t size;
@@ -3529,6 +3571,7 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
+ bsg_reply = job->reply;
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
@@ -3559,13 +3602,13 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
if (job) {
size = job->reply_payload.payload_len;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pmb_buf, size);
/* result for successful */
- job->reply->result = 0;
+ bsg_reply->result = 0;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2937 SLI_CONFIG ext-buffer maibox command "
@@ -3603,7 +3646,8 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
static void
lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
@@ -3623,9 +3667,11 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
/* if the job is still active, call job done */
- if (job)
- job->job_done(job);
-
+ if (job) {
+ bsg_reply = job->reply;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ }
return;
}
@@ -3640,7 +3686,8 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
static void
lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
@@ -3658,8 +3705,11 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
lpfc_bsg_mbox_ext_session_reset(phba);
/* if the job is still active, call job done */
- if (job)
- job->job_done(job);
+ if (job) {
+ bsg_reply = job->reply;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ }
return;
}
@@ -3768,10 +3818,11 @@ lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
* non-embedded external buffers.
**/
static int
-lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
enum nemb_type nemb_tp,
struct lpfc_dmabuf *dmabuf)
{
+ struct fc_bsg_request *bsg_request = job->request;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
struct dfc_mbox_req *mbox_req;
struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
@@ -3784,7 +3835,7 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
int rc, i;
mbox_req =
- (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+ (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
/* pointer to the start of mailbox command */
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
@@ -3955,10 +4006,12 @@ job_error:
* non-embedded external buffers.
**/
static int
-lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
enum nemb_type nemb_tp,
struct lpfc_dmabuf *dmabuf)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct dfc_mbox_req *mbox_req;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
uint32_t ext_buf_cnt;
@@ -3969,7 +4022,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
int rc = SLI_CONFIG_NOT_HANDLED, i;
mbox_req =
- (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+ (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
/* pointer to the start of mailbox command */
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
@@ -4096,8 +4149,9 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* wait for additional external buffers */
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return SLI_CONFIG_HANDLED;
job_error:
@@ -4119,7 +4173,7 @@ job_error:
* with embedded subsystem 0x1 and opcodes with external HBDs.
**/
static int
-lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
struct lpfc_sli_config_mbox *sli_cfg_mbx;
@@ -4268,8 +4322,9 @@ lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
* user space through BSG.
**/
static int
-lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
+lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
{
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
struct lpfc_dmabuf *dmabuf;
uint8_t *pbuf;
@@ -4307,7 +4362,7 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
dmabuf, index);
pbuf = (uint8_t *)dmabuf->virt;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pbuf, size);
@@ -4321,8 +4376,9 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
lpfc_bsg_mbox_ext_session_reset(phba);
}
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return SLI_CONFIG_HANDLED;
}
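
sg_copy_from_buffer() is what moves the linear mailbox buffer into the job's reply scatterlist above; its return value becomes reply_payload_rcv_len. A hedged sketch of the idiom (helper name is illustrative):

	#include <linux/scatterlist.h>
	#include <linux/bsg-lib.h>
	#include <scsi/scsi_transport_fc.h>

	/* Sketch: copy a linear kernel buffer into a BSG reply payload. */
	static unsigned int example_fill_reply(struct bsg_job *job,
					       void *buf, size_t len)
	{
		struct fc_bsg_reply *bsg_reply = job->reply;

		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    buf, len);
		return bsg_reply->reply_payload_rcv_len;
	}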
@@ -4336,9 +4392,10 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
* from user space through BSG.
**/
static int
-lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct bsg_job_data *dd_data = NULL;
LPFC_MBOXQ_t *pmboxq = NULL;
MAILBOX_t *pmb;
@@ -4436,8 +4493,9 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
/* wait for additional external buffers */
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return SLI_CONFIG_HANDLED;
job_error:
@@ -4457,7 +4515,7 @@ job_error:
* command with multiple non-embedded external buffers.
**/
static int
-lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
int rc;
@@ -4502,14 +4560,15 @@ lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
* (0x9B) mailbox commands and external buffers.
**/
static int
-lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
+ struct fc_bsg_request *bsg_request = job->request;
struct dfc_mbox_req *mbox_req;
int rc = SLI_CONFIG_NOT_HANDLED;
mbox_req =
- (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+ (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
/* mbox command with/without single external buffer */
if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
@@ -4579,9 +4638,11 @@ sli_cfg_ext_error:
* let our completion handler finish the command.
**/
static int
-lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_vport *vport)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
/* a 4k buffer to hold the mb and extended data from/to the bsg */
@@ -4600,7 +4661,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
uint32_t size;
/* in case no data is transferred */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* sanity check to protect driver */
if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
@@ -4619,7 +4680,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
mbox_req =
- (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+ (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
/* check if requested extended data lengths are valid */
if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
@@ -4841,7 +4902,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* job finished, copy the data */
memcpy(pmbx, pmb, sizeof(*pmb));
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pmbx, size);
@@ -4870,15 +4931,17 @@ job_cont:
* @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
**/
static int
-lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
+lpfc_bsg_mbox_cmd(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_hba *phba = vport->phba;
struct dfc_mbox_req *mbox_req;
int rc = 0;
/* mix-and-match backward compatibility */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -4889,7 +4952,7 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
sizeof(struct fc_bsg_request)),
(int)sizeof(struct dfc_mbox_req));
mbox_req = (struct dfc_mbox_req *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
mbox_req->extMboxTag = 0;
mbox_req->extSeqNum = 0;
}
@@ -4898,15 +4961,16 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
if (rc == 0) {
/* job done */
- job->reply->result = 0;
+ bsg_reply->result = 0;
job->dd_data = NULL;
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
} else if (rc == 1)
/* job submitted, will complete later*/
rc = 0; /* return zero, no error */
else {
/* some error occurred */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
}
@@ -4936,7 +5000,8 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp, *rmp;
struct lpfc_bsg_menlo *menlo;
@@ -4956,6 +5021,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
+ bsg_reply = job->reply;
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
@@ -4970,7 +5036,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
*/
menlo_resp = (struct menlo_response *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
menlo_resp->xri = rsp->ulpContext;
if (rsp->ulpStatus) {
if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
@@ -4990,7 +5056,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
}
} else {
rsp_size = rsp->un.genreq64.bdl.bdeSize;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
lpfc_bsg_copy_data(rmp, &job->reply_payload,
rsp_size, 0);
}
@@ -5007,8 +5073,9 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
/* Complete the job if active */
if (job) {
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
@@ -5024,9 +5091,11 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
* supplied in the menlo request header xri field.
**/
static int
-lpfc_menlo_cmd(struct fc_bsg_job *job)
+lpfc_menlo_cmd(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocbq;
IOCB_t *cmd;
@@ -5039,7 +5108,7 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
struct ulp_bde64 *bpl = NULL;
/* in case no data is returned return just the return code */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) +
@@ -5069,7 +5138,7 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
}
menlo_cmd = (struct menlo_command *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
@@ -5180,19 +5249,65 @@ free_dd:
kfree(dd_data);
no_dd_data:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
return rc;
}
+static int
+lpfc_forced_link_speed(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct lpfc_vport *vport = shost_priv(shost);
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ struct forced_link_speed_support_reply *forced_reply;
+ int rc = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct get_forced_link_speed_support)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "0048 Received FORCED_LINK_SPEED request "
+ "below minimum size\n");
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ forced_reply = (struct forced_link_speed_support_reply *)
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+ if (job->reply_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct forced_link_speed_support_reply)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "0049 Received FORCED_LINK_SPEED reply below "
+ "minimum size\n");
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED)
+ ? LPFC_FORCED_LINK_SPEED_SUPPORTED
+ : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
+job_error:
+ bsg_reply->result = rc;
+ if (rc == 0)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ return rc;
+}
+
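
The two checks in lpfc_forced_link_speed() above mirror a pattern used by every vendor command in this file: both request_len and reply_len must cover the transport header plus the vendor payload. A sketch of the same test factored out (the helper is hypothetical, not part of the patch):

	/* Sketch: minimum-size check for a vendor-specific FC BSG command. */
	static bool example_bsg_lengths_ok(struct bsg_job *job,
					   size_t req_payload,
					   size_t rsp_payload)
	{
		return job->request_len >=
				sizeof(struct fc_bsg_request) + req_payload &&
		       job->reply_len >=
				sizeof(struct fc_bsg_request) + rsp_payload;
	}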
/**
* lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
* @job: fc_bsg_job to handle
**/
static int
-lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
+lpfc_bsg_hst_vendor(struct bsg_job *job)
{
- int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
int rc;
switch (command) {
@@ -5227,11 +5342,14 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
case LPFC_BSG_VENDOR_MENLO_DATA:
rc = lpfc_menlo_cmd(job);
break;
+ case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
+ rc = lpfc_forced_link_speed(job);
+ break;
default:
rc = -EINVAL;
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
break;
}
@@ -5243,12 +5361,14 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
* @job: fc_bsg_job to handle
**/
int
-lpfc_bsg_request(struct fc_bsg_job *job)
+lpfc_bsg_request(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
uint32_t msgcode;
int rc;
- msgcode = job->request->msgcode;
+ msgcode = bsg_request->msgcode;
switch (msgcode) {
case FC_BSG_HST_VENDOR:
rc = lpfc_bsg_hst_vendor(job);
@@ -5261,9 +5381,9 @@ lpfc_bsg_request(struct fc_bsg_job *job)
break;
default:
rc = -EINVAL;
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
break;
}
@@ -5278,9 +5398,9 @@ lpfc_bsg_request(struct fc_bsg_job *job)
* the waiting function which will handle passing the error back to userspace
**/
int
-lpfc_bsg_timeout(struct fc_bsg_job *job)
+lpfc_bsg_timeout(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index e557bcdbcb19..f2247aa4fa17 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -35,6 +35,7 @@
#define LPFC_BSG_VENDOR_MENLO_DATA 9
#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
+#define LPFC_BSG_VENDOR_FORCED_LINK_SPEED 14
struct set_ct_event {
uint32_t command;
@@ -284,6 +285,15 @@ struct lpfc_sli_config_mbox {
} un;
};
+#define LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED 0
+#define LPFC_FORCED_LINK_SPEED_SUPPORTED 1
+struct get_forced_link_speed_support {
+ uint32_t command;
+};
+struct forced_link_speed_support_reply {
+ uint8_t supported;
+};
+
/* driver only */
#define SLI_CONFIG_NOT_HANDLED 0
#define SLI_CONFIG_HANDLED 1
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index bd7576d452f2..15d2bfdf582d 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -397,8 +397,6 @@ extern spinlock_t _dump_buf_lock;
extern int _dump_buf_done;
extern spinlock_t pgcnt_lock;
extern unsigned int pgcnt;
-extern unsigned int lpfc_prot_mask;
-extern unsigned char lpfc_prot_guard;
extern unsigned int lpfc_fcp_look_ahead;
/* Interface exported by fabric iocb scheduler */
@@ -431,8 +429,8 @@ struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t);
#define HBA_EVENT_LINK_DOWN 3
/* functions to support SGIOv4/bsg interface */
-int lpfc_bsg_request(struct fc_bsg_job *);
-int lpfc_bsg_timeout(struct fc_bsg_job *);
+int lpfc_bsg_request(struct bsg_job *);
+int lpfc_bsg_timeout(struct bsg_job *);
int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
int lpfc_bsg_ct_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b7d54bfb1df9..236e4e51d161 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -7610,7 +7610,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* reject till our FLOGI completes */
if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
(cmd != ELS_CMD_FLOGI)) {
- rjt_err = LSRJT_UNABLE_TPC;
+ rjt_err = LSRJT_LOGICAL_BSY;
rjt_exp = LSEXP_NOTHING_MORE;
goto lsrjt;
}
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index ee8022737591..5646699b0516 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -921,6 +921,7 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_GET_PORT_NAME 0x4D
#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
#define LPFC_MBOX_OPCODE_GET_VPD_DATA 0x5B
+#define LPFC_MBOX_OPCODE_SET_HOST_DATA 0x5D
#define LPFC_MBOX_OPCODE_SEND_ACTIVATION 0x73
#define LPFC_MBOX_OPCODE_RESET_LICENSES 0x74
#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
@@ -2289,6 +2290,9 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0
#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_r_a_tov_WORD word6
+#define lpfc_mbx_rd_conf_link_speed_SHIFT 16
+#define lpfc_mbx_rd_conf_link_speed_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_link_speed_WORD word6
uint32_t rsvd_7;
uint32_t rsvd_8;
uint32_t word9;
@@ -2919,6 +2923,16 @@ struct lpfc_mbx_set_feature {
};
+#define LPFC_SET_HOST_OS_DRIVER_VERSION 0x2
+struct lpfc_mbx_set_host_data {
+#define LPFC_HOST_OS_DRIVER_VERSION_SIZE 48
+ struct mbox_header header;
+ uint32_t param_id;
+ uint32_t param_len;
+ uint8_t data[LPFC_HOST_OS_DRIVER_VERSION_SIZE];
+};
+
+
struct lpfc_mbx_get_sli4_parameters {
struct mbox_header header;
struct lpfc_sli4_parameters sli4_parameters;
@@ -3313,6 +3327,7 @@ struct lpfc_mqe {
struct lpfc_mbx_get_port_name get_port_name;
struct lpfc_mbx_set_feature set_feature;
struct lpfc_mbx_memory_dump_type3 mem_dump_type3;
+ struct lpfc_mbx_set_host_data set_host_data;
struct lpfc_mbx_nop nop;
} un;
};
@@ -3981,7 +3996,8 @@ union lpfc_wqe128 {
struct gen_req64_wqe gen_req;
};
-#define LPFC_GROUP_OJECT_MAGIC_NUM 0xfeaa0001
+#define LPFC_GROUP_OJECT_MAGIC_G5 0xfeaa0001
+#define LPFC_GROUP_OJECT_MAGIC_G6 0xfeaa0003
#define LPFC_FILE_TYPE_GROUP 0xf7
#define LPFC_FILE_ID_GROUP 0xa2
struct lpfc_grp_hdr {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 734a0428ef0e..4776fd85514f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6279,34 +6279,36 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
uint32_t old_guard;
int pagecnt = 10;
- if (lpfc_prot_mask && lpfc_prot_guard) {
+ if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"1478 Registering BlockGuard with the "
"SCSI layer\n");
- old_mask = lpfc_prot_mask;
- old_guard = lpfc_prot_guard;
+ old_mask = phba->cfg_prot_mask;
+ old_guard = phba->cfg_prot_guard;
/* Only allow supported values */
- lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
+ phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
SHOST_DIX_TYPE0_PROTECTION |
SHOST_DIX_TYPE1_PROTECTION);
- lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC);
+ phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
+ SHOST_DIX_GUARD_CRC);
/* DIF Type 1 protection for profiles AST1/C1 is end to end */
- if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
- lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
+ if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
+ phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
- if (lpfc_prot_mask && lpfc_prot_guard) {
- if ((old_mask != lpfc_prot_mask) ||
- (old_guard != lpfc_prot_guard))
+ if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
+ if ((old_mask != phba->cfg_prot_mask) ||
+ (old_guard != phba->cfg_prot_guard))
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1475 Registering BlockGuard with the "
"SCSI layer: mask %d guard %d\n",
- lpfc_prot_mask, lpfc_prot_guard);
+ phba->cfg_prot_mask,
+ phba->cfg_prot_guard);
- scsi_host_set_prot(shost, lpfc_prot_mask);
- scsi_host_set_guard(shost, lpfc_prot_guard);
+ scsi_host_set_prot(shost, phba->cfg_prot_mask);
+ scsi_host_set_guard(shost, phba->cfg_prot_guard);
} else
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1479 Not Registering BlockGuard with the SCSI "
@@ -6929,6 +6931,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
struct lpfc_mbx_get_func_cfg *get_func_cfg;
struct lpfc_rsrc_desc_fcfcoe *desc;
char *pdesc_0;
+ uint16_t forced_link_speed;
+ uint32_t if_type;
int length, i, rc = 0, rc2;
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -7022,6 +7026,58 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
if (rc)
goto read_cfg_out;
+ /* Update link speed if forced link speed is supported */
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+ forced_link_speed =
+ bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
+ if (forced_link_speed) {
+ phba->hba_flag |= HBA_FORCED_LINK_SPEED;
+
+ switch (forced_link_speed) {
+ case LINK_SPEED_1G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_1G;
+ break;
+ case LINK_SPEED_2G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_2G;
+ break;
+ case LINK_SPEED_4G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_4G;
+ break;
+ case LINK_SPEED_8G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_8G;
+ break;
+ case LINK_SPEED_10G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_10G;
+ break;
+ case LINK_SPEED_16G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_16G;
+ break;
+ case LINK_SPEED_32G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_32G;
+ break;
+ case 0xffff:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_AUTO;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0047 Unrecognized link "
+ "speed : %d\n",
+ forced_link_speed);
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_AUTO;
+ }
+ }
+ }
+
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
length = phba->sli4_hba.max_cfg_param.max_xri -
lpfc_sli4_get_els_iocb_cnt(phba);
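
The switch statement added above maps each firmware-forced speed from READ_CONFIG onto the driver's user link-speed constants, with 0xffff meaning auto-negotiate. As a sketch, the same mapping expressed as a lookup table (constants as used above; the table itself is illustrative, not part of the patch):

	/* Sketch: table-driven form of the forced-link-speed mapping. */
	static const struct {
		uint16_t fw_speed;	/* value from READ_CONFIG word6 */
		uint32_t user_speed;	/* LPFC_USER_LINK_SPEED_* */
	} forced_speed_map[] = {
		{ LINK_SPEED_1G,  LPFC_USER_LINK_SPEED_1G },
		{ LINK_SPEED_2G,  LPFC_USER_LINK_SPEED_2G },
		{ LINK_SPEED_4G,  LPFC_USER_LINK_SPEED_4G },
		{ LINK_SPEED_8G,  LPFC_USER_LINK_SPEED_8G },
		{ LINK_SPEED_10G, LPFC_USER_LINK_SPEED_10G },
		{ LINK_SPEED_16G, LPFC_USER_LINK_SPEED_16G },
		{ LINK_SPEED_32G, LPFC_USER_LINK_SPEED_32G },
		{ 0xffff,         LPFC_USER_LINK_SPEED_AUTO },
	};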
@@ -7256,6 +7312,7 @@ int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
struct lpfc_queue *qdesc;
+ uint32_t wqesize;
int idx;
/*
@@ -7340,15 +7397,10 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.fcp_cq[idx] = qdesc;
/* Create Fast Path FCP WQs */
- if (phba->fcp_embed_io) {
- qdesc = lpfc_sli4_queue_alloc(phba,
- LPFC_WQE128_SIZE,
- LPFC_WQE128_DEF_COUNT);
- } else {
- qdesc = lpfc_sli4_queue_alloc(phba,
- phba->sli4_hba.wq_esize,
- phba->sli4_hba.wq_ecount);
- }
+ wqesize = (phba->fcp_embed_io) ?
+ LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
+ qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
+ phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0503 Failed allocate fast-path FCP "
@@ -10260,6 +10312,7 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
int i, rc = 0;
struct lpfc_dmabuf *dmabuf, *next;
uint32_t offset = 0, temp_offset = 0;
+ uint32_t magic_number, ftype, fid, fsize;
/* It can be null in no-wait mode, sanity check */
if (!fw) {
@@ -10268,18 +10321,19 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
}
image = (struct lpfc_grp_hdr *)fw->data;
+ magic_number = be32_to_cpu(image->magic_number);
+ ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
+ fid = bf_get_be32(lpfc_grp_hdr_id, image);
+ fsize = be32_to_cpu(image->size);
+
INIT_LIST_HEAD(&dma_buffer_list);
- if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
- (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
- LPFC_FILE_TYPE_GROUP) ||
- (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
- (be32_to_cpu(image->size) != fw->size)) {
+ if ((magic_number != LPFC_GROUP_OJECT_MAGIC_G5 &&
+ magic_number != LPFC_GROUP_OJECT_MAGIC_G6) ||
+ ftype != LPFC_FILE_TYPE_GROUP || fsize != fw->size) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3022 Invalid FW image found. "
- "Magic:%x Type:%x ID:%x\n",
- be32_to_cpu(image->magic_number),
- bf_get_be32(lpfc_grp_hdr_file_type, image),
- bf_get_be32(lpfc_grp_hdr_id, image));
+ "Magic:%x Type:%x ID:%x Size %d %zd\n",
+ magic_number, ftype, fid, fsize, fw->size);
rc = -EINVAL;
goto release_out;
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index d197aa176dee..ad350d969bdc 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -413,15 +413,13 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
* struct fcp_cmnd, struct fcp_rsp and the number of bde's
* necessary to support the sg_tablesize.
*/
- psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
+ psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool,
GFP_KERNEL, &psb->dma_handle);
if (!psb->data) {
kfree(psb);
break;
}
- /* Initialize virtual ptrs to dma_buf region. */
- memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
/* Allocate iotag for psb->cur_iocbq. */
iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
@@ -607,7 +605,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
}
/**
- * lpfc_sli4_post_scsi_sgl_list - Psot blocks of scsi buffer sgls from a list
+ * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
* @phba: pointer to lpfc hba data structure.
* @post_sblist: pointer to the scsi buffer list.
*
@@ -736,7 +734,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
}
/**
- * lpfc_sli4_repost_scsi_sgl_list - Repsot all the allocated scsi buffer sgls
+ * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
* @phba: pointer to lpfc hba data structure.
*
* This routine walks the list of scsi buffers that have been allocated and
@@ -821,13 +819,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
* for the struct fcp_cmnd, struct fcp_rsp and the number
* of bde's necessary to support the sg_tablesize.
*/
- psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
+ psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool,
GFP_KERNEL, &psb->dma_handle);
if (!psb->data) {
kfree(psb);
break;
}
- memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
/*
* 4K Page alignment is CRITICAL to BlockGuard, double check
@@ -857,7 +854,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
psb->data, psb->dma_handle);
kfree(psb);
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "3368 Failed to allocated IOTAG for"
+ "3368 Failed to allocate IOTAG for"
" XRI:0x%x\n", lxri);
lpfc_sli4_free_xri(phba, lxri);
break;
@@ -1136,7 +1133,7 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
*
* This routine does the pci dma mapping for scatter-gather list of scsi cmnd
* field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
- * through sg elements and format the bdea. This routine also initializes all
+ * through sg elements and format the bde. This routine also initializes all
* IOCB fields which are dependent on scsi command request buffer.
*
* Return codes:
@@ -1269,13 +1266,16 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-/* Return if if error injection is detected by Initiator */
+/* Return BG_ERR_INIT if error injection is detected by Initiator */
#define BG_ERR_INIT 0x1
-/* Return if if error injection is detected by Target */
+/* Return BG_ERR_TGT if error injection is detected by Target */
#define BG_ERR_TGT 0x2
-/* Return if if swapping CSUM<-->CRC is required for error injection */
+/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP 0x10
-/* Return if disabling Guard/Ref/App checking is required for error injection */
+/**
+ * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
+ * error injection
+ **/
#define BG_ERR_CHECK 0x20
/**
@@ -4139,13 +4139,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
- /* The sdev is not guaranteed to be valid post scsi_done upcall. */
- cmd->scsi_done(cmd);
-
spin_lock_irqsave(&phba->hbalock, flags);
lpfc_cmd->pCmd = NULL;
spin_unlock_irqrestore(&phba->hbalock, flags);
+ /* The sdev is not guaranteed to be valid post scsi_done upcall. */
+ cmd->scsi_done(cmd);
+
/*
* If there is a thread waiting for command completion,
* wake up the thread.
@@ -4822,7 +4822,7 @@ wait_for_cmpl:
ret = FAILED;
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0748 abort handler timed out waiting "
- "for abortng I/O (xri:x%x) to complete: "
+ "for aborting I/O (xri:x%x) to complete: "
"ret %#x, ID %d, LUN %llu\n",
iocb->sli4_xritag, ret,
cmnd->device->id, cmnd->device->lun);
@@ -4945,26 +4945,30 @@ lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
* 0x2002 - Success.
**/
static int
-lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
- unsigned tgt_id, uint64_t lun_id,
- uint8_t task_mgmt_cmd)
+lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
+ unsigned int tgt_id, uint64_t lun_id,
+ uint8_t task_mgmt_cmd)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_scsi_buf *lpfc_cmd;
struct lpfc_iocbq *iocbq;
struct lpfc_iocbq *iocbqrsp;
- struct lpfc_nodelist *pnode = rdata->pnode;
+ struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist *pnode;
int ret;
int status;
- if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
return FAILED;
+ pnode = rdata->pnode;
- lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
+ lpfc_cmd = lpfc_get_scsi_buf(phba, pnode);
if (lpfc_cmd == NULL)
return FAILED;
lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
lpfc_cmd->rdata = rdata;
+ lpfc_cmd->pCmd = cmnd;
status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
task_mgmt_cmd);
@@ -5171,7 +5175,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
- status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
+ status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
FCP_LUN_RESET);
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -5249,7 +5253,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
- status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
+ status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
FCP_TARGET_RESET);
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -5328,7 +5332,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
if (!match)
continue;
- status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
+ status = lpfc_send_taskmgmt(vport, cmnd,
i, 0, FCP_TARGET_RESET);
if (status != SUCCESS) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f4f77c5b0c83..4faa7672fc1d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -47,6 +47,7 @@
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
+#include "lpfc_version.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
@@ -2678,15 +2679,16 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
- list_del_init(&cmd_iocb->list);
if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
+ /* remove from txcmpl queue list */
+ list_del_init(&cmd_iocb->list);
cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ return cmd_iocb;
}
- return cmd_iocb;
}
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0317 iotag x%x is out off "
+ "0317 iotag x%x is out of "
"range: max iotag x%x wd0 x%x\n",
iotag, phba->sli.last_iotag,
*(((uint32_t *) &prspiocb->iocb) + 7));
@@ -2721,8 +2723,9 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
return cmd_iocb;
}
}
+
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0372 iotag x%x is out off range: max iotag (x%x)\n",
+ "0372 iotag x%x is out of range: max iotag (x%x)\n",
iotag, phba->sli.last_iotag);
return NULL;
}
@@ -6291,6 +6294,25 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
return 0;
}
+void
+lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+ uint32_t len;
+
+ len = sizeof(struct lpfc_mbx_set_host_data) -
+ sizeof(struct lpfc_sli4_cfg_mhdr);
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
+ LPFC_SLI4_MBX_EMBED);
+
+ mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
+ mbox->u.mqe.un.set_host_data.param_len = 8;
+ snprintf(mbox->u.mqe.un.set_host_data.data,
+ LPFC_HOST_OS_DRIVER_VERSION_SIZE,
+ "Linux %s v"LPFC_DRIVER_VERSION,
+ (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
+}
+
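
lpfc_set_host_data() above embeds a SET_HOST_DATA (opcode 0x5D) mailbox command carrying the OS and driver identity. A hedged sketch of what the data field ends up holding for an FC-mode port, given the version string updated later in this series:

	/* Sketch: the string snprintf() formats into set_host_data.data. */
	char data[LPFC_HOST_OS_DRIVER_VERSION_SIZE];

	snprintf(data, sizeof(data), "Linux %s v" LPFC_DRIVER_VERSION, "FC");
	/* -> "Linux FC v11.2.0.2", truncated at 48 bytes if ever longer */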
/**
* lpfc_sli4_hba_setup - SLI4 device initialization PCI function
* @phba: Pointer to HBA context object.
@@ -6542,6 +6564,15 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
goto out_free_mbox;
}
+ lpfc_set_host_data(phba, mboxq);
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ "2134 Failed to set host os driver version %x",
+ rc);
+ }
+
/* Read the port's service parameters. */
rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
if (rc) {
@@ -11781,6 +11812,8 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
/* Look up the ELS command IOCB and create pseudo response IOCB */
cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ /* Put the iocb back on the txcmplq */
+ lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
if (unlikely(!cmdiocbq)) {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c9bf20eb7223..50bfc43ebcb0 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "11.2.0.0."
+#define LPFC_DRIVER_VERSION "11.2.0.2"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index a590089b9397..ccb68d12692c 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -28,17 +28,15 @@
/* Definitions for the core NCR5380 driver. */
-#define NCR5380_implementation_fields unsigned char *pdma_base; \
- int pdma_residual
+#define NCR5380_implementation_fields int pdma_residual
-#define NCR5380_read(reg) macscsi_read(instance, reg)
-#define NCR5380_write(reg, value) macscsi_write(instance, reg, value)
+#define NCR5380_read(reg) in_8(hostdata->io + ((reg) << 4))
+#define NCR5380_write(reg, value) out_8(hostdata->io + ((reg) << 4), value)
-#define NCR5380_dma_xfer_len(instance, cmd, phase) \
- macscsi_dma_xfer_len(instance, cmd)
+#define NCR5380_dma_xfer_len macscsi_dma_xfer_len
#define NCR5380_dma_recv_setup macscsi_pread
#define NCR5380_dma_send_setup macscsi_pwrite
-#define NCR5380_dma_residual(instance) (hostdata->pdma_residual)
+#define NCR5380_dma_residual macscsi_dma_residual
#define NCR5380_intr macscsi_intr
#define NCR5380_queue_command macscsi_queue_command
@@ -61,20 +59,6 @@ module_param(setup_hostid, int, 0);
static int setup_toshiba_delay = -1;
module_param(setup_toshiba_delay, int, 0);
-/*
- * NCR 5380 register access functions
- */
-
-static inline char macscsi_read(struct Scsi_Host *instance, int reg)
-{
- return in_8(instance->base + (reg << 4));
-}
-
-static inline void macscsi_write(struct Scsi_Host *instance, int reg, int value)
-{
- out_8(instance->base + (reg << 4), value);
-}
-
#ifndef MODULE
static int __init mac_scsi_setup(char *str)
{
@@ -167,16 +151,15 @@ __asm__ __volatile__ \
: "0"(s), "1"(d), "2"(n) \
: "d0")
-static int macscsi_pread(struct Scsi_Host *instance,
- unsigned char *dst, int len)
+static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
+ unsigned char *dst, int len)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- unsigned char *s = hostdata->pdma_base + (INPUT_DATA_REG << 4);
+ unsigned char *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
unsigned char *d = dst;
int n = len;
int transferred;
- while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
+ while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_DRQ | BASR_PHASE_MATCH,
BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
CP_IO_TO_MEM(s, d, n);
@@ -189,23 +172,23 @@ static int macscsi_pread(struct Scsi_Host *instance,
return 0;
/* Target changed phase early? */
- if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ,
+ if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
scmd_printk(KERN_ERR, hostdata->connected,
"%s: !REQ and !ACK\n", __func__);
if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
return 0;
- dsprintk(NDEBUG_PSEUDO_DMA, instance,
+ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
"%s: bus error (%d/%d)\n", __func__, transferred, len);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
d = dst + transferred;
n = len - transferred;
}
scmd_printk(KERN_ERR, hostdata->connected,
"%s: phase mismatch or !DRQ\n", __func__);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
return -1;
}
@@ -270,16 +253,15 @@ __asm__ __volatile__ \
: "0"(s), "1"(d), "2"(n) \
: "d0")
-static int macscsi_pwrite(struct Scsi_Host *instance,
- unsigned char *src, int len)
+static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
+ unsigned char *src, int len)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char *s = src;
- unsigned char *d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4);
+ unsigned char *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
int n = len;
int transferred;
- while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
+ while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_DRQ | BASR_PHASE_MATCH,
BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
CP_MEM_TO_IO(s, d, n);
@@ -288,7 +270,7 @@ static int macscsi_pwrite(struct Scsi_Host *instance,
hostdata->pdma_residual = len - transferred;
/* Target changed phase early? */
- if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ,
+ if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
scmd_printk(KERN_ERR, hostdata->connected,
"%s: !REQ and !ACK\n", __func__);
@@ -297,7 +279,7 @@ static int macscsi_pwrite(struct Scsi_Host *instance,
/* No bus error. */
if (n == 0) {
- if (NCR5380_poll_politely(instance, TARGET_COMMAND_REG,
+ if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
TCR_LAST_BYTE_SENT,
TCR_LAST_BYTE_SENT, HZ / 64) < 0)
scmd_printk(KERN_ERR, hostdata->connected,
@@ -305,25 +287,23 @@ static int macscsi_pwrite(struct Scsi_Host *instance,
return 0;
}
- dsprintk(NDEBUG_PSEUDO_DMA, instance,
+ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
"%s: bus error (%d/%d)\n", __func__, transferred, len);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
s = src + transferred;
n = len - transferred;
}
scmd_printk(KERN_ERR, hostdata->connected,
"%s: phase mismatch or !DRQ\n", __func__);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
return -1;
}
-static int macscsi_dma_xfer_len(struct Scsi_Host *instance,
+static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
if (hostdata->flags & FLAG_NO_PSEUDO_DMA ||
cmd->SCp.this_residual < 16)
return 0;
@@ -331,6 +311,11 @@ static int macscsi_dma_xfer_len(struct Scsi_Host *instance,
return cmd->SCp.this_residual;
}
+static int macscsi_dma_residual(struct NCR5380_hostdata *hostdata)
+{
+ return hostdata->pdma_residual;
+}
+
#include "NCR5380.c"
#define DRV_MODULE_NAME "mac_scsi"
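
The mac_scsi conversion above drops the Scsi_Host-based register wrappers: access now goes through NCR5380_hostdata, so the core driver's polling helpers no longer need an instance pointer on hot paths. The Mac decodes one 5380 register every 16 bytes, hence the (reg << 4) stride. A sketch of the resulting accessor shape (illustrative name):

	/* Sketch: hostdata-based 5380 register read with 16-byte stride. */
	static inline u8 example_5380_read(struct NCR5380_hostdata *hostdata,
					   unsigned int reg)
	{
		return in_8(hostdata->io + (reg << 4));
	}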
@@ -356,6 +341,7 @@ static struct scsi_host_template mac_scsi_template = {
static int __init mac_scsi_probe(struct platform_device *pdev)
{
struct Scsi_Host *instance;
+ struct NCR5380_hostdata *hostdata;
int error;
int host_flags = 0;
struct resource *irq, *pio_mem, *pdma_mem = NULL;
@@ -388,17 +374,18 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
if (!instance)
return -ENOMEM;
- instance->base = pio_mem->start;
if (irq)
instance->irq = irq->start;
else
instance->irq = NO_IRQ;
- if (pdma_mem && setup_use_pdma) {
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ hostdata = shost_priv(instance);
+ hostdata->base = pio_mem->start;
+ hostdata->io = (void *)pio_mem->start;
- hostdata->pdma_base = (unsigned char *)pdma_mem->start;
- } else
+ if (pdma_mem && setup_use_pdma)
+ hostdata->pdma_io = (void *)pdma_mem->start;
+ else
host_flags |= FLAG_NO_PSEUDO_DMA;
host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 3aaea713bf37..fdd519c1dd57 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "06.811.02.00-rc1"
-#define MEGASAS_RELDATE "April 12, 2016"
+#define MEGASAS_VERSION "06.812.07.00-rc1"
+#define MEGASAS_RELDATE "August 22, 2016"
/*
* Device IDs
@@ -1429,6 +1429,8 @@ enum FW_BOOT_CONTEXT {
#define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14
#define MR_MAX_MSIX_REG_ARRAY 16
#define MR_RDPQ_MODE_OFFSET 0X00800000
+#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000
+
/*
* register set for both 1068 and 1078 controllers
* structure extended for 1078 registers
@@ -2118,7 +2120,6 @@ struct megasas_instance {
u32 ctrl_context_pages;
struct megasas_ctrl_info *ctrl_info;
unsigned int msix_vectors;
- struct msix_entry msixentry[MEGASAS_MAX_MSIX_QUEUES];
struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES];
u64 map_id;
u64 pd_seq_map_id;
@@ -2140,6 +2141,7 @@ struct megasas_instance {
u8 is_imr;
u8 is_rdpq;
bool dev_handle;
+ bool fw_sync_cache_support;
};
struct MR_LD_VF_MAP {
u32 size;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index d8b1fbd4c8aa..6484c382f670 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1700,11 +1700,8 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
goto out_done;
}
- /*
- * FW takes care of flush cache on its own for Virtual Disk.
- * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW.
- */
- if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
+ if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd) &&
+ (!instance->fw_sync_cache_support)) {
scmd->result = DID_OK << 16;
goto out_done;
}
@@ -4840,7 +4837,7 @@ fail_alloc_cmds:
}
/*
- * megasas_setup_irqs_msix - register legacy interrupts.
+ * megasas_setup_irqs_ioapic - register legacy interrupts.
* @instance: Adapter soft state
*
* Do not enable interrupt, only setup ISRs.
@@ -4855,8 +4852,9 @@ megasas_setup_irqs_ioapic(struct megasas_instance *instance)
pdev = instance->pdev;
instance->irq_context[0].instance = instance;
instance->irq_context[0].MSIxIndex = 0;
- if (request_irq(pdev->irq, instance->instancet->service_isr,
- IRQF_SHARED, "megasas", &instance->irq_context[0])) {
+ if (request_irq(pci_irq_vector(pdev, 0),
+ instance->instancet->service_isr, IRQF_SHARED,
+ "megasas", &instance->irq_context[0])) {
dev_err(&instance->pdev->dev,
"Failed to register IRQ from %s %d\n",
__func__, __LINE__);
@@ -4877,28 +4875,23 @@ megasas_setup_irqs_ioapic(struct megasas_instance *instance)
static int
megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
{
- int i, j, cpu;
+ int i, j;
struct pci_dev *pdev;
pdev = instance->pdev;
/* Try MSI-x */
- cpu = cpumask_first(cpu_online_mask);
for (i = 0; i < instance->msix_vectors; i++) {
instance->irq_context[i].instance = instance;
instance->irq_context[i].MSIxIndex = i;
- if (request_irq(instance->msixentry[i].vector,
+ if (request_irq(pci_irq_vector(pdev, i),
instance->instancet->service_isr, 0, "megasas",
&instance->irq_context[i])) {
dev_err(&instance->pdev->dev,
"Failed to register IRQ for vector %d.\n", i);
- for (j = 0; j < i; j++) {
- if (smp_affinity_enable)
- irq_set_affinity_hint(
- instance->msixentry[j].vector, NULL);
- free_irq(instance->msixentry[j].vector,
- &instance->irq_context[j]);
- }
+ for (j = 0; j < i; j++)
+ free_irq(pci_irq_vector(pdev, j),
+ &instance->irq_context[j]);
/* Retry irq register for IO_APIC*/
instance->msix_vectors = 0;
if (is_probe)
@@ -4906,14 +4899,6 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
else
return -1;
}
- if (smp_affinity_enable) {
- if (irq_set_affinity_hint(instance->msixentry[i].vector,
- get_cpu_mask(cpu)))
- dev_err(&instance->pdev->dev,
- "Failed to set affinity hint"
- " for cpu %d\n", cpu);
- cpu = cpumask_next(cpu, cpu_online_mask);
- }
}
return 0;
}
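
The interrupt-setup rework above replaces the driver's hand-rolled msix_entry array and per-vector affinity hints with pci_alloc_irq_vectors(), which can apply affinity spreading itself via PCI_IRQ_AFFINITY. A minimal sketch of the generic idiom (function and name strings are placeholders; the real driver falls back to a single legacy vector separately):

	#include <linux/pci.h>
	#include <linux/interrupt.h>

	/* Sketch: allocate vectors once, then address them by index. */
	static int example_setup_irqs(struct pci_dev *pdev, irq_handler_t isr,
				      void *drvdata, unsigned int max_vecs)
	{
		int i, ret;
		int nvec = pci_alloc_irq_vectors(pdev, 1, max_vecs,
						 PCI_IRQ_ALL_TYPES |
						 PCI_IRQ_AFFINITY);
		if (nvec < 0)
			return nvec;

		for (i = 0; i < nvec; i++) {
			ret = request_irq(pci_irq_vector(pdev, i), isr, 0,
					  "example", drvdata);
			if (ret)
				goto undo;
		}
		return nvec;
	undo:
		while (--i >= 0)
			free_irq(pci_irq_vector(pdev, i), drvdata);
		pci_free_irq_vectors(pdev);
		return ret;
	}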
@@ -4930,14 +4915,12 @@ megasas_destroy_irqs(struct megasas_instance *instance) {
if (instance->msix_vectors)
for (i = 0; i < instance->msix_vectors; i++) {
- if (smp_affinity_enable)
- irq_set_affinity_hint(
- instance->msixentry[i].vector, NULL);
- free_irq(instance->msixentry[i].vector,
+ free_irq(pci_irq_vector(instance->pdev, i),
&instance->irq_context[i]);
}
else
- free_irq(instance->pdev->irq, &instance->irq_context[0]);
+ free_irq(pci_irq_vector(instance->pdev, 0),
+ &instance->irq_context[0]);
}
/**
@@ -5095,6 +5078,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
0x4000000) >> 0x1a;
if (msix_enable && !msix_disable) {
+ int irq_flags = PCI_IRQ_MSIX;
+
scratch_pad_2 = readl
(&instance->reg_set->outbound_scratch_pad_2);
/* Check max MSI-X vectors */
@@ -5131,15 +5116,18 @@ static int megasas_init_fw(struct megasas_instance *instance)
/* Don't bother allocating more MSI-X vectors than cpus */
instance->msix_vectors = min(instance->msix_vectors,
(unsigned int)num_online_cpus());
- for (i = 0; i < instance->msix_vectors; i++)
- instance->msixentry[i].entry = i;
- i = pci_enable_msix_range(instance->pdev, instance->msixentry,
- 1, instance->msix_vectors);
+ if (smp_affinity_enable)
+ irq_flags |= PCI_IRQ_AFFINITY;
+ i = pci_alloc_irq_vectors(instance->pdev, 1,
+ instance->msix_vectors, irq_flags);
if (i > 0)
instance->msix_vectors = i;
else
instance->msix_vectors = 0;
}
+ i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
+ if (i < 0)
+ goto fail_setup_irqs;
dev_info(&instance->pdev->dev,
"firmware supports msix\t: (%d)", fw_msix_count);
@@ -5152,11 +5140,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
- if (instance->msix_vectors ?
- megasas_setup_irqs_msix(instance, 1) :
- megasas_setup_irqs_ioapic(instance))
- goto fail_setup_irqs;
-
instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
GFP_KERNEL);
if (instance->ctrl_info == NULL)
@@ -5172,6 +5155,10 @@ static int megasas_init_fw(struct megasas_instance *instance)
if (instance->instancet->init_adapter(instance))
goto fail_init_adapter;
+ if (instance->msix_vectors ?
+ megasas_setup_irqs_msix(instance, 1) :
+ megasas_setup_irqs_ioapic(instance))
+ goto fail_init_adapter;
instance->instancet->enable_intr(instance);
@@ -5315,7 +5302,7 @@ fail_init_adapter:
megasas_destroy_irqs(instance);
fail_setup_irqs:
if (instance->msix_vectors)
- pci_disable_msix(instance->pdev);
+ pci_free_irq_vectors(instance->pdev);
instance->msix_vectors = 0;
fail_ready_state:
kfree(instance->ctrl_info);
@@ -5584,7 +5571,6 @@ static int megasas_io_attach(struct megasas_instance *instance)
/*
* Export parameters required by SCSI mid-layer
*/
- host->irq = instance->pdev->irq;
host->unique_id = instance->unique_id;
host->can_queue = instance->max_scsi_cmds;
host->this_id = instance->init_id;
@@ -5947,7 +5933,7 @@ fail_io_attach:
else
megasas_release_mfi(instance);
if (instance->msix_vectors)
- pci_disable_msix(instance->pdev);
+ pci_free_irq_vectors(instance->pdev);
fail_init_mfi:
fail_alloc_dma_buf:
if (instance->evt_detail)
@@ -6105,7 +6091,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
megasas_destroy_irqs(instance);
if (instance->msix_vectors)
- pci_disable_msix(instance->pdev);
+ pci_free_irq_vectors(instance->pdev);
pci_save_state(pdev);
pci_disable_device(pdev);
@@ -6125,6 +6111,7 @@ megasas_resume(struct pci_dev *pdev)
int rval;
struct Scsi_Host *host;
struct megasas_instance *instance;
+ int irq_flags = PCI_IRQ_LEGACY;
instance = pci_get_drvdata(pdev);
host = instance->host;
@@ -6160,9 +6147,15 @@ megasas_resume(struct pci_dev *pdev)
goto fail_ready_state;
/* Now re-enable MSI-X */
- if (instance->msix_vectors &&
- pci_enable_msix_exact(instance->pdev, instance->msixentry,
- instance->msix_vectors))
+ if (instance->msix_vectors) {
+ irq_flags = PCI_IRQ_MSIX;
+ if (smp_affinity_enable)
+ irq_flags |= PCI_IRQ_AFFINITY;
+ }
+ rval = pci_alloc_irq_vectors(instance->pdev, 1,
+ instance->msix_vectors ?
+ instance->msix_vectors : 1, irq_flags);
+ if (rval < 0)
goto fail_reenable_msix;
if (instance->ctrl_context) {
@@ -6245,6 +6238,34 @@ fail_reenable_msix:
#define megasas_resume NULL
#endif
+static inline int
+megasas_wait_for_adapter_operational(struct megasas_instance *instance)
+{
+ int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
+ int i;
+
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
+ return 1;
+
+ for (i = 0; i < wait_time; i++) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
+ break;
+
+ if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
+ dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
+
+ msleep(1000);
+ }
+
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
+ dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n",
+ __func__);
+ return 1;
+ }
+
+ return 0;
+}
+
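
megasas_wait_for_adapter_operational() above polls adprecovery for up to twice MEGASAS_RESET_WAIT_TIME seconds before declaring the HBA unusable. Its callers (detach and shutdown, below) use it to skip firmware DCMDs on a dead controller, roughly:

	/* Sketch: gate cache-flush/shutdown DCMDs on adapter state. */
	if (!megasas_wait_for_adapter_operational(instance)) {
		megasas_flush_cache(instance);
		megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
	}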
/**
* megasas_detach_one - PCI hot"un"plug entry point
* @pdev: PCI device structure
@@ -6269,9 +6290,14 @@ static void megasas_detach_one(struct pci_dev *pdev)
if (instance->fw_crash_state != UNAVAILABLE)
megasas_free_host_crash_buffer(instance);
scsi_remove_host(instance->host);
+
+ if (megasas_wait_for_adapter_operational(instance))
+ goto skip_firing_dcmds;
+
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+skip_firing_dcmds:
/* cancel the delayed work if this work still in queue*/
if (instance->ev != NULL) {
struct megasas_aen_event *ev = instance->ev;
@@ -6302,7 +6328,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
megasas_destroy_irqs(instance);
if (instance->msix_vectors)
- pci_disable_msix(instance->pdev);
+ pci_free_irq_vectors(instance->pdev);
if (instance->ctrl_context) {
megasas_release_fusion(instance);
@@ -6385,13 +6411,19 @@ static void megasas_shutdown(struct pci_dev *pdev)
struct megasas_instance *instance = pci_get_drvdata(pdev);
instance->unload = 1;
+
+ if (megasas_wait_for_adapter_operational(instance))
+ goto skip_firing_dcmds;
+
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+
+skip_firing_dcmds:
instance->instancet->disable_intr(instance);
megasas_destroy_irqs(instance);
if (instance->msix_vectors)
- pci_disable_msix(instance->pdev);
+ pci_free_irq_vectors(instance->pdev);
}
/**
@@ -6752,8 +6784,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
spin_unlock_irqrestore(&instance->hba_lock, flags);
- dev_err(&instance->pdev->dev, "timed out while"
- "waiting for HBA to recover\n");
+ dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
error = -ENODEV;
goto out_up;
}
@@ -6821,8 +6852,7 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
spin_lock_irqsave(&instance->hba_lock, flags);
if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
spin_unlock_irqrestore(&instance->hba_lock, flags);
- dev_err(&instance->pdev->dev, "timed out while waiting"
- "for HBA to recover\n");
+ dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
return -ENODEV;
}
spin_unlock_irqrestore(&instance->hba_lock, flags);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index e413113c86ac..f237d0003df3 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -782,7 +782,8 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {
- pd = MR_ArPdGet(arRef, physArm + 1, map);
+ physArm = physArm + 1;
+ pd = MR_ArPdGet(arRef, physArm, map);
if (pd != MR_PD_INVALID)
*pDevHandle = MR_PdDevHandleGet(pd, map);
}
@@ -879,7 +880,8 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {
/* Get alternate Pd. */
- pd = MR_ArPdGet(arRef, physArm + 1, map);
+ physArm = physArm + 1;
+ pd = MR_ArPdGet(arRef, physArm, map);
if (pd != MR_PD_INVALID)
/* Get dev handle from Pd */
*pDevHandle = MR_PdDevHandleGet(pd, map);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 52d8bbf7feb5..24778ba4b6e8 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -748,6 +748,11 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
goto fail_fw_init;
}
+ instance->fw_sync_cache_support = (scratch_pad_2 &
+ MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
+ dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
+ instance->fw_sync_cache_support ? "Yes" : "No");
+
IOCInitMessage =
dma_alloc_coherent(&instance->pdev->dev,
sizeof(struct MPI2_IOC_INIT_REQUEST),
@@ -2000,6 +2005,8 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
pRAID_Context->regLockFlags |=
(MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
+ pRAID_Context->Type = MPI2_TYPE_CUDA;
+ pRAID_Context->nseg = 0x1;
} else if (fusion->fast_path_io) {
pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
pRAID_Context->configSeqNum = 0;
@@ -2035,12 +2042,10 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
pRAID_Context->timeoutValue =
cpu_to_le16((os_timeout_value > timeout_limit) ?
timeout_limit : os_timeout_value);
- if (fusion->adapter_type == INVADER_SERIES) {
- pRAID_Context->Type = MPI2_TYPE_CUDA;
- pRAID_Context->nseg = 0x1;
+ if (fusion->adapter_type == INVADER_SERIES)
io_request->IoFlags |=
cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
- }
+
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -2463,12 +2468,15 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
/* Start collecting crash, if DMA bit is done */
if ((fw_state == MFI_STATE_FAULT) && dma_state)
schedule_work(&instance->crash_init);
- else if (fw_state == MFI_STATE_FAULT)
- schedule_work(&instance->work_init);
+ else if (fw_state == MFI_STATE_FAULT) {
+ if (instance->unload == 0)
+ schedule_work(&instance->work_init);
+ }
} else if (fw_state == MFI_STATE_FAULT) {
dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt"
"for scsi%d\n", instance->host->host_no);
- schedule_work(&instance->work_init);
+ if (instance->unload == 0)
+ schedule_work(&instance->work_init);
}
}
@@ -2823,6 +2831,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
"will reset adapter scsi%d.\n",
instance->host->host_no);
+ *convert = 1;
retval = 1;
}
out:
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 95356a82ee99..fa61baf7c74d 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -478,6 +478,13 @@ typedef struct _MPI2_CONFIG_REPLY {
#define MPI26_MFGPAGE_DEVID_SAS3324_3 (0x00C2)
#define MPI26_MFGPAGE_DEVID_SAS3324_4 (0x00C3)
+#define MPI26_MFGPAGE_DEVID_SAS3516 (0x00AA)
+#define MPI26_MFGPAGE_DEVID_SAS3516_1 (0x00AB)
+#define MPI26_MFGPAGE_DEVID_SAS3416 (0x00AC)
+#define MPI26_MFGPAGE_DEVID_SAS3508 (0x00AD)
+#define MPI26_MFGPAGE_DEVID_SAS3508_1 (0x00AE)
+#define MPI26_MFGPAGE_DEVID_SAS3408 (0x00AF)
+
/*Manufacturing Page 0 */
typedef struct _MPI2_CONFIG_PAGE_MAN_0 {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index a1a5ceb42ce6..f00ef88a378a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -849,7 +849,7 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
ack_request->EventContext = mpi_reply->EventContext;
ack_request->VF_ID = 0; /* TODO */
ack_request->VP_ID = 0;
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
out:
@@ -1078,7 +1078,7 @@ _base_interrupt(int irq, void *bus_id)
* new reply host index value in ReplyPostIndex Field and msix_index
* value in MSIxIndex field.
*/
- if (ioc->msix96_vector)
+ if (ioc->combined_reply_queue)
writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
MPI2_RPHI_MSIX_INDEX_SHIFT),
ioc->replyPostRegisterIndex[msix_index/8]);
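Each supplemental reply-post host-index register serves a group of eight MSI-X vectors, so the low three bits of msix_index select the slot within a register and msix_index/8 selects the register (twelve such registers covered the old 96-vector limit behind the retired msix96_vector name; Gen3.5 grows the count to sixteen). A userspace model of that split; the shift value is an illustrative stand-in for MPI2_RPHI_MSIX_INDEX_SHIFT:

#include <stdio.h>
#include <stdint.h>

#define MSIX_INDEX_SHIFT 24     /* illustrative stand-in */

int main(void)
{
        uint32_t regs[12] = { 0 };  /* stand-in for replyPostRegisterIndex[] */
        unsigned int msix_index = 13;
        uint32_t host_index = 0x42;

        /* Register 13/8 == 1 serves vectors 8..15; slot 13 & 7 == 5. */
        regs[msix_index / 8] =
                host_index | ((msix_index & 7) << MSIX_INDEX_SHIFT);

        printf("reg %u <- 0x%08x\n", msix_index / 8, regs[msix_index / 8]);
        return 0;
}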
@@ -1959,7 +1959,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
struct msix_entry *entries, *a;
int r;
- int i;
+ int i, local_max_msix_vectors;
u8 try_msix = 0;
if (msix_disable == -1 || msix_disable == 0)
@@ -1979,13 +1979,15 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
ioc->cpu_count, max_msix_vectors);
if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
- max_msix_vectors = 8;
+ local_max_msix_vectors = 8;
+ else
+ local_max_msix_vectors = max_msix_vectors;
- if (max_msix_vectors > 0) {
- ioc->reply_queue_count = min_t(int, max_msix_vectors,
+ if (local_max_msix_vectors > 0) {
+ ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
ioc->reply_queue_count);
ioc->msix_vector_count = ioc->reply_queue_count;
- } else if (max_msix_vectors == 0)
+ } else if (local_max_msix_vectors == 0)
goto try_ioapic;
if (ioc->msix_vector_count < ioc->cpu_count)
@@ -2050,7 +2052,7 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
_base_free_irq(ioc);
_base_disable_msix(ioc);
- if (ioc->msix96_vector) {
+ if (ioc->combined_reply_queue) {
kfree(ioc->replyPostRegisterIndex);
ioc->replyPostRegisterIndex = NULL;
}
@@ -2160,7 +2162,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
/* Use the Combined reply queue feature only for SAS3 C0 & higher
* revision HBAs and also only when reply queue count is greater than 8
*/
- if (ioc->msix96_vector && ioc->reply_queue_count > 8) {
+ if (ioc->combined_reply_queue && ioc->reply_queue_count > 8) {
/* Determine the Supplemental Reply Post Host Index Registers
* Addresses. Supplemental Reply Post Host Index Registers
* starts at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
@@ -2168,7 +2170,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
* MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET from previous one.
*/
ioc->replyPostRegisterIndex = kcalloc(
- MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT,
+ ioc->combined_reply_index_count,
sizeof(resource_size_t *), GFP_KERNEL);
if (!ioc->replyPostRegisterIndex) {
dfailprintk(ioc, printk(MPT3SAS_FMT
@@ -2178,14 +2180,14 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
goto out_fail;
}
- for (i = 0; i < MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT; i++) {
+ for (i = 0; i < ioc->combined_reply_index_count; i++) {
ioc->replyPostRegisterIndex[i] = (resource_size_t *)
((u8 *)&ioc->chip->Doorbell +
MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
(i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
}
} else
- ioc->msix96_vector = 0;
+ ioc->combined_reply_queue = 0;
if (ioc->is_warpdrive) {
ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
@@ -2462,15 +2464,15 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
#endif
/**
- * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
+ * _base_put_smid_scsi_io - send SCSI_IO request to firmware
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle
*
* Return nothing.
*/
-void
-mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
+static void
+_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
{
Mpi2RequestDescriptorUnion_t descriptor;
u64 *request = (u64 *)&descriptor;
@@ -2486,15 +2488,15 @@ mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
}
/**
- * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
+ * _base_put_smid_fast_path - send fast path request to firmware
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle
*
* Return nothing.
*/
-void
-mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+static void
+_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 handle)
{
Mpi2RequestDescriptorUnion_t descriptor;
@@ -2511,14 +2513,14 @@ mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
}
/**
- * mpt3sas_base_put_smid_hi_priority - send Task Managment request to firmware
+ * _base_put_smid_hi_priority - send Task Management request to firmware
* @ioc: per adapter object
* @smid: system request message index
* @msix_task: msix_task will be same as the msix of the IO in case of task abort, else 0.
* Return nothing.
*/
-void
-mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+static void
+_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 msix_task)
{
Mpi2RequestDescriptorUnion_t descriptor;
@@ -2535,14 +2537,14 @@ mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
}
/**
- * mpt3sas_base_put_smid_default - Default, primarily used for config pages
+ * _base_put_smid_default - Default, primarily used for config pages
* @ioc: per adapter object
* @smid: system request message index
*
* Return nothing.
*/
-void
-mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+static void
+_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
Mpi2RequestDescriptorUnion_t descriptor;
u64 *request = (u64 *)&descriptor;
@@ -2557,6 +2559,95 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
}
/**
+ * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
+ *   Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle, unused in this function, for function type match
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle)
+{
+ Mpi26AtomicRequestDescriptor_t descriptor;
+ u32 *request = (u32 *)&descriptor;
+
+ descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
+ descriptor.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SMID = cpu_to_le16(smid);
+
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_fast_path_atomic - send fast path request to firmware
+ * using Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle, unused in this function, for function type match
+ * Return nothing.
+ */
+static void
+_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle)
+{
+ Mpi26AtomicRequestDescriptor_t descriptor;
+ u32 *request = (u32 *)&descriptor;
+
+ descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
+ descriptor.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SMID = cpu_to_le16(smid);
+
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_hi_priority_atomic - send Task Management request to
+ * firmware using Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_task: msix_task will be same as the msix of the IO in case of task abort, else 0
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 msix_task)
+{
+ Mpi26AtomicRequestDescriptor_t descriptor;
+ u32 *request = (u32 *)&descriptor;
+
+ descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+ descriptor.MSIxIndex = msix_task;
+ descriptor.SMID = cpu_to_le16(smid);
+
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_default - Default, primarily used for config pages
+ * use Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ Mpi26AtomicRequestDescriptor_t descriptor;
+ u32 *request = (u32 *)&descriptor;
+
+ descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ descriptor.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SMID = cpu_to_le16(smid);
+
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
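The four _atomic variants above exist because a Gen3.5 IOC accepts a 32-bit Atomic Request Descriptor, which a single writel() posts race-free, avoiding the 64-bit descriptor's writeq-or-locked-double-writel dance handled by _base_writeq() below. A userspace sketch of packing the three fields into one word; the layout mirrors Mpi26AtomicRequestDescriptor_t as an assumption for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Assumed field widths: u8 RequestFlags, u8 MSIxIndex, u16 SMID. */
        uint8_t  request_flags = 0x00;  /* e.g. default descriptor type */
        uint8_t  msix_index = 3;
        uint16_t smid = 0x0042;
        uint32_t word;

        /* One 32-bit word, so one register write posts it atomically. */
        word = (uint32_t)request_flags |
               ((uint32_t)msix_index << 8) |
               ((uint32_t)smid << 16);

        printf("post 0x%08x to AtomicRequestDescriptorPost\n", word);
        return 0;
}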
+/**
* _base_display_OEMs_branding - Display branding string
* @ioc: per adapter object
*
@@ -4070,7 +4161,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
ioc->ioc_link_reset_in_progress = 1;
init_completion(&ioc->base_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
@@ -4170,7 +4261,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
ioc->base_cmds.smid = smid;
memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
init_completion(&ioc->base_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -4355,6 +4446,8 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
if ((facts->IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
ioc->rdpq_array_capable = 1;
+ if (facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
+ ioc->atomic_desc_capable = 1;
facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
facts->IOCRequestFrameSize =
le16_to_cpu(mpi_reply.IOCRequestFrameSize);
@@ -4582,7 +4675,7 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
init_completion(&ioc->port_enable_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -4645,7 +4738,7 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
return 0;
}
@@ -4764,7 +4857,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
mpi_request->EventMasks[i] =
cpu_to_le32(ioc->event_masks[i]);
init_completion(&ioc->base_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -5138,7 +5231,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
/* initialize reply post host index */
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
- if (ioc->msix96_vector)
+ if (ioc->combined_reply_queue)
writel((reply_q->msix_index & 7)<<
MPI2_RPHI_MSIX_INDEX_SHIFT,
ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
@@ -5280,9 +5373,23 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->build_sg = &_base_build_sg_ieee;
ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
+
break;
}
+ if (ioc->atomic_desc_capable) {
+ ioc->put_smid_default = &_base_put_smid_default_atomic;
+ ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
+ ioc->put_smid_fast_path = &_base_put_smid_fast_path_atomic;
+ ioc->put_smid_hi_priority = &_base_put_smid_hi_priority_atomic;
+ } else {
+ ioc->put_smid_default = &_base_put_smid_default;
+ ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
+ ioc->put_smid_fast_path = &_base_put_smid_fast_path;
+ ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
+ }
+
+
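Selecting ioc->put_smid_* once at attach time turns every submission site into an indirect call instead of a per-I/O branch on atomic_desc_capable; that is why the rest of the series rewrites mpt3sas_base_put_smid_*() callers to ioc->put_smid_*(). A compact userspace sketch of the same one-time dispatch, with stand-in names:

#include <stdio.h>
#include <stdint.h>

struct ioc;
typedef void (*put_smid_fn)(struct ioc *ioc, uint16_t smid);

struct ioc {
        int atomic_desc_capable;
        put_smid_fn put_smid_default;
};

static void put_smid_legacy(struct ioc *ioc, uint16_t smid)
{
        (void)ioc;
        printf("64-bit descriptor, smid %u\n", smid);
}

static void put_smid_atomic(struct ioc *ioc, uint16_t smid)
{
        (void)ioc;
        printf("32-bit atomic descriptor, smid %u\n", smid);
}

int main(void)
{
        struct ioc ioc = { .atomic_desc_capable = 1 };

        /* Decided once, at attach time... */
        ioc.put_smid_default = ioc.atomic_desc_capable ?
                put_smid_atomic : put_smid_legacy;

        /* ...so the hot path is an indirect call, not a branch. */
        ioc.put_smid_default(&ioc, 0x42);
        return 0;
}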
/*
* These function pointers for other requests that don't
* require IEEE scatter gather elements.
@@ -5332,6 +5439,21 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
goto out_free_resources;
}
+ /* allocate memory for pending OS device add list */
+ ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
+ if (ioc->facts.MaxDevHandle % 8)
+ ioc->pend_os_device_add_sz++;
+ ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
+ GFP_KERNEL);
+ if (!ioc->pend_os_device_add)
+ goto out_free_resources;
+
+ ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
+ ioc->device_remove_in_progress =
+ kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
+ if (!ioc->device_remove_in_progress)
+ goto out_free_resources;
+
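Both new trackers are bitmaps with one bit per possible device handle: MaxDevHandle/8 bytes, rounded up when the handle space is not a multiple of eight. A userspace model of the sizing plus the test/set/clear traffic the later hunks generate; the kernel's real test_bit()/set_bit() operate on long-word bitmaps, so this byte-wise version is only a sketch:

#include <stdio.h>
#include <stdlib.h>

static int test_bit(unsigned int n, const unsigned char *map)
{
        return (map[n / 8] >> (n % 8)) & 1;
}

static void set_bit(unsigned int n, unsigned char *map)
{
        map[n / 8] |= 1u << (n % 8);
}

static void clear_bit(unsigned int n, unsigned char *map)
{
        map[n / 8] &= ~(1u << (n % 8));
}

int main(void)
{
        unsigned int max_dev_handle = 1000;     /* example firmware limit */
        size_t sz = max_dev_handle / 8;

        if (max_dev_handle % 8)
                sz++;                   /* round up to cover the tail bits */

        unsigned char *map = calloc(1, sz);
        if (!map)
                return 1;

        set_bit(0x123, map);            /* device add pending */
        printf("handle 0x123 pending: %d\n", test_bit(0x123, map));
        clear_bit(0x123, map);          /* OS device exposed */
        printf("handle 0x123 pending: %d\n", test_bit(0x123, map));
        free(map);
        return 0;
}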
ioc->fwfault_debug = mpt3sas_fwfault_debug;
/* base internal command bits */
@@ -5414,6 +5536,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
kfree(ioc->reply_post_host_index);
kfree(ioc->pd_handles);
kfree(ioc->blocking_handles);
+ kfree(ioc->device_remove_in_progress);
+ kfree(ioc->pend_os_device_add);
kfree(ioc->tm_cmds.reply);
kfree(ioc->transport_cmds.reply);
kfree(ioc->scsih_cmds.reply);
@@ -5455,6 +5579,8 @@ mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
kfree(ioc->reply_post_host_index);
kfree(ioc->pd_handles);
kfree(ioc->blocking_handles);
+ kfree(ioc->device_remove_in_progress);
+ kfree(ioc->pend_os_device_add);
kfree(ioc->pfacts);
kfree(ioc->ctl_cmds.reply);
kfree(ioc->ctl_cmds.sense);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 3e71bc1b4a80..8de0eda8cd00 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -73,9 +73,9 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "13.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 13
-#define MPT3SAS_MINOR_VERSION 100
+#define MPT3SAS_DRIVER_VERSION "14.101.00.00"
+#define MPT3SAS_MAJOR_VERSION 14
+#define MPT3SAS_MINOR_VERSION 101
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -300,8 +300,9 @@
* There are twelve (Gen3) or sixteen (Gen3.5) Supplemental Reply Post Host Index Registers
* and each register is at offset 0x10 bytes from the previous one.
*/
-#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT 12
-#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10)
+#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3 12
+#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35 16
+#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10)
/* OEM Identifiers */
#define MFG10_OEM_ID_INVALID (0x00000000)
@@ -375,7 +376,6 @@ struct MPT3SAS_TARGET {
* per device private data
*/
#define MPT_DEVICE_FLAGS_INIT 0x01
-#define MPT_DEVICE_TLR_ON 0x02
#define MFG_PAGE10_HIDE_SSDS_MASK (0x00000003)
#define MFG_PAGE10_HIDE_ALL_DISKS (0x00)
@@ -736,7 +736,10 @@ typedef void (*MPT_BUILD_SG)(struct MPT3SAS_ADAPTER *ioc, void *psge,
typedef void (*MPT_BUILD_ZERO_LEN_SGE)(struct MPT3SAS_ADAPTER *ioc,
void *paddr);
-
+/* To support atomic and non-atomic descriptors */
+typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 funcdep);
+typedef void (*PUT_SMID_DEFAULT) (struct MPT3SAS_ADAPTER *ioc, u16 smid);
/* IOC Facts and Port Facts converted from little endian to cpu */
union mpi3_version_union {
@@ -1079,6 +1082,9 @@ struct MPT3SAS_ADAPTER {
void *pd_handles;
u16 pd_handles_sz;
+ void *pend_os_device_add;
+ u16 pend_os_device_add_sz;
+
/* config page */
u16 config_page_sz;
void *config_page;
@@ -1156,7 +1162,8 @@ struct MPT3SAS_ADAPTER {
u8 reply_queue_count;
struct list_head reply_queue_list;
- u8 msix96_vector;
+ u8 combined_reply_queue;
+ u8 combined_reply_index_count;
/* reply post register index */
resource_size_t **replyPostRegisterIndex;
@@ -1187,6 +1194,15 @@ struct MPT3SAS_ADAPTER {
struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
struct SL_WH_MPI_TRIGGERS_T diag_trigger_mpi;
+ void *device_remove_in_progress;
+ u16 device_remove_in_progress_sz;
+ u8 is_gen35_ioc;
+ u8 atomic_desc_capable;
+ PUT_SMID_IO_FP_HIP put_smid_scsi_io;
+ PUT_SMID_IO_FP_HIP put_smid_fast_path;
+ PUT_SMID_IO_FP_HIP put_smid_hi_priority;
+ PUT_SMID_DEFAULT put_smid_default;
+
};
typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1232,13 +1248,6 @@ u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid);
-void mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid,
- u16 handle);
-void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
- u16 handle);
-void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc,
- u16 smid, u16 msix_task);
-void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid);
void mpt3sas_base_initialize_callback_handler(void);
u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func);
void mpt3sas_base_release_callback_handler(u8 cb_idx);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index cebfd734fd76..dd6270125614 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -384,7 +384,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t));
_config_display_some_debug(ioc, smid, "config_request", NULL);
init_completion(&ioc->config_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ);
if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 26cdc127ac89..050bd788ad02 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -654,6 +654,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
size_t data_in_sz = 0;
long ret;
u16 wait_state_count;
+ u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
issue_reset = 0;
@@ -738,10 +739,13 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
data_in_sz = karg.data_in_size;
if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
- mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
- if (!le16_to_cpu(mpi_request->FunctionDependent1) ||
- le16_to_cpu(mpi_request->FunctionDependent1) >
- ioc->facts.MaxDevHandle) {
+ mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
+ mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT ||
+ mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH) {
+
+ device_handle = le16_to_cpu(mpi_request->FunctionDependent1);
+ if (!device_handle || (device_handle >
+ ioc->facts.MaxDevHandle)) {
ret = -EINVAL;
mpt3sas_base_free_smid(ioc, smid);
goto out;
@@ -797,14 +801,20 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
scsiio_request->SenseBufferLowAddress =
mpt3sas_base_get_sense_buffer_dma(ioc, smid);
memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
+ if (test_bit(device_handle, ioc->device_remove_in_progress)) {
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+ ioc->name, device_handle));
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
-
if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
- mpt3sas_base_put_smid_scsi_io(ioc, smid,
- le16_to_cpu(mpi_request->FunctionDependent1));
+ ioc->put_smid_scsi_io(ioc, smid, device_handle);
else
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_SCSI_TASK_MGMT:
@@ -827,11 +837,19 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
}
}
+ if (test_bit(device_handle, ioc->device_remove_in_progress)) {
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+ ioc->name, device_handle));
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
tm_request->DevHandle));
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
- mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+ ioc->put_smid_hi_priority(ioc, smid, 0);
break;
}
case MPI2_FUNCTION_SMP_PASSTHROUGH:
@@ -862,16 +880,30 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
}
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
data_in_sz);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_SATA_PASSTHROUGH:
+ {
+ if (test_bit(device_handle, ioc->device_remove_in_progress)) {
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+ ioc->name, device_handle));
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
+ data_in_sz);
+ ioc->put_smid_default(ioc, smid);
+ break;
+ }
case MPI2_FUNCTION_FW_DOWNLOAD:
case MPI2_FUNCTION_FW_UPLOAD:
{
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
data_in_sz);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_TOOLBOX:
@@ -886,7 +918,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
}
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
@@ -905,7 +937,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
default:
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
@@ -1064,7 +1096,10 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
break;
case MPI25_VERSION:
case MPI26_VERSION:
- karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
+ if (ioc->is_gen35_ioc)
+ karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS35;
+ else
+ karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
break;
}
@@ -1491,7 +1526,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
cpu_to_le32(ioc->product_specific[buffer_type][i]);
init_completion(&ioc->ctl_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
@@ -1838,7 +1873,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
mpi_request->VP_ID = 0;
init_completion(&ioc->ctl_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
@@ -2105,7 +2140,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
mpi_request->VP_ID = 0;
init_completion(&ioc->ctl_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
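The guards added to _ctl_do_mpt_command() fail SCSI I/O, task-management, and SATA pass-through ioctls with -EINVAL when the target handle has a removal in flight, instead of racing the target reset. A userspace sketch of the guard shape; the bitmap helper matches the byte-wise model shown earlier and the flow is illustrative, not the driver's exact path:

#include <stdio.h>
#include <errno.h>

static unsigned char device_remove_in_progress[64 / 8];

static int test_bit(unsigned int n, const unsigned char *map)
{
        return (map[n / 8] >> (n % 8)) & 1;
}

static int do_passthrough_ioctl(unsigned int device_handle)
{
        /* Reject rather than race an in-flight target reset. */
        if (test_bit(device_handle, device_remove_in_progress)) {
                fprintf(stderr,
                        "handle(0x%04x): ioctl failed due to device removal in progress\n",
                        device_handle);
                return -EINVAL;
        }
        printf("handle(0x%04x): request posted\n", device_handle);
        return 0;
}

int main(void)
{
        device_remove_in_progress[2] |= 1 << 2; /* mark handle 0x12 */

        do_passthrough_ioctl(0x11);     /* posted */
        do_passthrough_ioctl(0x12);     /* rejected */
        return 0;
}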
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index 89408356d252..f3e17a8c1b07 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -143,6 +143,7 @@ struct mpt3_ioctl_pci_info {
#define MPT2_IOCTL_INTERFACE_SAS2 (0x04)
#define MPT2_IOCTL_INTERFACE_SAS2_SSS6200 (0x05)
#define MPT3_IOCTL_INTERFACE_SAS3 (0x06)
+#define MPT3_IOCTL_INTERFACE_SAS35 (0x07)
#define MPT2_IOCTL_VERSION_LENGTH (32)
/**
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 1c4744e78173..5c8f75247d73 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -423,7 +423,7 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
return 0;
}
- /* we hit this becuase the given parent handle doesn't exist */
+ /* we hit this because the given parent handle doesn't exist */
if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
return -ENXIO;
@@ -788,6 +788,11 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
list_add_tail(&sas_device->list, &ioc->sas_device_list);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (ioc->hide_drives) {
+ clear_bit(sas_device->handle, ioc->pend_os_device_add);
+ return;
+ }
+
if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
sas_device->sas_address_parent)) {
_scsih_sas_device_remove(ioc, sas_device);
@@ -803,7 +808,8 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
sas_device->sas_address_parent);
_scsih_sas_device_remove(ioc, sas_device);
}
- }
+ } else
+ clear_bit(sas_device->handle, ioc->pend_os_device_add);
}
/**
@@ -1517,7 +1523,7 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
/*
* raid transport support -
* Enabled for SLES11 and newer, in older kernels the driver will panic when
- * unloading the driver followed by a load - I beleive that the subroutine
+ * unloading the driver followed by a load - I believe that the subroutine
* raid_class_release() is not cleaning up properly.
*/
@@ -2279,7 +2285,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
msix_task = scsi_lookup->msix_io;
else
msix_task = 0;
- mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
+ ioc->put_smid_hi_priority(ioc, smid, msix_task);
wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -2837,7 +2843,7 @@ _scsih_internal_device_block(struct scsi_device *sdev,
if (r == -EINVAL)
sdev_printk(KERN_WARNING, sdev,
"device_block failed with return(%d) for handle(0x%04x)\n",
- sas_device_priv_data->sas_target->handle, r);
+ r, sas_device_priv_data->sas_target->handle);
}
/**
@@ -2867,20 +2873,20 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
sdev_printk(KERN_WARNING, sdev,
"device_unblock failed with return(%d) for handle(0x%04x) "
"performing a block followed by an unblock\n",
- sas_device_priv_data->sas_target->handle, r);
+ r, sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 1;
r = scsi_internal_device_block(sdev);
if (r)
sdev_printk(KERN_WARNING, sdev, "retried device_block "
"failed with return(%d) for handle(0x%04x)\n",
- sas_device_priv_data->sas_target->handle, r);
+ r, sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 0;
r = scsi_internal_device_unblock(sdev, SDEV_RUNNING);
if (r)
sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
" failed with return(%d) for handle(0x%04x)\n",
- sas_device_priv_data->sas_target->handle, r);
+ r, sas_device_priv_data->sas_target->handle);
}
}
@@ -2942,7 +2948,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
* @ioc: per adapter object
* @handle: device handle
*
- * During device pull we need to appropiately set the sdev state.
+ * During device pull we need to appropriately set the sdev state.
*/
static void
_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
@@ -2971,7 +2977,7 @@ _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
* @handle: device handle
*
- * During device pull we need to appropiately set the sdev state.
+ * During device pull we need to appropriately set the sdev state.
*/
static void
_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -3138,6 +3144,8 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
if (test_bit(handle, ioc->pd_handles))
return;
+ clear_bit(handle, ioc->pend_os_device_add);
+
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
if (sas_device && sas_device->starget &&
@@ -3192,7 +3200,8 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+ set_bit(handle, ioc->device_remove_in_progress);
+ ioc->put_smid_hi_priority(ioc, smid, 0);
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
out:
@@ -3291,7 +3300,7 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
mpi_request->DevHandle = mpi_request_tm->DevHandle;
- mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl);
+ ioc->put_smid_default(ioc, smid_sas_ctrl);
return _scsih_check_for_pending_tm(ioc, smid);
}
@@ -3326,6 +3335,11 @@ _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
le16_to_cpu(mpi_reply->IOCStatus),
le32_to_cpu(mpi_reply->IOCLogInfo)));
+ if (le16_to_cpu(mpi_reply->IOCStatus) ==
+ MPI2_IOCSTATUS_SUCCESS) {
+ clear_bit(le16_to_cpu(mpi_reply->DevHandle),
+ ioc->device_remove_in_progress);
+ }
} else {
pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
@@ -3381,7 +3395,7 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+ ioc->put_smid_hi_priority(ioc, smid, 0);
}
/**
@@ -3473,7 +3487,7 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event,
ack_request->EventContext = event_context;
ack_request->VF_ID = 0; /* TODO */
ack_request->VP_ID = 0;
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
}
/**
@@ -3530,7 +3544,7 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
mpi_request->DevHandle = handle;
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
}
/**
@@ -3930,7 +3944,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
* _scsih_setup_eedp - setup MPI request for EEDP transfer
* @ioc: per adapter object
* @scmd: pointer to scsi command object
- * @mpi_request: pointer to the SCSI_IO reqest message frame
+ * @mpi_request: pointer to the SCSI_IO request message frame
*
* Supporting protection 1 and 3.
*
@@ -3983,6 +3997,9 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
mpi_request_3v->EEDPBlockSize =
cpu_to_le16(scmd->device->sector_size);
+
+ if (ioc->is_gen35_ioc)
+ eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
}
@@ -4084,7 +4101,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
return 0;
- /* device busy with task managment */
+ /* device busy with task management */
} else if (sas_target_priv_data->tm_busy ||
sas_device_priv_data->block)
return SCSI_MLQUEUE_DEVICE_BUSY;
@@ -4154,12 +4171,12 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
MPI25_SCSIIO_IOFLAGS_FAST_PATH);
- mpt3sas_base_put_smid_fast_path(ioc, smid, handle);
+ ioc->put_smid_fast_path(ioc, smid, handle);
} else
- mpt3sas_base_put_smid_scsi_io(ioc, smid,
+ ioc->put_smid_scsi_io(ioc, smid,
le16_to_cpu(mpi_request->DevHandle));
} else
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
return 0;
out:
@@ -4658,7 +4675,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
mpi_request->DevHandle =
cpu_to_le16(sas_device_priv_data->sas_target->handle);
- mpt3sas_base_put_smid_scsi_io(ioc, smid,
+ ioc->put_smid_scsi_io(ioc, smid,
sas_device_priv_data->sas_target->handle);
return 0;
}
@@ -5383,10 +5400,10 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
sas_device->handle, handle);
sas_target_priv_data->handle = handle;
sas_device->handle = handle;
- if (sas_device_pg0.Flags &
+ if (le16_to_cpu(sas_device_pg0.Flags) &
MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
sas_device->enclosure_level =
- le16_to_cpu(sas_device_pg0.EnclosureLevel);
+ sas_device_pg0.EnclosureLevel;
memcpy(sas_device->connector_name,
sas_device_pg0.ConnectorName, 4);
sas_device->connector_name[4] = '\0';
@@ -5465,6 +5482,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
if (!(_scsih_is_end_device(device_info)))
return -1;
+ set_bit(handle, ioc->pend_os_device_add);
sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
/* check if device is present */
@@ -5483,6 +5501,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
sas_device = mpt3sas_get_sdev_by_addr(ioc,
sas_address);
if (sas_device) {
+ clear_bit(handle, ioc->pend_os_device_add);
sas_device_put(sas_device);
return -1;
}
@@ -5513,9 +5532,10 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
- if (sas_device_pg0.Flags & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
+ if (le16_to_cpu(sas_device_pg0.Flags)
+ & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
sas_device->enclosure_level =
- le16_to_cpu(sas_device_pg0.EnclosureLevel);
+ sas_device_pg0.EnclosureLevel;
memcpy(sas_device->connector_name,
sas_device_pg0.ConnectorName, 4);
sas_device->connector_name[4] = '\0';
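The endianness fix above: Flags is a little-endian 16-bit field in the config page, so it must pass through le16_to_cpu() before the bit test, while EnclosureLevel is a one-byte field and the old le16_to_cpu() on it was itself wrong. A userspace demonstration of why the conversion matters on a big-endian host, with a hand-rolled swap standing in for le16_to_cpu() and an illustrative mask value:

#include <stdio.h>
#include <stdint.h>

#define FLAGS_ENCL_LEVEL_VALID 0x2000   /* illustrative mask */

/* Stand-in for le16_to_cpu() on a big-endian CPU. */
static uint16_t le16_to_cpu_model(uint16_t le)
{
        return (uint16_t)((le >> 8) | (le << 8));
}

int main(void)
{
        /* 0x2000 stored little-endian reads back as 0x0020 on a
         * big-endian load that skips the conversion. */
        uint16_t raw_flags = 0x0020;

        if (raw_flags & FLAGS_ENCL_LEVEL_VALID)
                printf("unconverted: bit seen\n");
        else
                printf("unconverted: bit missed on big-endian\n");

        if (le16_to_cpu_model(raw_flags) & FLAGS_ENCL_LEVEL_VALID)
                printf("converted:   bit seen correctly\n");
        return 0;
}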
@@ -5806,6 +5826,9 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
_scsih_check_device(ioc, sas_address, handle,
phy_number, link_rate);
+ if (!test_bit(handle, ioc->pend_os_device_add))
+ break;
+
case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
@@ -6267,7 +6290,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
handle, phys_disk_num));
init_completion(&ioc->scsih_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -6320,7 +6343,7 @@ _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
{
sdev->no_uld_attach = no_uld_attach ? 1 : 0;
sdev_printk(KERN_INFO, sdev, "%s raid component\n",
- sdev->no_uld_attach ? "hidding" : "exposing");
+ sdev->no_uld_attach ? "hiding" : "exposing");
WARN_ON(scsi_device_reprobe(sdev));
}
@@ -7050,7 +7073,7 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
if (sas_device_pg0->Flags &
MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
sas_device->enclosure_level =
- le16_to_cpu(sas_device_pg0->EnclosureLevel);
+ sas_device_pg0->EnclosureLevel;
memcpy(&sas_device->connector_name[0],
&sas_device_pg0->ConnectorName[0], 4);
} else {
@@ -7112,6 +7135,7 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
sas_device_pg0.SASAddress =
le64_to_cpu(sas_device_pg0.SASAddress);
sas_device_pg0.Slot = le16_to_cpu(sas_device_pg0.Slot);
+ sas_device_pg0.Flags = le16_to_cpu(sas_device_pg0.Flags);
_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
}
@@ -7723,6 +7747,9 @@ mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
complete(&ioc->tm_cmds.done);
}
+ memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
+ memset(ioc->device_remove_in_progress, 0,
+ ioc->device_remove_in_progress_sz);
_scsih_fw_event_cleanup_queue(ioc);
_scsih_flush_running_cmds(ioc);
break;
@@ -8113,7 +8140,7 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
if (!ioc->hide_ir_msg)
pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name);
init_completion(&ioc->scsih_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -8654,6 +8681,12 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
case MPI26_MFGPAGE_DEVID_SAS3324_2:
case MPI26_MFGPAGE_DEVID_SAS3324_3:
case MPI26_MFGPAGE_DEVID_SAS3324_4:
+ case MPI26_MFGPAGE_DEVID_SAS3508:
+ case MPI26_MFGPAGE_DEVID_SAS3508_1:
+ case MPI26_MFGPAGE_DEVID_SAS3408:
+ case MPI26_MFGPAGE_DEVID_SAS3516:
+ case MPI26_MFGPAGE_DEVID_SAS3516_1:
+ case MPI26_MFGPAGE_DEVID_SAS3416:
return MPI26_VERSION;
}
return 0;
@@ -8722,10 +8755,29 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->hba_mpi_version_belonged = hba_mpi_version;
ioc->id = mpt3_ids++;
sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
+ switch (pdev->device) {
+ case MPI26_MFGPAGE_DEVID_SAS3508:
+ case MPI26_MFGPAGE_DEVID_SAS3508_1:
+ case MPI26_MFGPAGE_DEVID_SAS3408:
+ case MPI26_MFGPAGE_DEVID_SAS3516:
+ case MPI26_MFGPAGE_DEVID_SAS3516_1:
+ case MPI26_MFGPAGE_DEVID_SAS3416:
+ ioc->is_gen35_ioc = 1;
+ break;
+ default:
+ ioc->is_gen35_ioc = 0;
+ }
if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
- (ioc->hba_mpi_version_belonged == MPI26_VERSION))
- ioc->msix96_vector = 1;
+ (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
+ ioc->combined_reply_queue = 1;
+ if (ioc->is_gen35_ioc)
+ ioc->combined_reply_index_count =
+ MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
+ else
+ ioc->combined_reply_index_count =
+ MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
+ }
break;
default:
return -ENODEV;
@@ -9128,6 +9180,19 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
PCI_ANY_ID, PCI_ANY_ID },
{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
PCI_ANY_ID, PCI_ANY_ID },
+ /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
+ PCI_ANY_ID, PCI_ANY_ID },
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
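Claiming the six new Ventura-generation parts is the usual two-step: add their IDs to the driver's pci_device_id table (MODULE_DEVICE_TABLE() then exports them for module autoload) and teach probe to branch on them. A userspace-compilable model of the table-match step; the struct is simplified and real entries also carry the PCI_ANY_ID subvendor/subdevice wildcards seen above:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define VENDORID_LSI  0x1000    /* assumed value for illustration */
#define DEVID_SAS3508 0x00AD    /* from the hunk above */

struct pci_id {
        uint16_t vendor;
        uint16_t device;
};

static const struct pci_id id_table[] = {
        { VENDORID_LSI, 0x00AA },       /* SAS3516 */
        { VENDORID_LSI, DEVID_SAS3508 },
        { 0, 0 }                        /* terminating entry */
};

static const struct pci_id *match(uint16_t vendor, uint16_t device)
{
        const struct pci_id *p;

        for (p = id_table; p->vendor; p++)
                if (p->vendor == vendor && p->device == device)
                        return p;
        return NULL;
}

int main(void)
{
        printf("SAS3508 %s\n", match(VENDORID_LSI, DEVID_SAS3508) ?
               "matched, probe would run" : "not claimed");
        return 0;
}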
@@ -9168,7 +9233,7 @@ scsih_init(void)
/* queuecommand callback handler */
scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
- /* task managment callback handler */
+ /* task management callback handler */
tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
/* base internal commands callback handler */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index b74faf1a69b2..7f1d5785bc30 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -392,7 +392,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
"report_manufacture - send to sas_addr(0x%016llx)\n",
ioc->name, (unsigned long long)sas_address));
init_completion(&ioc->transport_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1198,7 +1198,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
ioc->name, (unsigned long long)phy->identify.sas_address,
phy->number));
init_completion(&ioc->transport_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1514,7 +1514,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
ioc->name, (unsigned long long)phy->identify.sas_address,
phy->number, phy_operation));
init_completion(&ioc->transport_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -2032,7 +2032,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
"%s - sending smp request\n", ioc->name, __func__));
init_completion(&ioc->transport_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 4c57d9abce7b..7de5d8d75480 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -668,7 +668,7 @@ static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
{
u32 tmp;
tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3));
- if (tmp && 1 << (slot_idx % 32)) {
+ if (tmp & 1 << (slot_idx % 32)) {
mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx);
mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3),
1 << (slot_idx % 32));
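The mvsas fix is a one-character operator bug: tmp && (1 << n) is a logical AND that is true whenever tmp is non-zero at all, while tmp & (1 << n) actually tests bit n. A tiny userspace demonstration of the difference:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t tmp = 0x08;            /* only bit 3 set */
        unsigned int slot_idx = 5;      /* we mean to test bit 5 */

        /* Buggy form: any non-zero tmp makes the whole test true. */
        printf("logical &&: %d\n", (int)(tmp && 1 << (slot_idx % 32)));

        /* Fixed form: bit 5 is clear, so the test is false. */
        printf("bitwise  &: %d\n", (tmp & 1 << (slot_idx % 32)) != 0);
        return 0;
}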
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 845affa112f7..337982cf3d63 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3787,11 +3787,11 @@ static long pmcraid_ioctl_passthrough(
direction);
if (rc) {
pmcraid_err("couldn't build passthrough ioadls\n");
- goto out_free_buffer;
+ goto out_free_cmd;
}
} else if (request_size < 0) {
rc = -EINVAL;
- goto out_free_buffer;
+ goto out_free_cmd;
}
/* If data is being written into the device, copy the data from user
@@ -3908,6 +3908,8 @@ out_handle_response:
out_free_sglist:
pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
+
+out_free_cmd:
pmcraid_return_cmd(cmd);
out_free_buffer:
@@ -6018,8 +6020,10 @@ static int __init pmcraid_init(void)
error = pmcraid_netlink_init();
- if (error)
+ if (error) {
+ class_destroy(pmcraid_class);
goto out_unreg_chrdev;
+ }
error = pci_register_driver(&pmcraid_driver);
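Both pmcraid changes are error-unwind bookkeeping: the passthrough path jumped to a label that skipped pmcraid_return_cmd() and leaked the command block, and pmcraid_init() forgot class_destroy() when netlink setup failed. A userspace sketch of the reverse-order goto-unwind idiom the fixes restore, with stand-in resource names:

#include <stdio.h>

static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int init_driver(void)
{
        int rc;

        rc = acquire("chrdev");
        if (rc)
                goto out;

        rc = acquire("class");
        if (rc)
                goto out_unreg_chrdev;

        rc = -1;        /* simulate netlink setup failing */
        if (rc)
                goto out_destroy_class; /* the step the fix adds */

        return 0;

        /* Unwind strictly in reverse order of acquisition. */
out_destroy_class:
        release("class");
out_unreg_chrdev:
        release("chrdev");
out:
        return rc;
}

int main(void)
{
        return init_driver() ? 1 : 0;
}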
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 643014f82f7d..1bf8061ff803 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1,4 +1,4 @@
-/*
+ /*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
*
@@ -9,6 +9,7 @@
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
+#include <linux/bsg-lib.h>
/* BSG support for ELS/CT pass through */
void
@@ -16,10 +17,12 @@ qla2x00_bsg_job_done(void *data, void *ptr, int res)
{
srb_t *sp = (srb_t *)ptr;
struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
- bsg_job->reply->result = res;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = res;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
sp->free(vha, sp);
}
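This file is part of the tree-wide move from the FC-transport-private fc_bsg_job to the generic block-layer struct bsg_job: request/reply pointers are fetched once up front, and completion goes through bsg_job_done(job, result, reply_payload_rcv_len) rather than the old job_done callback. A userspace model of the completion shape only; the types and helper are stand-ins, not the bsg-lib API:

#include <stdio.h>

struct bsg_reply_model {
        int result;
        unsigned int reply_payload_rcv_len;
};

struct bsg_job_model {
        struct bsg_reply_model reply;
};

/* Stand-in for bsg_job_done(): records status, wakes the submitter. */
static void bsg_job_done_model(struct bsg_job_model *job, int result,
                               unsigned int rcv_len)
{
        (void)job;
        printf("job complete: result=%d rcv_len=%u\n", result, rcv_len);
}

static void on_srb_done(struct bsg_job_model *job, int res)
{
        struct bsg_reply_model *reply = &job->reply;

        reply->result = res;
        bsg_job_done_model(job, reply->result,
                           reply->reply_payload_rcv_len);
}

int main(void)
{
        struct bsg_job_model job = { { 0, 0 } };

        on_srb_done(&job, 0);   /* DID_OK-style completion */
        return 0;
}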
@@ -28,13 +31,15 @@ qla2x00_bsg_sp_free(void *data, void *ptr)
{
srb_t *sp = (srb_t *)ptr;
struct scsi_qla_host *vha = sp->fcport->vha;
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+
struct qla_hw_data *ha = vha->hw;
struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
if (sp->type == SRB_FXIOCB_BCMD) {
piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
- &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
dma_unmap_sg(&ha->pdev->dev,
@@ -116,9 +121,11 @@ qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
}
static int
-qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
+qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int ret = 0;
@@ -131,7 +138,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
}
/* Get the sub command */
- oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
/* Only set config is allowed if config memory is not allocated */
if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
@@ -145,10 +152,10 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
ha->fcp_prio_cfg->attributes &=
~FCP_PRIO_ATTR_ENABLE;
qla24xx_update_all_fcp_prio(vha);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
} else {
ret = -EINVAL;
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
goto exit_fcp_prio_cfg;
}
break;
@@ -160,10 +167,10 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
ha->fcp_prio_cfg->attributes |=
FCP_PRIO_ATTR_ENABLE;
qla24xx_update_all_fcp_prio(vha);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
} else {
ret = -EINVAL;
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
goto exit_fcp_prio_cfg;
}
}
@@ -173,12 +180,12 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
len = bsg_job->reply_payload.payload_len;
if (!len || len > FCP_PRIO_CFG_SIZE) {
ret = -EINVAL;
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
goto exit_fcp_prio_cfg;
}
- bsg_job->reply->result = DID_OK;
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->result = DID_OK;
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(
bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
@@ -189,7 +196,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
case QLFC_FCP_PRIO_SET_CONFIG:
len = bsg_job->request_payload.payload_len;
if (!len || len > FCP_PRIO_CFG_SIZE) {
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
ret = -EINVAL;
goto exit_fcp_prio_cfg;
}
@@ -200,7 +207,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x7050,
"Unable to allocate memory for fcp prio "
"config data (%x).\n", FCP_PRIO_CFG_SIZE);
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
ret = -ENOMEM;
goto exit_fcp_prio_cfg;
}
@@ -215,7 +222,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
if (!qla24xx_fcp_prio_cfg_valid(vha,
(struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
ret = -EINVAL;
/* If buffer was invalid then
* fcp_prio_cfg is of no use
@@ -229,7 +236,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
ha->flags.fcp_prio_enabled = 1;
qla24xx_update_all_fcp_prio(vha);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
break;
default:
ret = -EINVAL;
@@ -237,13 +244,15 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
}
exit_fcp_prio_cfg:
if (!ret)
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return ret;
}
static int
-qla2x00_process_els(struct fc_bsg_job *bsg_job)
+qla2x00_process_els(struct bsg_job *bsg_job)
{
+ struct fc_bsg_request *bsg_request = bsg_job->request;
struct fc_rport *rport;
fc_port_t *fcport = NULL;
struct Scsi_Host *host;
@@ -255,15 +264,15 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
int rval = (DRIVER_ERROR << 16);
uint16_t nextlid = 0;
- if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
- rport = bsg_job->rport;
+ if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
+ rport = fc_bsg_to_rport(bsg_job);
fcport = *(fc_port_t **) rport->dd_data;
host = rport_to_shost(rport);
vha = shost_priv(host);
ha = vha->hw;
type = "FC_BSG_RPT_ELS";
} else {
- host = bsg_job->shost;
+ host = fc_bsg_to_shost(bsg_job);
vha = shost_priv(host);
ha = vha->hw;
type = "FC_BSG_HST_ELS_NOLOGIN";
@@ -296,7 +305,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
}
/* ELS request for rport */
- if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
+ if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
/* make sure the rport is logged in,
* if not perform fabric login
*/
@@ -322,11 +331,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
/* Initialize all required fields of fcport */
fcport->vha = vha;
fcport->d_id.b.al_pa =
- bsg_job->request->rqst_data.h_els.port_id[0];
+ bsg_request->rqst_data.h_els.port_id[0];
fcport->d_id.b.area =
- bsg_job->request->rqst_data.h_els.port_id[1];
+ bsg_request->rqst_data.h_els.port_id[1];
fcport->d_id.b.domain =
- bsg_job->request->rqst_data.h_els.port_id[2];
+ bsg_request->rqst_data.h_els.port_id[2];
fcport->loop_id =
(fcport->d_id.b.al_pa == 0xFD) ?
NPH_FABRIC_CONTROLLER : NPH_F_PORT;
@@ -366,11 +375,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
}
sp->type =
- (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
- SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
+ (bsg_request->msgcode == FC_BSG_RPT_ELS ?
+ SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
sp->name =
- (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
- "bsg_els_rpt" : "bsg_els_hst");
+ (bsg_request->msgcode == FC_BSG_RPT_ELS ?
+ "bsg_els_rpt" : "bsg_els_hst");
sp->u.bsg_job = bsg_job;
sp->free = qla2x00_bsg_sp_free;
sp->done = qla2x00_bsg_job_done;
@@ -378,7 +387,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
ql_dbg(ql_dbg_user, vha, 0x700a,
"bsg rqst type: %s els type: %x - loop-id=%x "
"portid=%-2x%02x%02x.\n", type,
- bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
+ bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
rval = qla2x00_start_sp(sp);
@@ -399,7 +408,7 @@ done_unmap_sg:
goto done_free_fcport;
done_free_fcport:
- if (bsg_job->request->msgcode == FC_BSG_RPT_ELS)
+ if (bsg_request->msgcode == FC_BSG_RPT_ELS)
kfree(fcport);
done:
return rval;
@@ -420,10 +429,11 @@ qla24xx_calc_ct_iocbs(uint16_t dsds)
}
static int
-qla2x00_process_ct(struct fc_bsg_job *bsg_job)
+qla2x00_process_ct(struct bsg_job *bsg_job)
{
srb_t *sp;
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = (DRIVER_ERROR << 16);
@@ -469,7 +479,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
}
loop_id =
- (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
+ (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
>> 24;
switch (loop_id) {
case 0xFC:
@@ -500,9 +510,9 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
/* Initialize all required fields of fcport */
fcport->vha = vha;
- fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
- fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
- fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
+ fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
+ fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
+ fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
fcport->loop_id = loop_id;
/* Alloc SRB structure */
@@ -524,7 +534,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
ql_dbg(ql_dbg_user, vha, 0x7016,
"bsg rqst type: %s else type: %x - "
"loop-id=%x portid=%02x%02x%02x.\n", type,
- (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
+ (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
@@ -697,9 +707,11 @@ done_set_internal:
}
static int
-qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
+qla2x00_process_loopback(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval;
@@ -780,9 +792,9 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
elreq.rcv_dma = rsp_data_dma;
elreq.transfer_size = req_data_len;
- elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
elreq.iteration_count =
- bsg_job->request->rqst_data.h_vendor.vendor_cmd[2];
+ bsg_request->rqst_data.h_vendor.vendor_cmd[2];
if (atomic_read(&vha->loop_state) == LOOP_READY &&
(ha->current_topology == ISP_CFG_F ||
@@ -896,12 +908,12 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
"Vendor request %s failed.\n", type);
rval = 0;
- bsg_job->reply->result = (DID_ERROR << 16);
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->result = (DID_ERROR << 16);
+ bsg_reply->reply_payload_rcv_len = 0;
} else {
ql_dbg(ql_dbg_user, vha, 0x702d,
"Vendor request %s completed.\n", type);
- bsg_job->reply->result = (DID_OK << 16);
+ bsg_reply->result = (DID_OK << 16);
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, rsp_data,
rsp_data_len);
@@ -930,14 +942,17 @@ done_unmap_req_sg:
bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!rval)
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rval;
}
static int
-qla84xx_reset(struct fc_bsg_job *bsg_job)
+qla84xx_reset(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -948,7 +963,7 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
return -EINVAL;
}
- flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
@@ -960,17 +975,20 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
} else {
ql_dbg(ql_dbg_user, vha, 0x7031,
"Vendor request 84xx reset completed.\n");
- bsg_job->reply->result = DID_OK;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return rval;
}
static int
-qla84xx_updatefw(struct fc_bsg_job *bsg_job)
+qla84xx_updatefw(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
struct verify_chip_entry_84xx *mn = NULL;
@@ -1027,7 +1045,7 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
goto done_free_fw_buf;
}
- flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
memset(mn, 0, sizeof(struct access_chip_84xx));
@@ -1059,7 +1077,7 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
"Vendor request 84xx updatefw completed.\n");
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
}
dma_pool_free(ha->s_dma_pool, mn, mn_dma);
@@ -1072,14 +1090,17 @@ done_unmap_sg:
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!rval)
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rval;
}
static int
-qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
+qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
struct access_chip_84xx *mn = NULL;
@@ -1107,7 +1128,7 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
memset(mn, 0, sizeof(struct access_chip_84xx));
mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
mn->entry_count = 1;
- ql84_mgmt = (void *)bsg_job->request + sizeof(struct fc_bsg_request);
+ ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
switch (ql84_mgmt->mgmt.cmd) {
case QLA84_MGMT_READ_MEM:
case QLA84_MGMT_GET_INFO:
@@ -1239,11 +1260,11 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
"Vendor request 84xx mgmt completed.\n");
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
(ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
@@ -1267,14 +1288,17 @@ exit_mgmt:
dma_pool_free(ha->s_dma_pool, mn, mn_dma);
if (!rval)
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rval;
}
static int
-qla24xx_iidma(struct fc_bsg_job *bsg_job)
+qla24xx_iidma(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
int rval = 0;
struct qla_port_param *port_param = NULL;
@@ -1288,7 +1312,7 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
return -EINVAL;
}
- port_param = (void *)bsg_job->request + sizeof(struct fc_bsg_request);
+ port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
ql_log(ql_log_warn, vha, 0x7048,
"Invalid destination type.\n");
@@ -1343,24 +1367,26 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
sizeof(struct qla_port_param);
- rsp_ptr = ((uint8_t *)bsg_job->reply) +
+ rsp_ptr = ((uint8_t *)bsg_reply) +
sizeof(struct fc_bsg_reply);
memcpy(rsp_ptr, port_param,
sizeof(struct qla_port_param));
}
- bsg_job->reply->result = DID_OK;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return rval;
}
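The (void *)bsg_request + sizeof(struct fc_bsg_request) arithmetic above, and in qla84xx_mgmt_cmd(), works because userspace places its vendor parameter block immediately behind the fixed-size FC BSG header in the request buffer; fc_bsg_host_dispatch() (rewritten later in this series) rejects jobs whose request_len is too short before an LLD ever sees them. A sketch with a hypothetical parameter struct — the void-pointer arithmetic is the usual GCC extension the kernel builds with:

/* Hypothetical vendor parameter block, appended by userspace directly
 * after the generic struct fc_bsg_request header. */
struct example_port_param {
	uint16_t mode;
	uint16_t speed;
};

static struct example_port_param *
example_vendor_params(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;

	/* The vendor block starts right after the fixed-size header. */
	return (void *)bsg_request + sizeof(struct fc_bsg_request);
}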
static int
-qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
+qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
uint8_t is_update)
{
+ struct fc_bsg_request *bsg_request = bsg_job->request;
uint32_t start = 0;
int valid = 0;
struct qla_hw_data *ha = vha->hw;
@@ -1368,7 +1394,7 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
if (unlikely(pci_channel_offline(ha->pdev)))
return -EINVAL;
- start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
if (start > ha->optrom_size) {
ql_log(ql_log_warn, vha, 0x7055,
"start %d > optrom_size %d.\n", start, ha->optrom_size);
@@ -1427,9 +1453,10 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
}
static int
-qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
+qla2x00_read_optrom(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1451,20 +1478,22 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
ha->optrom_region_size);
- bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
- bsg_job->reply->result = DID_OK;
+ bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
+ bsg_reply->result = DID_OK;
vfree(ha->optrom_buffer);
ha->optrom_buffer = NULL;
ha->optrom_state = QLA_SWAITING;
mutex_unlock(&ha->optrom_mutex);
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rval;
}
static int
-qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
+qla2x00_update_optrom(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1486,19 +1515,21 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
ha->optrom_region_start, ha->optrom_region_size);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
vfree(ha->optrom_buffer);
ha->optrom_buffer = NULL;
ha->optrom_state = QLA_SWAITING;
mutex_unlock(&ha->optrom_mutex);
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rval;
}
static int
-qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
+qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1509,7 +1540,7 @@ qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
dma_addr_t sfp_dma;
void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
if (!sfp) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
goto done;
}
@@ -1525,30 +1556,32 @@ qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
image->field_address.device, image->field_address.offset,
sizeof(image->field_info), image->field_address.option);
if (rval) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_MAILBOX;
goto dealloc;
}
image++;
}
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
dealloc:
dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
+qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1557,7 +1590,7 @@ qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
if (!sfp) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
goto done;
}
@@ -1571,7 +1604,7 @@ qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
sr->status_reg = *sfp;
if (rval) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_MAILBOX;
goto dealloc;
}
@@ -1579,24 +1612,26 @@ qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
dealloc:
dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->reply_payload_rcv_len = sizeof(*sr);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
+qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1605,7 +1640,7 @@ qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
if (!sfp) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
goto done;
}
@@ -1619,28 +1654,30 @@ qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
sizeof(sr->status_reg), sr->field_address.option);
if (rval) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_MAILBOX;
goto dealloc;
}
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
dealloc:
dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
+qla2x00_write_i2c(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1649,7 +1686,7 @@ qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
if (!sfp) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
goto done;
}
@@ -1662,28 +1699,30 @@ qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
i2c->device, i2c->offset, i2c->length, i2c->option);
if (rval) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_MAILBOX;
goto dealloc;
}
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
dealloc:
dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
+qla2x00_read_i2c(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1692,7 +1731,7 @@ qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
if (!sfp) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
goto done;
}
@@ -1704,7 +1743,7 @@ qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
i2c->device, i2c->offset, i2c->length, i2c->option);
if (rval) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_MAILBOX;
goto dealloc;
}
@@ -1713,24 +1752,26 @@ qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
dealloc:
dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
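The FRU and i2c helpers above all share one shape: a bounce buffer from the adapter's DMA pool, the real status reported through vendor_rsp[0], and an unconditional DID_OK completion so the SG_IO caller can read the vendor status word. A condensed, hypothetical sketch (EXT_STATUS_*, ha->s_dma_pool and example_mailbox_op() stand in for qla2xxx internals):

static int example_sfp_op(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t sfp_dma;
	uint8_t *sfp;
	int rval;

	sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	rval = example_mailbox_op(vha, sfp, sfp_dma);	/* hypothetical */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	/* Always DID_OK; the vendor_rsp word carries the real status. */
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}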
static int
-qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
+qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
uint32_t rval = EXT_STATUS_OK;
@@ -1895,19 +1936,21 @@ done:
/* Return an error vendor specific response
* and complete the bsg request
*/
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->reply_payload_rcv_len = 0;
- bsg_job->reply->result = (DID_OK) << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->reply_payload_rcv_len = 0;
+ bsg_reply->result = (DID_OK) << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
/* Always return success, vendor rsp carries correct status */
return 0;
}
static int
-qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
+qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = (DRIVER_ERROR << 16);
@@ -1919,7 +1962,7 @@ qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
/* Copy the IOCB specific information */
piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
- &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
/* Dump the vendor information */
ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
@@ -2027,9 +2070,10 @@ done:
}
static int
-qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
+qla26xx_serdes_op(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
int rval = 0;
struct qla_serdes_reg sr;
@@ -2042,13 +2086,13 @@ qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
switch (sr.cmd) {
case INT_SC_SERDES_WRITE_REG:
rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
break;
case INT_SC_SERDES_READ_REG:
rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
- bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
+ bsg_reply->reply_payload_rcv_len = sizeof(sr);
break;
default:
ql_dbg(ql_dbg_user, vha, 0x708c,
@@ -2057,19 +2101,21 @@ qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
break;
}
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
rval ? EXT_STATUS_MAILBOX : 0;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla8044_serdes_op(struct fc_bsg_job *bsg_job)
+qla8044_serdes_op(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
int rval = 0;
struct qla_serdes_reg_ex sr;
@@ -2082,13 +2128,13 @@ qla8044_serdes_op(struct fc_bsg_job *bsg_job)
switch (sr.cmd) {
case INT_SC_SERDES_WRITE_REG:
rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
break;
case INT_SC_SERDES_READ_REG:
rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
- bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
+ bsg_reply->reply_payload_rcv_len = sizeof(sr);
break;
default:
ql_dbg(ql_dbg_user, vha, 0x70cf,
@@ -2097,19 +2143,21 @@ qla8044_serdes_op(struct fc_bsg_job *bsg_job)
break;
}
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
rval ? EXT_STATUS_MAILBOX : 0;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla27xx_get_flash_upd_cap(struct fc_bsg_job *bsg_job)
+qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
struct qla_flash_update_caps cap;
@@ -2125,21 +2173,23 @@ qla27xx_get_flash_upd_cap(struct fc_bsg_job *bsg_job)
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
- bsg_job->reply->reply_payload_rcv_len = sizeof(cap);
+ bsg_reply->reply_payload_rcv_len = sizeof(cap);
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_OK;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla27xx_set_flash_upd_cap(struct fc_bsg_job *bsg_job)
+qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
uint64_t online_fw_attr = 0;
@@ -2158,32 +2208,34 @@ qla27xx_set_flash_upd_cap(struct fc_bsg_job *bsg_job)
(uint64_t)ha->fw_attributes;
if (online_fw_attr != cap.capabilities) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_INVALID_PARAM;
return -EINVAL;
}
if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_INVALID_PARAM;
return -EINVAL;
}
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_OK;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla27xx_get_bbcr_data(struct fc_bsg_job *bsg_job)
+qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
struct qla_bbcr_data bbcr;
@@ -2227,27 +2279,30 @@ qla27xx_get_bbcr_data(struct fc_bsg_job *bsg_job)
done:
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
- bsg_job->reply->reply_payload_rcv_len = sizeof(bbcr);
+ bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job)
+qla2x00_get_priv_stats(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
struct link_statistics *stats = NULL;
dma_addr_t stats_dma;
int rval;
- uint32_t *cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd;
+ uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
if (test_bit(UNLOADING, &vha->dpc_flags))
@@ -2281,13 +2336,14 @@ qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job)
bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
}
- bsg_job->reply->reply_payload_rcv_len = sizeof(*stats);
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_payload_rcv_len = sizeof(*stats);
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
- bsg_job->reply_len = sizeof(*bsg_job->reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_job->reply_len = sizeof(*bsg_reply);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
stats, stats_dma);
@@ -2296,9 +2352,10 @@ qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job)
}
static int
-qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job)
+qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
int rval;
struct qla_dport_diag *dd;
@@ -2323,13 +2380,14 @@ qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job)
bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
}
- bsg_job->reply->reply_payload_rcv_len = sizeof(*dd);
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_payload_rcv_len = sizeof(*dd);
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
- bsg_job->reply_len = sizeof(*bsg_job->reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_job->reply_len = sizeof(*bsg_reply);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
kfree(dd);
@@ -2337,9 +2395,11 @@ qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job)
}
static int
-qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
+qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
{
- switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+
+ switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
case QL_VND_LOOPBACK:
return qla2x00_process_loopback(bsg_job);
@@ -2413,36 +2473,38 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
}
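For context on what qla2x00_process_vendor_specific() dispatches: userspace drives these handlers through the host's bsg node with an SG_IO v4 ioctl, packing a struct fc_bsg_request whose first vendor word selects the QL_VND_* sub-command. A hedged userspace sketch — the device path, vendor id and command word are placeholders; the headers are the installed kernel uapi:

/* Userspace sketch: issue an FC_BSG_HST_VENDOR request via /dev/bsg. */
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/bsg.h>
#include <scsi/sg.h>
#include <scsi/scsi_bsg_fc.h>

int example_send_vendor_cmd(const char *bsg_path, uint64_t vendor_id,
			    uint32_t vendor_cmd)
{
	struct fc_bsg_request *req;
	struct fc_bsg_reply reply;
	struct sg_io_v4 io;
	size_t req_len = sizeof(*req) + sizeof(uint32_t);
	int fd, ret;

	fd = open(bsg_path, O_RDWR);
	if (fd < 0)
		return -1;

	req = calloc(1, req_len);	/* header plus one vendor word */
	if (!req) {
		close(fd);
		return -1;
	}
	req->msgcode = FC_BSG_HST_VENDOR;
	/* Must match shost->hostt->vendor_id, or dispatch fails with -ESRCH. */
	req->rqst_data.h_vendor.vendor_id = vendor_id;
	req->rqst_data.h_vendor.vendor_cmd[0] = vendor_cmd; /* e.g. a QL_VND_* selector */

	memset(&io, 0, sizeof(io));
	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uintptr_t)req;
	io.request_len = req_len;
	io.response = (uintptr_t)&reply;
	io.max_response_len = sizeof(reply);

	ret = ioctl(fd, SG_IO, &io);
	free(req);
	close(fd);
	return ret;
}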
int
-qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
+qla24xx_bsg_request(struct bsg_job *bsg_job)
{
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
int ret = -EINVAL;
struct fc_rport *rport;
struct Scsi_Host *host;
scsi_qla_host_t *vha;
/* In case no data transferred. */
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
- if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
- rport = bsg_job->rport;
+ if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
+ rport = fc_bsg_to_rport(bsg_job);
host = rport_to_shost(rport);
vha = shost_priv(host);
} else {
- host = bsg_job->shost;
+ host = fc_bsg_to_shost(bsg_job);
vha = shost_priv(host);
}
if (qla2x00_reset_active(vha)) {
ql_dbg(ql_dbg_user, vha, 0x709f,
"BSG: ISP abort active/needed -- cmd=%d.\n",
- bsg_job->request->msgcode);
+ bsg_request->msgcode);
return -EBUSY;
}
ql_dbg(ql_dbg_user, vha, 0x7000,
- "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);
+ "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
- switch (bsg_job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_RPT_ELS:
case FC_BSG_HST_ELS_NOLOGIN:
ret = qla2x00_process_els(bsg_job);
@@ -2464,9 +2526,10 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
}
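qla24xx_bsg_request() above and qla24xx_bsg_timeout() below are the two entry points the FC transport calls through its function template; after this series both take the generic struct bsg_job. The wiring, as qla2xxx does it in qla_attr.c (sketched, other callbacks elided):

static struct fc_function_template example_fc_functions = {
	/* ... other transport callbacks elided ... */
	.bsg_request	= qla24xx_bsg_request,
	.bsg_timeout	= qla24xx_bsg_timeout,
};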
int
-qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
+qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
- scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
struct qla_hw_data *ha = vha->hw;
srb_t *sp;
int cnt, que;
@@ -2494,13 +2557,13 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
"mbx abort_command "
"failed.\n");
bsg_job->req->errors =
- bsg_job->reply->result = -EIO;
+ bsg_reply->result = -EIO;
} else {
ql_dbg(ql_dbg_user, vha, 0x708a,
"mbx abort_command "
"success.\n");
bsg_job->req->errors =
- bsg_job->reply->result = 0;
+ bsg_reply->result = 0;
}
spin_lock_irqsave(&ha->hardware_lock, flags);
goto done;
@@ -2510,7 +2573,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
- bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
+ bsg_job->req->errors = bsg_reply->result = -ENXIO;
return 0;
done:
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 73b12e41d992..5236e3f2a06a 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -403,7 +403,7 @@ typedef struct srb {
int iocbs;
union {
struct srb_iocb iocb_cmd;
- struct fc_bsg_job *bsg_job;
+ struct bsg_job *bsg_job;
struct srb_cmd scmd;
} u;
void (*done)(void *, void *, int);
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 6ca00813c71f..c51d9f3359e3 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -733,8 +733,8 @@ extern int qla82xx_read_temperature(scsi_qla_host_t *);
extern int qla8044_read_temperature(scsi_qla_host_t *);
/* BSG related functions */
-extern int qla24xx_bsg_request(struct fc_bsg_job *);
-extern int qla24xx_bsg_timeout(struct fc_bsg_job *);
+extern int qla24xx_bsg_request(struct bsg_job *);
+extern int qla24xx_bsg_timeout(struct bsg_job *);
extern int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t);
extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
dma_addr_t, size_t, uint32_t);
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index b41265a75ed5..221ad8907893 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2197,7 +2197,8 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
els_iocb->entry_type = ELS_IOCB_TYPE;
els_iocb->entry_count = 1;
@@ -2212,8 +2213,8 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->opcode =
sp->type == SRB_ELS_CMD_RPT ?
- bsg_job->request->rqst_data.r_els.els_code :
- bsg_job->request->rqst_data.h_els.command_code;
+ bsg_request->rqst_data.r_els.els_code :
+ bsg_request->rqst_data.h_els.command_code;
els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
els_iocb->port_id[1] = sp->fcport->d_id.b.area;
els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
@@ -2250,7 +2251,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
int loop_iteration = 0;
int entry_count = 1;
@@ -2327,7 +2328,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
int loop_iteration = 0;
int entry_count = 1;
@@ -2833,7 +2834,7 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
struct scatterlist *sg;
int index;
int entry_count = 1;
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
/* Update entry type to indicate bidir command */
*((uint32_t *)(&cmd_pkt->entry_type)) =
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 068c4e47fac9..19f18485a854 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1356,7 +1356,8 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
const char func[] = "CT_IOCB";
const char *type;
srb_t *sp;
- struct fc_bsg_job *bsg_job;
+ struct bsg_job *bsg_job;
+ struct fc_bsg_reply *bsg_reply;
uint16_t comp_status;
int res;
@@ -1365,6 +1366,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
return;
bsg_job = sp->u.bsg_job;
+ bsg_reply = bsg_job->reply;
type = "ct pass-through";
@@ -1373,32 +1375,32 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
* fc payload to the caller
*/
- bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
if (comp_status != CS_COMPLETE) {
if (comp_status == CS_DATA_UNDERRUN) {
res = DID_OK << 16;
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
ql_log(ql_log_warn, vha, 0x5048,
"CT pass-through-%s error "
"comp_status-status=0x%x total_byte = 0x%x.\n",
type, comp_status,
- bsg_job->reply->reply_payload_rcv_len);
+ bsg_reply->reply_payload_rcv_len);
} else {
ql_log(ql_log_warn, vha, 0x5049,
"CT pass-through-%s error "
"comp_status-status=0x%x.\n", type, comp_status);
res = DID_ERROR << 16;
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
}
ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
(uint8_t *)pkt, sizeof(*pkt));
} else {
res = DID_OK << 16;
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
bsg_job->reply_len = 0;
}
@@ -1413,7 +1415,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
const char func[] = "ELS_CT_IOCB";
const char *type;
srb_t *sp;
- struct fc_bsg_job *bsg_job;
+ struct bsg_job *bsg_job;
+ struct fc_bsg_reply *bsg_reply;
uint16_t comp_status;
uint32_t fw_status[3];
uint8_t* fw_sts_ptr;
@@ -1423,6 +1426,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
if (!sp)
return;
bsg_job = sp->u.bsg_job;
+ bsg_reply = bsg_job->reply;
type = NULL;
switch (sp->type) {
@@ -1452,13 +1456,13 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
* fc payload to the caller
*/
- bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
if (comp_status != CS_COMPLETE) {
if (comp_status == CS_DATA_UNDERRUN) {
res = DID_OK << 16;
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
ql_dbg(ql_dbg_user, vha, 0x503f,
@@ -1480,7 +1484,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
le16_to_cpu(((struct els_sts_entry_24xx *)
pkt)->error_subcode_2));
res = DID_ERROR << 16;
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
}
@@ -1489,7 +1493,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
}
else {
res = DID_OK << 16;
- bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
+ bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
bsg_job->reply_len = 0;
}
@@ -1904,7 +1908,9 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
uint16_t scsi_status;
uint16_t thread_id;
uint32_t rval = EXT_STATUS_OK;
- struct fc_bsg_job *bsg_job = NULL;
+ struct bsg_job *bsg_job = NULL;
+ struct fc_bsg_request *bsg_request;
+ struct fc_bsg_reply *bsg_reply;
sts_entry_t *sts;
struct sts_entry_24xx *sts24;
sts = (sts_entry_t *) pkt;
@@ -1919,11 +1925,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
}
sp = req->outstanding_cmds[index];
- if (sp) {
- /* Free outstanding command slot. */
- req->outstanding_cmds[index] = NULL;
- bsg_job = sp->u.bsg_job;
- } else {
+ if (!sp) {
ql_log(ql_log_warn, vha, 0x70b0,
"Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
req->id, index);
@@ -1932,6 +1934,12 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
return;
}
+ /* Free outstanding command slot. */
+ req->outstanding_cmds[index] = NULL;
+ bsg_job = sp->u.bsg_job;
+ bsg_request = bsg_job->request;
+ bsg_reply = bsg_job->reply;
+
if (IS_FWI2_CAPABLE(ha)) {
comp_status = le16_to_cpu(sts24->comp_status);
scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
@@ -1940,14 +1948,14 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
}
- thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
switch (comp_status) {
case CS_COMPLETE:
if (scsi_status == 0) {
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
vha->qla_stats.input_bytes +=
- bsg_job->reply->reply_payload_rcv_len;
+ bsg_reply->reply_payload_rcv_len;
vha->qla_stats.input_requests++;
rval = EXT_STATUS_OK;
}
@@ -2028,11 +2036,11 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
rval = EXT_STATUS_ERR;
break;
}
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
done:
/* Return the vendor specific reply to API */
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
/* Always return DID_OK, bsg will send the vendor specific response
* in this case only */
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 15dff7099955..02f1de18bc2b 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -10,6 +10,7 @@
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
+#include <linux/bsg-lib.h>
#include <scsi/scsi_tcq.h>
#include <linux/utsname.h>
@@ -2206,7 +2207,8 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
{
const char func[] = "IOSB_IOCB";
srb_t *sp;
- struct fc_bsg_job *bsg_job;
+ struct bsg_job *bsg_job;
+ struct fc_bsg_reply *bsg_reply;
struct srb_iocb *iocb_job;
int res;
struct qla_mt_iocb_rsp_fx00 fstatus;
@@ -2226,6 +2228,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
pkt->dataword_r;
} else {
bsg_job = sp->u.bsg_job;
+ bsg_reply = bsg_job->reply;
memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));
@@ -2257,8 +2260,8 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
sp->fcport->vha, 0x5074,
(uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));
- res = bsg_job->reply->result = DID_OK << 16;
- bsg_job->reply->reply_payload_rcv_len =
+ res = bsg_reply->result = DID_OK << 16;
+ bsg_reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
}
sp->done(vha, sp, res);
@@ -3252,7 +3255,8 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
{
struct srb_iocb *fxio = &sp->u.iocb_cmd;
struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
- struct fc_bsg_job *bsg_job;
+ struct bsg_job *bsg_job;
+ struct fc_bsg_request *bsg_request;
struct fxdisc_entry_fx00 fx_iocb;
uint8_t entry_cnt = 1;
@@ -3301,8 +3305,9 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
} else {
struct scatterlist *sg;
bsg_job = sp->u.bsg_job;
+ bsg_request = bsg_job->request;
piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
- &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
fx_iocb.func_num = piocb_rqst->func_type;
fx_iocb.adapid = piocb_rqst->adapid;
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index a7cfc270bd08..aeebefb1e9f8 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -409,18 +409,9 @@ struct qla4_8xxx_legacy_intr_set {
/* MSI-X Support */
-#define QLA_MSIX_DEFAULT 0x00
-#define QLA_MSIX_RSP_Q 0x01
-
+#define QLA_MSIX_DEFAULT 0
+#define QLA_MSIX_RSP_Q 1
#define QLA_MSIX_ENTRIES 2
-#define QLA_MIDX_DEFAULT 0
-#define QLA_MIDX_RSP_Q 1
-
-struct ql4_msix_entry {
- int have_irq;
- uint16_t msix_vector;
- uint16_t msix_entry;
-};
/*
* ISP Operations
@@ -572,9 +563,6 @@ struct scsi_qla_host {
#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
#define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */
#define AF_HA_REMOVAL 12 /* 0x00001000 */
-#define AF_INTx_ENABLED 15 /* 0x00008000 */
-#define AF_MSI_ENABLED 16 /* 0x00010000 */
-#define AF_MSIX_ENABLED 17 /* 0x00020000 */
#define AF_MBOX_COMMAND_NOPOLL 18 /* 0x00040000 */
#define AF_FW_RECOVERY 19 /* 0x00080000 */
#define AF_EEH_BUSY 20 /* 0x00100000 */
@@ -762,8 +750,6 @@ struct scsi_qla_host {
struct isp_operations *isp_ops;
struct ql82xx_hw_data hw;
- struct ql4_msix_entry msix_entries[QLA_MSIX_ENTRIES];
-
uint32_t nx_dev_init_timeout;
uint32_t nx_reset_timeout;
void *fw_dump;
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 2559144f5475..bce96a58f14e 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -134,7 +134,6 @@ int qla4_8xxx_get_flash_info(struct scsi_qla_host *ha);
void qla4_82xx_enable_intrs(struct scsi_qla_host *ha);
void qla4_82xx_disable_intrs(struct scsi_qla_host *ha);
int qla4_8xxx_enable_msix(struct scsi_qla_host *ha);
-void qla4_8xxx_disable_msix(struct scsi_qla_host *ha);
irqreturn_t qla4_8xxx_msi_handler(int irq, void *dev_id);
irqreturn_t qla4_8xxx_default_intr_handler(int irq, void *dev_id);
irqreturn_t qla4_8xxx_msix_rsp_q(int irq, void *dev_id);
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 4f9c0f2be89d..d2cd33d8d67f 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -1107,7 +1107,7 @@ static void qla4_82xx_spurious_interrupt(struct scsi_qla_host *ha,
DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
if (is_qla8022(ha)) {
writel(0, &ha->qla4_82xx_reg->host_int);
- if (test_bit(AF_INTx_ENABLED, &ha->flags))
+ if (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled)
qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
0xfbff);
}
@@ -1564,19 +1564,18 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
try_msi:
/* Trying MSI */
- ret = pci_enable_msi(ha->pdev);
- if (!ret) {
+ ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
+ if (ret > 0) {
ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler,
0, DRIVER_NAME, ha);
if (!ret) {
DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
- set_bit(AF_MSI_ENABLED, &ha->flags);
goto irq_attached;
} else {
ql4_printk(KERN_WARNING, ha,
"MSI: Failed to reserve interrupt %d "
"already in use.\n", ha->pdev->irq);
- pci_disable_msi(ha->pdev);
+ pci_free_irq_vectors(ha->pdev);
}
}
@@ -1592,7 +1591,6 @@ try_intx:
IRQF_SHARED, DRIVER_NAME, ha);
if (!ret) {
DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n"));
- set_bit(AF_INTx_ENABLED, &ha->flags);
goto irq_attached;
} else {
@@ -1614,14 +1612,11 @@ irq_not_attached:
void qla4xxx_free_irqs(struct scsi_qla_host *ha)
{
- if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags)) {
- if (test_bit(AF_MSIX_ENABLED, &ha->flags)) {
- qla4_8xxx_disable_msix(ha);
- } else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
- free_irq(ha->pdev->irq, ha);
- pci_disable_msi(ha->pdev);
- } else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags)) {
- free_irq(ha->pdev->irq, ha);
- }
- }
+ if (!test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
+ return;
+
+ if (ha->pdev->msix_enabled)
+ free_irq(pci_irq_vector(ha->pdev, 1), ha);
+ free_irq(pci_irq_vector(ha->pdev, 0), ha);
+ pci_free_irq_vectors(ha->pdev);
}
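The qla4xxx hunks show the series' headline conversion: pci_enable_msix_exact()/pci_enable_msi() plus driver-private vector tables give way to pci_alloc_irq_vectors(), with pci_irq_vector() translating a vector index into a Linux IRQ number and pdev->msi_enabled/msix_enabled replacing the driver's AF_MSI*/AF_INTx flag bits. A generic, hedged sketch of the allocate/request/free life cycle (handler and names are placeholders):

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t example_irq_handler(int irq, void *data)
{
	/* ... service the queue associated with this vector ... */
	return IRQ_HANDLED;
}

/* Try MSI-X first, then MSI, then legacy INTx; the PCI core records
 * what was granted, so no driver-side flag bits are needed.  (A real
 * driver would pass IRQF_SHARED when only the legacy vector is left.) */
static int example_setup_irqs(struct pci_dev *pdev, void *dev_data)
{
	int nvec, i, ret;

	nvec = pci_alloc_irq_vectors(pdev, 1, 2,
			PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		ret = request_irq(pci_irq_vector(pdev, i),
				  example_irq_handler, 0, "example", dev_data);
		if (ret)
			goto out_free;
	}
	return 0;

out_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), dev_data);
	pci_free_irq_vectors(pdev);
	return ret;
}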
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index c291fdff1b33..1da04f323d38 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -2032,10 +2032,7 @@ int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
ptid = (uint16_t *)&fw_ddb_entry->isid[1];
*ptid = cpu_to_le16((uint16_t)ddb_entry->sess->target_id);
- DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%02x%02x%02x%02x%02x%02x]\n",
- fw_ddb_entry->isid[5], fw_ddb_entry->isid[4],
- fw_ddb_entry->isid[3], fw_ddb_entry->isid[2],
- fw_ddb_entry->isid[1], fw_ddb_entry->isid[0]));
+ DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%pmR]\n", fw_ddb_entry->isid));
iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options);
memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias));
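The DEBUG2 rewrite above relies on the kernel's %p extensions for MAC-style byte arrays (see Documentation/printk-formats.txt): %pM prints colon-separated bytes, %pm drops the separators, and a trailing R reverses the byte order — used for the ISID because it is stored least-significant byte first. For example:

#include <linux/printk.h>
#include <linux/types.h>

static void example_print_isid(const u8 isid[6])
{
	pr_info("ISID [%pmR]\n", isid);	/* reversed byte order, no separators */
	pr_info("mac %pM\n", isid);	/* stored order, colon-separated */
}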
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 06ddd13cb7cc..e91abb327745 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -3945,7 +3945,7 @@ void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
ha->isp_ops->interrupt_service_routine(ha, intr_status);
if (test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
- test_bit(AF_INTx_ENABLED, &ha->flags))
+ (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled))
qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
0xfbff);
}
@@ -4094,12 +4094,8 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
ha->phy_port_num = sys_info->port_num;
ha->iscsi_pci_func_cnt = sys_info->iscsi_pci_func_cnt;
- DEBUG2(printk("scsi%ld: %s: "
- "mac %02x:%02x:%02x:%02x:%02x:%02x "
- "serial %s\n", ha->host_no, __func__,
- ha->my_mac[0], ha->my_mac[1], ha->my_mac[2],
- ha->my_mac[3], ha->my_mac[4], ha->my_mac[5],
- ha->serial_number));
+ DEBUG2(printk("scsi%ld: %s: mac %pM serial %s\n",
+ ha->host_no, __func__, ha->my_mac, ha->serial_number));
status = QLA_SUCCESS;
@@ -4178,78 +4174,37 @@ qla4_82xx_disable_intrs(struct scsi_qla_host *ha)
spin_unlock_irq(&ha->hardware_lock);
}
-struct ql4_init_msix_entry {
- uint16_t entry;
- uint16_t index;
- const char *name;
- irq_handler_t handler;
-};
-
-static struct ql4_init_msix_entry qla4_8xxx_msix_entries[QLA_MSIX_ENTRIES] = {
- { QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT,
- "qla4xxx (default)",
- (irq_handler_t)qla4_8xxx_default_intr_handler },
- { QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q,
- "qla4xxx (rsp_q)", (irq_handler_t)qla4_8xxx_msix_rsp_q },
-};
-
-void
-qla4_8xxx_disable_msix(struct scsi_qla_host *ha)
-{
- int i;
- struct ql4_msix_entry *qentry;
-
- for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
- qentry = &ha->msix_entries[qla4_8xxx_msix_entries[i].index];
- if (qentry->have_irq) {
- free_irq(qentry->msix_vector, ha);
- DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %s\n",
- __func__, qla4_8xxx_msix_entries[i].name));
- }
- }
- pci_disable_msix(ha->pdev);
- clear_bit(AF_MSIX_ENABLED, &ha->flags);
-}
-
int
qla4_8xxx_enable_msix(struct scsi_qla_host *ha)
{
- int i, ret;
- struct msix_entry entries[QLA_MSIX_ENTRIES];
- struct ql4_msix_entry *qentry;
-
- for (i = 0; i < QLA_MSIX_ENTRIES; i++)
- entries[i].entry = qla4_8xxx_msix_entries[i].entry;
+ int ret;
- ret = pci_enable_msix_exact(ha->pdev, entries, ARRAY_SIZE(entries));
- if (ret) {
+ ret = pci_alloc_irq_vectors(ha->pdev, QLA_MSIX_ENTRIES,
+ QLA_MSIX_ENTRIES, PCI_IRQ_MSIX);
+ if (ret < 0) {
ql4_printk(KERN_WARNING, ha,
"MSI-X: Failed to enable support -- %d/%d\n",
QLA_MSIX_ENTRIES, ret);
- goto msix_out;
- }
- set_bit(AF_MSIX_ENABLED, &ha->flags);
-
- for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
- qentry = &ha->msix_entries[qla4_8xxx_msix_entries[i].index];
- qentry->msix_vector = entries[i].vector;
- qentry->msix_entry = entries[i].entry;
- qentry->have_irq = 0;
- ret = request_irq(qentry->msix_vector,
- qla4_8xxx_msix_entries[i].handler, 0,
- qla4_8xxx_msix_entries[i].name, ha);
- if (ret) {
- ql4_printk(KERN_WARNING, ha,
- "MSI-X: Unable to register handler -- %x/%d.\n",
- qla4_8xxx_msix_entries[i].index, ret);
- qla4_8xxx_disable_msix(ha);
- goto msix_out;
- }
- qentry->have_irq = 1;
- DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %s\n",
- __func__, qla4_8xxx_msix_entries[i].name));
+ return ret;
}
-msix_out:
+
+ ret = request_irq(pci_irq_vector(ha->pdev, 0),
+ qla4_8xxx_default_intr_handler, 0, "qla4xxx (default)",
+ ha);
+ if (ret)
+ goto out_free_vectors;
+
+ ret = request_irq(pci_irq_vector(ha->pdev, 1),
+ qla4_8xxx_msix_rsp_q, 0, "qla4xxx (rsp_q)", ha);
+ if (ret)
+ goto out_free_default_irq;
+
+ return 0;
+
+out_free_default_irq:
+ free_irq(pci_irq_vector(ha->pdev, 0), ha);
+out_free_vectors:
+ pci_free_irq_vectors(ha->pdev);
return ret;
}
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 01c3610a60cf..9fbb33fc90c7 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -6304,13 +6304,9 @@ static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
* ISID would not match firmware generated ISID.
*/
if (is_isid_compare) {
- DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
- "%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
- __func__, old_tddb->isid[5], old_tddb->isid[4],
- old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
- old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
- new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
- new_tddb->isid[0]));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: old ISID [%pmR] New ISID [%pmR]\n",
+ __func__, old_tddb->isid, new_tddb->isid));
if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
sizeof(old_tddb->isid)))
@@ -7925,10 +7921,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout);
break;
case ISCSI_FLASHNODE_ISID:
- rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
- fnode_sess->isid[0], fnode_sess->isid[1],
- fnode_sess->isid[2], fnode_sess->isid[3],
- fnode_sess->isid[4], fnode_sess->isid[5]);
+ rc = sprintf(buf, "%pm\n", fnode_sess->isid);
break;
case ISCSI_FLASHNODE_TSID:
rc = sprintf(buf, "%u\n", fnode_sess->tsid);
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 246456925335..28fea83ae2fe 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -220,8 +220,6 @@ static struct {
{"NAKAMICH", "MJ-5.16S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"NEC", "PD-1 ODX654P", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"NEC", "iStorage", NULL, BLIST_REPORTLUN2},
- {"NETAPP", "LUN C-Mode", NULL, BLIST_SYNC_ALUA},
- {"NETAPP", "INF-01-00", NULL, BLIST_SYNC_ALUA},
{"NRC", "MBR-7", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"NRC", "MBR-7.4", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"PIONEER", "CD-ROM DRM-600", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9a8ccff1121f..c35b6de4ca64 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1998,6 +1998,15 @@ static void scsi_exit_request(void *data, struct request *rq,
kfree(cmd->sense_buffer);
}
+static int scsi_map_queues(struct blk_mq_tag_set *set)
+{
+ struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
+
+ if (shost->hostt->map_queues)
+ return shost->hostt->map_queues(shost);
+ return blk_mq_map_queues(set);
+}
+
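The new scsi_map_queues() above gives a SCSI host template a .map_queues hook into blk-mq's CPU-to-hardware-queue mapping; a driver that allocated its vectors with pci_alloc_irq_vectors() can hand the PCI core's affinity mapping straight back via blk_mq_pci_map_queues(). A hypothetical LLD-side implementation (struct example_hba and its field are placeholders):

#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>

struct example_hba {
	struct pci_dev *pdev;
};

/* Wired up via .map_queues = example_map_queues in the host template. */
static int example_map_queues(struct Scsi_Host *shost)
{
	struct example_hba *hba = shost_priv(shost);

	return blk_mq_pci_map_queues(&shost->tag_set, hba->pdev);
}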
static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
struct device *host_dev;
@@ -2090,6 +2099,7 @@ static struct blk_mq_ops scsi_mq_ops = {
.timeout = scsi_timeout,
.init_request = scsi_init_request,
.exit_request = scsi_exit_request,
+ .map_queues = scsi_map_queues,
};
struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
@@ -2732,6 +2742,39 @@ void sdev_evt_send_simple(struct scsi_device *sdev,
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
/**
+ * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
+ * @sdev: SCSI device to count the number of scsi_request_fn() callers for.
+ */
+static int scsi_request_fn_active(struct scsi_device *sdev)
+{
+ struct request_queue *q = sdev->request_queue;
+ int request_fn_active;
+
+ WARN_ON_ONCE(sdev->host->use_blk_mq);
+
+ spin_lock_irq(q->queue_lock);
+ request_fn_active = q->request_fn_active;
+ spin_unlock_irq(q->queue_lock);
+
+ return request_fn_active;
+}
+
+/**
+ * scsi_wait_for_queuecommand() - wait for ongoing queuecommand() calls
+ * @sdev: SCSI device pointer.
+ *
+ * Wait until the ongoing shost->hostt->queuecommand() calls that are
+ * invoked from scsi_request_fn() have finished.
+ */
+static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
+{
+ WARN_ON_ONCE(sdev->host->use_blk_mq);
+
+ while (scsi_request_fn_active(sdev))
+ msleep(20);
+}
+
+/**
* scsi_device_quiesce - Block user issued commands.
* @sdev: scsi device to quiesce.
*
@@ -2815,8 +2858,7 @@ EXPORT_SYMBOL(scsi_target_resume);
* @sdev: device to block
*
* Block request made by scsi lld's to temporarily stop all
- * scsi commands on the specified device. Called from interrupt
- * or normal process context.
+ * scsi commands on the specified device. May sleep.
*
* Returns zero if successful or error if not
*
@@ -2825,6 +2867,10 @@ EXPORT_SYMBOL(scsi_target_resume);
* (which must be a legal transition). When the device is in this
* state, all commands are deferred until the scsi lld reenables
* the device with scsi_device_unblock or device_block_tmo fires.
+ *
+ * To do: ensure that scsi_send_eh_cmnd() cannot call queuecommand()
+ * after scsi_internal_device_block() has blocked a SCSI device, and
+ * remove the rport mutex lock and unlock calls from srp_queuecommand().
*/
int
scsi_internal_device_block(struct scsi_device *sdev)
@@ -2852,6 +2898,7 @@ scsi_internal_device_block(struct scsi_device *sdev)
spin_lock_irqsave(q->queue_lock, flags);
blk_stop_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
+ scsi_wait_for_queuecommand(sdev);
}
return 0;
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 0f3a3869524b..03577bde6ac5 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -30,6 +30,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/bsg-lib.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
@@ -2592,7 +2593,7 @@ fc_rport_final_delete(struct work_struct *work)
/**
- * fc_rport_create - allocates and creates a remote FC port.
+ * fc_remote_port_create - allocates and creates a remote FC port.
* @shost: scsi host the remote port is connected to.
* @channel: Channel on shost port connected to.
* @ids: The world wide names, fc address, and FC4 port
@@ -2605,8 +2606,8 @@ fc_rport_final_delete(struct work_struct *work)
* This routine assumes no locks are held on entry.
*/
static struct fc_rport *
-fc_rport_create(struct Scsi_Host *shost, int channel,
- struct fc_rport_identifiers *ids)
+fc_remote_port_create(struct Scsi_Host *shost, int channel,
+ struct fc_rport_identifiers *ids)
{
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
struct fc_internal *fci = to_fc_internal(shost->transportt);
@@ -2914,7 +2915,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
spin_unlock_irqrestore(shost->host_lock, flags);
/* No consistent binding found - create new remote port entry */
- rport = fc_rport_create(shost, channel, ids);
+ rport = fc_remote_port_create(shost, channel, ids);
return rport;
}
@@ -3554,81 +3555,6 @@ fc_vport_sched_delete(struct work_struct *work)
* BSG support
*/
-
-/**
- * fc_destroy_bsgjob - routine to teardown/delete a fc bsg job
- * @job: fc_bsg_job that is to be torn down
- */
-static void
-fc_destroy_bsgjob(struct fc_bsg_job *job)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&job->job_lock, flags);
- if (job->ref_cnt) {
- spin_unlock_irqrestore(&job->job_lock, flags);
- return;
- }
- spin_unlock_irqrestore(&job->job_lock, flags);
-
- put_device(job->dev); /* release reference for the request */
-
- kfree(job->request_payload.sg_list);
- kfree(job->reply_payload.sg_list);
- kfree(job);
-}
-
-/**
- * fc_bsg_jobdone - completion routine for bsg requests that the LLD has
- * completed
- * @job: fc_bsg_job that is complete
- */
-static void
-fc_bsg_jobdone(struct fc_bsg_job *job)
-{
- struct request *req = job->req;
- struct request *rsp = req->next_rq;
- int err;
-
- err = job->req->errors = job->reply->result;
-
- if (err < 0)
- /* we're only returning the result field in the reply */
- job->req->sense_len = sizeof(uint32_t);
- else
- job->req->sense_len = job->reply_len;
-
- /* we assume all request payload was transferred, residual == 0 */
- req->resid_len = 0;
-
- if (rsp) {
- WARN_ON(job->reply->reply_payload_rcv_len > rsp->resid_len);
-
- /* set reply (bidi) residual */
- rsp->resid_len -= min(job->reply->reply_payload_rcv_len,
- rsp->resid_len);
- }
- blk_complete_request(req);
-}
-
-/**
- * fc_bsg_softirq_done - softirq done routine for destroying the bsg requests
- * @rq: BSG request that holds the job to be destroyed
- */
-static void fc_bsg_softirq_done(struct request *rq)
-{
- struct fc_bsg_job *job = rq->special;
- unsigned long flags;
-
- spin_lock_irqsave(&job->job_lock, flags);
- job->state_flags |= FC_RQST_STATE_DONE;
- job->ref_cnt--;
- spin_unlock_irqrestore(&job->job_lock, flags);
-
- blk_end_request_all(rq, rq->errors);
- fc_destroy_bsgjob(job);
-}
-
/**
* fc_bsg_job_timeout - handler for when a bsg request times out
* @req: request that timed out
@@ -3636,27 +3562,22 @@ static void fc_bsg_softirq_done(struct request *rq)
static enum blk_eh_timer_return
fc_bsg_job_timeout(struct request *req)
{
- struct fc_bsg_job *job = (void *) req->special;
- struct Scsi_Host *shost = job->shost;
+ struct bsg_job *job = (void *) req->special;
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct fc_rport *rport = fc_bsg_to_rport(job);
struct fc_internal *i = to_fc_internal(shost->transportt);
- unsigned long flags;
- int err = 0, done = 0;
+ int err = 0, inflight = 0;
- if (job->rport && job->rport->port_state == FC_PORTSTATE_BLOCKED)
+ if (rport && rport->port_state == FC_PORTSTATE_BLOCKED)
return BLK_EH_RESET_TIMER;
- spin_lock_irqsave(&job->job_lock, flags);
- if (job->state_flags & FC_RQST_STATE_DONE)
- done = 1;
- else
- job->ref_cnt++;
- spin_unlock_irqrestore(&job->job_lock, flags);
+ inflight = bsg_job_get(job);
- if (!done && i->f->bsg_timeout) {
+ if (inflight && i->f->bsg_timeout) {
/* call LLDD to abort the i/o as it has timed out */
err = i->f->bsg_timeout(job);
if (err == -EAGAIN) {
- job->ref_cnt--;
+ bsg_job_put(job);
return BLK_EH_RESET_TIMER;
} else if (err)
printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
@@ -3664,126 +3585,33 @@ fc_bsg_job_timeout(struct request *req)
}
/* blk_end_sync_io() doesn't check the error */
- if (done)
+ if (!inflight)
return BLK_EH_NOT_HANDLED;
else
return BLK_EH_HANDLED;
}
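The timeout rewrite above replaces the hand-rolled ref_cnt/state_flags accounting under job_lock with bsg-lib's kref: bsg_job_get() wraps kref_get_unless_zero(), so a zero return means the job already completed and the timeout handler raced with normal completion. The idiom in isolation, with a hypothetical LLD abort hook:

#include <linux/blkdev.h>
#include <linux/bsg-lib.h>

static enum blk_eh_timer_return example_bsg_timeout(struct request *req)
{
	struct bsg_job *job = req->special;

	if (!bsg_job_get(job))			/* kref_get_unless_zero() */
		return BLK_EH_NOT_HANDLED;	/* job already completed */

	if (example_lld_abort(job) == -EAGAIN) {	/* hypothetical hook */
		bsg_job_put(job);		/* give the I/O more time */
		return BLK_EH_RESET_TIMER;
	}
	return BLK_EH_HANDLED;
}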
-static int
-fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req)
-{
- size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);
-
- BUG_ON(!req->nr_phys_segments);
-
- buf->sg_list = kzalloc(sz, GFP_KERNEL);
- if (!buf->sg_list)
- return -ENOMEM;
- sg_init_table(buf->sg_list, req->nr_phys_segments);
- buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
- buf->payload_len = blk_rq_bytes(req);
- return 0;
-}
-
-
-/**
- * fc_req_to_bsgjob - Allocate/create the fc_bsg_job structure for the
- * bsg request
- * @shost: SCSI Host corresponding to the bsg object
- * @rport: (optional) FC Remote Port corresponding to the bsg object
- * @req: BSG request that needs a job structure
- */
-static int
-fc_req_to_bsgjob(struct Scsi_Host *shost, struct fc_rport *rport,
- struct request *req)
-{
- struct fc_internal *i = to_fc_internal(shost->transportt);
- struct request *rsp = req->next_rq;
- struct fc_bsg_job *job;
- int ret;
-
- BUG_ON(req->special);
-
- job = kzalloc(sizeof(struct fc_bsg_job) + i->f->dd_bsg_size,
- GFP_KERNEL);
- if (!job)
- return -ENOMEM;
-
- /*
- * Note: this is a bit silly.
- * The request gets formatted as a SGIO v4 ioctl request, which
- * then gets reformatted as a blk request, which then gets
- * reformatted as a fc bsg request. And on completion, we have
- * to wrap return results such that SGIO v4 thinks it was a scsi
- * status. I hope this was all worth it.
- */
-
- req->special = job;
- job->shost = shost;
- job->rport = rport;
- job->req = req;
- if (i->f->dd_bsg_size)
- job->dd_data = (void *)&job[1];
- spin_lock_init(&job->job_lock);
- job->request = (struct fc_bsg_request *)req->cmd;
- job->request_len = req->cmd_len;
- job->reply = req->sense;
- job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
- * allocated */
- if (req->bio) {
- ret = fc_bsg_map_buffer(&job->request_payload, req);
- if (ret)
- goto failjob_rls_job;
- }
- if (rsp && rsp->bio) {
- ret = fc_bsg_map_buffer(&job->reply_payload, rsp);
- if (ret)
- goto failjob_rls_rqst_payload;
- }
- job->job_done = fc_bsg_jobdone;
- if (rport)
- job->dev = &rport->dev;
- else
- job->dev = &shost->shost_gendev;
- get_device(job->dev); /* take a reference for the request */
-
- job->ref_cnt = 1;
-
- return 0;
-
-
-failjob_rls_rqst_payload:
- kfree(job->request_payload.sg_list);
-failjob_rls_job:
- kfree(job);
- return -ENOMEM;
-}
-
-
-enum fc_dispatch_result {
- FC_DISPATCH_BREAK, /* on return, q is locked, break from q loop */
- FC_DISPATCH_LOCKED, /* on return, q is locked, continue on */
- FC_DISPATCH_UNLOCKED, /* on return, q is unlocked, continue on */
-};
-
-
/**
* fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD
- * @q: fc host request queue
* @shost: scsi host rport attached to
* @job: bsg job to be processed
*/
-static enum fc_dispatch_result
-fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
- struct fc_bsg_job *job)
+static int fc_bsg_host_dispatch(struct Scsi_Host *shost, struct bsg_job *job)
{
struct fc_internal *i = to_fc_internal(shost->transportt);
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
int ret;
+ /* check if we really have all the request data needed */
+ if (job->request_len < cmdlen) {
+ ret = -ENOMSG;
+ goto fail_host_msg;
+ }
+
/* Validate the host command */
- switch (job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_HST_ADD_RPORT:
cmdlen += sizeof(struct fc_bsg_host_add_rport);
break;
@@ -3815,7 +3643,7 @@ fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
case FC_BSG_HST_VENDOR:
cmdlen += sizeof(struct fc_bsg_host_vendor);
if ((shost->hostt->vendor_id == 0L) ||
- (job->request->rqst_data.h_vendor.vendor_id !=
+ (bsg_request->rqst_data.h_vendor.vendor_id !=
shost->hostt->vendor_id)) {
ret = -ESRCH;
goto fail_host_msg;
@@ -3827,24 +3655,19 @@ fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
goto fail_host_msg;
}
- /* check if we really have all the request data needed */
- if (job->request_len < cmdlen) {
- ret = -ENOMSG;
- goto fail_host_msg;
- }
-
ret = i->f->bsg_request(job);
if (!ret)
- return FC_DISPATCH_UNLOCKED;
+ return 0;
fail_host_msg:
/* return the errno failure code as the only status */
BUG_ON(job->reply_len < sizeof(uint32_t));
- job->reply->reply_payload_rcv_len = 0;
- job->reply->result = ret;
+ bsg_reply->reply_payload_rcv_len = 0;
+ bsg_reply->result = ret;
job->reply_len = sizeof(uint32_t);
- fc_bsg_jobdone(job);
- return FC_DISPATCH_UNLOCKED;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ return 0;
}
@@ -3855,34 +3678,38 @@ fail_host_msg:
static void
fc_bsg_goose_queue(struct fc_rport *rport)
{
- if (!rport->rqst_q)
+ struct request_queue *q = rport->rqst_q;
+ unsigned long flags;
+
+ if (!q)
return;
- /*
- * This get/put dance makes no sense
- */
- get_device(&rport->dev);
- blk_run_queue_async(rport->rqst_q);
- put_device(&rport->dev);
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_run_queue_async(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
}
/**
* fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
- * @q: rport request queue
* @shost: scsi host rport attached to
- * @rport: rport request destined to
* @job: bsg job to be processed
*/
-static enum fc_dispatch_result
-fc_bsg_rport_dispatch(struct request_queue *q, struct Scsi_Host *shost,
- struct fc_rport *rport, struct fc_bsg_job *job)
+static int fc_bsg_rport_dispatch(struct Scsi_Host *shost, struct bsg_job *job)
{
struct fc_internal *i = to_fc_internal(shost->transportt);
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
int ret;
+ /* check if we really have all the request data needed */
+ if (job->request_len < cmdlen) {
+ ret = -ENOMSG;
+ goto fail_rport_msg;
+ }
+
/* Validate the rport command */
- switch (job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_RPT_ELS:
cmdlen += sizeof(struct fc_bsg_rport_els);
goto check_bidi;
@@ -3902,133 +3729,31 @@ check_bidi:
goto fail_rport_msg;
}
- /* check if we really have all the request data needed */
- if (job->request_len < cmdlen) {
- ret = -ENOMSG;
- goto fail_rport_msg;
- }
-
ret = i->f->bsg_request(job);
if (!ret)
- return FC_DISPATCH_UNLOCKED;
+ return 0;
fail_rport_msg:
/* return the errno failure code as the only status */
BUG_ON(job->reply_len < sizeof(uint32_t));
- job->reply->reply_payload_rcv_len = 0;
- job->reply->result = ret;
+ bsg_reply->reply_payload_rcv_len = 0;
+ bsg_reply->result = ret;
job->reply_len = sizeof(uint32_t);
- fc_bsg_jobdone(job);
- return FC_DISPATCH_UNLOCKED;
-}
-
-
-/**
- * fc_bsg_request_handler - generic handler for bsg requests
- * @q: request queue to manage
- * @shost: Scsi_Host related to the bsg object
- * @rport: FC remote port related to the bsg object (optional)
- * @dev: device structure for bsg object
- */
-static void
-fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
- struct fc_rport *rport, struct device *dev)
-{
- struct request *req;
- struct fc_bsg_job *job;
- enum fc_dispatch_result ret;
-
- if (!get_device(dev))
- return;
-
- while (1) {
- if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
- !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
- break;
-
- req = blk_fetch_request(q);
- if (!req)
- break;
-
- if (rport && (rport->port_state != FC_PORTSTATE_ONLINE)) {
- req->errors = -ENXIO;
- spin_unlock_irq(q->queue_lock);
- blk_end_request_all(req, -ENXIO);
- spin_lock_irq(q->queue_lock);
- continue;
- }
-
- spin_unlock_irq(q->queue_lock);
-
- ret = fc_req_to_bsgjob(shost, rport, req);
- if (ret) {
- req->errors = ret;
- blk_end_request_all(req, ret);
- spin_lock_irq(q->queue_lock);
- continue;
- }
-
- job = req->special;
-
- /* check if we have the msgcode value at least */
- if (job->request_len < sizeof(uint32_t)) {
- BUG_ON(job->reply_len < sizeof(uint32_t));
- job->reply->reply_payload_rcv_len = 0;
- job->reply->result = -ENOMSG;
- job->reply_len = sizeof(uint32_t);
- fc_bsg_jobdone(job);
- spin_lock_irq(q->queue_lock);
- continue;
- }
-
- /* the dispatch routines will unlock the queue_lock */
- if (rport)
- ret = fc_bsg_rport_dispatch(q, shost, rport, job);
- else
- ret = fc_bsg_host_dispatch(q, shost, job);
-
- /* did dispatcher hit state that can't process any more */
- if (ret == FC_DISPATCH_BREAK)
- break;
-
- /* did dispatcher had released the lock */
- if (ret == FC_DISPATCH_UNLOCKED)
- spin_lock_irq(q->queue_lock);
- }
-
- spin_unlock_irq(q->queue_lock);
- put_device(dev);
- spin_lock_irq(q->queue_lock);
-}
-
-
-/**
- * fc_bsg_host_handler - handler for bsg requests for a fc host
- * @q: fc host request queue
- */
-static void
-fc_bsg_host_handler(struct request_queue *q)
-{
- struct Scsi_Host *shost = q->queuedata;
-
- fc_bsg_request_handler(q, shost, NULL, &shost->shost_gendev);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ return 0;
}
-
-/**
- * fc_bsg_rport_handler - handler for bsg requests for a fc rport
- * @q: rport request queue
- */
-static void
-fc_bsg_rport_handler(struct request_queue *q)
+static int fc_bsg_dispatch(struct bsg_job *job)
{
- struct fc_rport *rport = q->queuedata;
- struct Scsi_Host *shost = rport_to_shost(rport);
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
- fc_bsg_request_handler(q, shost, rport, &rport->dev);
+ if (scsi_is_fc_rport(job->dev))
+ return fc_bsg_rport_dispatch(shost, job);
+ else
+ return fc_bsg_host_dispatch(shost, job);
}
-
/**
* fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests
* @shost: shost for fc_host
@@ -4051,33 +3776,42 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
snprintf(bsg_name, sizeof(bsg_name),
"fc_host%d", shost->host_no);
- q = __scsi_alloc_queue(shost, fc_bsg_host_handler);
+ q = __scsi_alloc_queue(shost, bsg_request_fn);
if (!q) {
- printk(KERN_ERR "fc_host%d: bsg interface failed to "
- "initialize - no request queue\n",
- shost->host_no);
+ dev_err(dev,
+ "fc_host%d: bsg interface failed to initialize - no request queue\n",
+ shost->host_no);
return -ENOMEM;
}
- q->queuedata = shost;
- queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
- blk_queue_softirq_done(q, fc_bsg_softirq_done);
- blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
- blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
-
- err = bsg_register_queue(q, dev, bsg_name, NULL);
+ err = bsg_setup_queue(dev, q, bsg_name, fc_bsg_dispatch,
+ i->f->dd_bsg_size);
if (err) {
- printk(KERN_ERR "fc_host%d: bsg interface failed to "
- "initialize - register queue\n",
- shost->host_no);
+ dev_err(dev,
+ "fc_host%d: bsg interface failed to initialize - setup queue\n",
+ shost->host_no);
blk_cleanup_queue(q);
return err;
}
-
+ blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
+ blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
fc_host->rqst_q = q;
return 0;
}
+static int fc_bsg_rport_prep(struct request_queue *q, struct request *req)
+{
+ struct fc_rport *rport = dev_to_rport(q->queuedata);
+
+ if (rport->port_state == FC_PORTSTATE_BLOCKED &&
+ !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
+ return BLKPREP_DEFER;
+
+ if (rport->port_state != FC_PORTSTATE_ONLINE)
+ return BLKPREP_KILL;
+
+ return BLKPREP_OK;
+}
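
For readers unfamiliar with the legacy request path: blk_queue_prep_rq() installs a callback that runs before each request is dispatched, and its return value steers the request, exactly as fc_bsg_rport_prep() does above. A hedged sketch, with example_dev and its fields purely illustrative:

    #include <linux/blkdev.h>

    static int example_prep(struct request_queue *q, struct request *req)
    {
            struct example_dev *edev = q->queuedata;  /* illustrative only */

            if (edev->blocked)
                    return BLKPREP_DEFER;   /* leave queued, retry later */
            if (edev->offline)
                    return BLKPREP_KILL;    /* fail the request outright */
            return BLKPREP_OK;              /* dispatch normally */
    }

    /* wired up once the queue exists: blk_queue_prep_rq(q, example_prep); */
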
/**
* fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests
@@ -4097,29 +3831,22 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
if (!i->f->bsg_request)
return -ENOTSUPP;
- q = __scsi_alloc_queue(shost, fc_bsg_rport_handler);
+ q = __scsi_alloc_queue(shost, bsg_request_fn);
if (!q) {
- printk(KERN_ERR "%s: bsg interface failed to "
- "initialize - no request queue\n",
- dev->kobj.name);
+ dev_err(dev, "bsg interface failed to initialize - no request queue\n");
return -ENOMEM;
}
- q->queuedata = rport;
- queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
- blk_queue_softirq_done(q, fc_bsg_softirq_done);
- blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
- blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
-
- err = bsg_register_queue(q, dev, NULL, NULL);
+ err = bsg_setup_queue(dev, q, NULL, fc_bsg_dispatch, i->f->dd_bsg_size);
if (err) {
- printk(KERN_ERR "%s: bsg interface failed to "
- "initialize - register queue\n",
- dev->kobj.name);
+ dev_err(dev, "failed to setup bsg queue\n");
blk_cleanup_queue(q);
return err;
}
+ blk_queue_prep_rq(q, fc_bsg_rport_prep);
+ blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
+ blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
rport->rqst_q = q;
return 0;
}
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index e3cd3ece4412..b87a78673f65 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -24,7 +24,6 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/delay.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -115,21 +114,12 @@ static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup,
static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports",
NULL, NULL, NULL);
-#define SRP_PID(p) \
- (p)->port_id[0], (p)->port_id[1], (p)->port_id[2], (p)->port_id[3], \
- (p)->port_id[4], (p)->port_id[5], (p)->port_id[6], (p)->port_id[7], \
- (p)->port_id[8], (p)->port_id[9], (p)->port_id[10], (p)->port_id[11], \
- (p)->port_id[12], (p)->port_id[13], (p)->port_id[14], (p)->port_id[15]
-
-#define SRP_PID_FMT "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:" \
- "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"
-
static ssize_t
show_srp_rport_id(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_rport *rport = transport_class_to_srp_rport(dev);
- return sprintf(buf, SRP_PID_FMT "\n", SRP_PID(rport));
+ return sprintf(buf, "%16phC\n", rport->port_id);
}
static DEVICE_ATTR(port_id, S_IRUGO, show_srp_rport_id, NULL);
@@ -402,36 +392,6 @@ static void srp_reconnect_work(struct work_struct *work)
}
}
-/**
- * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
- * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
- *
- * To do: add support for scsi-mq in this function.
- */
-static int scsi_request_fn_active(struct Scsi_Host *shost)
-{
- struct scsi_device *sdev;
- struct request_queue *q;
- int request_fn_active = 0;
-
- shost_for_each_device(sdev, shost) {
- q = sdev->request_queue;
-
- spin_lock_irq(q->queue_lock);
- request_fn_active += q->request_fn_active;
- spin_unlock_irq(q->queue_lock);
- }
-
- return request_fn_active;
-}
-
-/* Wait until ongoing shost->hostt->queuecommand() calls have finished. */
-static void srp_wait_for_queuecommand(struct Scsi_Host *shost)
-{
- while (scsi_request_fn_active(shost))
- msleep(20);
-}
-
static void __rport_fail_io_fast(struct srp_rport *rport)
{
struct Scsi_Host *shost = rport_to_shost(rport);
@@ -441,14 +401,17 @@ static void __rport_fail_io_fast(struct srp_rport *rport)
if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
return;
+ /*
+ * Call scsi_target_block() to wait for ongoing shost->queuecommand()
+ * calls before invoking i->f->terminate_rport_io().
+ */
+ scsi_target_block(rport->dev.parent);
scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
/* Involve the LLD if possible to terminate all I/O on the rport. */
i = to_srp_internal(shost->transportt);
- if (i->f->terminate_rport_io) {
- srp_wait_for_queuecommand(shost);
+ if (i->f->terminate_rport_io)
i->f->terminate_rport_io(rport);
- }
}
/**
@@ -576,7 +539,6 @@ int srp_reconnect_rport(struct srp_rport *rport)
if (res)
goto out;
scsi_target_block(&shost->shost_gendev);
- srp_wait_for_queuecommand(shost);
res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
pr_debug("%s (state %d): transport.reconnect() returned %d\n",
dev_name(&shost->shost_gendev), rport->state, res);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 079c2d9759fb..1622e23138e0 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2465,9 +2465,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
if (sdkp->first_scan || old_wp != sdkp->write_prot) {
sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
sdkp->write_prot ? "on" : "off");
- sd_printk(KERN_DEBUG, sdkp,
- "Mode Sense: %02x %02x %02x %02x\n",
- buffer[0], buffer[1], buffer[2], buffer[3]);
+ sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer);
}
}
}
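
Both hex-dump conversions above (here and in the SRP transport hunk) use the kernel's %*ph family of vsnprintf extensions, which print a small buffer (up to 64 bytes, the count given by the field width) without a hand-rolled format string: %ph separates bytes with spaces, %phC with colons, %phD with dashes, and %phN with nothing. For example:

    u8 sense[4] = { 0x00, 0x3a, 0x00, 0x10 };

    pr_debug("Mode Sense: %4ph\n", sense);      /* "00 3a 00 10" */
    pr_debug("port_id: %16phC\n", port_id);     /* "aa:bb:...:pp" */
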
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 07b6444d3e0a..b673825f46b5 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -929,8 +929,6 @@ struct pqi_ctrl_info {
int max_msix_vectors;
int num_msix_vectors_enabled;
int num_msix_vectors_initialized;
- u32 msix_vectors[PQI_MAX_MSIX_VECTORS];
- void *intr_data[PQI_MAX_MSIX_VECTORS];
int event_irq;
struct Scsi_Host *scsi_host;
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index a535b2661f38..8702d9cf8040 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -25,6 +25,7 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/cciss_ioctl.h>
+#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -2887,19 +2888,19 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
{
+ struct pci_dev *pdev = ctrl_info->pci_dev;
int i;
int rc;
- ctrl_info->event_irq = ctrl_info->msix_vectors[0];
+ ctrl_info->event_irq = pci_irq_vector(pdev, 0);
for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
- rc = request_irq(ctrl_info->msix_vectors[i],
- pqi_irq_handler, 0,
- DRIVER_NAME_SHORT, ctrl_info->intr_data[i]);
+ rc = request_irq(pci_irq_vector(pdev, i), pqi_irq_handler, 0,
+ DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
if (rc) {
- dev_err(&ctrl_info->pci_dev->dev,
+ dev_err(&pdev->dev,
"irq %u init failed with error %d\n",
- ctrl_info->msix_vectors[i], rc);
+ pci_irq_vector(pdev, i), rc);
return rc;
}
ctrl_info->num_msix_vectors_initialized++;
@@ -2908,72 +2909,23 @@ static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
return 0;
}
-static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
-{
- int i;
-
- for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
- free_irq(ctrl_info->msix_vectors[i],
- ctrl_info->intr_data[i]);
-}
-
static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
- unsigned int i;
- int max_vectors;
- int num_vectors_enabled;
- struct msix_entry msix_entries[PQI_MAX_MSIX_VECTORS];
-
- max_vectors = ctrl_info->num_queue_groups;
-
- for (i = 0; i < max_vectors; i++)
- msix_entries[i].entry = i;
-
- num_vectors_enabled = pci_enable_msix_range(ctrl_info->pci_dev,
- msix_entries, PQI_MIN_MSIX_VECTORS, max_vectors);
+ int ret;
- if (num_vectors_enabled < 0) {
+ ret = pci_alloc_irq_vectors(ctrl_info->pci_dev,
+ PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ if (ret < 0) {
dev_err(&ctrl_info->pci_dev->dev,
- "MSI-X init failed with error %d\n",
- num_vectors_enabled);
- return num_vectors_enabled;
- }
-
- ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
- for (i = 0; i < num_vectors_enabled; i++) {
- ctrl_info->msix_vectors[i] = msix_entries[i].vector;
- ctrl_info->intr_data[i] = &ctrl_info->queue_groups[i];
+ "MSI-X init failed with error %d\n", ret);
+ return ret;
}
+ ctrl_info->num_msix_vectors_enabled = ret;
return 0;
}
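
This is the pci_alloc_irq_vectors() conversion the merge description highlights: the PCI core now owns the vector table, pci_irq_vector() translates a vector index into a Linux IRQ number, and pci_free_irq_vectors() undoes the allocation. With PCI_IRQ_AFFINITY the core also spreads the vectors across CPUs, which is why the driver's hand-rolled affinity-hint helpers are deleted below. A minimal sketch of the lifecycle (demo_handler, "demo" and queues[] are placeholders):

    #include <linux/pci.h>
    #include <linux/interrupt.h>

    int i = 0, rc, nvec;

    nvec = pci_alloc_irq_vectors(pdev, 1, nr_queues,
                                 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
    if (nvec < 0)
            return nvec;                            /* allocation failed */

    for (; i < nvec; i++) {
            rc = request_irq(pci_irq_vector(pdev, i), demo_handler, 0,
                             "demo", &queues[i]);   /* placeholder names */
            if (rc)
                    goto undo;
    }
    return 0;

    undo:
            while (--i >= 0)
                    free_irq(pci_irq_vector(pdev, i), &queues[i]);
            pci_free_irq_vectors(pdev);
            return rc;
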
-static void pqi_irq_set_affinity_hint(struct pqi_ctrl_info *ctrl_info)
-{
- int i;
- int rc;
- int cpu;
-
- cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
- rc = irq_set_affinity_hint(ctrl_info->msix_vectors[i],
- get_cpu_mask(cpu));
- if (rc)
- dev_err(&ctrl_info->pci_dev->dev,
- "error %d setting affinity hint for irq vector %u\n",
- rc, ctrl_info->msix_vectors[i]);
- cpu = cpumask_next(cpu, cpu_online_mask);
- }
-}
-
-static void pqi_irq_unset_affinity_hint(struct pqi_ctrl_info *ctrl_info)
-{
- int i;
-
- for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
- irq_set_affinity_hint(ctrl_info->msix_vectors[i], NULL);
-}
-
static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
{
unsigned int i;
@@ -4743,6 +4695,13 @@ static int pqi_slave_configure(struct scsi_device *sdev)
return 0;
}
+static int pqi_map_queues(struct Scsi_Host *shost)
+{
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+
+ return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev);
+}
+
static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
void __user *arg)
{
@@ -5130,6 +5089,7 @@ static struct scsi_host_template pqi_driver_template = {
.ioctl = pqi_ioctl,
.slave_alloc = pqi_slave_alloc,
.slave_configure = pqi_slave_configure,
+ .map_queues = pqi_map_queues,
.sdev_attrs = pqi_sdev_attrs,
.shost_attrs = pqi_shost_attrs,
};
@@ -5159,7 +5119,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
shost->cmd_per_lun = shost->can_queue;
shost->sg_tablesize = ctrl_info->sg_tablesize;
shost->transportt = pqi_sas_transport_template;
- shost->irq = ctrl_info->msix_vectors[0];
+ shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
shost->unique_id = shost->irq;
shost->nr_hw_queues = ctrl_info->num_queue_groups;
shost->hostdata[0] = (unsigned long)ctrl_info;
@@ -5409,8 +5369,6 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
if (rc)
return rc;
- pqi_irq_set_affinity_hint(ctrl_info);
-
rc = pqi_create_queues(ctrl_info);
if (rc)
return rc;
@@ -5557,10 +5515,14 @@ static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
{
- pqi_irq_unset_affinity_hint(ctrl_info);
- pqi_free_irqs(ctrl_info);
- if (ctrl_info->num_msix_vectors_enabled)
- pci_disable_msix(ctrl_info->pci_dev);
+ int i;
+
+ for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
+ free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
+ &ctrl_info->queue_groups[i]);
+ }
+
+ pci_free_irq_vectors(ctrl_info->pci_dev);
}
static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 8ccfc9ea874b..05526b71541b 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1495,9 +1495,9 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
if (sg_count) {
if (sg_count > MAX_PAGE_BUFFER_COUNT) {
- payload_sz = (sg_count * sizeof(void *) +
+ payload_sz = (sg_count * sizeof(u64) +
sizeof(struct vmbus_packet_mpb_array));
- payload = kmalloc(payload_sz, GFP_ATOMIC);
+ payload = kzalloc(payload_sz, GFP_ATOMIC);
if (!payload)
return SCSI_MLQUEUE_DEVICE_BUSY;
}
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 3c4c07038948..88db6992420e 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -43,20 +43,18 @@
#define NCR5380_implementation_fields /* none */
-#define NCR5380_read(reg) sun3scsi_read(reg)
-#define NCR5380_write(reg, value) sun3scsi_write(reg, value)
+#define NCR5380_read(reg) in_8(hostdata->io + (reg))
+#define NCR5380_write(reg, value) out_8(hostdata->io + (reg), value)
#define NCR5380_queue_command sun3scsi_queue_command
#define NCR5380_bus_reset sun3scsi_bus_reset
#define NCR5380_abort sun3scsi_abort
#define NCR5380_info sun3scsi_info
-#define NCR5380_dma_recv_setup(instance, data, count) (count)
-#define NCR5380_dma_send_setup(instance, data, count) (count)
-#define NCR5380_dma_residual(instance) \
- sun3scsi_dma_residual(instance)
-#define NCR5380_dma_xfer_len(instance, cmd, phase) \
- sun3scsi_dma_xfer_len(cmd->SCp.this_residual, cmd)
+#define NCR5380_dma_xfer_len sun3scsi_dma_xfer_len
+#define NCR5380_dma_recv_setup sun3scsi_dma_count
+#define NCR5380_dma_send_setup sun3scsi_dma_count
+#define NCR5380_dma_residual sun3scsi_dma_residual
#define NCR5380_acquire_dma_irq(instance) (1)
#define NCR5380_release_dma_irq(instance)
@@ -82,7 +80,6 @@ module_param(setup_hostid, int, 0);
#define SUN3_DVMA_BUFSIZE 0xe000
static struct scsi_cmnd *sun3_dma_setup_done;
-static unsigned char *sun3_scsi_regp;
static volatile struct sun3_dma_regs *dregs;
static struct sun3_udc_regs *udc_regs;
static unsigned char *sun3_dma_orig_addr;
@@ -90,20 +87,6 @@ static unsigned long sun3_dma_orig_count;
static int sun3_dma_active;
static unsigned long last_residual;
-/*
- * NCR 5380 register access functions
- */
-
-static inline unsigned char sun3scsi_read(int reg)
-{
- return in_8(sun3_scsi_regp + reg);
-}
-
-static inline void sun3scsi_write(int reg, int value)
-{
- out_8(sun3_scsi_regp + reg, value);
-}
-
#ifndef SUN3_SCSI_VME
/* dma controller register access functions */
@@ -158,8 +141,8 @@ static irqreturn_t scsi_sun3_intr(int irq, void *dev)
}
/* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */
-static unsigned long sun3scsi_dma_setup(struct Scsi_Host *instance,
- void *data, unsigned long count, int write_flag)
+static int sun3scsi_dma_setup(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count, int write_flag)
{
void *addr;
@@ -211,9 +194,10 @@ static unsigned long sun3scsi_dma_setup(struct Scsi_Host *instance,
dregs->csr |= CSR_FIFO;
if(dregs->fifo_count != count) {
- shost_printk(KERN_ERR, instance, "FIFO mismatch %04x not %04x\n",
+ shost_printk(KERN_ERR, hostdata->host,
+ "FIFO mismatch %04x not %04x\n",
dregs->fifo_count, (unsigned int) count);
- NCR5380_dprint(NDEBUG_DMA, instance);
+ NCR5380_dprint(NDEBUG_DMA, hostdata->host);
}
/* setup udc */
@@ -248,14 +232,34 @@ static unsigned long sun3scsi_dma_setup(struct Scsi_Host *instance,
}
-static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance)
+static int sun3scsi_dma_count(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return count;
+}
+
+static inline int sun3scsi_dma_recv_setup(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return sun3scsi_dma_setup(hostdata, data, count, 0);
+}
+
+static inline int sun3scsi_dma_send_setup(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return sun3scsi_dma_setup(hostdata, data, count, 1);
+}
+
+static int sun3scsi_dma_residual(struct NCR5380_hostdata *hostdata)
{
return last_residual;
}
-static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted_len,
- struct scsi_cmnd *cmd)
+static int sun3scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
+ struct scsi_cmnd *cmd)
{
+ int wanted_len = cmd->SCp.this_residual;
+
if (wanted_len < DMA_MIN_SIZE || cmd->request->cmd_type != REQ_TYPE_FS)
return 0;
@@ -428,9 +432,10 @@ static struct scsi_host_template sun3_scsi_template = {
static int __init sun3_scsi_probe(struct platform_device *pdev)
{
struct Scsi_Host *instance;
+ struct NCR5380_hostdata *hostdata;
int error;
struct resource *irq, *mem;
- unsigned char *ioaddr;
+ void __iomem *ioaddr;
int host_flags = 0;
#ifdef SUN3_SCSI_VME
int i;
@@ -493,8 +498,6 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
}
#endif
- sun3_scsi_regp = ioaddr;
-
instance = scsi_host_alloc(&sun3_scsi_template,
sizeof(struct NCR5380_hostdata));
if (!instance) {
@@ -502,9 +505,12 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
goto fail_alloc;
}
- instance->io_port = (unsigned long)ioaddr;
instance->irq = irq->start;
+ hostdata = shost_priv(instance);
+ hostdata->base = mem->start;
+ hostdata->io = ioaddr;
+
error = NCR5380_init(instance, host_flags);
if (error)
goto fail_init;
@@ -552,13 +558,15 @@ fail_init:
fail_alloc:
if (udc_regs)
dvma_free(udc_regs);
- iounmap(sun3_scsi_regp);
+ iounmap(ioaddr);
return error;
}
static int __exit sun3_scsi_remove(struct platform_device *pdev)
{
struct Scsi_Host *instance = platform_get_drvdata(pdev);
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ void __iomem *ioaddr = hostdata->io;
scsi_remove_host(instance);
free_irq(instance->irq, instance);
@@ -566,7 +574,7 @@ static int __exit sun3_scsi_remove(struct platform_device *pdev)
scsi_host_put(instance);
if (udc_regs)
dvma_free(udc_regs);
- iounmap(sun3_scsi_regp);
+ iounmap(ioaddr);
return 0;
}
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 3aedf73f1131..aa43bfea0d00 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1094,10 +1094,12 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
 * ufs_qcom_setup_clocks - enable/disable clocks
* @hba: host controller instance
 * @on: If true, enable clocks; else disable them.
+ * @status: PRE_CHANGE or POST_CHANGE notify
*
* Returns 0 on success, non-zero on failure.
*/
-static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
+static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
+ enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err;
@@ -1111,18 +1113,9 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
if (!host)
return 0;
- if (on) {
- err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
- if (err)
- goto out;
+ if (on && (status == POST_CHANGE)) {
+ phy_power_on(host->generic_phy);
- err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
- if (err) {
- dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
- __func__, err);
- ufs_qcom_phy_disable_iface_clk(host->generic_phy);
- goto out;
- }
 /* enable the device ref clock for HS mode */
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
@@ -1130,14 +1123,15 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
if (vote == host->bus_vote.min_bw_vote)
ufs_qcom_update_bus_bw_vote(host);
- } else {
-
- /* M-PHY RMMI interface clocks can be turned off */
- ufs_qcom_phy_disable_iface_clk(host->generic_phy);
- if (!ufs_qcom_is_link_active(hba))
+ } else if (!on && (status == PRE_CHANGE)) {
+ if (!ufs_qcom_is_link_active(hba)) {
/* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(host, false);
+ /* powering off PHY during aggressive clk gating */
+ phy_power_off(host->generic_phy);
+ }
+
vote = host->bus_vote.min_bw_vote;
}
@@ -1146,7 +1140,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
dev_err(hba->dev, "%s: set bus vote failed %d\n",
__func__, err);
-out:
return err;
}
@@ -1204,12 +1197,12 @@ static int ufs_qcom_init(struct ufs_hba *hba)
if (IS_ERR(host->generic_phy)) {
err = PTR_ERR(host->generic_phy);
dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
- goto out;
+ goto out_variant_clear;
}
err = ufs_qcom_bus_register(host);
if (err)
- goto out_host_free;
+ goto out_variant_clear;
ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
&host->hw_ver.minor, &host->hw_ver.step);
@@ -1254,7 +1247,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_set_caps(hba);
ufs_qcom_advertise_quirks(hba);
- ufs_qcom_setup_clocks(hba, true);
+ ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
ufs_qcom_hosts[hba->dev->id] = host;
@@ -1274,8 +1267,7 @@ out_disable_phy:
phy_power_off(host->generic_phy);
out_unregister_bus:
phy_exit(host->generic_phy);
-out_host_free:
- devm_kfree(dev, host);
+out_variant_clear:
ufshcd_set_variant(hba, NULL);
out:
return err;
@@ -1287,6 +1279,7 @@ static void ufs_qcom_exit(struct ufs_hba *hba)
ufs_qcom_disable_lane_clks(host);
phy_power_off(host->generic_phy);
+ phy_exit(host->generic_phy);
}
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 845b874e2977..8e6709a3fb6b 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -46,6 +46,7 @@
#define QUERY_DESC_HDR_SIZE 2
#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
(sizeof(struct utp_upiu_header)))
+#define RESPONSE_UPIU_SENSE_DATA_LENGTH 18
#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
@@ -162,7 +163,7 @@ enum desc_header_offset {
};
enum ufs_desc_max_size {
- QUERY_DESC_DEVICE_MAX_SIZE = 0x1F,
+ QUERY_DESC_DEVICE_MAX_SIZE = 0x40,
QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90,
QUERY_DESC_UNIT_MAX_SIZE = 0x23,
QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06,
@@ -416,7 +417,7 @@ struct utp_cmd_rsp {
__be32 residual_transfer_count;
__be32 reserved[4];
__be16 sense_data_len;
- u8 sense_data[18];
+ u8 sense_data[RESPONSE_UPIU_SENSE_DATA_LENGTH];
};
/**
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index 22f881e9253a..f7983058f3f7 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -128,6 +128,13 @@ struct ufs_dev_fix {
*/
#define UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM (1 << 6)
+/*
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE; enabling this quirk ensures this.
+ */
+#define UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE (1 << 7)
+
struct ufs_hba;
void ufs_advertise_fixup_device(struct ufs_hba *hba);
@@ -140,6 +147,8 @@ static struct ufs_dev_fix ufs_fixups[] = {
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_NO_FASTAUTO),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index d15eaa466c59..52b546fb509b 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -104,6 +104,7 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
ufshcd_remove(hba);
+ ufshcd_dealloc_host(hba);
}
/**
@@ -147,6 +148,7 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = ufshcd_init(hba, mmio_base, pdev->irq);
if (err) {
dev_err(&pdev->dev, "Initialization failed\n");
+ ufshcd_dealloc_host(hba);
return err;
}
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index db53f38da864..a72a4ba78125 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -163,7 +163,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
if (ret) {
dev_err(dev, "%s: unable to find %s err %d\n",
__func__, prop_name, ret);
- goto out_free;
+ goto out;
}
vreg->min_uA = 0;
@@ -185,9 +185,6 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
goto out;
-out_free:
- devm_kfree(dev, vreg);
- vreg = NULL;
out:
if (!ret)
*out_vreg = vreg;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index cf549871c1ee..ef8548c3a423 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -45,6 +45,8 @@
#include "ufs_quirks.h"
#include "unipro.h"
+#define UFSHCD_REQ_SENSE_SIZE 18
+
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
UFSHCD_ERROR_MASK)
@@ -57,15 +59,9 @@
#define NOP_OUT_TIMEOUT 30 /* msecs */
/* Query request retries */
-#define QUERY_REQ_RETRIES 10
+#define QUERY_REQ_RETRIES 3
/* Query request timeout */
-#define QUERY_REQ_TIMEOUT 30 /* msec */
-/*
- * Query request timeout for fDeviceInit flag
- * fDeviceInit query response time for some devices is too large that default
- * QUERY_REQ_TIMEOUT may not be enough for such devices.
- */
-#define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */
+#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
/* Task management command timeout */
#define TM_CMD_TIMEOUT 100 /* msecs */
@@ -123,6 +119,7 @@ enum {
UFSHCD_STATE_RESET,
UFSHCD_STATE_ERROR,
UFSHCD_STATE_OPERATIONAL,
+ UFSHCD_STATE_EH_SCHEDULED,
};
/* UFSHCD error handling flags */
@@ -598,6 +595,20 @@ static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
return false;
}
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+ if (ufshcd_is_clkscaling_enabled(hba)) {
+ devfreq_suspend_device(hba->devfreq);
+ hba->clk_scaling.window_start_t = 0;
+ }
+}
+
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
+{
+ if (ufshcd_is_clkscaling_enabled(hba))
+ devfreq_resume_device(hba->devfreq);
+}
+
static void ufshcd_ungate_work(struct work_struct *work)
{
int ret;
@@ -631,8 +642,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
hba->clk_gating.is_suspended = false;
}
unblock_reqs:
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_resume_device(hba->devfreq);
+ ufshcd_resume_clkscaling(hba);
scsi_unblock_requests(hba->host);
}
@@ -660,6 +670,21 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
start:
switch (hba->clk_gating.state) {
case CLKS_ON:
+ /*
+ * Wait for the ungate work to complete if in progress.
+ * Though the clocks may be in the ON state, the link could
+ * still be in hibern8 state if hibern8 is allowed
+ * during clock gating.
+ * Make sure we also exit hibern8 state, in addition to
+ * the clocks being ON.
+ */
+ if (ufshcd_can_hibern8_during_gating(hba) &&
+ ufshcd_is_link_hibern8(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ flush_work(&hba->clk_gating.ungate_work);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ goto start;
+ }
break;
case REQ_CLKS_OFF:
if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
@@ -709,7 +734,14 @@ static void ufshcd_gate_work(struct work_struct *work)
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_gating.is_suspended) {
+ /*
+ * In case a request to cancel this work arrives, the gating state
+ * will already be marked REQ_CLKS_ON. In that case, save time by
+ * skipping the gating work and exiting after changing the clock
+ * state to CLKS_ON.
+ */
+ if (hba->clk_gating.is_suspended ||
+ (hba->clk_gating.state == REQ_CLKS_ON)) {
hba->clk_gating.state = CLKS_ON;
goto rel_lock;
}
@@ -731,10 +763,7 @@ static void ufshcd_gate_work(struct work_struct *work)
ufshcd_set_link_hibern8(hba);
}
- if (ufshcd_is_clkscaling_enabled(hba)) {
- devfreq_suspend_device(hba->devfreq);
- hba->clk_scaling.window_start_t = 0;
- }
+ ufshcd_suspend_clkscaling(hba);
if (!ufshcd_is_link_active(hba))
ufshcd_setup_clocks(hba, false);
@@ -878,6 +907,8 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
ufshcd_clk_scaling_start_busy(hba);
__set_bit(task_tag, &hba->outstanding_reqs);
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ /* Make sure that the doorbell is committed immediately */
+ wmb();
}
/**
@@ -889,10 +920,14 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
int len;
if (lrbp->sense_buffer &&
ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
+ int len_to_copy;
+
len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
+ len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
+
memcpy(lrbp->sense_buffer,
lrbp->ucd_rsp_ptr->sr.sense_data,
- min_t(int, len, SCSI_SENSE_BUFFERSIZE));
+ min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
}
}
@@ -1088,7 +1123,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
*
* Returns 0 in case of success, non-zero value in case of failure
*/
-static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
+static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
struct ufshcd_sg_entry *prd_table;
struct scatterlist *sg;
@@ -1102,8 +1137,13 @@ static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
return sg_segments;
if (sg_segments) {
- lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16((u16) (sg_segments));
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16)(sg_segments *
+ sizeof(struct ufshcd_sg_entry)));
+ else
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16) (sg_segments));
prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
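
The UFSHCD_QUIRK_PRDT_BYTE_GRAN branch above writes the same PRDT size in two encodings: standard controllers expect the entry count, quirky ones the byte length. Assuming the driver's 16-byte struct ufshcd_sg_entry, a request with four scatter-gather segments would be encoded as:

    /* byte granularity: 4 segments * 16 bytes = 64 */
    prd_table_length = cpu_to_le16(4 * sizeof(struct ufshcd_sg_entry));

    /* default: plain entry count = 4 */
    prd_table_length = cpu_to_le16(4);
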
@@ -1410,6 +1450,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
break;
+ case UFSHCD_STATE_EH_SCHEDULED:
case UFSHCD_STATE_RESET:
err = SCSI_MLQUEUE_HOST_BUSY;
goto out_unlock;
@@ -1457,7 +1498,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
WARN_ON(lrbp->cmd);
lrbp->cmd = cmd;
- lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
+ lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
lrbp->sense_buffer = cmd->sense_buffer;
lrbp->task_tag = tag;
lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
@@ -1465,15 +1506,18 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
ufshcd_comp_scsi_upiu(hba, lrbp);
- err = ufshcd_map_sg(lrbp);
+ err = ufshcd_map_sg(hba, lrbp);
if (err) {
lrbp->cmd = NULL;
clear_bit_unlock(tag, &hba->lrb_in_use);
goto out;
}
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
/* issue command to the controller */
spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
ufshcd_send_command(hba, tag);
out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1581,6 +1625,8 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
msecs_to_jiffies(max_timeout));
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
hba->dev_cmd.complete = NULL;
if (likely(time_left)) {
@@ -1683,6 +1729,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
ufshcd_send_command(hba, tag);
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1789,9 +1836,6 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
goto out_unlock;
}
- if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
- timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;
-
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
if (err) {
@@ -1861,8 +1905,8 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
if (err) {
- dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
- __func__, opcode, idn, err);
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+ __func__, opcode, idn, index, err);
goto out_unlock;
}
@@ -1961,8 +2005,8 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
if (err) {
- dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
- __func__, opcode, idn, err);
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+ __func__, opcode, idn, index, err);
goto out_unlock;
}
@@ -2055,18 +2099,41 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba,
desc_id, desc_index, 0, desc_buf,
&buff_len);
- if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
- (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
- ufs_query_desc_max_size[desc_id])
- || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
- dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
- __func__, desc_id, param_offset, buff_len, ret);
- if (!ret)
- ret = -EINVAL;
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
+ __func__, desc_id, desc_index, param_offset, ret);
goto out;
}
+ /* Sanity check */
+ if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
+ dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
+ __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * While reading variable size descriptors (like string descriptor),
+ * some UFS devices may report the "LENGTH" (field in "Transaction
+ * Specific fields" of Query Response UPIU) same as what was requested
+ * in Query Request UPIU instead of reporting the actual size of the
+ * variable size descriptor.
+ * It is safe to ignore the "LENGTH" field for variable size
+ * descriptors, as we can always derive the length of the descriptor
+ * from the descriptor header fields. Hence, this change imposes the
+ * length match check only for fixed size descriptors (for which we
+ * always request the correct size as part of the Query Request UPIU).
+ */
+ if ((desc_id != QUERY_DESC_IDN_STRING) &&
+ (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
+ dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
+ __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
+ ret = -EINVAL;
+ goto out;
+ }
+
if (is_kmalloc)
memcpy(param_read_buf, &desc_buf[param_offset], param_size);
out:
@@ -2088,7 +2155,18 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
u8 *buf,
u32 size)
{
- return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+ int err = 0;
+ int retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ /* Read descriptor */
+ err = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+ if (!err)
+ break;
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
+ }
+
+ return err;
}
int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
@@ -2320,12 +2398,21 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
/* Response upiu and prdt offset should be in double words */
- utrdlp[i].response_upiu_offset =
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
+ utrdlp[i].response_upiu_offset =
+ cpu_to_le16(response_offset);
+ utrdlp[i].prd_table_offset =
+ cpu_to_le16(prdt_offset);
+ utrdlp[i].response_upiu_length =
+ cpu_to_le16(ALIGNED_UPIU_SIZE);
+ } else {
+ utrdlp[i].response_upiu_offset =
cpu_to_le16((response_offset >> 2));
- utrdlp[i].prd_table_offset =
+ utrdlp[i].prd_table_offset =
cpu_to_le16((prdt_offset >> 2));
- utrdlp[i].response_upiu_length =
+ utrdlp[i].response_upiu_length =
cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
+ }
hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
hba->lrb[i].ucd_req_ptr =
@@ -2429,10 +2516,10 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
} while (ret && peer && --retries);
- if (!retries)
+ if (ret)
dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
- set, UIC_GET_ATTR_ID(attr_sel), mib_val,
- retries);
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val,
+ UFS_UIC_COMMAND_RETRIES - retries);
return ret;
}
@@ -2496,9 +2583,10 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
get, UIC_GET_ATTR_ID(attr_sel), ret);
} while (ret && peer && --retries);
- if (!retries)
+ if (ret)
dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
- get, UIC_GET_ATTR_ID(attr_sel), retries);
+ get, UIC_GET_ATTR_ID(attr_sel),
+ UFS_UIC_COMMAND_RETRIES - retries);
if (mib_val && !ret)
*mib_val = uic_cmd.argument3;
@@ -2651,6 +2739,8 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
int ret;
struct uic_command uic_cmd = {0};
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
+
uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
@@ -2664,7 +2754,9 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
*/
if (ufshcd_link_recovery(hba))
ret = -ENOLINK;
- }
+ } else
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
+ POST_CHANGE);
return ret;
}
@@ -2687,13 +2779,17 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
struct uic_command uic_cmd = {0};
int ret;
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
+
uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
if (ret) {
dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
__func__, ret);
ret = ufshcd_link_recovery(hba);
- }
+ } else
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
+ POST_CHANGE);
return ret;
}
@@ -2725,8 +2821,8 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
if (hba->max_pwr_info.is_valid)
return 0;
- pwr_info->pwr_tx = FASTAUTO_MODE;
- pwr_info->pwr_rx = FASTAUTO_MODE;
+ pwr_info->pwr_tx = FAST_MODE;
+ pwr_info->pwr_rx = FAST_MODE;
pwr_info->hs_rate = PA_HS_MODE_B;
/* Get the connected lane count */
@@ -2757,7 +2853,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
__func__, pwr_info->gear_rx);
return -EINVAL;
}
- pwr_info->pwr_rx = SLOWAUTO_MODE;
+ pwr_info->pwr_rx = SLOW_MODE;
}
ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
@@ -2770,7 +2866,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
__func__, pwr_info->gear_tx);
return -EINVAL;
}
- pwr_info->pwr_tx = SLOWAUTO_MODE;
+ pwr_info->pwr_tx = SLOW_MODE;
}
hba->max_pwr_info.is_valid = true;
@@ -3090,7 +3186,16 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
{
int ret;
int retries = DME_LINKSTARTUP_RETRIES;
+ bool link_startup_again = false;
+
+ /*
+ * If the UFS device isn't active, then we will have to issue link startup
+ * twice to make sure the device state moves to active.
+ */
+ if (!ufshcd_is_ufs_dev_active(hba))
+ link_startup_again = true;
+link_startup:
do {
ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
@@ -3116,6 +3221,12 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
/* failed to get the link up... retire */
goto out;
+ if (link_startup_again) {
+ link_startup_again = false;
+ retries = DME_LINKSTARTUP_RETRIES;
+ goto link_startup;
+ }
+
if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
ret = ufshcd_disable_device_tx_lcc(hba);
if (ret)
@@ -3181,16 +3292,24 @@ static void ufshcd_set_queue_depth(struct scsi_device *sdev)
{
int ret = 0;
u8 lun_qdepth;
+ int retries;
struct ufs_hba *hba;
hba = shost_priv(sdev->host);
lun_qdepth = hba->nutrs;
- ret = ufshcd_read_unit_desc_param(hba,
- ufshcd_scsi_to_upiu_lun(sdev->lun),
- UNIT_DESC_PARAM_LU_Q_DEPTH,
- &lun_qdepth,
- sizeof(lun_qdepth));
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ /* Read descriptor */
+ ret = ufshcd_read_unit_desc_param(hba,
+ ufshcd_scsi_to_upiu_lun(sdev->lun),
+ UNIT_DESC_PARAM_LU_Q_DEPTH,
+ &lun_qdepth,
+ sizeof(lun_qdepth));
+ if (!ret || ret == -ENOTSUPP)
+ break;
+
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, ret);
+ }
 /* Some WLUNs don't support the unit descriptor */
if (ret == -EOPNOTSUPP)
@@ -4097,6 +4216,17 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
{
u32 reg;
+ /* PHY layer lane error */
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
+ /* Ignore LINERESET indication, as this is not an error */
+ if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
+ (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK))
+ /*
+ * To know whether this error is fatal or not, DB timeout
+ * must be checked but this error is handled separately.
+ */
+ dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
+
/* PA_INIT_ERROR is fatal and needs UIC reset */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
@@ -4158,7 +4288,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
/* block commands from scsi mid-layer */
scsi_block_requests(hba->host);
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
schedule_work(&hba->eh_work);
}
}
@@ -4311,6 +4441,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
task_req_upiup->input_param1 = cpu_to_be32(lun_id);
task_req_upiup->input_param2 = cpu_to_be32(task_id);
+ ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
+
/* send command to the controller */
__set_bit(free_slot, &hba->outstanding_tasks);
@@ -4318,6 +4450,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
wmb();
ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
+ /* Make sure that the doorbell is committed immediately */
+ wmb();
spin_unlock_irqrestore(host->host_lock, flags);
@@ -4722,6 +4856,24 @@ out:
return icc_level;
}
+static int ufshcd_set_icc_levels_attr(struct ufs_hba *hba, u32 icc_level)
+{
+ int ret = 0;
+ int retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ /* write attribute */
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
+ if (!ret)
+ break;
+
+ dev_dbg(hba->dev, "%s: failed with error %d\n", __func__, ret);
+ }
+
+ return ret;
+}
+
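
ufshcd_set_icc_levels_attr() above, like the power-descriptor and LU-queue-depth reads earlier in this patch, wraps a single query in the same bounded retry loop. The recurring shape, sketched with a hypothetical do_query() helper:

    int ret = -EIO;
    int retries;

    for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
            ret = do_query(hba);            /* hypothetical helper */
            if (!ret)
                    break;                  /* success, stop retrying */
            dev_dbg(hba->dev, "%s: error %d, retrying\n", __func__, ret);
    }
    /* ret still holds the last error if every attempt failed */
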
static void ufshcd_init_icc_levels(struct ufs_hba *hba)
{
int ret;
@@ -4742,9 +4894,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
__func__, hba->init_prefetch_data.icc_level);
- ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
- QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
- &hba->init_prefetch_data.icc_level);
+ ret = ufshcd_set_icc_levels_attr(hba,
+ hba->init_prefetch_data.icc_level);
if (ret)
dev_err(hba->dev,
@@ -4965,6 +5116,76 @@ out:
return ret;
}
+/**
+ * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
+ * less than device PA_TACTIVATE time.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
+ * for such devices.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
+{
+ int ret = 0;
+ u32 granularity, peer_granularity;
+ u32 pa_tactivate, peer_pa_tactivate;
+ u32 pa_tactivate_us, peer_pa_tactivate_us;
+ u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ &granularity);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ &peer_granularity);
+ if (ret)
+ goto out;
+
+ if ((granularity < PA_GRANULARITY_MIN_VAL) ||
+ (granularity > PA_GRANULARITY_MAX_VAL)) {
+ dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
+ __func__, granularity);
+ return -EINVAL;
+ }
+
+ if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
+ (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
+ dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
+ __func__, peer_granularity);
+ return -EINVAL;
+ }
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ &peer_pa_tactivate);
+ if (ret)
+ goto out;
+
+ pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
+ peer_pa_tactivate_us = peer_pa_tactivate *
+ gran_to_us_table[peer_granularity - 1];
+
+ if (pa_tactivate_us > peer_pa_tactivate_us) {
+ u32 new_peer_pa_tactivate;
+
+ new_peer_pa_tactivate = pa_tactivate_us /
+ gran_to_us_table[peer_granularity - 1];
+ new_peer_pa_tactivate++;
+ ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ new_peer_pa_tactivate);
+ }
+
+out:
+ return ret;
+}
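
gran_to_us_table[] maps PA_GRANULARITY codes 1..6 to units of 1, 4, 8, 16, 32 and 100 us, so the two PA_TACTIVATE values are only comparable after conversion to microseconds. A worked example with assumed values (host granularity 3, PA_TACTIVATE 2; device granularity 5, PA_TACTIVATE 0):

    pa_tactivate_us      = 2 * 8;           /* host:   16 us */
    peer_pa_tactivate_us = 0 * 32;          /* device:  0 us */

    /* host exceeds device, so raise the device value: */
    new_peer_pa_tactivate = 16 / 32 + 1;    /* = 1, i.e. 32 us >= 16 us */
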
+
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
@@ -4975,6 +5196,9 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
/* set 1ms timeout for PA_TACTIVATE */
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
+ ufshcd_quirk_tune_host_pa_tactivate(hba);
}
/**
@@ -5027,9 +5251,11 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
__func__);
} else {
ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
- if (ret)
+ if (ret) {
dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
__func__, ret);
+ goto out;
+ }
}
/* set the state as operational after switching to desired gear */
@@ -5062,8 +5288,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
hba->is_init_prefetch = true;
/* Resume devfreq after UFS device is detected */
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_resume_device(hba->devfreq);
+ ufshcd_resume_clkscaling(hba);
out:
/*
@@ -5389,6 +5614,10 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
if (!head || list_empty(head))
goto out;
+ ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
+ if (ret)
+ return ret;
+
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
@@ -5410,7 +5639,10 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
}
}
- ret = ufshcd_vops_setup_clocks(hba, on);
+ ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
+ if (ret)
+ return ret;
+
out:
if (ret) {
list_for_each_entry(clki, head, list) {
@@ -5500,8 +5732,6 @@ static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
if (!hba->vops)
return;
- ufshcd_vops_setup_clocks(hba, false);
-
ufshcd_vops_setup_regulators(hba, false);
ufshcd_vops_exit(hba);
@@ -5564,6 +5794,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
if (hba->is_powered) {
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
+ ufshcd_suspend_clkscaling(hba);
ufshcd_setup_clocks(hba, false);
ufshcd_setup_hba_vreg(hba, false);
hba->is_powered = false;
@@ -5577,19 +5808,19 @@ ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
0,
0,
0,
- SCSI_SENSE_BUFFERSIZE,
+ UFSHCD_REQ_SENSE_SIZE,
0};
char *buffer;
int ret;
- buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+ buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
goto out;
}
ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
- SCSI_SENSE_BUFFERSIZE, NULL,
+ UFSHCD_REQ_SENSE_SIZE, NULL,
msecs_to_jiffies(1000), 3, NULL, 0, RQF_PM);
if (ret)
pr_err("%s: failed with err %d\n", __func__, ret);
@@ -5766,7 +5997,6 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
!hba->dev_info.is_lu_power_on_wp) {
ret = ufshcd_setup_vreg(hba, true);
} else if (!ufshcd_is_ufs_dev_active(hba)) {
- ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
if (!ret && !ufshcd_is_link_active(hba)) {
ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
if (ret)
@@ -5775,6 +6005,7 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
if (ret)
goto vccq_lpm;
}
+ ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
}
goto out;
@@ -5839,6 +6070,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_hold(hba, false);
hba->clk_gating.is_suspended = true;
+ ufshcd_suspend_clkscaling(hba);
+
if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
req_link_state == UIC_LINK_ACTIVE_STATE) {
goto disable_clks;
@@ -5846,12 +6079,12 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
(req_link_state == hba->uic_link_state))
- goto out;
+ goto enable_gating;
/* UFS device & link must be active before we enter in this function */
if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
ret = -EINVAL;
- goto out;
+ goto enable_gating;
}
if (ufshcd_is_runtime_pm(pm_op)) {
@@ -5888,15 +6121,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
disable_clks:
/*
- * The clock scaling needs access to controller registers. Hence, Wait
- * for pending clock scaling work to be done before clocks are
- * turned off.
- */
- if (ufshcd_is_clkscaling_enabled(hba)) {
- devfreq_suspend_device(hba->devfreq);
- hba->clk_scaling.window_start_t = 0;
- }
- /*
* Call vendor specific suspend callback. As these callbacks may access
* vendor specific host controller register space, call them while the
* host clocks are still ON.
@@ -5905,10 +6129,6 @@ disable_clks:
if (ret)
goto set_link_active;
- ret = ufshcd_vops_setup_clocks(hba, false);
- if (ret)
- goto vops_resume;
-
if (!ufshcd_is_link_active(hba))
ufshcd_setup_clocks(hba, false);
else
@@ -5925,9 +6145,8 @@ disable_clks:
ufshcd_hba_vreg_set_lpm(hba);
goto out;
-vops_resume:
- ufshcd_vops_resume(hba, pm_op);
set_link_active:
+ ufshcd_resume_clkscaling(hba);
ufshcd_vreg_set_hpm(hba);
if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
ufshcd_set_link_active(hba);
@@ -5937,6 +6156,7 @@ set_dev_active:
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
ufshcd_disable_auto_bkops(hba);
enable_gating:
+ ufshcd_resume_clkscaling(hba);
hba->clk_gating.is_suspended = false;
ufshcd_release(hba);
out:
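These suspend/resume hunks route all devfreq toggling through ufshcd_suspend_clkscaling() and ufshcd_resume_clkscaling(); the helpers themselves fall outside this excerpt. A plausible body, inferred from the open-coded call sites they replace:

static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	if (ufshcd_is_clkscaling_enabled(hba)) {
		devfreq_suspend_device(hba->devfreq);
		hba->clk_scaling.window_start_t = 0;
	}
}

static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_resume_device(hba->devfreq);
}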
@@ -6015,8 +6235,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_urgent_bkops(hba);
hba->clk_gating.is_suspended = false;
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_resume_device(hba->devfreq);
+ ufshcd_resume_clkscaling(hba);
/* Schedule clock gating in case of no access to UFS device yet */
ufshcd_release(hba);
@@ -6030,6 +6249,7 @@ disable_vreg:
ufshcd_vreg_set_lpm(hba);
disable_irq_and_vops_clks:
ufshcd_disable_irq(hba);
+ ufshcd_suspend_clkscaling(hba);
ufshcd_setup_clocks(hba, false);
out:
hba->pm_op_in_progress = 0;
@@ -6052,16 +6272,13 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
if (!hba || !hba->is_powered)
return 0;
- if (pm_runtime_suspended(hba->dev)) {
- if (hba->rpm_lvl == hba->spm_lvl)
- /*
- * There is possibility that device may still be in
- * active state during the runtime suspend.
- */
- if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
- hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
- goto out;
+ if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
+ hba->curr_dev_pwr_mode) &&
+ (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
+ hba->uic_link_state))
+ goto out;
+ if (pm_runtime_suspended(hba->dev)) {
/*
* UFS device and/or UFS link low power states during runtime
* suspend seems to be different than what is expected during
@@ -6092,7 +6309,10 @@ EXPORT_SYMBOL(ufshcd_system_suspend);
int ufshcd_system_resume(struct ufs_hba *hba)
{
- if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered || pm_runtime_suspended(hba->dev))
/*
* Let the runtime resume take care of resuming
* if runtime suspended.
@@ -6113,7 +6333,10 @@ EXPORT_SYMBOL(ufshcd_system_resume);
*/
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
- if (!hba || !hba->is_powered)
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered)
return 0;
return ufshcd_suspend(hba, UFS_RUNTIME_PM);
@@ -6143,10 +6366,13 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
*/
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
- if (!hba || !hba->is_powered)
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered)
return 0;
- else
- return ufshcd_resume(hba, UFS_RUNTIME_PM);
+
+ return ufshcd_resume(hba, UFS_RUNTIME_PM);
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
@@ -6198,11 +6424,7 @@ void ufshcd_remove(struct ufs_hba *hba)
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba, true);
- scsi_host_put(hba->host);
-
ufshcd_exit_clk_gating(hba);
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_remove_device(hba->devfreq);
ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -6324,15 +6546,47 @@ static int ufshcd_devfreq_target(struct device *dev,
{
int err = 0;
struct ufs_hba *hba = dev_get_drvdata(dev);
+ bool release_clk_hold = false;
+ unsigned long irq_flags;
if (!ufshcd_is_clkscaling_enabled(hba))
return -EINVAL;
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (ufshcd_eh_in_progress(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return 0;
+ }
+
+ if (ufshcd_is_clkgating_allowed(hba) &&
+ (hba->clk_gating.state != CLKS_ON)) {
+ if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+ /* hold the vote until the scaling work is completed */
+ hba->clk_gating.active_reqs++;
+ release_clk_hold = true;
+ hba->clk_gating.state = CLKS_ON;
+ } else {
+ /*
+ * Clock gating work is already running, so skip this
+ * scaling request to avoid a deadlock between the
+ * scaling work and the gating work.
+ */
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return 0;
+ }
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
if (*freq == UINT_MAX)
err = ufshcd_scale_clks(hba, true);
else if (*freq == 0)
err = ufshcd_scale_clks(hba, false);
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (release_clk_hold)
+ __ufshcd_release(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
return err;
}
@@ -6498,7 +6752,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
}
if (ufshcd_is_clkscaling_enabled(hba)) {
- hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
+ hba->devfreq = devm_devfreq_add_device(dev, &ufs_devfreq_profile,
"simple_ondemand", NULL);
if (IS_ERR(hba->devfreq)) {
dev_err(hba->dev, "Unable to register with devfreq %ld\n",
@@ -6507,18 +6761,19 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto out_remove_scsi_host;
}
/* Suspend devfreq until the UFS device is detected */
- devfreq_suspend_device(hba->devfreq);
- hba->clk_scaling.window_start_t = 0;
+ ufshcd_suspend_clkscaling(hba);
}
/* Hold auto suspend until async scan completes */
pm_runtime_get_sync(dev);
/*
- * The device-initialize-sequence hasn't been invoked yet.
- * Set the device to power-off state
+ * We assume that the boot stage (before the kernel) has not
+ * left the device in a sleep or power-down state; this lets
+ * ufshcd_probe_hba() avoid doing link startup twice.
*/
- ufshcd_set_ufs_dev_poweroff(hba);
+ ufshcd_set_ufs_dev_active(hba);
async_schedule(ufshcd_async_scan, hba);
@@ -6530,7 +6785,6 @@ exit_gating:
ufshcd_exit_clk_gating(hba);
out_disable:
hba->is_irq_enabled = false;
- scsi_host_put(host);
ufshcd_hba_exit(hba);
out_error:
return err;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 430bef111293..7d9ff22acfea 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -261,6 +261,12 @@ struct ufs_pwr_mode_info {
* @pwr_change_notify: called before and after a power mode change
* is carried out to allow vendor specific capabilities
* to be set.
+ * @setup_xfer_req: called before a transfer request is issued to the
+ * host controller, to allow vendor specific setup of the request
+ * @setup_task_mgmt: called before a task management request is issued,
+ * to allow vendor specific setup of the request
+ * @hibern8_notify: called before and after hibern8 enter/exit,
+ * to allow vendor specific configuration around the transition
* @suspend: called during host controller PM callback
* @resume: called during host controller PM callback
* @dbg_register_dump: used to dump controller debug information
@@ -273,7 +279,8 @@ struct ufs_hba_variant_ops {
u32 (*get_ufs_hci_version)(struct ufs_hba *);
int (*clk_scale_notify)(struct ufs_hba *, bool,
enum ufs_notify_change_status);
- int (*setup_clocks)(struct ufs_hba *, bool);
+ int (*setup_clocks)(struct ufs_hba *, bool,
+ enum ufs_notify_change_status);
int (*setup_regulators)(struct ufs_hba *, bool);
int (*hce_enable_notify)(struct ufs_hba *,
enum ufs_notify_change_status);
@@ -283,6 +290,10 @@ struct ufs_hba_variant_ops {
enum ufs_notify_change_status status,
struct ufs_pa_layer_attr *,
struct ufs_pa_layer_attr *);
+ void (*setup_xfer_req)(struct ufs_hba *, int, bool);
+ void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
+ void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
+ enum ufs_notify_change_status);
int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
void (*dbg_register_dump)(struct ufs_hba *hba);
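With the three new callbacks in place, a variant driver opts in by filling the corresponding members of its ops table; unset members stay NULL and the ufshcd_vops_*() wrappers further down treat them as optional. A sketch, with all ufs_acme_* symbols hypothetical:

static const struct ufs_hba_variant_ops ufs_acme_vops = {
	.name			= "acme",
	.setup_clocks		= ufs_acme_setup_clocks,
	.setup_xfer_req		= ufs_acme_setup_xfer_req,
	.setup_task_mgmt	= ufs_acme_setup_task_mgmt,
	.hibern8_notify		= ufs_acme_hibern8_notify,
};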
@@ -474,6 +485,12 @@ struct ufs_hba {
*/
#define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION UFS_BIT(5)
+ /*
+ * This quirk needs to be enabled if the host controller interprets
+ * the values of PRDTO and PRDTL in the UTRD as byte-granular.
+ */
+ #define UFSHCD_QUIRK_PRDT_BYTE_GRAN UFS_BIT(7)
+
unsigned int quirks; /* Deviations from standard UFSHCI spec. */
/* Device deviations from standard UFS device spec. */
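A host driver whose controller behaves this way would advertise the quirk before host initialization, typically from its init() vops callback. A sketch; ufs_acme_init is hypothetical:

static int ufs_acme_init(struct ufs_hba *hba)
{
	/* PRDTO and PRDTL in the UTRD are byte-granular on this HC. */
	hba->quirks |= UFSHCD_QUIRK_PRDT_BYTE_GRAN;
	return 0;
}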
@@ -755,10 +772,11 @@ static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
return 0;
}
-static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on)
+static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
+ enum ufs_notify_change_status status)
{
if (hba->vops && hba->vops->setup_clocks)
- return hba->vops->setup_clocks(hba, on);
+ return hba->vops->setup_clocks(hba, on, status);
return 0;
}
@@ -799,6 +817,28 @@ static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
return -ENOTSUPP;
}
+static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
+ bool is_scsi_cmd)
+{
+ if (hba->vops && hba->vops->setup_xfer_req)
+ return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
+}
+
+static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
+ int tag, u8 tm_function)
+{
+ if (hba->vops && hba->vops->setup_task_mgmt)
+ return hba->vops->setup_task_mgmt(hba, tag, tm_function);
+}
+
+static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
+ enum uic_cmd_dme cmd,
+ enum ufs_notify_change_status status)
+{
+ if (hba->vops && hba->vops->hibern8_notify)
+ return hba->vops->hibern8_notify(hba, cmd, status);
+}
+
static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
if (hba->vops && hba->vops->suspend)
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 9599741ff606..5d978867be57 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -83,6 +83,8 @@ enum {
MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000,
};
+#define UFS_MASK(mask, offset) ((mask) << (offset))
+
/* UFS Version 08h */
#define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0)
#define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16)
@@ -166,6 +168,7 @@ enum {
/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
#define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31)
#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
+#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK 0xF
/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
#define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31)
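The new UFS_MASK() macro merely shifts a field mask into position; the version fields above, for example, decompose as follows (the helper names are hypothetical):

static inline u16 ufs_major_version(u32 ver)
{
	return (ver & MAJOR_VERSION_NUM_MASK) >> 16;	/* UFS_MASK(0xFFFF, 16) */
}

static inline u16 ufs_minor_version(u32 ver)
{
	return ver & MINOR_VERSION_NUM_MASK;		/* UFS_MASK(0xFFFF, 0) */
}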
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index eff8b5675575..23129d7b2678 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -123,6 +123,7 @@
#define PA_MAXRXHSGEAR 0x1587
#define PA_RXHSUNTERMCAP 0x15A5
#define PA_RXLSTERMCAP 0x15A6
+#define PA_GRANULARITY 0x15AA
#define PA_PACPREQTIMEOUT 0x1590
#define PA_PACPREQEOBTIMEOUT 0x1591
#define PA_HIBERN8TIME 0x15A7
@@ -158,6 +159,9 @@
#define VS_DEBUGOMC 0xD09E
#define VS_POWERSTATE 0xD083
+#define PA_GRANULARITY_MIN_VAL 1
+#define PA_GRANULARITY_MAX_VAL 6
+
/* PHY Adapter Protocol Constants */
#define PA_MAXDATALANES 4
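PA_GRANULARITY and its bounds feed the PA_TACTIVATE quirk handling added to ufshcd.c. Reading and validating the attribute looks roughly like the sketch below; ufs_acme_read_granularity is hypothetical:

static int ufs_acme_read_granularity(struct ufs_hba *hba, u32 *gran)
{
	int ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), gran);

	/* Only granularity values 1..6 are defined. */
	if (!ret && (*gran < PA_GRANULARITY_MIN_VAL ||
		     *gran > PA_GRANULARITY_MAX_VAL))
		ret = -EINVAL;
	return ret;
}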
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index ff5de9a96643..9af7842b8178 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -92,7 +92,7 @@ static void ft_free_cmd(struct ft_cmd *cmd)
fp = cmd->req_frame;
lport = fr_dev(fp);
if (fr_seq(fp))
- lport->tt.seq_release(fr_seq(fp));
+ fc_seq_release(fr_seq(fp));
fc_frame_free(fp);
percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
ft_sess_put(sess); /* undo get from lookup at recv */
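The tcm_fc hunks that follow drop the lport->tt.* function-pointer indirection in favour of direct calls to the corresponding libfc helpers. In sketch form, a response path now reads (ft_send_resp_sketch is illustrative only):

static int ft_send_resp_sketch(struct fc_lport *lport, struct ft_cmd *cmd,
			       struct fc_frame *fp)
{
	int rc;

	cmd->seq = fc_seq_start_next(cmd->seq);	/* open the next sequence */
	rc = fc_seq_send(lport, cmd->seq, fp);	/* transmit the frame */
	if (!rc)
		fc_exch_done(cmd->seq);		/* complete the exchange */
	return rc;
}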
@@ -161,11 +161,11 @@ int ft_queue_status(struct se_cmd *se_cmd)
/*
* Send response.
*/
- cmd->seq = lport->tt.seq_start_next(cmd->seq);
+ cmd->seq = fc_seq_start_next(cmd->seq);
fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
- rc = lport->tt.seq_send(lport, cmd->seq, fp);
+ rc = fc_seq_send(lport, cmd->seq, fp);
if (rc) {
pr_info_ratelimited("%s: Failed to send response frame %p, "
"xid <0x%x>\n", __func__, fp, ep->xid);
@@ -177,7 +177,7 @@ int ft_queue_status(struct se_cmd *se_cmd)
se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
return -ENOMEM;
}
- lport->tt.exch_done(cmd->seq);
+ fc_exch_done(cmd->seq);
/*
* Drop the extra ACK_KREF reference taken by target_submit_cmd()
* ahead of ft_check_stop_free() -> transport_generic_free_cmd()
@@ -221,7 +221,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
memset(txrdy, 0, sizeof(*txrdy));
txrdy->ft_burst_len = htonl(se_cmd->data_length);
- cmd->seq = lport->tt.seq_start_next(cmd->seq);
+ cmd->seq = fc_seq_start_next(cmd->seq);
fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
@@ -242,7 +242,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
cmd->was_ddp_setup = 1;
}
}
- lport->tt.seq_send(lport, cmd->seq, fp);
+ fc_seq_send(lport, cmd->seq, fp);
return 0;
}
@@ -323,8 +323,8 @@ static void ft_send_resp_status(struct fc_lport *lport,
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
sp = fr_seq(fp);
if (sp) {
- lport->tt.seq_send(lport, sp, fp);
- lport->tt.exch_done(sp);
+ fc_seq_send(lport, sp, fp);
+ fc_exch_done(sp);
} else {
lport->tt.frame_send(lport, fp);
}
@@ -461,7 +461,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
cmd->se_cmd.map_tag = tag;
cmd->sess = sess;
- cmd->seq = lport->tt.seq_assign(lport, fp);
+ cmd->seq = fc_seq_assign(lport, fp);
if (!cmd->seq) {
percpu_ida_free(&se_sess->sess_tag_pool, tag);
goto busy;
@@ -563,7 +563,7 @@ static void ft_send_work(struct work_struct *work)
task_attr = TCM_SIMPLE_TAG;
}
- fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
+ fc_seq_set_resp(cmd->seq, ft_recv_seq, cmd);
cmd->se_cmd.tag = fc_seq_exch(cmd->seq)->rxid;
/*
* Use a single se_cmd->cmd_kref as we expect to release se_cmd
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 6f7c65abfe2a..1eb1f58e00e4 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -82,7 +82,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
ep = fc_seq_exch(cmd->seq);
lport = ep->lp;
- cmd->seq = lport->tt.seq_start_next(cmd->seq);
+ cmd->seq = fc_seq_start_next(cmd->seq);
remaining = se_cmd->data_length;
@@ -174,7 +174,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
f_ctl |= FC_FC_END_SEQ;
fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
FC_TYPE_FCP, f_ctl, fh_off);
- error = lport->tt.seq_send(lport, seq, fp);
+ error = fc_seq_send(lport, seq, fp);
if (error) {
pr_info_ratelimited("%s: Failed to send frame %p, "
"xid <0x%x>, remaining %zu, "