author    Linus Torvalds <torvalds@linux-foundation.org> 2018-12-29 01:48:06 +0300
committer Linus Torvalds <torvalds@linux-foundation.org> 2018-12-29 01:48:06 +0300
commit    938edb8a31b976c9a92eb0cd4ff481e93f76c1f1 (patch)
tree      0854d5f6859d51032f1d853eaa8ab0e8647fb0cb
parent    af7ddd8a627c62a835524b3f5b471edbbbcce025 (diff)
parent    da7903092b880b25971ca9103cb0b934a44ace2b (diff)
download  linux-938edb8a31b976c9a92eb0cd4ff481e93f76c1f1.tar.xz
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This is mostly an update of the usual drivers: smartpqi, lpfc, qedi,
  megaraid_sas, libsas, zfcp, mpt3sas, hisi_sas.

  Additionally, we have a pile of annotation, unused-variable and minor
  updates.

  The big API change is the update for Christoph's DMA rework, which
  includes removing the DISABLE_CLUSTERING flag.

  And finally there are a couple of target tree updates"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (259 commits)
  scsi: isci: request: mark expected switch fall-through
  scsi: isci: remote_node_context: mark expected switch fall-throughs
  scsi: isci: remote_device: Mark expected switch fall-throughs
  scsi: isci: phy: Mark expected switch fall-through
  scsi: iscsi: Capture iscsi debug messages using tracepoints
  scsi: myrb: Mark expected switch fall-throughs
  scsi: megaraid: fix out-of-bound array accesses
  scsi: mpt3sas: mpt3sas_scsih: Mark expected switch fall-through
  scsi: fcoe: remove set but not used variable 'port'
  scsi: smartpqi: call pqi_free_interrupts() in pqi_shutdown()
  scsi: smartpqi: fix build warnings
  scsi: smartpqi: update driver version
  scsi: smartpqi: add ofa support
  scsi: smartpqi: increase fw status register read timeout
  scsi: smartpqi: bump driver version
  scsi: smartpqi: add smp_utils support
  scsi: smartpqi: correct lun reset issues
  scsi: smartpqi: correct volume status
  scsi: smartpqi: do not offline disks for transient did no connect conditions
  scsi: smartpqi: allow for larger raid maps
  ...
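The use_clustering removal follows one mechanical pattern throughout the
patch below: drivers that set ENABLE_CLUSTERING simply lose the line
(segment merging is now always allowed), while drivers that set
DISABLE_CLUSTERING express the same restriction through the template's
dma_boundary field, as the simscsi.c hunk shows. A minimal sketch of the
resulting template shape (example_template is a hypothetical driver, not
part of this series):

    #include <scsi/scsi_host.h>

    /* A driver that must not merge adjacent scatter/gather segments now
     * caps every segment at one page via dma_boundary instead of setting
     * the removed use_clustering flag. */
    static struct scsi_host_template example_template = {
            .name           = "example",
            .this_id        = -1,
            .sg_tablesize   = SG_ALL,
            /* was: .use_clustering = DISABLE_CLUSTERING, */
            .dma_boundary   = PAGE_SIZE - 1,
    };

Drivers that previously used ENABLE_CLUSTERING need no replacement at
all; with the flag gone, merging is the default.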
-rw-r--r-- Documentation/devicetree/bindings/ufs/cdns,ufshc.txt | 31
-rw-r--r-- Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt | 6
-rw-r--r-- Documentation/scsi/scsi_mid_low_api.txt | 2
-rw-r--r-- arch/ia64/hp/sim/simscsi.c | 2
-rw-r--r-- block/blk-merge.c | 18
-rw-r--r-- block/blk-settings.c | 3
-rw-r--r-- block/blk-sysfs.c | 5
-rw-r--r-- drivers/firewire/sbp2.c | 1
-rw-r--r-- drivers/infiniband/ulp/iser/iscsi_iser.c | 1
-rw-r--r-- drivers/infiniband/ulp/srp/ib_srp.c | 1
-rw-r--r-- drivers/infiniband/ulp/srpt/ib_srpt.c | 8
-rw-r--r-- drivers/message/fusion/mptfc.c | 1
-rw-r--r-- drivers/message/fusion/mptsas.c | 1
-rw-r--r-- drivers/message/fusion/mptspi.c | 1
-rw-r--r-- drivers/s390/scsi/zfcp_aux.c | 82
-rw-r--r-- drivers/s390/scsi/zfcp_dbf.c | 25
-rw-r--r-- drivers/s390/scsi/zfcp_dbf.h | 14
-rw-r--r-- drivers/s390/scsi/zfcp_def.h | 113
-rw-r--r-- drivers/s390/scsi/zfcp_erp.c | 345
-rw-r--r-- drivers/s390/scsi/zfcp_ext.h | 9
-rw-r--r-- drivers/s390/scsi/zfcp_fc.c | 48
-rw-r--r-- drivers/s390/scsi/zfcp_fc.h | 21
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.c | 51
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.h | 4
-rw-r--r-- drivers/s390/scsi/zfcp_qdio.c | 11
-rw-r--r-- drivers/s390/scsi/zfcp_qdio.h | 9
-rw-r--r-- drivers/s390/scsi/zfcp_reqlist.h | 2
-rw-r--r-- drivers/s390/scsi/zfcp_scsi.c | 15
-rw-r--r-- drivers/scsi/3w-9xxx.c | 1
-rw-r--r-- drivers/scsi/3w-sas.c | 1
-rw-r--r-- drivers/scsi/3w-xxxx.c | 3
-rw-r--r-- drivers/scsi/53c700.c | 1
-rw-r--r-- drivers/scsi/BusLogic.c | 2
-rw-r--r-- drivers/scsi/a100u2w.c | 1
-rw-r--r-- drivers/scsi/a2091.c | 2
-rw-r--r-- drivers/scsi/a3000.c | 1
-rw-r--r-- drivers/scsi/aacraid/aachba.c | 5
-rw-r--r-- drivers/scsi/aacraid/aacraid.h | 5
-rw-r--r-- drivers/scsi/aacraid/commctrl.c | 5
-rw-r--r-- drivers/scsi/aacraid/commsup.c | 19
-rw-r--r-- drivers/scsi/aacraid/dpcsup.c | 19
-rw-r--r-- drivers/scsi/aacraid/linit.c | 4
-rw-r--r-- drivers/scsi/aacraid/src.c | 2
-rw-r--r-- drivers/scsi/advansys.c | 12
-rw-r--r-- drivers/scsi/aha152x.c | 2
-rw-r--r-- drivers/scsi/aha1542.c | 127
-rw-r--r-- drivers/scsi/aha1740.c | 1
-rw-r--r-- drivers/scsi/aic7xxx/aic79xx_osm.c | 1
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_osm.c | 1
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_hwi.c | 3
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_init.c | 1
-rw-r--r-- drivers/scsi/arcmsr/arcmsr_hba.c | 9
-rw-r--r-- drivers/scsi/arm/acornscsi.c | 2
-rw-r--r-- drivers/scsi/arm/arxescsi.c | 2
-rw-r--r-- drivers/scsi/arm/cumana_1.c | 2
-rw-r--r-- drivers/scsi/arm/cumana_2.c | 1
-rw-r--r-- drivers/scsi/arm/eesox.c | 1
-rw-r--r-- drivers/scsi/arm/oak.c | 2
-rw-r--r-- drivers/scsi/arm/powertec.c | 1
-rw-r--r-- drivers/scsi/atari_scsi.c | 2
-rw-r--r-- drivers/scsi/atp870u.c | 1
-rw-r--r-- drivers/scsi/be2iscsi/be_main.c | 9
-rw-r--r-- drivers/scsi/bfa/bfa_ioc.c | 4
-rw-r--r-- drivers/scsi/bfa/bfad.c | 18
-rw-r--r-- drivers/scsi/bfa/bfad_im.c | 2
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 1
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_hwi.c | 2
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_iscsi.c | 1
-rw-r--r-- drivers/scsi/csiostor/csio_init.c | 3
-rw-r--r-- drivers/scsi/csiostor/csio_scsi.c | 2
-rw-r--r-- drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 2
-rw-r--r-- drivers/scsi/cxgbi/cxgb4i/Kconfig | 4
-rw-r--r-- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 2
-rw-r--r-- drivers/scsi/cxlflash/main.c | 1
-rw-r--r-- drivers/scsi/dc395x.c | 2
-rw-r--r-- drivers/scsi/dmx3191d.c | 2
-rw-r--r-- drivers/scsi/dpt_i2o.c | 13
-rw-r--r-- drivers/scsi/esas2r/esas2r_init.c | 49
-rw-r--r-- drivers/scsi/esas2r/esas2r_main.c | 1
-rw-r--r-- drivers/scsi/esp_scsi.c | 1
-rw-r--r-- drivers/scsi/fcoe/fcoe.c | 5
-rw-r--r-- drivers/scsi/fnic/fnic_main.c | 1
-rw-r--r-- drivers/scsi/fnic/fnic_trace.c | 3
-rw-r--r-- drivers/scsi/g_NCR5380.c | 2
-rw-r--r-- drivers/scsi/gdth.c | 1
-rw-r--r-- drivers/scsi/gvp11.c | 2
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas.h | 13
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_main.c | 203
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 25
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 74
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 322
-rw-r--r-- drivers/scsi/hosts.c | 6
-rw-r--r-- drivers/scsi/hpsa.c | 6
-rw-r--r-- drivers/scsi/hptiop.c | 11
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvfc.c | 1
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvscsi.c | 1
-rw-r--r-- drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 8
-rw-r--r-- drivers/scsi/imm.c | 1
-rw-r--r-- drivers/scsi/initio.c | 3
-rw-r--r-- drivers/scsi/ipr.c | 1
-rw-r--r-- drivers/scsi/ips.c | 10
-rw-r--r-- drivers/scsi/ips.h | 9
-rw-r--r-- drivers/scsi/isci/init.c | 20
-rw-r--r-- drivers/scsi/isci/phy.c | 1
-rw-r--r-- drivers/scsi/isci/remote_device.c | 4
-rw-r--r-- drivers/scsi/isci/remote_node_context.c | 4
-rw-r--r-- drivers/scsi/isci/request.c | 2
-rw-r--r-- drivers/scsi/iscsi_tcp.c | 6
-rw-r--r-- drivers/scsi/libfc/fc_rport.c | 3
-rw-r--r-- drivers/scsi/libiscsi.c | 10
-rw-r--r-- drivers/scsi/libiscsi_tcp.c | 4
-rw-r--r-- drivers/scsi/libsas/Makefile | 3
-rw-r--r-- drivers/scsi/libsas/sas_ata.c | 26
-rw-r--r-- drivers/scsi/libsas/sas_discover.c | 33
-rw-r--r-- drivers/scsi/libsas/sas_dump.c | 63
-rw-r--r-- drivers/scsi/libsas/sas_dump.h | 29
-rw-r--r-- drivers/scsi/libsas/sas_event.c | 1
-rw-r--r-- drivers/scsi/libsas/sas_expander.c | 236
-rw-r--r-- drivers/scsi/libsas/sas_init.c | 10
-rw-r--r-- drivers/scsi/libsas/sas_internal.h | 16
-rw-r--r-- drivers/scsi/libsas/sas_phy.c | 8
-rw-r--r-- drivers/scsi/libsas/sas_port.c | 23
-rw-r--r-- drivers/scsi/libsas/sas_scsi_host.c | 123
-rw-r--r-- drivers/scsi/libsas/sas_task.c | 10
-rw-r--r-- drivers/scsi/lpfc/lpfc.h | 29
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 230
-rw-r--r-- drivers/scsi/lpfc/lpfc_bsg.c | 200
-rw-r--r-- drivers/scsi/lpfc/lpfc_bsg.h | 38
-rw-r--r-- drivers/scsi/lpfc/lpfc_crtn.h | 5
-rw-r--r-- drivers/scsi/lpfc/lpfc_ct.c | 261
-rw-r--r-- drivers/scsi/lpfc/lpfc_debugfs.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_disc.h | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 468
-rw-r--r-- drivers/scsi/lpfc/lpfc_hbadisc.c | 224
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw.h | 1
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw4.h | 80
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 273
-rw-r--r-- drivers/scsi/lpfc/lpfc_mbox.c | 44
-rw-r--r-- drivers/scsi/lpfc/lpfc_mem.c | 6
-rw-r--r-- drivers/scsi/lpfc/lpfc_nportdisc.c | 81
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvme.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 145
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.h | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 309
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.h | 6
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli4.h | 14
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_vport.c | 4
-rw-r--r-- drivers/scsi/mac53c94.c | 2
-rw-r--r-- drivers/scsi/mac_esp.c | 2
-rw-r--r-- drivers/scsi/mac_scsi.c | 2
-rw-r--r-- drivers/scsi/megaraid.c | 1
-rw-r--r-- drivers/scsi/megaraid/megaraid_mbox.c | 10
-rw-r--r-- drivers/scsi/megaraid/megaraid_mm.c | 3
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas.h | 74
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 447
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fp.c | 24
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fusion.c | 470
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fusion.h | 26
-rw-r--r-- drivers/scsi/mesh.c | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpi/mpi2.h | 17
-rw-r--r-- drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h | 94
-rw-r--r-- drivers/scsi/mpt3sas/mpi/mpi2_image.h | 506
-rw-r--r-- drivers/scsi/mpt3sas/mpi/mpi2_init.h | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpi/mpi2_ioc.h | 359
-rw-r--r-- drivers/scsi/mpt3sas/mpi/mpi2_pci.h | 11
-rw-r--r-- drivers/scsi/mpt3sas/mpi/mpi2_raid.h | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpi/mpi2_sas.h | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpi/mpi2_tool.h | 72
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.c | 145
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.h | 14
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_config.c | 29
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_ctl.c | 21
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 72
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_transport.c | 91
-rw-r--r-- drivers/scsi/mvme147.c | 1
-rw-r--r-- drivers/scsi/mvsas/mv_init.c | 1
-rw-r--r-- drivers/scsi/mvumi.c | 3
-rw-r--r-- drivers/scsi/myrb.c | 3
-rw-r--r-- drivers/scsi/ncr53c8xx.c | 1
-rw-r--r-- drivers/scsi/nsp32.c | 2
-rw-r--r-- drivers/scsi/pcmcia/nsp_cs.c | 2
-rw-r--r-- drivers/scsi/pcmcia/qlogic_stub.c | 2
-rw-r--r-- drivers/scsi/pcmcia/sym53c500_cs.c | 1
-rw-r--r-- drivers/scsi/pm8001/pm8001_init.c | 1
-rw-r--r-- drivers/scsi/pmcraid.c | 117
-rw-r--r-- drivers/scsi/ppa.c | 1
-rw-r--r-- drivers/scsi/ps3rom.c | 1
-rw-r--r-- drivers/scsi/qedf/qedf_main.c | 4
-rw-r--r-- drivers/scsi/qedi/qedi.h | 7
-rw-r--r-- drivers/scsi/qedi/qedi_iscsi.c | 1
-rw-r--r-- drivers/scsi/qedi/qedi_main.c | 87
-rw-r--r-- drivers/scsi/qedi/qedi_version.h | 4
-rw-r--r-- drivers/scsi/qla1280.c | 53
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 10
-rw-r--r-- drivers/scsi/qla2xxx/qla_mid.c | 1
-rw-r--r-- drivers/scsi/qla2xxx/qla_nvme.c | 16
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 117
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.c | 22
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.h | 8
-rw-r--r-- drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r-- drivers/scsi/qla2xxx/tcm_qla2xxx.c | 64
-rw-r--r-- drivers/scsi/qla2xxx/tcm_qla2xxx.h | 3
-rw-r--r-- drivers/scsi/qla4xxx/ql4_os.c | 11
-rw-r--r-- drivers/scsi/qlogicfas.c | 2
-rw-r--r-- drivers/scsi/qlogicpti.c | 1
-rw-r--r-- drivers/scsi/scsi_debug.c | 8
-rw-r--r-- drivers/scsi/scsi_lib.c | 6
-rw-r--r-- drivers/scsi/scsi_transport_iscsi.c | 34
-rw-r--r-- drivers/scsi/sgiwd93.c | 2
-rw-r--r-- drivers/scsi/smartpqi/smartpqi.h | 216
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_init.c | 1558
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_sas_transport.c | 164
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_sis.c | 15
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_sis.h | 1
-rw-r--r-- drivers/scsi/snic/snic_main.c | 1
-rw-r--r-- drivers/scsi/snic/snic_trc.c | 3
-rw-r--r-- drivers/scsi/stex.c | 18
-rw-r--r-- drivers/scsi/storvsc_drv.c | 1
-rw-r--r-- drivers/scsi/sun3_scsi.c | 2
-rw-r--r-- drivers/scsi/sun_esp.c | 7
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_glue.c | 5
-rw-r--r-- drivers/scsi/ufs/Kconfig | 8
-rw-r--r-- drivers/scsi/ufs/Makefile | 1
-rw-r--r-- drivers/scsi/ufs/cdns-pltfrm.c | 148
-rw-r--r-- drivers/scsi/ufs/ufs.h | 18
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 104
-rw-r--r-- drivers/scsi/ufs/ufshcd.h | 2
-rw-r--r-- drivers/scsi/virtio_scsi.c | 53
-rw-r--r-- drivers/scsi/vmw_pvscsi.c | 1
-rw-r--r-- drivers/scsi/wd719x.c | 136
-rw-r--r-- drivers/scsi/wd719x.h | 1
-rw-r--r-- drivers/scsi/xen-scsifront.c | 3
-rw-r--r-- drivers/staging/rts5208/rtsx.c | 6
-rw-r--r-- drivers/staging/unisys/visorhba/visorhba_main.c | 1
-rw-r--r-- drivers/target/iscsi/iscsi_target.c | 11
-rw-r--r-- drivers/target/iscsi/iscsi_target_configfs.c | 11
-rw-r--r-- drivers/target/iscsi/iscsi_target_erl1.c | 28
-rw-r--r-- drivers/target/loopback/tcm_loop.c | 10
-rw-r--r-- drivers/target/sbp/sbp_target.c | 8
-rw-r--r-- drivers/target/target_core_alua.c | 6
-rw-r--r-- drivers/target/target_core_configfs.c | 157
-rw-r--r-- drivers/target/target_core_device.c | 111
-rw-r--r-- drivers/target/target_core_fabric_configfs.c | 2
-rw-r--r-- drivers/target/target_core_internal.h | 2
-rw-r--r-- drivers/target/target_core_pr.c | 90
-rw-r--r-- drivers/target/target_core_pscsi.c | 50
-rw-r--r-- drivers/target/target_core_spc.c | 28
-rw-r--r-- drivers/target/target_core_stat.c | 34
-rw-r--r-- drivers/target/target_core_tmr.c | 56
-rw-r--r-- drivers/target/target_core_tpg.c | 23
-rw-r--r-- drivers/target/target_core_transport.c | 416
-rw-r--r-- drivers/target/target_core_ua.c | 4
-rw-r--r-- drivers/target/target_core_user.c | 2
-rw-r--r-- drivers/target/target_core_xcopy.c | 13
-rw-r--r-- drivers/target/tcm_fc/tfc_conf.c | 8
-rw-r--r-- drivers/usb/gadget/function/f_tcm.c | 8
-rw-r--r-- drivers/usb/image/microtek.c | 1
-rw-r--r-- drivers/usb/storage/scsiglue.c | 7
-rw-r--r-- drivers/usb/storage/uas.c | 1
-rw-r--r-- drivers/vhost/scsi.c | 8
-rw-r--r-- drivers/xen/xen-scsiback.c | 8
-rw-r--r-- include/linux/blkdev.h | 6
-rw-r--r-- include/linux/libata.h | 2
-rw-r--r-- include/scsi/scsi_host.h | 20
-rw-r--r-- include/target/target_core_base.h | 25
-rw-r--r-- include/target/target_core_fabric.h | 25
-rw-r--r-- include/trace/events/iscsi.h | 107
269 files changed, 7984 insertions, 3844 deletions
diff --git a/Documentation/devicetree/bindings/ufs/cdns,ufshc.txt b/Documentation/devicetree/bindings/ufs/cdns,ufshc.txt
new file mode 100644
index 000000000000..a04a4989ec7f
--- /dev/null
+++ b/Documentation/devicetree/bindings/ufs/cdns,ufshc.txt
@@ -0,0 +1,31 @@
+* Cadence Universal Flash Storage (UFS) Controller
+
+UFS nodes are defined to describe on-chip UFS host controllers.
+Each UFS controller instance should have its own node.
+Please see the ufshcd-pltfrm.txt for a list of all available properties.
+
+Required properties:
+- compatible : Compatible list, contains the following controller:
+ "cdns,ufshc"
+ complemented with the JEDEC version:
+ "jedec,ufs-2.0"
+
+- reg : Address and length of the UFS register set.
+- interrupts : One interrupt mapping.
+- freq-table-hz : Clock frequency table.
+ See the ufshcd-pltfrm.txt for details.
+- clocks : List of phandle and clock specifier pairs.
+- clock-names : List of clock input name strings sorted in the same
+ order as the clocks property. "core_clk" is mandatory.
+ Depending on the type of the PHY,
+ the "phy_clk" clock can also be added, if needed.
+
+Example:
+ ufs@fd030000 {
+ compatible = "cdns,ufshc", "jedec,ufs-2.0";
+ reg = <0xfd030000 0x10000>;
+ interrupts = <0 1 IRQ_TYPE_LEVEL_HIGH>;
+ freq-table-hz = <0 0>, <0 0>;
+ clocks = <&ufs_core_clk>, <&ufs_phy_clk>;
+ clock-names = "core_clk", "phy_clk";
+ };
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index 2df00524bd21..8cf59452c675 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -33,6 +33,12 @@ Optional properties:
- clocks : List of phandle and clock specifier pairs
- clock-names : List of clock input name strings sorted in the same
order as the clocks property.
+ "ref_clk" indicates reference clock frequency.
+ UFS host supplies reference clock to UFS device and UFS device
+ specification allows host to provide one of the 4 frequencies (19.2 MHz,
+ 26 MHz, 38.4 MHz, 52 MHz) for reference clock. This "ref_clk" entry is
+ parsed and used to update the reference clock setting in device.
+ Defaults to 26 MHz (as per specification) if not specified by host.
- freq-table-hz : Array of <min max> operating frequencies stored in the same
order as the clocks property. If this property is not
defined or a value in the array is "0" then it is assumed
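Given the fixed set of legal reference-clock rates above, a platform
driver consuming "ref_clk" would validate the rate from the device tree
before programming the device. A hedged sketch under that assumption
(ufs_ref_clk_hz and ufs_validate_ref_clk are illustrative names, not
symbols from this patch):

    #include <linux/errno.h>
    #include <linux/kernel.h>

    /* The UFS specification allows exactly these four device reference
     * clock rates; anything else from the board is an error. */
    static const unsigned long ufs_ref_clk_hz[] = {
            19200000, 26000000, 38400000, 52000000,
    };

    static int ufs_validate_ref_clk(unsigned long rate)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(ufs_ref_clk_hz); i++)
                    if (ufs_ref_clk_hz[i] == rate)
                            return i; /* index to program into the device */
            return -EINVAL;
    }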
diff --git a/Documentation/scsi/scsi_mid_low_api.txt b/Documentation/scsi/scsi_mid_low_api.txt
index 177c031763c0..c1dd4939f4ae 100644
--- a/Documentation/scsi/scsi_mid_low_api.txt
+++ b/Documentation/scsi/scsi_mid_low_api.txt
@@ -1098,8 +1098,6 @@ of interest:
unchecked_isa_dma - 1=>only use bottom 16 MB of ram (ISA DMA addressing
restriction), 0=>can use full 32 bit (or better) DMA
address space
- use_clustering - 1=>SCSI commands in mid level's queue can be merged,
- 0=>disallow SCSI command merging
no_async_abort - 1=>Asynchronous aborts are not supported
0=>Timed-out commands will be aborted asynchronously
hostt - pointer to driver's struct scsi_host_template from which
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index 7e1426e76d96..f86844fc0725 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -347,7 +347,7 @@ static struct scsi_host_template driver_template = {
.sg_tablesize = SG_ALL,
.max_sectors = 1024,
.cmd_per_lun = SIMSCSI_REQ_QUEUE_LEN,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
};
static int __init
diff --git a/block/blk-merge.c b/block/blk-merge.c
index e7f1c6cf0167..71e9ac03f621 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -195,7 +195,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
goto split;
}
- if (bvprvp && blk_queue_cluster(q)) {
+ if (bvprvp) {
if (seg_size + bv.bv_len > queue_max_segment_size(q))
goto new_segment;
if (!biovec_phys_mergeable(q, bvprvp, &bv))
@@ -295,7 +295,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
bool no_sg_merge)
{
struct bio_vec bv, bvprv = { NULL };
- int cluster, prev = 0;
+ int prev = 0;
unsigned int seg_size, nr_phys_segs;
struct bio *fbio, *bbio;
struct bvec_iter iter;
@@ -313,7 +313,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
}
fbio = bio;
- cluster = blk_queue_cluster(q);
seg_size = 0;
nr_phys_segs = 0;
for_each_bio(bio) {
@@ -325,7 +324,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
if (no_sg_merge)
goto new_segment;
- if (prev && cluster) {
+ if (prev) {
if (seg_size + bv.bv_len
> queue_max_segment_size(q))
goto new_segment;
@@ -395,9 +394,6 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
{
struct bio_vec end_bv = { NULL }, nxt_bv;
- if (!blk_queue_cluster(q))
- return 0;
-
if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
queue_max_segment_size(q))
return 0;
@@ -414,12 +410,12 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
struct scatterlist *sglist, struct bio_vec *bvprv,
- struct scatterlist **sg, int *nsegs, int *cluster)
+ struct scatterlist **sg, int *nsegs)
{
int nbytes = bvec->bv_len;
- if (*sg && *cluster) {
+ if (*sg) {
if ((*sg)->length + nbytes > queue_max_segment_size(q))
goto new_segment;
if (!biovec_phys_mergeable(q, bvprv, bvec))
@@ -465,12 +461,12 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
{
struct bio_vec bvec, bvprv = { NULL };
struct bvec_iter iter;
- int cluster = blk_queue_cluster(q), nsegs = 0;
+ int nsegs = 0;
for_each_bio(bio)
bio_for_each_segment(bvec, bio, iter)
__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
- &nsegs, &cluster);
+ &nsegs);
return nsegs;
}
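With the cluster flag gone, the merge test repeated in the hunks above
reduces to two unconditional checks. A condensed sketch of that decision
(can_merge_bvecs is an illustrative helper, not a kernel symbol;
biovec_phys_mergeable() is block-layer internal, declared in block/blk.h):

    #include <linux/blkdev.h>

    /* Two adjacent bio_vecs may share one scatterlist segment only if
     * the combined length still fits the queue's segment-size limit and
     * the vecs are physically mergeable. */
    static bool can_merge_bvecs(struct request_queue *q,
                                unsigned int seg_size,
                                struct bio_vec *bvprv, struct bio_vec *bv)
    {
            if (seg_size + bv->bv_len > queue_max_segment_size(q))
                    return false;
            return biovec_phys_mergeable(q, bvprv, bv);
    }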
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 3abe831e92c8..3e7038e475ee 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -56,7 +56,6 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->alignment_offset = 0;
lim->io_opt = 0;
lim->misaligned = 0;
- lim->cluster = 1;
lim->zoned = BLK_ZONED_NONE;
}
EXPORT_SYMBOL(blk_set_default_limits);
@@ -547,8 +546,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->io_min = max(t->io_min, b->io_min);
t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
- t->cluster &= b->cluster;
-
/* Physical block size a multiple of the logical block size? */
if (t->physical_block_size & (t->logical_block_size - 1)) {
t->physical_block_size = t->logical_block_size;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 0619c8922893..590d1ef2f961 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -132,10 +132,7 @@ static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *
static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
- if (blk_queue_cluster(q))
- return queue_var_show(queue_max_segment_size(q), (page));
-
- return queue_var_show(PAGE_SIZE, (page));
+ return queue_var_show(queue_max_segment_size(q), (page));
}
static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 6bac03999fd4..09b845e90114 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1610,7 +1610,6 @@ static struct scsi_host_template scsi_driver_template = {
.eh_abort_handler = sbp2_scsi_abort,
.this_id = -1,
.sg_tablesize = SG_ALL,
- .use_clustering = ENABLE_CLUSTERING,
.can_queue = 1,
.sdev_attrs = sbp2_scsi_sysfs_attrs,
};
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 3fecd87c9f2b..8c707accd148 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -997,7 +997,6 @@ static struct scsi_host_template iscsi_iser_sht = {
.eh_device_reset_handler= iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
.target_alloc = iscsi_target_alloc,
- .use_clustering = ENABLE_CLUSTERING,
.slave_alloc = iscsi_iser_slave_alloc,
.proc_name = "iscsi_iser",
.this_id = -1,
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index eed0eb3bb04c..d27fe970ceba 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -3215,7 +3215,6 @@ static struct scsi_host_template srp_template = {
.can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
.this_id = -1,
.cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = srp_host_attrs,
.track_queue_depth = 1,
};
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 2357aa727dcf..41ee1f263bd6 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -3147,11 +3147,6 @@ static int srpt_check_false(struct se_portal_group *se_tpg)
return 0;
}
-static char *srpt_get_fabric_name(void)
-{
- return "srpt";
-}
-
static struct srpt_port *srpt_tpg_to_sport(struct se_portal_group *tpg)
{
return tpg->se_tpg_wwn->priv;
@@ -3678,8 +3673,7 @@ static struct configfs_attribute *srpt_wwn_attrs[] = {
static const struct target_core_fabric_ops srpt_template = {
.module = THIS_MODULE,
- .name = "srpt",
- .get_fabric_name = srpt_get_fabric_name,
+ .fabric_name = "srpt",
.tpg_get_wwn = srpt_get_fabric_wwn,
.tpg_get_tag = srpt_get_tag,
.tpg_check_demo_mode = srpt_check_false,
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index b15fdc626fb8..4314a3352b96 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -129,7 +129,6 @@ static struct scsi_host_template mptfc_driver_template = {
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
.cmd_per_lun = 7,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = mptscsih_host_attrs,
};
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 9b404fc69c90..612cb5bc1333 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1992,7 +1992,6 @@ static struct scsi_host_template mptsas_driver_template = {
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
.cmd_per_lun = 7,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = mptscsih_host_attrs,
.no_write_same = 1,
};
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 9a336a161d9f..7172b0b16bdd 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -848,7 +848,6 @@ static struct scsi_host_template mptspi_driver_template = {
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
.cmd_per_lun = 7,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = mptscsih_host_attrs,
};
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 94f4d8fe85e0..9cf30d124b9e 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -4,7 +4,7 @@
*
* Module interface and handling of zfcp data structures.
*
- * Copyright IBM Corp. 2002, 2013
+ * Copyright IBM Corp. 2002, 2017
*/
/*
@@ -124,6 +124,9 @@ static int __init zfcp_module_init(void)
{
int retval = -ENOMEM;
+ if (zfcp_experimental_dix)
+ pr_warn("DIX is enabled. It is experimental and might cause problems\n");
+
zfcp_fsf_qtcb_cache = zfcp_cache_hw_align("zfcp_fsf_qtcb",
sizeof(struct fsf_qtcb));
if (!zfcp_fsf_qtcb_cache)
@@ -248,43 +251,36 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
{
- if (adapter->pool.erp_req)
- mempool_destroy(adapter->pool.erp_req);
- if (adapter->pool.scsi_req)
- mempool_destroy(adapter->pool.scsi_req);
- if (adapter->pool.scsi_abort)
- mempool_destroy(adapter->pool.scsi_abort);
- if (adapter->pool.qtcb_pool)
- mempool_destroy(adapter->pool.qtcb_pool);
- if (adapter->pool.status_read_req)
- mempool_destroy(adapter->pool.status_read_req);
- if (adapter->pool.sr_data)
- mempool_destroy(adapter->pool.sr_data);
- if (adapter->pool.gid_pn)
- mempool_destroy(adapter->pool.gid_pn);
+ mempool_destroy(adapter->pool.erp_req);
+ mempool_destroy(adapter->pool.scsi_req);
+ mempool_destroy(adapter->pool.scsi_abort);
+ mempool_destroy(adapter->pool.qtcb_pool);
+ mempool_destroy(adapter->pool.status_read_req);
+ mempool_destroy(adapter->pool.sr_data);
+ mempool_destroy(adapter->pool.gid_pn);
}
/**
* zfcp_status_read_refill - refill the long running status_read_requests
* @adapter: ptr to struct zfcp_adapter for which the buffers should be refilled
*
- * Returns: 0 on success, 1 otherwise
- *
- * if there are 16 or more status_read requests missing an adapter_reopen
- * is triggered
+ * Return:
+ * * 0 on success meaning at least one status read is pending
+ * * 1 if posting failed and not a single status read buffer is pending,
+ * also triggers adapter reopen recovery
*/
int zfcp_status_read_refill(struct zfcp_adapter *adapter)
{
- while (atomic_read(&adapter->stat_miss) > 0)
+ while (atomic_add_unless(&adapter->stat_miss, -1, 0))
if (zfcp_fsf_status_read(adapter->qdio)) {
+ atomic_inc(&adapter->stat_miss); /* undo add -1 */
if (atomic_read(&adapter->stat_miss) >=
adapter->stat_read_buf_num) {
zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
return 1;
}
break;
- } else
- atomic_dec(&adapter->stat_miss);
+ }
return 0;
}
@@ -542,45 +538,3 @@ err_out:
zfcp_ccw_adapter_put(adapter);
return ERR_PTR(retval);
}
-
-/**
- * zfcp_sg_free_table - free memory used by scatterlists
- * @sg: pointer to scatterlist
- * @count: number of scatterlist which are to be free'ed
- * the scatterlist are expected to reference pages always
- */
-void zfcp_sg_free_table(struct scatterlist *sg, int count)
-{
- int i;
-
- for (i = 0; i < count; i++, sg++)
- if (sg)
- free_page((unsigned long) sg_virt(sg));
- else
- break;
-}
-
-/**
- * zfcp_sg_setup_table - init scatterlist and allocate, assign buffers
- * @sg: pointer to struct scatterlist
- * @count: number of scatterlists which should be assigned with buffers
- * of size page
- *
- * Returns: 0 on success, -ENOMEM otherwise
- */
-int zfcp_sg_setup_table(struct scatterlist *sg, int count)
-{
- void *addr;
- int i;
-
- sg_init_table(sg, count);
- for (i = 0; i < count; i++, sg++) {
- addr = (void *) get_zeroed_page(GFP_KERNEL);
- if (!addr) {
- zfcp_sg_free_table(sg, i);
- return -ENOMEM;
- }
- sg_set_buf(sg, addr, PAGE_SIZE);
- }
- return 0;
-}
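The rewritten refill loop above replaces a read-then-decrement sequence
with atomic_add_unless(), so the miss counter can never be driven below
zero by concurrent callers, and a failed post returns its credit before
the reopen-threshold check. The same pattern in isolation (refill_all
and post_one are illustrative names, not from the patch):

    #include <linux/atomic.h>

    /* Consume one credit per successful post; on failure, give the
     * credit back so the counter still says how many posts are missing. */
    static int refill_all(atomic_t *missing, int (*post_one)(void))
    {
            while (atomic_add_unless(missing, -1, 0)) {
                    if (post_one()) {
                            atomic_inc(missing);    /* undo the -1 */
                            return 1;               /* caller decides on recovery */
                    }
            }
            return 0;       /* nothing was missing, or all posts succeeded */
    }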
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 3b368fcf13f4..dccdb41bed8c 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -63,7 +63,8 @@ void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
/**
* zfcp_dbf_hba_fsf_res - trace event for fsf responses
- * @tag: tag indicating which kind of unsolicited status has been received
+ * @tag: tag indicating which kind of FSF response has been received
+ * @level: trace level to be used for event
* @req: request for which a response was received
*/
void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
@@ -81,8 +82,8 @@ void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
rec->id = ZFCP_DBF_HBA_RES;
rec->fsf_req_id = req->req_id;
rec->fsf_req_status = req->status;
- rec->fsf_cmd = req->fsf_command;
- rec->fsf_seq_no = req->seq_no;
+ rec->fsf_cmd = q_head->fsf_command;
+ rec->fsf_seq_no = q_pref->req_seq_no;
rec->u.res.req_issued = req->issued;
rec->u.res.prot_status = q_pref->prot_status;
rec->u.res.fsf_status = q_head->fsf_status;
@@ -94,7 +95,7 @@ void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
FSF_STATUS_QUALIFIER_SIZE);
- if (req->fsf_command != FSF_QTCB_FCP_CMND) {
+ if (q_head->fsf_command != FSF_QTCB_FCP_CMND) {
rec->pl_len = q_head->log_length;
zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
rec->pl_len, "fsf_res", req->req_id);
@@ -127,7 +128,7 @@ void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
rec->id = ZFCP_DBF_HBA_USS;
rec->fsf_req_id = req->req_id;
rec->fsf_req_status = req->status;
- rec->fsf_cmd = req->fsf_command;
+ rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;
if (!srb)
goto log;
@@ -153,7 +154,7 @@ log:
/**
* zfcp_dbf_hba_bit_err - trace event for bit error conditions
- * @tag: tag indicating which kind of unsolicited status has been received
+ * @tag: tag indicating which kind of bit error unsolicited status was received
* @req: request which caused the bit_error condition
*/
void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
@@ -174,7 +175,7 @@ void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
rec->id = ZFCP_DBF_HBA_BIT;
rec->fsf_req_id = req->req_id;
rec->fsf_req_status = req->status;
- rec->fsf_cmd = req->fsf_command;
+ rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;
memcpy(&rec->u.be, &sr_buf->payload.bit_error,
sizeof(struct fsf_bit_error_payload));
@@ -224,6 +225,7 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
/**
* zfcp_dbf_hba_basic - trace event for basic adapter events
+ * @tag: identifier for event
* @adapter: pointer to struct zfcp_adapter
*/
void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
@@ -357,7 +359,7 @@ void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
rec->u.run.fsf_req_id = erp->fsf_req_id;
rec->u.run.rec_status = erp->status;
rec->u.run.rec_step = erp->step;
- rec->u.run.rec_action = erp->action;
+ rec->u.run.rec_action = erp->type;
if (erp->sdev)
rec->u.run.rec_count =
@@ -478,7 +480,8 @@ out:
/**
* zfcp_dbf_san_req - trace event for issued SAN request
* @tag: identifier for event
- * @fsf_req: request containing issued CT data
+ * @fsf: request containing issued CT or ELS data
+ * @d_id: N_Port_ID where SAN request is sent to
* d_id: destination ID
*/
void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
@@ -560,7 +563,7 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
/**
* zfcp_dbf_san_res - trace event for received SAN request
* @tag: identifier for event
- * @fsf_req: request containing issued CT data
+ * @fsf: request containing received CT or ELS data
*/
void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
{
@@ -580,7 +583,7 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
/**
* zfcp_dbf_san_in_els - trace event for incoming ELS
* @tag: identifier for event
- * @fsf_req: request containing issued CT data
+ * @fsf: request containing received ELS data
*/
void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
{
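These trace hunks all follow from struct zfcp_fsf_req losing its
fsf_command copy (see the zfcp_def.h diff further down): the command
code now lives only in the QTCB, and the only requests without a QTCB
are status-read buffers. A hedged sketch of that derivation
(example_fsf_cmd is illustrative, assuming zfcp_def.h context):

    /* Requests carrying a QTCB report the command from its header; the
     * only QTCB-less requests are unsolicited status reads. */
    static u32 example_fsf_cmd(const struct zfcp_fsf_req *req)
    {
            return req->qtcb ? req->qtcb->header.fsf_command
                             : FSF_QTCB_UNSOLICITED_STATUS;
    }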
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index d116c07ed77a..900c779cc39b 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -42,7 +42,8 @@ struct zfcp_dbf_rec_trigger {
* @fsf_req_id: request id for fsf requests
* @rec_status: status of the fsf request
* @rec_step: current step of the recovery action
- * rec_count: recovery counter
+ * @rec_action: ERP action type
+ * @rec_count: recoveries including retries for particular @rec_action
*/
struct zfcp_dbf_rec_running {
u64 fsf_req_id;
@@ -72,6 +73,7 @@ enum zfcp_dbf_rec_id {
* @adapter_status: current status of the adapter
* @port_status: current status of the port
* @lun_status: current status of the lun
+ * @u: record type specific data
* @u.trig: structure zfcp_dbf_rec_trigger
* @u.run: structure zfcp_dbf_rec_running
*/
@@ -126,6 +128,8 @@ struct zfcp_dbf_san {
* @prot_status_qual: protocol status qualifier
* @fsf_status: fsf status
* @fsf_status_qual: fsf status qualifier
+ * @port_handle: handle for port
+ * @lun_handle: handle for LUN
*/
struct zfcp_dbf_hba_res {
u64 req_issued;
@@ -158,6 +162,7 @@ struct zfcp_dbf_hba_uss {
* @ZFCP_DBF_HBA_RES: response trace record
* @ZFCP_DBF_HBA_USS: unsolicited status trace record
* @ZFCP_DBF_HBA_BIT: bit error trace record
+ * @ZFCP_DBF_HBA_BASIC: basic adapter event, only trace tag, no other data
*/
enum zfcp_dbf_hba_id {
ZFCP_DBF_HBA_RES = 1,
@@ -176,6 +181,9 @@ enum zfcp_dbf_hba_id {
* @fsf_seq_no: fsf sequence number
* @pl_len: length of payload stored as zfcp_dbf_pay
* @u: record type specific data
+ * @u.res: data for fsf responses
+ * @u.uss: data for unsolicited status buffer
+ * @u.be: data for bit error unsolicited status buffer
*/
struct zfcp_dbf_hba {
u8 id;
@@ -339,8 +347,8 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
zfcp_dbf_hba_fsf_resp_suppress(req)
? 5 : 1, req);
- } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
- (req->fsf_command == FSF_QTCB_OPEN_LUN)) {
+ } else if ((qtcb->header.fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
+ (qtcb->header.fsf_command == FSF_QTCB_OPEN_LUN)) {
zfcp_dbf_hba_fsf_resp("fs_open", 4, req);
} else if (qtcb->header.log_length) {
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 3396a47721a7..87d2f47a6990 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -4,7 +4,7 @@
*
* Global definitions for the zfcp device driver.
*
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2017
*/
#ifndef ZFCP_DEF_H
@@ -41,24 +41,16 @@
#include "zfcp_fc.h"
#include "zfcp_qdio.h"
-struct zfcp_reqlist;
-
-/********************* SCSI SPECIFIC DEFINES *********************************/
-#define ZFCP_SCSI_ER_TIMEOUT (10*HZ)
-
/********************* FSF SPECIFIC DEFINES *********************************/
/* ATTENTION: value must not be used by hardware */
#define FSF_QTCB_UNSOLICITED_STATUS 0x6305
-/* timeout value for "default timer" for fsf requests */
-#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
-
/*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/
/*
- * Note, the leftmost status byte is common among adapter, port
- * and unit
+ * Note, the leftmost 12 status bits (3 nibbles) are common among adapter, port
+ * and unit. This is a mask for bitwise 'and' with status values.
*/
#define ZFCP_COMMON_FLAGS 0xfff00000
@@ -97,49 +89,60 @@ struct zfcp_reqlist;
/************************* STRUCTURE DEFINITIONS *****************************/
-struct zfcp_fsf_req;
+/**
+ * enum zfcp_erp_act_type - Type of ERP action object.
+ * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery.
+ *
+ * Values must fit into u8 because of code dependencies:
+ * zfcp_dbf_rec_trig(), &zfcp_dbf_rec_trigger.want, &zfcp_dbf_rec_trigger.need;
+ * zfcp_dbf_rec_run_lvl(), zfcp_dbf_rec_run(), &zfcp_dbf_rec_running.rec_action.
+ */
+enum zfcp_erp_act_type {
+ ZFCP_ERP_ACTION_REOPEN_LUN = 1,
+ ZFCP_ERP_ACTION_REOPEN_PORT = 2,
+ ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
+ ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4,
+};
-/* holds various memory pools of an adapter */
-struct zfcp_adapter_mempool {
- mempool_t *erp_req;
- mempool_t *gid_pn_req;
- mempool_t *scsi_req;
- mempool_t *scsi_abort;
- mempool_t *status_read_req;
- mempool_t *sr_data;
- mempool_t *gid_pn;
- mempool_t *qtcb_pool;
+/*
+ * Values must fit into u16 because of code dependencies:
+ * zfcp_dbf_rec_run_lvl(), zfcp_dbf_rec_run(), zfcp_dbf_rec_run_wka(),
+ * &zfcp_dbf_rec_running.rec_step.
+ */
+enum zfcp_erp_steps {
+ ZFCP_ERP_STEP_UNINITIALIZED = 0x0000,
+ ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010,
+ ZFCP_ERP_STEP_PORT_CLOSING = 0x0100,
+ ZFCP_ERP_STEP_PORT_OPENING = 0x0800,
+ ZFCP_ERP_STEP_LUN_CLOSING = 0x1000,
+ ZFCP_ERP_STEP_LUN_OPENING = 0x2000,
};
struct zfcp_erp_action {
struct list_head list;
- int action; /* requested action code */
+ enum zfcp_erp_act_type type; /* requested action code */
struct zfcp_adapter *adapter; /* device which should be recovered */
struct zfcp_port *port;
struct scsi_device *sdev;
u32 status; /* recovery status */
- u32 step; /* active step of this erp action */
+ enum zfcp_erp_steps step; /* active step of this erp action */
unsigned long fsf_req_id;
struct timer_list timer;
};
-struct fsf_latency_record {
- u32 min;
- u32 max;
- u64 sum;
-};
-
-struct latency_cont {
- struct fsf_latency_record channel;
- struct fsf_latency_record fabric;
- u64 counter;
-};
-
-struct zfcp_latencies {
- struct latency_cont read;
- struct latency_cont write;
- struct latency_cont cmd;
- spinlock_t lock;
+/* holds various memory pools of an adapter */
+struct zfcp_adapter_mempool {
+ mempool_t *erp_req;
+ mempool_t *gid_pn_req;
+ mempool_t *scsi_req;
+ mempool_t *scsi_abort;
+ mempool_t *status_read_req;
+ mempool_t *sr_data;
+ mempool_t *gid_pn;
+ mempool_t *qtcb_pool;
};
struct zfcp_adapter {
@@ -220,6 +223,25 @@ struct zfcp_port {
unsigned int starget_id;
};
+struct zfcp_latency_record {
+ u32 min;
+ u32 max;
+ u64 sum;
+};
+
+struct zfcp_latency_cont {
+ struct zfcp_latency_record channel;
+ struct zfcp_latency_record fabric;
+ u64 counter;
+};
+
+struct zfcp_latencies {
+ struct zfcp_latency_cont read;
+ struct zfcp_latency_cont write;
+ struct zfcp_latency_cont cmd;
+ spinlock_t lock;
+};
+
/**
* struct zfcp_unit - LUN configured via zfcp sysfs
* @dev: struct device for sysfs representation and reference counting
@@ -287,9 +309,7 @@ static inline u64 zfcp_scsi_dev_lun(struct scsi_device *sdev)
* @qdio_req: qdio queue related values
* @completion: used to signal the completion of the request
* @status: status of the request
- * @fsf_command: FSF command issued
* @qtcb: associated QTCB
- * @seq_no: sequence number of this request
* @data: private data
* @timer: timer data of this request
* @erp_action: reference to erp action if request issued on behalf of ERP
@@ -304,9 +324,7 @@ struct zfcp_fsf_req {
struct zfcp_qdio_req qdio_req;
struct completion completion;
u32 status;
- u32 fsf_command;
struct fsf_qtcb *qtcb;
- u32 seq_no;
void *data;
struct timer_list timer;
struct zfcp_erp_action *erp_action;
@@ -321,4 +339,9 @@ int zfcp_adapter_multi_buffer_active(struct zfcp_adapter *adapter)
return atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_MB_ACT;
}
+static inline bool zfcp_fsf_req_is_status_read_buffer(struct zfcp_fsf_req *req)
+{
+ return req->qtcb == NULL;
+}
+
#endif /* ZFCP_DEF_H */
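The enum kernel-doc above records that action types must fit into u8 and
steps into u16 because the dbf trace records store them in narrow
fields. A compile-time guard along these lines would enforce that note
(illustrative only, not part of the patch):

    #include <linux/build_bug.h>
    #include <linux/kernel.h>

    static inline void zfcp_erp_width_checks(void)
    {
            BUILD_BUG_ON(ZFCP_ERP_ACTION_REOPEN_ADAPTER > U8_MAX);
            BUILD_BUG_ON(ZFCP_ERP_STEP_LUN_OPENING > U16_MAX);
    }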
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index e7e6b63905e2..744a64680d5b 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -4,7 +4,7 @@
*
* Error Recovery Procedures (ERP).
*
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2017
*/
#define KMSG_COMPONENT "zfcp"
@@ -24,38 +24,18 @@ enum zfcp_erp_act_flags {
ZFCP_STATUS_ERP_NO_REF = 0x00800000,
};
-enum zfcp_erp_steps {
- ZFCP_ERP_STEP_UNINITIALIZED = 0x0000,
- ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010,
- ZFCP_ERP_STEP_PORT_CLOSING = 0x0100,
- ZFCP_ERP_STEP_PORT_OPENING = 0x0800,
- ZFCP_ERP_STEP_LUN_CLOSING = 0x1000,
- ZFCP_ERP_STEP_LUN_OPENING = 0x2000,
-};
-
-/**
- * enum zfcp_erp_act_type - Type of ERP action object.
- * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery.
- * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery.
- * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery.
- * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery.
- * @ZFCP_ERP_ACTION_NONE: Eyecatcher pseudo flag to bitwise or-combine with
- * either of the first four enum values.
- * Used to indicate that an ERP action could not be
- * set up despite a detected need for some recovery.
- * @ZFCP_ERP_ACTION_FAILED: Eyecatcher pseudo flag to bitwise or-combine with
- * either of the first four enum values.
- * Used to indicate that ERP not needed because
- * the object has ZFCP_STATUS_COMMON_ERP_FAILED.
+/*
+ * Eyecatcher pseudo flag to bitwise or-combine with enum zfcp_erp_act_type.
+ * Used to indicate that an ERP action could not be set up despite a detected
+ * need for some recovery.
*/
-enum zfcp_erp_act_type {
- ZFCP_ERP_ACTION_REOPEN_LUN = 1,
- ZFCP_ERP_ACTION_REOPEN_PORT = 2,
- ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
- ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4,
- ZFCP_ERP_ACTION_NONE = 0xc0,
- ZFCP_ERP_ACTION_FAILED = 0xe0,
-};
+#define ZFCP_ERP_ACTION_NONE 0xc0
+/*
+ * Eyecatcher pseudo flag to bitwise or-combine with enum zfcp_erp_act_type.
+ * Used to indicate that ERP not needed because the object has
+ * ZFCP_STATUS_COMMON_ERP_FAILED.
+ */
+#define ZFCP_ERP_ACTION_FAILED 0xe0
enum zfcp_erp_act_result {
ZFCP_ERP_SUCCEEDED = 0,
@@ -136,11 +116,11 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
}
}
-static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter,
- struct zfcp_port *port,
- struct scsi_device *sdev)
+static enum zfcp_erp_act_type zfcp_erp_handle_failed(
+ enum zfcp_erp_act_type want, struct zfcp_adapter *adapter,
+ struct zfcp_port *port, struct scsi_device *sdev)
{
- int need = want;
+ enum zfcp_erp_act_type need = want;
struct zfcp_scsi_dev *zsdev;
switch (want) {
@@ -171,19 +151,17 @@ static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter,
adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
}
break;
- default:
- need = 0;
- break;
}
return need;
}
-static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
+static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want,
+ struct zfcp_adapter *adapter,
struct zfcp_port *port,
struct scsi_device *sdev)
{
- int need = want;
+ enum zfcp_erp_act_type need = want;
int l_status, p_status, a_status;
struct zfcp_scsi_dev *zfcp_sdev;
@@ -230,7 +208,8 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
return need;
}
-static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
+static struct zfcp_erp_action *zfcp_erp_setup_act(enum zfcp_erp_act_type need,
+ u32 act_status,
struct zfcp_adapter *adapter,
struct zfcp_port *port,
struct scsi_device *sdev)
@@ -278,9 +257,6 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
break;
-
- default:
- return NULL;
}
WARN_ON_ONCE(erp_action->adapter != adapter);
@@ -288,18 +264,19 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
memset(&erp_action->timer, 0, sizeof(erp_action->timer));
erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED;
erp_action->fsf_req_id = 0;
- erp_action->action = need;
+ erp_action->type = need;
erp_action->status = act_status;
return erp_action;
}
-static void zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
+static void zfcp_erp_action_enqueue(enum zfcp_erp_act_type want,
+ struct zfcp_adapter *adapter,
struct zfcp_port *port,
struct scsi_device *sdev,
- char *id, u32 act_status)
+ char *dbftag, u32 act_status)
{
- int need;
+ enum zfcp_erp_act_type need;
struct zfcp_erp_action *act;
need = zfcp_erp_handle_failed(want, adapter, port, sdev);
@@ -327,10 +304,11 @@ static void zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
list_add_tail(&act->list, &adapter->erp_ready_head);
wake_up(&adapter->erp_ready_wq);
out:
- zfcp_dbf_rec_trig(id, adapter, port, sdev, want, need);
+ zfcp_dbf_rec_trig(dbftag, adapter, port, sdev, want, need);
}
-void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter,
+void zfcp_erp_port_forced_no_port_dbf(char *dbftag,
+ struct zfcp_adapter *adapter,
u64 port_name, u32 port_id)
{
unsigned long flags;
@@ -344,29 +322,30 @@ void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter,
atomic_set(&tmpport.status, -1); /* unknown */
tmpport.wwpn = port_name;
tmpport.d_id = port_id;
- zfcp_dbf_rec_trig(id, adapter, &tmpport, NULL,
+ zfcp_dbf_rec_trig(dbftag, adapter, &tmpport, NULL,
ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
ZFCP_ERP_ACTION_NONE);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
static void _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
- int clear_mask, char *id)
+ int clear_mask, char *dbftag)
{
zfcp_erp_adapter_block(adapter, clear_mask);
zfcp_scsi_schedule_rports_block(adapter);
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
- adapter, NULL, NULL, id, 0);
+ adapter, NULL, NULL, dbftag, 0);
}
/**
* zfcp_erp_adapter_reopen - Reopen adapter.
* @adapter: Adapter to reopen.
* @clear: Status flags to clear.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
*/
-void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
+void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
+ char *dbftag)
{
unsigned long flags;
@@ -375,7 +354,7 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
write_lock_irqsave(&adapter->erp_lock, flags);
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
- NULL, NULL, id, 0);
+ NULL, NULL, dbftag, 0);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
@@ -383,25 +362,25 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
* zfcp_erp_adapter_shutdown - Shutdown adapter.
* @adapter: Adapter to shut down.
* @clear: Status flags to clear.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
*/
void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
- char *id)
+ char *dbftag)
{
int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
- zfcp_erp_adapter_reopen(adapter, clear | flags, id);
+ zfcp_erp_adapter_reopen(adapter, clear | flags, dbftag);
}
/**
* zfcp_erp_port_shutdown - Shutdown port
* @port: Port to shut down.
* @clear: Status flags to clear.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
*/
-void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id)
+void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *dbftag)
{
int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
- zfcp_erp_port_reopen(port, clear | flags, id);
+ zfcp_erp_port_reopen(port, clear | flags, dbftag);
}
static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
@@ -411,53 +390,55 @@ static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
}
static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
- char *id)
+ char *dbftag)
{
zfcp_erp_port_block(port, clear);
zfcp_scsi_schedule_rport_block(port);
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
- port->adapter, port, NULL, id, 0);
+ port->adapter, port, NULL, dbftag, 0);
}
/**
* zfcp_erp_port_forced_reopen - Forced close of port and open again
* @port: Port to force close and to reopen.
* @clear: Status flags to clear.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
*/
-void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id)
+void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
+ char *dbftag)
{
unsigned long flags;
struct zfcp_adapter *adapter = port->adapter;
write_lock_irqsave(&adapter->erp_lock, flags);
- _zfcp_erp_port_forced_reopen(port, clear, id);
+ _zfcp_erp_port_forced_reopen(port, clear, dbftag);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
-static void _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
+static void _zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
+ char *dbftag)
{
zfcp_erp_port_block(port, clear);
zfcp_scsi_schedule_rport_block(port);
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
- port->adapter, port, NULL, id, 0);
+ port->adapter, port, NULL, dbftag, 0);
}
/**
* zfcp_erp_port_reopen - trigger remote port recovery
* @port: port to recover
- * @clear_mask: flags in port status to be cleared
- * @id: Id for debug trace event.
+ * @clear: flags in port status to be cleared
+ * @dbftag: Tag for debug trace event.
*/
-void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
+void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *dbftag)
{
unsigned long flags;
struct zfcp_adapter *adapter = port->adapter;
write_lock_irqsave(&adapter->erp_lock, flags);
- _zfcp_erp_port_reopen(port, clear, id);
+ _zfcp_erp_port_reopen(port, clear, dbftag);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
@@ -467,8 +448,8 @@ static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask)
ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask);
}
-static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
- u32 act_status)
+static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear,
+ char *dbftag, u32 act_status)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
@@ -476,18 +457,18 @@ static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
zfcp_erp_lun_block(sdev, clear);
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
- zfcp_sdev->port, sdev, id, act_status);
+ zfcp_sdev->port, sdev, dbftag, act_status);
}
/**
* zfcp_erp_lun_reopen - initiate reopen of a LUN
* @sdev: SCSI device / LUN to be reopened
- * @clear_mask: specifies flags in LUN status to be cleared
- * @id: Id for debug trace event.
+ * @clear: specifies flags in LUN status to be cleared
+ * @dbftag: Tag for debug trace event.
*
* Return: 0 on success, < 0 on error
*/
-void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id)
+void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *dbftag)
{
unsigned long flags;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -495,7 +476,7 @@ void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id)
struct zfcp_adapter *adapter = port->adapter;
write_lock_irqsave(&adapter->erp_lock, flags);
- _zfcp_erp_lun_reopen(sdev, clear, id, 0);
+ _zfcp_erp_lun_reopen(sdev, clear, dbftag, 0);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
@@ -503,25 +484,25 @@ void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id)
* zfcp_erp_lun_shutdown - Shutdown LUN
* @sdev: SCSI device / LUN to shut down.
* @clear: Status flags to clear.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
*/
-void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id)
+void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *dbftag)
{
int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
- zfcp_erp_lun_reopen(sdev, clear | flags, id);
+ zfcp_erp_lun_reopen(sdev, clear | flags, dbftag);
}
/**
* zfcp_erp_lun_shutdown_wait - Shutdown LUN and wait for erp completion
* @sdev: SCSI device / LUN to shut down.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
*
* Do not acquire a reference for the LUN when creating the ERP
* action. It is safe, because this function waits for the ERP to
* complete first. This allows to shutdown the LUN, even when the SCSI
* device is in the state SDEV_DEL when scsi_device_get will fail.
*/
-void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
+void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *dbftag)
{
unsigned long flags;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -530,7 +511,7 @@ void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
write_lock_irqsave(&adapter->erp_lock, flags);
- _zfcp_erp_lun_reopen(sdev, clear, id, ZFCP_STATUS_ERP_NO_REF);
+ _zfcp_erp_lun_reopen(sdev, clear, dbftag, ZFCP_STATUS_ERP_NO_REF);
write_unlock_irqrestore(&adapter->erp_lock, flags);
zfcp_erp_wait(adapter);
@@ -619,7 +600,7 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
/**
* zfcp_erp_timeout_handler - Trigger ERP action from timed out ERP request
- * @data: ERP action (from timer data)
+ * @t: timer list entry embedded in zfcp FSF request
*/
void zfcp_erp_timeout_handler(struct timer_list *t)
{
@@ -644,31 +625,31 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
}
static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
- int clear, char *id)
+ int clear, char *dbftag)
{
struct zfcp_port *port;
read_lock(&adapter->port_list_lock);
list_for_each_entry(port, &adapter->port_list, list)
- _zfcp_erp_port_reopen(port, clear, id);
+ _zfcp_erp_port_reopen(port, clear, dbftag);
read_unlock(&adapter->port_list_lock);
}
static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
- char *id)
+ char *dbftag)
{
struct scsi_device *sdev;
spin_lock(port->adapter->scsi_host->host_lock);
__shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
- _zfcp_erp_lun_reopen(sdev, clear, id, 0);
+ _zfcp_erp_lun_reopen(sdev, clear, dbftag, 0);
spin_unlock(port->adapter->scsi_host->host_lock);
}
static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
{
- switch (act->action) {
+ switch (act->type) {
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
_zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1");
break;
@@ -686,7 +667,7 @@ static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
{
- switch (act->action) {
+ switch (act->type) {
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
_zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1");
break;
@@ -696,6 +677,9 @@ static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
case ZFCP_ERP_ACTION_REOPEN_PORT:
_zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3");
break;
+ case ZFCP_ERP_ACTION_REOPEN_LUN:
+ /* NOP */
+ break;
}
}
@@ -723,7 +707,8 @@ static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
_zfcp_erp_port_reopen(port, 0, "ereptp1");
}
-static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_adapter_strat_fsf_xconf(
+ struct zfcp_erp_action *erp_action)
{
int retries;
int sleep = 1;
@@ -768,7 +753,8 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
return ZFCP_ERP_SUCCEEDED;
}
-static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf_xport(
+ struct zfcp_erp_action *act)
{
int ret;
struct zfcp_adapter *adapter = act->adapter;
@@ -793,7 +779,8 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
return ZFCP_ERP_SUCCEEDED;
}
-static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf(
+ struct zfcp_erp_action *act)
{
if (zfcp_erp_adapter_strat_fsf_xconf(act) == ZFCP_ERP_FAILED)
return ZFCP_ERP_FAILED;
@@ -832,7 +819,8 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
}
-static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open(
+ struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
@@ -853,7 +841,8 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
return ZFCP_ERP_SUCCEEDED;
}
-static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_adapter_strategy(
+ struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
@@ -871,7 +860,8 @@ static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act)
return ZFCP_ERP_SUCCEEDED;
}
-static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_port_forced_strategy_close(
+ struct zfcp_erp_action *act)
{
int retval;
@@ -885,7 +875,8 @@ static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
return ZFCP_ERP_CONTINUES;
}
-static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_port_forced_strategy(
+ struct zfcp_erp_action *erp_action)
{
struct zfcp_port *port = erp_action->port;
int status = atomic_read(&port->status);
@@ -901,11 +892,19 @@ static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
if (!(status & ZFCP_STATUS_PORT_PHYS_OPEN))
return ZFCP_ERP_SUCCEEDED;
+ break;
+ case ZFCP_ERP_STEP_PORT_CLOSING:
+ case ZFCP_ERP_STEP_PORT_OPENING:
+ case ZFCP_ERP_STEP_LUN_CLOSING:
+ case ZFCP_ERP_STEP_LUN_OPENING:
+ /* NOP */
+ break;
}
return ZFCP_ERP_FAILED;
}
-static int zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_port_strategy_close(
+ struct zfcp_erp_action *erp_action)
{
int retval;
@@ -918,7 +917,8 @@ static int zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
return ZFCP_ERP_CONTINUES;
}
-static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_port_strategy_open_port(
+ struct zfcp_erp_action *erp_action)
{
int retval;
@@ -944,7 +944,8 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
return zfcp_erp_port_strategy_open_port(act);
}
-static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_port_strategy_open_common(
+ struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
struct zfcp_port *port = act->port;
@@ -975,12 +976,18 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
port->d_id = 0;
return ZFCP_ERP_FAILED;
}
- /* fall through otherwise */
+ /* no early return otherwise, continue after switch case */
+ break;
+ case ZFCP_ERP_STEP_LUN_CLOSING:
+ case ZFCP_ERP_STEP_LUN_OPENING:
+ /* NOP */
+ break;
}
return ZFCP_ERP_FAILED;
}
-static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_port_strategy(
+ struct zfcp_erp_action *erp_action)
{
struct zfcp_port *port = erp_action->port;
int p_status = atomic_read(&port->status);
@@ -999,6 +1006,12 @@ static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
if (p_status & ZFCP_STATUS_COMMON_OPEN)
return ZFCP_ERP_FAILED;
break;
+ case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
+ case ZFCP_ERP_STEP_PORT_OPENING:
+ case ZFCP_ERP_STEP_LUN_CLOSING:
+ case ZFCP_ERP_STEP_LUN_OPENING:
+ /* NOP */
+ break;
}
close_init_done:
@@ -1016,7 +1029,8 @@ static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev)
&zfcp_sdev->status);
}
-static int zfcp_erp_lun_strategy_close(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_lun_strategy_close(
+ struct zfcp_erp_action *erp_action)
{
int retval = zfcp_fsf_close_lun(erp_action);
if (retval == -ENOMEM)
@@ -1027,7 +1041,8 @@ static int zfcp_erp_lun_strategy_close(struct zfcp_erp_action *erp_action)
return ZFCP_ERP_CONTINUES;
}
-static int zfcp_erp_lun_strategy_open(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_lun_strategy_open(
+ struct zfcp_erp_action *erp_action)
{
int retval = zfcp_fsf_open_lun(erp_action);
if (retval == -ENOMEM)
@@ -1038,7 +1053,8 @@ static int zfcp_erp_lun_strategy_open(struct zfcp_erp_action *erp_action)
return ZFCP_ERP_CONTINUES;
}
-static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_lun_strategy(
+ struct zfcp_erp_action *erp_action)
{
struct scsi_device *sdev = erp_action->sdev;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -1048,7 +1064,8 @@ static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action)
zfcp_erp_lun_strategy_clearstati(sdev);
if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
return zfcp_erp_lun_strategy_close(erp_action);
- /* already closed, fall through */
+ /* already closed */
+ /* fall through */
case ZFCP_ERP_STEP_LUN_CLOSING:
if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
return ZFCP_ERP_FAILED;
@@ -1059,11 +1076,18 @@ static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action)
case ZFCP_ERP_STEP_LUN_OPENING:
if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
return ZFCP_ERP_SUCCEEDED;
+ break;
+ case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
+ case ZFCP_ERP_STEP_PORT_CLOSING:
+ case ZFCP_ERP_STEP_PORT_OPENING:
+ /* NOP */
+ break;
}
return ZFCP_ERP_FAILED;
}
-static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result)
+static enum zfcp_erp_act_result zfcp_erp_strategy_check_lun(
+ struct scsi_device *sdev, enum zfcp_erp_act_result result)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -1084,6 +1108,12 @@ static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result)
ZFCP_STATUS_COMMON_ERP_FAILED);
}
break;
+ case ZFCP_ERP_CONTINUES:
+ case ZFCP_ERP_EXIT:
+ case ZFCP_ERP_DISMISSED:
+ case ZFCP_ERP_NOMEM:
+ /* NOP */
+ break;
}
if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
@@ -1093,7 +1123,8 @@ static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result)
return result;
}
-static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
+static enum zfcp_erp_act_result zfcp_erp_strategy_check_port(
+ struct zfcp_port *port, enum zfcp_erp_act_result result)
{
switch (result) {
case ZFCP_ERP_SUCCEEDED :
@@ -1115,6 +1146,12 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
ZFCP_STATUS_COMMON_ERP_FAILED);
}
break;
+ case ZFCP_ERP_CONTINUES:
+ case ZFCP_ERP_EXIT:
+ case ZFCP_ERP_DISMISSED:
+ case ZFCP_ERP_NOMEM:
+ /* NOP */
+ break;
}
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
@@ -1124,8 +1161,8 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
return result;
}
-static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
- int result)
+static enum zfcp_erp_act_result zfcp_erp_strategy_check_adapter(
+ struct zfcp_adapter *adapter, enum zfcp_erp_act_result result)
{
switch (result) {
case ZFCP_ERP_SUCCEEDED :
@@ -1143,6 +1180,12 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
ZFCP_STATUS_COMMON_ERP_FAILED);
}
break;
+ case ZFCP_ERP_CONTINUES:
+ case ZFCP_ERP_EXIT:
+ case ZFCP_ERP_DISMISSED:
+ case ZFCP_ERP_NOMEM:
+ /* NOP */
+ break;
}
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
@@ -1152,14 +1195,14 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
return result;
}
-static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action,
- int result)
+static enum zfcp_erp_act_result zfcp_erp_strategy_check_target(
+ struct zfcp_erp_action *erp_action, enum zfcp_erp_act_result result)
{
struct zfcp_adapter *adapter = erp_action->adapter;
struct zfcp_port *port = erp_action->port;
struct scsi_device *sdev = erp_action->sdev;
- switch (erp_action->action) {
+ switch (erp_action->type) {
case ZFCP_ERP_ACTION_REOPEN_LUN:
result = zfcp_erp_strategy_check_lun(sdev, result);
@@ -1192,16 +1235,17 @@ static int zfcp_erp_strat_change_det(atomic_t *target_status, u32 erp_status)
return 0;
}
-static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
+static enum zfcp_erp_act_result zfcp_erp_strategy_statechange(
+ struct zfcp_erp_action *act, enum zfcp_erp_act_result result)
{
- int action = act->action;
+ enum zfcp_erp_act_type type = act->type;
struct zfcp_adapter *adapter = act->adapter;
struct zfcp_port *port = act->port;
struct scsi_device *sdev = act->sdev;
struct zfcp_scsi_dev *zfcp_sdev;
u32 erp_status = act->status;
- switch (action) {
+ switch (type) {
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) {
_zfcp_erp_adapter_reopen(adapter,
@@ -1231,7 +1275,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
}
break;
}
- return ret;
+ return result;
}
static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
@@ -1248,7 +1292,7 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
list_del(&erp_action->list);
zfcp_dbf_rec_run("eractd1", erp_action);
- switch (erp_action->action) {
+ switch (erp_action->type) {
case ZFCP_ERP_ACTION_REOPEN_LUN:
zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
@@ -1324,13 +1368,14 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
-static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
+static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act,
+ enum zfcp_erp_act_result result)
{
struct zfcp_adapter *adapter = act->adapter;
struct zfcp_port *port = act->port;
struct scsi_device *sdev = act->sdev;
- switch (act->action) {
+ switch (act->type) {
case ZFCP_ERP_ACTION_REOPEN_LUN:
if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
scsi_device_put(sdev);
@@ -1364,9 +1409,10 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
}
}
-static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_strategy_do_action(
+ struct zfcp_erp_action *erp_action)
{
- switch (erp_action->action) {
+ switch (erp_action->type) {
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
return zfcp_erp_adapter_strategy(erp_action);
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
@@ -1379,9 +1425,10 @@ static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
return ZFCP_ERP_FAILED;
}
-static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_strategy(
+ struct zfcp_erp_action *erp_action)
{
- int retval;
+ enum zfcp_erp_act_result result;
unsigned long flags;
struct zfcp_adapter *adapter = erp_action->adapter;
@@ -1392,12 +1439,12 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
zfcp_erp_action_dequeue(erp_action);
- retval = ZFCP_ERP_DISMISSED;
+ result = ZFCP_ERP_DISMISSED;
goto unlock;
}
if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
- retval = ZFCP_ERP_FAILED;
+ result = ZFCP_ERP_FAILED;
goto check_target;
}
@@ -1405,13 +1452,13 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
/* no lock to allow for blocking operations */
write_unlock_irqrestore(&adapter->erp_lock, flags);
- retval = zfcp_erp_strategy_do_action(erp_action);
+ result = zfcp_erp_strategy_do_action(erp_action);
write_lock_irqsave(&adapter->erp_lock, flags);
if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
- retval = ZFCP_ERP_CONTINUES;
+ result = ZFCP_ERP_CONTINUES;
- switch (retval) {
+ switch (result) {
case ZFCP_ERP_NOMEM:
if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) {
++adapter->erp_low_mem_count;
@@ -1421,7 +1468,7 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
_zfcp_erp_adapter_reopen(adapter, 0, "erstgy1");
else {
zfcp_erp_strategy_memwait(erp_action);
- retval = ZFCP_ERP_CONTINUES;
+ result = ZFCP_ERP_CONTINUES;
}
goto unlock;
@@ -1431,27 +1478,33 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
}
goto unlock;
+ case ZFCP_ERP_SUCCEEDED:
+ case ZFCP_ERP_FAILED:
+ case ZFCP_ERP_EXIT:
+ case ZFCP_ERP_DISMISSED:
+ /* NOP */
+ break;
}
check_target:
- retval = zfcp_erp_strategy_check_target(erp_action, retval);
+ result = zfcp_erp_strategy_check_target(erp_action, result);
zfcp_erp_action_dequeue(erp_action);
- retval = zfcp_erp_strategy_statechange(erp_action, retval);
- if (retval == ZFCP_ERP_EXIT)
+ result = zfcp_erp_strategy_statechange(erp_action, result);
+ if (result == ZFCP_ERP_EXIT)
goto unlock;
- if (retval == ZFCP_ERP_SUCCEEDED)
+ if (result == ZFCP_ERP_SUCCEEDED)
zfcp_erp_strategy_followup_success(erp_action);
- if (retval == ZFCP_ERP_FAILED)
+ if (result == ZFCP_ERP_FAILED)
zfcp_erp_strategy_followup_failed(erp_action);
unlock:
write_unlock_irqrestore(&adapter->erp_lock, flags);
- if (retval != ZFCP_ERP_CONTINUES)
- zfcp_erp_action_cleanup(erp_action, retval);
+ if (result != ZFCP_ERP_CONTINUES)
+ zfcp_erp_action_cleanup(erp_action, result);
kref_put(&adapter->ref, zfcp_adapter_release);
- return retval;
+ return result;
}
static int zfcp_erp_thread(void *data)
@@ -1489,7 +1542,7 @@ static int zfcp_erp_thread(void *data)
* zfcp_erp_thread_setup - Start ERP thread for adapter
* @adapter: Adapter to start the ERP thread for
*
- * Returns 0 on success or error code from kernel_thread()
+ * Return: 0 on success, or error code from kthread_run().
*/
int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
{
@@ -1694,11 +1747,11 @@ void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask)
/**
* zfcp_erp_adapter_reset_sync() - Really reopen adapter and wait.
* @adapter: Pointer to zfcp_adapter to reopen.
- * @id: Trace tag string of length %ZFCP_DBF_TAG_LEN.
+ * @dbftag: Trace tag string of length %ZFCP_DBF_TAG_LEN.
*/
-void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *id)
+void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *dbftag)
{
zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
- zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, id);
+ zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
zfcp_erp_wait(adapter);
}
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index bd0c5a9f04cb..3fce47b0b21b 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -59,14 +59,15 @@ extern void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
/* zfcp_erp.c */
extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
-extern void zfcp_erp_port_forced_no_port_dbf(char *id,
+extern void zfcp_erp_port_forced_no_port_dbf(char *dbftag,
struct zfcp_adapter *adapter,
u64 port_name, u32 port_id);
extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
-extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id);
+extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
+ char *dbftag);
extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
@@ -79,7 +80,8 @@ extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
extern void zfcp_erp_wait(struct zfcp_adapter *);
extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
extern void zfcp_erp_timeout_handler(struct timer_list *t);
-extern void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *id);
+extern void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter,
+ char *dbftag);
/* zfcp_fc.c */
extern struct kmem_cache *zfcp_fc_req_cache;
@@ -144,6 +146,7 @@ extern void zfcp_qdio_close(struct zfcp_qdio *);
extern void zfcp_qdio_siosl(struct zfcp_adapter *);
/* zfcp_scsi.c */
+extern bool zfcp_experimental_dix;
extern struct scsi_transport_template *zfcp_scsi_transport_template;
extern int zfcp_scsi_adapter_register(struct zfcp_adapter *);
extern void zfcp_scsi_adapter_unregister(struct zfcp_adapter *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index f6c415d6ef48..db00b5e3abbe 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -312,7 +312,7 @@ static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
/**
* zfcp_fc_incoming_els - handle incoming ELS
- * @fsf_req - request which contains incoming ELS
+ * @fsf_req: request which contains incoming ELS
*/
void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
{
@@ -597,6 +597,48 @@ void zfcp_fc_test_link(struct zfcp_port *port)
put_device(&port->dev);
}
+/**
+ * zfcp_fc_sg_free_table - free memory used by scatterlists
+ * @sg: pointer to scatterlist
+ * @count: number of scatterlist which are to be free'ed
+ * the scatterlist are expected to reference pages always
+ */
+static void zfcp_fc_sg_free_table(struct scatterlist *sg, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++, sg++)
+ if (sg)
+ free_page((unsigned long) sg_virt(sg));
+ else
+ break;
+}
+
+/**
+ * zfcp_fc_sg_setup_table - initialize scatterlist and assign page buffers
+ * @sg: pointer to struct scatterlist
+ * @count: number of scatterlist entries for which page-sized buffers
+ * are allocated and assigned
+ *
+ * Returns: 0 on success, -ENOMEM otherwise
+ */
+static int zfcp_fc_sg_setup_table(struct scatterlist *sg, int count)
+{
+ void *addr;
+ int i;
+
+ sg_init_table(sg, count);
+ for (i = 0; i < count; i++, sg++) {
+ addr = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!addr) {
+ zfcp_fc_sg_free_table(sg, i);
+ return -ENOMEM;
+ }
+ sg_set_buf(sg, addr, PAGE_SIZE);
+ }
+ return 0;
+}
+
static struct zfcp_fc_req *zfcp_fc_alloc_sg_env(int buf_num)
{
struct zfcp_fc_req *fc_req;
@@ -605,7 +647,7 @@ static struct zfcp_fc_req *zfcp_fc_alloc_sg_env(int buf_num)
if (!fc_req)
return NULL;
- if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
+ if (zfcp_fc_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
kmem_cache_free(zfcp_fc_req_cache, fc_req);
return NULL;
}
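
The two helpers added above pair like alloc/free, and zfcp_fc_sg_setup_table() already releases the pages it allocated before a failure (note the zfcp_fc_sg_free_table(sg, i) call), so a caller only cleans up on the success path. A usage sketch under that reading, not actual driver code:

    struct scatterlist sg[4];

    if (zfcp_fc_sg_setup_table(sg, ARRAY_SIZE(sg)))
            return -ENOMEM;         /* nothing to undo on failure */
    /* ... issue the CT request described by sg ... */
    zfcp_fc_sg_free_table(sg, ARRAY_SIZE(sg));
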
@@ -763,7 +805,7 @@ void zfcp_fc_scan_ports(struct work_struct *work)
break;
}
}
- zfcp_sg_free_table(&fc_req->sg_rsp, buf_num);
+ zfcp_fc_sg_free_table(&fc_req->sg_rsp, buf_num);
kmem_cache_free(zfcp_fc_req_cache, fc_req);
out:
zfcp_fc_wka_port_put(&adapter->gs->ds);
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 3cd74729cfb9..6902ae1f8e4f 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -121,9 +121,24 @@ struct zfcp_fc_rspn_req {
/**
* struct zfcp_fc_req - Container for FC ELS and CT requests sent from zfcp
* @ct_els: data required for issuing fsf command
- * @sg_req: scatterlist entry for request data
- * @sg_rsp: scatterlist entry for response data
- * @u: request specific data
+ * @sg_req: scatterlist entry for request data, refers to embedded @u submember
+ * @sg_rsp: scatterlist entry for response data, refers to embedded @u submember
+ * @u: request and response specific data
+ * @u.adisc: ADISC specific data
+ * @u.adisc.req: ADISC request
+ * @u.adisc.rsp: ADISC response
+ * @u.gid_pn: GID_PN specific data
+ * @u.gid_pn.req: GID_PN request
+ * @u.gid_pn.rsp: GID_PN response
+ * @u.gpn_ft: GPN_FT specific data
+ * @u.gpn_ft.sg_rsp2: GPN_FT response, not embedded here, allocated elsewhere
+ * @u.gpn_ft.req: GPN_FT request
+ * @u.gspn: GSPN specific data
+ * @u.gspn.req: GSPN request
+ * @u.gspn.rsp: GSPN response
+ * @u.rspn: RSPN specific data
+ * @u.rspn.req: RSPN request
+ * @u.rspn.rsp: RSPN response
*/
struct zfcp_fc_req {
struct zfcp_fsf_ct_els ct_els;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 3c86e27f094d..d94496ee6883 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -19,6 +19,11 @@
#include "zfcp_qdio.h"
#include "zfcp_reqlist.h"
+/* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */
+#define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ)
+/* timeout for: exchange config/port data outside ERP, or open/close WKA port */
+#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
+
struct kmem_cache *zfcp_fsf_qtcb_cache;
static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
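
Both new timeout macros are expressed in jiffies, so N*HZ is N seconds regardless of the configured tick rate: 10*HZ arms a 10-second timer for the scsi_eh-issued requests, 60*HZ a 60-second one for the other request types listed. A generic sketch of how such a constant feeds a request timer (hypothetical names, not the zfcp implementation):

    #include <linux/timer.h>

    struct demo_req { struct timer_list timer; };

    static void demo_timeout(struct timer_list *t)
    {
            struct demo_req *req = from_timer(req, t, timer);
            /* ... kick off recovery for req ... */
    }

    static void demo_start_timer(struct demo_req *req, unsigned long timeout)
    {
            timer_setup(&req->timer, demo_timeout, 0);
            req->timer.expires = jiffies + timeout; /* e.g. 60*HZ */
            add_timer(&req->timer);
    }
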
@@ -74,18 +79,18 @@ static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
/**
* zfcp_fsf_req_free - free memory used by fsf request
- * @fsf_req: pointer to struct zfcp_fsf_req
+ * @req: pointer to struct zfcp_fsf_req
*/
void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
{
if (likely(req->pool)) {
- if (likely(req->qtcb))
+ if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
mempool_free(req, req->pool);
return;
}
- if (likely(req->qtcb))
+ if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
kfree(req);
}
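
Several hunks in this file replace checks on req->qtcb, or on the now-removed cached fsf_command, with a single zfcp_fsf_req_is_status_read_buffer() predicate. Its definition is not part of this excerpt; given that zfcp_fsf_req_create() below only allocates a QTCB when fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS, a plausible shape (an assumption, not verified zfcp source) is:

    /* assumed helper: status read buffers are the only requests
     * created without a QTCB, so a NULL qtcb identifies them */
    static inline bool
    zfcp_fsf_req_is_status_read_buffer(struct zfcp_fsf_req *req)
    {
            return req->qtcb == NULL;
    }
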
@@ -379,7 +384,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
/**
* zfcp_fsf_req_complete - process completion of a FSF request
- * @fsf_req: The FSF request that has been completed.
+ * @req: The FSF request that has been completed.
*
* When a request has been completed either from the FCP adapter,
* or it has been dismissed due to a queue shutdown, this function
@@ -388,7 +393,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
*/
static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
{
- if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
+ if (unlikely(zfcp_fsf_req_is_status_read_buffer(req))) {
zfcp_fsf_status_read_handler(req);
return;
}
@@ -705,7 +710,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
init_completion(&req->completion);
req->adapter = adapter;
- req->fsf_command = fsf_cmd;
req->req_id = adapter->req_no;
if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
@@ -720,14 +724,13 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
return ERR_PTR(-ENOMEM);
}
- req->seq_no = adapter->fsf_req_seq_no;
req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
req->qtcb->prefix.req_id = req->req_id;
req->qtcb->prefix.ulp_info = 26;
- req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
+ req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd];
req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
req->qtcb->header.req_handle = req->req_id;
- req->qtcb->header.fsf_command = req->fsf_command;
+ req->qtcb->header.fsf_command = fsf_cmd;
}
zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
@@ -740,7 +743,6 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
- int with_qtcb = (req->qtcb != NULL);
int req_id = req->req_id;
zfcp_reqlist_add(adapter->req_list, req);
@@ -756,7 +758,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
}
/* Don't increase for unsolicited status */
- if (with_qtcb)
+ if (!zfcp_fsf_req_is_status_read_buffer(req))
adapter->fsf_req_seq_no++;
adapter->req_no++;
@@ -765,8 +767,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
/**
* zfcp_fsf_status_read - send status read request
- * @adapter: pointer to struct zfcp_adapter
- * @req_flags: request flags
+ * @qdio: pointer to struct zfcp_qdio
* Returns: 0 on success, ERROR otherwise
*/
int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
@@ -912,7 +913,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
req->qtcb->header.port_handle = zfcp_sdev->port->handle;
req->qtcb->bottom.support.req_handle = (u64) old_req_id;
- zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
+ zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
if (!zfcp_fsf_req_send(req))
goto out;
@@ -1057,8 +1058,10 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
/**
* zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
+ * @wka_port: pointer to zfcp WKA port to send CT/GS to
* @ct: pointer to struct zfcp_send_ct with data for request
* @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
+ * @timeout: timeout that the hardware should use, also used for a later software timeout
*/
int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
struct zfcp_fsf_ct_els *ct, mempool_t *pool,
@@ -1151,7 +1154,10 @@ skip_fsfstatus:
/**
* zfcp_fsf_send_els - initiate an ELS command (FC-FS)
+ * @adapter: pointer to zfcp adapter
+ * @d_id: N_Port_ID to send ELS to
* @els: pointer to struct zfcp_send_els with data for the command
+ * @timeout: timeout that the hardware should use, also used for a later software timeout
*/
int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
struct zfcp_fsf_ct_els *els, unsigned int timeout)
@@ -1809,7 +1815,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
case FSF_LUN_SHARING_VIOLATION:
if (qual->word[0])
dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
- "LUN 0x%Lx on port 0x%Lx is already in "
+ "LUN 0x%016Lx on port 0x%016Lx is already in "
"use by CSS%d, MIF Image ID %x\n",
zfcp_scsi_dev_lun(sdev),
(unsigned long long)zfcp_sdev->port->wwpn,
@@ -1986,7 +1992,7 @@ out:
return retval;
}
-static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
+static void zfcp_fsf_update_lat(struct zfcp_latency_record *lat_rec, u32 lat)
{
lat_rec->sum += lat;
lat_rec->min = min(lat_rec->min, lat);
@@ -1996,7 +2002,7 @@ static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
{
struct fsf_qual_latency_info *lat_in;
- struct latency_cont *lat = NULL;
+ struct zfcp_latency_cont *lat = NULL;
struct zfcp_scsi_dev *zfcp_sdev;
struct zfcp_blk_drv_data blktrc;
int ticks = req->adapter->timer_ticks;
@@ -2088,11 +2094,8 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req,
break;
case FSF_CMND_LENGTH_NOT_VALID:
dev_err(&req->adapter->ccw_device->dev,
- "Incorrect CDB length %d, LUN 0x%016Lx on "
- "port 0x%016Lx closed\n",
- req->qtcb->bottom.io.fcp_cmnd_length,
- (unsigned long long)zfcp_scsi_dev_lun(sdev),
- (unsigned long long)zfcp_sdev->port->wwpn);
+ "Incorrect FCP_CMND length %d, FCP device closed\n",
+ req->qtcb->bottom.io.fcp_cmnd_length);
zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -2369,7 +2372,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);
- zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
+ zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
if (!zfcp_fsf_req_send(req))
goto out;
@@ -2382,7 +2385,7 @@ out:
/**
* zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
- * @adapter: pointer to struct zfcp_adapter
+ * @qdio: pointer to struct zfcp_qdio
* @sbal_idx: response queue index of SBAL to be processed
*/
void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 535628b92f0a..2c658b66318c 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -438,8 +438,8 @@ struct zfcp_blk_drv_data {
/**
* struct zfcp_fsf_ct_els - zfcp data for ct or els request
- * @req: scatter-gather list for request
- * @resp: scatter-gather list for response
+ * @req: scatter-gather list for request, points to &zfcp_fc_req.sg_req or BSG
+ * @resp: scatter-gather list for response, points to &zfcp_fc_req.sg_rsp or BSG
* @handler: handler function (called for response to the request)
* @handler_data: data passed to handler function
* @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 4ab02e8d36f3..10c4e8e3fd59 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -4,7 +4,7 @@
*
* Setup and helper functions to access QDIO.
*
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2017
*/
#define KMSG_COMPONENT "zfcp"
@@ -19,7 +19,7 @@ static bool enable_multibuffer = true;
module_param_named(datarouter, enable_multibuffer, bool, 0400);
MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");
-static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
+static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *dbftag,
unsigned int qdio_err)
{
struct zfcp_adapter *adapter = qdio->adapter;
@@ -28,12 +28,12 @@ static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
if (qdio_err & QDIO_ERROR_SLSB_STATE) {
zfcp_qdio_siosl(adapter);
- zfcp_erp_adapter_shutdown(adapter, 0, id);
+ zfcp_erp_adapter_shutdown(adapter, 0, dbftag);
return;
}
zfcp_erp_adapter_reopen(adapter,
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
- ZFCP_STATUS_COMMON_ERP_FAILED, id);
+ ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
}
static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
@@ -180,7 +180,6 @@ zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
* @qdio: pointer to struct zfcp_qdio
* @q_req: pointer to struct zfcp_qdio_req
* @sg: scatter-gather list
- * @max_sbals: upper bound for number of SBALs to be used
* Returns: zero or -EINVAL on error
*/
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
@@ -303,7 +302,7 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
/**
* zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
- * @adapter: pointer to struct zfcp_adapter
+ * @qdio: pointer to struct zfcp_qdio
* Returns: -ENOMEM on memory allocation error or return value from
* qdio_allocate
*/
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 886c662cc154..2a816a37b3c0 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -30,6 +30,8 @@
* @req_q_full: queue full incidents
* @req_q_wq: used to wait for SBAL availability
* @adapter: adapter used in conjunction with this qdio structure
+ * @max_sbale_per_sbal: qdio limit per sbal
+ * @max_sbale_per_req: qdio limit per request
*/
struct zfcp_qdio {
struct qdio_buffer *res_q[QDIO_MAX_BUFFERS_PER_Q];
@@ -70,7 +72,7 @@ struct zfcp_qdio_req {
/**
* zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
* @qdio: pointer to struct zfcp_qdio
- * @q_rec: pointer to struct zfcp_qdio_req
+ * @q_req: pointer to struct zfcp_qdio_req
* Returns: pointer to qdio_buffer_element (sbale) structure
*/
static inline struct qdio_buffer_element *
@@ -82,7 +84,7 @@ zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
/**
* zfcp_qdio_sbale_curr - return current sbale on req_q for a request
* @qdio: pointer to struct zfcp_qdio
- * @fsf_req: pointer to struct zfcp_fsf_req
+ * @q_req: pointer to struct zfcp_qdio_req
* Returns: pointer to qdio_buffer_element (sbale) structure
*/
static inline struct qdio_buffer_element *
@@ -135,6 +137,8 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
* zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests
* @qdio: pointer to struct zfcp_qdio
* @q_req: pointer to struct zfcp_queue_req
+ * @data: pointer to data
+ * @len: length of data
*
* This is only required for single sbal requests, calling it when
* wrapping around to the next sbal is a bug.
@@ -182,6 +186,7 @@ int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
/**
* zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal
+ * @qdio: pointer to struct zfcp_qdio
* @q_req: The current zfcp_qdio_req
*/
static inline
diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
index 59a943c0d51d..9b8ff249e31c 100644
--- a/drivers/s390/scsi/zfcp_reqlist.h
+++ b/drivers/s390/scsi/zfcp_reqlist.h
@@ -17,7 +17,7 @@
/**
* struct zfcp_reqlist - Container for request list (reqlist)
* @lock: Spinlock for protecting the hash list
- * @list: Array of hashbuckets, each is a list of requests in this bucket
+ * @buckets: Array of hashbuckets, each is a list of requests in this bucket
*/
struct zfcp_reqlist {
spinlock_t lock;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index a8efcb330bc1..00acc7144bbc 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -27,7 +27,11 @@ MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
static bool enable_dif;
module_param_named(dif, enable_dif, bool, 0400);
-MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
+MODULE_PARM_DESC(dif, "Enable DIF data integrity support (default off)");
+
+bool zfcp_experimental_dix;
+module_param_named(dix, zfcp_experimental_dix, bool, 0400);
+MODULE_PARM_DESC(dix, "Enable experimental DIX (data integrity extension) support which implies DIF support (default off)");
static bool allow_lun_scan = true;
module_param(allow_lun_scan, bool, 0600);
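
zfcp_experimental_dix is wired up as a standard module parameter (module_param_named with mode 0400, so read-only at runtime), meaning it is chosen at load time; per the description, enabling DIX implies DIF. Illustrative invocations, assuming no other setup is needed:

    # loaded as a module:
    modprobe zfcp dix=1
    # built into the kernel, on the command line instead:
    zfcp.dix=1
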
@@ -226,7 +230,9 @@ static void zfcp_scsi_forget_cmnd(struct zfcp_fsf_req *old_req, void *data)
(struct zfcp_scsi_req_filter *)data;
/* already aborted - prevent side-effects - or not a SCSI command */
- if (old_req->data == NULL || old_req->fsf_command != FSF_QTCB_FCP_CMND)
+ if (old_req->data == NULL ||
+ zfcp_fsf_req_is_status_read_buffer(old_req) ||
+ old_req->qtcb->header.fsf_command != FSF_QTCB_FCP_CMND)
return;
/* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */
@@ -423,7 +429,6 @@ static struct scsi_host_template zfcp_scsi_host_template = {
* ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
/* GCD, adjusted later */
.dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
- .use_clustering = 1,
.shost_attrs = zfcp_sysfs_shost_attrs,
.sdev_attrs = zfcp_sysfs_sdev_attrs,
.track_queue_depth = 1,
@@ -788,11 +793,11 @@ void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
data_div = atomic_read(&adapter->status) &
ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED;
- if (enable_dif &&
+ if ((enable_dif || zfcp_experimental_dix) &&
adapter->adapter_features & FSF_FEATURE_DIF_PROT_TYPE1)
mask |= SHOST_DIF_TYPE1_PROTECTION;
- if (enable_dif && data_div &&
+ if (zfcp_experimental_dix && data_div &&
adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) {
mask |= SHOST_DIX_TYPE1_PROTECTION;
scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 2d655a97b959..a3c20e3a8b7c 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1998,7 +1998,6 @@ static struct scsi_host_template driver_template = {
.sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = twa_host_attrs,
.emulated = 1,
.no_write_same = 1,
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 480cf82700e9..e8f5f7c63190 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1550,7 +1550,6 @@ static struct scsi_host_template driver_template = {
.sg_tablesize = TW_LIBERATOR_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = twl_host_attrs,
.emulated = 1,
.no_write_same = 1,
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index a58257645e94..2b1e0d503020 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1174,7 +1174,7 @@ static int tw_setfeature(TW_Device_Extension *tw_dev, int parm, int param_size,
command_que_value = tw_dev->command_packet_physical_address[request_id];
if (command_que_value == 0) {
printk(KERN_WARNING "3w-xxxx: tw_setfeature(): Bad command packet physical address.\n");
- return 1;
+ return 1;
}
/* Send command packet to the board */
@@ -2247,7 +2247,6 @@ static struct scsi_host_template driver_template = {
.sg_tablesize = TW_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = tw_host_attrs,
.emulated = 1,
.no_write_same = 1,
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 6be77b3aa8a5..128d658d472a 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -318,7 +318,6 @@ NCR_700_detect(struct scsi_host_template *tpnt,
tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
- tpnt->use_clustering = ENABLE_CLUSTERING;
tpnt->slave_configure = NCR_700_slave_configure;
tpnt->slave_destroy = NCR_700_slave_destroy;
tpnt->slave_alloc = NCR_700_slave_alloc;
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 9cee941f97d6..e41e51f1da71 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2641,6 +2641,7 @@ static int blogic_resultcode(struct blogic_adapter *adapter,
case BLOGIC_BAD_CMD_PARAM:
blogic_warn("BusLogic Driver Protocol Error 0x%02X\n",
adapter, adapter_status);
+ /* fall through */
case BLOGIC_DATA_UNDERRUN:
case BLOGIC_DATA_OVERRUN:
case BLOGIC_NOEXPECT_BUSFREE:
@@ -3857,7 +3858,6 @@ static struct scsi_host_template blogic_template = {
#endif
.unchecked_isa_dma = 1,
.max_sectors = 128,
- .use_clustering = ENABLE_CLUSTERING,
};
/*
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 00072ed9540b..ff53fd0d12f2 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1078,7 +1078,6 @@ static struct scsi_host_template inia100_template = {
.can_queue = 1,
.this_id = 1,
.sg_tablesize = SG_ALL,
- .use_clustering = ENABLE_CLUSTERING,
};
static int inia100_probe_one(struct pci_dev *pdev,
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 61aadc7acb49..c96bc7261a42 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -160,7 +160,7 @@ static struct scsi_host_template a2091_scsi_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = CMD_PER_LUN,
- .use_clustering = DISABLE_CLUSTERING
+ .dma_boundary = PAGE_SIZE - 1,
};
static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
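
This hunk shows the pattern used for hosts that previously disabled clustering outright: with the use_clustering flag removed, the same "never merge across a page" constraint is expressed through the DMA segment boundary mask. A sketch of the resulting template idiom (example driver name assumed):

    static struct scsi_host_template example_template = {
            .name         = "example",
            .sg_tablesize = SG_ALL,
            /* replaces .use_clustering = DISABLE_CLUSTERING: no DMA
             * segment may cross a page boundary */
            .dma_boundary = PAGE_SIZE - 1,
    };
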
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index 2427a8541247..dcf435f312dd 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -175,7 +175,6 @@ static struct scsi_host_template amiga_a3000_scsi_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = CMD_PER_LUN,
- .use_clustering = ENABLE_CLUSTERING
};
static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index bd7f352c28f3..75ab5ff6b78c 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -2892,6 +2892,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
!(dev->raw_io_64) ||
((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
break;
+ /* fall through */
case INQUIRY:
case READ_CAPACITY:
case TEST_UNIT_READY:
@@ -2966,6 +2967,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
/* Issue FIB to tell Firmware to flush it's cache */
if ((aac_cache & 6) != 2)
return aac_synchronize(scsicmd);
+ /* fall through */
case INQUIRY:
{
struct inquiry_data inq_data;
@@ -3319,8 +3321,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
min_t(size_t,
sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
- break;
+ break;
}
+ /* fall through */
case RESERVE:
case RELEASE:
case REZERO_UNIT:
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 39eb415987fc..3291d1c16864 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -40,6 +40,7 @@
#define nblank(x) _nblank(x)[0]
#include <linux/interrupt.h>
+#include <linux/completion.h>
#include <linux/pci.h>
#include <scsi/scsi_host.h>
@@ -1241,7 +1242,7 @@ struct aac_fib_context {
u32 unique; // unique value representing this context
ulong jiffies; // used for cleanup - dmb changed to ulong
struct list_head next; // used to link context's into a linked list
- struct semaphore wait_sem; // this is used to wait for the next fib to arrive.
+ struct completion completion; // this is used to wait for the next fib to arrive.
int wait; // Set to true when thread is in WaitForSingleObject
unsigned long count; // total number of FIBs on FibList
struct list_head fib_list; // this holds fibs and their attachd hw_fibs
@@ -1313,7 +1314,7 @@ struct fib {
* This is the event the sendfib routine will wait on if the
* caller did not pass one and this is synch io.
*/
- struct semaphore event_wait;
+ struct completion event_wait;
spinlock_t event_lock;
u32 done; /* gets set to 1 when fib is complete */
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 25f6600d6c09..e2899ff7913e 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -41,7 +41,6 @@
#include <linux/blkdev.h>
#include <linux/delay.h> /* ssleep prototype */
#include <linux/kthread.h>
-#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <scsi/scsi_host.h>
@@ -203,7 +202,7 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
/*
* Initialize the mutex used to wait for the next AIF.
*/
- sema_init(&fibctx->wait_sem, 0);
+ init_completion(&fibctx->completion);
fibctx->wait = 0;
/*
* Initialize the fibs and set the count of fibs on
@@ -335,7 +334,7 @@ return_fib:
ssleep(1);
}
if (f.wait) {
- if(down_interruptible(&fibctx->wait_sem) < 0) {
+ if (wait_for_completion_interruptible(&fibctx->completion) < 0) {
status = -ERESTARTSYS;
} else {
/* Lock again and retry */
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 1e77d96a18f2..d5a6aa9676c8 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -44,7 +44,6 @@
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
-#include <linux/semaphore.h>
#include <linux/bcd.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -189,7 +188,7 @@ int aac_fib_setup(struct aac_dev * dev)
fibptr->hw_fib_va = hw_fib;
fibptr->data = (void *) fibptr->hw_fib_va->data;
fibptr->next = fibptr+1; /* Forward chain the fibs */
- sema_init(&fibptr->event_wait, 0);
+ init_completion(&fibptr->event_wait);
spin_lock_init(&fibptr->event_lock);
hw_fib->header.XferState = cpu_to_le32(0xffffffff);
hw_fib->header.SenderSize =
@@ -623,7 +622,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
}
if (wait) {
fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
- if (down_interruptible(&fibptr->event_wait)) {
+ if (wait_for_completion_interruptible(&fibptr->event_wait)) {
fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
return -EFAULT;
}
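
From here on, aacraid's conversion from semaphores used as one-shot events to struct completion follows one mechanical mapping, visible in the hunks above and below. A side-by-side sketch with illustrative names (the driver's own fields are event_wait and the renamed wait_sem/completion):

    #include <linux/completion.h>

    struct demo { struct completion done; };

    static void demo_init(struct demo *d)    /* was sema_init(&s, 0) */
    {
            init_completion(&d->done);
    }

    static void demo_signal(struct demo *d)  /* was up(&s) */
    {
            complete(&d->done);
    }

    static int demo_wait(struct demo *d)     /* was down_interruptible(&s) */
    {
            return wait_for_completion_interruptible(&d->done);
    }

    static bool demo_pending(struct demo *d) /* was down_trylock(&s) != 0 */
    {
            return !try_wait_for_completion(&d->done);
    }
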
@@ -659,7 +658,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
* hardware failure has occurred.
*/
unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
- while (down_trylock(&fibptr->event_wait)) {
+ while (!try_wait_for_completion(&fibptr->event_wait)) {
int blink;
if (time_is_before_eq_jiffies(timeout)) {
struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
@@ -689,9 +688,9 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
*/
schedule();
}
- } else if (down_interruptible(&fibptr->event_wait)) {
+ } else if (wait_for_completion_interruptible(&fibptr->event_wait)) {
/* Do nothing ... satisfy
- * down_interruptible must_check */
+ * wait_for_completion_interruptible must_check */
}
spin_lock_irqsave(&fibptr->event_lock, flags);
@@ -777,7 +776,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
return -EFAULT;
fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
- if (down_interruptible(&fibptr->event_wait))
+ if (wait_for_completion_interruptible(&fibptr->event_wait))
fibptr->done = 2;
fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT);
@@ -1538,7 +1537,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
|| fib->flags & FIB_CONTEXT_FLAG_WAIT) {
unsigned long flagv;
spin_lock_irqsave(&fib->event_lock, flagv);
- up(&fib->event_wait);
+ complete(&fib->event_wait);
spin_unlock_irqrestore(&fib->event_lock, flagv);
schedule();
retval = 0;
@@ -1828,7 +1827,7 @@ int aac_check_health(struct aac_dev * aac)
* Set the event to wake up the
* thread that will waiting.
*/
- up(&fibctx->wait_sem);
+ complete(&fibctx->completion);
} else {
printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
kfree(fib);
@@ -2165,7 +2164,7 @@ static void wakeup_fibctx_threads(struct aac_dev *dev,
* Set the event to wake up the
* thread that is waiting.
*/
- up(&fibctx->wait_sem);
+ complete(&fibctx->completion);
entry = entry->next;
}
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index ddc69738375f..40a771dd1c0e 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -38,7 +38,6 @@
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
-#include <linux/semaphore.h>
#include "aacraid.h"
@@ -129,7 +128,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
spin_lock_irqsave(&fib->event_lock, flagv);
if (!fib->done) {
fib->done = 1;
- up(&fib->event_wait);
+ complete(&fib->event_wait);
}
spin_unlock_irqrestore(&fib->event_lock, flagv);
@@ -376,16 +375,16 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
start_callback = 1;
} else {
unsigned long flagv;
- int complete = 0;
+ int completed = 0;
dprintk((KERN_INFO "event_wait up\n"));
spin_lock_irqsave(&fib->event_lock, flagv);
if (fib->done == 2) {
fib->done = 1;
- complete = 1;
+ completed = 1;
} else {
fib->done = 1;
- up(&fib->event_wait);
+ complete(&fib->event_wait);
}
spin_unlock_irqrestore(&fib->event_lock, flagv);
@@ -395,7 +394,7 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
mflags);
FIB_COUNTER_INCREMENT(aac_config.NativeRecved);
- if (complete)
+ if (completed)
aac_fib_complete(fib);
}
} else {
@@ -428,16 +427,16 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
start_callback = 1;
} else {
unsigned long flagv;
- int complete = 0;
+ int completed = 0;
dprintk((KERN_INFO "event_wait up\n"));
spin_lock_irqsave(&fib->event_lock, flagv);
if (fib->done == 2) {
fib->done = 1;
- complete = 1;
+ completed = 1;
} else {
fib->done = 1;
- up(&fib->event_wait);
+ complete(&fib->event_wait);
}
spin_unlock_irqrestore(&fib->event_lock, flagv);
@@ -447,7 +446,7 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
mflags);
FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
- if (complete)
+ if (completed)
aac_fib_complete(fib);
}
}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 2d4e4ddc5ace..634ddb90e7aa 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -759,6 +759,7 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
!(aac->raw_io_64) ||
((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
break;
+ /* fall through */
case INQUIRY:
case READ_CAPACITY:
/*
@@ -1539,7 +1540,6 @@ static struct scsi_host_template aac_driver_template = {
#else
.cmd_per_lun = AAC_NUM_IO_FIB,
#endif
- .use_clustering = ENABLE_CLUSTERING,
.emulated = 1,
.no_write_same = 1,
};
@@ -1559,7 +1559,7 @@ static void __aac_shutdown(struct aac_dev * aac)
struct fib *fib = &aac->fibs[i];
if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
(fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected)))
- up(&fib->event_wait);
+ complete(&fib->event_wait);
}
kthread_stop(aac->thread);
aac->thread = NULL;
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 7a51ccfa8662..8377aec0649d 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -106,7 +106,7 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
dev->management_fib_count--;
- up(&dev->sync_fib->event_wait);
+ complete(&dev->sync_fib->event_wait);
}
spin_unlock_irqrestore(&dev->sync_fib->event_lock,
sflags);
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 223ef6f4e258..d37584403c33 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -3192,8 +3192,8 @@ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)
shost->sg_tablesize, shost->cmd_per_lun);
seq_printf(m,
- " unchecked_isa_dma %d, use_clustering %d\n",
- shost->unchecked_isa_dma, shost->use_clustering);
+ " unchecked_isa_dma %d\n",
+ shost->unchecked_isa_dma);
seq_printf(m,
" flags 0x%x, last_reset 0x%lx, jiffies 0x%lx, asc_n_io_port 0x%x\n",
@@ -10808,14 +10808,6 @@ static struct scsi_host_template advansys_template = {
* for non-ISA adapters.
*/
.unchecked_isa_dma = true,
- /*
- * All adapters controlled by this driver are capable of large
- * scatter-gather lists. According to the mid-level SCSI documentation
- * this obviates any performance gain provided by setting
- * 'use_clustering'. But empirically while CPU utilization is increased
- * by enabling clustering, I/O throughput increases as well.
- */
- .use_clustering = ENABLE_CLUSTERING,
};
static int advansys_wide_init_chip(struct Scsi_Host *shost)
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 301b3cad15f8..97872838b983 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -2920,7 +2920,7 @@ static struct scsi_host_template aha152x_driver_template = {
.can_queue = 1,
.this_id = 7,
.sg_tablesize = SG_ALL,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
.slave_alloc = aha152x_adjust_queue,
};
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 41add33e3f1f..ba7a5725be04 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -58,8 +58,15 @@ struct aha1542_hostdata {
int aha1542_last_mbi_used;
int aha1542_last_mbo_used;
struct scsi_cmnd *int_cmds[AHA1542_MAILBOXES];
- struct mailbox mb[2 * AHA1542_MAILBOXES];
- struct ccb ccb[AHA1542_MAILBOXES];
+ struct mailbox *mb;
+ dma_addr_t mb_handle;
+ struct ccb *ccb;
+ dma_addr_t ccb_handle;
+};
+
+struct aha1542_cmd {
+ struct chain *chain;
+ dma_addr_t chain_handle;
};
static inline void aha1542_intr_reset(u16 base)
@@ -233,6 +240,21 @@ static int aha1542_test_port(struct Scsi_Host *sh)
return 1;
}
+static void aha1542_free_cmd(struct scsi_cmnd *cmd)
+{
+ struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
+ struct device *dev = cmd->device->host->dma_dev;
+ size_t len = scsi_sg_count(cmd) * sizeof(struct chain);
+
+ if (acmd->chain) {
+ dma_unmap_single(dev, acmd->chain_handle, len, DMA_TO_DEVICE);
+ kfree(acmd->chain);
+ }
+
+ acmd->chain = NULL;
+ scsi_dma_unmap(cmd);
+}
+
static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
{
struct Scsi_Host *sh = dev_id;
@@ -303,7 +325,7 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
};
- mbo = (scsi2int(mb[mbi].ccbptr) - (isa_virt_to_bus(&ccb[0]))) / sizeof(struct ccb);
+ mbo = (scsi2int(mb[mbi].ccbptr) - (unsigned long)aha1542->ccb_handle) / sizeof(struct ccb);
mbistatus = mb[mbi].status;
mb[mbi].status = 0;
aha1542->aha1542_last_mbi_used = mbi;
@@ -331,8 +353,7 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
my_done = tmp_cmd->scsi_done;
- kfree(tmp_cmd->host_scribble);
- tmp_cmd->host_scribble = NULL;
+ aha1542_free_cmd(tmp_cmd);
/* Fetch the sense data, and tuck it away, in the required slot. The
Adaptec automatically fetches it, and there is no guarantee that
we will still have it in the cdb when we come back */
@@ -369,6 +390,7 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
+ struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
struct aha1542_hostdata *aha1542 = shost_priv(sh);
u8 direction;
u8 target = cmd->device->id;
@@ -378,7 +400,6 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
int mbo, sg_count;
struct mailbox *mb = aha1542->mb;
struct ccb *ccb = aha1542->ccb;
- struct chain *cptr;
if (*cmd->cmnd == REQUEST_SENSE) {
/* Don't do the command - we have the sense data already */
@@ -398,15 +419,17 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len);
}
#endif
- if (bufflen) { /* allocate memory before taking host_lock */
- sg_count = scsi_sg_count(cmd);
- cptr = kmalloc_array(sg_count, sizeof(*cptr),
- GFP_KERNEL | GFP_DMA);
- if (!cptr)
- return SCSI_MLQUEUE_HOST_BUSY;
- } else {
- sg_count = 0;
- cptr = NULL;
+ sg_count = scsi_dma_map(cmd);
+ if (sg_count) {
+ size_t len = sg_count * sizeof(struct chain);
+
+ acmd->chain = kmalloc(len, GFP_DMA);
+ if (!acmd->chain)
+ goto out_unmap;
+ acmd->chain_handle = dma_map_single(sh->dma_dev, acmd->chain,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(sh->dma_dev, acmd->chain_handle))
+ goto out_free_chain;
}
/* Use the outgoing mailboxes in a round-robin fashion, because this
@@ -437,7 +460,8 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
shost_printk(KERN_DEBUG, sh, "Sending command (%d %p)...", mbo, cmd->scsi_done);
#endif
- any2scsi(mb[mbo].ccbptr, isa_virt_to_bus(&ccb[mbo])); /* This gets trashed for some reason */
+ /* This gets trashed for some reason */
+ any2scsi(mb[mbo].ccbptr, aha1542->ccb_handle + mbo * sizeof(*ccb));
memset(&ccb[mbo], 0, sizeof(struct ccb));
@@ -456,21 +480,18 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
int i;
ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */
- cmd->host_scribble = (void *)cptr;
scsi_for_each_sg(cmd, sg, sg_count, i) {
- any2scsi(cptr[i].dataptr, isa_page_to_bus(sg_page(sg))
- + sg->offset);
- any2scsi(cptr[i].datalen, sg->length);
+ any2scsi(acmd->chain[i].dataptr, sg_dma_address(sg));
+ any2scsi(acmd->chain[i].datalen, sg_dma_len(sg));
};
any2scsi(ccb[mbo].datalen, sg_count * sizeof(struct chain));
- any2scsi(ccb[mbo].dataptr, isa_virt_to_bus(cptr));
+ any2scsi(ccb[mbo].dataptr, acmd->chain_handle);
#ifdef DEBUG
- shost_printk(KERN_DEBUG, sh, "cptr %p: ", cptr);
- print_hex_dump_bytes("cptr: ", DUMP_PREFIX_NONE, cptr, 18);
+ shost_printk(KERN_DEBUG, sh, "cptr %p: ", acmd->chain);
+ print_hex_dump_bytes("cptr: ", DUMP_PREFIX_NONE, acmd->chain, 18);
#endif
} else {
ccb[mbo].op = 0; /* SCSI Initiator Command */
- cmd->host_scribble = NULL;
any2scsi(ccb[mbo].datalen, 0);
any2scsi(ccb[mbo].dataptr, 0);
};
@@ -488,24 +509,29 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
spin_unlock_irqrestore(sh->host_lock, flags);
return 0;
+out_free_chain:
+ kfree(acmd->chain);
+ acmd->chain = NULL;
+out_unmap:
+ scsi_dma_unmap(cmd);
+ return SCSI_MLQUEUE_HOST_BUSY;
}
/* Initialize mailboxes */
static void setup_mailboxes(struct Scsi_Host *sh)
{
struct aha1542_hostdata *aha1542 = shost_priv(sh);
- int i;
- struct mailbox *mb = aha1542->mb;
- struct ccb *ccb = aha1542->ccb;
-
u8 mb_cmd[5] = { CMD_MBINIT, AHA1542_MAILBOXES, 0, 0, 0};
+ int i;
for (i = 0; i < AHA1542_MAILBOXES; i++) {
- mb[i].status = mb[AHA1542_MAILBOXES + i].status = 0;
- any2scsi(mb[i].ccbptr, isa_virt_to_bus(&ccb[i]));
+ aha1542->mb[i].status = 0;
+ any2scsi(aha1542->mb[i].ccbptr,
+ aha1542->ccb_handle + i * sizeof(struct ccb));
+ aha1542->mb[AHA1542_MAILBOXES + i].status = 0;
};
aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */
- any2scsi((mb_cmd + 2), isa_virt_to_bus(mb));
+ any2scsi(mb_cmd + 2, aha1542->mb_handle);
if (aha1542_out(sh->io_port, mb_cmd, 5))
shost_printk(KERN_ERR, sh, "failed setting up mailboxes\n");
aha1542_intr_reset(sh->io_port);
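
The queuecommand rework above follows the usual DMA-API unwind idiom: map with scsi_dma_map()/dma_map_single(), check dma_mapping_error(), and undo completed steps in reverse order via goto labels. A self-contained sketch of that idiom (generic names, not the aha1542 code):

    static int demo_send(struct device *dev, size_t len)
    {
            dma_addr_t handle;
            void *buf = kmalloc(len, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;
            handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, handle))
                    goto out_free;  /* unwind in reverse order */
            /* ... hand handle to the adapter, wait for completion ... */
            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
            kfree(buf);
            return 0;
    out_free:
            kfree(buf);
            return -EIO;
    }
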
@@ -739,11 +765,26 @@ static struct Scsi_Host *aha1542_hw_init(struct scsi_host_template *tpnt, struct
if (aha1542->bios_translation == BIOS_TRANSLATION_25563)
shost_printk(KERN_INFO, sh, "Using extended bios translation\n");
+ if (dma_set_mask_and_coherent(pdev, DMA_BIT_MASK(24)) < 0)
+ goto unregister;
+
+ aha1542->mb = dma_alloc_coherent(pdev,
+ AHA1542_MAILBOXES * 2 * sizeof(struct mailbox),
+ &aha1542->mb_handle, GFP_KERNEL);
+ if (!aha1542->mb)
+ goto unregister;
+
+ aha1542->ccb = dma_alloc_coherent(pdev,
+ AHA1542_MAILBOXES * sizeof(struct ccb),
+ &aha1542->ccb_handle, GFP_KERNEL);
+ if (!aha1542->ccb)
+ goto free_mb;
+
setup_mailboxes(sh);
if (request_irq(sh->irq, aha1542_interrupt, 0, "aha1542", sh)) {
shost_printk(KERN_ERR, sh, "Unable to allocate IRQ.\n");
- goto unregister;
+ goto free_ccb;
}
if (sh->dma_channel != 0xFF) {
if (request_dma(sh->dma_channel, "aha1542")) {
@@ -762,11 +803,18 @@ static struct Scsi_Host *aha1542_hw_init(struct scsi_host_template *tpnt, struct
scsi_scan_host(sh);
return sh;
+
free_dma:
if (sh->dma_channel != 0xff)
free_dma(sh->dma_channel);
free_irq:
free_irq(sh->irq, sh);
+free_ccb:
+ dma_free_coherent(pdev, AHA1542_MAILBOXES * sizeof(struct ccb),
+ aha1542->ccb, aha1542->ccb_handle);
+free_mb:
+ dma_free_coherent(pdev, AHA1542_MAILBOXES * 2 * sizeof(struct mailbox),
+ aha1542->mb, aha1542->mb_handle);
unregister:
scsi_host_put(sh);
release:
@@ -777,9 +825,16 @@ release:
static int aha1542_release(struct Scsi_Host *sh)
{
+ struct aha1542_hostdata *aha1542 = shost_priv(sh);
+ struct device *dev = sh->dma_dev;
+
scsi_remove_host(sh);
if (sh->dma_channel != 0xff)
free_dma(sh->dma_channel);
+ dma_free_coherent(dev, AHA1542_MAILBOXES * sizeof(struct ccb),
+ aha1542->ccb, aha1542->ccb_handle);
+ dma_free_coherent(dev, AHA1542_MAILBOXES * 2 * sizeof(struct mailbox),
+ aha1542->mb, aha1542->mb_handle);
if (sh->irq)
free_irq(sh->irq, sh);
if (sh->io_port && sh->n_io_port)
@@ -826,7 +881,8 @@ static int aha1542_dev_reset(struct scsi_cmnd *cmd)
aha1542->aha1542_last_mbo_used = mbo;
- any2scsi(mb[mbo].ccbptr, isa_virt_to_bus(&ccb[mbo])); /* This gets trashed for some reason */
+ /* This gets trashed for some reason */
+ any2scsi(mb[mbo].ccbptr, aha1542->ccb_handle + mbo * sizeof(*ccb));
memset(&ccb[mbo], 0, sizeof(struct ccb));
@@ -901,8 +957,7 @@ static int aha1542_reset(struct scsi_cmnd *cmd, u8 reset_cmd)
*/
continue;
}
- kfree(tmp_cmd->host_scribble);
- tmp_cmd->host_scribble = NULL;
+ aha1542_free_cmd(tmp_cmd);
aha1542->int_cmds[i] = NULL;
aha1542->mb[i].status = 0;
}
@@ -946,6 +1001,7 @@ static struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.proc_name = "aha1542",
.name = "Adaptec 1542",
+ .cmd_size = sizeof(struct aha1542_cmd),
.queuecommand = aha1542_queuecommand,
.eh_device_reset_handler= aha1542_dev_reset,
.eh_bus_reset_handler = aha1542_bus_reset,
@@ -955,7 +1011,6 @@ static struct scsi_host_template driver_template = {
.this_id = 7,
.sg_tablesize = 16,
.unchecked_isa_dma = 1,
- .use_clustering = ENABLE_CLUSTERING,
};
static int aha1542_isa_match(struct device *pdev, unsigned int ndev)
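The aha1542 changes above replace isa_virt_to_bus() arithmetic with coherent DMA ring allocations: the mailbox and CCB arrays come from dma_alloc_coherent() under a 24-bit mask, and each mailbox's CCB pointer is computed as ccb_handle + i * sizeof(struct ccb). A minimal sketch of that allocation pattern, assuming the driver's aha1542_hostdata fields and a hypothetical helper name:

#include <linux/dma-mapping.h>

/* Sketch: allocate the mailbox and CCB arrays in 24-bit addressable,
 * coherent memory, recording the bus handles the controller will use. */
static int aha1542_alloc_rings(struct device *dev, struct aha1542_hostdata *h)
{
        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(24)))
                return -ENODEV;         /* ISA card: must stay below 16 MB */

        h->mb = dma_alloc_coherent(dev,
                        AHA1542_MAILBOXES * 2 * sizeof(struct mailbox),
                        &h->mb_handle, GFP_KERNEL);
        if (!h->mb)
                return -ENOMEM;

        h->ccb = dma_alloc_coherent(dev,
                        AHA1542_MAILBOXES * sizeof(struct ccb),
                        &h->ccb_handle, GFP_KERNEL);
        if (!h->ccb) {
                dma_free_coherent(dev,
                        AHA1542_MAILBOXES * 2 * sizeof(struct mailbox),
                        h->mb, h->mb_handle);
                return -ENOMEM;
        }
        return 0;
}

The matching dma_free_coherent() calls in the release path must pass the same device and sizes, which is why aha1542_release() above fetches the device from sh->dma_dev.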
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index 786bf7f32c64..da4150c17781 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -545,7 +545,6 @@ static struct scsi_host_template aha1740_template = {
.can_queue = AHA1740_ECBS,
.this_id = 7,
.sg_tablesize = AHA1740_SCATTER,
- .use_clustering = ENABLE_CLUSTERING,
.eh_abort_handler = aha1740_eh_abort_handler,
};
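The many ".use_clustering = ENABLE_CLUSTERING" deletions in this series are mechanical: with the flag removed from scsi_host_template, segment merging is simply the block layer's default. Drivers that genuinely cannot accept merged segments now state the constraint directly, as in this sketch with a hypothetical template name:

/* Was:  .use_clustering = DISABLE_CLUSTERING,
 * Now:  cap every DMA segment at one page, which forbids merging
 *       across a page boundary and preserves the old behaviour. */
static struct scsi_host_template example_sht = {
        .name           = "example",
        .dma_boundary   = PAGE_SIZE - 1,
};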
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 2588b8f84ba0..57992519384e 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -920,7 +920,6 @@ struct scsi_host_template aic79xx_driver_template = {
.this_id = -1,
.max_sectors = 8192,
.cmd_per_lun = 2,
- .use_clustering = ENABLE_CLUSTERING,
.slave_alloc = ahd_linux_slave_alloc,
.slave_configure = ahd_linux_slave_configure,
.target_alloc = ahd_linux_target_alloc,
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index c6be3aeb302b..3c9c17450bb3 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -807,7 +807,6 @@ struct scsi_host_template aic7xxx_driver_template = {
.this_id = -1,
.max_sectors = 8192,
.cmd_per_lun = 2,
- .use_clustering = ENABLE_CLUSTERING,
.slave_alloc = ahc_linux_slave_alloc,
.slave_configure = ahc_linux_slave_configure,
.target_alloc = ahc_linux_target_alloc,
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 3b8ad55e59de..2bc7615193bd 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -1057,14 +1057,13 @@ static struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha,
if (ascb) {
ascb->dma_scb.size = sizeof(struct scb);
- ascb->dma_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool,
+ ascb->dma_scb.vaddr = dma_pool_zalloc(asd_ha->scb_pool,
gfp_flags,
&ascb->dma_scb.dma_handle);
if (!ascb->dma_scb.vaddr) {
kmem_cache_free(asd_ascb_cache, ascb);
return NULL;
}
- memset(ascb->dma_scb.vaddr, 0, sizeof(struct scb));
asd_init_ascb(asd_ha, ascb);
spin_lock_irqsave(&seq->tc_index_lock, flags);
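dma_pool_zalloc() folds the follow-up memset() into the allocation, mirroring kzalloc()'s relationship to kmalloc(). The general shape, as a sketch:

#include <linux/dmapool.h>

void *vaddr;
dma_addr_t dma_handle;

/* Returns zeroed, DMA-mapped pool memory; the bus address lands in
 * dma_handle. No separate memset() is needed afterwards. */
vaddr = dma_pool_zalloc(pool, GFP_KERNEL, &dma_handle);
if (!vaddr)
        return -ENOMEM;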
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 41c4d8abdd4a..f83f79b07b50 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -68,7 +68,6 @@ static struct scsi_host_template aic94xx_sht = {
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
- .use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index d4404eea24fb..0f6751b0a633 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -156,7 +156,6 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
.sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
.max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
.cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = arcmsr_host_attrs,
.no_write_same = 1,
};
@@ -903,9 +902,9 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if(!host){
goto pci_disable_dev;
}
- error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if(error){
- error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if(error){
printk(KERN_WARNING
"scsi%d: No suitable DMA mask available\n",
@@ -1049,9 +1048,9 @@ static int arcmsr_resume(struct pci_dev *pdev)
pr_warn("%s: pci_enable_device error\n", __func__);
return -ENODEV;
}
- error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (error) {
- error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (error) {
pr_warn("scsi%d: No suitable DMA mask available\n",
host->host_no);
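arcmsr's probe and resume paths now use the generic DMA API directly on &pdev->dev instead of the deprecated pci_set_dma_mask() wrappers. The 64-bit-then-32-bit fallback has a common shape, sketched here generically rather than as the driver's exact error handling (dma_set_mask() returns 0 on success):

if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
        dev_warn(&pdev->dev, "no suitable DMA mask available\n");
        return -ENODEV;
}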
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 421fe869a11e..d7509859dc00 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -2890,7 +2890,7 @@ static struct scsi_host_template acornscsi_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
.proc_name = "acornscsi",
};
diff --git a/drivers/scsi/arm/arxescsi.c b/drivers/scsi/arm/arxescsi.c
index 3110736fd337..5e9dd9f34821 100644
--- a/drivers/scsi/arm/arxescsi.c
+++ b/drivers/scsi/arm/arxescsi.c
@@ -245,7 +245,7 @@ static struct scsi_host_template arxescsi_template = {
.can_queue = 0,
.this_id = 7,
.sg_tablesize = SG_ALL,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
.proc_name = "arxescsi",
};
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index ae1d809904fb..e2d2a81d8e0b 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -221,10 +221,10 @@ static struct scsi_host_template cumanascsi_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
- .use_clustering = DISABLE_CLUSTERING,
.proc_name = "CumanaSCSI-1",
.cmd_size = NCR5380_CMD_SIZE,
.max_sectors = 128,
+ .dma_boundary = PAGE_SIZE - 1,
};
static int cumanascsi1_probe(struct expansion_card *ec,
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index edce5f3cfdba..40afcbd8de61 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -367,7 +367,6 @@ static struct scsi_host_template cumanascsi2_template = {
.this_id = 7,
.sg_tablesize = SG_MAX_SEGMENTS,
.dma_boundary = IOMD_DMA_BOUNDARY,
- .use_clustering = DISABLE_CLUSTERING,
.proc_name = "cumanascsi2",
};
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
index e93e047f4316..8f64c370a8a7 100644
--- a/drivers/scsi/arm/eesox.c
+++ b/drivers/scsi/arm/eesox.c
@@ -486,7 +486,6 @@ static struct scsi_host_template eesox_template = {
.this_id = 7,
.sg_tablesize = SG_MAX_SEGMENTS,
.dma_boundary = IOMD_DMA_BOUNDARY,
- .use_clustering = DISABLE_CLUSTERING,
.proc_name = "eesox",
};
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index 05b7f755499b..8f2efaab8d46 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -110,7 +110,7 @@ static struct scsi_host_template oakscsi_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
.proc_name = "oakscsi",
.cmd_size = NCR5380_CMD_SIZE,
.max_sectors = 128,
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index 79aa88911b7f..759f95ba993c 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -294,7 +294,6 @@ static struct scsi_host_template powertecscsi_template = {
.sg_tablesize = SG_MAX_SEGMENTS,
.dma_boundary = IOMD_DMA_BOUNDARY,
.cmd_per_lun = 2,
- .use_clustering = ENABLE_CLUSTERING,
.proc_name = "powertec",
};
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index 89f5154c40b6..a503dc50c4f8 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -714,7 +714,7 @@ static struct scsi_host_template atari_scsi_template = {
.eh_host_reset_handler = atari_scsi_host_reset,
.this_id = 7,
.cmd_per_lun = 2,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
.cmd_size = NCR5380_CMD_SIZE,
};
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 802d15018ec0..1267200380f8 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1681,7 +1681,6 @@ static struct scsi_host_template atp870u_template = {
.can_queue = qcnt /* can_queue */,
.this_id = 7 /* SCSI ID */,
.sg_tablesize = ATP870U_SCATTER /*SG_ALL*/ /*SG_NONE*/,
- .use_clustering = ENABLE_CLUSTERING,
.max_sectors = ATP870U_MAX_SECTORS,
};
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index effb6fc95af4..39f3820572b4 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -214,12 +214,6 @@ static char const *cqe_desc[] = {
"CXN_KILLED_IMM_DATA_RCVD"
};
-static int beiscsi_slave_configure(struct scsi_device *sdev)
-{
- blk_queue_max_segment_size(sdev->request_queue, 65536);
- return 0;
-}
-
static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
struct iscsi_task *abrt_task = (struct iscsi_task *)sc->SCp.ptr;
@@ -393,7 +387,6 @@ static struct scsi_host_template beiscsi_sht = {
.proc_name = DRV_NAME,
.queuecommand = iscsi_queuecommand,
.change_queue_depth = scsi_change_queue_depth,
- .slave_configure = beiscsi_slave_configure,
.target_alloc = iscsi_target_alloc,
.eh_timed_out = iscsi_eh_cmd_timed_out,
.eh_abort_handler = beiscsi_eh_abort,
@@ -404,8 +397,8 @@ static struct scsi_host_template beiscsi_sht = {
.can_queue = BE2_IO_DEPTH,
.this_id = -1,
.max_sectors = BEISCSI_MAX_SECTORS,
+ .max_segment_size = 65536,
.cmd_per_lun = BEISCSI_CMD_PER_LUN,
- .use_clustering = ENABLE_CLUSTERING,
.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
.track_queue_depth = 1,
};
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 16d3aeb0e572..9631877aba4f 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -3819,7 +3819,7 @@ bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
sfp->state = BFA_SFP_STATE_REMOVED;
sfp->data_valid = 0;
bfa_sfp_scn_aen_post(sfp, rsp);
- break;
+ break;
case BFA_SFP_SCN_FAILED:
sfp->state = BFA_SFP_STATE_FAILED;
sfp->data_valid = 0;
@@ -5763,7 +5763,7 @@ bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
(struct bfa_phy_stats_s *) phy->ubuf;
bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
sizeof(struct bfa_phy_stats_s));
- bfa_trc(phy, stats->status);
+ bfa_trc(phy, stats->status);
}
phy->status = status;
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 911efc98d1fd..42a0caf6740d 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -739,14 +739,10 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
pci_set_master(pdev);
-
- if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
- (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
- if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
- (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
- printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
- goto out_release_region;
- }
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ printk(KERN_ERR "dma_set_mask_and_coherent fail %p\n", pdev);
+ goto out_release_region;
}
/* Enable PCIE Advanced Error Recovery (AER) if kernel supports */
@@ -1565,9 +1561,9 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
pci_save_state(pdev);
pci_set_master(pdev);
- if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(64)) != 0)
- if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(32)) != 0)
- goto out_disable_device;
+ if (dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(32)))
+ goto out_disable_device;
if (restart_bfa(bfad) == -1)
goto out_disable_device;
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index c4a33317d344..394930cbaa13 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -817,7 +817,6 @@ struct scsi_host_template bfad_im_scsi_host_template = {
.this_id = -1,
.sg_tablesize = BFAD_IO_MAX_SGE,
.cmd_per_lun = 3,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = bfad_im_host_attrs,
.max_sectors = BFAD_MAX_SECTORS,
.vendor_id = BFA_PCI_VENDOR_ID_BROCADE,
@@ -840,7 +839,6 @@ struct scsi_host_template bfad_im_vport_template = {
.this_id = -1,
.sg_tablesize = BFAD_IO_MAX_SGE,
.cmd_per_lun = 3,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = bfad_im_vport_attrs,
.max_sectors = BFAD_MAX_SECTORS,
};
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index bcd30e2374f1..2e4e7159ebf9 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2970,7 +2970,6 @@ static struct scsi_host_template bnx2fc_shost_template = {
.change_queue_depth = scsi_change_queue_depth,
.this_id = -1,
.cmd_per_lun = 3,
- .use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
.max_sectors = 1024,
.track_queue_depth = 1,
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 6bad2689edd4..91f5316aa3ab 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2427,7 +2427,6 @@ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
{
u32 cid_addr;
struct bnx2i_endpoint *ep;
- u32 cid_num;
ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id);
if (!ep) {
@@ -2462,7 +2461,6 @@ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
} else {
ep->state = EP_STATE_OFLD_COMPL;
cid_addr = ofld_kcqe->iscsi_conn_context_id;
- cid_num = bnx2i_get_cid_num(ep);
ep->ep_cid = cid_addr;
ep->qp.ctx_base = NULL;
}
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index de0a507577ef..69c75426c5eb 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -2263,7 +2263,6 @@ static struct scsi_host_template bnx2i_host_template = {
.max_sectors = 127,
.cmd_per_lun = 128,
.this_id = -1,
- .use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
.shost_attrs = bnx2i_dev_attributes,
.track_queue_depth = 1,
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 1a458ce08210..cf629380a981 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -255,7 +255,6 @@ static void
csio_hw_exit_workers(struct csio_hw *hw)
{
cancel_work_sync(&hw->evtq_work);
- flush_scheduled_work();
}
static int
@@ -646,7 +645,7 @@ csio_shost_init(struct csio_hw *hw, struct device *dev,
if (csio_lnode_init(ln, hw, pln))
goto err_shost_put;
- if (scsi_add_host(shost, dev))
+ if (scsi_add_host_with_dma(shost, dev, &hw->pdev->dev))
goto err_lnode_exit;
return ln;
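csiostor previously registered the lnode's struct device for both sysfs parentage and DMA mapping. scsi_add_host_with_dma() splits the two, keeping the FC lnode as the sysfs parent while routing all DMA through the owning PCI function, which is the device that actually carries a DMA mask and IOMMU context:

/* int scsi_add_host_with_dma(struct Scsi_Host *shost,
 *                            struct device *parent,
 *                            struct device *dma_dev);
 * scsi_add_host(shost, dev) remains the shorthand for the common
 * case where parent and dma_dev are the same device. */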
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index a95debbea0e4..bc5547a62c00 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -2274,7 +2274,6 @@ struct scsi_host_template csio_fcoe_shost_template = {
.this_id = -1,
.sg_tablesize = CSIO_SCSI_MAX_SGE,
.cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = csio_fcoe_lport_attrs,
.max_sectors = CSIO_MAX_SECTOR_SIZE,
};
@@ -2294,7 +2293,6 @@ struct scsi_host_template csio_fcoe_shost_vport_template = {
.this_id = -1,
.sg_tablesize = CSIO_SCSI_MAX_SGE,
.cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = csio_fcoe_vport_attrs,
.max_sectors = CSIO_MAX_SECTOR_SIZE,
};
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index bf07735275a4..8a20411699d9 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -95,7 +95,7 @@ static struct scsi_host_template cxgb3i_host_template = {
.eh_device_reset_handler = iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
.target_alloc = iscsi_target_alloc,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
.this_id = -1,
.track_queue_depth = 1,
};
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig
index 594f593c8821..f36b76e8e12c 100644
--- a/drivers/scsi/cxgbi/cxgb4i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig
@@ -1,8 +1,8 @@
config SCSI_CXGB4_ISCSI
tristate "Chelsio T4 iSCSI support"
depends on PCI && INET && (IPV6 || IPV6=n)
- select NETDEVICES
- select ETHERNET
+ depends on THERMAL || !THERMAL
+ depends on ETHERNET
select NET_VENDOR_CHELSIO
select CHELSIO_T4
select CHELSIO_LIB
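The Kconfig change trades two misuses of "select" for dependencies: NETDEVICES and ETHERNET are subsystem-gating options the user should choose, not symbols a leaf driver forces on. The odd-looking "depends on THERMAL || !THERMAL" is the standard kconfig idiom for a tristate ordering constraint: it is always satisfiable, yet it forbids SCSI_CXGB4_ISCSI=y while THERMAL=m, so a built-in cxgb4i can never end up linked against a modular thermal core (the underlying cxgb4 driver, which this option selects, had grown a THERMAL dependency).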
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 907dd8792a0a..49f8028ac524 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -113,7 +113,7 @@ static struct scsi_host_template cxgb4i_host_template = {
.eh_device_reset_handler = iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
.target_alloc = iscsi_target_alloc,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
.this_id = -1,
.track_queue_depth = 1,
};
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index abdc9eac4173..bfa13e3b191c 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3174,7 +3174,6 @@ static struct scsi_host_template driver_template = {
.this_id = -1,
.sg_tablesize = 1, /* No scatter gather support */
.max_sectors = CXLFLASH_MAX_SECTORS,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = cxlflash_host_attrs,
.sdev_attrs = cxlflash_dev_attrs,
};
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 8c55ec6e1827..13fbb2eab842 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -4631,7 +4631,7 @@ static struct scsi_host_template dc395x_driver_template = {
.cmd_per_lun = DC395x_MAX_CMD_PER_LUN,
.eh_abort_handler = dc395x_eh_abort,
.eh_bus_reset_handler = dc395x_eh_bus_reset,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
};
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index 003c3d726238..8db1cc552932 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -63,7 +63,7 @@ static struct scsi_host_template dmx3191d_driver_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
.cmd_size = NCR5380_CMD_SIZE,
};
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 37de8fb186d7..70d1a18278af 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -934,15 +934,15 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
* See if we should enable dma64 mode.
*/
if (sizeof(dma_addr_t) > 4 &&
- pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
- if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
- dma64 = 1;
- }
- if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
+ dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
+ dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
+ dma64 = 1;
+
+ if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
return -EINVAL;
/* adapter only supports message blocks below 4GB */
- pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));
+ dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));
base_addr0_phys = pci_resource_start(pDev,0);
hba_map0_area_size = pci_resource_len(pDev,0);
@@ -3569,7 +3569,6 @@ static struct scsi_host_template driver_template = {
.slave_configure = adpt_slave_configure,
.can_queue = MAX_TO_IOP_MESSAGES,
.this_id = 7,
- .use_clustering = ENABLE_CLUSTERING,
};
static int __init adpt_init(void)
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index bbe77db8938d..46b2c83ba21f 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -266,6 +266,7 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
int i;
void *next_uncached;
struct esas2r_request *first_request, *last_request;
+ bool dma64 = false;
if (index >= MAX_ADAPTERS) {
esas2r_log(ESAS2R_LOG_CRIT,
@@ -286,42 +287,20 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
a->pcid = pcid;
a->host = host;
- if (sizeof(dma_addr_t) > 4) {
- const uint64_t required_mask = dma_get_required_mask
- (&pcid->dev);
- if (required_mask > DMA_BIT_MASK(32)
- && !pci_set_dma_mask(pcid, DMA_BIT_MASK(64))
- && !pci_set_consistent_dma_mask(pcid,
- DMA_BIT_MASK(64))) {
- esas2r_log_dev(ESAS2R_LOG_INFO,
- &(a->pcid->dev),
- "64-bit PCI addressing enabled\n");
- } else if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
- && !pci_set_consistent_dma_mask(pcid,
- DMA_BIT_MASK(32))) {
- esas2r_log_dev(ESAS2R_LOG_INFO,
- &(a->pcid->dev),
- "32-bit PCI addressing enabled\n");
- } else {
- esas2r_log(ESAS2R_LOG_CRIT,
- "failed to set DMA mask");
- esas2r_kill_adapter(index);
- return 0;
- }
- } else {
- if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
- && !pci_set_consistent_dma_mask(pcid,
- DMA_BIT_MASK(32))) {
- esas2r_log_dev(ESAS2R_LOG_INFO,
- &(a->pcid->dev),
- "32-bit PCI addressing enabled\n");
- } else {
- esas2r_log(ESAS2R_LOG_CRIT,
- "failed to set DMA mask");
- esas2r_kill_adapter(index);
- return 0;
- }
+ if (sizeof(dma_addr_t) > 4 &&
+ dma_get_required_mask(&pcid->dev) > DMA_BIT_MASK(32) &&
+ !dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(64)))
+ dma64 = true;
+
+ if (!dma64 && dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(32))) {
+ esas2r_log(ESAS2R_LOG_CRIT, "failed to set DMA mask");
+ esas2r_kill_adapter(index);
+ return 0;
}
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &pcid->dev,
+ "%s-bit PCI addressing enabled\n", dma64 ? "64" : "32");
+
esas2r_adapters[index] = a;
sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index);
esas2r_debug("new adapter %p, name %s", a, a->name);
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index c07118617d89..64397d441bae 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -250,7 +250,6 @@ static struct scsi_host_template driver_template = {
ESAS2R_DEFAULT_CMD_PER_LUN,
.present = 0,
.unchecked_isa_dma = 0,
- .use_clustering = ENABLE_CLUSTERING,
.emulated = 0,
.proc_name = ESAS2R_DRVR_NAME,
.change_queue_depth = scsi_change_queue_depth,
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index ac7da9db7317..465df475f753 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -2676,7 +2676,6 @@ struct scsi_host_template scsi_esp_template = {
.can_queue = 7,
.this_id = 7,
.sg_tablesize = SG_ALL,
- .use_clustering = ENABLE_CLUSTERING,
.max_sectors = 0xffff,
.skip_settle_delay = 1,
};
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index f46b312d04bc..cd19be3f3405 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -286,7 +286,6 @@ static struct scsi_host_template fcoe_shost_template = {
.this_id = -1,
.cmd_per_lun = 3,
.can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
- .use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = SG_ALL,
.max_sectors = 0xffff,
.track_queue_depth = 1,
@@ -1670,7 +1669,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
struct fc_stats *stats;
struct fcoe_crc_eof crc_eof;
struct fc_frame *fp;
- struct fcoe_port *port;
struct fcoe_hdr *hp;
fr = fcoe_dev_from_skb(skb);
@@ -1688,7 +1686,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
skb_end_pointer(skb), skb->csum,
skb->dev ? skb->dev->name : "<NULL>");
- port = lport_priv(lport);
skb_linearize(skb); /* check for skb_is_nonlinear is within skb_linearize */
/*
@@ -1859,7 +1856,6 @@ static int fcoe_device_notification(struct notifier_block *notifier,
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
- struct fcoe_port *port;
struct fc_stats *stats;
u32 link_possible = 1;
u32 mfs;
@@ -1897,7 +1893,6 @@ static int fcoe_device_notification(struct notifier_block *notifier,
break;
case NETDEV_UNREGISTER:
list_del(&fcoe->list);
- port = lport_priv(ctlr->lp);
fcoe_vport_remove(lport);
mutex_lock(&fcoe_config_mutex);
fcoe_if_destroy(lport);
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index cc461fd7bef1..5b3534b0deda 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -115,7 +115,6 @@ static struct scsi_host_template fnic_host_template = {
.this_id = -1,
.cmd_per_lun = 3,
.can_queue = FNIC_DFLT_IO_REQ,
- .use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = FNIC_MAX_SG_DESC_CNT,
.max_sectors = 0xffff,
.shost_attrs = fnic_attrs,
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 8271785bdb93..bf0fd2aeb92e 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -468,14 +468,13 @@ int fnic_trace_buf_init(void)
fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/
FNIC_ENTRY_SIZE_BYTES;
- fnic_trace_buf_p = (unsigned long)vmalloc((trace_max_pages * PAGE_SIZE));
+ fnic_trace_buf_p = (unsigned long)vzalloc(trace_max_pages * PAGE_SIZE);
if (!fnic_trace_buf_p) {
printk(KERN_ERR PFX "Failed to allocate memory "
"for fnic_trace_buf_p\n");
err = -ENOMEM;
goto err_fnic_trace_buf_init;
}
- memset((void *)fnic_trace_buf_p, 0, (trace_max_pages * PAGE_SIZE));
fnic_trace_entries.page_offset =
vmalloc(array_size(fnic_max_trace_entries,
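As with dma_pool_zalloc() earlier in the series, vzalloc() absorbs the explicit memset(): the removed allocate-then-clear pair collapses into one zeroed, virtually contiguous allocation. Sketch:

unsigned long buf = (unsigned long)vzalloc(trace_max_pages * PAGE_SIZE);
if (!buf)
        return -ENOMEM;         /* buffer arrives already zeroed */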
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index fc538181f8df..9cdca0625498 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -700,7 +700,7 @@ static struct scsi_host_template driver_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
.cmd_size = NCR5380_CMD_SIZE,
.max_sectors = 128,
};
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 16709735b546..194c294f9b6c 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -4680,7 +4680,6 @@ static struct scsi_host_template gdth_template = {
.sg_tablesize = GDTH_MAXSG,
.cmd_per_lun = GDTH_MAXC_P_L,
.unchecked_isa_dma = 1,
- .use_clustering = ENABLE_CLUSTERING,
.no_write_same = 1,
};
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index a27fc49ebd3a..d2acd0d826e2 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -184,7 +184,7 @@ static struct scsi_host_template gvp11_scsi_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = CMD_PER_LUN,
- .use_clustering = DISABLE_CLUSTERING
+ .dma_boundary = PAGE_SIZE - 1,
};
static int check_wd33c93(struct gvp11_scsiregs *regs)
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 0ddb53c8a2e2..af291947a54d 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -69,6 +69,12 @@
#define HISI_SAS_SATA_PROTOCOL_FPDMA 0x8
#define HISI_SAS_SATA_PROTOCOL_ATAPI 0x10
+#define HISI_SAS_DIF_PROT_MASK (SHOST_DIF_TYPE1_PROTECTION | \
+ SHOST_DIF_TYPE2_PROTECTION | \
+ SHOST_DIF_TYPE3_PROTECTION)
+
+#define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK)
+
struct hisi_hba;
enum {
@@ -211,7 +217,7 @@ struct hisi_sas_slot {
/* Do not reorder/change members after here */
void *buf;
dma_addr_t buf_dma;
- int idx;
+ u16 idx;
};
struct hisi_sas_hw {
@@ -268,6 +274,8 @@ struct hisi_hba {
struct pci_dev *pci_dev;
struct device *dev;
+ int prot_mask;
+
void __iomem *regs;
void __iomem *sgpio_regs;
struct regmap *ctrl;
@@ -322,6 +330,8 @@ struct hisi_hba {
unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)];
struct work_struct rst_work;
u32 phy_state;
+ u32 intr_coal_ticks; /* Time of interrupt coalesce in us */
+ u32 intr_coal_count; /* Interrupt count to coalesce */
};
/* Generic HW DMA host memory structures */
@@ -468,7 +478,6 @@ extern int hisi_sas_remove(struct platform_device *pdev);
extern int hisi_sas_slave_configure(struct scsi_device *sdev);
extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time);
extern void hisi_sas_scan_start(struct Scsi_Host *shost);
-extern struct device_attribute *host_attrs[];
extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type);
extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy);
extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index b3f01d5b821b..eed7fc5b3389 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -296,42 +296,109 @@ static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
device_id, abort_flag, tag_to_abort);
}
+static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
+ struct sas_task *task, int n_elem,
+ int n_elem_req, int n_elem_resp)
+{
+ struct device *dev = hisi_hba->dev;
+
+ if (!sas_protocol_ata(task->task_proto)) {
+ if (task->num_scatter) {
+ if (n_elem)
+ dma_unmap_sg(dev, task->scatter,
+ task->num_scatter,
+ task->data_dir);
+ } else if (task->task_proto & SAS_PROTOCOL_SMP) {
+ if (n_elem_req)
+ dma_unmap_sg(dev, &task->smp_task.smp_req,
+ 1, DMA_TO_DEVICE);
+ if (n_elem_resp)
+ dma_unmap_sg(dev, &task->smp_task.smp_resp,
+ 1, DMA_FROM_DEVICE);
+ }
+ }
+}
+
+static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
+ struct sas_task *task, int *n_elem,
+ int *n_elem_req, int *n_elem_resp)
+{
+ struct device *dev = hisi_hba->dev;
+ int rc;
+
+ if (sas_protocol_ata(task->task_proto)) {
+ *n_elem = task->num_scatter;
+ } else {
+ unsigned int req_len, resp_len;
+
+ if (task->num_scatter) {
+ *n_elem = dma_map_sg(dev, task->scatter,
+ task->num_scatter, task->data_dir);
+ if (!*n_elem) {
+ rc = -ENOMEM;
+ goto prep_out;
+ }
+ } else if (task->task_proto & SAS_PROTOCOL_SMP) {
+ *n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
+ 1, DMA_TO_DEVICE);
+ if (!*n_elem_req) {
+ rc = -ENOMEM;
+ goto prep_out;
+ }
+ req_len = sg_dma_len(&task->smp_task.smp_req);
+ if (req_len & 0x3) {
+ rc = -EINVAL;
+ goto err_out_dma_unmap;
+ }
+ *n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
+ 1, DMA_FROM_DEVICE);
+ if (!*n_elem_resp) {
+ rc = -ENOMEM;
+ goto err_out_dma_unmap;
+ }
+ resp_len = sg_dma_len(&task->smp_task.smp_resp);
+ if (resp_len & 0x3) {
+ rc = -EINVAL;
+ goto err_out_dma_unmap;
+ }
+ }
+ }
+
+ if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
+ dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
+ *n_elem);
+ rc = -EINVAL;
+ goto err_out_dma_unmap;
+ }
+ return 0;
+
+err_out_dma_unmap:
+ /* It would be better to call dma_unmap_sg() here, but it's messy */
+ hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
+ *n_elem_req, *n_elem_resp);
+prep_out:
+ return rc;
+}
+
static int hisi_sas_task_prep(struct sas_task *task,
struct hisi_sas_dq **dq_pointer,
bool is_tmf, struct hisi_sas_tmf_task *tmf,
int *pass)
{
struct domain_device *device = task->dev;
- struct hisi_hba *hisi_hba;
+ struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_sas_port *port;
struct hisi_sas_slot *slot;
struct hisi_sas_cmd_hdr *cmd_hdr_base;
struct asd_sas_port *sas_port = device->port;
- struct device *dev;
+ struct device *dev = hisi_hba->dev;
int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
struct hisi_sas_dq *dq;
unsigned long flags;
int wr_q_index;
- if (!sas_port) {
- struct task_status_struct *ts = &task->task_status;
-
- ts->resp = SAS_TASK_UNDELIVERED;
- ts->stat = SAS_PHY_DOWN;
- /*
- * libsas will use dev->port, should
- * not call task_done for sata
- */
- if (device->dev_type != SAS_SATA_DEV)
- task->task_done(task);
- return -ECOMM;
- }
-
- hisi_hba = dev_to_hisi_hba(device);
- dev = hisi_hba->dev;
-
if (DEV_IS_GONE(sas_dev)) {
if (sas_dev)
dev_info(dev, "task prep: device %d not ready\n",
@@ -355,49 +422,10 @@ static int hisi_sas_task_prep(struct sas_task *task,
return -ECOMM;
}
- if (!sas_protocol_ata(task->task_proto)) {
- unsigned int req_len, resp_len;
-
- if (task->num_scatter) {
- n_elem = dma_map_sg(dev, task->scatter,
- task->num_scatter, task->data_dir);
- if (!n_elem) {
- rc = -ENOMEM;
- goto prep_out;
- }
- } else if (task->task_proto & SAS_PROTOCOL_SMP) {
- n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
- 1, DMA_TO_DEVICE);
- if (!n_elem_req) {
- rc = -ENOMEM;
- goto prep_out;
- }
- req_len = sg_dma_len(&task->smp_task.smp_req);
- if (req_len & 0x3) {
- rc = -EINVAL;
- goto err_out_dma_unmap;
- }
- n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
- 1, DMA_FROM_DEVICE);
- if (!n_elem_resp) {
- rc = -ENOMEM;
- goto err_out_dma_unmap;
- }
- resp_len = sg_dma_len(&task->smp_task.smp_resp);
- if (resp_len & 0x3) {
- rc = -EINVAL;
- goto err_out_dma_unmap;
- }
- }
- } else
- n_elem = task->num_scatter;
-
- if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
- dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
- n_elem);
- rc = -EINVAL;
- goto err_out_dma_unmap;
- }
+ rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
+ &n_elem_req, &n_elem_resp);
+ if (rc < 0)
+ goto prep_out;
if (hisi_hba->hw->slot_index_alloc)
rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
@@ -482,19 +510,8 @@ static int hisi_sas_task_prep(struct sas_task *task,
err_out_tag:
hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out_dma_unmap:
- if (!sas_protocol_ata(task->task_proto)) {
- if (task->num_scatter) {
- dma_unmap_sg(dev, task->scatter, task->num_scatter,
- task->data_dir);
- } else if (task->task_proto & SAS_PROTOCOL_SMP) {
- if (n_elem_req)
- dma_unmap_sg(dev, &task->smp_task.smp_req,
- 1, DMA_TO_DEVICE);
- if (n_elem_resp)
- dma_unmap_sg(dev, &task->smp_task.smp_resp,
- 1, DMA_FROM_DEVICE);
- }
- }
+ hisi_sas_dma_unmap(hisi_hba, task, n_elem,
+ n_elem_req, n_elem_resp);
prep_out:
dev_err(dev, "task prep: failed[%d]!\n", rc);
return rc;
@@ -506,10 +523,29 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
u32 rc;
u32 pass = 0;
unsigned long flags;
- struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
- struct device *dev = hisi_hba->dev;
+ struct hisi_hba *hisi_hba;
+ struct device *dev;
+ struct domain_device *device = task->dev;
+ struct asd_sas_port *sas_port = device->port;
struct hisi_sas_dq *dq = NULL;
+ if (!sas_port) {
+ struct task_status_struct *ts = &task->task_status;
+
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_PHY_DOWN;
+ /*
+ * libsas will use dev->port, should
+ * not call task_done for sata
+ */
+ if (device->dev_type != SAS_SATA_DEV)
+ task->task_done(task);
+ return -ECOMM;
+ }
+
+ hisi_hba = dev_to_hisi_hba(device);
+ dev = hisi_hba->dev;
+
if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
if (in_softirq())
return -EINVAL;
@@ -1459,12 +1495,12 @@ static int hisi_sas_abort_task(struct sas_task *task)
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
struct scsi_cmnd *cmnd = task->uldd_task;
struct hisi_sas_slot *slot = task->lldd_task;
- u32 tag = slot->idx;
+ u16 tag = slot->idx;
int rc2;
int_to_scsilun(cmnd->device->lun, &lun);
tmf_task.tmf = TMF_ABORT_TASK;
- tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
+ tmf_task.tag_of_task_to_be_managed = tag;
rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
&tmf_task);
@@ -1718,7 +1754,7 @@ static int hisi_sas_query_task(struct sas_task *task)
int_to_scsilun(cmnd->device->lun, &lun);
tmf_task.tmf = TMF_QUERY_TASK;
- tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
+ tmf_task.tag_of_task_to_be_managed = tag;
rc = hisi_sas_debug_issue_ssp_tmf(device,
lun.scsi_lun,
@@ -1994,12 +2030,6 @@ EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);
-struct device_attribute *host_attrs[] = {
- &dev_attr_phy_event_threshold,
- NULL,
-};
-EXPORT_SYMBOL_GPL(host_attrs);
-
static struct sas_domain_function_template hisi_sas_transport_ops = {
.lldd_dev_found = hisi_sas_dev_found,
.lldd_dev_gone = hisi_sas_dev_gone,
@@ -2380,7 +2410,6 @@ int hisi_sas_probe(struct platform_device *pdev,
shost->max_lun = ~0;
shost->max_channel = 1;
shost->max_cmd_len = 16;
- shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
if (hisi_hba->hw->slot_index_alloc) {
shost->can_queue = hisi_hba->hw->max_command_entries;
shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
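The hisi_sas_main.c refactor pulls the open-coded SMP/SSP scatterlist handling into hisi_sas_dma_map()/hisi_sas_dma_unmap(), so the task-prep error path and the abort path release mappings through one helper instead of two hand-maintained copies. A self-contained sketch of the pairing the helpers enforce, with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Map a request/response pair, unwinding the first mapping if the
 * second fails -- the invariant the new helpers centralise. */
static int example_map_pair(struct device *dev, struct scatterlist *req,
                            struct scatterlist *resp)
{
        if (!dma_map_sg(dev, req, 1, DMA_TO_DEVICE))
                return -ENOMEM;

        if (!dma_map_sg(dev, resp, 1, DMA_FROM_DEVICE)) {
                dma_unmap_sg(dev, req, 1, DMA_TO_DEVICE);
                return -ENOMEM;
        }
        return 0;
}

The sas_port NULL check also moves from hisi_sas_task_prep() up into hisi_sas_task_exec(), so the prep function can assume a valid port and the early-exit bookkeeping happens before any mapping is attempted.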
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 8df822a4a1bd..28ab52a021cf 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -510,6 +510,7 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+ u64 sas_addr;
memset(itct, 0, sizeof(*itct));
@@ -534,8 +535,8 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
itct->qw0 = cpu_to_le64(qw0);
/* qw1 */
- memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
- itct->sas_addr = __swab64(itct->sas_addr);
+ memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE);
+ itct->sas_addr = cpu_to_le64(__swab64(sas_addr));
/* qw2 */
itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_IT_NEXUS_LOSS_TL_OFF) |
@@ -561,7 +562,7 @@ static void clear_itct_v1_hw(struct hisi_hba *hisi_hba,
reg_val &= ~CFG_AGING_TIME_ITCT_REL_MSK;
hisi_sas_write32(hisi_hba, CFG_AGING_TIME, reg_val);
- qw0 = cpu_to_le64(itct->qw0);
+ qw0 = le64_to_cpu(itct->qw0);
qw0 &= ~ITCT_HDR_VALID_MSK;
itct->qw0 = cpu_to_le64(qw0);
}
@@ -1100,7 +1101,7 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_SSP:
{
int error = -1;
- u32 dma_err_type = cpu_to_le32(err_record->dma_err_type);
+ u32 dma_err_type = le32_to_cpu(err_record->dma_err_type);
u32 dma_tx_err_type = ((dma_err_type &
ERR_HDR_DMA_TX_ERR_TYPE_MSK)) >>
ERR_HDR_DMA_TX_ERR_TYPE_OFF;
@@ -1108,9 +1109,9 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
ERR_HDR_DMA_RX_ERR_TYPE_MSK)) >>
ERR_HDR_DMA_RX_ERR_TYPE_OFF;
u32 trans_tx_fail_type =
- cpu_to_le32(err_record->trans_tx_fail_type);
+ le32_to_cpu(err_record->trans_tx_fail_type);
u32 trans_rx_fail_type =
- cpu_to_le32(err_record->trans_rx_fail_type);
+ le32_to_cpu(err_record->trans_rx_fail_type);
if (dma_tx_err_type) {
/* dma tx err */
@@ -1558,7 +1559,7 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p)
u32 cmplt_hdr_data;
complete_hdr = &complete_queue[rd_point];
- cmplt_hdr_data = cpu_to_le32(complete_hdr->data);
+ cmplt_hdr_data = le32_to_cpu(complete_hdr->data);
idx = (cmplt_hdr_data & CMPLT_HDR_IPTT_MSK) >>
CMPLT_HDR_IPTT_OFF;
slot = &hisi_hba->slot_info[idx];
@@ -1797,6 +1798,11 @@ static int hisi_sas_v1_init(struct hisi_hba *hisi_hba)
return 0;
}
+static struct device_attribute *host_attrs_v1_hw[] = {
+ &dev_attr_phy_event_threshold,
+ NULL
+};
+
static struct scsi_host_template sht_v1_hw = {
.name = DRV_NAME,
.module = THIS_MODULE,
@@ -1808,14 +1814,13 @@ static struct scsi_host_template sht_v1_hw = {
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
.this_id = -1,
- .sg_tablesize = SG_ALL,
+ .sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
- .use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
- .shost_attrs = host_attrs,
+ .shost_attrs = host_attrs_v1_hw,
};
static const struct hisi_sas_hw hisi_sas_v1_hw = {
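The v1 hunks above also fix inverted endian accessors: fields the controller writes are __le32/__le64 and must be read with le32_to_cpu()/le64_to_cpu(). The old cpu_to_le32() calls on read were no-ops on little-endian hosts, which is how the inversion survived, but would be wrong on big-endian ones. The sparse-clean pattern, as a sketch:

#include <linux/types.h>
#include <asm/byteorder.h>

static inline u32 desc_read_dw0(const __le32 *dw0)
{
        return le32_to_cpu(*dw0);       /* device order -> CPU order */
}

static inline void desc_write_dw0(__le32 *dw0, u32 val)
{
        *dw0 = cpu_to_le32(val);        /* CPU order -> device order */
}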
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 77a85ead483e..c8ebff3ba559 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -934,6 +934,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
struct domain_device *parent_dev = device->parent;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+ u64 sas_addr;
memset(itct, 0, sizeof(*itct));
@@ -966,8 +967,8 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
itct->qw0 = cpu_to_le64(qw0);
/* qw1 */
- memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
- itct->sas_addr = __swab64(itct->sas_addr);
+ memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE);
+ itct->sas_addr = cpu_to_le64(__swab64(sas_addr));
/* qw2 */
if (!dev_is_sata(device))
@@ -2044,11 +2045,11 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
struct task_status_struct *ts = &task->task_status;
struct hisi_sas_err_record_v2 *err_record =
hisi_sas_status_buf_addr_mem(slot);
- u32 trans_tx_fail_type = cpu_to_le32(err_record->trans_tx_fail_type);
- u32 trans_rx_fail_type = cpu_to_le32(err_record->trans_rx_fail_type);
- u16 dma_tx_err_type = cpu_to_le16(err_record->dma_tx_err_type);
- u16 sipc_rx_err_type = cpu_to_le16(err_record->sipc_rx_err_type);
- u32 dma_rx_err_type = cpu_to_le32(err_record->dma_rx_err_type);
+ u32 trans_tx_fail_type = le32_to_cpu(err_record->trans_tx_fail_type);
+ u32 trans_rx_fail_type = le32_to_cpu(err_record->trans_rx_fail_type);
+ u16 dma_tx_err_type = le16_to_cpu(err_record->dma_tx_err_type);
+ u16 sipc_rx_err_type = le16_to_cpu(err_record->sipc_rx_err_type);
+ u32 dma_rx_err_type = le32_to_cpu(err_record->dma_rx_err_type);
int error = -1;
if (err_phase == 1) {
@@ -2059,8 +2060,7 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
trans_tx_fail_type);
} else if (err_phase == 2) {
/* error in RX phase, the priority is: DW1 > DW3 > DW2 */
- error = parse_trans_rx_err_code_v2_hw(
- trans_rx_fail_type);
+ error = parse_trans_rx_err_code_v2_hw(trans_rx_fail_type);
if (error == -1) {
error = parse_dma_rx_err_code_v2_hw(
dma_rx_err_type);
@@ -2358,6 +2358,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
&complete_queue[slot->cmplt_queue_slot];
unsigned long flags;
bool is_internal = slot->is_internal;
+ u32 dw0;
if (unlikely(!task || !task->lldd_task || !task->dev))
return -EINVAL;
@@ -2382,8 +2383,9 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
/* Use SAS+TMF status codes */
- switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK)
- >> CMPLT_HDR_ABORT_STAT_OFF) {
+ dw0 = le32_to_cpu(complete_hdr->dw0);
+ switch ((dw0 & CMPLT_HDR_ABORT_STAT_MSK) >>
+ CMPLT_HDR_ABORT_STAT_OFF) {
case STAT_IO_ABORTED:
/* this io has been aborted by abort command */
ts->stat = SAS_ABORTED_TASK;
@@ -2408,9 +2410,8 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
break;
}
- if ((complete_hdr->dw0 & CMPLT_HDR_ERX_MSK) &&
- (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
- u32 err_phase = (complete_hdr->dw0 & CMPLT_HDR_ERR_PHASE_MSK)
+ if ((dw0 & CMPLT_HDR_ERX_MSK) && (!(dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
+ u32 err_phase = (dw0 & CMPLT_HDR_ERR_PHASE_MSK)
>> CMPLT_HDR_ERR_PHASE_OFF;
u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
@@ -2526,22 +2527,23 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_tmf_task *tmf = slot->tmf;
u8 *buf_cmd;
int has_data = 0, hdr_tag = 0;
- u32 dw1 = 0, dw2 = 0;
+ u32 dw0, dw1 = 0, dw2 = 0;
/* create header */
/* dw0 */
- hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
+ dw0 = port->id << CMD_HDR_PORT_OFF;
if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
- hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
+ dw0 |= 3 << CMD_HDR_CMD_OFF;
else
- hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF);
+ dw0 |= 4 << CMD_HDR_CMD_OFF;
if (tmf && tmf->force_phy) {
- hdr->dw0 |= CMD_HDR_FORCE_PHY_MSK;
- hdr->dw0 |= cpu_to_le32((1 << tmf->phy_id)
- << CMD_HDR_PHY_ID_OFF);
+ dw0 |= CMD_HDR_FORCE_PHY_MSK;
+ dw0 |= (1 << tmf->phy_id) << CMD_HDR_PHY_ID_OFF;
}
+ hdr->dw0 = cpu_to_le32(dw0);
+
/* dw1 */
switch (task->data_dir) {
case DMA_TO_DEVICE:
@@ -3152,20 +3154,24 @@ static void cq_tasklet_v2_hw(unsigned long val)
/* Check for NCQ completion */
if (complete_hdr->act) {
- u32 act_tmp = complete_hdr->act;
+ u32 act_tmp = le32_to_cpu(complete_hdr->act);
int ncq_tag_count = ffs(act_tmp);
+ u32 dw1 = le32_to_cpu(complete_hdr->dw1);
- dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
+ dev_id = (dw1 & CMPLT_HDR_DEV_ID_MSK) >>
CMPLT_HDR_DEV_ID_OFF;
itct = &hisi_hba->itct[dev_id];
/* The NCQ tags are held in the itct header */
while (ncq_tag_count) {
- __le64 *ncq_tag = &itct->qw4_15[0];
+ __le64 *_ncq_tag = &itct->qw4_15[0], __ncq_tag;
+ u64 ncq_tag;
- ncq_tag_count -= 1;
- iptt = (ncq_tag[ncq_tag_count / 5]
- >> (ncq_tag_count % 5) * 12) & 0xfff;
+ ncq_tag_count--;
+ __ncq_tag = _ncq_tag[ncq_tag_count / 5];
+ ncq_tag = le64_to_cpu(__ncq_tag);
+ iptt = (ncq_tag >> (ncq_tag_count % 5) * 12) &
+ 0xfff;
slot = &hisi_hba->slot_info[iptt];
slot->cmplt_queue_slot = rd_point;
@@ -3176,7 +3182,9 @@ static void cq_tasklet_v2_hw(unsigned long val)
ncq_tag_count = ffs(act_tmp);
}
} else {
- iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
+ u32 dw1 = le32_to_cpu(complete_hdr->dw1);
+
+ iptt = dw1 & CMPLT_HDR_IPTT_MSK;
slot = &hisi_hba->slot_info[iptt];
slot->cmplt_queue_slot = rd_point;
slot->cmplt_queue = queue;
@@ -3552,6 +3560,11 @@ static void wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba,
dev_dbg(dev, "wait commands complete %dms\n", time);
}
+static struct device_attribute *host_attrs_v2_hw[] = {
+ &dev_attr_phy_event_threshold,
+ NULL
+};
+
static struct scsi_host_template sht_v2_hw = {
.name = DRV_NAME,
.module = THIS_MODULE,
@@ -3563,14 +3576,13 @@ static struct scsi_host_template sht_v2_hw = {
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
.this_id = -1,
- .sg_tablesize = SG_ALL,
+ .sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
- .use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
- .shost_attrs = host_attrs,
+ .shost_attrs = host_attrs_v2_hw,
};
static const struct hisi_sas_hw hisi_sas_v2_hw = {
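One detail worth spelling out from the v2 completion-queue hunk: the ITCT packs five 12-bit NCQ tags per 64-bit qword, so tag n lives in qw4_15[n / 5] at bit offset (n % 5) * 12, and the fix converts the qword with le64_to_cpu() before shifting. For example, tag 7 sits in qword 1 at bits 24..35:

u64 qw = le64_to_cpu(itct->qw4_15[7 / 5]);      /* qword index 1 */
u16 iptt = (qw >> ((7 % 5) * 12)) & 0xfff;      /* shift 24, mask 12 bits */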
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index a369450a1fa7..e2420a810e99 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -42,6 +42,7 @@
#define MAX_CON_TIME_LIMIT_TIME 0xa4
#define BUS_INACTIVE_LIMIT_TIME 0xa8
#define REJECT_TO_OPEN_LIMIT_TIME 0xac
+#define CQ_INT_CONVERGE_EN 0xb0
#define CFG_AGING_TIME 0xbc
#define HGC_DFX_CFG2 0xc0
#define CFG_ABT_SET_QUERY_IPTT 0xd4
@@ -126,6 +127,8 @@
#define PHY_CTRL (PORT_BASE + 0x14)
#define PHY_CTRL_RESET_OFF 0
#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
+#define CMD_HDR_PIR_OFF 8
+#define CMD_HDR_PIR_MSK (0x1 << CMD_HDR_PIR_OFF)
#define SL_CFG (PORT_BASE + 0x84)
#define AIP_LIMIT (PORT_BASE + 0x90)
#define SL_CONTROL (PORT_BASE + 0x94)
@@ -332,6 +335,16 @@
#define ITCT_HDR_RTOLT_OFF 48
#define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF)
+struct hisi_sas_protect_iu_v3_hw {
+ u32 dw0;
+ u32 lbrtcv;
+ u32 lbrtgv;
+ u32 dw3;
+ u32 dw4;
+ u32 dw5;
+ u32 rsv;
+};
+
struct hisi_sas_complete_v3_hdr {
__le32 dw0;
__le32 dw1;
@@ -371,6 +384,28 @@ struct hisi_sas_err_record_v3 {
((fis.command == ATA_CMD_DEV_RESET) && \
((fis.control & ATA_SRST) != 0)))
+#define T10_INSRT_EN_OFF 0
+#define T10_INSRT_EN_MSK (1 << T10_INSRT_EN_OFF)
+#define T10_RMV_EN_OFF 1
+#define T10_RMV_EN_MSK (1 << T10_RMV_EN_OFF)
+#define T10_RPLC_EN_OFF 2
+#define T10_RPLC_EN_MSK (1 << T10_RPLC_EN_OFF)
+#define T10_CHK_EN_OFF 3
+#define T10_CHK_EN_MSK (1 << T10_CHK_EN_OFF)
+#define INCR_LBRT_OFF 5
+#define INCR_LBRT_MSK (1 << INCR_LBRT_OFF)
+#define USR_DATA_BLOCK_SZ_OFF 20
+#define USR_DATA_BLOCK_SZ_MSK (0x3 << USR_DATA_BLOCK_SZ_OFF)
+#define T10_CHK_MSK_OFF 16
+
+static bool hisi_sas_intr_conv;
+MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)");
+
+/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
+static int prot_mask;
+module_param(prot_mask, int, 0);
+MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 ");
+
static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
{
void __iomem *regs = hisi_hba->regs + off;
@@ -436,6 +471,8 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
+ hisi_sas_write32(hisi_hba, CQ_INT_CONVERGE_EN,
+ hisi_sas_intr_conv);
hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff);
hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
@@ -494,7 +531,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1);
hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
-
+ hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32);
/* used for 12G negotiate */
hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);
@@ -622,6 +659,7 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
struct domain_device *parent_dev = device->parent;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+ u64 sas_addr;
memset(itct, 0, sizeof(*itct));
@@ -654,8 +692,8 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
itct->qw0 = cpu_to_le64(qw0);
/* qw1 */
- memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
- itct->sas_addr = __swab64(itct->sas_addr);
+ memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE);
+ itct->sas_addr = cpu_to_le64(__swab64(sas_addr));
/* qw2 */
if (!dev_is_sata(device))
@@ -932,6 +970,58 @@ static void prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
}
+static u32 get_prot_chk_msk_v3_hw(struct scsi_cmnd *scsi_cmnd)
+{
+ unsigned char prot_flags = scsi_cmnd->prot_flags;
+
+ if (prot_flags & SCSI_PROT_TRANSFER_PI) {
+ if (prot_flags & SCSI_PROT_REF_CHECK)
+ return 0xc << 16;
+ return 0xfc << 16;
+ }
+ return 0;
+}
+
+static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd,
+ struct hisi_sas_protect_iu_v3_hw *prot)
+{
+ unsigned char prot_op = scsi_get_prot_op(scsi_cmnd);
+ unsigned int interval = scsi_prot_interval(scsi_cmnd);
+ u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmnd->request);
+
+ switch (prot_op) {
+ case SCSI_PROT_READ_STRIP:
+ prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK);
+ prot->lbrtcv = lbrt_chk_val;
+ prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
+ break;
+ case SCSI_PROT_WRITE_INSERT:
+ prot->dw0 |= T10_INSRT_EN_MSK;
+ prot->lbrtgv = lbrt_chk_val;
+ break;
+ default:
+ WARN(1, "prot_op(0x%x) is not valid\n", prot_op);
+ break;
+ }
+
+ switch (interval) {
+ case 512:
+ break;
+ case 4096:
+ prot->dw0 |= (0x1 << USR_DATA_BLOCK_SZ_OFF);
+ break;
+ case 520:
+ prot->dw0 |= (0x2 << USR_DATA_BLOCK_SZ_OFF);
+ break;
+ default:
+ WARN(1, "protection interval (0x%x) invalid\n",
+ interval);
+ break;
+ }
+
+ prot->dw0 |= INCR_LBRT_MSK;
+}
+
static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
@@ -943,9 +1033,10 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
struct sas_ssp_task *ssp_task = &task->ssp_task;
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
struct hisi_sas_tmf_task *tmf = slot->tmf;
+ unsigned char prot_op = scsi_get_prot_op(scsi_cmnd);
int has_data = 0, priority = !!tmf;
u8 *buf_cmd;
- u32 dw1 = 0, dw2 = 0;
+ u32 dw1 = 0, dw2 = 0, len = 0;
hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
(2 << CMD_HDR_TLR_CTRL_OFF) |
@@ -975,7 +1066,6 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
/* map itct entry */
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
- hdr->dw1 = cpu_to_le32(dw1);
dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
+ 3) / 4) << CMD_HDR_CFL_OFF) |
@@ -988,7 +1078,6 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
slot->n_elem);
- hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
@@ -1013,6 +1102,38 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
break;
}
}
+
+ if (has_data && (prot_op != SCSI_PROT_NORMAL)) {
+ struct hisi_sas_protect_iu_v3_hw prot;
+ u8 *buf_cmd_prot;
+
+ hdr->dw7 |= cpu_to_le32(1 << CMD_HDR_ADDR_MODE_SEL_OFF);
+ dw1 |= CMD_HDR_PIR_MSK;
+ buf_cmd_prot = hisi_sas_cmd_hdr_addr_mem(slot) +
+ sizeof(struct ssp_frame_hdr) +
+ sizeof(struct ssp_command_iu);
+
+ memset(&prot, 0, sizeof(struct hisi_sas_protect_iu_v3_hw));
+ fill_prot_v3_hw(scsi_cmnd, &prot);
+ memcpy(buf_cmd_prot, &prot,
+ sizeof(struct hisi_sas_protect_iu_v3_hw));
+
+ /*
+ * For READ, we need length of info read to memory, while for
+ * WRITE we need length of data written to the disk.
+ */
+ if (prot_op == SCSI_PROT_WRITE_INSERT) {
+ unsigned int interval = scsi_prot_interval(scsi_cmnd);
+ unsigned int ilog2_interval = ilog2(interval);
+
+ len = (task->total_xfer_len >> ilog2_interval) * 8;
+ }
+
+ }
+
+ hdr->dw1 = cpu_to_le32(dw1);
+
+ hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len + len);
}
static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
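The data_transfer_len adjustment at the end of prep_ssp_v3_hw() accounts for protection information the HBA inserts on the wire: for SCSI_PROT_WRITE_INSERT, each protection interval gains an 8-byte PI tuple. Worked example, restating the hunk's arithmetic: a 64 KiB write with a 512-byte interval spans 65536 >> ilog2(512) = 128 blocks, so len = 128 * 8 = 1024 and the header advertises 65536 + 1024 bytes. READ_STRIP needs no adjustment because the length counted is what reaches memory after the PI is stripped.

unsigned int blocks = task->total_xfer_len >> ilog2(interval);  /* 65536 >> 9 = 128 */
u32 pi_len = blocks * 8;                                        /* 1024 bytes of inserted PI */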
@@ -1584,15 +1705,16 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
&complete_queue[slot->cmplt_queue_slot];
struct hisi_sas_err_record_v3 *record =
hisi_sas_status_buf_addr_mem(slot);
- u32 dma_rx_err_type = record->dma_rx_err_type;
- u32 trans_tx_fail_type = record->trans_tx_fail_type;
+ u32 dma_rx_err_type = le32_to_cpu(record->dma_rx_err_type);
+ u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type);
+ u32 dw3 = le32_to_cpu(complete_hdr->dw3);
switch (task->task_proto) {
case SAS_PROTOCOL_SSP:
if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
ts->residual = trans_tx_fail_type;
ts->stat = SAS_DATA_UNDERRUN;
- } else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
+ } else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
ts->stat = SAS_QUEUE_FULL;
slot->abort = 1;
} else {
@@ -1606,7 +1728,7 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
ts->residual = trans_tx_fail_type;
ts->stat = SAS_DATA_UNDERRUN;
- } else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
+ } else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
ts->stat = SAS_PHY_DOWN;
slot->abort = 1;
} else {
@@ -1639,6 +1761,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
&complete_queue[slot->cmplt_queue_slot];
unsigned long flags;
bool is_internal = slot->is_internal;
+ u32 dw0, dw1, dw3;
if (unlikely(!task || !task->lldd_task || !task->dev))
return -EINVAL;
@@ -1662,11 +1785,14 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
goto out;
}
+ dw0 = le32_to_cpu(complete_hdr->dw0);
+ dw1 = le32_to_cpu(complete_hdr->dw1);
+ dw3 = le32_to_cpu(complete_hdr->dw3);
+
/*
* Use SAS+TMF status codes
*/
- switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK)
- >> CMPLT_HDR_ABORT_STAT_OFF) {
+ switch ((dw0 & CMPLT_HDR_ABORT_STAT_MSK) >> CMPLT_HDR_ABORT_STAT_OFF) {
case STAT_IO_ABORTED:
/* this IO has been aborted by abort command */
ts->stat = SAS_ABORTED_TASK;
@@ -1689,7 +1815,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
/* check for erroneous completion */
- if ((complete_hdr->dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
+ if ((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
slot_err_v3_hw(hisi_hba, task, slot);
@@ -1698,8 +1824,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
"CQ hdr: 0x%x 0x%x 0x%x 0x%x "
"Error info: 0x%x 0x%x 0x%x 0x%x\n",
slot->idx, task, sas_dev->device_id,
- complete_hdr->dw0, complete_hdr->dw1,
- complete_hdr->act, complete_hdr->dw3,
+ dw0, dw1, complete_hdr->act, dw3,
error_info[0], error_info[1],
error_info[2], error_info[3]);
if (unlikely(slot->abort))
@@ -1797,11 +1922,13 @@ static void cq_tasklet_v3_hw(unsigned long val)
while (rd_point != wr_point) {
struct hisi_sas_complete_v3_hdr *complete_hdr;
struct device *dev = hisi_hba->dev;
+ u32 dw1;
int iptt;
complete_hdr = &complete_queue[rd_point];
+ dw1 = le32_to_cpu(complete_hdr->dw1);
- iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
+ iptt = dw1 & CMPLT_HDR_IPTT_MSK;
if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) {
slot = &hisi_hba->slot_info[iptt];
slot->cmplt_queue_slot = rd_point;
@@ -1878,10 +2005,12 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
for (i = 0; i < hisi_hba->queue_count; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
struct tasklet_struct *t = &cq->tasklet;
+ int nr = hisi_sas_intr_conv ? 16 : 16 + i;
+ unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED : 0;
- rc = devm_request_irq(dev, pci_irq_vector(pdev, i+16),
- cq_interrupt_v3_hw, 0,
- DRV_NAME " cq", cq);
+ rc = devm_request_irq(dev, pci_irq_vector(pdev, nr),
+ cq_interrupt_v3_hw, irqflags,
+ DRV_NAME " cq", cq);
if (rc) {
dev_err(dev,
"could not request cq%d interrupt, rc=%d\n",
@@ -1898,8 +2027,9 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
free_cq_irqs:
for (k = 0; k < i; k++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[k];
+ int nr = hisi_sas_intr_conv ? 16 : 16 + k;
- free_irq(pci_irq_vector(pdev, k+16), cq);
+ free_irq(pci_irq_vector(pdev, nr), cq);
}
free_irq(pci_irq_vector(pdev, 11), hisi_hba);
free_chnl_interrupt:
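The new hisi_sas_intr_conv switch (read-only via intr_conv_v3_hw below, set through the intr_conv module parameter added at the end of the file) collapses every completion-queue interrupt onto vector 16. A vector may only be registered more than once when each registration passes IRQF_SHARED with a distinct dev_id, which is exactly what the hunk arranges, and the unwind path computes the same vector number. In converged mode each handler must also cheaply reject interrupts meant for a different queue; a generic sketch of that obligation (cq_has_new_entries() is a hypothetical helper, not the driver's code):

static irqreturn_t cq_interrupt_sketch(int irq, void *dev_id)
{
	struct hisi_sas_cq *cq = dev_id;	/* the unique per-queue cookie */

	/* With IRQF_SHARED every registered handler runs for each
	 * interrupt on the line, so bail out when this queue is idle. */
	if (!cq_has_new_entries(cq))		/* hypothetical helper */
		return IRQ_NONE;

	tasklet_schedule(&cq->tasklet);
	return IRQ_HANDLED;
}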
@@ -2089,6 +2219,119 @@ static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
dev_dbg(dev, "wait commands complete %dms\n", time);
}
+static ssize_t intr_conv_v3_hw_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_sas_intr_conv);
+}
+static DEVICE_ATTR_RO(intr_conv_v3_hw);
+
+static void config_intr_coal_v3_hw(struct hisi_hba *hisi_hba)
+{
+ /* config those registers between enable and disable PHYs */
+ hisi_sas_stop_phys(hisi_hba);
+
+ if (hisi_hba->intr_coal_ticks == 0 ||
+ hisi_hba->intr_coal_count == 0) {
+ hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
+ hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
+ hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
+ } else {
+ hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3);
+ hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME,
+ hisi_hba->intr_coal_ticks);
+ hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT,
+ hisi_hba->intr_coal_count);
+ }
+ phys_init_v3_hw(hisi_hba);
+}
+
+static ssize_t intr_coal_ticks_v3_hw_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct hisi_hba *hisi_hba = shost_priv(shost);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ hisi_hba->intr_coal_ticks);
+}
+
+static ssize_t intr_coal_ticks_v3_hw_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct hisi_hba *hisi_hba = shost_priv(shost);
+ u32 intr_coal_ticks;
+ int ret;
+
+ ret = kstrtou32(buf, 10, &intr_coal_ticks);
+ if (ret) {
+ dev_err(dev, "Input data of interrupt coalesce unmatch\n");
+ return -EINVAL;
+ }
+
+ if (intr_coal_ticks >= BIT(24)) {
+ dev_err(dev, "intr_coal_ticks must be less than 2^24!\n");
+ return -EINVAL;
+ }
+
+ hisi_hba->intr_coal_ticks = intr_coal_ticks;
+
+ config_intr_coal_v3_hw(hisi_hba);
+
+ return count;
+}
+static DEVICE_ATTR_RW(intr_coal_ticks_v3_hw);
+
+static ssize_t intr_coal_count_v3_hw_show(struct device *dev,
+ struct device_attribute
+ *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct hisi_hba *hisi_hba = shost_priv(shost);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ hisi_hba->intr_coal_count);
+}
+
+static ssize_t intr_coal_count_v3_hw_store(struct device *dev,
+ struct device_attribute
+ *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct hisi_hba *hisi_hba = shost_priv(shost);
+ u32 intr_coal_count;
+ int ret;
+
+ ret = kstrtou32(buf, 10, &intr_coal_count);
+ if (ret) {
+ dev_err(dev, "Input data of interrupt coalesce unmatch\n");
+ return -EINVAL;
+ }
+
+ if (intr_coal_count >= BIT(8)) {
+ dev_err(dev, "intr_coal_count must be less than 2^8!\n");
+ return -EINVAL;
+ }
+
+ hisi_hba->intr_coal_count = intr_coal_count;
+
+ config_intr_coal_v3_hw(hisi_hba);
+
+ return count;
+}
+static DEVICE_ATTR_RW(intr_coal_count_v3_hw);
+
+static struct device_attribute *host_attrs_v3_hw[] = {
+ &dev_attr_phy_event_threshold,
+ &dev_attr_intr_conv_v3_hw,
+ &dev_attr_intr_coal_ticks_v3_hw,
+ &dev_attr_intr_coal_count_v3_hw,
+ NULL
+};
+
static struct scsi_host_template sht_v3_hw = {
.name = DRV_NAME,
.module = THIS_MODULE,
@@ -2100,14 +2343,13 @@ static struct scsi_host_template sht_v3_hw = {
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
.this_id = -1,
- .sg_tablesize = SG_ALL,
+ .sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
- .use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
- .shost_attrs = host_attrs,
+ .shost_attrs = host_attrs_v3_hw,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
};
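The three attributes wired into host_attrs_v3_hw above are published in the host's scsi_host class directory once the template registers. A hypothetical userspace snippet tuning the coalescing knobs (host number, path and values are examples; intr_conv_v3_hw itself is read-only, mode 0444, and writing 0 to either knob drops back to the driver's defaults per config_intr_coal_v3_hw()):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write a value to one of the new v3 sysfs knobs. */
static int set_knob(const char *attr, const char *val)
{
	char path[128];
	int fd;

	snprintf(path, sizeof(path), "/sys/class/scsi_host/host0/%s", attr);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0)
		perror(attr);
	return close(fd);
}

int main(void)
{
	set_knob("intr_coal_ticks_v3_hw", "100");	/* must be < 2^24 */
	set_knob("intr_coal_count_v3_hw", "16");	/* must be < 2^8 */
	return 0;
}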
@@ -2161,6 +2403,12 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
hisi_hba->shost = shost;
SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
+ if (prot_mask & ~HISI_SAS_PROT_MASK)
+ dev_err(dev, "unsupported protection mask 0x%x, using default (0x0)\n",
+ prot_mask);
+ else
+ hisi_hba->prot_mask = prot_mask;
+
timer_setup(&hisi_hba->timer, NULL, 0);
if (hisi_sas_get_fw_info(hisi_hba) < 0)
@@ -2199,14 +2447,11 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
goto err_out_disable_device;
- if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
- (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
- if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
- (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
- dev_err(dev, "No usable DMA addressing method\n");
- rc = -EIO;
- goto err_out_regions;
- }
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ dev_err(dev, "No usable DMA addressing method\n");
+ rc = -EIO;
+ goto err_out_regions;
}
shost = hisi_sas_shost_alloc_pci(pdev);
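Note the short-circuit semantics of the converted test: in C, a || b evaluates b exactly when a returned 0, i.e. the 32-bit call also runs after a *successful* 64-bit one (narrowing the mask back to 32 bits), and is skipped when the 64-bit call fails. The hptiop hunk further down shares this shape, while the isci conversion below uses the sequential form. A sketch of the fallback written so the intent is unambiguous:

/* Equivalent fallback without the short-circuit trap: only try the
 * 32-bit mask when the 64-bit one actually failed. */
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
	dev_err(dev, "No usable DMA addressing method\n");
	rc = -EIO;
	goto err_out_regions;
}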
@@ -2245,7 +2490,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_lun = ~0;
shost->max_channel = 1;
shost->max_cmd_len = 16;
- shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
shost->can_queue = hisi_hba->hw->max_command_entries -
HISI_SAS_RESERVED_IPTT_CNT;
shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
@@ -2275,6 +2519,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
goto err_out_register_ha;
+ if (hisi_hba->prot_mask) {
+ dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
+ prot_mask);
+ scsi_host_set_prot(hisi_hba->shost, prot_mask);
+ }
+
scsi_scan_host(shost);
return 0;
@@ -2301,8 +2551,9 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
free_irq(pci_irq_vector(pdev, 11), hisi_hba);
for (i = 0; i < hisi_hba->queue_count; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+ int nr = hisi_sas_intr_conv ? 16 : 16 + i;
- free_irq(pci_irq_vector(pdev, i+16), cq);
+ free_irq(pci_irq_vector(pdev, nr), cq);
}
pci_free_irq_vectors(pdev);
}
@@ -2529,7 +2780,7 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct device *dev = hisi_hba->dev;
struct Scsi_Host *shost = hisi_hba->shost;
- u32 device_state;
+ pci_power_t device_state;
int rc;
if (!pdev->pm_cap) {
@@ -2575,7 +2826,7 @@ static int hisi_sas_v3_resume(struct pci_dev *pdev)
struct Scsi_Host *shost = hisi_hba->shost;
struct device *dev = hisi_hba->dev;
unsigned int rc;
- u32 device_state = pdev->current_state;
+ pci_power_t device_state = pdev->current_state;
dev_warn(dev, "resuming from operating state [D%d]\n",
device_state);
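Switching device_state from u32 to pci_power_t is purely a typing fix: it matches pdev->current_state and the D-state constants, so the %d in the messages prints 0..4 for D0..D3cold. For reference, paraphrased from <linux/pci.h> (the __bitwise annotation lets sparse flag mixing with plain integers):

typedef int __bitwise pci_power_t;

#define PCI_D0		((pci_power_t __force) 0)
#define PCI_D1		((pci_power_t __force) 1)
#define PCI_D2		((pci_power_t __force) 2)
#define PCI_D3hot	((pci_power_t __force) 3)
#define PCI_D3cold	((pci_power_t __force) 4)
#define PCI_UNKNOWN	((pci_power_t __force) 5)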
@@ -2624,6 +2875,7 @@ static struct pci_driver sas_v3_pci_driver = {
};
module_pci_driver(sas_v3_pci_driver);
+module_param_named(intr_conv, hisi_sas_intr_conv, bool, 0444);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index cc71136ba300..eaf329db3973 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -416,7 +416,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost->sg_prot_tablesize = sht->sg_prot_tablesize;
shost->cmd_per_lun = sht->cmd_per_lun;
shost->unchecked_isa_dma = sht->unchecked_isa_dma;
- shost->use_clustering = sht->use_clustering;
shost->no_write_same = sht->no_write_same;
if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler)
@@ -449,6 +448,11 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
else
shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;
+ if (sht->max_segment_size)
+ shost->max_segment_size = sht->max_segment_size;
+ else
+ shost->max_segment_size = BLK_MAX_SEGMENT_SIZE;
+
/*
* assume a 4GB boundary, if not set
*/
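These hosts.c changes are the core of the DISABLE_CLUSTERING removal mentioned in the merge text: segment merging is now always on, and a host that used to disable clustering instead states its real constraint through max_segment_size or dma_boundary. A sketch of how a template might express the old behaviour under the new scheme (the iscsi_tcp hunk below does exactly the dma_boundary variant; the field values here are illustrative):

/* Sketch: forbid merging sg entries across page boundaries, which is
 * roughly what DISABLE_CLUSTERING used to imply. */
static struct scsi_host_template example_sht = {
	.name		  = "example",
	/* ... */
	.dma_boundary	  = PAGE_SIZE - 1,	/* no segment crosses a page */
	.max_segment_size = PAGE_SIZE,		/* optional: also cap length */
};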
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c9cccf35e9d7..ff67ef5d5347 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -965,7 +965,6 @@ static struct scsi_host_template hpsa_driver_template = {
.scan_finished = hpsa_scan_finished,
.change_queue_depth = hpsa_change_queue_depth,
.this_id = -1,
- .use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = hpsa_eh_device_reset_handler,
.ioctl = hpsa_ioctl,
.slave_alloc = hpsa_slave_alloc,
@@ -4663,6 +4662,7 @@ static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
case WRITE_6:
case WRITE_12:
is_write = 1;
+ /* fall through */
case READ_6:
case READ_12:
if (*cdb_len == 6) {
@@ -5093,6 +5093,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
switch (cmd->cmnd[0]) {
case WRITE_6:
is_write = 1;
+ /* fall through */
case READ_6:
first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
(cmd->cmnd[2] << 8) |
@@ -5103,6 +5104,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
break;
case WRITE_10:
is_write = 1;
+ /* fall through */
case READ_10:
first_block =
(((u64) cmd->cmnd[2]) << 24) |
@@ -5115,6 +5117,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
break;
case WRITE_12:
is_write = 1;
+ /* fall through */
case READ_12:
first_block =
(((u64) cmd->cmnd[2]) << 24) |
@@ -5129,6 +5132,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
break;
case WRITE_16:
is_write = 1;
+ /* fall through */
case READ_16:
first_block =
(((u64) cmd->cmnd[2]) << 56) |
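All of the hpsa hunks above annotate one idiom: the WRITE_* case labels only set is_write and then deliberately share the READ_* decode beneath them, and the comment keeps GCC's -Wimplicit-fallthrough quiet. Reduced to its shape as a standalone sketch (decode helper and names are stand-ins, not hpsa functions):

#include <stdint.h>

#define READ_10  0x28
#define WRITE_10 0x2a

/* WRITE_10 sets the flag and intentionally falls into the shared
 * READ_10 LBA decode; the comment marks the missing break as deliberate. */
static uint64_t decode_rw10(const uint8_t *cdb, int *is_write)
{
	uint64_t first_block = 0;

	*is_write = 0;
	switch (cdb[0]) {
	case WRITE_10:
		*is_write = 1;
		/* fall through */
	case READ_10:
		first_block = ((uint64_t)cdb[2] << 24) | (cdb[3] << 16) |
			      (cdb[4] << 8) | cdb[5];
		break;
	}
	return first_block;
}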
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 2fad7f03aa02..3eedfd4f8f57 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1180,7 +1180,6 @@ static struct scsi_host_template driver_template = {
.eh_host_reset_handler = hptiop_reset,
.info = hptiop_info,
.emulated = 0,
- .use_clustering = ENABLE_CLUSTERING,
.proc_name = driver_name,
.shost_attrs = hptiop_attrs,
.slave_configure = hptiop_slave_config,
@@ -1309,11 +1308,11 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
/* Enable 64bit DMA if possible */
iop_ops = (struct hptiop_adapter_ops *)id->driver_data;
- if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(iop_ops->hw_dma_bit_mask))) {
- if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
- printk(KERN_ERR "hptiop: fail to set dma_mask\n");
- goto disable_pci_device;
- }
+ if (dma_set_mask(&pcidev->dev,
+ DMA_BIT_MASK(iop_ops->hw_dma_bit_mask)) ||
+ dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32))) {
+ printk(KERN_ERR "hptiop: fail to set dma_mask\n");
+ goto disable_pci_device;
}
if (pci_request_regions(pcidev, driver_name)) {
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index b64ca977825d..dbaa4f131433 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -3100,7 +3100,6 @@ static struct scsi_host_template driver_template = {
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = IBMVFC_MAX_SECTORS,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = ibmvfc_attrs,
.track_queue_depth = 1,
};
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 9df8a1a2299c..1135e74646e2 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -2079,7 +2079,6 @@ static struct scsi_host_template driver_template = {
.can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
.this_id = -1,
.sg_tablesize = SG_ALL,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = ibmvscsi_attrs,
};
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index e63aadd10dfd..cc9cae469c4b 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3695,11 +3695,6 @@ static int ibmvscsis_get_system_info(void)
return 0;
}
-static char *ibmvscsis_get_fabric_name(void)
-{
- return "ibmvscsis";
-}
-
static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg)
{
struct ibmvscsis_tport *tport =
@@ -4044,9 +4039,8 @@ static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
static const struct target_core_fabric_ops ibmvscsis_ops = {
.module = THIS_MODULE,
- .name = "ibmvscsis",
+ .fabric_name = "ibmvscsis",
.max_data_sg_nents = MAX_TXU / PAGE_SIZE,
- .get_fabric_name = ibmvscsis_get_fabric_name,
.tpg_get_wwn = ibmvscsis_get_fabric_wwn,
.tpg_get_tag = ibmvscsis_get_tag,
.tpg_get_default_depth = ibmvscsis_get_default_depth,
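The target-core change here swaps a callback for a plain string: .get_fabric_name() is gone and the ops table carries .fabric_name directly, which is why the five-line accessor above could be deleted. The shape of the conversion for any fabric driver (illustrative ops table, not a complete one):

static const struct target_core_fabric_ops example_ops = {
	.module      = THIS_MODULE,
	.fabric_name = "example",	/* was .name plus .get_fabric_name() */
	/* tpg_* and the other callbacks are unchanged */
};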
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 8c6627bc8a39..cea7f502e8ca 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -1110,7 +1110,6 @@ static struct scsi_host_template imm_template = {
.bios_param = imm_biosparam,
.this_id = 7,
.sg_tablesize = SG_ALL,
- .use_clustering = ENABLE_CLUSTERING,
.can_queue = 1,
.slave_alloc = imm_adjust_queue,
};
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 7a91cf3ff173..eb2778b5c81b 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2817,7 +2817,6 @@ static struct scsi_host_template initio_template = {
.can_queue = MAX_TARGETS * i91u_MAXQUEUE,
.this_id = 1,
.sg_tablesize = SG_ALL,
- .use_clustering = ENABLE_CLUSTERING,
};
static int initio_probe_one(struct pci_dev *pdev,
@@ -2840,7 +2839,7 @@ static int initio_probe_one(struct pci_dev *pdev,
reg = 0;
bios_seg = (bios_seg << 8) + ((u16) ((reg & 0xFF00) >> 8));
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING "i91u: Could not set 32 bit DMA mask\n");
error = -ENODEV;
goto out_disable_device;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 271990bc065b..d1b4025a4503 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6754,7 +6754,6 @@ static struct scsi_host_template driver_template = {
.sg_tablesize = IPR_MAX_SGLIST,
.max_sectors = IPR_IOA_MAX_SECTORS,
.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = ipr_ioa_attrs,
.sdev_attrs = ipr_dev_attrs,
.proc_name = IPR_NAME,
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index ee8a1ecd58fd..e8bc8d328bab 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -365,7 +365,6 @@ static struct scsi_host_template ips_driver_template = {
.this_id = -1,
.sg_tablesize = IPS_MAX_SG,
.cmd_per_lun = 3,
- .use_clustering = ENABLE_CLUSTERING,
.no_write_same = 1,
};
@@ -1801,13 +1800,13 @@ ips_fill_scb_sg_single(ips_ha_t * ha, dma_addr_t busaddr,
}
if (IPS_USE_ENH_SGLIST(ha)) {
scb->sg_list.enh_list[indx].address_lo =
- cpu_to_le32(pci_dma_lo32(busaddr));
+ cpu_to_le32(lower_32_bits(busaddr));
scb->sg_list.enh_list[indx].address_hi =
- cpu_to_le32(pci_dma_hi32(busaddr));
+ cpu_to_le32(upper_32_bits(busaddr));
scb->sg_list.enh_list[indx].length = cpu_to_le32(e_len);
} else {
scb->sg_list.std_list[indx].address =
- cpu_to_le32(pci_dma_lo32(busaddr));
+ cpu_to_le32(lower_32_bits(busaddr));
scb->sg_list.std_list[indx].length = cpu_to_le32(e_len);
}
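Replacing the driver-private pci_dma_lo32()/pci_dma_hi32() macros with the generic helpers (and, in the next hunk, replacing IPS_ENABLE_DMA64 with a plain sizeof(dma_addr_t) > 4 test) removes duplicated bit-fiddling. Roughly what the helpers compute, paraphrased from <linux/kernel.h> of that era:

/* upper_32_bits() shifts in two 16-bit steps so the expression stays
 * well-defined even when the argument is only 32 bits wide. */
#define lower_32_bits(n) ((u32)(n))
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))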
@@ -6678,7 +6677,6 @@ ips_register_scsi(int index)
sh->sg_tablesize = sh->hostt->sg_tablesize;
sh->can_queue = sh->hostt->can_queue;
sh->cmd_per_lun = sh->hostt->cmd_per_lun;
- sh->use_clustering = sh->hostt->use_clustering;
sh->max_sectors = 128;
sh->max_id = ha->ntargets;
@@ -6926,7 +6924,7 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
* it! Also, don't use 64bit addressing if dma addresses
* are guaranteed to be < 4G.
*/
- if (IPS_ENABLE_DMA64 && IPS_HAS_ENH_SGLIST(ha) &&
+ if (sizeof(dma_addr_t) > 4 && IPS_HAS_ENH_SGLIST(ha) &&
!dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(64))) {
(ha)->flags |= IPS_HA_ENH_SG;
} else {
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index db546171e97f..6c0678fb9a67 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -96,15 +96,6 @@
#define __iomem
#endif
- #define pci_dma_hi32(a) ((a >> 16) >> 16)
- #define pci_dma_lo32(a) (a & 0xffffffff)
-
- #if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
- #define IPS_ENABLE_DMA64 (1)
- #else
- #define IPS_ENABLE_DMA64 (0)
- #endif
-
/*
* Adapter address map equates
*/
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 08c7b1e25fe4..68b90c4f79a3 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -163,7 +163,6 @@ static struct scsi_host_template isci_sht = {
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
- .use_clustering = ENABLE_CLUSTERING,
.eh_abort_handler = sas_eh_abort_handler,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
@@ -304,21 +303,10 @@ static int isci_pci_init(struct pci_dev *pdev)
pci_set_master(pdev);
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err)
- return err;
- }
-
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err)
- return err;
- }
-
- return 0;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err)
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ return err;
}
static int num_controllers(struct pci_dev *pdev)
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 1deca8c5a94f..7f9b3f20e5e4 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -778,6 +778,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
break;
case SCU_EVENT_LINK_FAILURE:
scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+ /* fall through */
case SCU_EVENT_HARD_RESET_RECEIVED:
/* Start the oob/sn state machine over again */
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index cc51f38b116d..9d29edb9f590 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -310,7 +310,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost,
/* Kill all outstanding requests for the device. */
sci_remote_device_terminate_requests(idev);
- /* Fall through into the default case... */
+ /* Fall through - into the default case... */
default:
clear_bit(IDEV_IO_READY, &idev->flags);
break;
@@ -593,7 +593,7 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
break;
}
- /* Else, fall through and treat as unhandled... */
+ /* fall through - and treat as unhandled... */
default:
dev_dbg(scirdev_to_dev(idev),
"%s: device: %p event code: %x: %s\n",
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index e3f2a5359d71..474a43460963 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -601,9 +601,9 @@ enum sci_status sci_remote_node_context_suspend(
__func__, sci_rnc);
return SCI_FAILURE_INVALID_STATE;
}
- /* Fall through and handle like SCI_RNC_POSTING */
+ /* Fall through - and handle like SCI_RNC_POSTING */
case SCI_RNC_RESUMING:
- /* Fall through and handle like SCI_RNC_POSTING */
+ /* Fall through - and handle like SCI_RNC_POSTING */
case SCI_RNC_POSTING:
/* Set the destination state to AWAIT - this signals the
* entry into the SCI_RNC_READY state that a suspension
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 2f151708b59a..1b18cf55167e 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -894,7 +894,7 @@ sci_io_request_terminate(struct isci_request *ireq)
* and don't wait for the task response.
*/
sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
- /* Fall through and handle like ABORTING... */
+ /* Fall through - and handle like ABORTING... */
case SCI_REQ_ABORTING:
if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
set_bit(IREQ_PENDING_ABORT, &ireq->flags);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 23354f206533..cae6368ebb98 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -44,6 +44,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_transport_iscsi.h>
+#include <trace/events/iscsi.h>
#include "iscsi_tcp.h"
@@ -72,6 +73,9 @@ MODULE_PARM_DESC(debug_iscsi_tcp, "Turn on debugging for iscsi_tcp module "
iscsi_conn_printk(KERN_INFO, _conn, \
"%s " dbg_fmt, \
__func__, ##arg); \
+ iscsi_dbg_trace(trace_iscsi_dbg_sw_tcp, \
+ &(_conn)->cls_conn->dev, \
+ "%s " dbg_fmt, __func__, ##arg);\
} while (0);
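The debug macro now fans out to both printk (when debug_iscsi_tcp is set) and a tracepoint, and libiscsi/libiscsi_tcp get the same treatment below, so messages can be captured through low-overhead tracing instead of flooding the log. A hypothetical capture via tracefs (the path follows the usual events/<group>/enable layout; exact event names come from trace/events/iscsi.h):

#include <fcntl.h>
#include <unistd.h>

/* Enable all iscsi trace events; equivalent to
 * echo 1 > /sys/kernel/debug/tracing/events/iscsi/enable. */
int main(void)
{
	int fd = open("/sys/kernel/debug/tracing/events/iscsi/enable",
		      O_WRONLY);

	if (fd >= 0) {
		write(fd, "1", 1);
		close(fd);
	}
	/* captured messages then stream from
	 * /sys/kernel/debug/tracing/trace_pipe */
	return 0;
}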
@@ -980,7 +984,7 @@ static struct scsi_host_template iscsi_sw_tcp_sht = {
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler= iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
.slave_alloc = iscsi_sw_tcp_slave_alloc,
.slave_configure = iscsi_sw_tcp_slave_configure,
.target_alloc = iscsi_target_alloc,
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 1e1c0f1b9e69..9192a1d9dec6 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -860,7 +860,6 @@ static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
static void fc_rport_recv_flogi_req(struct fc_lport *lport,
struct fc_frame *rx_fp)
{
- struct fc_disc *disc;
struct fc_els_flogi *flp;
struct fc_rport_priv *rdata;
struct fc_frame *fp = rx_fp;
@@ -871,7 +870,6 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
FC_RPORT_ID_DBG(lport, sid, "Received FLOGI request\n");
- disc = &lport->disc;
if (!lport->point_to_multipoint) {
rjt_data.reason = ELS_RJT_UNSUP;
rjt_data.explan = ELS_EXPL_NONE;
@@ -1724,6 +1722,7 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
kref_put(&rdata->kref, fc_rport_destroy);
goto busy;
}
+ /* fall through */
default:
FC_RPORT_DBG(rdata,
"Reject ELS 0x%02x while in state %s\n",
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index f78d2e5c1471..b8d325ce8754 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -40,6 +40,7 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/libiscsi.h>
+#include <trace/events/iscsi.h>
static int iscsi_dbg_lib_conn;
module_param_named(debug_libiscsi_conn, iscsi_dbg_lib_conn, int,
@@ -68,6 +69,9 @@ MODULE_PARM_DESC(debug_libiscsi_eh,
iscsi_conn_printk(KERN_INFO, _conn, \
"%s " dbg_fmt, \
__func__, ##arg); \
+ iscsi_dbg_trace(trace_iscsi_dbg_conn, \
+ &(_conn)->cls_conn->dev, \
+ "%s " dbg_fmt, __func__, ##arg);\
} while (0);
#define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...) \
@@ -76,6 +80,9 @@ MODULE_PARM_DESC(debug_libiscsi_eh,
iscsi_session_printk(KERN_INFO, _session, \
"%s " dbg_fmt, \
__func__, ##arg); \
+ iscsi_dbg_trace(trace_iscsi_dbg_session, \
+ &(_session)->cls_session->dev, \
+ "%s " dbg_fmt, __func__, ##arg); \
} while (0);
#define ISCSI_DBG_EH(_session, dbg_fmt, arg...) \
@@ -84,6 +91,9 @@ MODULE_PARM_DESC(debug_libiscsi_eh,
iscsi_session_printk(KERN_INFO, _session, \
"%s " dbg_fmt, \
__func__, ##arg); \
+ iscsi_dbg_trace(trace_iscsi_dbg_eh, \
+ &(_session)->cls_session->dev, \
+ "%s " dbg_fmt, __func__, ##arg); \
} while (0);
inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 4fcb9e65be57..8a6b1b3f8277 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -43,6 +43,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_transport_iscsi.h>
+#include <trace/events/iscsi.h>
#include "iscsi_tcp.h"
@@ -65,6 +66,9 @@ MODULE_PARM_DESC(debug_libiscsi_tcp, "Turn on debugging for libiscsi_tcp "
iscsi_conn_printk(KERN_INFO, _conn, \
"%s " dbg_fmt, \
__func__, ##arg); \
+ iscsi_dbg_trace(trace_iscsi_dbg_tcp, \
+ &(_conn)->cls_conn->dev, \
+ "%s " dbg_fmt, __func__, ##arg);\
} while (0);
static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile
index 2e70140f70c3..5d51520c6f23 100644
--- a/drivers/scsi/libsas/Makefile
+++ b/drivers/scsi/libsas/Makefile
@@ -26,10 +26,11 @@ libsas-y += sas_init.o \
sas_phy.o \
sas_port.o \
sas_event.o \
- sas_dump.o \
sas_discover.o \
sas_expander.o \
sas_scsi_host.o \
sas_task.o
libsas-$(CONFIG_SCSI_SAS_ATA) += sas_ata.o
libsas-$(CONFIG_SCSI_SAS_HOST_SMP) += sas_host_smp.o
+
+ccflags-y := -DDEBUG
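The new ccflags-y line is what keeps the converted messages visible: with DEBUG defined for the whole directory, pr_debug() compiles to a real printk (or a dynamic-debug site on CONFIG_DYNAMIC_DEBUG kernels) instead of the default no-op, preserving the behaviour of the old unconditional SAS_DPRINTK. Simplified from <linux/printk.h>:

/* Simplified: how -DDEBUG changes pr_debug() for every libsas object. */
#if defined(CONFIG_DYNAMIC_DEBUG)
#define pr_debug(fmt, ...) dynamic_pr_debug(fmt, ##__VA_ARGS__)
#elif defined(DEBUG)
#define pr_debug(fmt, ...) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif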
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index c90b278cc28c..6f93fee2b21b 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -75,8 +75,8 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
case SAS_OPEN_TO:
case SAS_OPEN_REJECT:
- SAS_DPRINTK("%s: Saw error %d. What to do?\n",
- __func__, ts->stat);
+ pr_warn("%s: Saw error %d. What to do?\n",
+ __func__, ts->stat);
return AC_ERR_OTHER;
case SAM_STAT_CHECK_CONDITION:
@@ -151,8 +151,7 @@ static void sas_ata_task_done(struct sas_task *task)
} else {
ac = sas_to_ata_err(stat);
if (ac) {
- SAS_DPRINTK("%s: SAS error %x\n", __func__,
- stat->stat);
+ pr_warn("%s: SAS error %x\n", __func__, stat->stat);
/* We saw a SAS error. Send a vague error. */
if (!link->sactive) {
qc->err_mask = ac;
@@ -237,7 +236,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
ret = i->dft->lldd_execute_task(task, GFP_ATOMIC);
if (ret) {
- SAS_DPRINTK("lldd_execute_task returned: %d\n", ret);
+ pr_debug("lldd_execute_task returned: %d\n", ret);
if (qc->scsicmd)
ASSIGN_SAS_TASK(qc->scsicmd, NULL);
@@ -282,9 +281,9 @@ int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
res = sas_get_report_phy_sata(dev->parent, phy->phy_id,
&dev->sata_dev.rps_resp);
if (res) {
- SAS_DPRINTK("report phy sata to %016llx:0x%x returned "
- "0x%x\n", SAS_ADDR(dev->parent->sas_addr),
- phy->phy_id, res);
+ pr_debug("report phy sata to %016llx:0x%x returned 0x%x\n",
+ SAS_ADDR(dev->parent->sas_addr),
+ phy->phy_id, res);
return res;
}
memcpy(dev->frame_rcvd, &dev->sata_dev.rps_resp.rps.fis,
@@ -375,7 +374,7 @@ static int sas_ata_printk(const char *level, const struct domain_device *ddev,
vaf.fmt = fmt;
vaf.va = &args;
- r = printk("%ssas: ata%u: %s: %pV",
+ r = printk("%s" SAS_FMT "ata%u: %s: %pV",
level, ap->print_id, dev_name(dev), &vaf);
va_end(args);
@@ -431,8 +430,7 @@ static void sas_ata_internal_abort(struct sas_task *task)
if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
- SAS_DPRINTK("%s: Task %p already finished.\n", __func__,
- task);
+ pr_debug("%s: Task %p already finished.\n", __func__, task);
goto out;
}
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
@@ -452,7 +450,7 @@ static void sas_ata_internal_abort(struct sas_task *task)
* aborted ata tasks, otherwise we (likely) leak the sas task
* here
*/
- SAS_DPRINTK("%s: Task %p leaked.\n", __func__, task);
+ pr_warn("%s: Task %p leaked.\n", __func__, task);
if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
@@ -558,7 +556,7 @@ int sas_ata_init(struct domain_device *found_dev)
ata_host = kzalloc(sizeof(*ata_host), GFP_KERNEL);
if (!ata_host) {
- SAS_DPRINTK("ata host alloc failed.\n");
+ pr_err("ata host alloc failed.\n");
return -ENOMEM;
}
@@ -566,7 +564,7 @@ int sas_ata_init(struct domain_device *found_dev)
ap = ata_sas_port_alloc(ata_host, &sata_port_info, shost);
if (!ap) {
- SAS_DPRINTK("ata_sas_port_alloc failed.\n");
+ pr_err("ata_sas_port_alloc failed.\n");
rc = -ENODEV;
goto free_host;
}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index dde433aa59c2..726ada9b8c79 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -128,7 +128,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
SAS_FANOUT_EXPANDER_DEVICE);
break;
default:
- printk("ERROR: Unidentified device type %d\n", dev->dev_type);
+ pr_warn("ERROR: Unidentified device type %d\n", dev->dev_type);
rphy = NULL;
break;
}
@@ -186,10 +186,9 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
res = i->dft->lldd_dev_found(dev);
if (res) {
- printk("sas: driver on pcidev %s cannot handle "
- "device %llx, error:%d\n",
- dev_name(sas_ha->dev),
- SAS_ADDR(dev->sas_addr), res);
+ pr_warn("driver on host %s cannot handle device %llx, error:%d\n",
+ dev_name(sas_ha->dev),
+ SAS_ADDR(dev->sas_addr), res);
}
set_bit(SAS_DEV_FOUND, &dev->state);
kref_get(&dev->kref);
@@ -456,8 +455,8 @@ static void sas_discover_domain(struct work_struct *work)
return;
dev = port->port_dev;
- SAS_DPRINTK("DOING DISCOVERY on port %d, pid:%d\n", port->id,
- task_pid_nr(current));
+ pr_debug("DOING DISCOVERY on port %d, pid:%d\n", port->id,
+ task_pid_nr(current));
switch (dev->dev_type) {
case SAS_END_DEVICE:
@@ -473,12 +472,12 @@ static void sas_discover_domain(struct work_struct *work)
error = sas_discover_sata(dev);
break;
#else
- SAS_DPRINTK("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n");
+ pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n");
/* Fall through */
#endif
default:
error = -ENXIO;
- SAS_DPRINTK("unhandled device %d\n", dev->dev_type);
+ pr_err("unhandled device %d\n", dev->dev_type);
break;
}
@@ -495,8 +494,8 @@ static void sas_discover_domain(struct work_struct *work)
sas_probe_devices(port);
- SAS_DPRINTK("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id,
- task_pid_nr(current), error);
+ pr_debug("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id,
+ task_pid_nr(current), error);
}
static void sas_revalidate_domain(struct work_struct *work)
@@ -510,22 +509,22 @@ static void sas_revalidate_domain(struct work_struct *work)
/* prevent revalidation from finding sata links in recovery */
mutex_lock(&ha->disco_mutex);
if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
- SAS_DPRINTK("REVALIDATION DEFERRED on port %d, pid:%d\n",
- port->id, task_pid_nr(current));
+ pr_debug("REVALIDATION DEFERRED on port %d, pid:%d\n",
+ port->id, task_pid_nr(current));
goto out;
}
clear_bit(DISCE_REVALIDATE_DOMAIN, &port->disc.pending);
- SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
- task_pid_nr(current));
+ pr_debug("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
+ task_pid_nr(current));
if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE))
res = sas_ex_revalidate_domain(ddev);
- SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
- port->id, task_pid_nr(current), res);
+ pr_debug("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
+ port->id, task_pid_nr(current), res);
out:
mutex_unlock(&ha->disco_mutex);
diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c
deleted file mode 100644
index 7e5d262e7a7d..000000000000
--- a/drivers/scsi/libsas/sas_dump.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Serial Attached SCSI (SAS) Dump/Debugging routines
- *
- * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
- * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- */
-
-#include "sas_dump.h"
-
-static const char *sas_porte_str[] = {
- [0] = "PORTE_BYTES_DMAED",
- [1] = "PORTE_BROADCAST_RCVD",
- [2] = "PORTE_LINK_RESET_ERR",
- [3] = "PORTE_TIMER_EVENT",
- [4] = "PORTE_HARD_RESET",
-};
-
-static const char *sas_phye_str[] = {
- [0] = "PHYE_LOSS_OF_SIGNAL",
- [1] = "PHYE_OOB_DONE",
- [2] = "PHYE_OOB_ERROR",
- [3] = "PHYE_SPINUP_HOLD",
- [4] = "PHYE_RESUME_TIMEOUT",
-};
-
-void sas_dprint_porte(int phyid, enum port_event pe)
-{
- SAS_DPRINTK("phy%d: port event: %s\n", phyid, sas_porte_str[pe]);
-}
-void sas_dprint_phye(int phyid, enum phy_event pe)
-{
- SAS_DPRINTK("phy%d: phy event: %s\n", phyid, sas_phye_str[pe]);
-}
-
-void sas_dump_port(struct asd_sas_port *port)
-{
- SAS_DPRINTK("port%d: class:0x%x\n", port->id, port->class);
- SAS_DPRINTK("port%d: sas_addr:%llx\n", port->id,
- SAS_ADDR(port->sas_addr));
- SAS_DPRINTK("port%d: attached_sas_addr:%llx\n", port->id,
- SAS_ADDR(port->attached_sas_addr));
- SAS_DPRINTK("port%d: iproto:0x%x\n", port->id, port->iproto);
- SAS_DPRINTK("port%d: tproto:0x%x\n", port->id, port->tproto);
- SAS_DPRINTK("port%d: oob_mode:0x%x\n", port->id, port->oob_mode);
- SAS_DPRINTK("port%d: num_phys:%d\n", port->id, port->num_phys);
-}
diff --git a/drivers/scsi/libsas/sas_dump.h b/drivers/scsi/libsas/sas_dump.h
deleted file mode 100644
index 6aaee6b0fcdb..000000000000
--- a/drivers/scsi/libsas/sas_dump.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Serial Attached SCSI (SAS) Dump/Debugging routines header file
- *
- * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
- * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- */
-
-#include "sas_internal.h"
-
-void sas_dprint_porte(int phyid, enum port_event pe);
-void sas_dprint_phye(int phyid, enum phy_event pe);
-void sas_dump_port(struct asd_sas_port *port);
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index ae923eb6de95..b1e0f7d2b396 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -25,7 +25,6 @@
#include <linux/export.h>
#include <scsi/scsi_host.h>
#include "sas_internal.h"
-#include "sas_dump.h"
int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 0d1f72752ca2..17eb4185f29d 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -99,17 +99,17 @@ static int smp_execute_task_sg(struct domain_device *dev,
if (res) {
del_timer(&task->slow_task->timer);
- SAS_DPRINTK("executing SMP task failed:%d\n", res);
+ pr_notice("executing SMP task failed:%d\n", res);
break;
}
wait_for_completion(&task->slow_task->completion);
res = -ECOMM;
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
- SAS_DPRINTK("smp task timed out or aborted\n");
+ pr_notice("smp task timed out or aborted\n");
i->dft->lldd_abort_task(task);
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- SAS_DPRINTK("SMP task aborted and not done\n");
+ pr_notice("SMP task aborted and not done\n");
break;
}
}
@@ -134,11 +134,11 @@ static int smp_execute_task_sg(struct domain_device *dev,
task->task_status.stat == SAS_DEVICE_UNKNOWN)
break;
else {
- SAS_DPRINTK("%s: task to dev %016llx response: 0x%x "
- "status 0x%x\n", __func__,
- SAS_ADDR(dev->sas_addr),
- task->task_status.resp,
- task->task_status.stat);
+ pr_notice("%s: task to dev %016llx response: 0x%x status 0x%x\n",
+ __func__,
+ SAS_ADDR(dev->sas_addr),
+ task->task_status.resp,
+ task->task_status.stat);
sas_free_task(task);
task = NULL;
}
@@ -347,11 +347,11 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
set_bit(DISCE_REVALIDATE_DOMAIN, &dev->port->disc.pending);
- SAS_DPRINTK("%sex %016llx phy%02d:%c:%X attached: %016llx (%s)\n",
- test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state) ? "ata: " : "",
- SAS_ADDR(dev->sas_addr), phy->phy_id,
- sas_route_char(dev, phy), phy->linkrate,
- SAS_ADDR(phy->attached_sas_addr), type);
+ pr_debug("%sex %016llx phy%02d:%c:%X attached: %016llx (%s)\n",
+ test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state) ? "ata: " : "",
+ SAS_ADDR(dev->sas_addr), phy->phy_id,
+ sas_route_char(dev, phy), phy->linkrate,
+ SAS_ADDR(phy->attached_sas_addr), type);
}
/* check if we have an existing attached ata device on this expander phy */
@@ -393,7 +393,7 @@ static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
return res;
dr = &((struct smp_resp *)disc_resp)->disc;
if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) {
- sas_printk("Found loopback topology, just ignore it!\n");
+ pr_notice("Found loopback topology, just ignore it!\n");
return 0;
}
sas_set_ex_phy(dev, single, disc_resp);
@@ -500,12 +500,12 @@ static int sas_ex_general(struct domain_device *dev)
RG_RESP_SIZE);
if (res) {
- SAS_DPRINTK("RG to ex %016llx failed:0x%x\n",
- SAS_ADDR(dev->sas_addr), res);
+ pr_notice("RG to ex %016llx failed:0x%x\n",
+ SAS_ADDR(dev->sas_addr), res);
goto out;
} else if (rg_resp->result != SMP_RESP_FUNC_ACC) {
- SAS_DPRINTK("RG:ex %016llx returned SMP result:0x%x\n",
- SAS_ADDR(dev->sas_addr), rg_resp->result);
+ pr_debug("RG:ex %016llx returned SMP result:0x%x\n",
+ SAS_ADDR(dev->sas_addr), rg_resp->result);
res = rg_resp->result;
goto out;
}
@@ -513,8 +513,8 @@ static int sas_ex_general(struct domain_device *dev)
ex_assign_report_general(dev, rg_resp);
if (dev->ex_dev.configuring) {
- SAS_DPRINTK("RG: ex %llx self-configuring...\n",
- SAS_ADDR(dev->sas_addr));
+ pr_debug("RG: ex %llx self-configuring...\n",
+ SAS_ADDR(dev->sas_addr));
schedule_timeout_interruptible(5*HZ);
} else
break;
@@ -568,12 +568,12 @@ static int sas_ex_manuf_info(struct domain_device *dev)
res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp,MI_RESP_SIZE);
if (res) {
- SAS_DPRINTK("MI: ex %016llx failed:0x%x\n",
- SAS_ADDR(dev->sas_addr), res);
+ pr_notice("MI: ex %016llx failed:0x%x\n",
+ SAS_ADDR(dev->sas_addr), res);
goto out;
} else if (mi_resp[2] != SMP_RESP_FUNC_ACC) {
- SAS_DPRINTK("MI ex %016llx returned SMP result:0x%x\n",
- SAS_ADDR(dev->sas_addr), mi_resp[2]);
+ pr_debug("MI ex %016llx returned SMP result:0x%x\n",
+ SAS_ADDR(dev->sas_addr), mi_resp[2]);
goto out;
}
@@ -836,10 +836,9 @@ static struct domain_device *sas_ex_discover_end_dev(
res = sas_discover_sata(child);
if (res) {
- SAS_DPRINTK("sas_discover_sata() for device %16llx at "
- "%016llx:0x%x returned 0x%x\n",
- SAS_ADDR(child->sas_addr),
- SAS_ADDR(parent->sas_addr), phy_id, res);
+ pr_notice("sas_discover_sata() for device %16llx at %016llx:0x%x returned 0x%x\n",
+ SAS_ADDR(child->sas_addr),
+ SAS_ADDR(parent->sas_addr), phy_id, res);
goto out_list_del;
}
} else
@@ -861,16 +860,15 @@ static struct domain_device *sas_ex_discover_end_dev(
res = sas_discover_end_dev(child);
if (res) {
- SAS_DPRINTK("sas_discover_end_dev() for device %16llx "
- "at %016llx:0x%x returned 0x%x\n",
- SAS_ADDR(child->sas_addr),
- SAS_ADDR(parent->sas_addr), phy_id, res);
+ pr_notice("sas_discover_end_dev() for device %16llx at %016llx:0x%x returned 0x%x\n",
+ SAS_ADDR(child->sas_addr),
+ SAS_ADDR(parent->sas_addr), phy_id, res);
goto out_list_del;
}
} else {
- SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n",
- phy->attached_tproto, SAS_ADDR(parent->sas_addr),
- phy_id);
+ pr_notice("target proto 0x%x at %016llx:0x%x not handled\n",
+ phy->attached_tproto, SAS_ADDR(parent->sas_addr),
+ phy_id);
goto out_free;
}
@@ -927,11 +925,10 @@ static struct domain_device *sas_ex_discover_expander(
int res;
if (phy->routing_attr == DIRECT_ROUTING) {
- SAS_DPRINTK("ex %016llx:0x%x:D <--> ex %016llx:0x%x is not "
- "allowed\n",
- SAS_ADDR(parent->sas_addr), phy_id,
- SAS_ADDR(phy->attached_sas_addr),
- phy->attached_phy_id);
+ pr_warn("ex %016llx:0x%x:D <--> ex %016llx:0x%x is not allowed\n",
+ SAS_ADDR(parent->sas_addr), phy_id,
+ SAS_ADDR(phy->attached_sas_addr),
+ phy->attached_phy_id);
return NULL;
}
child = sas_alloc_device();
@@ -1038,25 +1035,24 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE &&
ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE &&
ex_phy->attached_dev_type != SAS_SATA_PENDING) {
- SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx "
- "phy 0x%x\n", ex_phy->attached_dev_type,
- SAS_ADDR(dev->sas_addr),
- phy_id);
+ pr_warn("unknown device type(0x%x) attached to ex %016llx phy 0x%x\n",
+ ex_phy->attached_dev_type,
+ SAS_ADDR(dev->sas_addr),
+ phy_id);
return 0;
}
res = sas_configure_routing(dev, ex_phy->attached_sas_addr);
if (res) {
- SAS_DPRINTK("configure routing for dev %016llx "
- "reported 0x%x. Forgotten\n",
- SAS_ADDR(ex_phy->attached_sas_addr), res);
+ pr_notice("configure routing for dev %016llx reported 0x%x. Forgotten\n",
+ SAS_ADDR(ex_phy->attached_sas_addr), res);
sas_disable_routing(dev, ex_phy->attached_sas_addr);
return res;
}
if (sas_ex_join_wide_port(dev, phy_id)) {
- SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
- phy_id, SAS_ADDR(ex_phy->attached_sas_addr));
+ pr_debug("Attaching ex phy%d to wide port %016llx\n",
+ phy_id, SAS_ADDR(ex_phy->attached_sas_addr));
return res;
}
@@ -1067,12 +1063,11 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
break;
case SAS_FANOUT_EXPANDER_DEVICE:
if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) {
- SAS_DPRINTK("second fanout expander %016llx phy 0x%x "
- "attached to ex %016llx phy 0x%x\n",
- SAS_ADDR(ex_phy->attached_sas_addr),
- ex_phy->attached_phy_id,
- SAS_ADDR(dev->sas_addr),
- phy_id);
+ pr_debug("second fanout expander %016llx phy 0x%x attached to ex %016llx phy 0x%x\n",
+ SAS_ADDR(ex_phy->attached_sas_addr),
+ ex_phy->attached_phy_id,
+ SAS_ADDR(dev->sas_addr),
+ phy_id);
sas_ex_disable_phy(dev, phy_id);
break;
} else
@@ -1101,9 +1096,8 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
SAS_ADDR(child->sas_addr)) {
ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED;
if (sas_ex_join_wide_port(dev, i))
- SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
- i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr));
-
+ pr_debug("Attaching ex phy%d to wide port %016llx\n",
+ i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr));
}
}
}
@@ -1154,13 +1148,11 @@ static int sas_check_level_subtractive_boundary(struct domain_device *dev)
if (sas_find_sub_addr(child, s2) &&
(SAS_ADDR(sub_addr) != SAS_ADDR(s2))) {
- SAS_DPRINTK("ex %016llx->%016llx-?->%016llx "
- "diverges from subtractive "
- "boundary %016llx\n",
- SAS_ADDR(dev->sas_addr),
- SAS_ADDR(child->sas_addr),
- SAS_ADDR(s2),
- SAS_ADDR(sub_addr));
+ pr_notice("ex %016llx->%016llx-?->%016llx diverges from subtractive boundary %016llx\n",
+ SAS_ADDR(dev->sas_addr),
+ SAS_ADDR(child->sas_addr),
+ SAS_ADDR(s2),
+ SAS_ADDR(sub_addr));
sas_ex_disable_port(child, s2);
}
@@ -1239,12 +1231,10 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
else if (SAS_ADDR(sub_sas_addr) !=
SAS_ADDR(phy->attached_sas_addr)) {
- SAS_DPRINTK("ex %016llx phy 0x%x "
- "diverges(%016llx) on subtractive "
- "boundary(%016llx). Disabled\n",
- SAS_ADDR(dev->sas_addr), i,
- SAS_ADDR(phy->attached_sas_addr),
- SAS_ADDR(sub_sas_addr));
+ pr_notice("ex %016llx phy 0x%x diverges(%016llx) on subtractive boundary(%016llx). Disabled\n",
+ SAS_ADDR(dev->sas_addr), i,
+ SAS_ADDR(phy->attached_sas_addr),
+ SAS_ADDR(sub_sas_addr));
sas_ex_disable_phy(dev, i);
}
}
@@ -1262,19 +1252,17 @@ static void sas_print_parent_topology_bug(struct domain_device *child,
};
struct domain_device *parent = child->parent;
- sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx "
- "phy 0x%x has %c:%c routing link!\n",
-
- ex_type[parent->dev_type],
- SAS_ADDR(parent->sas_addr),
- parent_phy->phy_id,
+ pr_notice("%s ex %016llx phy 0x%x <--> %s ex %016llx phy 0x%x has %c:%c routing link!\n",
+ ex_type[parent->dev_type],
+ SAS_ADDR(parent->sas_addr),
+ parent_phy->phy_id,
- ex_type[child->dev_type],
- SAS_ADDR(child->sas_addr),
- child_phy->phy_id,
+ ex_type[child->dev_type],
+ SAS_ADDR(child->sas_addr),
+ child_phy->phy_id,
- sas_route_char(parent, parent_phy),
- sas_route_char(child, child_phy));
+ sas_route_char(parent, parent_phy),
+ sas_route_char(child, child_phy));
}
static int sas_check_eeds(struct domain_device *child,
@@ -1286,13 +1274,12 @@ static int sas_check_eeds(struct domain_device *child,
if (SAS_ADDR(parent->port->disc.fanout_sas_addr) != 0) {
res = -ENODEV;
- SAS_DPRINTK("edge ex %016llx phy S:0x%x <--> edge ex %016llx "
- "phy S:0x%x, while there is a fanout ex %016llx\n",
- SAS_ADDR(parent->sas_addr),
- parent_phy->phy_id,
- SAS_ADDR(child->sas_addr),
- child_phy->phy_id,
- SAS_ADDR(parent->port->disc.fanout_sas_addr));
+ pr_warn("edge ex %016llx phy S:0x%x <--> edge ex %016llx phy S:0x%x, while there is a fanout ex %016llx\n",
+ SAS_ADDR(parent->sas_addr),
+ parent_phy->phy_id,
+ SAS_ADDR(child->sas_addr),
+ child_phy->phy_id,
+ SAS_ADDR(parent->port->disc.fanout_sas_addr));
} else if (SAS_ADDR(parent->port->disc.eeds_a) == 0) {
memcpy(parent->port->disc.eeds_a, parent->sas_addr,
SAS_ADDR_SIZE);
@@ -1310,12 +1297,11 @@ static int sas_check_eeds(struct domain_device *child,
;
else {
res = -ENODEV;
- SAS_DPRINTK("edge ex %016llx phy 0x%x <--> edge ex %016llx "
- "phy 0x%x link forms a third EEDS!\n",
- SAS_ADDR(parent->sas_addr),
- parent_phy->phy_id,
- SAS_ADDR(child->sas_addr),
- child_phy->phy_id);
+ pr_warn("edge ex %016llx phy 0x%x <--> edge ex %016llx phy 0x%x link forms a third EEDS!\n",
+ SAS_ADDR(parent->sas_addr),
+ parent_phy->phy_id,
+ SAS_ADDR(child->sas_addr),
+ child_phy->phy_id);
}
return res;
@@ -1429,14 +1415,13 @@ static int sas_configure_present(struct domain_device *dev, int phy_id,
goto out;
res = rri_resp[2];
if (res == SMP_RESP_NO_INDEX) {
- SAS_DPRINTK("overflow of indexes: dev %016llx "
- "phy 0x%x index 0x%x\n",
- SAS_ADDR(dev->sas_addr), phy_id, i);
+ pr_warn("overflow of indexes: dev %016llx phy 0x%x index 0x%x\n",
+ SAS_ADDR(dev->sas_addr), phy_id, i);
goto out;
} else if (res != SMP_RESP_FUNC_ACC) {
- SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x "
- "result 0x%x\n", __func__,
- SAS_ADDR(dev->sas_addr), phy_id, i, res);
+ pr_notice("%s: dev %016llx phy 0x%x index 0x%x result 0x%x\n",
+ __func__, SAS_ADDR(dev->sas_addr), phy_id,
+ i, res);
goto out;
}
if (SAS_ADDR(sas_addr) != 0) {
@@ -1500,9 +1485,8 @@ static int sas_configure_set(struct domain_device *dev, int phy_id,
goto out;
res = cri_resp[2];
if (res == SMP_RESP_NO_INDEX) {
- SAS_DPRINTK("overflow of indexes: dev %016llx phy 0x%x "
- "index 0x%x\n",
- SAS_ADDR(dev->sas_addr), phy_id, index);
+ pr_warn("overflow of indexes: dev %016llx phy 0x%x index 0x%x\n",
+ SAS_ADDR(dev->sas_addr), phy_id, index);
}
out:
kfree(cri_req);
@@ -1549,8 +1533,8 @@ static int sas_configure_parent(struct domain_device *parent,
}
if (ex_parent->conf_route_table == 0) {
- SAS_DPRINTK("ex %016llx has self-configuring routing table\n",
- SAS_ADDR(parent->sas_addr));
+ pr_debug("ex %016llx has self-configuring routing table\n",
+ SAS_ADDR(parent->sas_addr));
return 0;
}
@@ -1611,8 +1595,8 @@ static int sas_discover_expander(struct domain_device *dev)
res = sas_expander_discover(dev);
if (res) {
- SAS_DPRINTK("expander %016llx discovery failed(0x%x)\n",
- SAS_ADDR(dev->sas_addr), res);
+ pr_warn("expander %016llx discovery failed(0x%x)\n",
+ SAS_ADDR(dev->sas_addr), res);
goto out_err;
}
@@ -1856,10 +1840,10 @@ static int sas_find_bcast_dev(struct domain_device *dev,
if (phy_id != -1) {
*src_dev = dev;
ex->ex_change_count = ex_change_count;
- SAS_DPRINTK("Expander phy change count has changed\n");
+ pr_info("Expander phy change count has changed\n");
return res;
} else
- SAS_DPRINTK("Expander phys DID NOT change\n");
+ pr_info("Expander phys DID NOT change\n");
}
list_for_each_entry(ch, &ex->children, siblings) {
if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
@@ -1969,8 +1953,8 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
struct domain_device *child;
int res;
- SAS_DPRINTK("ex %016llx phy%d new device attached\n",
- SAS_ADDR(dev->sas_addr), phy_id);
+ pr_debug("ex %016llx phy%d new device attached\n",
+ SAS_ADDR(dev->sas_addr), phy_id);
res = sas_ex_phy_discover(dev, phy_id);
if (res)
return res;
@@ -2048,15 +2032,15 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING)
action = ", needs recovery";
- SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n",
- SAS_ADDR(dev->sas_addr), phy_id, action);
+ pr_debug("ex %016llx phy 0x%x broadcast flutter%s\n",
+ SAS_ADDR(dev->sas_addr), phy_id, action);
return res;
}
/* we always have to delete the old device when we went here */
- SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n",
- SAS_ADDR(dev->sas_addr), phy_id,
- SAS_ADDR(phy->attached_sas_addr));
+ pr_info("ex %016llx phy 0x%x replace %016llx\n",
+ SAS_ADDR(dev->sas_addr), phy_id,
+ SAS_ADDR(phy->attached_sas_addr));
sas_unregister_devs_sas_addr(dev, phy_id, last);
return sas_discover_new(dev, phy_id);
@@ -2084,8 +2068,8 @@ static int sas_rediscover(struct domain_device *dev, const int phy_id)
int i;
bool last = true; /* is this the last phy of the port */
- SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n",
- SAS_ADDR(dev->sas_addr), phy_id);
+ pr_debug("ex %016llx phy%d originated BROADCAST(CHANGE)\n",
+ SAS_ADDR(dev->sas_addr), phy_id);
if (SAS_ADDR(changed_phy->attached_sas_addr) != 0) {
for (i = 0; i < ex->num_phys; i++) {
@@ -2095,8 +2079,8 @@ static int sas_rediscover(struct domain_device *dev, const int phy_id)
continue;
if (SAS_ADDR(phy->attached_sas_addr) ==
SAS_ADDR(changed_phy->attached_sas_addr)) {
- SAS_DPRINTK("phy%d part of wide port with "
- "phy%d\n", phy_id, i);
+ pr_debug("phy%d part of wide port with phy%d\n",
+ phy_id, i);
last = false;
break;
}
@@ -2154,23 +2138,23 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
case SAS_FANOUT_EXPANDER_DEVICE:
break;
default:
- printk("%s: can we send a smp request to a device?\n",
+ pr_err("%s: can we send a smp request to a device?\n",
__func__);
goto out;
}
dev = sas_find_dev_by_rphy(rphy);
if (!dev) {
- printk("%s: fail to find a domain_device?\n", __func__);
+ pr_err("%s: fail to find a domain_device?\n", __func__);
goto out;
}
/* do we need to support multiple segments? */
if (job->request_payload.sg_cnt > 1 ||
job->reply_payload.sg_cnt > 1) {
- printk("%s: multiple segments req %u, rsp %u\n",
- __func__, job->request_payload.payload_len,
- job->reply_payload.payload_len);
+ pr_info("%s: multiple segments req %u, rsp %u\n",
+ __func__, job->request_payload.payload_len,
+ job->reply_payload.payload_len);
goto out;
}
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index ede0af78144f..221340ee8651 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -128,19 +128,19 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
error = sas_register_phys(sas_ha);
if (error) {
- printk(KERN_NOTICE "couldn't register sas phys:%d\n", error);
+ pr_notice("couldn't register sas phys:%d\n", error);
return error;
}
error = sas_register_ports(sas_ha);
if (error) {
- printk(KERN_NOTICE "couldn't register sas ports:%d\n", error);
+ pr_notice("couldn't register sas ports:%d\n", error);
goto Undo_phys;
}
error = sas_init_events(sas_ha);
if (error) {
- printk(KERN_NOTICE "couldn't start event thread:%d\n", error);
+ pr_notice("couldn't start event thread:%d\n", error);
goto Undo_ports;
}
@@ -623,8 +623,8 @@ struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
if (atomic_read(&phy->event_nr) > phy->ha->event_thres) {
if (i->dft->lldd_control_phy) {
if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) {
- sas_printk("The phy%02d bursting events, shut it down.\n",
- phy->id);
+ pr_notice("The phy%02d bursting events, shut it down.\n",
+ phy->id);
sas_notify_phy_event(phy, PHYE_SHUTDOWN);
}
} else {
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 50e12d662ffe..2cdb981cf476 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -32,9 +32,13 @@
#include <scsi/libsas.h>
#include <scsi/sas_ata.h>
-#define sas_printk(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define SAS_FMT "sas: "
-#define SAS_DPRINTK(fmt, ...) printk(KERN_DEBUG "sas: " fmt, ## __VA_ARGS__)
+#define pr_fmt(fmt) SAS_FMT fmt
#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble)
#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
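Defining pr_fmt once in sas_internal.h makes every pr_*() call in libsas carry the "sas: " prefix automatically, which is what lets the hand-prefixed sas_printk/SAS_DPRINTK call sites throughout this series collapse into plain pr_notice()/pr_debug(). The expansion, in miniature:

/* pr_fmt is consulted at each pr_*() expansion site, so after this
 * header is included: */
#define pr_fmt(fmt) SAS_FMT fmt		/* SAS_FMT is "sas: " */

pr_notice("couldn't register sas phys:%d\n", error);
/* expands to:
 * printk(KERN_NOTICE "sas: couldn't register sas phys:%d\n", error) */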
@@ -120,10 +124,10 @@ static inline void sas_smp_host_handler(struct bsg_job *job,
static inline void sas_fail_probe(struct domain_device *dev, const char *func, int err)
{
- SAS_DPRINTK("%s: for %s device %16llx returned %d\n",
- func, dev->parent ? "exp-attached" :
- "direct-attached",
- SAS_ADDR(dev->sas_addr), err);
+ pr_warn("%s: for %s device %16llx returned %d\n",
+ func, dev->parent ? "exp-attached" :
+ "direct-attached",
+ SAS_ADDR(dev->sas_addr), err);
sas_unregister_dev(dev->port, dev);
}
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index bf3e1b979ca6..0374243c85d0 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -122,11 +122,11 @@ static void sas_phye_shutdown(struct work_struct *work)
phy->enabled = 0;
ret = i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL);
if (ret)
- sas_printk("lldd disable phy%02d returned %d\n",
- phy->id, ret);
+ pr_notice("lldd disable phy%02d returned %d\n",
+ phy->id, ret);
} else
- sas_printk("phy%02d is not enabled, cannot shutdown\n",
- phy->id);
+ pr_notice("phy%02d is not enabled, cannot shutdown\n",
+ phy->id);
}
/* ---------- Phy class registration ---------- */
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index fad23dd39114..03fe479359b6 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -110,9 +110,9 @@ static void sas_form_port(struct asd_sas_phy *phy)
wake_up(&sas_ha->eh_wait_q);
return;
} else {
- SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
- __func__, phy->id, phy->port->id,
- phy->port->num_phys);
+ pr_info("%s: phy%d belongs to port%d already(%d)!\n",
+ __func__, phy->id, phy->port->id,
+ phy->port->num_phys);
return;
}
}
@@ -125,8 +125,8 @@ static void sas_form_port(struct asd_sas_phy *phy)
if (*(u64 *) port->sas_addr &&
phy_is_wideport_member(port, phy) && port->num_phys > 0) {
/* wide port */
- SAS_DPRINTK("phy%d matched wide port%d\n", phy->id,
- port->id);
+ pr_debug("phy%d matched wide port%d\n", phy->id,
+ port->id);
break;
}
spin_unlock(&port->phy_list_lock);
@@ -147,8 +147,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
}
if (i >= sas_ha->num_phys) {
- printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n",
- __func__);
+ pr_err("%s: couldn't find a free port, bug?\n", __func__);
spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
return;
}
@@ -180,10 +179,10 @@ static void sas_form_port(struct asd_sas_phy *phy)
}
sas_port_add_phy(port->port, phy->phy);
- SAS_DPRINTK("%s added to %s, phy_mask:0x%x (%16llx)\n",
- dev_name(&phy->phy->dev), dev_name(&port->port->dev),
- port->phy_mask,
- SAS_ADDR(port->attached_sas_addr));
+ pr_debug("%s added to %s, phy_mask:0x%x (%16llx)\n",
+ dev_name(&phy->phy->dev), dev_name(&port->port->dev),
+ port->phy_mask,
+ SAS_ADDR(port->attached_sas_addr));
if (port->port_dev)
port->port_dev->pathways = port->num_phys;
@@ -279,7 +278,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work)
prim = phy->sas_prim;
spin_unlock_irqrestore(&phy->sas_prim_lock, flags);
- SAS_DPRINTK("broadcast received: %d\n", prim);
+ pr_debug("broadcast received: %d\n", prim);
sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
if (phy->port)
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index af085432c5fe..c43a00a9d819 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -93,9 +93,8 @@ static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
hs = DID_ERROR;
break;
case SAS_PROTO_RESPONSE:
- SAS_DPRINTK("LLDD:%s sent SAS_PROTO_RESP for an SSP "
- "task; please report this\n",
- task->dev->port->ha->sas_ha_name);
+ pr_notice("LLDD:%s sent SAS_PROTO_RESP for an SSP task; please report this\n",
+ task->dev->port->ha->sas_ha_name);
break;
case SAS_ABORTED_TASK:
hs = DID_ABORT;
@@ -132,12 +131,12 @@ static void sas_scsi_task_done(struct sas_task *task)
if (unlikely(!task)) {
/* task will be completed by the error handler */
- SAS_DPRINTK("task done but aborted\n");
+ pr_debug("task done but aborted\n");
return;
}
if (unlikely(!sc)) {
- SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
+ pr_debug("task_done called with non existing SCSI cmnd!\n");
sas_free_task(task);
return;
}
@@ -208,7 +207,7 @@ int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
return 0;
out_free_task:
- SAS_DPRINTK("lldd_execute_task returned: %d\n", res);
+ pr_debug("lldd_execute_task returned: %d\n", res);
ASSIGN_SAS_TASK(cmd, NULL);
sas_free_task(task);
if (res == -SAS_QUEUE_FULL)
@@ -301,40 +300,38 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
to_sas_internal(task->dev->port->ha->core.shost->transportt);
for (i = 0; i < 5; i++) {
- SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task);
+ pr_notice("%s: aborting task 0x%p\n", __func__, task);
res = si->dft->lldd_abort_task(task);
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
- SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
- task);
+ pr_debug("%s: task 0x%p is done\n", __func__, task);
return TASK_IS_DONE;
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (res == TMF_RESP_FUNC_COMPLETE) {
- SAS_DPRINTK("%s: task 0x%p is aborted\n",
- __func__, task);
+ pr_notice("%s: task 0x%p is aborted\n",
+ __func__, task);
return TASK_IS_ABORTED;
} else if (si->dft->lldd_query_task) {
- SAS_DPRINTK("%s: querying task 0x%p\n",
- __func__, task);
+ pr_notice("%s: querying task 0x%p\n", __func__, task);
res = si->dft->lldd_query_task(task);
switch (res) {
case TMF_RESP_FUNC_SUCC:
- SAS_DPRINTK("%s: task 0x%p at LU\n",
- __func__, task);
+ pr_notice("%s: task 0x%p at LU\n", __func__,
+ task);
return TASK_IS_AT_LU;
case TMF_RESP_FUNC_COMPLETE:
- SAS_DPRINTK("%s: task 0x%p not at LU\n",
- __func__, task);
+ pr_notice("%s: task 0x%p not at LU\n",
+ __func__, task);
return TASK_IS_NOT_AT_LU;
case TMF_RESP_FUNC_FAILED:
- SAS_DPRINTK("%s: task 0x%p failed to abort\n",
- __func__, task);
- return TASK_ABORT_FAILED;
- }
+ pr_notice("%s: task 0x%p failed to abort\n",
+ __func__, task);
+ return TASK_ABORT_FAILED;
+ }
}
}
@@ -350,9 +347,9 @@ static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
int_to_scsilun(cmd->device->lun, &lun);
- SAS_DPRINTK("eh: device %llx LUN %llx has the task\n",
- SAS_ADDR(dev->sas_addr),
- cmd->device->lun);
+ pr_notice("eh: device %llx LUN %llx has the task\n",
+ SAS_ADDR(dev->sas_addr),
+ cmd->device->lun);
if (i->dft->lldd_abort_task_set)
res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);
@@ -376,8 +373,8 @@ static int sas_recover_I_T(struct domain_device *dev)
struct sas_internal *i =
to_sas_internal(dev->port->ha->core.shost->transportt);
- SAS_DPRINTK("I_T nexus reset for dev %016llx\n",
- SAS_ADDR(dev->sas_addr));
+ pr_notice("I_T nexus reset for dev %016llx\n",
+ SAS_ADDR(dev->sas_addr));
if (i->dft->lldd_I_T_nexus_reset)
res = i->dft->lldd_I_T_nexus_reset(dev);
@@ -471,9 +468,9 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type,
return SUCCESS;
}
- SAS_DPRINTK("%s reset of %s failed\n",
- reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus",
- dev_name(&dev->rphy->dev));
+ pr_warn("%s reset of %s failed\n",
+ reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus",
+ dev_name(&dev->rphy->dev));
return FAILED;
}
@@ -501,7 +498,7 @@ int sas_eh_abort_handler(struct scsi_cmnd *cmd)
if (task)
res = i->dft->lldd_abort_task(task);
else
- SAS_DPRINTK("no task to abort\n");
+ pr_notice("no task to abort\n");
if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
return SUCCESS;
@@ -612,34 +609,33 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (need_reset) {
- SAS_DPRINTK("%s: task 0x%p requests reset\n",
- __func__, task);
+ pr_notice("%s: task 0x%p requests reset\n",
+ __func__, task);
goto reset;
}
- SAS_DPRINTK("trying to find task 0x%p\n", task);
+ pr_debug("trying to find task 0x%p\n", task);
res = sas_scsi_find_task(task);
switch (res) {
case TASK_IS_DONE:
- SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
+ pr_notice("%s: task 0x%p is done\n", __func__,
task);
sas_eh_finish_cmd(cmd);
continue;
case TASK_IS_ABORTED:
- SAS_DPRINTK("%s: task 0x%p is aborted\n",
- __func__, task);
+ pr_notice("%s: task 0x%p is aborted\n",
+ __func__, task);
sas_eh_finish_cmd(cmd);
continue;
case TASK_IS_AT_LU:
- SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
+ pr_info("task 0x%p is at LU: lu recover\n", task);
reset:
tmf_resp = sas_recover_lu(task->dev, cmd);
if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
- SAS_DPRINTK("dev %016llx LU %llx is "
- "recovered\n",
- SAS_ADDR(task->dev),
- cmd->device->lun);
+ pr_notice("dev %016llx LU %llx is recovered\n",
+ SAS_ADDR(task->dev),
+ cmd->device->lun);
sas_eh_finish_cmd(cmd);
sas_scsi_clear_queue_lu(work_q, cmd);
goto Again;
@@ -647,14 +643,14 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
/* fallthrough */
case TASK_IS_NOT_AT_LU:
case TASK_ABORT_FAILED:
- SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n",
- task);
+ pr_notice("task 0x%p is not at LU: I_T recover\n",
+ task);
tmf_resp = sas_recover_I_T(task->dev);
if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
tmf_resp == -ENODEV) {
struct domain_device *dev = task->dev;
- SAS_DPRINTK("I_T %016llx recovered\n",
- SAS_ADDR(task->dev->sas_addr));
+ pr_notice("I_T %016llx recovered\n",
+ SAS_ADDR(task->dev->sas_addr));
sas_eh_finish_cmd(cmd);
sas_scsi_clear_queue_I_T(work_q, dev);
goto Again;
@@ -663,12 +659,12 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
try_to_reset_cmd_device(cmd);
if (i->dft->lldd_clear_nexus_port) {
struct asd_sas_port *port = task->dev->port;
- SAS_DPRINTK("clearing nexus for port:%d\n",
- port->id);
+ pr_debug("clearing nexus for port:%d\n",
+ port->id);
res = i->dft->lldd_clear_nexus_port(port);
if (res == TMF_RESP_FUNC_COMPLETE) {
- SAS_DPRINTK("clear nexus port:%d "
- "succeeded\n", port->id);
+ pr_notice("clear nexus port:%d succeeded\n",
+ port->id);
sas_eh_finish_cmd(cmd);
sas_scsi_clear_queue_port(work_q,
port);
@@ -676,11 +672,10 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
}
}
if (i->dft->lldd_clear_nexus_ha) {
- SAS_DPRINTK("clear nexus ha\n");
+ pr_debug("clear nexus ha\n");
res = i->dft->lldd_clear_nexus_ha(ha);
if (res == TMF_RESP_FUNC_COMPLETE) {
- SAS_DPRINTK("clear nexus ha "
- "succeeded\n");
+ pr_notice("clear nexus ha succeeded\n");
sas_eh_finish_cmd(cmd);
goto clear_q;
}
@@ -689,10 +684,9 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
* of effort could recover from errors. Quite
* possibly the HA just disappeared.
*/
- SAS_DPRINTK("error from device %llx, LUN %llx "
- "couldn't be recovered in any way\n",
- SAS_ADDR(task->dev->sas_addr),
- cmd->device->lun);
+ pr_err("error from device %llx, LUN %llx couldn't be recovered in any way\n",
+ SAS_ADDR(task->dev->sas_addr),
+ cmd->device->lun);
sas_eh_finish_cmd(cmd);
goto clear_q;
@@ -704,7 +698,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
return;
clear_q:
- SAS_DPRINTK("--- Exit %s -- clear_q\n", __func__);
+ pr_debug("--- Exit %s -- clear_q\n", __func__);
list_for_each_entry_safe(cmd, n, work_q, eh_entry)
sas_eh_finish_cmd(cmd);
goto out;
@@ -758,8 +752,8 @@ retry:
list_splice_init(&shost->eh_cmd_q, &eh_work_q);
spin_unlock_irq(shost->host_lock);
- SAS_DPRINTK("Enter %s busy: %d failed: %d\n",
- __func__, scsi_host_busy(shost), shost->host_failed);
+ pr_notice("Enter %s busy: %d failed: %d\n",
+ __func__, scsi_host_busy(shost), shost->host_failed);
/*
* Deal with commands that still have SAS tasks (i.e. they didn't
* complete via the normal sas_task completion mechanism),
@@ -800,9 +794,9 @@ out:
if (retry)
goto retry;
- SAS_DPRINTK("--- Exit %s: busy: %d failed: %d tries: %d\n",
- __func__, scsi_host_busy(shost),
- shost->host_failed, tries);
+ pr_notice("--- Exit %s: busy: %d failed: %d tries: %d\n",
+ __func__, scsi_host_busy(shost),
+ shost->host_failed, tries);
}
int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
@@ -875,9 +869,8 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
if (scsi_dev->tagged_supported) {
scsi_change_queue_depth(scsi_dev, SAS_DEF_QD);
} else {
- SAS_DPRINTK("device %llx, LUN %llx doesn't support "
- "TCQ\n", SAS_ADDR(dev->sas_addr),
- scsi_dev->lun);
+ pr_notice("device %llx, LUN %llx doesn't support TCQ\n",
+ SAS_ADDR(dev->sas_addr), scsi_dev->lun);
scsi_change_queue_depth(scsi_dev, 1);
}
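
For untagged devices the depth is pinned at one outstanding command; scsi_change_queue_depth() is the same mid-layer helper an LLDD would call to resize at runtime. A minimal sketch of the configure-time decision (SAS_DEF_QD is 256 in libsas):

    /* Sketch: choose queue depth by TCQ support at slave-configure time */
    static int demo_slave_configure(struct scsi_device *sdev)
    {
            scsi_change_queue_depth(sdev, sdev->tagged_supported ? 256 : 1);
            return 0;
    }
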
diff --git a/drivers/scsi/libsas/sas_task.c b/drivers/scsi/libsas/sas_task.c
index a78e5bd3e514..c3b9befad4e6 100644
--- a/drivers/scsi/libsas/sas_task.c
+++ b/drivers/scsi/libsas/sas_task.c
@@ -1,3 +1,6 @@
+
+#include "sas_internal.h"
+
#include <linux/kernel.h>
#include <linux/export.h>
#include <scsi/sas.h>
@@ -23,11 +26,8 @@ void sas_ssp_task_response(struct device *dev, struct sas_task *task,
memcpy(tstat->buf, iu->sense_data, tstat->buf_valid_size);
if (iu->status != SAM_STAT_CHECK_CONDITION)
- dev_printk(KERN_WARNING, dev,
- "dev %llx sent sense data, but "
- "stat(%x) is not CHECK CONDITION\n",
- SAS_ADDR(task->dev->sas_addr),
- iu->status);
+ dev_warn(dev, "dev %llx sent sense data, but stat(%x) is not CHECK CONDITION\n",
+ SAS_ADDR(task->dev->sas_addr), iu->status);
}
else
/* when datapres contains corrupt/unknown value... */
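
The dev_warn() form is plain shorthand for dev_printk(KERN_WARNING, ...); folding the message onto one line is what makes it greppable again. The two forms are equivalent:

    /* Equivalent; the helper keeps the format string whole and searchable */
    dev_printk(KERN_WARNING, dev, "stat(%x) is not CHECK CONDITION\n", status);
    dev_warn(dev, "stat(%x) is not CHECK CONDITION\n", status);
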
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index c1eb2b00ca7f..ebdfe5b26937 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -335,6 +335,18 @@ enum hba_state {
LPFC_HBA_ERROR = -1
};
+struct lpfc_trunk_link_state {
+ enum hba_state state;
+ uint8_t fault;
+};
+
+struct lpfc_trunk_link {
+ struct lpfc_trunk_link_state link0,
+ link1,
+ link2,
+ link3;
+};
+
struct lpfc_vport {
struct lpfc_hba *phba;
struct list_head listentry;
@@ -490,6 +502,7 @@ struct lpfc_vport {
struct nvme_fc_local_port *localport;
uint8_t nvmei_support; /* driver supports NVME Initiator */
uint32_t last_fcp_wqidx;
+ uint32_t rcv_flogi_cnt; /* How many unsol FLOGIs ACK'd. */
};
struct hbq_s {
@@ -683,6 +696,7 @@ struct lpfc_hba {
uint32_t iocb_cmd_size;
uint32_t iocb_rsp_size;
+ struct lpfc_trunk_link trunk_link;
enum hba_state link_state;
uint32_t link_flag; /* link state flags */
#define LS_LOOPBACK_MODE 0x1 /* NPort is in Loopback mode */
@@ -717,6 +731,7 @@ struct lpfc_hba {
* capability
*/
#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */
+#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -783,6 +798,7 @@ struct lpfc_hba {
#define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */
uint32_t cfg_fcf_failover_policy;
uint32_t cfg_fcp_io_sched;
+ uint32_t cfg_ns_query;
uint32_t cfg_fcp2_no_tgt_reset;
uint32_t cfg_cr_delay;
uint32_t cfg_cr_count;
@@ -989,7 +1005,8 @@ struct lpfc_hba {
spinlock_t port_list_lock; /* lock for port_list mutations */
struct lpfc_vport *pport; /* physical lpfc_vport pointer */
uint16_t max_vpi; /* Maximum virtual nports */
-#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */
+#define LPFC_MAX_VPI 0xFF /* Max number of VPIs supported, 0 - 0xff */
+#define LPFC_MAX_VPORTS 0x100 /* Max vports per port, with pport */
uint16_t max_vports; /*
* For IOV HBAs max_vpi can change
* after a reset. max_vports is max
@@ -1111,6 +1128,10 @@ struct lpfc_hba {
uint16_t vlan_id;
struct list_head fcf_conn_rec_list;
+ bool defer_flogi_acc_flag;
+ uint16_t defer_flogi_acc_rx_id;
+ uint16_t defer_flogi_acc_ox_id;
+
spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
struct list_head ct_ev_waiters;
struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
@@ -1262,6 +1283,12 @@ lpfc_sli_read_hs(struct lpfc_hba *phba)
static inline struct lpfc_sli_ring *
lpfc_phba_elsring(struct lpfc_hba *phba)
{
+ /* Return NULL if sli_rev has become invalid due to bad fw */
+ if (phba->sli_rev != LPFC_SLI_REV4 &&
+ phba->sli_rev != LPFC_SLI_REV3 &&
+ phba->sli_rev != LPFC_SLI_REV2)
+ return NULL;
+
if (phba->sli_rev == LPFC_SLI_REV4) {
if (phba->sli4_hba.els_wq)
return phba->sli4_hba.els_wq->pring;
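
Since lpfc_phba_elsring() can now return NULL when bad firmware leaves sli_rev invalid, callers must validate the ring before touching it; the lpfc_els_abort_flogi() hunk later in this series does exactly that. A minimal caller sketch, assuming the existing lpfc types:

    /* Sketch: defensive caller of the now-nullable ELS ring accessor */
    static int demo_use_els_ring(struct lpfc_hba *phba)
    {
            struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);

            if (unlikely(!pring))   /* corrupted sli_rev, no usable ring */
                    return -EIO;
            /* pring->txcmplq etc. are safe to touch from here */
            return 0;
    }
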
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index dda7f450b96d..4bae72cbf3f6 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -883,6 +883,42 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
}
}
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ ((bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_6))) {
+ struct lpfc_trunk_link link = phba->trunk_link;
+
+ if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "Trunk port 0: Link %s %s\n",
+ (link.link0.state == LPFC_LINK_UP) ?
+ "Up" : "Down. ",
+ trunk_errmsg[link.link0.fault]);
+
+ if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "Trunk port 1: Link %s %s\n",
+ (link.link1.state == LPFC_LINK_UP) ?
+ "Up" : "Down. ",
+ trunk_errmsg[link.link1.fault]);
+
+ if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "Trunk port 2: Link %s %s\n",
+ (link.link2.state == LPFC_LINK_UP) ?
+ "Up" : "Down. ",
+ trunk_errmsg[link.link2.fault]);
+
+ if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "Trunk port 3: Link %s %s\n",
+ (link.link3.state == LPFC_LINK_UP) ?
+ "Up" : "Down. ",
+ trunk_errmsg[link.link3.fault]);
+
+ }
+
return len;
}
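
Each trunk-port line uses the usual sysfs show() accumulation idiom: carry len forward and bound every write by PAGE_SIZE - len. A condensed sketch of the idiom (scnprintf() is the safer variant, since snprintf() can return more than it wrote on truncation):

    /* Sketch: accumulate sysfs output without overrunning the page */
    static ssize_t demo_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
    {
            int len = 0;

            len += scnprintf(buf + len, PAGE_SIZE - len,
                             "Trunk port 0: %s\n", "Up");
            len += scnprintf(buf + len, PAGE_SIZE - len,
                             "Trunk port 1: %s\n", "Down");
            return len;
    }
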
@@ -1156,6 +1192,82 @@ out:
}
/**
+ * lpfc_reset_pci_bus - resets PCI bridge controller's secondary bus of an HBA
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * Issues a PCI secondary bus reset for the phba->pcidev.
+ *
+ * Notes:
+ * First walks the bus_list to ensure that only PCI devices with the
+ * Emulex vendor ID and device IDs that support hot reset are present,
+ * that function 0 occurs exactly once, and that all ports on the bus
+ * are offline, so the hot reset affects only one valid HBA.
+ *
+ * Returns:
+ * -ENOTSUPP, cfg_enable_hba_reset must be set to 2
+ * -ENODEV, NULL ptr to pcidev
+ * -EBADSLT, detected invalid device
+ * -EBUSY, port is not in offline state
+ * 0, successful
+ */
+int
+lpfc_reset_pci_bus(struct lpfc_hba *phba)
+{
+ struct pci_dev *pdev = phba->pcidev;
+ struct Scsi_Host *shost = NULL;
+ struct lpfc_hba *phba_other = NULL;
+ struct pci_dev *ptr = NULL;
+ int res;
+
+ if (phba->cfg_enable_hba_reset != 2)
+ return -ENOTSUPP;
+
+ if (!pdev) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8345 pdev NULL!\n");
+ return -ENODEV;
+ }
+
+ res = lpfc_check_pci_resettable(phba);
+ if (res)
+ return res;
+
+ /* Walk the list of devices on the pci_dev's bus */
+ list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
+ /* Check port is offline */
+ shost = pci_get_drvdata(ptr);
+ if (shost) {
+ phba_other =
+ ((struct lpfc_vport *)shost->hostdata)->phba;
+ if (!(phba_other->pport->fc_flag & FC_OFFLINE_MODE)) {
+ lpfc_printf_log(phba_other, KERN_INFO, LOG_INIT,
+ "8349 WWPN = 0x%02x%02x%02x%02x"
+ "%02x%02x%02x%02x is not "
+ "offline!\n",
+ phba_other->wwpn[0],
+ phba_other->wwpn[1],
+ phba_other->wwpn[2],
+ phba_other->wwpn[3],
+ phba_other->wwpn[4],
+ phba_other->wwpn[5],
+ phba_other->wwpn[6],
+ phba_other->wwpn[7]);
+ return -EBUSY;
+ }
+ }
+ }
+
+ /* Issue PCI bus reset */
+ res = pci_reset_bus(pdev);
+ if (res) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "8350 PCI reset bus failed: %d\n", res);
+ }
+
+ return res;
+}
+
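
pci_reset_bus() triggers a secondary bus reset on the bridge above the device, which resets every function behind that bridge at once; that is why the walk above insists all ports on the bus be offline first. A hedged sketch of the core call with the lpfc-specific checks stripped out:

    /* Sketch: secondary bus reset; every sibling function is reset too.
     * Assumes the caller has already quiesced everything on the bus.
     */
    static int demo_bus_reset(struct pci_dev *pdev)
    {
            int rc = pci_reset_bus(pdev);

            if (rc)
                    dev_err(&pdev->dev, "bus reset failed: %d\n", rc);
            return rc;
    }
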
+/**
* lpfc_selective_reset - Offline then onlines the port
* @phba: lpfc_hba pointer.
*
@@ -1322,7 +1434,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
return -EACCES;
if ((phba->sli_rev < LPFC_SLI_REV4) ||
- (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
LPFC_SLI_INTF_IF_TYPE_2))
return -EPERM;
@@ -1430,6 +1542,66 @@ lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
}
+int
+lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
+{
+ LPFC_MBOXQ_t *mbox = NULL;
+ unsigned long val = 0;
+ char *pval = 0;
+ int rc = 0;
+
+ if (!strncmp("enable", buff_out,
+ strlen("enable"))) {
+ pval = buff_out + strlen("enable") + 1;
+ rc = kstrtoul(pval, 0, &val);
+ if (rc)
+ return rc; /* Invalid number */
+ } else if (!strncmp("disable", buff_out,
+ strlen("disable"))) {
+ val = 0;
+ } else {
+ return -EINVAL; /* Invalid command */
+ }
+
+ switch (val) {
+ case 0:
+ val = 0x0; /* Disable */
+ break;
+ case 2:
+ val = 0x1; /* Enable two port trunk */
+ break;
+ case 4:
+ val = 0x2; /* Enable four port trunk */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "0070 Set trunk mode with val %ld ", val);
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE,
+ 12, LPFC_SLI4_MBX_EMBED);
+
+ bf_set(lpfc_mbx_set_trunk_mode,
+ &mbox->u.mqe.un.set_trunk_mode,
+ val);
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ if (rc)
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "0071 Set trunk mode failed with status: %d",
+ rc);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+
+ return 0;
+}
+
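
The accepted grammar is "enable <n>" (n = 2 or 4) or "disable"; everything after the enable keyword is handed to kstrtoul(). A stripped-down sketch of just the parsing step (helper name hypothetical):

    /* Sketch: parse "enable <n>" / "disable" into a trunk mode value */
    static int demo_parse_trunk(const char *buf, unsigned long *val)
    {
            if (!strncmp(buf, "enable", strlen("enable")))
                    return kstrtoul(buf + strlen("enable") + 1, 0, val);
            if (!strncmp(buf, "disable", strlen("disable"))) {
                    *val = 0;
                    return 0;
            }
            return -EINVAL;     /* unrecognized command */
    }
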
/**
* lpfc_board_mode_show - Return the state of the board
* @dev: class device that is converted into a Scsi_host.
@@ -1522,6 +1694,11 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
+ else if (strncmp(buf, "pci_bus_reset", sizeof("pci_bus_reset") - 1)
+ == 0)
+ status = lpfc_reset_pci_bus(phba);
+ else if (strncmp(buf, "trunk", sizeof("trunk") - 1) == 0)
+ status = lpfc_set_trunking(phba, (char *)buf + sizeof("trunk"));
else
status = -EINVAL;
@@ -1590,7 +1767,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
pmb = &pmboxq->u.mb;
pmb->mbxCommand = MBX_READ_CONFIG;
pmb->mbxOwner = OWN_HOST;
- pmboxq->context1 = NULL;
+ pmboxq->ctx_buf = NULL;
if (phba->pport->fc_flag & FC_OFFLINE_MODE)
rc = MBX_NOT_FINISHED;
@@ -1622,6 +1799,9 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
(bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
+ /* Limit the max we support */
+ if (max_vpi > LPFC_MAX_VPI)
+ max_vpi = LPFC_MAX_VPI;
if (mvpi)
*mvpi = max_vpi;
if (avpi)
@@ -1637,8 +1817,13 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
*axri = pmb->un.varRdConfig.avail_xri;
if (mvpi)
*mvpi = pmb->un.varRdConfig.max_vpi;
- if (avpi)
- *avpi = pmb->un.varRdConfig.avail_vpi;
+ if (avpi) {
+ /* avail_vpi is only valid if link is up and ready */
+ if (phba->link_state == LPFC_HBA_READY)
+ *avpi = pmb->un.varRdConfig.avail_vpi;
+ else
+ *avpi = pmb->un.varRdConfig.max_vpi;
+ }
}
mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -3831,8 +4016,9 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
val);
return -EINVAL;
}
- if (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
- val == 4) {
+ if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
+ phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
+ val == 4) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"3114 Loop mode not supported\n");
return -EINVAL;
@@ -4254,7 +4440,7 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
uint32_t prev_val, if_type;
if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
- if (if_type == LPFC_SLI_INTF_IF_TYPE_2 &&
+ if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 &&
phba->hba_flag & HBA_FORCED_LINK_SPEED)
return -EPERM;
@@ -5070,6 +5256,18 @@ LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_ROUND_ROBIN,
"issuing commands [0] - Round Robin, [1] - Current CPU");
/*
+ * lpfc_ns_query: Determine algorithm for NameServer queries after RSCN
+ * range is [0,1]. Default value is 0.
+ * For [0], GID_FT is used for NameServer queries after RSCN (default)
+ * For [1], GID_PT is used for NameServer queries after RSCN
+ *
+ */
+LPFC_ATTR_RW(ns_query, LPFC_NS_QUERY_GID_FT,
+ LPFC_NS_QUERY_GID_FT, LPFC_NS_QUERY_GID_PT,
+ "Determine algorithm NameServer queries after RSCN "
+ "[0] - GID_FT, [1] - GID_PT");
+
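
Downstream, discovery picks the CT command from this knob. A hedged sketch of the selection as it would look at the issue site (helper name hypothetical; see the lpfc_ct.c hunks below for the real completion paths):

    /* Sketch: choose the post-RSCN NameServer query per cfg_ns_query */
    static int demo_issue_ns_query(struct lpfc_vport *vport)
    {
            struct lpfc_hba *phba = vport->phba;

            if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT)
                    return lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0,
                                       GID_PT_N_PORT);
            return lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, FC_TYPE_FCP);
    }
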
+/*
# lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
# range is [0,1]. Default value is 0.
# For [0], bus reset issues target reset to ALL devices
@@ -5257,9 +5455,10 @@ LPFC_ATTR_R(nvme_io_channel,
# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
# 0 = HBA resets disabled
# 1 = HBA resets enabled (default)
-# Value range is [0,1]. Default value is 1.
+# 2 = HBA reset via PCI bus reset enabled
+# Value range is [0,2]. Default value is 1.
*/
-LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
+LPFC_ATTR_RW(enable_hba_reset, 1, 0, 2, "Enable HBA resets from the driver.");
/*
# lpfc_enable_hba_heartbeat: Disable HBA heartbeat timer.
@@ -5514,6 +5713,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_scan_down,
&dev_attr_lpfc_link_speed,
&dev_attr_lpfc_fcp_io_sched,
+ &dev_attr_lpfc_ns_query,
&dev_attr_lpfc_fcp2_no_tgt_reset,
&dev_attr_lpfc_cr_delay,
&dev_attr_lpfc_cr_count,
@@ -6006,6 +6206,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
case LPFC_LINK_SPEED_64GHZ:
fc_host_speed(shost) = FC_PORTSPEED_64GBIT;
break;
+ case LPFC_LINK_SPEED_128GHZ:
+ fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
+ break;
default:
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
@@ -6105,7 +6308,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
pmb = &pmboxq->u.mb;
pmb->mbxCommand = MBX_READ_STATUS;
pmb->mbxOwner = OWN_HOST;
- pmboxq->context1 = NULL;
+ pmboxq->ctx_buf = NULL;
pmboxq->vport = vport;
if (vport->fc_flag & FC_OFFLINE_MODE)
@@ -6137,7 +6340,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
pmb->mbxCommand = MBX_READ_LNK_STAT;
pmb->mbxOwner = OWN_HOST;
- pmboxq->context1 = NULL;
+ pmboxq->ctx_buf = NULL;
pmboxq->vport = vport;
if (vport->fc_flag & FC_OFFLINE_MODE)
@@ -6217,7 +6420,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
pmb->mbxCommand = MBX_READ_STATUS;
pmb->mbxOwner = OWN_HOST;
pmb->un.varWords[0] = 0x1; /* reset request */
- pmboxq->context1 = NULL;
+ pmboxq->ctx_buf = NULL;
pmboxq->vport = vport;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
@@ -6235,7 +6438,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
pmb->mbxCommand = MBX_READ_LNK_STAT;
pmb->mbxOwner = OWN_HOST;
- pmboxq->context1 = NULL;
+ pmboxq->ctx_buf = NULL;
pmboxq->vport = vport;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
@@ -6564,6 +6767,7 @@ void
lpfc_get_cfgparam(struct lpfc_hba *phba)
{
lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
+ lpfc_ns_query_init(phba, lpfc_ns_query);
lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
lpfc_cr_delay_init(phba, lpfc_cr_delay);
lpfc_cr_count_init(phba, lpfc_cr_count);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 7bd7ae86bed5..8698af86485d 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2222,7 +2222,7 @@ lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
if (phba->sli_rev < LPFC_SLI_REV4)
rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
- else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
LPFC_SLI_INTF_IF_TYPE_2)
rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
else
@@ -2262,7 +2262,7 @@ lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
if (phba->sli_rev < LPFC_SLI_REV4)
return -ENODEV;
- if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
LPFC_SLI_INTF_IF_TYPE_2)
return -ENODEV;
@@ -2354,7 +2354,7 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
rc = -ENODEV;
goto job_error;
}
- if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
LPFC_SLI_INTF_IF_TYPE_2) {
rc = -ENODEV;
goto job_error;
@@ -2501,9 +2501,9 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
return -ENOMEM;
}
- dmabuff = (struct lpfc_dmabuf *) mbox->context1;
- mbox->context1 = NULL;
- mbox->context2 = NULL;
+ dmabuff = (struct lpfc_dmabuf *)mbox->ctx_buf;
+ mbox->ctx_buf = NULL;
+ mbox->ctx_ndlp = NULL;
status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
@@ -3388,7 +3388,7 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
unsigned long flags;
uint8_t *pmb, *pmb_buf;
- dd_data = pmboxq->context1;
+ dd_data = pmboxq->ctx_ndlp;
/*
* The outgoing buffer is readily referred from the dma buffer,
@@ -3573,7 +3573,7 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
struct lpfc_sli_config_mbox *sli_cfg_mbx;
uint8_t *pmbx;
- dd_data = pmboxq->context1;
+ dd_data = pmboxq->ctx_buf;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -3960,7 +3960,7 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
/* context fields to callback function */
- pmboxq->context1 = dd_data;
+ pmboxq->ctx_buf = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4131,7 +4131,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
/* context fields to callback function */
- pmboxq->context1 = dd_data;
+ pmboxq->ctx_buf = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4476,7 +4476,7 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
/* context fields to callback function */
- pmboxq->context1 = dd_data;
+ pmboxq->ctx_buf = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4761,7 +4761,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
from = pmbx;
ext = from + sizeof(MAILBOX_t);
- pmboxq->context2 = ext;
+ pmboxq->ctx_buf = ext;
pmboxq->in_ext_byte_len =
mbox_req->inExtWLen * sizeof(uint32_t);
pmboxq->out_ext_byte_len =
@@ -4889,7 +4889,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
/* setup context field to pass wait_queue pointer to wake function */
- pmboxq->context1 = dd_data;
+ pmboxq->ctx_ndlp = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -5348,7 +5348,7 @@ lpfc_bsg_get_ras_config(struct bsg_job *job)
sizeof(struct fc_bsg_request) +
sizeof(struct lpfc_bsg_ras_req)) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
- "6181 Received RAS_LOG request "
+ "6192 FW_LOG request received "
"below minimum size\n");
rc = -EINVAL;
goto ras_job_error;
@@ -5356,7 +5356,7 @@ lpfc_bsg_get_ras_config(struct bsg_job *job)
/* Check FW log status */
rc = lpfc_check_fwlog_support(phba);
- if (rc == -EACCES || rc == -EPERM)
+ if (rc)
goto ras_job_error;
ras_reply = (struct lpfc_bsg_get_ras_config_reply *)
@@ -5381,25 +5381,6 @@ ras_job_error:
}
/**
- * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
- * @phba: Pointer to HBA context object.
- *
- * Disable FW logging into host memory on the adapter. To
- * be done before reading logs from the host memory.
- **/
-static void
-lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
-{
- struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
-
- ras_fwlog->ras_active = false;
-
- /* Disable FW logging to host memory */
- writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
- phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
-}
-
-/**
* lpfc_bsg_set_ras_config: Set FW logging parameters
* @job: fc_bsg_job to handle
*
@@ -5416,7 +5397,7 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
struct fc_bsg_reply *bsg_reply = job->reply;
uint8_t action = 0, log_level = 0;
- int rc = 0;
+ int rc = 0, action_status = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) +
@@ -5430,7 +5411,7 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
/* Check FW log status */
rc = lpfc_check_fwlog_support(phba);
- if (rc == -EACCES || rc == -EPERM)
+ if (rc)
goto ras_job_error;
ras_req = (struct lpfc_bsg_set_ras_config_req *)
@@ -5449,16 +5430,25 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
lpfc_ras_stop_fwlog(phba);
} else {
/*action = LPFC_RASACTION_START_LOGGING*/
- if (ras_fwlog->ras_active == true) {
- rc = -EINPROGRESS;
- goto ras_job_error;
- }
+
+ /* Even though FW logging is active, re-initialize
+ * FW logging with the new log level. Return status
+ * "Logging already Running" to the caller.
+ */
+ if (ras_fwlog->ras_active)
+ action_status = -EINPROGRESS;
/* Enable logging */
rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
LPFC_RAS_ENABLE_LOGGING);
- if (rc)
+ if (rc) {
rc = -EINVAL;
+ goto ras_job_error;
+ }
+
+ /* Check if FW-logging is re-initialized */
+ if (action_status == -EINPROGRESS)
+ rc = action_status;
}
ras_job_error:
/* make error code available to userspace */
@@ -5487,12 +5477,11 @@ lpfc_bsg_get_ras_lwpd(struct bsg_job *job)
struct lpfc_hba *phba = vport->phba;
struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
struct fc_bsg_reply *bsg_reply = job->reply;
- uint32_t lwpd_offset = 0;
- uint64_t wrap_value = 0;
+ u32 *lwpd_ptr = NULL;
int rc = 0;
rc = lpfc_check_fwlog_support(phba);
- if (rc == -EACCES || rc == -EPERM)
+ if (rc)
goto ras_job_error;
if (job->request_len <
@@ -5508,11 +5497,19 @@ lpfc_bsg_get_ras_lwpd(struct bsg_job *job)
ras_reply = (struct lpfc_bsg_get_ras_lwpd *)
bsg_reply->reply_data.vendor_reply.vendor_rsp;
- lwpd_offset = *((uint32_t *)ras_fwlog->lwpd.virt) & 0xffffffff;
- ras_reply->offset = be32_to_cpu(lwpd_offset);
+ if (!ras_fwlog->lwpd.virt) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "6193 Restart FW Logging\n");
+ rc = -EINVAL;
+ goto ras_job_error;
+ }
+
+ /* Get lwpd offset */
+ lwpd_ptr = (uint32_t *)(ras_fwlog->lwpd.virt);
+ ras_reply->offset = be32_to_cpu(*lwpd_ptr & 0xffffffff);
- wrap_value = *((uint64_t *)ras_fwlog->lwpd.virt);
- ras_reply->wrap_count = be32_to_cpu((wrap_value >> 32) & 0xffffffff);
+ /* Get wrap count */
+ ras_reply->wrap_count = be32_to_cpu(*(++lwpd_ptr) & 0xffffffff);
ras_job_error:
/* make error code available to userspace */
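
The LWPD block is laid out as two consecutive big-endian 32-bit words, last-write offset then wrap count, which is why the rewrite walks a u32 pointer instead of shifting a host-endian u64. A minimal decode sketch under that layout assumption:

    /* Sketch: decode the 8-byte LWPD as two big-endian 32-bit words.
     * Assumed layout: word 0 = last write offset, word 1 = wrap count.
     */
    static void demo_decode_lwpd(const void *lwpd_virt, u32 *offset, u32 *wraps)
    {
            const __be32 *w = lwpd_virt;

            *offset = be32_to_cpu(w[0]);
            *wraps  = be32_to_cpu(w[1]);
    }
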
@@ -5539,9 +5536,8 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
struct fc_bsg_request *bsg_request = job->request;
struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_bsg_get_fwlog_req *ras_req;
- uint32_t rd_offset, rd_index, offset, pending_wlen;
- uint32_t boundary = 0, align_len = 0, write_len = 0;
- void *dest, *src, *fwlog_buff;
+ u32 rd_offset, rd_index, offset;
+ void *src, *fwlog_buff;
struct lpfc_ras_fwlog *ras_fwlog = NULL;
struct lpfc_dmabuf *dmabuf, *next;
int rc = 0;
@@ -5549,7 +5545,7 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
ras_fwlog = &phba->ras_fwlog;
rc = lpfc_check_fwlog_support(phba);
- if (rc == -EACCES || rc == -EPERM)
+ if (rc)
goto ras_job_error;
/* Logging to be stopped before reading */
@@ -5581,8 +5577,6 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE);
offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE);
- pending_wlen = ras_req->read_size;
- dest = fwlog_buff;
list_for_each_entry_safe(dmabuf, next,
&ras_fwlog->fwlog_buff_list, list) {
@@ -5590,29 +5584,9 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
if (dmabuf->buffer_tag < rd_index)
continue;
- /* Align read to buffer size */
- if (offset) {
- boundary = ((dmabuf->buffer_tag + 1) *
- LPFC_RAS_MAX_ENTRY_SIZE);
-
- align_len = (boundary - offset);
- write_len = min_t(u32, align_len,
- LPFC_RAS_MAX_ENTRY_SIZE);
- } else {
- write_len = min_t(u32, pending_wlen,
- LPFC_RAS_MAX_ENTRY_SIZE);
- align_len = 0;
- boundary = 0;
- }
src = dmabuf->virt + offset;
- memcpy(dest, src, write_len);
-
- pending_wlen -= write_len;
- if (!pending_wlen)
- break;
-
- dest += write_len;
- offset = (offset + write_len) % LPFC_RAS_MAX_ENTRY_SIZE;
+ memcpy(fwlog_buff, src, ras_req->read_size);
+ break;
}
bsg_reply->reply_payload_rcv_len =
@@ -5629,6 +5603,77 @@ ras_job_error:
return rc;
}
+static int
+lpfc_get_trunk_info(struct bsg_job *job)
+{
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ struct lpfc_trunk_info *event_reply;
+ int rc = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct get_trunk_info_req)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2744 Received GET TRUNK _INFO request below "
+ "minimum size\n");
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ event_reply = (struct lpfc_trunk_info *)
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+ if (job->reply_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct lpfc_trunk_info)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2728 Received GET TRUNK _INFO reply below "
+ "minimum size\n");
+ rc = -EINVAL;
+ goto job_error;
+ }
+ if (event_reply == NULL) {
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ bsg_bf_set(lpfc_trunk_info_link_status, event_reply,
+ (phba->link_state >= LPFC_LINK_UP) ? 1 : 0);
+
+ bsg_bf_set(lpfc_trunk_info_trunk_active0, event_reply,
+ (phba->trunk_link.link0.state == LPFC_LINK_UP) ? 1 : 0);
+
+ bsg_bf_set(lpfc_trunk_info_trunk_active1, event_reply,
+ (phba->trunk_link.link1.state == LPFC_LINK_UP) ? 1 : 0);
+
+ bsg_bf_set(lpfc_trunk_info_trunk_active2, event_reply,
+ (phba->trunk_link.link2.state == LPFC_LINK_UP) ? 1 : 0);
+
+ bsg_bf_set(lpfc_trunk_info_trunk_active3, event_reply,
+ (phba->trunk_link.link3.state == LPFC_LINK_UP) ? 1 : 0);
+
+ bsg_bf_set(lpfc_trunk_info_trunk_config0, event_reply,
+ bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba));
+
+ bsg_bf_set(lpfc_trunk_info_trunk_config1, event_reply,
+ bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba));
+
+ bsg_bf_set(lpfc_trunk_info_trunk_config2, event_reply,
+ bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba));
+
+ bsg_bf_set(lpfc_trunk_info_trunk_config3, event_reply,
+ bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba));
+
+ event_reply->port_speed = phba->sli4_hba.link_state.speed / 1000;
+ event_reply->logical_speed =
+ phba->sli4_hba.link_state.logical_speed / 100;
+job_error:
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ return rc;
+
+}
/**
* lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
@@ -5689,6 +5734,9 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
case LPFC_BSG_VENDOR_RAS_SET_CONFIG:
rc = lpfc_bsg_set_ras_config(job);
break;
+ case LPFC_BSG_VENDOR_GET_TRUNK_INFO:
+ rc = lpfc_get_trunk_info(job);
+ break;
default:
rc = -EINVAL;
bsg_reply->reply_payload_rcv_len = 0;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 820323f1139b..9151824beea4 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -42,6 +42,7 @@
#define LPFC_BSG_VENDOR_RAS_GET_FWLOG 17
#define LPFC_BSG_VENDOR_RAS_GET_CONFIG 18
#define LPFC_BSG_VENDOR_RAS_SET_CONFIG 19
+#define LPFC_BSG_VENDOR_GET_TRUNK_INFO 20
struct set_ct_event {
uint32_t command;
@@ -331,6 +332,43 @@ struct lpfc_bsg_get_ras_config_reply {
uint32_t log_buff_sz;
};
+struct lpfc_trunk_info {
+ uint32_t word0;
+#define lpfc_trunk_info_link_status_SHIFT 0
+#define lpfc_trunk_info_link_status_MASK 1
+#define lpfc_trunk_info_link_status_WORD word0
+#define lpfc_trunk_info_trunk_active0_SHIFT 8
+#define lpfc_trunk_info_trunk_active0_MASK 1
+#define lpfc_trunk_info_trunk_active0_WORD word0
+#define lpfc_trunk_info_trunk_active1_SHIFT 9
+#define lpfc_trunk_info_trunk_active1_MASK 1
+#define lpfc_trunk_info_trunk_active1_WORD word0
+#define lpfc_trunk_info_trunk_active2_SHIFT 10
+#define lpfc_trunk_info_trunk_active2_MASK 1
+#define lpfc_trunk_info_trunk_active2_WORD word0
+#define lpfc_trunk_info_trunk_active3_SHIFT 11
+#define lpfc_trunk_info_trunk_active3_MASK 1
+#define lpfc_trunk_info_trunk_active3_WORD word0
+#define lpfc_trunk_info_trunk_config0_SHIFT 12
+#define lpfc_trunk_info_trunk_config0_MASK 1
+#define lpfc_trunk_info_trunk_config0_WORD word0
+#define lpfc_trunk_info_trunk_config1_SHIFT 13
+#define lpfc_trunk_info_trunk_config1_MASK 1
+#define lpfc_trunk_info_trunk_config1_WORD word0
+#define lpfc_trunk_info_trunk_config2_SHIFT 14
+#define lpfc_trunk_info_trunk_config2_MASK 1
+#define lpfc_trunk_info_trunk_config2_WORD word0
+#define lpfc_trunk_info_trunk_config3_SHIFT 15
+#define lpfc_trunk_info_trunk_config3_MASK 1
+#define lpfc_trunk_info_trunk_config3_WORD word0
+ uint16_t port_speed;
+ uint16_t logical_speed;
+ uint32_t reserved3;
+};
+
+struct get_trunk_info_req {
+ uint32_t command;
+};
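
The _SHIFT/_MASK/_WORD triples above follow the standard lpfc bitfield convention: a generic accessor macro token-pastes the field name onto each suffix to locate, mask, and place the value. An illustrative definition in that style (lpfc's real bf_set()/bf_get() live in the driver headers; this one is for exposition only):

    /* Illustrative accessors in the lpfc SHIFT/MASK/WORD convention */
    #define demo_bf_set(name, ptr, val)                              \
            ((ptr)->name##_WORD = (((ptr)->name##_WORD &             \
                    ~(name##_MASK << name##_SHIFT)) |                \
                    (((val) & name##_MASK) << name##_SHIFT)))

    #define demo_bf_get(name, ptr)                                   \
            (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
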
/* driver only */
#define SLI_CONFIG_NOT_HANDLED 0
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index e01136507780..39f3fa988732 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -74,7 +74,6 @@ void lpfc_mbx_cmpl_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
void lpfc_retry_pport_discovery(struct lpfc_hba *);
-void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
void lpfc_free_iocb_list(struct lpfc_hba *phba);
int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
@@ -175,6 +174,7 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *);
void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
+int lpfc_issue_gidpt(struct lpfc_vport *vport);
int lpfc_issue_gidft(struct lpfc_vport *vport);
int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq);
int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
@@ -380,8 +380,10 @@ void lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virtp, dma_addr_t dma);
void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
void lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp);
+int lpfc_link_reset(struct lpfc_vport *vport);
/* Function prototypes. */
+int lpfc_check_pci_resettable(const struct lpfc_hba *phba);
const char* lpfc_info(struct Scsi_Host *);
int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
@@ -550,6 +552,7 @@ void lpfc_sli4_ras_init(struct lpfc_hba *phba);
void lpfc_sli4_ras_setup(struct lpfc_hba *phba);
int lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, uint32_t fwlog_level,
uint32_t fwlog_enable);
+void lpfc_ras_stop_fwlog(struct lpfc_hba *phba);
int lpfc_check_fwlog_support(struct lpfc_hba *phba);
/* NVME interfaces. */
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 789ad1502534..552da8bf43e4 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -540,7 +540,17 @@ lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ char *str;
+ if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT)
+ str = "GID_FT";
+ else
+ str = "GID_PT";
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6430 Process %s rsp for %08x type %x %s %s\n",
+ str, Did, fc4_type,
+ (fc4_type == FC_TYPE_FCP) ? "FCP" : " ",
+ (fc4_type == FC_TYPE_NVME) ? "NVME" : " ");
/*
* To conserve rpi's, filter out addresses for other
* vports on the same physical HBAs.
@@ -832,6 +842,198 @@ out:
}
static void
+lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ IOCB_t *irsp;
+ struct lpfc_dmabuf *outp;
+ struct lpfc_dmabuf *inp;
+ struct lpfc_sli_ct_request *CTrsp;
+ struct lpfc_sli_ct_request *CTreq;
+ struct lpfc_nodelist *ndlp;
+ int rc;
+
+ /* First save ndlp, before we overwrite it */
+ ndlp = cmdiocb->context_un.ndlp;
+
+ /* we pass cmdiocb to state machine which needs rspiocb as well */
+ cmdiocb->context_un.rsp_iocb = rspiocb;
+ inp = (struct lpfc_dmabuf *)cmdiocb->context1;
+ outp = (struct lpfc_dmabuf *)cmdiocb->context2;
+ irsp = &rspiocb->iocb;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "GID_PT cmpl: status:x%x/x%x rtry:%d",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ vport->fc_ns_retry);
+
+ /* Don't bother processing response if vport is being torn down. */
+ if (vport->load_flag & FC_UNLOADING) {
+ if (vport->fc_flag & FC_RSCN_MODE)
+ lpfc_els_flush_rscn(vport);
+ goto out;
+ }
+
+ if (lpfc_els_chk_latt(vport)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "4108 Link event during NS query\n");
+ if (vport->fc_flag & FC_RSCN_MODE)
+ lpfc_els_flush_rscn(vport);
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ goto out;
+ }
+ if (lpfc_error_lost_link(irsp)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "4101 NS query failed due to link event\n");
+ if (vport->fc_flag & FC_RSCN_MODE)
+ lpfc_els_flush_rscn(vport);
+ goto out;
+ }
+
+ spin_lock_irq(shost->host_lock);
+ if (vport->fc_flag & FC_RSCN_DEFERRED) {
+ vport->fc_flag &= ~FC_RSCN_DEFERRED;
+ spin_unlock_irq(shost->host_lock);
+
+ /* This is a GID_PT completing so the gidft_inp counter was
+ * incremented before the GID_PT was issued to the wire.
+ */
+ vport->gidft_inp--;
+
+ /*
+ * Skip processing the NS response
+ * Re-issue the NS cmd
+ */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "4102 Process Deferred RSCN Data: x%x x%x\n",
+ vport->fc_flag, vport->fc_rscn_id_cnt);
+ lpfc_els_handle_rscn(vport);
+
+ goto out;
+ }
+ spin_unlock_irq(shost->host_lock);
+
+ if (irsp->ulpStatus) {
+ /* Check for retry */
+ if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
+ if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
+ (irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ IOERR_NO_RESOURCES)
+ vport->fc_ns_retry++;
+
+ /* CT command is being retried */
+ vport->gidft_inp--;
+ rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_PT,
+ vport->fc_ns_retry, GID_PT_N_PORT);
+ if (rc == 0)
+ goto out;
+ }
+ if (vport->fc_flag & FC_RSCN_MODE)
+ lpfc_els_flush_rscn(vport);
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "4103 GID_FT Query error: 0x%x 0x%x\n",
+ irsp->ulpStatus, vport->fc_ns_retry);
+ } else {
+ /* Good status, continue checking */
+ CTreq = (struct lpfc_sli_ct_request *)inp->virt;
+ CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
+ if (CTrsp->CommandResponse.bits.CmdRsp ==
+ cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "4105 NameServer Rsp Data: x%x x%x\n",
+ vport->fc_flag,
+ CTreq->un.gid.Fc4Type);
+
+ lpfc_ns_rsp(vport,
+ outp,
+ CTreq->un.gid.Fc4Type,
+ (uint32_t)(irsp->un.genreq64.bdl.bdeSize));
+ } else if (CTrsp->CommandResponse.bits.CmdRsp ==
+ be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
+ /* NameServer Rsp Error */
+ if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ)
+ && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) {
+ lpfc_printf_vlog(
+ vport, KERN_INFO, LOG_DISCOVERY,
+ "4106 No NameServer Entries "
+ "Data: x%x x%x x%x x%x\n",
+ CTrsp->CommandResponse.bits.CmdRsp,
+ (uint32_t)CTrsp->ReasonCode,
+ (uint32_t)CTrsp->Explanation,
+ vport->fc_flag);
+
+ lpfc_debugfs_disc_trc(
+ vport, LPFC_DISC_TRC_CT,
+ "GID_PT no entry cmd:x%x rsn:x%x exp:x%x",
+ (uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
+ (uint32_t)CTrsp->ReasonCode,
+ (uint32_t)CTrsp->Explanation);
+ } else {
+ lpfc_printf_vlog(
+ vport, KERN_INFO, LOG_DISCOVERY,
+ "4107 NameServer Rsp Error "
+ "Data: x%x x%x x%x x%x\n",
+ CTrsp->CommandResponse.bits.CmdRsp,
+ (uint32_t)CTrsp->ReasonCode,
+ (uint32_t)CTrsp->Explanation,
+ vport->fc_flag);
+
+ lpfc_debugfs_disc_trc(
+ vport, LPFC_DISC_TRC_CT,
+ "GID_PT rsp err1 cmd:x%x rsn:x%x exp:x%x",
+ (uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
+ (uint32_t)CTrsp->ReasonCode,
+ (uint32_t)CTrsp->Explanation);
+ }
+ } else {
+ /* NameServer Rsp Error */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "4109 NameServer Rsp Error "
+ "Data: x%x x%x x%x x%x\n",
+ CTrsp->CommandResponse.bits.CmdRsp,
+ (uint32_t)CTrsp->ReasonCode,
+ (uint32_t)CTrsp->Explanation,
+ vport->fc_flag);
+
+ lpfc_debugfs_disc_trc(
+ vport, LPFC_DISC_TRC_CT,
+ "GID_PT rsp err2 cmd:x%x rsn:x%x exp:x%x",
+ (uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
+ (uint32_t)CTrsp->ReasonCode,
+ (uint32_t)CTrsp->Explanation);
+ }
+ vport->gidft_inp--;
+ }
+ /* Link up / RSCN discovery */
+ if ((vport->num_disc_nodes == 0) &&
+ (vport->gidft_inp == 0)) {
+ /*
+ * The driver has cycled through all Nports in the RSCN payload.
+ * Complete the handling by cleaning up and marking the
+ * current driver state.
+ */
+ if (vport->port_state >= LPFC_DISC_AUTH) {
+ if (vport->fc_flag & FC_RSCN_MODE) {
+ lpfc_els_flush_rscn(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
+ spin_unlock_irq(shost->host_lock);
+ } else {
+ lpfc_els_flush_rscn(vport);
+ }
+ }
+
+ lpfc_disc_start(vport);
+ }
+out:
+ cmdiocb->context_un.ndlp = ndlp; /* Now restore ndlp for free */
+ lpfc_ct_free_iocb(phba, cmdiocb);
+}
+
+static void
lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
@@ -857,6 +1059,13 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET];
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6431 Process GFF_ID rsp for %08x "
+ "fbits %02x %s %s\n",
+ did, fbits,
+ (fbits & FC4_FEATURE_INIT) ? "Initiator" : " ",
+ (fbits & FC4_FEATURE_TARGET) ? "Target" : " ");
+
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
if ((fbits & FC4_FEATURE_INIT) &&
@@ -979,9 +1188,15 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]);
fc4_data_1 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[1]);
+
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "3062 DID x%06x GFT Wd0 x%08x Wd1 x%08x\n",
- did, fc4_data_0, fc4_data_1);
+ "6432 Process GFT_ID rsp for %08x "
+ "Data %08x %08x %s %s\n",
+ did, fc4_data_0, fc4_data_1,
+ (fc4_data_0 & LPFC_FC4_TYPE_BITMASK) ?
+ "FCP" : " ",
+ (fc4_data_1 & LPFC_FC4_TYPE_BITMASK) ?
+ "NVME" : " ");
ndlp = lpfc_findnode_did(vport, did);
if (ndlp) {
@@ -1312,6 +1527,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
struct ulp_bde64 *bpl;
void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *) = NULL;
+ uint32_t *ptr;
uint32_t rsp_size = 1024;
size_t size;
int rc = 0;
@@ -1365,6 +1581,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
bpl->tus.f.bdeFlags = 0;
if (cmdcode == SLI_CTNS_GID_FT)
bpl->tus.f.bdeSize = GID_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_GID_PT)
+ bpl->tus.f.bdeSize = GID_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_GFF_ID)
bpl->tus.f.bdeSize = GFF_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_GFT_ID)
@@ -1405,6 +1623,18 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
rsp_size = FC_MAX_NS_RSP;
break;
+ case SLI_CTNS_GID_PT:
+ CtReq->CommandResponse.bits.CmdRsp =
+ cpu_to_be16(SLI_CTNS_GID_PT);
+ CtReq->un.gid.PortType = context;
+
+ if (vport->port_state < LPFC_NS_QRY)
+ vport->port_state = LPFC_NS_QRY;
+ lpfc_set_disctmo(vport);
+ cmpl = lpfc_cmpl_ct_cmd_gid_pt;
+ rsp_size = FC_MAX_NS_RSP;
+ break;
+
case SLI_CTNS_GFF_ID:
CtReq->CommandResponse.bits.CmdRsp =
cpu_to_be16(SLI_CTNS_GFF_ID);
@@ -1436,8 +1666,18 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
*/
if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
(phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
- CtReq->un.rft.rsvd[0] = cpu_to_be32(0x00000100);
+ CtReq->un.rft.rsvd[0] =
+ cpu_to_be32(LPFC_FC4_TYPE_BITMASK);
+ ptr = (uint32_t *)CtReq;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6433 Issue RFT (%s %s): %08x %08x %08x %08x "
+ "%08x %08x %08x %08x\n",
+ CtReq->un.rft.fcpReg ? "FCP" : " ",
+ CtReq->un.rft.rsvd[0] ? "NVME" : " ",
+ *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3),
+ *(ptr + 4), *(ptr + 5),
+ *(ptr + 6), *(ptr + 7));
cmpl = lpfc_cmpl_ct_cmd_rft_id;
break;
@@ -1512,6 +1752,14 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
else
goto ns_cmd_free_bmpvirt;
+ ptr = (uint32_t *)CtReq;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6434 Issue RFF (%s): %08x %08x %08x %08x "
+ "%08x %08x %08x %08x\n",
+ (context == FC_TYPE_NVME) ? "NVME" : "FCP",
+ *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3),
+ *(ptr + 4), *(ptr + 5),
+ *(ptr + 6), *(ptr + 7));
cmpl = lpfc_cmpl_ct_cmd_rff_id;
break;
}
@@ -1758,7 +2006,7 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
memset(ae, 0, 256);
strncpy(ae->un.AttrString,
- "Emulex Corporation",
+ "Broadcom Inc.",
sizeof(ae->un.AttrString));
len = strnlen(ae->un.AttrString,
sizeof(ae->un.AttrString));
@@ -2134,6 +2382,8 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
ae->un.AttrInt = 0;
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+ if (phba->lmt & LMT_128Gb)
+ ae->un.AttrInt |= HBA_PORTSPEED_128GFC;
if (phba->lmt & LMT_64Gb)
ae->un.AttrInt |= HBA_PORTSPEED_64GFC;
if (phba->lmt & LMT_32Gb)
@@ -2210,6 +2460,9 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
case LPFC_LINK_SPEED_64GHZ:
ae->un.AttrInt = HBA_PORTSPEED_64GFC;
break;
+ case LPFC_LINK_SPEED_128GHZ:
+ ae->un.AttrInt = HBA_PORTSPEED_128GFC;
+ break;
default:
ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN;
break;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 34d311a7dbef..a58f0b3f03a9 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -645,6 +645,8 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
i, ndlp->cmd_qdepth);
outio += i;
}
+ len += snprintf(buf + len, size - len, "defer:%x ",
+ ndlp->nlp_defer_did);
len += snprintf(buf+len, size-len, "\n");
}
spin_unlock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 28e2b60fc5c0..1c89c9f314fa 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -138,6 +138,7 @@ struct lpfc_nodelist {
uint32_t nvme_fb_size; /* NVME target's supported byte cnt */
#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */
+ uint32_t nlp_defer_did;
};
struct lpfc_node_rrq {
struct list_head list;
@@ -165,6 +166,7 @@ struct lpfc_node_rrq {
#define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */
#define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery. */
#define NLP_FCP_PRLI_RJT 0x00002000 /* Rport does not support FCP PRLI. */
+#define NLP_UNREG_INP 0x00008000 /* UNREG_RPI cmd is in progress */
#define NLP_DEFER_RM 0x00010000 /* Remove this ndlp if no longer used */
#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */
#define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */
@@ -293,4 +295,4 @@ struct lpfc_node_rrq {
#define NLP_EVT_DEVICE_RM 0xb /* Device not found in NS / ALPAmap */
#define NLP_EVT_DEVICE_RECOVERY 0xc /* Device existence unknown */
#define NLP_EVT_MAX_EVENT 0xd
-
+#define NLP_EVT_NOTHING_PENDING 0xff
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index f1c1faa74b46..b3a4789468c3 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -242,6 +242,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
if (elscmd == ELS_CMD_FLOGI)
icmd->ulpTimeout = FF_DEF_RATOV * 2;
+ else if (elscmd == ELS_CMD_LOGO)
+ icmd->ulpTimeout = phba->fc_ratov;
else
icmd->ulpTimeout = phba->fc_ratov * 2;
} else {
@@ -313,20 +315,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
/* Xmit ELS command <elsCmd> to remote NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0116 Xmit ELS command x%x to remote "
- "NPORT x%x I/O tag: x%x, port state:x%x"
- " fc_flag:x%x\n",
+ "NPORT x%x I/O tag: x%x, port state:x%x "
+ "rpi x%x fc_flag:x%x\n",
elscmd, did, elsiocb->iotag,
- vport->port_state,
+ vport->port_state, ndlp->nlp_rpi,
vport->fc_flag);
} else {
/* Xmit ELS response <elsCmd> to remote NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0117 Xmit ELS response x%x to remote "
"NPORT x%x I/O tag: x%x, size: x%x "
- "port_state x%x fc_flag x%x\n",
+ "port_state x%x rpi x%x fc_flag x%x\n",
elscmd, ndlp->nlp_DID, elsiocb->iotag,
cmdSize, vport->port_state,
- vport->fc_flag);
+ ndlp->nlp_rpi, vport->fc_flag);
}
return elsiocb;
@@ -413,7 +415,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
/* increment the reference count on ndlp to hold reference
* for the callback routine.
*/
- mbox->context2 = lpfc_nlp_get(ndlp);
+ mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
@@ -428,7 +430,7 @@ fail_issue_reg_login:
* for the failed mbox command.
*/
lpfc_nlp_put(ndlp);
- mp = (struct lpfc_dmabuf *) mbox->context1;
+ mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
fail_free_mbox:
@@ -502,7 +504,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
mboxq->vport = vport;
- mboxq->context1 = dmabuf;
+ mboxq->ctx_buf = dmabuf;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
rc = -ENXIO;
@@ -1055,9 +1057,9 @@ stop_rr_fcf_flogi:
goto flogifail;
lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
- "0150 FLOGI failure Status:x%x/x%x TMO:x%x\n",
+ "0150 FLOGI failure Status:x%x/x%x xri x%x TMO:x%x\n",
irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout);
+ cmdiocb->sli4_xritag, irsp->ulpTimeout);
/* FLOGI failed, so there is no fabric */
spin_lock_irq(shost->host_lock);
@@ -1111,7 +1113,8 @@ stop_rr_fcf_flogi:
/* FLOGI completes successfully */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0101 FLOGI completes successfully, I/O tag:x%x, "
- "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag,
+ "xri x%x Data: x%x x%x x%x x%x x%x %x\n",
+ cmdiocb->iotag, cmdiocb->sli4_xritag,
irsp->un.ulpWord[4], sp->cmn.e_d_tov,
sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
vport->port_state, vport->fc_flag);
@@ -1155,6 +1158,7 @@ stop_rr_fcf_flogi:
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
spin_unlock_irq(&phba->hbalock);
+ phba->fcf.fcf_redisc_attempted = 0; /* reset */
goto out;
}
if (!rc) {
@@ -1169,6 +1173,7 @@ stop_rr_fcf_flogi:
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
spin_unlock_irq(&phba->hbalock);
+ phba->fcf.fcf_redisc_attempted = 0; /* reset */
goto out;
}
}
@@ -1229,9 +1234,10 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct serv_parm *sp;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
+ struct lpfc_iocbq defer_flogi_acc;
uint8_t *pcmd;
uint16_t cmdsize;
- uint32_t tmo;
+ uint32_t tmo, did;
int rc;
cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
@@ -1303,6 +1309,35 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
phba->sli3_options, 0, 0);
rc = lpfc_issue_fabric_iocb(phba, elsiocb);
+
+ phba->hba_flag |= HBA_FLOGI_ISSUED;
+
+ /* Check for a deferred FLOGI ACC condition */
+ if (phba->defer_flogi_acc_flag) {
+ did = vport->fc_myDID;
+ vport->fc_myDID = Fabric_DID;
+
+ memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));
+
+ defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id;
+ defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id =
+ phba->defer_flogi_acc_ox_id;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
+ " ox_id: x%x, hba_flag x%x\n",
+ phba->defer_flogi_acc_rx_id,
+ phba->defer_flogi_acc_ox_id, phba->hba_flag);
+
+ /* Send deferred FLOGI ACC */
+ lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
+ ndlp, NULL);
+
+ phba->defer_flogi_acc_flag = false;
+
+ vport->fc_myDID = did;
+ }
+
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
@@ -1338,6 +1373,8 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
Fabric_DID);
pring = lpfc_phba_elsring(phba);
+ if (unlikely(!pring))
+ return -EIO;
/*
* Check the txcmplq for an iocb that matches the nport the driver is
@@ -1531,7 +1568,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
struct serv_parm *sp;
uint8_t name[sizeof(struct lpfc_name)];
uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
+ uint32_t keep_new_nlp_flag = 0;
uint16_t keep_nlp_state;
+ u32 keep_nlp_fc4_type = 0;
struct lpfc_nvme_rport *keep_nrport = NULL;
int put_node;
int put_rport;
@@ -1551,8 +1590,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
*/
new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
+ /* return immediately if the WWPN matches ndlp */
if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
return ndlp;
+
if (phba->sli_rev == LPFC_SLI_REV4) {
active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
GFP_KERNEL);
@@ -1561,9 +1602,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
phba->cfg_rrq_xri_bitmap_sz);
}
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
- ndlp, ndlp->nlp_DID, new_ndlp);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
+ "3178 PLOGI confirm: ndlp x%x x%x x%x: "
+ "new_ndlp x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type,
+ (new_ndlp ? new_ndlp->nlp_DID : 0),
+ (new_ndlp ? new_ndlp->nlp_flag : 0),
+ (new_ndlp ? new_ndlp->nlp_fc4_type : 0));
if (!new_ndlp) {
rc = memcmp(&ndlp->nlp_portname, name,
@@ -1612,6 +1657,16 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
phba->cfg_rrq_xri_bitmap_sz);
}
+ /* At this point in this routine, we know new_ndlp will be
+ * returned. However, any previous GID_FTs that were done
+ * would have updated nlp_fc4_type in ndlp, so we must ensure
+ * new_ndlp has the right value.
+ */
+ if (vport->fc_flag & FC_FABRIC) {
+ keep_nlp_fc4_type = new_ndlp->nlp_fc4_type;
+ new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type;
+ }
+
lpfc_unreg_rpi(vport, new_ndlp);
new_ndlp->nlp_DID = ndlp->nlp_DID;
new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
@@ -1621,9 +1676,36 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
phba->cfg_rrq_xri_bitmap_sz);
spin_lock_irq(shost->host_lock);
- keep_nlp_flag = new_ndlp->nlp_flag;
+ keep_new_nlp_flag = new_ndlp->nlp_flag;
+ keep_nlp_flag = ndlp->nlp_flag;
new_ndlp->nlp_flag = ndlp->nlp_flag;
- ndlp->nlp_flag = keep_nlp_flag;
+
+ /* if new_ndlp had NLP_UNREG_INP set, keep it */
+ if (keep_new_nlp_flag & NLP_UNREG_INP)
+ new_ndlp->nlp_flag |= NLP_UNREG_INP;
+ else
+ new_ndlp->nlp_flag &= ~NLP_UNREG_INP;
+
+ /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */
+ if (keep_new_nlp_flag & NLP_RPI_REGISTERED)
+ new_ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+ else
+ new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
+
+ ndlp->nlp_flag = keep_new_nlp_flag;
+
+ /* if ndlp had NLP_UNREG_INP set, keep it */
+ if (keep_nlp_flag & NLP_UNREG_INP)
+ ndlp->nlp_flag |= NLP_UNREG_INP;
+ else
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
+
+ /* if ndlp had NLP_RPI_REGISTERED set, keep it */
+ if (keep_nlp_flag & NLP_RPI_REGISTERED)
+ ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+ else
+ ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
+
spin_unlock_irq(shost->host_lock);
/* Set nlp_states accordingly */
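/*
 * Aside: the swap above moves nlp_flag between the two nodes while
 * pinning NLP_UNREG_INP and NLP_RPI_REGISTERED to their original
 * owners.  A minimal sketch of that bit-preserving exchange, with a
 * hypothetical helper (swap_flags_keep() is illustrative, not part of
 * the driver):
 */
static inline uint32_t
swap_flags_keep(uint32_t from_flags, uint32_t own_flags, uint32_t keep_mask)
{
	/* take everything from the other node except keep_mask bits,
	 * which stay as they were on this node
	 */
	return (from_flags & ~keep_mask) | (own_flags & keep_mask);
}
/* mirrors the diff:
 *   new_ndlp->nlp_flag = swap_flags_keep(keep_nlp_flag, keep_new_nlp_flag,
 *				NLP_UNREG_INP | NLP_RPI_REGISTERED);
 *   ndlp->nlp_flag = swap_flags_keep(keep_new_nlp_flag, keep_nlp_flag,
 *				NLP_UNREG_INP | NLP_RPI_REGISTERED);
 */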
@@ -1661,7 +1743,6 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
if (ndlp->nrport) {
ndlp->nrport = NULL;
lpfc_nlp_put(ndlp);
- new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type;
}
/* We shall actually free the ndlp with both nlp_DID and
@@ -1674,7 +1755,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
spin_unlock_irq(&phba->ndlp_lock);
}
- /* Two ndlps cannot have the same did on the nodelist */
+ /* Two ndlps cannot have the same did on the nodelist.
+ * Note: for this case, ndlp has a NULL WWPN so setting
+ * the nlp_fc4_type isn't required.
+ */
ndlp->nlp_DID = keepDID;
lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
if (phba->sli_rev == LPFC_SLI_REV4 &&
@@ -1693,8 +1777,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
lpfc_unreg_rpi(vport, ndlp);
- /* Two ndlps cannot have the same did */
+ /* Two ndlps cannot have the same did and the fc4
+ * type must be transferred because the ndlp is in
+ * flight.
+ */
ndlp->nlp_DID = keepDID;
+ ndlp->nlp_fc4_type = keep_nlp_fc4_type;
+
if (phba->sli_rev == LPFC_SLI_REV4 &&
active_rrqs_xri_bitmap)
memcpy(ndlp->active_rrqs_xri_bitmap,
@@ -1735,6 +1824,12 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
active_rrqs_xri_bitmap)
mempool_free(active_rrqs_xri_bitmap,
phba->active_rrq_pool);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
+ "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n",
+ new_ndlp->nlp_DID, new_ndlp->nlp_flag,
+ new_ndlp->nlp_fc4_type);
+
return new_ndlp;
}
@@ -1895,7 +1990,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
- rc = 0;
+ rc = 0;
/* PLOGI completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -2002,8 +2097,29 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
int ret;
ndlp = lpfc_findnode_did(vport, did);
- if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
- ndlp = NULL;
+
+ if (ndlp) {
+ /* Defer the processing of the issue PLOGI until after the
+ * outstanding UNREG_RPI mbox command completes, unless we
+ * are going offline. This logic does not apply for Fabric DIDs
+ */
+ if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
+ ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
+ !(vport->fc_flag & FC_OFFLINE_MODE)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "4110 Issue PLOGI x%x deferred "
+ "on NPort x%x rpi x%x Data: %p\n",
+ ndlp->nlp_defer_did, ndlp->nlp_DID,
+ ndlp->nlp_rpi, ndlp);
+
+ /* We can only defer 1st PLOGI */
+ if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
+ ndlp->nlp_defer_did = did;
+ return 0;
+ }
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ ndlp = NULL;
+ }
/* If ndlp is not NULL, we will bump the reference count on it */
cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
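/*
 * Aside: the defer test above reads as a single predicate.  Minimal
 * sketch (lpfc_plogi_should_defer() is illustrative, not part of the
 * driver):
 */
static inline bool
lpfc_plogi_should_defer(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	return (ndlp->nlp_flag & NLP_UNREG_INP) &&	/* UNREG_RPI in flight */
	       (ndlp->nlp_DID & Fabric_DID_MASK) !=
			Fabric_DID_MASK &&		/* not a fabric DID */
	       !(vport->fc_flag & FC_OFFLINE_MODE);	/* not going offline */
}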
@@ -2137,7 +2253,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
else
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PRLI);
- } else
+ } else {
/* Good status, call state machine. However, if another
* PRLI is outstanding, don't call the state machine
* because final disposition to Mapped or Unmapped is
@@ -2145,6 +2261,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PRLI);
+ }
out:
lpfc_els_free_iocb(phba, cmdiocb);
@@ -2203,7 +2320,7 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+ ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC);
ndlp->nvme_fb_size = 0;
send_next_prli:
@@ -2682,16 +2799,15 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
+ /* The LOGO will not be retried on failure. A LOGO was
+ * issued to the remote rport and an ACC, an RJT, or no answer
+ * is acceptable. Note the failure and move forward with
+ * discovery. The PLOGI will retry.
+ */
if (irsp->ulpStatus) {
- /* Check for retry */
- if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
- /* ELS command is being retried */
- skip_recovery = 1;
- goto out;
- }
/* LOGO failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
+ "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n",
ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4]);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
@@ -2737,7 +2853,8 @@ out:
* For any other port type, the rpi is unregistered as an implicit
* LOGO.
*/
- if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) {
+ if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
+ skip_recovery == 0) {
lpfc_cancel_retry_delay_tmo(vport, ndlp);
spin_lock_irqsave(shost->host_lock, flags);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
@@ -2770,6 +2887,8 @@ out:
* will be stored into the context1 field of the IOCB for the completion
* callback function to the LOGO ELS command.
*
+ * Callers of this routine are expected to unregister the RPI first.
+ *
* Return code
* 0 - successfully issued logo
* 1 - failed to issue logo
@@ -2811,22 +2930,6 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"Issue LOGO: did:x%x",
ndlp->nlp_DID, 0, 0);
- /*
- * If we are issuing a LOGO, we may try to recover the remote NPort
- * by issuing a PLOGI later. Even though we issue ELS cmds by the
- * VPI, if we have a valid RPI, and that RPI gets unreg'ed while
- * that ELS command is in-flight, the HBA returns a IOERR_INVALID_RPI
- * for that ELS cmd. To avoid this situation, lets get rid of the
- * RPI right now, before any ELS cmds are sent.
- */
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_ISSUE_LOGO;
- spin_unlock_irq(shost->host_lock);
- if (lpfc_unreg_rpi(vport, ndlp)) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 0;
- }
-
phba->fc_stat.elsXmitLOGO++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
spin_lock_irq(shost->host_lock);
@@ -2834,7 +2937,6 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
spin_unlock_irq(shost->host_lock);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
-
if (rc == IOCB_ERROR) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_LOGO_SND;
@@ -2842,6 +2944,11 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
+
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
return 0;
}
@@ -3250,6 +3357,62 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
}
/**
+ * lpfc_link_reset - Issue link reset
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine performs link reset by sending INIT_LINK mailbox command.
+ * For SLI-3 adapter, link attention interrupt is enabled before issuing
+ * INIT_LINK mailbox command.
+ *
+ * Return code
+ * 0 - Link reset initiated successfully
+ * 1 - Failed to initiate link reset
+ **/
+int
+lpfc_link_reset(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t control;
+ int rc;
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2851 Attempt link reset\n");
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "2852 Failed to allocate mbox memory");
+ return 1;
+ }
+
+ /* Enable Link attention interrupts */
+ if (phba->sli_rev <= LPFC_SLI_REV3) {
+ spin_lock_irq(&phba->hbalock);
+ phba->sli.sli_flag |= LPFC_PROCESS_LA;
+ control = readl(phba->HCregaddr);
+ control |= HC_LAINT_ENA;
+ writel(control, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ spin_unlock_irq(&phba->hbalock);
+ }
+
+ lpfc_init_link(phba, mbox, phba->cfg_topology,
+ phba->cfg_link_speed);
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "2853 Failed to issue INIT_LINK "
+ "mbox command, rc:x%x\n", rc);
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return 1;
+ }
+
+ return 0;
+}
+
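/*
 * Aside: lpfc_link_reset() follows the driver's usual fire-and-forget
 * mailbox pattern.  A minimal sketch of that pattern under the same
 * assumptions as the surrounding code (error handling shown with the
 * MBX_NOT_FINISHED variant used elsewhere in this diff):
 */
	LPFC_MBOXQ_t *mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mb)
		return 1;				/* allocation failed */
	/* ... build the command here, e.g. lpfc_init_link(...) ... */
	mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;	/* default cmpl frees the mbox */
	mb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT) == MBX_NOT_FINISHED) {
		mempool_free(mb, phba->mbox_mem_pool);	/* still ours on failure */
		return 1;
	}
	return 0;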
+/**
* lpfc_els_retry - Make retry decision on an els command iocb
* @phba: pointer to lpfc hba data structure.
* @cmdiocb: pointer to lpfc command iocb data structure.
@@ -3285,6 +3448,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
int logerr = 0;
uint32_t cmd = 0;
uint32_t did;
+ int link_reset = 0, rc;
/* Note: context2 may be 0 for internal driver abort
@@ -3366,7 +3530,6 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
retry = 1;
break;
- case IOERR_SEQUENCE_TIMEOUT:
case IOERR_INVALID_RPI:
if (cmd == ELS_CMD_PLOGI &&
did == NameServer_DID) {
@@ -3377,6 +3540,18 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
retry = 1;
break;
+
+ case IOERR_SEQUENCE_TIMEOUT:
+ if (cmd == ELS_CMD_PLOGI &&
+ did == NameServer_DID &&
+ (cmdiocb->retry + 1) == maxretry) {
+ /* Reset the Link */
+ link_reset = 1;
+ break;
+ }
+ retry = 1;
+ delay = 100;
+ break;
}
break;
@@ -3533,6 +3708,19 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
}
+ if (link_reset) {
+ rc = lpfc_link_reset(vport);
+ if (rc) {
+ /* Do not give up. Retry PLOGI one more time and attempt
+ * link reset if PLOGI fails again.
+ */
+ retry = 1;
+ delay = 100;
+ goto out_retry;
+ }
+ return 1;
+ }
+
if (did == FDMI_DID)
retry = 1;
@@ -3895,11 +4083,11 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
void
lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
- pmb->context1 = NULL;
- pmb->context2 = NULL;
+ pmb->ctx_buf = NULL;
+ pmb->ctx_ndlp = NULL;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
@@ -3975,7 +4163,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check to see if link went down during discovery */
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
if (mbox) {
- mp = (struct lpfc_dmabuf *) mbox->context1;
+ mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
if (mp) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
@@ -4019,7 +4207,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"Data: x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_state,
ndlp->nlp_rpi, ndlp->nlp_flag);
- mp = mbox->context1;
+ mp = mbox->ctx_buf;
if (mp) {
lpfc_mbuf_free(phba, mp->virt,
mp->phys);
@@ -4032,7 +4220,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Increment reference count to ndlp to hold the
* reference to ndlp for the callback function.
*/
- mbox->context2 = lpfc_nlp_get(ndlp);
+ mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
mbox->vport = vport;
if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
@@ -4086,7 +4274,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
}
}
- mp = (struct lpfc_dmabuf *) mbox->context1;
+ mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
if (mp) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
@@ -4272,14 +4460,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
default:
return 1;
}
- /* Xmit ELS ACC response tag <ulpIoTag> */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
- "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x "
- "fc_flag x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
- ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
- ndlp->nlp_rpi, vport->fc_flag);
if (ndlp->nlp_flag & NLP_LOGO_ACC) {
spin_lock_irq(shost->host_lock);
if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
@@ -4448,6 +4628,15 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
+
+ /* Xmit ELS ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
+ "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
+ "RPI: x%x, fc_flag x%x\n",
+ rc, elsiocb->iotag, elsiocb->sli4_xritag,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi, vport->fc_flag);
return 0;
}
@@ -5281,6 +5470,8 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
+ if (phba->lmt & LMT_128Gb)
+ rdp_cap |= RDP_PS_128GB;
if (phba->lmt & LMT_64Gb)
rdp_cap |= RDP_PS_64GB;
if (phba->lmt & LMT_32Gb)
@@ -5499,7 +5690,7 @@ lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
goto prep_mbox_fail;
mbox->vport = rdp_context->ndlp->vport;
mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
- mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
+ mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
goto issue_mbox_fail;
@@ -5542,7 +5733,7 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct ls_rjt stat;
if (phba->sli_rev < LPFC_SLI_REV4 ||
- bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
LPFC_SLI_INTF_IF_TYPE_2) {
rjt_err = LSRJT_UNABLE_TPC;
rjt_expl = LSEXP_REQ_UNSUPPORTED;
@@ -5624,10 +5815,10 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
int rc;
mb = &pmb->u.mb;
- lcb_context = (struct lpfc_lcb_context *)pmb->context1;
+ lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp;
ndlp = lcb_context->ndlp;
- pmb->context1 = NULL;
- pmb->context2 = NULL;
+ pmb->ctx_ndlp = NULL;
+ pmb->ctx_buf = NULL;
shdr = (union lpfc_sli4_cfg_shdr *)
&pmb->u.mqe.un.beacon_config.header.cfg_shdr;
@@ -5701,6 +5892,9 @@ error:
stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)
+ stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
+
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitLSRJT++;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
@@ -5731,7 +5925,7 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport,
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len,
LPFC_SLI4_MBX_EMBED);
- mbox->context1 = (void *)lcb_context;
+ mbox->ctx_ndlp = (void *)lcb_context;
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_els_lcb_rsp;
bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config,
@@ -6011,6 +6205,19 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
if (vport->phba->nvmet_support)
continue;
+ /* If we are in the process of doing discovery on this
+ * NPort, let it continue on its own.
+ */
+ switch (ndlp->nlp_state) {
+ case NLP_STE_PLOGI_ISSUE:
+ case NLP_STE_ADISC_ISSUE:
+ case NLP_STE_REG_LOGIN_ISSUE:
+ case NLP_STE_PRLI_ISSUE:
+ case NLP_STE_LOGO_ISSUE:
+ continue;
+ }
+
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
lpfc_cancel_retry_delay_tmo(vport, ndlp);
@@ -6272,6 +6479,7 @@ int
lpfc_els_handle_rscn(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp;
+ struct lpfc_hba *phba = vport->phba;
/* Ignore RSCN if the port is being torn down. */
if (vport->load_flag & FC_UNLOADING) {
@@ -6300,8 +6508,15 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
* flush the RSCN. Otherwise, the outstanding requests
* need to complete.
*/
- if (lpfc_issue_gidft(vport) > 0)
+ if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) {
+ if (lpfc_issue_gidft(vport) > 0)
+ return 1;
+ } else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) {
+ if (lpfc_issue_gidpt(vport) > 0)
+ return 1;
+ } else {
return 1;
+ }
} else {
/* Nameserver login in question. Revalidate. */
if (ndlp) {
@@ -6455,6 +6670,11 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
port_state = vport->port_state;
vport->fc_flag |= FC_PT2PT;
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+
+ /* Acking an unsol FLOGI. Count 1 for link bounce
+ * work-around.
+ */
+ vport->rcv_flogi_cnt++;
spin_unlock_irq(shost->host_lock);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3311 Rcv Flogi PS x%x new PS x%x "
@@ -6472,6 +6692,25 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
+ /* Defer ACC response until AFTER we issue a FLOGI */
+ if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) {
+ phba->defer_flogi_acc_rx_id = cmdiocb->iocb.ulpContext;
+ phba->defer_flogi_acc_ox_id =
+ cmdiocb->iocb.unsli3.rcvsli3.ox_id;
+
+ vport->fc_myDID = did;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3344 Deferring FLOGI ACC: rx_id: x%x,"
+ " ox_id: x%x, hba_flag x%x\n",
+ phba->defer_flogi_acc_rx_id,
+ phba->defer_flogi_acc_ox_id, phba->hba_flag);
+
+ phba->defer_flogi_acc_flag = true;
+
+ return 0;
+ }
+
/* Send back ACC */
lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL);
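/*
 * Aside: the deferred-ACC handshake introduced here spans two paths; a
 * hedged summary using the fields added by this diff.  On receive (above),
 * when our own FLOGI has not gone out yet, the rx_id/ox_id of the peer's
 * FLOGI are parked in the hba and no ACC is sent.  Later,
 * lpfc_issue_els_flogi() replays the ACC from a stack iocb:
 */
	struct lpfc_iocbq acc;

	memset(&acc, 0, sizeof(acc));
	acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id;	/* saved rx_id */
	acc.iocb.unsli3.rcvsli3.ox_id = phba->defer_flogi_acc_ox_id;
	lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &acc, ndlp, NULL);
	phba->defer_flogi_acc_flag = false;
/* lpfc_linkdown()/lpfc_linkup() clear the flag, discarding stale state. */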
@@ -6644,11 +6883,11 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb = &pmb->u.mb;
- ndlp = (struct lpfc_nodelist *) pmb->context2;
- rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
- oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
- pmb->context1 = NULL;
- pmb->context2 = NULL;
+ ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
+ oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
+ pmb->ctx_buf = NULL;
+ pmb->ctx_ndlp = NULL;
if (mb->mbxStatus) {
mempool_free(pmb, phba->mbox_mem_pool);
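/*
 * Aside: ox_id and rx_id travel through the single ctx_buf pointer as a
 * packed scalar.  A minimal sketch of the pack/unpack convention used
 * above (the helpers are illustrative, not part of the driver):
 */
static inline void *lpfc_pack_oxid_rxid(uint16_t oxid, uint16_t rxid)
{
	return (void *)(unsigned long)(((uint32_t)oxid << 16) | rxid);
}

static inline uint16_t lpfc_unpack_rxid(void *ctx)
{
	return (uint16_t)((unsigned long)ctx & 0xffff);
}

static inline uint16_t lpfc_unpack_oxid(void *ctx)
{
	return (uint16_t)(((unsigned long)ctx >> 16) & 0xffff);
}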
@@ -6732,11 +6971,11 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb = &pmb->u.mb;
- ndlp = (struct lpfc_nodelist *) pmb->context2;
- rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
- oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
- pmb->context1 = NULL;
- pmb->context2 = NULL;
+ ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
+ oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
+ pmb->ctx_ndlp = NULL;
+ pmb->ctx_buf = NULL;
if (mb->mbxStatus) {
mempool_free(pmb, phba->mbox_mem_pool);
@@ -6827,10 +7066,10 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
if (mbox) {
lpfc_read_lnk_stat(phba, mbox);
- mbox->context1 = (void *)((unsigned long)
+ mbox->ctx_buf = (void *)((unsigned long)
((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
cmdiocb->iocb.ulpContext)); /* rx_id */
- mbox->context2 = lpfc_nlp_get(ndlp);
+ mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
@@ -6990,10 +7229,10 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
if (mbox) {
lpfc_read_lnk_stat(phba, mbox);
- mbox->context1 = (void *)((unsigned long)
+ mbox->ctx_buf = (void *)((unsigned long)
((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
cmdiocb->iocb.ulpContext)); /* rx_id */
- mbox->context2 = lpfc_nlp_get(ndlp);
+ mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
@@ -7852,8 +8091,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct ls_rjt stat;
uint32_t *payload;
uint32_t cmd, did, newnode;
- uint8_t rjt_exp, rjt_err = 0;
+ uint8_t rjt_exp, rjt_err = 0, init_link = 0;
IOCB_t *icmd = &elsiocb->iocb;
+ LPFC_MBOXQ_t *mbox;
if (!vport || !(elsiocb->context2))
goto dropit;
@@ -7940,9 +8180,10 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
cmd, did, vport->port_state, vport->fc_flag,
vport->fc_myDID, vport->fc_prevDID);
- /* reject till our FLOGI completes */
+ /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */
if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
- (cmd != ELS_CMD_FLOGI)) {
+ (cmd != ELS_CMD_FLOGI) &&
+ !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) {
rjt_err = LSRJT_LOGICAL_BSY;
rjt_exp = LSEXP_NOTHING_MORE;
goto lsrjt;
@@ -8002,6 +8243,19 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
did, vport->port_state, ndlp->nlp_flag);
phba->fc_stat.elsRcvFLOGI++;
+
+ /* If the driver believes fabric discovery is done and is ready,
+ * bounce the link. There is some discrepancy.
+ */
+ if (vport->port_state >= LPFC_LOCAL_CFG_LINK &&
+ vport->fc_flag & FC_PT2PT &&
+ vport->rcv_flogi_cnt >= 1) {
+ rjt_err = LSRJT_LOGICAL_BSY;
+ rjt_exp = LSEXP_NOTHING_MORE;
+ init_link++;
+ goto lsrjt;
+ }
+
lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
if (newnode)
lpfc_nlp_put(ndlp);
@@ -8230,6 +8484,27 @@ lsrjt:
lpfc_nlp_put(elsiocb->context1);
elsiocb->context1 = NULL;
+
+ /* Special case. Driver received an unsolicited command that
+ * is unsupportable given the driver's current state. Reset the
+ * link and start over.
+ */
+ if (init_link) {
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return;
+ lpfc_linkdown(phba);
+ lpfc_init_link(phba, mbox,
+ phba->cfg_topology,
+ phba->cfg_link_speed);
+ mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
+ MBX_NOT_FINISHED)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+
return;
dropit:
@@ -8453,7 +8728,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
MAILBOX_t *mb = &pmb->u.mb;
int rc;
@@ -8571,7 +8846,7 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
if (mbox) {
lpfc_reg_vpi(vport, mbox);
mbox->vport = vport;
- mbox->context2 = lpfc_nlp_get(ndlp);
+ mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
== MBX_NOT_FINISHED) {
@@ -9505,7 +9780,8 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
"rport in state 0x%x\n", ndlp->nlp_state);
return;
}
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_ELS | LOG_FCP_ERROR | LOG_NVME_IOERR,
"3094 Start rport recovery on shost id 0x%x "
"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
"flags 0x%x\n",
@@ -9518,8 +9794,8 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
*/
spin_lock_irqsave(shost->host_lock, flags);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag |= NLP_ISSUE_LOGO;
spin_unlock_irqrestore(shost->host_lock, flags);
- lpfc_issue_els_logo(vport, ndlp, 0);
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
+ lpfc_unreg_rpi(vport, ndlp);
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index f4deb862efc6..b183b882d506 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -888,17 +888,31 @@ lpfc_linkdown(struct lpfc_hba *phba)
LPFC_MBOXQ_t *mb;
int i;
- if (phba->link_state == LPFC_LINK_DOWN)
+ if (phba->link_state == LPFC_LINK_DOWN) {
+ if (phba->sli4_hba.conf_trunk) {
+ phba->trunk_link.link0.state = 0;
+ phba->trunk_link.link1.state = 0;
+ phba->trunk_link.link2.state = 0;
+ phba->trunk_link.link3.state = 0;
+ }
return 0;
-
+ }
/* Block all SCSI stack I/Os */
lpfc_scsi_dev_block(phba);
+ phba->defer_flogi_acc_flag = false;
+
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
spin_unlock_irq(&phba->hbalock);
if (phba->link_state > LPFC_LINK_DOWN) {
phba->link_state = LPFC_LINK_DOWN;
+ if (phba->sli4_hba.conf_trunk) {
+ phba->trunk_link.link0.state = 0;
+ phba->trunk_link.link1.state = 0;
+ phba->trunk_link.link2.state = 0;
+ phba->trunk_link.link3.state = 0;
+ }
spin_lock_irq(shost->host_lock);
phba->pport->fc_flag &= ~FC_LBIT;
spin_unlock_irq(shost->host_lock);
@@ -947,6 +961,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
}
spin_lock_irq(shost->host_lock);
phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
+ phba->pport->rcv_flogi_cnt = 0;
spin_unlock_irq(shost->host_lock);
}
return 0;
@@ -1018,6 +1033,7 @@ lpfc_linkup(struct lpfc_hba *phba)
{
struct lpfc_vport **vports;
int i;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
phba->link_state = LPFC_LINK_UP;
@@ -1031,6 +1047,18 @@ lpfc_linkup(struct lpfc_hba *phba)
lpfc_linkup_port(vports[i]);
lpfc_destroy_vport_work_array(phba, vports);
+ /* Clear the pport flogi counter in case the link down was
+ * absorbed without an ACQE. No lock here - in worker thread
+ * and discovery is synchronized.
+ */
+ spin_lock_irq(shost->host_lock);
+ phba->pport->rcv_flogi_cnt = 0;
+ spin_unlock_irq(shost->host_lock);
+
+ /* reinitialize initial FLOGI flag */
+ phba->hba_flag &= ~(HBA_FLOGI_ISSUED);
+ phba->defer_flogi_acc_flag = false;
+
return 0;
}
@@ -1992,6 +2020,26 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
"failover and change port state:x%x/x%x\n",
phba->pport->port_state, LPFC_VPORT_UNKNOWN);
phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+
+ if (!phba->fcf.fcf_redisc_attempted) {
+ lpfc_unregister_fcf(phba);
+
+ rc = lpfc_sli4_redisc_fcf_table(phba);
+ if (!rc) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "3195 Rediscover FCF table\n");
+ phba->fcf.fcf_redisc_attempted = 1;
+ lpfc_sli4_clear_fcf_rr_bmask(phba);
+ } else {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "3196 Rediscover FCF table "
+ "failed. Status:x%x\n", rc);
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "3197 Already rediscover FCF table "
+ "attempted. No more retry\n");
+ }
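/*
 * Aside: fcf_redisc_attempted acts as a one-shot latch.  Hedged summary
 * (both sites appear in this diff):
 *
 *   lpfc_sli4_fcf_rr_next_proc():  on round-robin exhaustion, attempt a
 *       full FCF-table rediscovery once and set the latch
 *   lpfc_cmpl_els_flogi() success: clear the latch so a later exhaustion
 *       may trigger rediscovery again
 */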
goto stop_flogi_current_fcf;
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
@@ -2915,7 +2963,7 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
- struct lpfc_dmabuf *dmabuf = mboxq->context1;
+ struct lpfc_dmabuf *dmabuf = mboxq->ctx_buf;
struct lpfc_vport *vport = mboxq->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
@@ -3008,7 +3056,7 @@ static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
struct lpfc_vport *vport = pmb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct serv_parm *sp = &vport->fc_sparam;
@@ -3052,7 +3100,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
out:
- pmb->context1 = NULL;
+ pmb->ctx_buf = NULL;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
lpfc_issue_clear_la(phba, vport);
@@ -3085,6 +3133,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
case LPFC_LINK_SPEED_16GHZ:
case LPFC_LINK_SPEED_32GHZ:
case LPFC_LINK_SPEED_64GHZ:
+ case LPFC_LINK_SPEED_128GHZ:
break;
default:
phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
@@ -3190,7 +3239,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
+ mp = (struct lpfc_dmabuf *)sparam_mbox->ctx_buf;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(sparam_mbox, phba->mbox_mem_pool);
@@ -3319,7 +3368,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_mbx_read_top *la;
struct lpfc_sli_ring *pring;
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
uint8_t attn_type;
/* Unblock ELS traffic */
@@ -3476,12 +3525,12 @@ void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- pmb->context1 = NULL;
- pmb->context2 = NULL;
+ pmb->ctx_buf = NULL;
+ pmb->ctx_ndlp = NULL;
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
"0002 rpi:%x DID:%x flg:%x %d map:%x %p\n",
@@ -3689,8 +3738,8 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
vport_buff = (uint8_t *) vport_info;
do {
/* free dma buffer from previous round */
- if (pmb->context1) {
- mp = (struct lpfc_dmabuf *)pmb->context1;
+ if (pmb->ctx_buf) {
+ mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
@@ -3712,7 +3761,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
if (phba->sli_rev == LPFC_SLI_REV4) {
byte_count = pmb->u.mqe.un.mb_words[5];
- mp = (struct lpfc_dmabuf *)pmb->context1;
+ mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
if (byte_count > sizeof(struct static_vport_info) -
offset)
byte_count = sizeof(struct static_vport_info)
@@ -3777,8 +3826,8 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
out:
kfree(vport_info);
if (mbx_wait_rc != MBX_TIMEOUT) {
- if (pmb->context1) {
- mp = (struct lpfc_dmabuf *)pmb->context1;
+ if (pmb->ctx_buf) {
+ mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
@@ -3799,13 +3848,13 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
struct lpfc_nodelist *ndlp;
struct Scsi_Host *shost;
- ndlp = (struct lpfc_nodelist *) pmb->context2;
- pmb->context1 = NULL;
- pmb->context2 = NULL;
+ ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ pmb->ctx_ndlp = NULL;
+ pmb->ctx_buf = NULL;
if (mb->mbxStatus) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
@@ -3913,6 +3962,35 @@ lpfc_issue_gidft(struct lpfc_vport *vport)
return vport->gidft_inp;
}
+/**
+ * lpfc_issue_gidpt - issue a GID_PT for all N_Ports
+ * @vport: The virtual port for which this call is being executed.
+ *
+ * This routine will issue a GID_PT to get a list of all N_Ports.
+ *
+ * Return value :
+ * 0 - Failure to issue a GID_PT
+ * 1 - GID_PT issued
+ **/
+int
+lpfc_issue_gidpt(struct lpfc_vport *vport)
+{
+ /* Good status, issue CT Request to NameServer */
+ if (lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0, GID_PT_N_PORT)) {
+ /* Cannot issue NameServer FCP Query, so finish up
+ * discovery
+ */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
+ "0606 %s Port TYPE %x %s\n",
+ "Failed to issue GID_PT to ",
+ GID_PT_N_PORT,
+ "Finishing discovery.");
+ return 0;
+ }
+ vport->gidft_inp++;
+ return 1;
+}
+
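/*
 * Aside: with lpfc_issue_gidpt() available, the RSCN path selects the
 * NameServer query by the cfg_ns_query knob.  A hedged sketch of that
 * dispatch, mirroring lpfc_els_handle_rscn() in this diff:
 */
	switch (phba->cfg_ns_query) {
	case LPFC_NS_QUERY_GID_FT:
		if (lpfc_issue_gidft(vport) > 0)
			return 1;	/* query in flight, wait for it */
		break;
	case LPFC_NS_QUERY_GID_PT:
		if (lpfc_issue_gidpt(vport) > 0)
			return 1;
		break;
	default:
		return 1;		/* unknown knob: flush the RSCN */
	}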
/*
* This routine handles processing a NameServer REG_LOGIN mailbox
* command upon completion. It is setup in the LPFC_MBOXQ
@@ -3923,12 +4001,12 @@ void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
struct lpfc_vport *vport = pmb->vport;
- pmb->context1 = NULL;
- pmb->context2 = NULL;
+ pmb->ctx_buf = NULL;
+ pmb->ctx_ndlp = NULL;
vport->gidft_inp = 0;
if (mb->mbxStatus) {
@@ -4385,6 +4463,7 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
NLP_INT_NODE_ACT(ndlp);
atomic_set(&ndlp->cmd_pending, 0);
ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
+ ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
}
struct lpfc_nodelist *
@@ -4392,10 +4471,11 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int state)
{
struct lpfc_hba *phba = vport->phba;
- uint32_t did;
+ uint32_t did, flag;
unsigned long flags;
unsigned long *active_rrqs_xri_bitmap = NULL;
int rpi = LPFC_RPI_ALLOC_ERROR;
+ uint32_t defer_did = 0;
if (!ndlp)
return NULL;
@@ -4428,16 +4508,23 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
goto free_rpi;
}
- /* Keep the original DID */
+ /* First preserve the original DID, xri_bitmap and some flags */
did = ndlp->nlp_DID;
+ flag = (ndlp->nlp_flag & NLP_UNREG_INP);
+ if (flag & NLP_UNREG_INP)
+ defer_did = ndlp->nlp_defer_did;
if (phba->sli_rev == LPFC_SLI_REV4)
active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;
- /* re-initialize ndlp except of ndlp linked list pointer */
+ /* Zero ndlp except for the linked list pointer */
memset((((char *)ndlp) + sizeof (struct list_head)), 0,
sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
- lpfc_initialize_node(vport, ndlp, did);
+ /* Next reinitialize and restore saved objects */
+ lpfc_initialize_node(vport, ndlp, did);
+ ndlp->nlp_flag |= flag;
+ if (flag & NLP_UNREG_INP)
+ ndlp->nlp_defer_did = defer_did;
if (phba->sli_rev == LPFC_SLI_REV4)
ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
@@ -4697,11 +4784,27 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_vport *vport = pmb->vport;
struct lpfc_nodelist *ndlp;
- ndlp = (struct lpfc_nodelist *)(pmb->context1);
+ ndlp = (struct lpfc_nodelist *)(pmb->ctx_ndlp);
if (!ndlp)
return;
lpfc_issue_els_logo(vport, ndlp, 0);
mempool_free(pmb, phba->mbox_mem_pool);
+
+ /* Check to see if there are any deferred events to process */
+ if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
+ (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "1434 UNREG cmpl deferred logo x%x "
+ "on NPort x%x Data: x%x %p\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_defer_did, ndlp);
+
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+ } else {
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ }
}
/*
@@ -4730,6 +4833,21 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
"did x%x\n",
ndlp->nlp_rpi, ndlp->nlp_flag,
ndlp->nlp_DID);
+
+ /* If there is already an UNREG in progress for this ndlp,
+ * no need to queue up another one.
+ */
+ if (ndlp->nlp_flag & NLP_UNREG_INP) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "1436 unreg_rpi SKIP UNREG x%x on "
+ "NPort x%x deferred x%x flg x%x "
+ "Data: %p\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_defer_did,
+ ndlp->nlp_flag, ndlp);
+ goto out;
+ }
+
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
/* SLI4 ports require the physical rpi value. */
@@ -4740,26 +4858,38 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
mbox->vport = vport;
if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
- mbox->context1 = ndlp;
+ mbox->ctx_ndlp = ndlp;
mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
} else {
if (phba->sli_rev == LPFC_SLI_REV4 &&
(!(vport->load_flag & FC_UNLOADING)) &&
(bf_get(lpfc_sli_intf_if_type,
- &phba->sli4_hba.sli_intf) ==
+ &phba->sli4_hba.sli_intf) >=
LPFC_SLI_INTF_IF_TYPE_2) &&
(kref_read(&ndlp->kref) > 0)) {
- mbox->context1 = lpfc_nlp_get(ndlp);
+ mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
mbox->mbox_cmpl =
lpfc_sli4_unreg_rpi_cmpl_clr;
/*
* accept PLOGIs after unreg_rpi_cmpl
*/
acc_plogi = 0;
- } else
+ } else {
+ mbox->ctx_ndlp = ndlp;
mbox->mbox_cmpl =
lpfc_sli_def_mbox_cmpl;
+ }
}
+ if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
+ Fabric_DID_MASK) &&
+ (!(vport->fc_flag & FC_OFFLINE_MODE)))
+ ndlp->nlp_flag |= NLP_UNREG_INP;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "1433 unreg_rpi UNREG x%x on "
+ "NPort x%x deferred flg x%x Data:%p\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp);
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
@@ -4768,7 +4898,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
}
}
lpfc_no_rpi(phba, ndlp);
-
+out:
if (phba->sli_rev != LPFC_SLI_REV4)
ndlp->nlp_rpi = 0;
ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
@@ -4836,7 +4966,7 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->context1 = NULL;
+ mbox->ctx_ndlp = NULL;
rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
if (rc != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
@@ -4861,7 +4991,7 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->context1 = NULL;
+ mbox->ctx_ndlp = NULL;
rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
if (rc != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
@@ -4915,8 +5045,8 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if ((mb = phba->sli.mbox_active)) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
!(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
- (ndlp == (struct lpfc_nodelist *) mb->context2)) {
- mb->context2 = NULL;
+ (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
+ mb->ctx_ndlp = NULL;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
}
@@ -4926,18 +5056,18 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
(mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
- (ndlp != (struct lpfc_nodelist *) mb->context2))
+ (ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp))
continue;
- mb->context2 = NULL;
+ mb->ctx_ndlp = NULL;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
!(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
- (ndlp == (struct lpfc_nodelist *) mb->context2)) {
- mp = (struct lpfc_dmabuf *) (mb->context1);
+ (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
+ mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
if (mp) {
__lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
@@ -5007,7 +5137,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
mbox->vport = vport;
- mbox->context2 = ndlp;
+ mbox->ctx_ndlp = ndlp;
rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
@@ -5772,12 +5902,12 @@ void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
struct lpfc_vport *vport = pmb->vport;
- pmb->context1 = NULL;
- pmb->context2 = NULL;
+ pmb->ctx_buf = NULL;
+ pmb->ctx_ndlp = NULL;
if (phba->sli_rev < LPFC_SLI_REV4)
ndlp->nlp_rpi = mb->un.varWords[0];
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 009aa0eee040..ec1227018913 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -115,6 +115,7 @@ struct lpfc_sli_ct_request {
uint32_t PortID;
struct gid {
uint8_t PortType; /* for GID_PT requests */
+#define GID_PT_N_PORT 1
uint8_t DomainScope;
uint8_t AreaScope;
uint8_t Fc4Type; /* for GID_FT requests */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index bbd0a57e953f..c15b9b6fb840 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -197,6 +197,10 @@ struct lpfc_sli_intf {
#define LPFC_FCP_SCHED_ROUND_ROBIN 0
#define LPFC_FCP_SCHED_BY_CPU 1
+/* Algorithms for NameServer Query after RSCN */
+#define LPFC_NS_QUERY_GID_FT 0
+#define LPFC_NS_QUERY_GID_PT 1
+
/* Delay Multiplier constant */
#define LPFC_DMULT_CONST 651042
#define LPFC_DMULT_MAX 1023
@@ -1031,6 +1035,7 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS 0x21
#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE 0x22
#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK 0x23
+#define LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE 0x42
/* Low level Opcodes */
#define LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION 0x37
@@ -2777,6 +2782,9 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_lnk_ldv_SHIFT 8
#define lpfc_mbx_rd_conf_lnk_ldv_MASK 0x00000001
#define lpfc_mbx_rd_conf_lnk_ldv_WORD word2
+#define lpfc_mbx_rd_conf_trunk_SHIFT 12
+#define lpfc_mbx_rd_conf_trunk_MASK 0x0000000F
+#define lpfc_mbx_rd_conf_trunk_WORD word2
#define lpfc_mbx_rd_conf_topology_SHIFT 24
#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
#define lpfc_mbx_rd_conf_topology_WORD word2
@@ -3512,6 +3520,15 @@ struct lpfc_mbx_set_host_data {
uint8_t data[LPFC_HOST_OS_DRIVER_VERSION_SIZE];
};
+struct lpfc_mbx_set_trunk_mode {
+ struct mbox_header header;
+ uint32_t word0;
+#define lpfc_mbx_set_trunk_mode_WORD word0
+#define lpfc_mbx_set_trunk_mode_SHIFT 0
+#define lpfc_mbx_set_trunk_mode_MASK 0xFF
+ uint32_t word1;
+ uint32_t word2;
+};
struct lpfc_mbx_get_sli4_parameters {
struct mbox_header header;
@@ -3833,6 +3850,9 @@ struct lpfc_mbx_wr_object {
#define lpfc_wr_object_eof_SHIFT 31
#define lpfc_wr_object_eof_MASK 0x00000001
#define lpfc_wr_object_eof_WORD word4
+#define lpfc_wr_object_eas_SHIFT 29
+#define lpfc_wr_object_eas_MASK 0x00000001
+#define lpfc_wr_object_eas_WORD word4
#define lpfc_wr_object_write_length_SHIFT 0
#define lpfc_wr_object_write_length_MASK 0x00FFFFFF
#define lpfc_wr_object_write_length_WORD word4
@@ -3843,6 +3863,15 @@ struct lpfc_mbx_wr_object {
} request;
struct {
uint32_t actual_write_length;
+ uint32_t word5;
+#define lpfc_wr_object_change_status_SHIFT 0
+#define lpfc_wr_object_change_status_MASK 0x000000FF
+#define lpfc_wr_object_change_status_WORD word5
+#define LPFC_CHANGE_STATUS_NO_RESET_NEEDED 0x00
+#define LPFC_CHANGE_STATUS_PHYS_DEV_RESET 0x01
+#define LPFC_CHANGE_STATUS_FW_RESET 0x02
+#define LPFC_CHANGE_STATUS_PORT_MIGRATION 0x04
+#define LPFC_CHANGE_STATUS_PCI_RESET 0x05
} response;
} u;
};
@@ -3911,6 +3940,7 @@ struct lpfc_mqe {
struct lpfc_mbx_set_feature set_feature;
struct lpfc_mbx_memory_dump_type3 mem_dump_type3;
struct lpfc_mbx_set_host_data set_host_data;
+ struct lpfc_mbx_set_trunk_mode set_trunk_mode;
struct lpfc_mbx_nop nop;
struct lpfc_mbx_set_ras_fwlog ras_fwlog;
} un;
@@ -4047,6 +4077,23 @@ struct lpfc_acqe_grp5 {
uint32_t trailer;
};
+static char *const trunk_errmsg[] = { /* map errcode */
+ "", /* There is no such error code at index 0*/
+ "link negotiated speed does not match existing"
+ " trunk - link was \"low\" speed",
+ "link negotiated speed does not match"
+ " existing trunk - link was \"middle\" speed",
+ "link negotiated speed does not match existing"
+ " trunk - link was \"high\" speed",
+ "Attached to non-trunking port - F_Port",
+ "Attached to non-trunking port - N_Port",
+ "FLOGI response timeout",
+ "non-FLOGI frame received",
+ "Invalid FLOGI response",
+ "Trunking initialization protocol",
+ "Trunk peer device mismatch",
+};
+
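/*
 * Aside: trunk_errmsg[] has entries for fault codes 0x0..0xA only, so a
 * lookup must range-check first.  Minimal sketch (the helper name is
 * illustrative; the fallback string matches the one used in this diff):
 */
static inline const char *trunk_errmsg_str(uint8_t err)
{
	return (err > 0xA) ? "UNDEFINED. update driver." : trunk_errmsg[err];
}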
struct lpfc_acqe_fc_la {
uint32_t word0;
#define lpfc_acqe_fc_la_speed_SHIFT 24
@@ -4080,6 +4127,7 @@ struct lpfc_acqe_fc_la {
#define LPFC_FC_LA_TYPE_MDS_LINK_DOWN 0x4
#define LPFC_FC_LA_TYPE_MDS_LOOPBACK 0x5
#define LPFC_FC_LA_TYPE_UNEXP_WWPN 0x6
+#define LPFC_FC_LA_TYPE_TRUNKING_EVENT 0x7
#define lpfc_acqe_fc_la_port_type_SHIFT 6
#define lpfc_acqe_fc_la_port_type_MASK 0x00000003
#define lpfc_acqe_fc_la_port_type_WORD word0
@@ -4088,6 +4136,32 @@ struct lpfc_acqe_fc_la {
#define lpfc_acqe_fc_la_port_number_SHIFT 0
#define lpfc_acqe_fc_la_port_number_MASK 0x0000003F
#define lpfc_acqe_fc_la_port_number_WORD word0
+
+/* Attention Type is 0x07 (Trunking Event) word0 */
+#define lpfc_acqe_fc_la_trunk_link_status_port0_SHIFT 16
+#define lpfc_acqe_fc_la_trunk_link_status_port0_MASK 0x0000001
+#define lpfc_acqe_fc_la_trunk_link_status_port0_WORD word0
+#define lpfc_acqe_fc_la_trunk_link_status_port1_SHIFT 17
+#define lpfc_acqe_fc_la_trunk_link_status_port1_MASK 0x0000001
+#define lpfc_acqe_fc_la_trunk_link_status_port1_WORD word0
+#define lpfc_acqe_fc_la_trunk_link_status_port2_SHIFT 18
+#define lpfc_acqe_fc_la_trunk_link_status_port2_MASK 0x0000001
+#define lpfc_acqe_fc_la_trunk_link_status_port2_WORD word0
+#define lpfc_acqe_fc_la_trunk_link_status_port3_SHIFT 19
+#define lpfc_acqe_fc_la_trunk_link_status_port3_MASK 0x0000001
+#define lpfc_acqe_fc_la_trunk_link_status_port3_WORD word0
+#define lpfc_acqe_fc_la_trunk_config_port0_SHIFT 20
+#define lpfc_acqe_fc_la_trunk_config_port0_MASK 0x0000001
+#define lpfc_acqe_fc_la_trunk_config_port0_WORD word0
+#define lpfc_acqe_fc_la_trunk_config_port1_SHIFT 21
+#define lpfc_acqe_fc_la_trunk_config_port1_MASK 0x0000001
+#define lpfc_acqe_fc_la_trunk_config_port1_WORD word0
+#define lpfc_acqe_fc_la_trunk_config_port2_SHIFT 22
+#define lpfc_acqe_fc_la_trunk_config_port2_MASK 0x0000001
+#define lpfc_acqe_fc_la_trunk_config_port2_WORD word0
+#define lpfc_acqe_fc_la_trunk_config_port3_SHIFT 23
+#define lpfc_acqe_fc_la_trunk_config_port3_MASK 0x0000001
+#define lpfc_acqe_fc_la_trunk_config_port3_WORD word0
uint32_t word1;
#define lpfc_acqe_fc_la_llink_spd_SHIFT 16
#define lpfc_acqe_fc_la_llink_spd_MASK 0x0000FFFF
@@ -4095,6 +4169,12 @@ struct lpfc_acqe_fc_la {
#define lpfc_acqe_fc_la_fault_SHIFT 0
#define lpfc_acqe_fc_la_fault_MASK 0x000000FF
#define lpfc_acqe_fc_la_fault_WORD word1
+#define lpfc_acqe_fc_la_trunk_fault_SHIFT 0
+#define lpfc_acqe_fc_la_trunk_fault_MASK 0x0000000F
+#define lpfc_acqe_fc_la_trunk_fault_WORD word1
+#define lpfc_acqe_fc_la_trunk_linkmask_SHIFT 4
+#define lpfc_acqe_fc_la_trunk_linkmask_MASK 0x000000F
+#define lpfc_acqe_fc_la_trunk_linkmask_WORD word1
#define LPFC_FC_LA_FAULT_NONE 0x0
#define LPFC_FC_LA_FAULT_LOCAL 0x1
#define LPFC_FC_LA_FAULT_REMOTE 0x2
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 68d62d55a3a5..c1c36812c3d2 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -447,19 +447,19 @@ lpfc_config_port_post(struct lpfc_hba *phba)
"READ_SPARM mbxStatus x%x\n",
mb->mbxCommand, mb->mbxStatus);
phba->link_state = LPFC_HBA_ERROR;
- mp = (struct lpfc_dmabuf *) pmb->context1;
+ mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
return -EIO;
}
- mp = (struct lpfc_dmabuf *) pmb->context1;
+ mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
- pmb->context1 = NULL;
+ pmb->ctx_buf = NULL;
lpfc_update_vport_wwn(vport);
/* Update the fc_host data structures with new wwn. */
@@ -1801,7 +1801,12 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
lpfc_offline(phba);
/* release interrupt for possible resource change */
lpfc_sli4_disable_intr(phba);
- lpfc_sli_brdrestart(phba);
+ rc = lpfc_sli_brdrestart(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6309 Failed to restart board\n");
+ return rc;
+ }
/* request and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
@@ -4106,6 +4111,32 @@ finished:
return stat;
}
+void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ fc_host_supported_speeds(shost) = 0;
+ if (phba->lmt & LMT_128Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
+ if (phba->lmt & LMT_64Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
+ if (phba->lmt & LMT_32Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
+ if (phba->lmt & LMT_16Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
+ if (phba->lmt & LMT_10Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
+ if (phba->lmt & LMT_8Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
+ if (phba->lmt & LMT_4Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
+ if (phba->lmt & LMT_2Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
+ if (phba->lmt & LMT_1Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
+}
+
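/*
 * Aside: the LMT_* to FC_PORTSPEED_* translation above is a straight
 * bit-for-bit mapping; a table-driven equivalent would look like this
 * (hedged sketch, speed_map[] is illustrative):
 */
	static const struct {
		uint32_t lmt;
		uint32_t portspeed;
	} speed_map[] = {
		{ LMT_128Gb, FC_PORTSPEED_128GBIT },
		{ LMT_64Gb,  FC_PORTSPEED_64GBIT },
		{ LMT_32Gb,  FC_PORTSPEED_32GBIT },
		{ LMT_16Gb,  FC_PORTSPEED_16GBIT },
		{ LMT_10Gb,  FC_PORTSPEED_10GBIT },
		{ LMT_8Gb,   FC_PORTSPEED_8GBIT },
		{ LMT_4Gb,   FC_PORTSPEED_4GBIT },
		{ LMT_2Gb,   FC_PORTSPEED_2GBIT },
		{ LMT_1Gb,   FC_PORTSPEED_1GBIT },
	};
	int i;

	fc_host_supported_speeds(shost) = 0;
	for (i = 0; i < ARRAY_SIZE(speed_map); i++)
		if (phba->lmt & speed_map[i].lmt)
			fc_host_supported_speeds(shost) |= speed_map[i].portspeed;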
/**
* lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
* @shost: pointer to SCSI host data structure.
@@ -4133,23 +4164,7 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
sizeof fc_host_symbolic_name(shost));
- fc_host_supported_speeds(shost) = 0;
- if (phba->lmt & LMT_64Gb)
- fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
- if (phba->lmt & LMT_32Gb)
- fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
- if (phba->lmt & LMT_16Gb)
- fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
- if (phba->lmt & LMT_10Gb)
- fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
- if (phba->lmt & LMT_8Gb)
- fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
- if (phba->lmt & LMT_4Gb)
- fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
- if (phba->lmt & LMT_2Gb)
- fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
- if (phba->lmt & LMT_1Gb)
- fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
+ lpfc_host_supported_speeds_set(shost);
fc_host_maxframe_size(shost) =
(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
@@ -4467,6 +4482,9 @@ lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
case LPFC_FC_LA_SPEED_64G:
port_speed = 64000;
break;
+ case LPFC_FC_LA_SPEED_128G:
+ port_speed = 128000;
+ break;
default:
port_speed = 0;
}
@@ -4609,6 +4627,136 @@ out_free_pmb:
}
/**
+ * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
+ * topology.
+ * @phba: pointer to lpfc hba data structure.
+ * @speed_code: asynchronous event link speed code.
+ *
+ * This routine parses the given SLI4 async event link speed code into the
+ * corresponding Read topology link speed value.
+ *
+ * Return: link speed in terms of Read topology.
+ **/
+static uint8_t
+lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
+{
+ uint8_t port_speed;
+
+ switch (speed_code) {
+ case LPFC_FC_LA_SPEED_1G:
+ port_speed = LPFC_LINK_SPEED_1GHZ;
+ break;
+ case LPFC_FC_LA_SPEED_2G:
+ port_speed = LPFC_LINK_SPEED_2GHZ;
+ break;
+ case LPFC_FC_LA_SPEED_4G:
+ port_speed = LPFC_LINK_SPEED_4GHZ;
+ break;
+ case LPFC_FC_LA_SPEED_8G:
+ port_speed = LPFC_LINK_SPEED_8GHZ;
+ break;
+ case LPFC_FC_LA_SPEED_16G:
+ port_speed = LPFC_LINK_SPEED_16GHZ;
+ break;
+ case LPFC_FC_LA_SPEED_32G:
+ port_speed = LPFC_LINK_SPEED_32GHZ;
+ break;
+ case LPFC_FC_LA_SPEED_64G:
+ port_speed = LPFC_LINK_SPEED_64GHZ;
+ break;
+ case LPFC_FC_LA_SPEED_128G:
+ port_speed = LPFC_LINK_SPEED_128GHZ;
+ break;
+ case LPFC_FC_LA_SPEED_256G:
+ port_speed = LPFC_LINK_SPEED_256GHZ;
+ break;
+ default:
+ port_speed = 0;
+ break;
+ }
+
+ return port_speed;
+}
+
+#define trunk_link_status(__idx)\
+ bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
+ ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
+ "Link up" : "Link down") : "NA"
+/* Did port __idx report an error? */
+#define trunk_port_fault(__idx)\
+ bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
+ (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
+
+static void
+lpfc_update_trunk_link_status(struct lpfc_hba *phba,
+ struct lpfc_acqe_fc_la *acqe_fc)
+{
+ uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
+ uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
+
+ phba->sli4_hba.link_state.speed =
+ lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
+ bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
+
+ phba->sli4_hba.link_state.logical_speed =
+ bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc);
+ /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
+ phba->fc_linkspeed =
+ lpfc_async_link_speed_to_read_top(
+ phba,
+ bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
+
+ if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
+ phba->trunk_link.link0.state =
+ bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
+ ? LPFC_LINK_UP : LPFC_LINK_DOWN;
+ phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
+ }
+ if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
+ phba->trunk_link.link1.state =
+ bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
+ ? LPFC_LINK_UP : LPFC_LINK_DOWN;
+ phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
+ }
+ if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
+ phba->trunk_link.link2.state =
+ bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
+ ? LPFC_LINK_UP : LPFC_LINK_DOWN;
+ phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
+ }
+ if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
+ phba->trunk_link.link3.state =
+ bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
+ ? LPFC_LINK_UP : LPFC_LINK_DOWN;
+ phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2910 Async FC Trunking Event - Speed:%d\n"
+ "\tLogical speed:%d "
+ "port0: %s port1: %s port2: %s port3: %s\n",
+ phba->sli4_hba.link_state.speed,
+ phba->sli4_hba.link_state.logical_speed,
+ trunk_link_status(0), trunk_link_status(1),
+ trunk_link_status(2), trunk_link_status(3));
+
+ if (port_fault)
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3202 trunk error:0x%x (%s) seen on port0:%s "
+ /*
+ * SLI-4: We have only 0xA error codes
+ * defined as of now. Print an appropriate
+ * message in case the driver needs to be updated.
+ */
+ "port1:%s port2:%s port3:%s\n", err, err > 0xA ?
+ "UNDEFINED. update driver." : trunk_errmsg[err],
+ trunk_port_fault(0), trunk_port_fault(1),
+ trunk_port_fault(2), trunk_port_fault(3));
+}
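
For reference, the trunk_link_status()/trunk_port_fault() macros above reduce, per port, to a configured-bit check followed by a state or fault lookup. A minimal standalone sketch of that decode, with the bf_get() reads of the ACQE fields replaced by plain bitmask parameters (a sketch only, names hypothetical, not the driver's API):

    /* Decode one trunk port's display state from the event masks. */
    static const char *trunk_port_state(unsigned int idx,
                                        unsigned int configured,
                                        unsigned int up_mask)
    {
        if (!(configured & (1u << idx)))
            return "NA";        /* port is not part of the trunk */
        return (up_mask & (1u << idx)) ? "Link up" : "Link down";
    }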
+
+/**
* lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
* @phba: pointer to lpfc hba data structure.
* @acqe_fc: pointer to the async fc completion queue entry.
@@ -4633,6 +4781,13 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
bf_get(lpfc_trailer_type, acqe_fc));
return;
}
+
+ if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
+ LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
+ lpfc_update_trunk_link_status(phba, acqe_fc);
+ return;
+ }
+
/* Keep the link status for extra SLI4 state machine reference */
phba->sli4_hba.link_state.speed =
lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
@@ -4762,6 +4917,8 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
struct temp_event temp_event_data;
struct lpfc_acqe_misconfigured_event *misconfigured;
struct Scsi_Host *shost;
+ struct lpfc_vport **vports;
+ int rc, i;
evt_type = bf_get(lpfc_trailer_type, acqe_sli);
@@ -4887,6 +5044,25 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
sprintf(message, "Unknown event status x%02x", status);
break;
}
+
+ /* Issue READ_CONFIG mbox command to refresh supported speeds */
+ rc = lpfc_sli4_read_config(phba);
+ if (rc) {
+ phba->lmt = 0;
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3194 Unable to retrieve supported "
+ "speeds, rc = 0x%x\n", rc);
+ }
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL;
+ i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ lpfc_host_supported_speeds_set(shost);
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+
phba->sli4_hba.lnk_info.optic_state = status;
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3176 Port Name %c %s\n", port_name, message);
@@ -5044,7 +5220,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
break;
}
/* If fast FCF failover rescan event is pending, do nothing */
- if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
+ if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
spin_unlock_irq(&phba->hbalock);
break;
}
@@ -7181,26 +7357,19 @@ lpfc_post_init_setup(struct lpfc_hba *phba)
static int
lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
{
- struct pci_dev *pdev;
+ struct pci_dev *pdev = phba->pcidev;
unsigned long bar0map_len, bar2map_len;
int i, hbq_count;
void *ptr;
int error = -ENODEV;
- /* Obtain PCI device reference */
- if (!phba->pcidev)
+ if (!pdev)
return error;
- else
- pdev = phba->pcidev;
/* Set the device DMA mask size */
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
- || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
- || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
- return error;
- }
- }
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ return error;
/* Get the bus address of Bar0 and Bar2 and the number of bytes
* required by each mapping.
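
The hunk above folds the nested pci_set_dma_mask()/pci_set_consistent_dma_mask() pairs into dma_set_mask_and_coherent(), which sets the streaming and coherent masks in one call. A minimal sketch of the 64-bit-first, 32-bit-fallback idiom (function name hypothetical):

    #include <linux/dma-mapping.h>

    static int example_set_dma_masks(struct device *dev)
    {
        /* Prefer 64-bit DMA addressing; fall back to 32-bit when the
         * platform or IOMMU configuration cannot honor the wider mask.
         */
        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
            dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
            return -ENODEV;     /* no usable DMA addressing */
        return 0;
    }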
@@ -7779,6 +7948,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
}
+ phba->sli4_hba.conf_trunk =
+ bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
phba->sli4_hba.extents_in_use =
bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
phba->sli4_hba.max_cfg_param.max_xri =
@@ -7787,6 +7958,9 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
phba->sli4_hba.max_cfg_param.max_vpi =
bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
+ /* Limit the max we support */
+ if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
+ phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
phba->sli4_hba.max_cfg_param.vpi_base =
bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
phba->sli4_hba.max_cfg_param.max_rpi =
@@ -9562,25 +9736,18 @@ out:
static int
lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
{
- struct pci_dev *pdev;
+ struct pci_dev *pdev = phba->pcidev;
unsigned long bar0map_len, bar1map_len, bar2map_len;
int error = -ENODEV;
uint32_t if_type;
- /* Obtain PCI device reference */
- if (!phba->pcidev)
+ if (!pdev)
return error;
- else
- pdev = phba->pcidev;
/* Set the device DMA mask size */
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
- || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
- || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
- return error;
- }
- }
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ return error;
/*
* The BARs and register set definitions and offset locations are
@@ -10523,12 +10690,7 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
kthread_stop(phba->worker_thread);
/* Disable FW logging to host memory */
- writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
- phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
-
- /* Free RAS DMA memory */
- if (phba->ras_fwlog.ras_enabled == true)
- lpfc_sli4_ras_dma_free(phba);
+ lpfc_ras_stop_fwlog(phba);
/* Unset the queues shared with the hardware then release all
* allocated resources.
@@ -10539,6 +10701,10 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Reset SLI4 HBA FCoE function */
lpfc_pci_function_reset(phba);
+ /* Free RAS DMA memory */
+ if (phba->ras_fwlog.ras_enabled)
+ lpfc_sli4_ras_dma_free(phba);
+
/* Stop the SLI4 device port */
phba->pport->work_port_events = 0;
}
@@ -12476,7 +12642,8 @@ lpfc_sli4_ras_init(struct lpfc_hba *phba)
case PCI_DEVICE_ID_LANCER_G6_FC:
case PCI_DEVICE_ID_LANCER_G7_FC:
phba->ras_fwlog.ras_hwsupport = true;
- if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn))
+ if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
+ phba->cfg_ras_fwlog_buffsize)
phba->ras_fwlog.ras_enabled = true;
else
phba->ras_fwlog.ras_enabled = false;
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index deb094fdbb79..f6a5083a621e 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -94,7 +94,7 @@ lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
memset(mp->virt, 0, LPFC_BPL_SIZE);
INIT_LIST_HEAD(&mp->list);
/* save address for completion */
- pmb->context1 = (uint8_t *)mp;
+ pmb->ctx_buf = (uint8_t *)mp;
mb->un.varWords[3] = putPaddrLow(mp->phys);
mb->un.varWords[4] = putPaddrHigh(mp->phys);
mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
@@ -139,7 +139,7 @@ lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
void *ctx;
mb = &pmb->u.mb;
- ctx = pmb->context2;
+ ctx = pmb->ctx_buf;
/* Setup to dump VPD region */
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -151,7 +151,7 @@ lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
mb->un.varDmp.co = 0;
mb->un.varDmp.resp_offset = 0;
- pmb->context2 = ctx;
+ pmb->ctx_buf = ctx;
mb->mbxOwner = OWN_HOST;
return;
}
@@ -172,7 +172,7 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb = &pmb->u.mb;
/* Save context so that we can restore after memset */
- ctx = pmb->context2;
+ ctx = pmb->ctx_buf;
/* Setup to dump VPD region */
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
@@ -186,7 +186,7 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
mb->un.varDmp.co = 0;
mb->un.varDmp.resp_offset = 0;
- pmb->context2 = ctx;
+ pmb->ctx_buf = ctx;
return;
}
@@ -304,7 +304,7 @@ lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
/* Save address for later completion and set the owner to host so that
* the FW knows this mailbox is available for processing.
*/
- pmb->context1 = (uint8_t *)mp;
+ pmb->ctx_buf = (uint8_t *)mp;
mb->mbxOwner = OWN_HOST;
return (0);
}
@@ -513,9 +513,9 @@ lpfc_init_link(struct lpfc_hba * phba,
break;
}
- if (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
- mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
- /* Failover is not tried for Lancer G6 */
+ if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
+ phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
+ mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
}
@@ -631,7 +631,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
/* save address for completion */
- pmb->context1 = mp;
+ pmb->ctx_buf = mp;
return (0);
}
@@ -783,7 +783,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
memcpy(sparam, param, sizeof (struct serv_parm));
/* save address for completion */
- pmb->context1 = (uint8_t *) mp;
+ pmb->ctx_buf = (uint8_t *)mp;
mb->mbxCommand = MBX_REG_LOGIN64;
mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
@@ -858,7 +858,7 @@ lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->context1 = NULL;
+ mbox->ctx_ndlp = NULL;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
mempool_free(mbox, phba->mbox_mem_pool);
@@ -2288,7 +2288,7 @@ lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
INIT_LIST_HEAD(&mp->list);
/* save address for completion */
- mbox->context1 = (uint8_t *) mp;
+ mbox->ctx_buf = (uint8_t *)mp;
mb->mbxCommand = MBX_DUMP_MEMORY;
mb->un.varDmp.type = DMP_NV_PARAMS;
@@ -2305,7 +2305,7 @@ lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
MAILBOX_t *mb;
int rc = FAILURE;
struct lpfc_rdp_context *rdp_context =
- (struct lpfc_rdp_context *)(mboxq->context2);
+ (struct lpfc_rdp_context *)(mboxq->ctx_ndlp);
mb = &mboxq->u.mb;
if (mb->mbxStatus)
@@ -2323,9 +2323,9 @@ mbx_failed:
static void
lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) mbox->context1;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
struct lpfc_rdp_context *rdp_context =
- (struct lpfc_rdp_context *)(mbox->context2);
+ (struct lpfc_rdp_context *)(mbox->ctx_ndlp);
if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
goto error_mbuf_free;
@@ -2341,7 +2341,7 @@ lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
lpfc_read_lnk_stat(phba, mbox);
mbox->vport = rdp_context->ndlp->vport;
mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat;
- mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
+ mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
goto error_cmd_free;
@@ -2359,9 +2359,9 @@ void
lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
int rc;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (mbox->context1);
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
struct lpfc_rdp_context *rdp_context =
- (struct lpfc_rdp_context *)(mbox->context2);
+ (struct lpfc_rdp_context *)(mbox->ctx_ndlp);
if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
goto error;
@@ -2375,7 +2375,7 @@ lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
INIT_LIST_HEAD(&mp->list);
/* save address for completion */
- mbox->context1 = mp;
+ mbox->ctx_buf = mp;
mbox->vport = rdp_context->ndlp->vport;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
@@ -2391,7 +2391,7 @@ lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a2;
- mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
+ mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
goto error;
@@ -2436,7 +2436,7 @@ lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
/* save address for completion */
- mbox->context1 = mp;
+ mbox->ctx_buf = mp;
bf_set(lpfc_mbx_memory_dump_type3_type,
&mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 9c22a2c93462..66191fa35f63 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -330,7 +330,7 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
/* Free memory used in mailbox queue back to mailbox memory pool */
list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
- mp = (struct lpfc_dmabuf *) (mbox->context1);
+ mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
if (mp) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
@@ -340,7 +340,7 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
}
/* Free memory used in mailbox cmpl list back to mailbox memory pool */
list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
- mp = (struct lpfc_dmabuf *) (mbox->context1);
+ mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
if (mp) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
@@ -354,7 +354,7 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
if (psli->mbox_active) {
mbox = psli->mbox_active;
- mp = (struct lpfc_dmabuf *) (mbox->context1);
+ mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
if (mp) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 269808e8480f..96bc3789a166 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -467,7 +467,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
/*
- * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
+ * mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
* command issued in lpfc_cmpl_els_acc().
*/
mbox->vport = vport;
@@ -535,8 +535,8 @@ lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct lpfc_nodelist *ndlp;
uint32_t cmd;
- elsiocb = (struct lpfc_iocbq *)mboxq->context1;
- ndlp = (struct lpfc_nodelist *) mboxq->context2;
+ elsiocb = (struct lpfc_iocbq *)mboxq->ctx_buf;
+ ndlp = (struct lpfc_nodelist *)mboxq->ctx_ndlp;
vport = mboxq->vport;
cmd = elsiocb->drvrTimeout;
@@ -836,7 +836,9 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ spin_unlock_irq(shost->host_lock);
return 0;
}
@@ -851,7 +853,10 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
return 1;
}
}
+
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ spin_unlock_irq(shost->host_lock);
lpfc_unreg_rpi(vport, ndlp);
return 0;
}
@@ -866,13 +871,26 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* to release a rpi.
**/
void
-lpfc_release_rpi(struct lpfc_hba *phba,
- struct lpfc_vport *vport,
- uint16_t rpi)
+lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp, uint16_t rpi)
{
LPFC_MBOXQ_t *pmb;
int rc;
+ /* If there is already an UNREG in progress for this ndlp,
+ * no need to queue up another one.
+ */
+ if (ndlp->nlp_flag & NLP_UNREG_INP) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "1435 release_rpi SKIP UNREG x%x on "
+ "NPort x%x deferred x%x flg x%x "
+ "Data: %p\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_defer_did,
+ ndlp->nlp_flag, ndlp);
+ return;
+ }
+
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
GFP_KERNEL);
if (!pmb)
@@ -881,6 +899,18 @@ lpfc_release_rpi(struct lpfc_hba *phba,
else {
lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ pmb->vport = vport;
+ pmb->ctx_ndlp = ndlp;
+
+ if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
+ (!(vport->fc_flag & FC_OFFLINE_MODE)))
+ ndlp->nlp_flag |= NLP_UNREG_INP;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "1437 release_rpi UNREG x%x "
+ "on NPort x%x flg x%x\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag);
+
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
mempool_free(pmb, phba->mbox_mem_pool);
@@ -901,7 +931,7 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
(evt == NLP_EVT_CMPL_REG_LOGIN) &&
(!pmb->u.mb.mbxStatus)) {
rpi = pmb->u.mb.un.varWords[0];
- lpfc_release_rpi(phba, vport, rpi);
+ lpfc_release_rpi(phba, vport, ndlp, rpi);
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0271 Illegal State Transition: node x%x "
@@ -1253,7 +1283,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
}
- mbox->context2 = lpfc_nlp_get(ndlp);
+ mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
mbox->vport = vport;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
!= MBX_NOT_FINISHED) {
@@ -1267,7 +1297,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
* command
*/
lpfc_nlp_put(ndlp);
- mp = (struct lpfc_dmabuf *) mbox->context1;
+ mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(mbox, phba->mbox_mem_pool);
@@ -1329,7 +1359,7 @@ lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
if (!(phba->pport->load_flag & FC_UNLOADING) &&
!mb->mbxStatus) {
rpi = pmb->u.mb.un.varWords[0];
- lpfc_release_rpi(phba, vport, rpi);
+ lpfc_release_rpi(phba, vport, ndlp, rpi);
}
return ndlp->nlp_state;
}
@@ -1636,10 +1666,10 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
if ((mb = phba->sli.mbox_active)) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
- (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+ (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
lpfc_nlp_put(ndlp);
- mb->context2 = NULL;
+ mb->ctx_ndlp = NULL;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
}
@@ -1647,8 +1677,8 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
- (ndlp == (struct lpfc_nodelist *) mb->context2)) {
- mp = (struct lpfc_dmabuf *) (mb->context1);
+ (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
+ mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
if (mp) {
__lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
@@ -1770,9 +1800,16 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
} else if (ndlp->nlp_fc4_type == 0) {
- rc = lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID,
- 0, ndlp->nlp_DID);
- return ndlp->nlp_state;
+ /* If we are only configured for FCP, the driver
+ * should just issue PRLI for FCP. Otherwise issue
+ * GFT_ID to determine if remote port supports NVME.
+ */
+ if (phba->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
+ rc = lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID,
+ 0, ndlp->nlp_DID);
+ return ndlp->nlp_state;
+ }
+ ndlp->nlp_fc4_type = NLP_FC4_FCP;
}
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
@@ -2863,8 +2900,9 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0211 DSM in event x%x on NPort x%x in "
- "state %d Data: x%x\n",
- evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
+ "state %d rpi x%x Data: x%x x%x\n",
+ evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi,
+ ndlp->nlp_flag, ndlp->nlp_fc4_type);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
"DSM in: evt:%d ste:%d did:x%x",
@@ -2876,8 +2914,9 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* DSM out state <rc> on NPort <nlp_DID> */
if (got_ndlp) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0212 DSM out state %d on NPort x%x Data: x%x\n",
- rc, ndlp->nlp_DID, ndlp->nlp_flag);
+ "0212 DSM out state %d on NPort x%x "
+ "rpi x%x Data: x%x\n",
+ rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
"DSM out: ste:%d did:x%x flg:x%x",
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index ba831def9301..4c66b19e6199 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1855,7 +1855,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
/* word 7 */
- bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
nvmereq_wqe->iocb.ulpClass);
@@ -1870,7 +1869,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
abts_buf->iotag);
/* word 10 */
- bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, nvmereq_wqe->hba_wqidx);
bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index baed2b891efb..b4f1a840b3b4 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -2734,6 +2734,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
int prot_group_type = 0;
int fcpdl;
+ struct lpfc_vport *vport = phba->pport;
/*
* Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
@@ -2839,6 +2840,14 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
*/
iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
+ /*
+ * For first burst, we may need to adjust the initial transfer
+ * length for DIF
+ */
+ if (iocb_cmd->un.fcpi.fcpi_XRdy &&
+ (fcpdl < vport->cfg_first_burst_size))
+ iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
+
return 0;
err:
if (lpfc_cmd->seg_cnt)
@@ -3403,6 +3412,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
int prot_group_type = 0;
int fcpdl;
+ struct lpfc_vport *vport = phba->pport;
/*
* Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
@@ -3519,6 +3529,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
/*
+ * For first burst, we may need to adjust the initial transfer
+ * length for DIF
+ */
+ if (iocb_cmd->un.fcpi.fcpi_XRdy &&
+ (fcpdl < vport->cfg_first_burst_size))
+ iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
+
+ /*
* If the OAS driver feature is enabled and the lun is enabled for
* OAS, set the oas iocb related flags.
*/
@@ -4163,7 +4181,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
/* If pCmd was set to NULL from abort path, do not call scsi_done */
if (xchg(&lpfc_cmd->pCmd, NULL) == NULL) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "0711 FCP cmd already NULL, sid: 0x%06x, "
+ "5688 FCP cmd already NULL, sid: 0x%06x, "
"did: 0x%06x, oxid: 0x%04x\n",
vport->fc_myDID,
(pnode) ? pnode->nlp_DID : 0,
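
The xchg() in this hunk atomically claims the command pointer, so the completion and abort paths race to a single winner. The claim idiom, sketched with 'lpfc_cmd' standing in for the driver's per-command structure (a fragment, not driver code):

    /* Both the completion and the abort path run this; whichever
     * swaps the pointer to NULL first owns the command.
     */
    struct scsi_cmnd *cmd = xchg(&lpfc_cmd->pCmd, NULL);

    if (!cmd)
        return;     /* the other path already finished the command */
    /* ...only the winner reaches the scsi_done() call... */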
@@ -4442,6 +4460,66 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
}
/**
+ * lpfc_check_pci_resettable - Walk the devices on the pci_dev's bus to check
+ * whether issuing a pci_bus_reset is safe
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * Walks the bus_list to ensure that only PCI devices with an Emulex
+ * vendor ID, device IDs that support hot reset, and at most one
+ * occurrence of function 0 are present.
+ *
+ * Returns:
+ * -EBADSLT if an invalid device is detected
+ * 0 on success
+ */
+int
+lpfc_check_pci_resettable(const struct lpfc_hba *phba)
+{
+ const struct pci_dev *pdev = phba->pcidev;
+ struct pci_dev *ptr = NULL;
+ u8 counter = 0;
+
+ /* Walk the list of devices on the pci_dev's bus */
+ list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
+ /* Check for Emulex Vendor ID */
+ if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8346 Non-Emulex vendor found: "
+ "0x%04x\n", ptr->vendor);
+ return -EBADSLT;
+ }
+
+ /* Check for valid Emulex Device ID */
+ switch (ptr->device) {
+ case PCI_DEVICE_ID_LANCER_FC:
+ case PCI_DEVICE_ID_LANCER_G6_FC:
+ case PCI_DEVICE_ID_LANCER_G7_FC:
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8347 Invalid device found: "
+ "0x%04x\n", ptr->device);
+ return -EBADSLT;
+ }
+
+ /* Check for only one function 0 ID to ensure only one HBA on
+ * secondary bus
+ */
+ if (ptr->devfn == 0) {
+ if (++counter > 1) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8348 More than one device on "
+ "secondary bus found\n");
+ return -EBADSLT;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
* lpfc_info - Info entry point of scsi_host_template data structure
* @host: The scsi host for which this call is being executed.
*
@@ -4455,32 +4533,53 @@ lpfc_info(struct Scsi_Host *host)
{
struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
struct lpfc_hba *phba = vport->phba;
- int len, link_speed = 0;
- static char lpfcinfobuf[384];
+ int link_speed = 0;
+ static char lpfcinfobuf[384];
+ char tmp[384] = {0};
- memset(lpfcinfobuf,0,384);
+ memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
if (phba && phba->pcidev){
- strncpy(lpfcinfobuf, phba->ModelDesc, 256);
- len = strlen(lpfcinfobuf);
- snprintf(lpfcinfobuf + len,
- 384-len,
- " on PCI bus %02x device %02x irq %d",
- phba->pcidev->bus->number,
- phba->pcidev->devfn,
- phba->pcidev->irq);
- len = strlen(lpfcinfobuf);
+ /* Model Description */
+ scnprintf(tmp, sizeof(tmp), "%s", phba->ModelDesc);
+ if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
+ sizeof(lpfcinfobuf))
+ goto buffer_done;
+
+ /* PCI Info */
+ scnprintf(tmp, sizeof(tmp),
+ " on PCI bus %02x device %02x irq %d",
+ phba->pcidev->bus->number, phba->pcidev->devfn,
+ phba->pcidev->irq);
+ if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
+ sizeof(lpfcinfobuf))
+ goto buffer_done;
+
+ /* Port Number */
if (phba->Port[0]) {
- snprintf(lpfcinfobuf + len,
- 384-len,
- " port %s",
- phba->Port);
+ scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
+ if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
+ sizeof(lpfcinfobuf))
+ goto buffer_done;
}
- len = strlen(lpfcinfobuf);
+
+ /* Link Speed */
link_speed = lpfc_sli_port_speed_get(phba);
- if (link_speed != 0)
- snprintf(lpfcinfobuf + len, 384-len,
- " Logical Link Speed: %d Mbps", link_speed);
+ if (link_speed != 0) {
+ scnprintf(tmp, sizeof(tmp),
+ " Logical Link Speed: %d Mbps", link_speed);
+ if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
+ sizeof(lpfcinfobuf))
+ goto buffer_done;
+ }
+
+ /* PCI resettable */
+ if (!lpfc_check_pci_resettable(phba)) {
+ scnprintf(tmp, sizeof(tmp), " PCI resettable");
+ strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
+ }
}
+
+buffer_done:
return lpfcinfobuf;
}
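
The rewritten lpfc_info() above replaces manual strlen()/snprintf() offset bookkeeping with bounded appends: strlcat() returns the total length it tried to create, so a result >= the destination size signals truncation and ends the build early. The idiom in isolation (a sketch, helper name hypothetical):

    #include <linux/string.h>

    /* Append a fragment; return false when it no longer fits. */
    static bool append_bounded(char *dst, size_t dst_sz, const char *frag)
    {
        /* strlcat() returns strlen(original dst) + strlen(frag);
         * a value >= dst_sz means frag was truncated on append.
         */
        return strlcat(dst, frag, dst_sz) < dst_sz;
    }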
@@ -6036,7 +6135,6 @@ struct scsi_host_template lpfc_template_nvme = {
.this_id = -1,
.sg_tablesize = 1,
.cmd_per_lun = 1,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = lpfc_hba_attrs,
.max_sectors = 0xFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
@@ -6061,7 +6159,6 @@ struct scsi_host_template lpfc_template_no_hr = {
.this_id = -1,
.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
.cmd_per_lun = LPFC_CMD_PER_LUN,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = lpfc_hba_attrs,
.max_sectors = 0xFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
@@ -6088,7 +6185,6 @@ struct scsi_host_template lpfc_template = {
.this_id = -1,
.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
.cmd_per_lun = LPFC_CMD_PER_LUN,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = lpfc_hba_attrs,
.max_sectors = 0xFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
@@ -6113,7 +6209,6 @@ struct scsi_host_template lpfc_vport_template = {
.this_id = -1,
.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
.cmd_per_lun = LPFC_CMD_PER_LUN,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = lpfc_vport_attrs,
.max_sectors = 0xFFFF,
.change_queue_depth = scsi_change_queue_depth,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index cc99859774ff..b759b089432c 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -194,6 +194,10 @@ struct lpfc_scsi_buf {
#define NO_MORE_OAS_LUN -1
#define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN
+#ifndef FC_PORTSPEED_128GBIT
+#define FC_PORTSPEED_128GBIT 0x2000
+#endif
+
#define TXRDY_PAYLOAD_LEN 12
int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index b9e5cd79931a..30734caf77e1 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -2456,7 +2456,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
uint16_t rpi, vpi;
int rc;
- mp = (struct lpfc_dmabuf *) (pmb->context1);
+ mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
if (mp) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -2491,9 +2491,35 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
- ndlp = (struct lpfc_nodelist *)pmb->context2;
+ ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
lpfc_nlp_put(ndlp);
- pmb->context2 = NULL;
+ pmb->ctx_buf = NULL;
+ pmb->ctx_ndlp = NULL;
+ }
+
+ if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
+ ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+
+ /* Check to see if there are any deferred events to process */
+ if (ndlp) {
+ lpfc_printf_vlog(
+ vport,
+ KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
+ "1438 UNREG cmpl deferred mbox x%x "
+ "on NPort x%x Data: x%x x%x %p\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
+
+ if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
+ (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+ } else {
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ }
+ }
+ pmb->ctx_ndlp = NULL;
}
/* Check security permission status on INIT_LINK mailbox command */
@@ -2527,21 +2553,46 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_vport *vport = pmb->vport;
struct lpfc_nodelist *ndlp;
- ndlp = pmb->context1;
+ ndlp = pmb->ctx_ndlp;
if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
if (phba->sli_rev == LPFC_SLI_REV4 &&
(bf_get(lpfc_sli_intf_if_type,
&phba->sli4_hba.sli_intf) >=
LPFC_SLI_INTF_IF_TYPE_2)) {
if (ndlp) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
- "0010 UNREG_LOGIN vpi:%x "
- "rpi:%x DID:%x map:%x %p\n",
- vport->vpi, ndlp->nlp_rpi,
- ndlp->nlp_DID,
- ndlp->nlp_usg_map, ndlp);
+ lpfc_printf_vlog(
+ vport, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "0010 UNREG_LOGIN vpi:%x "
+ "rpi:%x DID:%x defer x%x flg x%x "
+ "map:%x %p\n",
+ vport->vpi, ndlp->nlp_rpi,
+ ndlp->nlp_DID, ndlp->nlp_defer_did,
+ ndlp->nlp_flag,
+ ndlp->nlp_usg_map, ndlp);
ndlp->nlp_flag &= ~NLP_LOGO_ACC;
lpfc_nlp_put(ndlp);
+
+ /* Check to see if there are any deferred
+ * events to process
+ */
+ if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
+ (ndlp->nlp_defer_did !=
+ NLP_EVT_NOTHING_PENDING)) {
+ lpfc_printf_vlog(
+ vport, KERN_INFO, LOG_DISCOVERY,
+ "4111 UNREG cmpl deferred "
+ "clr x%x on "
+ "NPort x%x Data: x%x %p\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_defer_did, ndlp);
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ ndlp->nlp_defer_did =
+ NLP_EVT_NOTHING_PENDING;
+ lpfc_issue_els_plogi(
+ vport, ndlp->nlp_DID, 0);
+ } else {
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ }
}
}
}
@@ -4640,6 +4691,8 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
rc = lpfc_sli4_brdreset(phba);
+ if (rc)
+ return rc;
spin_lock_irq(&phba->hbalock);
phba->pport->stopped = 0;
@@ -5228,7 +5281,7 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
goto out_free_mboxq;
}
- mp = (struct lpfc_dmabuf *) mboxq->context1;
+ mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
@@ -6148,6 +6201,25 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
}
/**
+ * lpfc_ras_stop_fwlog - Disable FW logging by the adapter
+ * @phba: Pointer to HBA context object.
+ *
+ * Disable FW logging into host memory on the adapter. Must be
+ * done before reading logs from host memory.
+ **/
+void
+lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
+{
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+
+ ras_fwlog->ras_active = false;
+
+ /* Disable FW logging to host memory */
+ writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
+ phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+}
+
+/**
* lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
* @phba: Pointer to HBA context object.
*
@@ -6211,7 +6283,7 @@ lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
&ras_fwlog->lwpd.phys,
GFP_KERNEL);
if (!ras_fwlog->lwpd.virt) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6185 LWPD Memory Alloc Failed\n");
return -ENOMEM;
@@ -6228,7 +6300,7 @@ lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
goto free_mem;
}
- dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+ dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
LPFC_RAS_MAX_ENTRY_SIZE,
&dmabuf->phys,
GFP_KERNEL);
@@ -6239,7 +6311,6 @@ lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
"6187 DMA Alloc Failed FW logging");
goto free_mem;
}
- memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
dmabuf->buffer_tag = i;
list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
}
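
The switch to dma_zalloc_coherent() is what makes the explicit memset() removed just below redundant: the zeroing variant returns cleared memory in one call. A sketch of the equivalence (wrapper name hypothetical; later kernels fold the zeroing into dma_alloc_coherent() itself and drop this helper):

    #include <linux/dma-mapping.h>

    static void *alloc_cleared(struct device *dev, size_t size,
                               dma_addr_t *phys)
    {
        /* Equivalent to dma_alloc_coherent() followed by
         * memset(buf, 0, size).
         */
        return dma_zalloc_coherent(dev, size, phys, GFP_KERNEL);
    }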
@@ -6274,11 +6345,13 @@ lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"6188 FW LOG mailbox "
"completed with status x%x add_status x%x,"
" mbx status x%x\n",
shdr_status, shdr_add_status, mb->mbxStatus);
+
+ ras_fwlog->ras_hwsupport = false;
goto disable_ras;
}
@@ -6326,7 +6399,7 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "6189 RAS FW Log Support Not Enabled");
+ "6189 FW Log Memory Allocation Failed");
return rc;
}
}
@@ -6334,7 +6407,7 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
/* Setup Mailbox command */
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6190 RAS MBX Alloc Failed");
rc = -ENOMEM;
goto mem_free;
@@ -6379,8 +6452,8 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "6191 RAS Mailbox failed. "
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6191 FW-Log Mailbox failed. "
"status %d mbxStatus : x%x", rc,
bf_get(lpfc_mqe_status, &mbox->u.mqe));
mempool_free(mbox, phba->mbox_mem_pool);
@@ -7348,7 +7421,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
mboxq->vport = vport;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- mp = (struct lpfc_dmabuf *) mboxq->context1;
+ mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
if (rc == MBX_SUCCESS) {
memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
rc = 0;
@@ -7360,7 +7433,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
*/
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
- mboxq->context1 = NULL;
+ mboxq->ctx_buf = NULL;
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0382 READ_SPARAM command failed "
@@ -7635,7 +7708,18 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
*/
spin_lock_irq(&phba->hbalock);
phba->link_state = LPFC_LINK_DOWN;
+
+ /* Check if physical ports are trunked */
+ if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
+ phba->trunk_link.link0.state = LPFC_LINK_DOWN;
+ if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
+ phba->trunk_link.link1.state = LPFC_LINK_DOWN;
+ if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
+ phba->trunk_link.link2.state = LPFC_LINK_DOWN;
+ if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
+ phba->trunk_link.link3.state = LPFC_LINK_DOWN;
spin_unlock_irq(&phba->hbalock);
+
if (!(phba->hba_flag & HBA_FCOE_MODE) &&
(phba->hba_flag & LINK_DISABLED)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
@@ -8119,10 +8203,10 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
}
/* Copy the mailbox extension data */
- if (pmbox->in_ext_byte_len && pmbox->context2) {
- lpfc_sli_pcimem_bcopy(pmbox->context2,
- (uint8_t *)phba->mbox_ext,
- pmbox->in_ext_byte_len);
+ if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
+ lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
+ (uint8_t *)phba->mbox_ext,
+ pmbox->in_ext_byte_len);
}
/* Copy command data to host SLIM area */
lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
@@ -8133,10 +8217,10 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
= MAILBOX_HBA_EXT_OFFSET;
/* Copy the mailbox extension data */
- if (pmbox->in_ext_byte_len && pmbox->context2)
+ if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
lpfc_memcpy_to_slim(phba->MBslimaddr +
MAILBOX_HBA_EXT_OFFSET,
- pmbox->context2, pmbox->in_ext_byte_len);
+ pmbox->ctx_buf, pmbox->in_ext_byte_len);
if (mbx->mbxCommand == MBX_CONFIG_PORT)
/* copy command data into host mbox for cmpl */
@@ -8259,9 +8343,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
MAILBOX_CMD_SIZE);
/* Copy the mailbox extension data */
- if (pmbox->out_ext_byte_len && pmbox->context2) {
+ if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
lpfc_sli_pcimem_bcopy(phba->mbox_ext,
- pmbox->context2,
+ pmbox->ctx_buf,
pmbox->out_ext_byte_len);
}
} else {
@@ -8269,8 +8353,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
MAILBOX_CMD_SIZE);
/* Copy the mailbox extension data */
- if (pmbox->out_ext_byte_len && pmbox->context2) {
- lpfc_memcpy_from_slim(pmbox->context2,
+ if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
+ lpfc_memcpy_from_slim(
+ pmbox->ctx_buf,
phba->MBslimaddr +
MAILBOX_HBA_EXT_OFFSET,
pmbox->out_ext_byte_len);
@@ -11265,19 +11350,12 @@ lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* Complete prepping the abort wqe and issue to the FW. */
abts_wqe = &abtsiocbp->wqe;
- bf_set(abort_cmd_ia, &abts_wqe->abort_cmd, 0);
- bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
-
- /* Explicitly set reserved fields to zero.*/
- abts_wqe->abort_cmd.rsrvd4 = 0;
- abts_wqe->abort_cmd.rsrvd5 = 0;
- /* WQE Common - word 6. Context is XRI tag. Set 0. */
- bf_set(wqe_xri_tag, &abts_wqe->abort_cmd.wqe_com, 0);
- bf_set(wqe_ctxt_tag, &abts_wqe->abort_cmd.wqe_com, 0);
+ /* Clear any stale WQE contents */
+ memset(abts_wqe, 0, sizeof(union lpfc_wqe));
+ bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
/* word 7 */
- bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
cmdiocb->iocb.ulpClass);
@@ -11292,7 +11370,6 @@ lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
abtsiocbp->iotag);
/* word 10 */
- bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, cmdiocb->hba_wqidx);
bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
@@ -12545,10 +12622,10 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
lpfc_sli_pcimem_bcopy(mbox, pmbox,
MAILBOX_CMD_SIZE);
if (pmb->out_ext_byte_len &&
- pmb->context2)
+ pmb->ctx_buf)
lpfc_sli_pcimem_bcopy(
phba->mbox_ext,
- pmb->context2,
+ pmb->ctx_buf,
pmb->out_ext_byte_len);
}
if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
@@ -12563,9 +12640,9 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
if (!pmbox->mbxStatus) {
mp = (struct lpfc_dmabuf *)
- (pmb->context1);
+ (pmb->ctx_buf);
ndlp = (struct lpfc_nodelist *)
- pmb->context2;
+ pmb->ctx_ndlp;
/* Reg_LOGIN of dflt RPI was
* successful. Now let's get
@@ -12578,8 +12655,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
pmb);
pmb->mbox_cmpl =
lpfc_mbx_cmpl_dflt_rpi;
- pmb->context1 = mp;
- pmb->context2 = ndlp;
+ pmb->ctx_buf = mp;
+ pmb->ctx_ndlp = ndlp;
pmb->vport = vport;
rc = lpfc_sli_issue_mbox(phba,
pmb,
@@ -13185,16 +13262,16 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
mcqe_status,
pmbox->un.varWords[0], 0);
if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
- mp = (struct lpfc_dmabuf *)(pmb->context1);
- ndlp = (struct lpfc_nodelist *)pmb->context2;
+ mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+ ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
/* Reg_LOGIN of dflt RPI was successful. Now let's get
* rid of the RPI using the same mbox buffer.
*/
lpfc_unreg_login(phba, vport->vpi,
pmbox->un.varWords[0], pmb);
pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
- pmb->context1 = mp;
- pmb->context2 = ndlp;
+ pmb->ctx_buf = mp;
+ pmb->ctx_ndlp = ndlp;
pmb->vport = vport;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc != MBX_BUSY)
@@ -13413,6 +13490,8 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
return workposted;
}
+#define FC_RCTL_MDS_DIAGS 0xF4
+
/**
* lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
* @phba: Pointer to HBA context object.
@@ -13426,6 +13505,7 @@ static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
bool workposted = false;
+ struct fc_frame_header *fc_hdr;
struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
struct lpfc_nvmet_tgtport *tgtp;
@@ -13462,7 +13542,17 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
hrq->RQ_buf_posted--;
memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
- /* save off the frame for the word thread to process */
+ fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
+
+ if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
+ fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ /* Handle MDS Loopback frames */
+ lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
+ break;
+ }
+
+ /* save off the frame for the work thread to process */
list_add_tail(&dma_buf->cq_event.list,
&phba->sli4_hba.sp_queue_event);
/* Frame received */
@@ -14501,7 +14591,8 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
hw_page_size))/hw_page_size;
/* If needed, Adjust page count to match the max the adapter supports */
- if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)
+ if (phba->sli4_hba.pc_sli4_params.wqpcnt &&
+ (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt))
queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
INIT_LIST_HEAD(&queue->list);
@@ -14669,7 +14760,8 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->context1 = NULL;
+ mbox->ctx_buf = NULL;
+ mbox->ctx_ndlp = NULL;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -14789,7 +14881,8 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
}
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->context1 = NULL;
+ mbox->ctx_buf = NULL;
+ mbox->ctx_ndlp = NULL;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
@@ -16863,8 +16956,6 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
struct fc_vft_header *fc_vft_hdr;
uint32_t *header = (uint32_t *) fc_hdr;
-#define FC_RCTL_MDS_DIAGS 0xF4
-
switch (fc_hdr->fh_r_ctl) {
case FC_RCTL_DD_UNCAT: /* uncategorized information */
case FC_RCTL_DD_SOL_DATA: /* solicited data */
@@ -16903,15 +16994,12 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
goto drop;
}
-#define FC_TYPE_VENDOR_UNIQUE 0xFF
-
switch (fc_hdr->fh_type) {
case FC_TYPE_BLS:
case FC_TYPE_ELS:
case FC_TYPE_FCP:
case FC_TYPE_CT:
case FC_TYPE_NVME:
- case FC_TYPE_VENDOR_UNIQUE:
break;
case FC_TYPE_IP:
case FC_TYPE_ILS:
@@ -17741,6 +17829,7 @@ lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
kfree(pcmd);
lpfc_sli_release_iocbq(phba, cmdiocb);
+ lpfc_drain_txq(phba);
}
static void
@@ -17754,14 +17843,23 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
struct lpfc_dmabuf *pcmd = NULL;
uint32_t frame_len;
int rc;
+ unsigned long iflags;
fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
/* Send the received frame back */
iocbq = lpfc_sli_get_iocbq(phba);
- if (!iocbq)
- goto exit;
+ if (!iocbq) {
+ /* Queue cq event and wakeup worker thread to process it */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ list_add_tail(&dmabuf->cq_event.list,
+ &phba->sli4_hba.sp_queue_event);
+ phba->hba_flag |= HBA_SP_QUEUE_EVT;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ lpfc_worker_wake_up(phba);
+ return;
+ }
/* Allocate buffer for command payload */
pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
@@ -17846,6 +17944,14 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
/* Process each received buffer */
fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+ if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
+ fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
+ vport = phba->pport;
+ /* Handle MDS Loopback frames */
+ lpfc_sli4_handle_mds_loopback(vport, dmabuf);
+ return;
+ }
+
/* check to see if this a valid type of frame */
if (lpfc_fc_frame_check(phba, fc_hdr)) {
lpfc_in_buf_free(phba, &dmabuf->dbuf);
@@ -17860,13 +17966,6 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
fcfi = bf_get(lpfc_rcqe_fcf_id,
&dmabuf->cq_event.cqe.rcqe_cmpl);
- if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
- vport = phba->pport;
- /* Handle MDS Loopback frames */
- lpfc_sli4_handle_mds_loopback(vport, dmabuf);
- return;
- }
-
/* d_id this frame is directed to */
did = sli4_did_from_fc_hdr(fc_hdr);
@@ -18207,8 +18306,8 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
lpfc_resume_rpi(mboxq, ndlp);
if (cmpl) {
mboxq->mbox_cmpl = cmpl;
- mboxq->context1 = arg;
- mboxq->context2 = ndlp;
+ mboxq->ctx_buf = arg;
+ mboxq->ctx_ndlp = ndlp;
} else
mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mboxq->vport = ndlp->vport;
@@ -18711,15 +18810,8 @@ next_priority:
goto initial_priority;
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
"2844 No roundrobin failover FCF available\n");
- if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
- return LPFC_FCOE_FCF_NEXT_NONE;
- else {
- lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
- "3063 Only FCF available idx %d, flag %x\n",
- next_fcf_index,
- phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
- return next_fcf_index;
- }
+
+ return LPFC_FCOE_FCF_NEXT_NONE;
}
if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
@@ -19026,7 +19118,7 @@ lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
goto out;
mqe = &mboxq->u.mqe;
- mp = (struct lpfc_dmabuf *) mboxq->context1;
+ mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
if (rc)
goto out;
@@ -19171,11 +19263,11 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
struct lpfc_mbx_wr_object *wr_object;
LPFC_MBOXQ_t *mbox;
int rc = 0, i = 0;
- uint32_t shdr_status, shdr_add_status;
+ uint32_t shdr_status, shdr_add_status, shdr_change_status;
uint32_t mbox_tmo;
- union lpfc_sli4_cfg_shdr *shdr;
struct lpfc_dmabuf *dmabuf;
uint32_t written = 0;
+ bool check_change_status = false;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -19203,6 +19295,8 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
(size - written);
written += (size - written);
bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
+ bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
+ check_change_status = true;
} else {
wr_object->u.request.bde[i].tus.f.bdeSize =
SLI4_PAGE_SIZE;
@@ -19219,9 +19313,39 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
/* The IOCTL status is embedded in the mailbox subheader. */
- shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
- shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
- shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ shdr_status = bf_get(lpfc_mbox_hdr_status,
+ &wr_object->header.cfg_shdr.response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &wr_object->header.cfg_shdr.response);
+ if (check_change_status) {
+ shdr_change_status = bf_get(lpfc_wr_object_change_status,
+ &wr_object->u.response);
+ switch (shdr_change_status) {
+ case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3198 Firmware write complete: System "
+ "reboot required to instantiate\n");
+ break;
+ case (LPFC_CHANGE_STATUS_FW_RESET):
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3199 Firmware write complete: Firmware"
+ " reset required to instantiate\n");
+ break;
+ case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3200 Firmware write complete: Port "
+ "Migration or PCI Reset required to "
+ "instantiate\n");
+ break;
+ case (LPFC_CHANGE_STATUS_PCI_RESET):
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3201 Firmware write complete: PCI "
+ "Reset required to instantiate\n");
+ break;
+ default:
+ break;
+ }
+ }
if (rc != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
if (shdr_status || shdr_add_status || rc) {
@@ -19277,7 +19401,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
(mb->u.mb.mbxCommand == MBX_REG_VPI))
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
- act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
+ act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
/* Put reference count for delayed processing */
act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
/* Unregister the RPI when mailbox complete */
@@ -19302,7 +19426,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
- ndlp = (struct lpfc_nodelist *)mb->context2;
+ ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
/* Unregister the RPI when mailbox complete */
mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
restart_loop = 1;
@@ -19322,13 +19446,14 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
while (!list_empty(&mbox_cmd_list)) {
list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
- mp = (struct lpfc_dmabuf *) (mb->context1);
+ mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
if (mp) {
__lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
- ndlp = (struct lpfc_nodelist *) mb->context2;
- mb->context2 = NULL;
+ mb->ctx_buf = NULL;
+ ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
+ mb->ctx_ndlp = NULL;
if (ndlp) {
spin_lock(shost->host_lock);
ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 34b7ab69b9b4..7abb395bb64a 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -144,9 +144,9 @@ typedef struct lpfcMboxq {
MAILBOX_t mb; /* Mailbox cmd */
struct lpfc_mqe mqe;
} u;
- struct lpfc_vport *vport;/* virtual port pointer */
- void *context1; /* caller context information */
- void *context2; /* caller context information */
+ struct lpfc_vport *vport; /* virtual port pointer */
+ void *ctx_ndlp; /* caller ndlp information */
+ void *ctx_buf; /* caller buffer information */
void *context3;
void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
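
This struct change is the source of the mechanical context1/context2 renames throughout the patch: one opaque pointer per role. A sketch of a completion handler consuming the renamed fields (the handler itself is hypothetical; the helpers are the driver's own, as used elsewhere in this diff):

    static void example_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
    {
        /* Each pointer's role is now visible at the use site:
         * ctx_buf carries the DMA buffer, ctx_ndlp the node.
         */
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;

        if (mp) {
            lpfc_mbuf_free(phba, mp->virt, mp->phys);
            kfree(mp);
        }
        if (ndlp)
            lpfc_nlp_put(ndlp);
        mempool_free(pmb, phba->mbox_mem_pool);
    }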
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index e76c380e1a84..6b2d2350e2c6 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -279,6 +279,7 @@ struct lpfc_fcf {
#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */
#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
#define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT)
+ uint16_t fcf_redisc_attempted;
uint32_t addr_mode;
uint32_t eligible_fcf_cnt;
struct lpfc_fcf_rec current_rec;
@@ -717,6 +718,19 @@ struct lpfc_sli4_hba {
uint16_t num_online_cpu;
uint16_t num_present_cpu;
uint16_t curr_disp_cpu;
+ uint32_t conf_trunk;
+#define lpfc_conf_trunk_port0_WORD conf_trunk
+#define lpfc_conf_trunk_port0_SHIFT 0
+#define lpfc_conf_trunk_port0_MASK 0x1
+#define lpfc_conf_trunk_port1_WORD conf_trunk
+#define lpfc_conf_trunk_port1_SHIFT 1
+#define lpfc_conf_trunk_port1_MASK 0x1
+#define lpfc_conf_trunk_port2_WORD conf_trunk
+#define lpfc_conf_trunk_port2_SHIFT 2
+#define lpfc_conf_trunk_port2_MASK 0x1
+#define lpfc_conf_trunk_port3_WORD conf_trunk
+#define lpfc_conf_trunk_port3_SHIFT 3
+#define lpfc_conf_trunk_port3_MASK 0x1
};
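
The _WORD/_SHIFT/_MASK triplets follow the driver's bitfield accessor convention: bf_get(name, ptr) token-pastes the name to locate the word, shift, and mask. Roughly, reading the port-2 trunk bit expands to (a sketch of the expansion, not new driver code):

    /* bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba) open-coded: */
    unsigned int port2_trunked =
        (phba->sli4_hba.conf_trunk >> 2) & 0x1; /* SHIFT 2, MASK 0x1 */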
enum lpfc_sge_type {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 5a0d512ff497..3f4398ffb567 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.0.0.7"
+#define LPFC_DRIVER_VERSION "12.0.0.10"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index c340e0e47473..102a011ff6d4 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -138,8 +138,8 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
* Grab buffer pointer and clear ctx_buf so we can use
* lpfc_sli_issue_mbox_wait
*/
- mp = (struct lpfc_dmabuf *) pmb->context1;
- pmb->context1 = NULL;
+ mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ pmb->ctx_buf = NULL;
pmb->vport = vport;
rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 177701dfdfcb..c8e6ae98a4a6 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -403,7 +403,7 @@ static struct scsi_host_template mac53c94_template = {
.can_queue = 1,
.this_id = 7,
.sg_tablesize = SG_ALL,
- .use_clustering = DISABLE_CLUSTERING,
+ .max_segment_size = 65535,
};
static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match)
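
With use_clustering gone from the SCSI host template, each driver now states its actual constraint directly, either a per-segment size cap or a DMA boundary, as in the mac53c94 change above and the mac_esp/mac_scsi changes below. A sketch of a template using the replacement fields (the driver and its queuecommand are hypothetical; the two fields are real scsi_host_template members):

    #include <scsi/scsi_host.h>

    static int example_queuecommand(struct Scsi_Host *sh,
                                    struct scsi_cmnd *cmd)
    {
        return 0;   /* stub; a real driver would dispatch the command */
    }

    static struct scsi_host_template example_template = {
        .name             = "example",
        .queuecommand     = example_queuecommand,
        /* Formerly .use_clustering = DISABLE_CLUSTERING; express the
         * hardware limit explicitly instead:
         */
        .max_segment_size = 65535,          /* cap each SG segment */
        .dma_boundary     = PAGE_SIZE - 1,  /* keep segments within a page */
    };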
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 764d320bb2ca..ee741207fd4e 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -307,7 +307,7 @@ static int esp_mac_probe(struct platform_device *dev)
goto fail;
host->max_id = 8;
- host->use_clustering = DISABLE_CLUSTERING;
+ host->dma_boundary = PAGE_SIZE - 1;
esp = shost_priv(host);
esp->host = host;
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index dd6057359d7c..8b4b5b1a13d7 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -333,7 +333,7 @@ static struct scsi_host_template mac_scsi_template = {
.this_id = 7,
.sg_tablesize = 1,
.cmd_per_lun = 2,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
.cmd_size = NCR5380_CMD_SIZE,
.max_sectors = 128,
};
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 8c7154143a4e..4862f65ec3e8 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4148,7 +4148,6 @@ static struct scsi_host_template megaraid_template = {
.this_id = DEFAULT_INITIATOR_ID,
.sg_tablesize = MAX_SGLIST,
.cmd_per_lun = DEF_CMD_PER_LUN,
- .use_clustering = ENABLE_CLUSTERING,
.eh_abort_handler = megaraid_abort,
.eh_device_reset_handler = megaraid_reset,
.eh_bus_reset_handler = megaraid_reset,
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 3b7abe5ca7f5..e836392b75e8 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -336,7 +336,6 @@ static struct scsi_host_template megaraid_template_g = {
.eh_abort_handler = megaraid_abort_handler,
.eh_host_reset_handler = megaraid_reset_handler,
.change_queue_depth = scsi_change_queue_depth,
- .use_clustering = ENABLE_CLUSTERING,
.no_write_same = 1,
.sdev_attrs = megaraid_sdev_attrs,
.shost_attrs = megaraid_shost_attrs,
@@ -1243,8 +1242,7 @@ megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
dma_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
sg_pci_blk[i].dma_addr);
}
- if (raid_dev->sg_pool_handle)
- dma_pool_destroy(raid_dev->sg_pool_handle);
+ dma_pool_destroy(raid_dev->sg_pool_handle);
epthru_pci_blk = raid_dev->epthru_pool;
@@ -1252,8 +1250,7 @@ megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
dma_pool_free(raid_dev->epthru_pool_handle,
epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr);
}
- if (raid_dev->epthru_pool_handle)
- dma_pool_destroy(raid_dev->epthru_pool_handle);
+ dma_pool_destroy(raid_dev->epthru_pool_handle);
mbox_pci_blk = raid_dev->mbox_pool;
@@ -1261,8 +1258,7 @@ megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
dma_pool_free(raid_dev->mbox_pool_handle,
mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr);
}
- if (raid_dev->mbox_pool_handle)
- dma_pool_destroy(raid_dev->mbox_pool_handle);
+ dma_pool_destroy(raid_dev->mbox_pool_handle);
return;
}
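The dropped if-checks are redundant rather than stylistic: dma_pool_destroy() is NULL-safe in the same way kfree() is. A paraphrase of the guard at the top of the function in mm/dmapool.c:

	void dma_pool_destroy(struct dma_pool *pool)
	{
		if (unlikely(!pool))
			return;
		/* ... walk and free the pool's pages, then free the pool ... */
	}

The same simplification repeats below for megaraid_mm and the fusion reply-frame pools.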
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 8428247015db..3ce837e4b24c 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -1017,8 +1017,7 @@ memalloc_error:
kfree(adapter->kioc_list);
kfree(adapter->mbox_list);
- if (adapter->pthru_dma_pool)
- dma_pool_destroy(adapter->pthru_dma_pool);
+ dma_pool_destroy(adapter->pthru_dma_pool);
kfree(adapter);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 67d356d84717..16536c41f0c5 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2,7 +2,8 @@
* Linux MegaRAID driver for SAS based RAID controllers
*
* Copyright (c) 2003-2013 LSI Corporation
- * Copyright (c) 2013-2014 Avago Technologies
+ * Copyright (c) 2013-2016 Avago Technologies
+ * Copyright (c) 2016-2018 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -19,14 +20,11 @@
*
* FILE: megaraid_sas.h
*
- * Authors: Avago Technologies
- * Kashyap Desai <kashyap.desai@avagotech.com>
- * Sumit Saxena <sumit.saxena@avagotech.com>
+ * Authors: Broadcom Inc.
+ * Kashyap Desai <kashyap.desai@broadcom.com>
+ * Sumit Saxena <sumit.saxena@broadcom.com>
*
- * Send feedback to: megaraidlinux.pdl@avagotech.com
- *
- * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
- * San Jose, California 95131
+ * Send feedback to: megaraidlinux.pdl@broadcom.com
*/
#ifndef LSI_MEGARAID_SAS_H
@@ -35,8 +33,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.706.03.00-rc1"
-#define MEGASAS_RELDATE "May 21, 2018"
+#define MEGASAS_VERSION "07.707.50.00-rc1"
+#define MEGASAS_RELDATE "December 18, 2018"
/*
* Device IDs
@@ -62,6 +60,10 @@
#define PCI_DEVICE_ID_LSI_TOMCAT 0x0017
#define PCI_DEVICE_ID_LSI_VENTURA_4PORT 0x001B
#define PCI_DEVICE_ID_LSI_CRUSADER_4PORT 0x001C
+#define PCI_DEVICE_ID_LSI_AERO_10E1 0x10e1
+#define PCI_DEVICE_ID_LSI_AERO_10E2 0x10e2
+#define PCI_DEVICE_ID_LSI_AERO_10E5 0x10e5
+#define PCI_DEVICE_ID_LSI_AERO_10E6 0x10e6
/*
* Intel HBA SSDIDs
@@ -142,6 +144,7 @@
* CLR_HANDSHAKE: FW is waiting for HANDSHAKE from BIOS or Driver
* HOTPLUG : Resume from Hotplug
* MFI_STOP_ADP : Send signal to FW to stop processing
+ * MFI_ADP_TRIGGER_SNAP_DUMP: Inform firmware to initiate snap dump
*/
#define WRITE_SEQUENCE_OFFSET (0x0000000FC) /* I20 */
#define HOST_DIAGNOSTIC_OFFSET (0x000000F8) /* I20 */
@@ -158,6 +161,7 @@
#define MFI_RESET_FLAGS MFI_INIT_READY| \
MFI_INIT_MFIMODE| \
MFI_INIT_ABORT
+#define MFI_ADP_TRIGGER_SNAP_DUMP 0x00000100
#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01)
/*
@@ -860,8 +864,22 @@ struct megasas_ctrl_prop {
u32 reserved:18;
#endif
} OnOffProperties;
- u8 autoSnapVDSpace;
- u8 viewSpace;
+
+ union {
+ u8 autoSnapVDSpace;
+ u8 viewSpace;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u16 reserved2:11;
+ u16 enable_snap_dump:1;
+ u16 reserved1:4;
+#else
+ u16 reserved1:4;
+ u16 enable_snap_dump:1;
+ u16 reserved2:11;
+#endif
+ } on_off_properties2;
+ };
__le16 spinDownTime;
u8 reserved[24];
} __packed;
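The mirrored #if defined(__BIG_ENDIAN_BITFIELD) layouts keep enable_snap_dump at the same wire position on both bitfield orderings; what they do not do is byte-swap, so the driver still converts the 16-bit word in place once after the firmware fills it, exactly as the megaraid_sas_base.c hunk later in this patch does:

	/* ci is the DMA-ed struct megasas_ctrl_info from FW (little-endian) */
	le16_to_cpus((u16 *)&ci->properties.on_off_properties2);
	if (ci->properties.on_off_properties2.enable_snap_dump)
		instance->snapdump_wait_time =
			MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME;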
@@ -1485,7 +1503,6 @@ enum FW_BOOT_CONTEXT {
#define MEGASAS_IOCTL_CMD 0
#define MEGASAS_DEFAULT_CMD_TIMEOUT 90
#define MEGASAS_THROTTLE_QUEUE_DEPTH 16
-#define MEGASAS_BLOCKED_CMD_TIMEOUT 60
#define MEGASAS_DEFAULT_TM_TIMEOUT 50
/*
* FW reports the maximum of number of commands that it can accept (maximum
@@ -1544,11 +1561,16 @@ enum FW_BOOT_CONTEXT {
#define MR_CAN_HANDLE_64_BIT_DMA_OFFSET (1 << 25)
+#define MEGASAS_WATCHDOG_THREAD_INTERVAL 1000
+#define MEGASAS_WAIT_FOR_NEXT_DMA_MSECS 20
+#define MEGASAS_WATCHDOG_WAIT_COUNT 50
+
enum MR_ADAPTER_TYPE {
MFI_SERIES = 1,
THUNDERBOLT_SERIES = 2,
INVADER_SERIES = 3,
VENTURA_SERIES = 4,
+ AERO_SERIES = 5,
};
/*
@@ -1588,11 +1610,10 @@ struct megasas_register_set {
u32 reserved_3[3]; /*00A4h*/
- u32 outbound_scratch_pad ; /*00B0h*/
- u32 outbound_scratch_pad_2; /*00B4h*/
- u32 outbound_scratch_pad_3; /*00B8h*/
- u32 outbound_scratch_pad_4; /*00BCh*/
-
+ u32 outbound_scratch_pad_0; /*00B0h*/
+ u32 outbound_scratch_pad_1; /*00B4h*/
+ u32 outbound_scratch_pad_2; /*00B8h*/
+ u32 outbound_scratch_pad_3; /*00BCh*/
u32 inbound_low_queue_port ; /*00C0h*/
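This hunk zero-bases the scratch pad naming; every scratch_pad_N rename in the code hunks that follow is this same mechanical shift, not a change of register:

	old field                 offset    new field
	outbound_scratch_pad      0x00B0    outbound_scratch_pad_0
	outbound_scratch_pad_2    0x00B4    outbound_scratch_pad_1
	outbound_scratch_pad_3    0x00B8    outbound_scratch_pad_2
	outbound_scratch_pad_4    0x00BC    outbound_scratch_pad_3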
@@ -2181,6 +2202,9 @@ struct megasas_instance {
struct MR_LD_TARGETID_LIST *ld_targetid_list_buf;
dma_addr_t ld_targetid_list_buf_h;
+ struct MR_SNAPDUMP_PROPERTIES *snapdump_prop;
+ dma_addr_t snapdump_prop_h;
+
void *crash_buf[MAX_CRASH_DUMP_SIZE];
unsigned int fw_crash_buffer_size;
unsigned int fw_crash_state;
@@ -2250,7 +2274,9 @@ struct megasas_instance {
struct megasas_instance_template *instancet;
struct tasklet_struct isr_tasklet;
struct work_struct work_init;
- struct work_struct crash_init;
+ struct delayed_work fw_fault_work;
+ struct workqueue_struct *fw_fault_work_q;
+ char fault_handler_work_q_name[48];
u8 flag;
u8 unload;
@@ -2310,6 +2336,7 @@ struct megasas_instance {
bool support_nvme_passthru;
u8 task_abort_tmo;
u8 max_reset_tmo;
+ u8 snapdump_wait_time;
};
struct MR_LD_VF_MAP {
u32 size;
@@ -2386,9 +2413,9 @@ struct megasas_instance_template {
void (*enable_intr)(struct megasas_instance *);
void (*disable_intr)(struct megasas_instance *);
- int (*clear_intr)(struct megasas_register_set __iomem *);
+ int (*clear_intr)(struct megasas_instance *);
- u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *);
+ u32 (*read_fw_status_reg)(struct megasas_instance *);
int (*adp_reset)(struct megasas_instance *, \
struct megasas_register_set __iomem *);
int (*check_reset)(struct megasas_instance *, \
@@ -2535,11 +2562,11 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
bool is_target_prop);
int megasas_get_target_prop(struct megasas_instance *instance,
struct scsi_device *sdev);
+void megasas_get_snapdump_properties(struct megasas_instance *instance);
int megasas_set_crash_dump_params(struct megasas_instance *instance,
u8 crash_buf_state);
void megasas_free_host_crash_buffer(struct megasas_instance *instance);
-void megasas_fusion_crash_dump_wq(struct work_struct *work);
void megasas_return_cmd_fusion(struct megasas_instance *instance,
struct megasas_cmd_fusion *cmd);
@@ -2560,6 +2587,9 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd);
u32 mega_mod64(u64 dividend, u32 divisor);
int megasas_alloc_fusion_context(struct megasas_instance *instance);
void megasas_free_fusion_context(struct megasas_instance *instance);
+int megasas_fusion_start_watchdog(struct megasas_instance *instance);
+void megasas_fusion_stop_watchdog(struct megasas_instance *instance);
+
void megasas_set_dma_settings(struct megasas_instance *instance,
struct megasas_dcmd_frame *dcmd,
dma_addr_t dma_addr, u32 dma_len);
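Passing the adapter instance instead of a raw register-set pointer lets each read_fw_status_reg/clear_intr implementation reach instance->reg_set itself and, on Aero, route the access through the retrying megasas_readl() helper added in megaraid_sas_base.c. Call sites simplify accordingly; a representative before/after from the wait-for-outstanding path:

	/* before */
	fw_state = instance->instancet->read_fw_status_reg(instance->reg_set)
			& MFI_STATE_MASK;
	/* after */
	fw_state = instance->instancet->read_fw_status_reg(instance)
			& MFI_STATE_MASK;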
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 9b90c716f06d..f7bdd783360a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2,7 +2,8 @@
* Linux MegaRAID driver for SAS based RAID controllers
*
* Copyright (c) 2003-2013 LSI Corporation
- * Copyright (c) 2013-2014 Avago Technologies
+ * Copyright (c) 2013-2016 Avago Technologies
+ * Copyright (c) 2016-2018 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -17,18 +18,15 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
- * Authors: Avago Technologies
+ * Authors: Broadcom Inc.
* Sreenivas Bagalkote
* Sumant Patro
* Bo Yang
* Adam Radford
- * Kashyap Desai <kashyap.desai@avagotech.com>
- * Sumit Saxena <sumit.saxena@avagotech.com>
+ * Kashyap Desai <kashyap.desai@broadcom.com>
+ * Sumit Saxena <sumit.saxena@broadcom.com>
*
- * Send feedback to: megaraidlinux.pdl@avagotech.com
- *
- * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
- * San Jose, California 95131
+ * Send feedback to: megaraidlinux.pdl@broadcom.com
*/
#include <linux/kernel.h>
@@ -87,8 +85,7 @@ MODULE_PARM_DESC(throttlequeuedepth,
unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
module_param(resetwaittime, int, S_IRUGO);
-MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
- "before resetting adapter. Default: 180");
+MODULE_PARM_DESC(resetwaittime, "Wait time in seconds (1-180) after I/O timeout before resetting adapter. Default: 180s");
int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, S_IRUGO);
@@ -96,7 +93,7 @@ MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Defau
int rdpq_enable = 1;
module_param(rdpq_enable, int, S_IRUGO);
-MODULE_PARM_DESC(rdpq_enable, " Allocate reply queue in chunks for large queue depth enable/disable Default: disable(0)");
+MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth, enable/disable. Default: enable(1)");
unsigned int dual_qdepth_disable;
module_param(dual_qdepth_disable, int, S_IRUGO);
@@ -108,8 +105,8 @@ MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
-MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
-MODULE_DESCRIPTION("Avago MegaRAID SAS Driver");
+MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
+MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");
int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
static int megasas_get_pd_list(struct megasas_instance *instance);
@@ -165,6 +162,10 @@ static struct pci_device_id megasas_pci_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
{}
};
@@ -189,7 +190,7 @@ void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
u8 alt_status);
static u32
-megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
+megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
struct megasas_register_set __iomem *reg_set);
@@ -219,6 +220,28 @@ megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
static inline void
megasas_init_ctrl_params(struct megasas_instance *instance);
+u32 megasas_readl(struct megasas_instance *instance,
+ const volatile void __iomem *addr)
+{
+ u32 i = 0, ret_val;
+ /*
+ * Due to a HW erratum in Aero controllers, reads to certain
+ * Fusion registers could intermittently return all zeroes.
+ * This behavior is transient in nature and subsequent reads will
+ * return a valid value. As a workaround in the driver, retry
+ * readl up to three times until a non-zero value is read.
+ */
+ if (instance->adapter_type == AERO_SERIES) {
+ do {
+ ret_val = readl(addr);
+ i++;
+ } while (ret_val == 0 && i < 3);
+ return ret_val;
+ } else {
+ return readl(addr);
+ }
+}
+
/**
* megasas_set_dma_settings - Populate DMA address, length and flags for DCMDs
* @instance: Adapter soft state
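With the helper in place, the hunks below convert raw readl() calls on Fusion registers to megasas_readl() so the Aero retry quirk is applied uniformly. A typical converted read:

	scratch_pad_1 = megasas_readl(instance,
			&instance->reg_set->outbound_scratch_pad_1);

Note the retry is bounded (three reads) and only taken for AERO_SERIES, so other adapter families keep the single-read fast path.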
@@ -419,19 +442,21 @@ megasas_disable_intr_xscale(struct megasas_instance *instance)
* @regs: MFI register set
*/
static u32
-megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
+megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
{
- return readl(&(regs)->outbound_msg_0);
+ return readl(&instance->reg_set->outbound_msg_0);
}
/**
* megasas_clear_interrupt_xscale - Check & clear interrupt
* @regs: MFI register set
*/
static int
-megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
+megasas_clear_intr_xscale(struct megasas_instance *instance)
{
u32 status;
u32 mfiStatus = 0;
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
/*
* Check if it is our interrupt
@@ -596,9 +621,9 @@ megasas_disable_intr_ppc(struct megasas_instance *instance)
* @regs: MFI register set
*/
static u32
-megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
+megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
{
- return readl(&(regs)->outbound_scratch_pad);
+ return readl(&instance->reg_set->outbound_scratch_pad_0);
}
/**
@@ -606,9 +631,11 @@ megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
* @regs: MFI register set
*/
static int
-megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
+megasas_clear_intr_ppc(struct megasas_instance *instance)
{
u32 status, mfiStatus = 0;
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
/*
* Check if it is our interrupt
@@ -721,9 +748,9 @@ megasas_disable_intr_skinny(struct megasas_instance *instance)
* @regs: MFI register set
*/
static u32
-megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
+megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
{
- return readl(&(regs)->outbound_scratch_pad);
+ return readl(&instance->reg_set->outbound_scratch_pad_0);
}
/**
@@ -731,10 +758,12 @@ megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
* @regs: MFI register set
*/
static int
-megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
+megasas_clear_intr_skinny(struct megasas_instance *instance)
{
u32 status;
u32 mfiStatus = 0;
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
/*
* Check if it is our interrupt
@@ -748,7 +777,7 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
/*
* Check if it is our interrupt
*/
- if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
+ if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
MFI_STATE_FAULT) {
mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
} else
@@ -866,9 +895,9 @@ megasas_disable_intr_gen2(struct megasas_instance *instance)
* @regs: MFI register set
*/
static u32
-megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
+megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
{
- return readl(&(regs)->outbound_scratch_pad);
+ return readl(&instance->reg_set->outbound_scratch_pad_0);
}
/**
@@ -876,10 +905,12 @@ megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
* @regs: MFI register set
*/
static int
-megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
+megasas_clear_intr_gen2(struct megasas_instance *instance)
{
u32 status;
u32 mfiStatus = 0;
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
/*
* Check if it is our interrupt
@@ -2079,9 +2110,11 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance)
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
(instance->adapter_type != MFI_SERIES)) {
- writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
- /* Flush */
- readl(&instance->reg_set->doorbell);
+ if (!instance->requestorId) {
+ writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
+ /* Flush */
+ readl(&instance->reg_set->doorbell);
+ }
if (instance->requestorId && instance->peerIsPresent)
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
} else {
@@ -2682,7 +2715,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
i = 0;
outstanding = atomic_read(&instance->fw_outstanding);
- fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
+ fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
goto no_outstanding;
@@ -2711,7 +2744,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
outstanding = atomic_read(&instance->fw_outstanding);
- fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
+ fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
goto no_outstanding;
}
@@ -3186,7 +3219,6 @@ static struct scsi_host_template megasas_template = {
.eh_timed_out = megasas_reset_timer,
.shost_attrs = megaraid_host_attrs,
.bios_param = megasas_bios_param,
- .use_clustering = ENABLE_CLUSTERING,
.change_queue_depth = scsi_change_queue_depth,
.no_write_same = 1,
};
@@ -3278,6 +3310,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
megasas_complete_int_cmd(instance, cmd);
break;
}
+ /* fall through */
case MFI_CMD_LD_READ:
case MFI_CMD_LD_WRITE:
@@ -3665,9 +3698,8 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
return IRQ_HANDLED;
}
- if ((mfiStatus = instance->instancet->clear_intr(
- instance->reg_set)
- ) == 0) {
+ mfiStatus = instance->instancet->clear_intr(instance);
+ if (mfiStatus == 0) {
/* Hardware may not set outbound_intr_status in MSI-X mode */
if (!instance->msix_vectors)
return IRQ_NONE;
@@ -3677,7 +3709,7 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
fw_state = instance->instancet->read_fw_status_reg(
- instance->reg_set) & MFI_STATE_MASK;
+ instance) & MFI_STATE_MASK;
if (fw_state != MFI_STATE_FAULT) {
dev_notice(&instance->pdev->dev, "fw state:%x\n",
@@ -3760,7 +3792,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
u32 cur_state;
u32 abs_state, curr_abs_state;
- abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
+ abs_state = instance->instancet->read_fw_status_reg(instance);
fw_state = abs_state & MFI_STATE_MASK;
if (fw_state != MFI_STATE_READY)
@@ -3832,7 +3864,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
if (instance->adapter_type != MFI_SERIES) {
for (i = 0; i < (10 * 1000); i += 20) {
- if (readl(
+ if (megasas_readl(
+ instance,
&instance->
reg_set->
doorbell) & 1)
@@ -3891,12 +3924,12 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
/*
* The cur_state should not last for more than max_wait secs
*/
- for (i = 0; i < (max_wait * 1000); i++) {
+ for (i = 0; i < max_wait; i++) {
curr_abs_state = instance->instancet->
- read_fw_status_reg(instance->reg_set);
+ read_fw_status_reg(instance);
if (abs_state == curr_abs_state) {
- msleep(1);
+ msleep(1000);
} else
break;
}
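The rewritten wait loop preserves the total timeout while cutting wakeups by three orders of magnitude: the old form ran max_wait * 1000 iterations sleeping 1 ms each, the new form runs max_wait iterations sleeping 1000 ms each, i.e. max_wait seconds either way, but with one schedule per second instead of a thousand.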
@@ -4634,9 +4667,9 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
}
dev_info(&instance->pdev->dev,
- "firmware type\t: %s\n",
- instance->supportmax256vd ? "Extended VD(240 VD)firmware" :
- "Legacy(64 VD) firmware");
+ "FW provided supportMaxExtLDs: %d\tmax_lds: %d\n",
+ instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0,
+ instance->ctrl_info_buf->max_lds);
if (instance->max_raid_mapsize) {
ventura_map_sz = instance->max_raid_mapsize *
@@ -4661,6 +4694,87 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
}
+/*
+ * dcmd.opcode - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES
+ * dcmd.hdr.length - number of bytes to read
+ * dcmd.sge - Ptr to MR_SNAPDUMP_PROPERTIES
+ * Desc: Fill in snapdump properties
+ * Status: MFI_STAT_OK- Command successful
+ */
+void megasas_get_snapdump_properties(struct megasas_instance *instance)
+{
+ int ret = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_SNAPDUMP_PROPERTIES *ci;
+ dma_addr_t ci_h = 0;
+
+ ci = instance->snapdump_prop;
+ ci_h = instance->snapdump_prop_h;
+
+ if (!ci)
+ return;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n");
+ return;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ memset(ci, 0, sizeof(*ci));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES);
+
+ megasas_set_dma_settings(instance, dcmd, ci_h,
+ sizeof(struct MR_SNAPDUMP_PROPERTIES));
+
+ if (!instance->mask_interrupts) {
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MFI_IO_TIMEOUT_SECS);
+ } else {
+ ret = megasas_issue_polled(instance, cmd);
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ }
+
+ switch (ret) {
+ case DCMD_SUCCESS:
+ instance->snapdump_wait_time =
+ min_t(u8, ci->trigger_min_num_sec_before_ocr,
+ MEGASAS_MAX_SNAP_DUMP_WAIT_TIME);
+ break;
+
+ case DCMD_TIMEOUT:
+ switch (dcmd_timeout_ocr_possible(instance)) {
+ case INITIATE_OCR:
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ megasas_reset_fusion(instance->host,
+ MFI_IO_TIMEOUT_OCR);
+ break;
+ case KILL_ADAPTER:
+ megaraid_sas_kill_hba(instance);
+ break;
+ case IGNORE_TIMEOUT:
+ dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+ __func__, __LINE__);
+ break;
+ }
+ }
+
+ if (ret != DCMD_TIMEOUT)
+ megasas_return_cmd(instance, cmd);
+}
+
/**
* megasas_get_controller_info - Returns FW's controller structure
* @instance: Adapter soft state
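megasas_get_snapdump_properties() follows the driver's usual internal-DCMD shape: get a free command, build the DCMD frame around the pre-allocated coherent buffer, then issue it blocked (interrupts live) or polled (interrupts masked), with refire skipped on the polled path. The call sites added later in this patch gate it on the wait time derived from the FW capability bit:

	if (instance->snapdump_wait_time) {
		megasas_get_snapdump_properties(instance);
		dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n",
			 instance->snapdump_wait_time);
	}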
@@ -4720,6 +4834,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
* CPU endianness format.
*/
le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
+ le16_to_cpus((u16 *)&ci->properties.on_off_properties2);
le32_to_cpus((u32 *)&ci->adapterOperations2);
le32_to_cpus((u32 *)&ci->adapterOperations3);
le16_to_cpus((u16 *)&ci->adapter_operations4);
@@ -4741,6 +4856,11 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
/*Check whether controller is iMR or MR */
instance->is_imr = (ci->memory_size ? 0 : 1);
+
+ instance->snapdump_wait_time =
+ (ci->properties.on_off_properties2.enable_snap_dump ?
+ MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0);
+
dev_info(&instance->pdev->dev,
"controller type\t: %s(%dMB)\n",
instance->is_imr ? "iMR" : "MR",
@@ -4942,16 +5062,13 @@ fail_fw_init:
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance)
{
- struct megasas_register_set __iomem *reg_set;
u32 context_sz;
u32 reply_q_sz;
- reg_set = instance->reg_set;
-
/*
* Get various operational parameters from status register
*/
- instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
+ instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
/*
* Reduce the max supported cmds by 1. This is to ensure that the
* reply_q_sz (1 more than the max cmd that driver may send)
@@ -4959,7 +5076,7 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
*/
instance->max_fw_cmds = instance->max_fw_cmds-1;
instance->max_mfi_cmds = instance->max_fw_cmds;
- instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
+ instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >>
0x10;
/*
* For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
@@ -5015,7 +5132,7 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
instance->fw_support_ieee = 0;
instance->fw_support_ieee =
- (instance->instancet->read_fw_status_reg(reg_set) &
+ (instance->instancet->read_fw_status_reg(instance) &
0x04000000);
dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
@@ -5213,14 +5330,14 @@ static int megasas_init_fw(struct megasas_instance *instance)
{
u32 max_sectors_1;
u32 max_sectors_2, tmp_sectors, msix_enable;
- u32 scratch_pad_2, scratch_pad_3, scratch_pad_4;
+ u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg;
resource_size_t base_addr;
- struct megasas_register_set __iomem *reg_set;
struct megasas_ctrl_info *ctrl_info = NULL;
unsigned long bar_list;
int i, j, loop, fw_msix_count = 0;
struct IOV_111 *iovPtr;
struct fusion_context *fusion;
+ bool do_adp_reset = true;
fusion = instance->ctrl_context;
@@ -5241,8 +5358,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
goto fail_ioremap;
}
- reg_set = instance->reg_set;
-
if (instance->adapter_type != MFI_SERIES)
instance->instancet = &megasas_instance_template_fusion;
else {
@@ -5269,19 +5384,29 @@ static int megasas_init_fw(struct megasas_instance *instance)
}
if (megasas_transition_to_ready(instance, 0)) {
- atomic_set(&instance->fw_reset_no_pci_access, 1);
- instance->instancet->adp_reset
- (instance, instance->reg_set);
- atomic_set(&instance->fw_reset_no_pci_access, 0);
- dev_info(&instance->pdev->dev,
- "FW restarted successfully from %s!\n",
- __func__);
+ if (instance->adapter_type >= INVADER_SERIES) {
+ status_reg = instance->instancet->read_fw_status_reg(
+ instance);
+ do_adp_reset = status_reg & MFI_RESET_ADAPTER;
+ }
- /*waitting for about 30 second before retry*/
- ssleep(30);
+ if (do_adp_reset) {
+ atomic_set(&instance->fw_reset_no_pci_access, 1);
+ instance->instancet->adp_reset
+ (instance, instance->reg_set);
+ atomic_set(&instance->fw_reset_no_pci_access, 0);
+ dev_info(&instance->pdev->dev,
+ "FW restarted successfully from %s!\n",
+ __func__);
+
+ /* waiting for about 30 seconds before retry */
+ ssleep(30);
- if (megasas_transition_to_ready(instance, 0))
+ if (megasas_transition_to_ready(instance, 0))
+ goto fail_ready_state;
+ } else {
goto fail_ready_state;
+ }
}
megasas_init_ctrl_params(instance);
@@ -5297,38 +5422,57 @@ static int megasas_init_fw(struct megasas_instance *instance)
fusion = instance->ctrl_context;
- if (instance->adapter_type == VENTURA_SERIES) {
- scratch_pad_3 =
- readl(&instance->reg_set->outbound_scratch_pad_3);
- instance->max_raid_mapsize = ((scratch_pad_3 >>
+ if (instance->adapter_type >= VENTURA_SERIES) {
+ scratch_pad_2 =
+ megasas_readl(instance,
+ &instance->reg_set->outbound_scratch_pad_2);
+ instance->max_raid_mapsize = ((scratch_pad_2 >>
MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
MR_MAX_RAID_MAP_SIZE_MASK);
}
/* Check if MSI-X is supported while in ready state */
- msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
+ msix_enable = (instance->instancet->read_fw_status_reg(instance) &
0x4000000) >> 0x1a;
if (msix_enable && !msix_disable) {
int irq_flags = PCI_IRQ_MSIX;
- scratch_pad_2 = readl
- (&instance->reg_set->outbound_scratch_pad_2);
+ scratch_pad_1 = megasas_readl
+ (instance, &instance->reg_set->outbound_scratch_pad_1);
/* Check max MSI-X vectors */
if (fusion) {
if (instance->adapter_type == THUNDERBOLT_SERIES) {
/* Thunderbolt Series*/
- instance->msix_vectors = (scratch_pad_2
+ instance->msix_vectors = (scratch_pad_1
& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
fw_msix_count = instance->msix_vectors;
- } else { /* Invader series supports more than 8 MSI-x vectors*/
- instance->msix_vectors = ((scratch_pad_2
+ } else {
+ instance->msix_vectors = ((scratch_pad_1
& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
- if (instance->msix_vectors > 16)
- instance->msix_combined = true;
+
+ /*
+ * For Invader series, > 8 MSI-x vectors
+ * supported by FW/HW implies combined
+ * reply queue mode is enabled.
+ * For Ventura series, > 16 MSI-x vectors
+ * supported by FW/HW implies combined
+ * reply queue mode is enabled.
+ */
+ switch (instance->adapter_type) {
+ case INVADER_SERIES:
+ if (instance->msix_vectors > 8)
+ instance->msix_combined = true;
+ break;
+ case AERO_SERIES:
+ case VENTURA_SERIES:
+ if (instance->msix_vectors > 16)
+ instance->msix_combined = true;
+ break;
+ }
if (rdpq_enable)
- instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
+ instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
1 : 0;
fw_msix_count = instance->msix_vectors;
/* Save 1-15 reply post index address to local memory
@@ -5377,7 +5521,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
if (!instance->msix_vectors) {
i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
if (i < 0)
- goto fail_setup_irqs;
+ goto fail_init_adapter;
}
megasas_setup_reply_map(instance);
@@ -5403,13 +5547,14 @@ static int megasas_init_fw(struct megasas_instance *instance)
if (instance->instancet->init_adapter(instance))
goto fail_init_adapter;
- if (instance->adapter_type == VENTURA_SERIES) {
- scratch_pad_4 =
- readl(&instance->reg_set->outbound_scratch_pad_4);
- if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
+ if (instance->adapter_type >= VENTURA_SERIES) {
+ scratch_pad_3 =
+ megasas_readl(instance,
+ &instance->reg_set->outbound_scratch_pad_3);
+ if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >=
MR_DEFAULT_NVME_PAGE_SHIFT)
instance->nvme_page_size =
- (1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK));
+ (1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK));
dev_info(&instance->pdev->dev,
"NVME page size\t: (%d)\n", instance->nvme_page_size);
@@ -5439,7 +5584,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
/* stream detection initialization */
- if (instance->adapter_type == VENTURA_SERIES) {
+ if (instance->adapter_type >= VENTURA_SERIES) {
fusion->stream_detect_by_ld =
kcalloc(MAX_LOGICAL_DRIVES_EXT,
sizeof(struct LD_STREAM_DETECT *),
@@ -5539,6 +5684,11 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->crash_dump_buf = NULL;
}
+ if (instance->snapdump_wait_time) {
+ megasas_get_snapdump_properties(instance);
+ dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n",
+ instance->snapdump_wait_time);
+ }
dev_info(&instance->pdev->dev,
"pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
@@ -5553,7 +5703,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
dev_info(&instance->pdev->dev, "jbod sync map : %s\n",
instance->use_seqnum_jbod_fp ? "yes" : "no");
-
instance->max_sectors_per_req = instance->max_num_sge *
SGE_BUFFER_SIZE / 512;
if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
@@ -5576,19 +5725,32 @@ static int megasas_init_fw(struct megasas_instance *instance)
/* Launch SR-IOV heartbeat timer */
if (instance->requestorId) {
- if (!megasas_sriov_start_heartbeat(instance, 1))
+ if (!megasas_sriov_start_heartbeat(instance, 1)) {
megasas_start_timer(instance);
- else
+ } else {
instance->skip_heartbeat_timer_del = 1;
+ goto fail_get_ld_pd_list;
+ }
}
+ /*
+ * Create and start watchdog thread which will monitor
+ * controller state every 1 sec and trigger OCR when
+ * it enters fault state
+ */
+ if (instance->adapter_type != MFI_SERIES)
+ if (megasas_fusion_start_watchdog(instance) != SUCCESS)
+ goto fail_start_watchdog;
+
return 0;
+fail_start_watchdog:
+ if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+ del_timer_sync(&instance->sriov_heartbeat_timer);
fail_get_ld_pd_list:
instance->instancet->disable_intr(instance);
-fail_init_adapter:
megasas_destroy_irqs(instance);
-fail_setup_irqs:
+fail_init_adapter:
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
instance->msix_vectors = 0;
@@ -6022,13 +6184,13 @@ static int megasas_io_attach(struct megasas_instance *instance)
* @instance: Adapter soft state
* Description:
*
- * For Ventura, driver/FW will operate in 64bit DMA addresses.
+ * For Ventura, driver/FW will operate in 63bit DMA addresses.
*
* For invader-
* By default, driver/FW will operate in 32bit DMA addresses
* for consistent DMA mapping but if 32 bit consistent
- * DMA mask fails, driver will try with 64 bit consistent
- * mask provided FW is true 64bit DMA capable
+ * DMA mask fails, driver will try with 63 bit consistent
+ * mask provided FW is true 63bit DMA capable
*
* For older controllers(Thunderbolt and MFI based adapters)-
* driver/FW will operate in 32 bit consistent DMA addresses.
@@ -6038,31 +6200,31 @@ megasas_set_dma_mask(struct megasas_instance *instance)
{
u64 consistent_mask;
struct pci_dev *pdev;
- u32 scratch_pad_2;
+ u32 scratch_pad_1;
pdev = instance->pdev;
- consistent_mask = (instance->adapter_type == VENTURA_SERIES) ?
- DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
+ consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
+ DMA_BIT_MASK(63) : DMA_BIT_MASK(32);
if (IS_DMA64) {
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
goto fail_set_dma_mask;
- if ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) &&
+ if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
(dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
/*
* If 32 bit DMA mask fails, then try for 64 bit mask
* for FW capable of handling 64 bit DMA.
*/
- scratch_pad_2 = readl
- (&instance->reg_set->outbound_scratch_pad_2);
+ scratch_pad_1 = megasas_readl
+ (instance, &instance->reg_set->outbound_scratch_pad_1);
- if (!(scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
+ if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
goto fail_set_dma_mask;
else if (dma_set_mask_and_coherent(&pdev->dev,
- DMA_BIT_MASK(64)))
+ DMA_BIT_MASK(63)))
goto fail_set_dma_mask;
}
} else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
@@ -6074,8 +6236,8 @@ megasas_set_dma_mask(struct megasas_instance *instance)
instance->consistent_mask_64bit = true;
dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
- ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "64" : "32"),
- (instance->consistent_mask_64bit ? "64" : "32"));
+ ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
+ (instance->consistent_mask_64bit ? "63" : "32"));
return 0;
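Stripped of the FW capability check on scratch_pad_1, the negotiation above is a standard wide-then-narrow fallback; only the width is unusual, with Ventura and later asking for 63 bits rather than 64. A sketch (pdev as above, error path simplified):

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(63)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		goto fail_set_dma_mask;	/* neither mask accepted */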
@@ -6088,12 +6250,14 @@ fail_set_dma_mask:
/*
* megasas_set_adapter_type - Set adapter type.
* Supported controllers can be divided in
- * 4 categories- enum MR_ADAPTER_TYPE {
- * MFI_SERIES = 1,
- * THUNDERBOLT_SERIES = 2,
- * INVADER_SERIES = 3,
- * VENTURA_SERIES = 4,
- * };
+ * different categories-
+ * enum MR_ADAPTER_TYPE {
+ * MFI_SERIES = 1,
+ * THUNDERBOLT_SERIES = 2,
+ * INVADER_SERIES = 3,
+ * VENTURA_SERIES = 4,
+ * AERO_SERIES = 5,
+ * };
* @instance: Adapter soft state
* return: void
*/
@@ -6104,6 +6268,12 @@ static inline void megasas_set_adapter_type(struct megasas_instance *instance)
instance->adapter_type = MFI_SERIES;
} else {
switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_AERO_10E1:
+ case PCI_DEVICE_ID_LSI_AERO_10E2:
+ case PCI_DEVICE_ID_LSI_AERO_10E5:
+ case PCI_DEVICE_ID_LSI_AERO_10E6:
+ instance->adapter_type = AERO_SERIES;
+ break;
case PCI_DEVICE_ID_LSI_VENTURA:
case PCI_DEVICE_ID_LSI_CRUSADER:
case PCI_DEVICE_ID_LSI_HARPOON:
@@ -6171,6 +6341,7 @@ static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
if (megasas_alloc_mfi_ctrl_mem(instance))
goto fail;
break;
+ case AERO_SERIES:
case VENTURA_SERIES:
case THUNDERBOLT_SERIES:
case INVADER_SERIES:
@@ -6245,6 +6416,14 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
"Failed to allocate PD list buffer\n");
return -ENOMEM;
}
+
+ instance->snapdump_prop = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MR_SNAPDUMP_PROPERTIES),
+ &instance->snapdump_prop_h, GFP_KERNEL);
+
+ if (!instance->snapdump_prop)
+ dev_err(&pdev->dev,
+ "Failed to allocate snapdump properties buffer\n");
}
instance->pd_list_buf =
@@ -6388,6 +6567,12 @@ void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
instance->crash_dump_buf,
instance->crash_dump_h);
+
+ if (instance->snapdump_prop)
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct MR_SNAPDUMP_PROPERTIES),
+ instance->snapdump_prop,
+ instance->snapdump_prop_h);
}
/*
@@ -6434,12 +6619,10 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
instance->disableOnlineCtrlReset = 1;
instance->UnevenSpanSupport = 0;
- if (instance->adapter_type != MFI_SERIES) {
+ if (instance->adapter_type != MFI_SERIES)
INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
- INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
- } else {
+ else
INIT_WORK(&instance->work_init, process_fw_state_change_wq);
- }
}
/**
@@ -6455,6 +6638,13 @@ static int megasas_probe_one(struct pci_dev *pdev,
struct megasas_instance *instance;
u16 control = 0;
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_LSI_AERO_10E1:
+ case PCI_DEVICE_ID_LSI_AERO_10E5:
+ dev_info(&pdev->dev, "Adapter is in configurable secure mode\n");
+ break;
+ }
+
/* Reset MSI-X in the kdump kernel */
if (reset_devices) {
pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
@@ -6708,6 +6898,10 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
del_timer_sync(&instance->sriov_heartbeat_timer);
+ /* Stop the FW fault detection watchdog */
+ if (instance->adapter_type != MFI_SERIES)
+ megasas_fusion_stop_watchdog(instance);
+
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
@@ -6843,8 +7037,16 @@ megasas_resume(struct pci_dev *pdev)
if (megasas_start_aen(instance))
dev_err(&instance->pdev->dev, "Start AEN failed\n");
+ /* Re-launch FW fault watchdog */
+ if (instance->adapter_type != MFI_SERIES)
+ if (megasas_fusion_start_watchdog(instance) != SUCCESS)
+ goto fail_start_watchdog;
+
return 0;
+fail_start_watchdog:
+ if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+ del_timer_sync(&instance->sriov_heartbeat_timer);
fail_init_mfi:
megasas_free_ctrl_dma_buffers(instance);
megasas_free_ctrl_mem(instance);
@@ -6912,6 +7114,10 @@ static void megasas_detach_one(struct pci_dev *pdev)
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
del_timer_sync(&instance->sriov_heartbeat_timer);
+ /* Stop the FW fault detection watchdog */
+ if (instance->adapter_type != MFI_SERIES)
+ megasas_fusion_stop_watchdog(instance);
+
if (instance->fw_crash_state != UNAVAILABLE)
megasas_free_host_crash_buffer(instance);
scsi_remove_host(instance->host);
@@ -6956,7 +7162,7 @@ skip_firing_dcmds:
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
- if (instance->adapter_type == VENTURA_SERIES) {
+ if (instance->adapter_type >= VENTURA_SERIES) {
for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
kfree(fusion->stream_detect_by_ld[i]);
kfree(fusion->stream_detect_by_ld);
@@ -7737,8 +7943,15 @@ megasas_aen_polling(struct work_struct *work)
break;
case MR_EVT_CTRL_PROP_CHANGED:
- dcmd_ret = megasas_get_ctrl_info(instance);
- break;
+ dcmd_ret = megasas_get_ctrl_info(instance);
+ if (dcmd_ret == DCMD_SUCCESS &&
+ instance->snapdump_wait_time) {
+ megasas_get_snapdump_properties(instance);
+ dev_info(&instance->pdev->dev,
+ "Snap dump wait time\t: %d\n",
+ instance->snapdump_wait_time);
+ }
+ break;
default:
doscan = 0;
break;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 59ecbb3b53b5..87c2c0472c8f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -2,7 +2,8 @@
* Linux MegaRAID driver for SAS based RAID controllers
*
* Copyright (c) 2009-2013 LSI Corporation
- * Copyright (c) 2013-2014 Avago Technologies
+ * Copyright (c) 2013-2016 Avago Technologies
+ * Copyright (c) 2016-2018 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -19,17 +20,14 @@
*
* FILE: megaraid_sas_fp.c
*
- * Authors: Avago Technologies
+ * Authors: Broadcom Inc.
* Sumant Patro
* Varad Talamacki
* Manoj Jose
- * Kashyap Desai <kashyap.desai@avagotech.com>
- * Sumit Saxena <sumit.saxena@avagotech.com>
+ * Kashyap Desai <kashyap.desai@broadcom.com>
+ * Sumit Saxena <sumit.saxena@broadcom.com>
*
- * Send feedback to: megaraidlinux.pdl@avagotech.com
- *
- * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
- * San Jose, California 95131
+ * Send feedback to: megaraidlinux.pdl@broadcom.com
*/
#include <linux/kernel.h>
@@ -745,7 +743,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
*pDevHandle = MR_PdDevHandleGet(pd, map);
*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
/* get second pd also for raid 1/10 fast path writes*/
- if ((instance->adapter_type == VENTURA_SERIES) &&
+ if ((instance->adapter_type >= VENTURA_SERIES) &&
(raid->level == 1) &&
!io_info->isRead) {
r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
@@ -770,7 +768,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
}
*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
- if (instance->adapter_type == VENTURA_SERIES) {
+ if (instance->adapter_type >= VENTURA_SERIES) {
((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
io_info->span_arm =
@@ -861,7 +859,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
*pDevHandle = MR_PdDevHandleGet(pd, map);
*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
/* get second pd also for raid 1/10 fast path writes*/
- if ((instance->adapter_type == VENTURA_SERIES) &&
+ if ((instance->adapter_type >= VENTURA_SERIES) &&
(raid->level == 1) &&
!io_info->isRead) {
r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
@@ -888,7 +886,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
}
*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
- if (instance->adapter_type == VENTURA_SERIES) {
+ if (instance->adapter_type >= VENTURA_SERIES) {
((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
io_info->span_arm =
@@ -1266,7 +1264,7 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
ld = MR_TargetIdToLdGet(ldCount, drv_map);
- if (ld >= MAX_LOGICAL_DRIVES_EXT) {
+ if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) {
lbInfo[ldCount].loadBalanceFlag = 0;
continue;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index f74b5ea24f0f..211c17c33aa0 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -2,7 +2,8 @@
* Linux MegaRAID driver for SAS based RAID controllers
*
* Copyright (c) 2009-2013 LSI Corporation
- * Copyright (c) 2013-2014 Avago Technologies
+ * Copyright (c) 2013-2016 Avago Technologies
+ * Copyright (c) 2016-2018 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -19,16 +20,13 @@
*
* FILE: megaraid_sas_fusion.c
*
- * Authors: Avago Technologies
+ * Authors: Broadcom Inc.
* Sumant Patro
* Adam Radford
- * Kashyap Desai <kashyap.desai@avagotech.com>
- * Sumit Saxena <sumit.saxena@avagotech.com>
+ * Kashyap Desai <kashyap.desai@broadcom.com>
+ * Sumit Saxena <sumit.saxena@broadcom.com>
*
- * Send feedback to: megaraidlinux.pdl@avagotech.com
- *
- * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
- * San Jose, California 95131
+ * Send feedback to: megaraidlinux.pdl@broadcom.com
*/
#include <linux/kernel.h>
@@ -48,6 +46,7 @@
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -74,7 +73,7 @@ void
megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
int megasas_alloc_cmds(struct megasas_instance *instance);
int
-megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs);
+megasas_clear_intr_fusion(struct megasas_instance *instance);
int
megasas_issue_polled(struct megasas_instance *instance,
struct megasas_cmd *cmd);
@@ -95,6 +94,9 @@ static void megasas_free_rdpq_fusion(struct megasas_instance *instance);
static void megasas_free_reply_fusion(struct megasas_instance *instance);
static inline
void megasas_configure_queue_sizes(struct megasas_instance *instance);
+static void megasas_fusion_crash_dump(struct megasas_instance *instance);
+extern u32 megasas_readl(struct megasas_instance *instance,
+ const volatile void __iomem *addr);
/**
* megasas_check_same_4gb_region - check if allocation
@@ -165,9 +167,11 @@ megasas_disable_intr_fusion(struct megasas_instance *instance)
}
int
-megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs)
+megasas_clear_intr_fusion(struct megasas_instance *instance)
{
u32 status;
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
/*
* Check if it is our interrupt
*/
@@ -262,16 +266,17 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
reg_set = instance->reg_set;
- /* ventura FW does not fill outbound_scratch_pad_3 with queue depth */
+ /* ventura FW does not fill outbound_scratch_pad_2 with queue depth */
if (instance->adapter_type < VENTURA_SERIES)
cur_max_fw_cmds =
- readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;
+ megasas_readl(instance,
+ &instance->reg_set->outbound_scratch_pad_2) & 0x00FFFF;
if (dual_qdepth_disable || !cur_max_fw_cmds)
- cur_max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
+ cur_max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
else
ldio_threshold =
- (instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;
+ (instance->instancet->read_fw_status_reg(instance) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;
dev_info(&instance->pdev->dev,
"Current firmware supports maximum commands: %d\t LDIO threshold: %d\n",
@@ -807,10 +812,8 @@ megasas_free_rdpq_fusion(struct megasas_instance *instance) {
}
- if (fusion->reply_frames_desc_pool)
- dma_pool_destroy(fusion->reply_frames_desc_pool);
- if (fusion->reply_frames_desc_pool_align)
- dma_pool_destroy(fusion->reply_frames_desc_pool_align);
+ dma_pool_destroy(fusion->reply_frames_desc_pool);
+ dma_pool_destroy(fusion->reply_frames_desc_pool_align);
if (fusion->rdpq_virt)
dma_free_coherent(&instance->pdev->dev,
@@ -830,8 +833,7 @@ megasas_free_reply_fusion(struct megasas_instance *instance) {
fusion->reply_frames_desc[0],
fusion->reply_frames_desc_phys[0]);
- if (fusion->reply_frames_desc_pool)
- dma_pool_destroy(fusion->reply_frames_desc_pool);
+ dma_pool_destroy(fusion->reply_frames_desc_pool);
}
@@ -974,7 +976,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
struct megasas_header *frame_hdr;
const char *sys_info;
MFI_CAPABILITIES *drv_ops;
- u32 scratch_pad_2;
+ u32 scratch_pad_1;
ktime_t time;
bool cur_fw_64bit_dma_capable;
@@ -985,14 +987,14 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
cmd = fusion->ioc_init_cmd;
- scratch_pad_2 = readl
- (&instance->reg_set->outbound_scratch_pad_2);
+ scratch_pad_1 = megasas_readl
+ (instance, &instance->reg_set->outbound_scratch_pad_1);
- cur_rdpq_mode = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
+ cur_rdpq_mode = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
if (instance->adapter_type == INVADER_SERIES) {
cur_fw_64bit_dma_capable =
- (scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? true : false;
+ (scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? true : false;
if (instance->consistent_mask_64bit && !cur_fw_64bit_dma_capable) {
dev_err(&instance->pdev->dev, "Driver was operating on 64bit "
@@ -1010,7 +1012,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
goto fail_fw_init;
}
- instance->fw_sync_cache_support = (scratch_pad_2 &
+ instance->fw_sync_cache_support = (scratch_pad_1 &
MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
instance->fw_sync_cache_support ? "Yes" : "No");
@@ -1043,9 +1045,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
frame_hdr = &cmd->frame->hdr;
frame_hdr->cmd_status = 0xFF;
- frame_hdr->flags = cpu_to_le16(
- le16_to_cpu(frame_hdr->flags) |
- MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
+ frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
init_frame->cmd = MFI_CMD_INIT;
init_frame->cmd_status = 0xFF;
@@ -1107,7 +1107,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
instance->instancet->disable_intr(instance);
for (i = 0; i < (10 * 1000); i += 20) {
- if (readl(&instance->reg_set->doorbell) & 1)
+ if (megasas_readl(instance, &instance->reg_set->doorbell) & 1)
msleep(20);
else
break;
@@ -1115,7 +1115,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
megasas_fire_cmd_fusion(instance, &req_desc);
- wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
+ wait_and_poll(instance, cmd, MFI_IO_TIMEOUT_SECS);
frame_hdr = &cmd->frame->hdr;
if (frame_hdr->cmd_status != 0) {
@@ -1559,14 +1559,12 @@ void megasas_configure_queue_sizes(struct megasas_instance *instance)
fusion = instance->ctrl_context;
max_cmd = instance->max_fw_cmds;
- if (instance->adapter_type == VENTURA_SERIES)
+ if (instance->adapter_type >= VENTURA_SERIES)
instance->max_mpt_cmds = instance->max_fw_cmds * RAID_1_PEER_CMDS;
else
instance->max_mpt_cmds = instance->max_fw_cmds;
- instance->max_scsi_cmds = instance->max_fw_cmds -
- (MEGASAS_FUSION_INTERNAL_CMDS +
- MEGASAS_FUSION_IOCTL_CMDS);
+ instance->max_scsi_cmds = instance->max_fw_cmds - instance->max_mfi_cmds;
instance->cur_can_queue = instance->max_scsi_cmds;
instance->host->can_queue = instance->cur_can_queue;
@@ -1627,8 +1625,7 @@ static inline void megasas_free_ioc_init_cmd(struct megasas_instance *instance)
fusion->ioc_init_cmd->frame,
fusion->ioc_init_cmd->frame_phys_addr);
- if (fusion->ioc_init_cmd)
- kfree(fusion->ioc_init_cmd);
+ kfree(fusion->ioc_init_cmd);
}
/**
@@ -1642,7 +1639,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
{
struct megasas_register_set __iomem *reg_set;
struct fusion_context *fusion;
- u32 scratch_pad_2;
+ u32 scratch_pad_1;
int i = 0, count;
fusion = instance->ctrl_context;
@@ -1659,20 +1656,21 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
megasas_configure_queue_sizes(instance);
- scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);
- /* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
+ scratch_pad_1 = megasas_readl(instance,
+ &instance->reg_set->outbound_scratch_pad_1);
+ /* If scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
* Firmware support extended IO chain frame which is 4 times more than
* legacy Firmware.
* Legacy Firmware - Frame size is (8 * 128) = 1K
* 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K
*/
- if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
+ if (scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
instance->max_chain_frame_sz =
- ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
+ ((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO;
else
instance->max_chain_frame_sz =
- ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
+ ((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO;
if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) {
@@ -1760,6 +1758,90 @@ fail_alloc_mfi_cmds:
}
/**
+ * megasas_fault_detect_work - Worker function of
+ * FW fault handling workqueue.
+ */
+static void
+megasas_fault_detect_work(struct work_struct *work)
+{
+ struct megasas_instance *instance =
+ container_of(work, struct megasas_instance,
+ fw_fault_work.work);
+ u32 fw_state, dma_state, status;
+
+ /* Check the fw state */
+ fw_state = instance->instancet->read_fw_status_reg(instance) &
+ MFI_STATE_MASK;
+
+ if (fw_state == MFI_STATE_FAULT) {
+ dma_state = instance->instancet->read_fw_status_reg(instance) &
+ MFI_STATE_DMADONE;
+ /* Start collecting the crash dump if the FW's DMA-done bit is set */
+ if (instance->crash_dump_drv_support &&
+ instance->crash_dump_app_support && dma_state) {
+ megasas_fusion_crash_dump(instance);
+ } else {
+ if (instance->unload == 0) {
+ status = megasas_reset_fusion(instance->host, 0);
+ if (status != SUCCESS) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d, do not re-arm timer\n",
+ __func__, __LINE__);
+ return;
+ }
+ }
+ }
+ }
+
+ if (instance->fw_fault_work_q)
+ queue_delayed_work(instance->fw_fault_work_q,
+ &instance->fw_fault_work,
+ msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL));
+}
+
+int
+megasas_fusion_start_watchdog(struct megasas_instance *instance)
+{
+ /* Check if the Fault WQ is already started */
+ if (instance->fw_fault_work_q)
+ return SUCCESS;
+
+ INIT_DELAYED_WORK(&instance->fw_fault_work, megasas_fault_detect_work);
+
+ snprintf(instance->fault_handler_work_q_name,
+ sizeof(instance->fault_handler_work_q_name),
+ "poll_megasas%d_status", instance->host->host_no);
+
+ instance->fw_fault_work_q =
+ create_singlethread_workqueue(instance->fault_handler_work_q_name);
+ if (!instance->fw_fault_work_q) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ return FAILED;
+ }
+
+ queue_delayed_work(instance->fw_fault_work_q,
+ &instance->fw_fault_work,
+ msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL));
+
+ return SUCCESS;
+}
+
+void
+megasas_fusion_stop_watchdog(struct megasas_instance *instance)
+{
+ struct workqueue_struct *wq;
+
+ if (instance->fw_fault_work_q) {
+ wq = instance->fw_fault_work_q;
+ instance->fw_fault_work_q = NULL;
+ if (!cancel_delayed_work_sync(&instance->fw_fault_work))
+ flush_workqueue(wq);
+ destroy_workqueue(wq);
+ }
+}
+
+/**
* map_cmd_status - Maps FW cmd status to OS cmd status
* @cmd : Pointer to cmd
* @status : status of cmd returned by FW
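One detail of megasas_fusion_stop_watchdog() above is worth calling out: instance->fw_fault_work_q is cleared before the cancel, so a megasas_fault_detect_work() invocation that is mid-execution reads NULL and declines to re-arm itself; the flush_workqueue() on the false return of cancel_delayed_work_sync() then drains any re-queue that slipped in ahead of that store, and only afterwards is the workqueue destroyed.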
@@ -2543,19 +2625,22 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
u8 *raidLUN;
unsigned long spinlock_flags;
- union RAID_CONTEXT_UNION *praid_context;
struct MR_LD_RAID *raid = NULL;
struct MR_PRIV_DEVICE *mrdev_priv;
+ struct RAID_CONTEXT *rctx;
+ struct RAID_CONTEXT_G35 *rctx_g35;
device_id = MEGASAS_DEV_INDEX(scp);
fusion = instance->ctrl_context;
io_request = cmd->io_request;
- io_request->RaidContext.raid_context.virtual_disk_tgt_id =
- cpu_to_le16(device_id);
- io_request->RaidContext.raid_context.status = 0;
- io_request->RaidContext.raid_context.ex_status = 0;
+ rctx = &io_request->RaidContext.raid_context;
+ rctx_g35 = &io_request->RaidContext.raid_context_g35;
+
+ rctx->virtual_disk_tgt_id = cpu_to_le16(device_id);
+ rctx->status = 0;
+ rctx->ex_status = 0;
req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
@@ -2631,11 +2716,10 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
raid = MR_LdRaidGet(ld, local_map_ptr);
if (!raid || (!fusion->fast_path_io)) {
- io_request->RaidContext.raid_context.reg_lock_flags = 0;
+ rctx->reg_lock_flags = 0;
fp_possible = false;
} else {
- if (MR_BuildRaidContext(instance, &io_info,
- &io_request->RaidContext.raid_context,
+ if (MR_BuildRaidContext(instance, &io_info, rctx,
local_map_ptr, &raidLUN))
fp_possible = (io_info.fpOkForIo > 0) ? true : false;
}
@@ -2643,9 +2727,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
cmd->request_desc->SCSIIO.MSIxIndex =
instance->reply_map[raw_smp_processor_id()];
- praid_context = &io_request->RaidContext;
-
- if (instance->adapter_type == VENTURA_SERIES) {
+ if (instance->adapter_type >= VENTURA_SERIES) {
/* FP for Optimal raid level 1.
* All large RAID-1 writes (> 32 KiB, both WT and WB modes)
* are built by the driver as LD I/Os.
@@ -2681,17 +2763,17 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
/* In ventura if stream detected for a read and it is
* read ahead capable make this IO as LDIO
*/
- if (is_stream_detected(&io_request->RaidContext.raid_context_g35))
+ if (is_stream_detected(rctx_g35))
fp_possible = false;
}
/* If raid is NULL, set CPU affinity to default CPU0 */
if (raid)
- megasas_set_raidflag_cpu_affinity(praid_context,
+ megasas_set_raidflag_cpu_affinity(&io_request->RaidContext,
raid, fp_possible, io_info.isRead,
scsi_buff_len);
else
- praid_context->raid_context_g35.routing_flags |=
+ rctx_g35->routing_flags |=
(MR_RAID_CTX_CPUSEL_0 << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT);
}
@@ -2703,25 +2785,20 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
(MPI2_REQ_DESCRIPT_FLAGS_FP_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
if (instance->adapter_type == INVADER_SERIES) {
- if (io_request->RaidContext.raid_context.reg_lock_flags ==
- REGION_TYPE_UNUSED)
+ if (rctx->reg_lock_flags == REGION_TYPE_UNUSED)
cmd->request_desc->SCSIIO.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- io_request->RaidContext.raid_context.type
- = MPI2_TYPE_CUDA;
- io_request->RaidContext.raid_context.nseg = 0x1;
+ rctx->type = MPI2_TYPE_CUDA;
+ rctx->nseg = 0x1;
io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
- io_request->RaidContext.raid_context.reg_lock_flags |=
+ rctx->reg_lock_flags |=
(MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
MR_RL_FLAGS_SEQ_NUM_ENABLE);
- } else if (instance->adapter_type == VENTURA_SERIES) {
- io_request->RaidContext.raid_context_g35.nseg_type |=
- (1 << RAID_CONTEXT_NSEG_SHIFT);
- io_request->RaidContext.raid_context_g35.nseg_type |=
- (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
- io_request->RaidContext.raid_context_g35.routing_flags |=
- (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
+ } else if (instance->adapter_type >= VENTURA_SERIES) {
+ rctx_g35->nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT);
+ rctx_g35->nseg_type |= (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
+ rctx_g35->routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
io_request->IoFlags |=
cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
}
@@ -2734,17 +2811,15 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
&io_info, local_map_ptr);
scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
cmd->pd_r1_lb = io_info.pd_after_lb;
- if (instance->adapter_type == VENTURA_SERIES)
- io_request->RaidContext.raid_context_g35.span_arm
- = io_info.span_arm;
+ if (instance->adapter_type >= VENTURA_SERIES)
+ rctx_g35->span_arm = io_info.span_arm;
else
- io_request->RaidContext.raid_context.span_arm
- = io_info.span_arm;
+ rctx->span_arm = io_info.span_arm;
} else
scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
- if (instance->adapter_type == VENTURA_SERIES)
+ if (instance->adapter_type >= VENTURA_SERIES)
cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
else
cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
@@ -2762,31 +2837,26 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
/* populate the LUN field */
memcpy(io_request->LUN, raidLUN, 8);
} else {
- io_request->RaidContext.raid_context.timeout_value =
+ rctx->timeout_value =
cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
cmd->request_desc->SCSIIO.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
if (instance->adapter_type == INVADER_SERIES) {
if (io_info.do_fp_rlbypass ||
- (io_request->RaidContext.raid_context.reg_lock_flags
- == REGION_TYPE_UNUSED))
+ (rctx->reg_lock_flags == REGION_TYPE_UNUSED))
cmd->request_desc->SCSIIO.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- io_request->RaidContext.raid_context.type
- = MPI2_TYPE_CUDA;
- io_request->RaidContext.raid_context.reg_lock_flags |=
+ rctx->type = MPI2_TYPE_CUDA;
+ rctx->reg_lock_flags |=
(MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
- MR_RL_FLAGS_SEQ_NUM_ENABLE);
- io_request->RaidContext.raid_context.nseg = 0x1;
- } else if (instance->adapter_type == VENTURA_SERIES) {
- io_request->RaidContext.raid_context_g35.routing_flags |=
- (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
- io_request->RaidContext.raid_context_g35.nseg_type |=
- (1 << RAID_CONTEXT_NSEG_SHIFT);
- io_request->RaidContext.raid_context_g35.nseg_type |=
- (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
+ MR_RL_FLAGS_SEQ_NUM_ENABLE);
+ rctx->nseg = 0x1;
+ } else if (instance->adapter_type >= VENTURA_SERIES) {
+ rctx_g35->routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
+ rctx_g35->nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT);
+ rctx_g35->nseg_type |= (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
}
io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
io_request->DevHandle = cpu_to_le16(device_id);
@@ -2832,7 +2902,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
device_id < instance->fw_supported_vd_count)) {
ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
- if (ld >= instance->fw_supported_vd_count)
+ if (ld >= instance->fw_supported_vd_count - 1)
fp_possible = 0;
else {
raid = MR_LdRaidGet(ld, local_map_ptr);
@@ -2855,7 +2925,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
/* set RAID context values */
pRAID_Context->config_seq_num = raid->seqNum;
- if (instance->adapter_type != VENTURA_SERIES)
+ if (instance->adapter_type < VENTURA_SERIES)
pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ;
pRAID_Context->timeout_value =
cpu_to_le16(raid->fpIoTimeoutForLd);
@@ -2940,7 +3010,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum;
io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
- if (instance->adapter_type == VENTURA_SERIES) {
+ if (instance->adapter_type >= VENTURA_SERIES) {
io_request->RaidContext.raid_context_g35.routing_flags |=
(1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
io_request->RaidContext.raid_context_g35.nseg_type |=
@@ -3073,7 +3143,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
return 1;
}
- if (instance->adapter_type == VENTURA_SERIES) {
+ if (instance->adapter_type >= VENTURA_SERIES) {
set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count);
cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags);
cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type);
@@ -3385,7 +3455,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
}
- //Fall thru and complete IO
+ /* Fall through - and complete IO */
case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
atomic_dec(&instance->fw_outstanding);
if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
@@ -3501,18 +3571,13 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
{
struct megasas_instance *instance =
(struct megasas_instance *)instance_addr;
- unsigned long flags;
u32 count, MSIxIndex;
count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
/* If we have already declared adapter dead, do not complete cmds */
- spin_lock_irqsave(&instance->hba_lock, flags);
- if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
return;
- }
- spin_unlock_irqrestore(&instance->hba_lock, flags);
for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
complete_cmd_fusion(instance, MSIxIndex);
@@ -3525,48 +3590,24 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
{
struct megasas_irq_context *irq_context = devp;
struct megasas_instance *instance = irq_context->instance;
- u32 mfiStatus, fw_state, dma_state;
+ u32 mfiStatus;
if (instance->mask_interrupts)
return IRQ_NONE;
if (!instance->msix_vectors) {
- mfiStatus = instance->instancet->clear_intr(instance->reg_set);
+ mfiStatus = instance->instancet->clear_intr(instance);
if (!mfiStatus)
return IRQ_NONE;
}
/* If we are resetting, bail */
if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
- instance->instancet->clear_intr(instance->reg_set);
+ instance->instancet->clear_intr(instance);
return IRQ_HANDLED;
}
- if (!complete_cmd_fusion(instance, irq_context->MSIxIndex)) {
- instance->instancet->clear_intr(instance->reg_set);
- /* If we didn't complete any commands, check for FW fault */
- fw_state = instance->instancet->read_fw_status_reg(
- instance->reg_set) & MFI_STATE_MASK;
- dma_state = instance->instancet->read_fw_status_reg
- (instance->reg_set) & MFI_STATE_DMADONE;
- if (instance->crash_dump_drv_support &&
- instance->crash_dump_app_support) {
- /* Start collecting crash, if DMA bit is done */
- if ((fw_state == MFI_STATE_FAULT) && dma_state)
- schedule_work(&instance->crash_init);
- else if (fw_state == MFI_STATE_FAULT) {
- if (instance->unload == 0)
- schedule_work(&instance->work_init);
- }
- } else if (fw_state == MFI_STATE_FAULT) {
- dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt"
- "for scsi%d\n", instance->host->host_no);
- if (instance->unload == 0)
- schedule_work(&instance->work_init);
- }
- }
-
- return IRQ_HANDLED;
+ return complete_cmd_fusion(instance, irq_context->MSIxIndex);
}
/**
@@ -3692,9 +3733,9 @@ megasas_release_fusion(struct megasas_instance *instance)
* @regs: MFI register set
*/
static u32
-megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs)
+megasas_read_fw_status_reg_fusion(struct megasas_instance *instance)
{
- return readl(&(regs)->outbound_scratch_pad);
+ return megasas_readl(instance, &instance->reg_set->outbound_scratch_pad_0);
}
/**
@@ -3756,11 +3797,12 @@ megasas_adp_reset_fusion(struct megasas_instance *instance,
writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
/* Check that the diag write enable (DRWE) bit is on */
- host_diag = readl(&instance->reg_set->fusion_host_diag);
+ host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag);
retry = 0;
while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
msleep(100);
- host_diag = readl(&instance->reg_set->fusion_host_diag);
+ host_diag = megasas_readl(instance,
+ &instance->reg_set->fusion_host_diag);
if (retry++ == 100) {
dev_warn(&instance->pdev->dev,
"Host diag unlock failed from %s %d\n",
@@ -3777,11 +3819,12 @@ megasas_adp_reset_fusion(struct megasas_instance *instance,
msleep(3000);
/* Make sure reset adapter bit is cleared */
- host_diag = readl(&instance->reg_set->fusion_host_diag);
+ host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag);
retry = 0;
while (host_diag & HOST_DIAG_RESET_ADAPTER) {
msleep(100);
- host_diag = readl(&instance->reg_set->fusion_host_diag);
+ host_diag = megasas_readl(instance,
+ &instance->reg_set->fusion_host_diag);
if (retry++ == 1000) {
dev_warn(&instance->pdev->dev,
"Diag reset adapter never cleared %s %d\n",
@@ -3792,14 +3835,14 @@ megasas_adp_reset_fusion(struct megasas_instance *instance,
if (host_diag & HOST_DIAG_RESET_ADAPTER)
return -1;
- abs_state = instance->instancet->read_fw_status_reg(instance->reg_set)
+ abs_state = instance->instancet->read_fw_status_reg(instance)
& MFI_STATE_MASK;
retry = 0;
while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
msleep(100);
abs_state = instance->instancet->
- read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
+ read_fw_status_reg(instance) & MFI_STATE_MASK;
}
if (abs_state <= MFI_STATE_FW_INIT) {
dev_warn(&instance->pdev->dev,
@@ -3822,17 +3865,60 @@ megasas_check_reset_fusion(struct megasas_instance *instance,
return 0;
}
+/**
+ * megasas_trigger_snap_dump - Trigger snap dump in FW
+ * @instance: Soft instance of adapter
+ */
+static inline void megasas_trigger_snap_dump(struct megasas_instance *instance)
+{
+ int j;
+ u32 fw_state;
+
+ if (!instance->disableOnlineCtrlReset) {
+ dev_info(&instance->pdev->dev, "Trigger snap dump\n");
+ writel(MFI_ADP_TRIGGER_SNAP_DUMP,
+ &instance->reg_set->doorbell);
+ readl(&instance->reg_set->doorbell);
+ }
+
+ for (j = 0; j < instance->snapdump_wait_time; j++) {
+ fw_state = instance->instancet->read_fw_status_reg(instance) &
+ MFI_STATE_MASK;
+ if (fw_state == MFI_STATE_FAULT) {
+ dev_err(&instance->pdev->dev,
+ "Found FW in FAULT state, after snap dump trigger\n");
+ return;
+ }
+ msleep(1000);
+ }
+}
+
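megasas_trigger_snap_dump() above uses the usual posted-write flush idiom on the doorbell: MMIO writes may be buffered, so the readl() that follows the writel() forces the doorbell write out to the adapter before the driver starts polling the FW state register. The idiom in isolation (hypothetical helper name, sketch only):

static void ex_ring_doorbell(void __iomem *doorbell, u32 val)
{
	writel(val, doorbell);	/* posted write, may still be buffered */
	readl(doorbell);	/* read back to flush it to the device */
}
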
/* This function waits for outstanding commands on fusion to complete */
int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
int reason, int *convert)
{
int i, outstanding, retval = 0, hb_seconds_missed = 0;
u32 fw_state;
+ u32 waittime_for_io_completion;
+
+ waittime_for_io_completion =
+ min_t(u32, resetwaittime,
+ (resetwaittime - instance->snapdump_wait_time));
- for (i = 0; i < resetwaittime; i++) {
+ if (reason == MFI_IO_TIMEOUT_OCR) {
+ dev_info(&instance->pdev->dev,
+ "MFI command is timed out\n");
+ megasas_complete_cmd_dpc_fusion((unsigned long)instance);
+ if (instance->snapdump_wait_time)
+ megasas_trigger_snap_dump(instance);
+ retval = 1;
+ goto out;
+ }
+
+ for (i = 0; i < waittime_for_io_completion; i++) {
/* Check if firmware is in fault state */
- fw_state = instance->instancet->read_fw_status_reg(
- instance->reg_set) & MFI_STATE_MASK;
+ fw_state = instance->instancet->read_fw_status_reg(instance) &
+ MFI_STATE_MASK;
if (fw_state == MFI_STATE_FAULT) {
dev_warn(&instance->pdev->dev, "Found FW in FAULT state,"
" will reset adapter scsi%d.\n",
@@ -3850,13 +3936,6 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
goto out;
}
- if (reason == MFI_IO_TIMEOUT_OCR) {
- dev_info(&instance->pdev->dev,
- "MFI IO is timed out, initiating OCR\n");
- megasas_complete_cmd_dpc_fusion((unsigned long)instance);
- retval = 1;
- goto out;
- }
/* If SR-IOV VF mode & heartbeat timeout, don't wait */
if (instance->requestorId && !reason) {
@@ -3901,6 +3980,12 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
msleep(1000);
}
+ if (instance->snapdump_wait_time) {
+ megasas_trigger_snap_dump(instance);
+ retval = 1;
+ goto out;
+ }
+
if (atomic_read(&instance->fw_outstanding)) {
dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
"will reset adapter scsi%d.\n",
@@ -3908,6 +3993,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
*convert = 1;
retval = 1;
}
+
out:
return retval;
}
@@ -4518,7 +4604,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
mutex_unlock(&instance->reset_mutex);
return FAILED;
}
- status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
+ status_reg = instance->instancet->read_fw_status_reg(instance);
abs_state = status_reg & MFI_STATE_MASK;
/* IO timeout detected, forcibly put FW in FAULT state */
@@ -4527,7 +4613,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
dev_info(&instance->pdev->dev, "IO/DCMD timeout is detected, "
"forcibly FAULT Firmware\n");
atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
- status_reg = readl(&instance->reg_set->doorbell);
+ status_reg = megasas_readl(instance, &instance->reg_set->doorbell);
writel(status_reg | MFI_STATE_FORCE_OCR,
&instance->reg_set->doorbell);
readl(&instance->reg_set->doorbell);
@@ -4578,7 +4664,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
for (i = 0 ; i < instance->max_scsi_cmds; i++) {
cmd_fusion = fusion->cmd_list[i];
/*check for extra commands issued by driver*/
- if (instance->adapter_type == VENTURA_SERIES) {
+ if (instance->adapter_type >= VENTURA_SERIES) {
r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds];
megasas_return_cmd_fusion(instance, r1_cmd);
}
@@ -4605,8 +4691,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
atomic_set(&instance->fw_outstanding, 0);
- status_reg = instance->instancet->read_fw_status_reg(
- instance->reg_set);
+ status_reg = instance->instancet->read_fw_status_reg(instance);
abs_state = status_reg & MFI_STATE_MASK;
reset_adapter = status_reg & MFI_RESET_ADAPTER;
if (instance->disableOnlineCtrlReset ||
@@ -4677,7 +4762,7 @@ transition_to_ready:
megasas_setup_jbod_map(instance);
/* reset stream detection array */
- if (instance->adapter_type == VENTURA_SERIES) {
+ if (instance->adapter_type >= VENTURA_SERIES) {
for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
memset(fusion->stream_detect_by_ld[j],
0, sizeof(struct LD_STREAM_DETECT));
@@ -4721,6 +4806,13 @@ transition_to_ready:
megasas_set_crash_dump_params(instance,
MR_CRASH_BUF_TURN_OFF);
+ if (instance->snapdump_wait_time) {
+ megasas_get_snapdump_properties(instance);
+ dev_info(&instance->pdev->dev,
+ "Snap dump wait time\t: %d\n",
+ instance->snapdump_wait_time);
+ }
+
retval = SUCCESS;
/* Adapter reset completed successfully */
@@ -4752,16 +4844,15 @@ out:
return retval;
}
-/* Fusion Crash dump collection work queue */
-void megasas_fusion_crash_dump_wq(struct work_struct *work)
+/* Fusion Crash dump collection */
+void megasas_fusion_crash_dump(struct megasas_instance *instance)
{
- struct megasas_instance *instance =
- container_of(work, struct megasas_instance, crash_init);
u32 status_reg;
u8 partial_copy = 0;
+ int wait = 0;
- status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
+ status_reg = instance->instancet->read_fw_status_reg(instance);
/*
* Allocate host crash buffers to copy data from 1 MB DMA crash buffer
@@ -4777,8 +4868,8 @@ void megasas_fusion_crash_dump_wq(struct work_struct *work)
"crash dump and initiating OCR\n");
status_reg |= MFI_STATE_CRASH_DUMP_DONE;
writel(status_reg,
- &instance->reg_set->outbound_scratch_pad);
- readl(&instance->reg_set->outbound_scratch_pad);
+ &instance->reg_set->outbound_scratch_pad_0);
+ readl(&instance->reg_set->outbound_scratch_pad_0);
return;
}
megasas_alloc_host_crash_buffer(instance);
@@ -4786,21 +4877,41 @@ void megasas_fusion_crash_dump_wq(struct work_struct *work)
"allocated: %d\n", instance->drv_buf_alloc);
}
- /*
- * Driver has allocated max buffers, which can be allocated
- * and FW has more crash dump data, then driver will
- * ignore the data.
- */
- if (instance->drv_buf_index >= (instance->drv_buf_alloc)) {
- dev_info(&instance->pdev->dev, "Driver is done copying "
- "the buffer: %d\n", instance->drv_buf_alloc);
- status_reg |= MFI_STATE_CRASH_DUMP_DONE;
- partial_copy = 1;
- } else {
- memcpy(instance->crash_buf[instance->drv_buf_index],
- instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
- instance->drv_buf_index++;
- status_reg &= ~MFI_STATE_DMADONE;
+ while (!(status_reg & MFI_STATE_CRASH_DUMP_DONE) &&
+ (wait < MEGASAS_WATCHDOG_WAIT_COUNT)) {
+ if (!(status_reg & MFI_STATE_DMADONE)) {
+ /*
+ * Next crash dump buffer is not yet DMA'd by FW.
+ * Poll again after MEGASAS_WAIT_FOR_NEXT_DMA_MSECS and
+ * bail out once MEGASAS_WATCHDOG_WAIT_COUNT polls pass
+ * without FW posting a new buffer.
+ */
+ wait++;
+ msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS);
+ status_reg = instance->instancet->read_fw_status_reg(
+ instance);
+ continue;
+ }
+
+ wait = 0;
+ if (instance->drv_buf_index >= instance->drv_buf_alloc) {
+ dev_info(&instance->pdev->dev,
+ "Driver is done copying the buffer: %d\n",
+ instance->drv_buf_alloc);
+ status_reg |= MFI_STATE_CRASH_DUMP_DONE;
+ partial_copy = 1;
+ break;
+ } else {
+ memcpy(instance->crash_buf[instance->drv_buf_index],
+ instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
+ instance->drv_buf_index++;
+ status_reg &= ~MFI_STATE_DMADONE;
+ }
+
+ writel(status_reg, &instance->reg_set->outbound_scratch_pad_0);
+ readl(&instance->reg_set->outbound_scratch_pad_0);
+
+ msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS);
+ status_reg = instance->instancet->read_fw_status_reg(instance);
}
if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
@@ -4809,13 +4920,10 @@ void megasas_fusion_crash_dump_wq(struct work_struct *work)
instance->fw_crash_buffer_size = instance->drv_buf_index;
instance->fw_crash_state = AVAILABLE;
instance->drv_buf_index = 0;
- writel(status_reg, &instance->reg_set->outbound_scratch_pad);
- readl(&instance->reg_set->outbound_scratch_pad);
+ writel(status_reg, &instance->reg_set->outbound_scratch_pad_0);
+ readl(&instance->reg_set->outbound_scratch_pad_0);
if (!partial_copy)
megasas_reset_fusion(instance->host, 0);
- } else {
- writel(status_reg, &instance->reg_set->outbound_scratch_pad);
- readl(&instance->reg_set->outbound_scratch_pad);
}
}
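
The rewritten copy loop above replaces the old one-chunk-per-interrupt scheme with a polled producer/consumer handshake over the scratch-pad register: FW sets MFI_STATE_DMADONE when a 1 MB chunk is ready, the driver copies it and writes the status back with DMADONE cleared, and FW raises MFI_STATE_CRASH_DUMP_DONE once no chunks remain. A skeleton of the consumer side, with hypothetical ex_* helpers and constants (illustrative only):

static void ex_drain_dump(struct ex_dev *dev)
{
	u32 status = ex_read_status(dev);
	int polls = 0;

	while (!(status & EX_DUMP_DONE) && polls < EX_MAX_POLLS) {
		if (!(status & EX_DMA_DONE)) {	/* producer not ready yet */
			polls++;
			msleep(EX_POLL_MSECS);
			status = ex_read_status(dev);
			continue;
		}
		polls = 0;
		ex_copy_chunk(dev);		/* consume one chunk */
		status &= ~EX_DMA_DONE;		/* hand the buffer back */
		ex_write_status(dev, status);
		msleep(EX_POLL_MSECS);
		status = ex_read_status(dev);
	}
}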
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 8e5ebee6517f..ca73c50fe723 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -2,7 +2,8 @@
* Linux MegaRAID driver for SAS based RAID controllers
*
* Copyright (c) 2009-2013 LSI Corporation
- * Copyright (c) 2013-2014 Avago Technologies
+ * Copyright (c) 2013-2016 Avago Technologies
+ * Copyright (c) 2016-2018 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -19,16 +20,13 @@
*
* FILE: megaraid_sas_fusion.h
*
- * Authors: Avago Technologies
+ * Authors: Broadcom Inc.
* Manoj Jose
* Sumant Patro
- * Kashyap Desai <kashyap.desai@avagotech.com>
- * Sumit Saxena <sumit.saxena@avagotech.com>
+ * Kashyap Desai <kashyap.desai@broadcom.com>
+ * Sumit Saxena <sumit.saxena@broadcom.com>
*
- * Send feedback to: megaraidlinux.pdl@avagotech.com
- *
- * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
- * San Jose, California 95131
+ * Send feedback to: megaraidlinux.pdl@broadcom.com
*/
#ifndef _MEGARAID_SAS_FUSION_H_
@@ -725,6 +723,7 @@ struct MPI2_IOC_INIT_REQUEST {
#define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc*/
#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200
#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200
+#define MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES 0x01200100
struct MR_DEV_HANDLE_INFO {
__le16 curDevHdl;
@@ -1063,6 +1062,9 @@ struct MR_FW_RAID_MAP_DYNAMIC {
#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08)
#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10)
+#define MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME 15
+#define MEGASAS_MAX_SNAP_DUMP_WAIT_TIME 60
+
struct megasas_register_set;
struct megasas_instance;
@@ -1350,6 +1352,14 @@ enum CMD_RET_VALUES {
RETURN_CMD = 3,
};
+struct MR_SNAPDUMP_PROPERTIES {
+ u8 offload_num;
+ u8 max_num_supported;
+ u8 cur_num_supported;
+ u8 trigger_min_num_sec_before_ocr;
+ u8 reserved[12];
+};
+
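MR_SNAPDUMP_PROPERTIES mirrors a FW DCMD payload, so its layout is presumably fixed by the interface: four one-byte fields plus 12 reserved bytes, 16 bytes total. If desired, that can be pinned at compile time (C11, illustrative only):

_Static_assert(sizeof(struct MR_SNAPDUMP_PROPERTIES) == 16,
	       "snap dump properties must stay 16 bytes on the wire");
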
void megasas_free_cmds_fusion(struct megasas_instance *instance);
int megasas_ioc_init_fusion(struct megasas_instance *instance);
u8 megasas_get_map_info(struct megasas_instance *instance);
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index ec6940f2fcb3..f3e182eb0970 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1838,7 +1838,7 @@ static struct scsi_host_template mesh_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
- .use_clustering = DISABLE_CLUSTERING,
+ .max_segment_size = 65535,
};
static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index 1e45268a78fc..7efd17a3c25b 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright 2000-2015 Avago Technologies. All rights reserved.
+ * Copyright 2000-2020 Broadcom Inc. All rights reserved.
*
*
* Name: mpi2.h
@@ -9,7 +9,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.50
+ * mpi2.h Version: 02.00.53
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -116,7 +116,12 @@
* 02-03-17 02.00.48 Bumped MPI2_HEADER_VERSION_UNIT.
* 06-13-17 02.00.49 Bumped MPI2_HEADER_VERSION_UNIT.
* 09-29-17 02.00.50 Bumped MPI2_HEADER_VERSION_UNIT.
- * --------------------------------------------------------------------------
+ * 07-22-18 02.00.51 Added SECURE_BOOT define.
+ * Bumped MPI2_HEADER_VERSION_UNIT
+ * 08-15-18 02.00.52 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-28-18 02.00.53 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_IOCSTATUS_FAILURE
+ * --------------------------------------------------------------------------
*/
#ifndef MPI2_H
@@ -156,7 +161,7 @@
/* Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x32)
+#define MPI2_HEADER_VERSION_UNIT (0x35)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -257,6 +262,8 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS {
*/
#define MPI2_HOST_DIAGNOSTIC_OFFSET (0x00000008)
+#define MPI26_DIAG_SECURE_BOOT (0x80000000)
+
#define MPI2_DIAG_SBR_RELOAD (0x00002000)
#define MPI2_DIAG_BOOT_DEVICE_SELECT_MASK (0x00001800)
@@ -687,7 +694,9 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
#define MPI2_IOCSTATUS_INVALID_FIELD (0x0007)
#define MPI2_IOCSTATUS_INVALID_STATE (0x0008)
#define MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009)
+/*MPI v2.6 and later */
#define MPI2_IOCSTATUS_INSUFFICIENT_POWER (0x000A)
+#define MPI2_IOCSTATUS_FAILURE (0x000F)
/****************************************************************************
* Config IOCStatus values
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 5122920a961a..398fa6fde960 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -1,13 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright 2000-2015 Avago Technologies. All rights reserved.
+ * Copyright 2000-2020 Broadcom Inc. All rights reserved.
*
*
* Name: mpi2_cnfg.h
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.42
+ * mpi2_cnfg.h Version: 02.00.46
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -231,6 +231,18 @@
* Added NOIOB field to PCIe Device Page 2.
* Added MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN to
* the Capabilities field of PCIe Device Page 2.
+ * 07-22-18 02.00.43 Added defines for SAS3916 and SAS3816.
+ * Added WRiteCache defines to IO Unit Page 1.
+ * Added MaxEnclosureLevel to BIOS Page 1.
+ * Added OEMRD to SAS Enclosure Page 1.
+ * Added DMDReportPCIe to PCIe IO Unit Page 1.
+ * Added Flags field and flags for Retimers to
+ * PCIe Switch Page 1.
+ * 08-02-18 02.00.44 Added Slotx2, Slotx4 to ManPage 7.
+ * 08-15-18 02.00.45 Added ProductSpecific field at end of IOC Page 1
+ * 08-28-18 02.00.46 Added NVMs Write Cache flag to IOUnitPage1
+ * Added DMDReport Delay Time defines to
+ * PCIeIOUnitPage1
* --------------------------------------------------------------------------
*/
@@ -568,8 +580,17 @@ typedef struct _MPI2_CONFIG_REPLY {
#define MPI26_MFGPAGE_DEVID_SAS3616 (0x00D1)
#define MPI26_MFGPAGE_DEVID_SAS3708 (0x00D2)
-#define MPI26_MFGPAGE_DEVID_SAS3816 (0x00A1)
-#define MPI26_MFGPAGE_DEVID_SAS3916 (0x00A0)
+#define MPI26_MFGPAGE_DEVID_SEC_MASK_3916 (0x0003)
+#define MPI26_MFGPAGE_DEVID_INVALID0_3916 (0x00E0)
+#define MPI26_MFGPAGE_DEVID_CFG_SEC_3916 (0x00E1)
+#define MPI26_MFGPAGE_DEVID_HARD_SEC_3916 (0x00E2)
+#define MPI26_MFGPAGE_DEVID_INVALID1_3916 (0x00E3)
+
+#define MPI26_MFGPAGE_DEVID_SEC_MASK_3816 (0x0003)
+#define MPI26_MFGPAGE_DEVID_INVALID0_3816 (0x00E4)
+#define MPI26_MFGPAGE_DEVID_CFG_SEC_3816 (0x00E5)
+#define MPI26_MFGPAGE_DEVID_HARD_SEC_3816 (0x00E6)
+#define MPI26_MFGPAGE_DEVID_INVALID1_3816 (0x00E7)
/*Manufacturing Page 0 */
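The replacement SAS3916/SAS3816 IDs encode the security variant in the low two bits, which is what the new _SEC_MASK defines select: 0x00E0-0x00E3 form the 3916 block and 0x00E4-0x00E7 the 3816 block. A hypothetical classifier using the mask (sketch, not driver code):

static const char *ex_sas3916_variant(u16 devid)
{
	if ((devid & ~MPI26_MFGPAGE_DEVID_SEC_MASK_3916) !=
	    (MPI26_MFGPAGE_DEVID_INVALID0_3916 &
	     ~MPI26_MFGPAGE_DEVID_SEC_MASK_3916))
		return "not a SAS3916";

	switch (devid) {
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
		return "config-secured";
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
		return "hard-secured";
	default:
		return "invalid variant";
	}
}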
@@ -932,7 +953,11 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 {
#define MPI2_IOUNITPAGE1_PAGEVERSION (0x04)
-/*IO Unit Page 1 Flags defines */
+/* IO Unit Page 1 Flags defines */
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_MASK (0x00030000)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_ENABLE (0x00000000)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_DISABLE (0x00010000)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_NO_CHANGE (0x00020000)
#define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK (0x00004000)
#define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE (0x00002000)
#define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH (0x00001000)
@@ -1511,7 +1536,7 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1 {
U32 BiosOptions; /*0x04 */
U32 IOCSettings; /*0x08 */
U8 SSUTimeout; /*0x0C */
- U8 Reserved1; /*0x0D */
+ U8 MaxEnclosureLevel; /*0x0D */
U16 Reserved2; /*0x0E */
U32 DeviceSettings; /*0x10 */
U16 NumberOfDevices; /*0x14 */
@@ -1531,7 +1556,6 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1 {
#define MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG (0x00004000)
#define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK (0x00003800)
-#define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK (0x00003800)
#define MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL (0x00000000)
#define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE (0x00000800)
#define MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID (0x00001000)
@@ -3271,10 +3295,12 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 {
U16 NumSlots; /*0x18 */
U16 StartSlot; /*0x1A */
U8 ChassisSlot; /*0x1C */
- U8 EnclosureLeve; /*0x1D */
+ U8 EnclosureLevel; /*0x1D */
U16 SEPDevHandle; /*0x1E */
- U32 Reserved3; /*0x20 */
- U32 Reserved4; /*0x24 */
+ U8 OEMRD; /*0x20 */
+ U8 Reserved1a; /*0x21 */
+ U16 Reserved2; /*0x22 */
+ U32 Reserved3; /*0x24 */
} MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
*PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t,
@@ -3285,6 +3311,8 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 {
#define MPI2_SASENCLOSURE0_PAGEVERSION (0x04)
/*values for SAS Enclosure Page 0 Flags field */
+#define MPI26_SAS_ENCLS0_FLAGS_OEMRD_VALID (0x0080)
+#define MPI26_SAS_ENCLS0_FLAGS_OEMRD_COLLECTING (0x0040)
#define MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020)
#define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F)
@@ -3298,6 +3326,8 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 {
#define MPI26_ENCLOSURE0_PAGEVERSION (0x04)
/*Values for Enclosure Page 0 Flags field */
+#define MPI26_ENCLS0_FLAGS_OEMRD_VALID (0x0080)
+#define MPI26_ENCLS0_FLAGS_OEMRD_COLLECTING (0x0040)
#define MPI26_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020)
#define MPI26_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010)
#define MPI26_ENCLS0_FLAGS_MNG_MASK (0x000F)
@@ -3696,8 +3726,9 @@ typedef struct _MPI26_PCIE_IO_UNIT1_PHY_DATA {
Mpi26PCIeIOUnit1PhyData_t, *pMpi26PCIeIOUnit1PhyData_t;
/*values for LinkFlags */
-#define MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SRIS (0x00)
-#define MPI26_PCIEIOUNIT1_LINKFLAGS_EN_SRIS (0x01)
+#define MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SEPARATE_REFCLK (0x00)
+#define MPI26_PCIEIOUNIT1_LINKFLAGS_SRIS_EN (0x01)
+#define MPI26_PCIEIOUNIT1_LINKFLAGS_SRNS_EN (0x02)
/*
*Host code (drivers, BIOS, utilities, etc.) should leave this define set to
@@ -3714,7 +3745,7 @@ typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_1 {
U16 AdditionalControlFlags; /*0x0C */
U16 NVMeMaxQueueDepth; /*0x0E */
U8 NumPhys; /*0x10 */
- U8 Reserved1; /*0x11 */
+ U8 DMDReportPCIe; /*0x11 */
U16 Reserved2; /*0x12 */
MPI26_PCIE_IO_UNIT1_PHY_DATA
PhyData[MPI26_PCIE_IOUNIT1_PHY_MAX];/*0x14 */
@@ -3736,6 +3767,12 @@ typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_1 {
#define MPI26_PCIEIOUNIT1_MAX_RATE_8_0 (0x40)
#define MPI26_PCIEIOUNIT1_MAX_RATE_16_0 (0x50)
+/*values for PCIe IO Unit Page 1 DMDReportPCIe */
+#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_MASK (0x80)
+#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_1_SEC (0x00)
+#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_16_SEC (0x80)
+#define MPI26_PCIEIOUNIT1_DMDRPT_DELAY_TIME_MASK (0x7F)
+
/*see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo
*values
*/
@@ -3788,6 +3825,9 @@ typedef struct _MPI26_CONFIG_PAGE_PSWITCH_1 {
/*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+/* defines for the Flags field */
+#define MPI26_PCIESWITCH1_2_RETIMER_PRESENCE (0x0002)
+#define MPI26_PCIESWITCH1_RETIMER_PRESENCE (0x0001)
/****************************************************************************
* PCIe Device Config Pages (MPI v2.6 and later)
@@ -3849,19 +3889,21 @@ typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_0 {
*field
*/
-/*values for PCIe Device Page 0 Flags field */
-#define MPI26_PCIEDEV0_FLAGS_UNAUTHORIZED_DEVICE (0x8000)
-#define MPI26_PCIEDEV0_FLAGS_ENABLED_FAST_PATH (0x4000)
-#define MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE (0x2000)
-#define MPI26_PCIEDEV0_FLAGS_ASYNCHRONOUS_NOTIFICATION (0x0400)
-#define MPI26_PCIEDEV0_FLAGS_ATA_SW_PRESERVATION (0x0200)
-#define MPI26_PCIEDEV0_FLAGS_UNSUPPORTED_DEVICE (0x0100)
-#define MPI26_PCIEDEV0_FLAGS_ATA_48BIT_LBA_SUPPORTED (0x0080)
-#define MPI26_PCIEDEV0_FLAGS_ATA_SMART_SUPPORTED (0x0040)
-#define MPI26_PCIEDEV0_FLAGS_ATA_NCQ_SUPPORTED (0x0020)
-#define MPI26_PCIEDEV0_FLAGS_ATA_FUA_SUPPORTED (0x0010)
-#define MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID (0x0002)
-#define MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT (0x0001)
+/*values for PCIe Device Page 0 Flags field*/
+#define MPI26_PCIEDEV0_FLAGS_2_RETIMER_PRESENCE (0x00020000)
+#define MPI26_PCIEDEV0_FLAGS_RETIMER_PRESENCE (0x00010000)
+#define MPI26_PCIEDEV0_FLAGS_UNAUTHORIZED_DEVICE (0x00008000)
+#define MPI26_PCIEDEV0_FLAGS_ENABLED_FAST_PATH (0x00004000)
+#define MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE (0x00002000)
+#define MPI26_PCIEDEV0_FLAGS_ASYNCHRONOUS_NOTIFICATION (0x00000400)
+#define MPI26_PCIEDEV0_FLAGS_ATA_SW_PRESERVATION (0x00000200)
+#define MPI26_PCIEDEV0_FLAGS_UNSUPPORTED_DEVICE (0x00000100)
+#define MPI26_PCIEDEV0_FLAGS_ATA_48BIT_LBA_SUPPORTED (0x00000080)
+#define MPI26_PCIEDEV0_FLAGS_ATA_SMART_SUPPORTED (0x00000040)
+#define MPI26_PCIEDEV0_FLAGS_ATA_NCQ_SUPPORTED (0x00000020)
+#define MPI26_PCIEDEV0_FLAGS_ATA_FUA_SUPPORTED (0x00000010)
+#define MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID (0x00000002)
+#define MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT (0x00000001)
/* values for PCIe Device Page 0 SupportedLinkRates field */
#define MPI26_PCIEDEV0_LINK_RATE_16_0_SUPPORTED (0x08)
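Note the Flags values here grow from 16-bit to 32-bit constants so the retimer-presence bits can sit above bit 15; all existing bit positions are unchanged. Assuming the page's Flags field is read as a 32-bit little-endian quantity (the field itself is not shown in this hunk), a retimer check might look like:

	u32 flags = le32_to_cpu(pcie_dev_pg0_flags);	/* name assumed */
	bool has_retimer = flags & (MPI26_PCIEDEV0_FLAGS_RETIMER_PRESENCE |
				    MPI26_PCIEDEV0_FLAGS_2_RETIMER_PRESENCE);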
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_image.h b/drivers/scsi/mpt3sas/mpi/mpi2_image.h
new file mode 100644
index 000000000000..4959585f029d
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_image.h
@@ -0,0 +1,506 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2016-2020 Broadcom Limited. All rights reserved.
+ *
+ * Name: mpi2_image.h
+ * Description: Contains definitions for firmware and other component images
+ * Creation Date: 04/02/2018
+ * Version: 02.06.03
+ *
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 08-01-18 02.06.00 Initial version for MPI 2.6.5.
+ * 08-14-18 02.06.01 Corrected define for MPI26_IMAGE_HEADER_SIGNATURE0_MPI26
+ * 08-28-18 02.06.02 Added MPI2_EXT_IMAGE_TYPE_RDE
+ * 09-07-18 02.06.03 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES
+ */
+#ifndef MPI2_IMAGE_H
+#define MPI2_IMAGE_H
+
+
+/*FW Image Header */
+typedef struct _MPI2_FW_IMAGE_HEADER {
+ U32 Signature; /*0x00 */
+ U32 Signature0; /*0x04 */
+ U32 Signature1; /*0x08 */
+ U32 Signature2; /*0x0C */
+ MPI2_VERSION_UNION MPIVersion; /*0x10 */
+ MPI2_VERSION_UNION FWVersion; /*0x14 */
+ MPI2_VERSION_UNION NVDATAVersion; /*0x18 */
+ MPI2_VERSION_UNION PackageVersion; /*0x1C */
+ U16 VendorID; /*0x20 */
+ U16 ProductID; /*0x22 */
+ U16 ProtocolFlags; /*0x24 */
+ U16 Reserved26; /*0x26 */
+ U32 IOCCapabilities; /*0x28 */
+ U32 ImageSize; /*0x2C */
+ U32 NextImageHeaderOffset; /*0x30 */
+ U32 Checksum; /*0x34 */
+ U32 Reserved38; /*0x38 */
+ U32 Reserved3C; /*0x3C */
+ U32 Reserved40; /*0x40 */
+ U32 Reserved44; /*0x44 */
+ U32 Reserved48; /*0x48 */
+ U32 Reserved4C; /*0x4C */
+ U32 Reserved50; /*0x50 */
+ U32 Reserved54; /*0x54 */
+ U32 Reserved58; /*0x58 */
+ U32 Reserved5C; /*0x5C */
+ U32 BootFlags; /*0x60 */
+ U32 FirmwareVersionNameWhat; /*0x64 */
+ U8 FirmwareVersionName[32]; /*0x68 */
+ U32 VendorNameWhat; /*0x88 */
+ U8 VendorName[32]; /*0x8C */
+ U32 PackageNameWhat; /*0xAC */
+ U8 PackageName[32]; /*0xB0 */
+ U32 ReservedD0; /*0xD0 */
+ U32 ReservedD4; /*0xD4 */
+ U32 ReservedD8; /*0xD8 */
+ U32 ReservedDC; /*0xDC */
+ U32 ReservedE0; /*0xE0 */
+ U32 ReservedE4; /*0xE4 */
+ U32 ReservedE8; /*0xE8 */
+ U32 ReservedEC; /*0xEC */
+ U32 ReservedF0; /*0xF0 */
+ U32 ReservedF4; /*0xF4 */
+ U32 ReservedF8; /*0xF8 */
+ U32 ReservedFC; /*0xFC */
+} MPI2_FW_IMAGE_HEADER, *PTR_MPI2_FW_IMAGE_HEADER,
+ Mpi2FWImageHeader_t, *pMpi2FWImageHeader_t;
+
+/*Signature field */
+#define MPI2_FW_HEADER_SIGNATURE_OFFSET (0x00)
+#define MPI2_FW_HEADER_SIGNATURE_MASK (0xFF000000)
+#define MPI2_FW_HEADER_SIGNATURE (0xEA000000)
+#define MPI26_FW_HEADER_SIGNATURE (0xEB000000)
+
+/*Signature0 field */
+#define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04)
+#define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A)
+/*Last byte is defined by architecture */
+#define MPI26_FW_HEADER_SIGNATURE0_BASE (0x5AEAA500)
+#define MPI26_FW_HEADER_SIGNATURE0_ARC_0 (0x5A)
+#define MPI26_FW_HEADER_SIGNATURE0_ARC_1 (0x00)
+#define MPI26_FW_HEADER_SIGNATURE0_ARC_2 (0x01)
+/*legacy (0x5AEAA55A) */
+#define MPI26_FW_HEADER_SIGNATURE0_ARC_3 (0x02)
+#define MPI26_FW_HEADER_SIGNATURE0 \
+ (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_0)
+#define MPI26_FW_HEADER_SIGNATURE0_3516 \
+ (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_1)
+#define MPI26_FW_HEADER_SIGNATURE0_4008 \
+ (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_3)
+
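For MPI 2.6 parts, Signature0 is composed from a common base plus a per-architecture low byte, while the top byte of Signature (selected by MPI2_FW_HEADER_SIGNATURE_MASK) identifies the header generation. A generation check built from these defines (sketch only):

static int ex_is_mpi26_header(u32 signature)
{
	return (signature & MPI2_FW_HEADER_SIGNATURE_MASK) ==
	       MPI26_FW_HEADER_SIGNATURE;	/* 0xEB000000 family */
}
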
+/*Signature1 field */
+#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08)
+#define MPI2_FW_HEADER_SIGNATURE1 (0xA55AFAA5)
+#define MPI26_FW_HEADER_SIGNATURE1 (0xA55AEAA5)
+
+/*Signature2 field */
+#define MPI2_FW_HEADER_SIGNATURE2_OFFSET (0x0C)
+#define MPI2_FW_HEADER_SIGNATURE2 (0x5AA55AFA)
+#define MPI26_FW_HEADER_SIGNATURE2 (0x5AA55AEA)
+
+/*defines for using the ProductID field */
+#define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000)
+#define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000)
+
+#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00)
+#define MPI2_FW_HEADER_PID_PROD_A (0x0000)
+#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200)
+#define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700)
+
+#define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF)
+/*SAS ProductID Family bits */
+#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013)
+#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014)
+#define MPI25_FW_HEADER_PID_FAMILY_3108_SAS (0x0021)
+#define MPI26_FW_HEADER_PID_FAMILY_3324_SAS (0x0028)
+#define MPI26_FW_HEADER_PID_FAMILY_3516_SAS (0x0031)
+
+/*use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */
+
+/*use MPI2_IOCFACTS_CAPABILITY_ defines for IOCCapabilities field */
+
+#define MPI2_FW_HEADER_IMAGESIZE_OFFSET (0x2C)
+#define MPI2_FW_HEADER_NEXTIMAGE_OFFSET (0x30)
+
+#define MPI26_FW_HEADER_BOOTFLAGS_OFFSET (0x60)
+#define MPI2_FW_HEADER_BOOTFLAGS_ISSI32M_FLAG (0x00000001)
+#define MPI2_FW_HEADER_BOOTFLAGS_W25Q256JW_FLAG (0x00000002)
+/*This image has an auto-discovery version of SPI */
+#define MPI2_FW_HEADER_BOOTFLAGS_AUTO_SPI_FLAG (0x00000004)
+
+
+#define MPI2_FW_HEADER_VERNMHWAT_OFFSET (0x64)
+
+#define MPI2_FW_HEADER_WHAT_SIGNATURE (0x29232840)
+
+#define MPI2_FW_HEADER_SIZE (0x100)
+
+
+/****************************************************************************
+ * Component Image Format and related defines *
+ ****************************************************************************/
+
+/*Maximum number of Hash Exclusion entries in a Component Image Header */
+#define MPI26_COMP_IMG_HDR_NUM_HASH_EXCL (4)
+
+/*Hash Exclusion Format */
+typedef struct _MPI26_HASH_EXCLUSION_FORMAT {
+ U32 Offset; /*0x00 */
+ U32 Size; /*0x04 */
+} MPI26_HASH_EXCLUSION_FORMAT,
+ *PTR_MPI26_HASH_EXCLUSION_FORMAT,
+ Mpi26HashExclusionFormat_t,
+ *pMpi26HashExclusionFormat_t;
+
+/*FW Image Header */
+typedef struct _MPI26_COMPONENT_IMAGE_HEADER {
+ U32 Signature0; /*0x00 */
+ U32 LoadAddress; /*0x04 */
+ U32 DataSize; /*0x08 */
+ U32 StartAddress; /*0x0C */
+ U32 Signature1; /*0x10 */
+ U32 FlashOffset; /*0x14 */
+ U32 FlashSize; /*0x18 */
+ U32 VersionStringOffset; /*0x1C */
+ U32 BuildDateStringOffset; /*0x20 */
+ U32 BuildTimeStringOffset; /*0x24 */
+ U32 EnvironmentVariableOffset; /*0x28 */
+ U32 ApplicationSpecific; /*0x2C */
+ U32 Signature2; /*0x30 */
+ U32 HeaderSize; /*0x34 */
+ U32 Crc; /*0x38 */
+ U8 NotFlashImage; /*0x3C */
+ U8 Compressed; /*0x3D */
+ U16 Reserved3E; /*0x3E */
+ U32 SecondaryFlashOffset; /*0x40 */
+ U32 Reserved44; /*0x44 */
+ U32 Reserved48; /*0x48 */
+ MPI2_VERSION_UNION RMCInterfaceVersion; /*0x4C */
+ MPI2_VERSION_UNION Reserved50; /*0x50 */
+ MPI2_VERSION_UNION FWVersion; /*0x54 */
+ MPI2_VERSION_UNION NvdataVersion; /*0x58 */
+ MPI26_HASH_EXCLUSION_FORMAT
+ HashExclusion[MPI26_COMP_IMG_HDR_NUM_HASH_EXCL];/*0x5C */
+ U32 NextImageHeaderOffset; /*0x7C */
+ U32 Reserved80[32]; /*0x80 -- 0xFC */
+} MPI26_COMPONENT_IMAGE_HEADER,
+ *PTR_MPI26_COMPONENT_IMAGE_HEADER,
+ Mpi26ComponentImageHeader_t,
+ *pMpi26ComponentImageHeader_t;
+
+
+/**** Definitions for Signature0 field ****/
+#define MPI26_IMAGE_HEADER_SIGNATURE0_MPI26 (0xEB000042)
+
+/**** Definitions for Signature1 field ****/
+#define MPI26_IMAGE_HEADER_SIGNATURE1_APPLICATION (0x20505041)
+#define MPI26_IMAGE_HEADER_SIGNATURE1_CBB (0x20424243)
+#define MPI26_IMAGE_HEADER_SIGNATURE1_MFG (0x2047464D)
+#define MPI26_IMAGE_HEADER_SIGNATURE1_BIOS (0x534F4942)
+#define MPI26_IMAGE_HEADER_SIGNATURE1_HIIM (0x4D494948)
+#define MPI26_IMAGE_HEADER_SIGNATURE1_HIIA (0x41494948)
+#define MPI26_IMAGE_HEADER_SIGNATURE1_CPLD (0x444C5043)
+#define MPI26_IMAGE_HEADER_SIGNATURE1_SPD (0x20445053)
+#define MPI26_IMAGE_HEADER_SIGNATURE1_NVDATA (0x5444564E)
+#define MPI26_IMAGE_HEADER_SIGNATURE1_GAS_GAUGE (0x20534147)
+#define MPI26_IMAGE_HEADER_SIGNATURE1_PBLP (0x50424C50)
+
+/**** Definitions for Signature2 field ****/
+#define MPI26_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546)
+
+/**** Offsets for Image Header Fields ****/
+#define MPI26_IMAGE_HEADER_SIGNATURE0_OFFSET (0x00)
+#define MPI26_IMAGE_HEADER_LOAD_ADDRESS_OFFSET (0x04)
+#define MPI26_IMAGE_HEADER_DATA_SIZE_OFFSET (0x08)
+#define MPI26_IMAGE_HEADER_START_ADDRESS_OFFSET (0x0C)
+#define MPI26_IMAGE_HEADER_SIGNATURE1_OFFSET (0x10)
+#define MPI26_IMAGE_HEADER_FLASH_OFFSET_OFFSET (0x14)
+#define MPI26_IMAGE_HEADER_FLASH_SIZE_OFFSET (0x18)
+#define MPI26_IMAGE_HEADER_VERSION_STRING_OFFSET_OFFSET (0x1C)
+#define MPI26_IMAGE_HEADER_BUILD_DATE_STRING_OFFSET_OFFSET (0x20)
+#define MPI26_IMAGE_HEADER_BUILD_TIME_OFFSET_OFFSET (0x24)
+#define MPI26_IMAGE_HEADER_ENVIROMENT_VAR_OFFSET_OFFSET (0x28)
+#define MPI26_IMAGE_HEADER_APPLICATION_SPECIFIC_OFFSET (0x2C)
+#define MPI26_IMAGE_HEADER_SIGNATURE2_OFFSET (0x30)
+#define MPI26_IMAGE_HEADER_HEADER_SIZE_OFFSET (0x34)
+#define MPI26_IMAGE_HEADER_CRC_OFFSET (0x38)
+#define MPI26_IMAGE_HEADER_NOT_FLASH_IMAGE_OFFSET (0x3C)
+#define MPI26_IMAGE_HEADER_COMPRESSED_OFFSET (0x3D)
+#define MPI26_IMAGE_HEADER_SECONDARY_FLASH_OFFSET_OFFSET (0x40)
+#define MPI26_IMAGE_HEADER_RMC_INTERFACE_VER_OFFSET (0x4C)
+#define MPI26_IMAGE_HEADER_COMPONENT_IMAGE_VER_OFFSET (0x54)
+#define MPI26_IMAGE_HEADER_HASH_EXCLUSION_OFFSET (0x5C)
+#define MPI26_IMAGE_HEADER_NEXT_IMAGE_HEADER_OFFSET_OFFSET (0x7C)
+
+
+#define MPI26_IMAGE_HEADER_SIZE (0x100)
+
+
+/*Extended Image Header */
+typedef struct _MPI2_EXT_IMAGE_HEADER {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 Checksum; /*0x04 */
+ U32 ImageSize; /*0x08 */
+ U32 NextImageHeaderOffset; /*0x0C */
+ U32 PackageVersion; /*0x10 */
+ U32 Reserved3; /*0x14 */
+ U32 Reserved4; /*0x18 */
+ U32 Reserved5; /*0x1C */
+ U8 IdentifyString[32]; /*0x20 */
+} MPI2_EXT_IMAGE_HEADER, *PTR_MPI2_EXT_IMAGE_HEADER,
+ Mpi2ExtImageHeader_t, *pMpi2ExtImageHeader_t;
+
+/*useful offsets */
+#define MPI2_EXT_IMAGE_IMAGETYPE_OFFSET (0x00)
+#define MPI2_EXT_IMAGE_IMAGESIZE_OFFSET (0x08)
+#define MPI2_EXT_IMAGE_NEXTIMAGE_OFFSET (0x0C)
+#define MPI2_EXT_IMAGE_PACKAGEVERSION_OFFSET (0x10)
+
+#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40)
+
+/*defines for the ImageType field */
+#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
+#define MPI2_EXT_IMAGE_TYPE_FW (0x01)
+#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03)
+#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
+#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05)
+#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06)
+#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
+#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
+#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09)
+#define MPI2_EXT_IMAGE_TYPE_RDE (0x0A)
+#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF)
+
+#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC)
+
+/*FLASH Layout Extended Image Data */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check RegionsPerLayout at runtime.
+ */
+#ifndef MPI2_FLASH_NUMBER_OF_REGIONS
+#define MPI2_FLASH_NUMBER_OF_REGIONS (1)
+#endif
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumberOfLayouts at runtime.
+ */
+#ifndef MPI2_FLASH_NUMBER_OF_LAYOUTS
+#define MPI2_FLASH_NUMBER_OF_LAYOUTS (1)
+#endif
+
+typedef struct _MPI2_FLASH_REGION {
+ U8 RegionType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 RegionOffset; /*0x04 */
+ U32 RegionSize; /*0x08 */
+ U32 Reserved3; /*0x0C */
+} MPI2_FLASH_REGION, *PTR_MPI2_FLASH_REGION,
+ Mpi2FlashRegion_t, *pMpi2FlashRegion_t;
+
+typedef struct _MPI2_FLASH_LAYOUT {
+ U32 FlashSize; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U32 Reserved3; /*0x0C */
+ MPI2_FLASH_REGION Region[MPI2_FLASH_NUMBER_OF_REGIONS]; /*0x10 */
+} MPI2_FLASH_LAYOUT, *PTR_MPI2_FLASH_LAYOUT,
+ Mpi2FlashLayout_t, *pMpi2FlashLayout_t;
+
+typedef struct _MPI2_FLASH_LAYOUT_DATA {
+ U8 ImageRevision; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 SizeOfRegion; /*0x02 */
+ U8 Reserved2; /*0x03 */
+ U16 NumberOfLayouts; /*0x04 */
+ U16 RegionsPerLayout; /*0x06 */
+ U16 MinimumSectorAlignment; /*0x08 */
+ U16 Reserved3; /*0x0A */
+ U32 Reserved4; /*0x0C */
+ MPI2_FLASH_LAYOUT Layout[MPI2_FLASH_NUMBER_OF_LAYOUTS]; /*0x10 */
+} MPI2_FLASH_LAYOUT_DATA, *PTR_MPI2_FLASH_LAYOUT_DATA,
+ Mpi2FlashLayoutData_t, *pMpi2FlashLayoutData_t;
+
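As the comments above say, MPI2_FLASH_NUMBER_OF_REGIONS and MPI2_FLASH_NUMBER_OF_LAYOUTS are compile-time placeholders; a real blob must be walked using the counts and sizes it reports at runtime. A hedged sketch of such a walk:

static void ex_walk_layouts(const MPI2_FLASH_LAYOUT_DATA *data)
{
	const U8 *p = (const U8 *)data->Layout;
	U16 i;

	for (i = 0; i < data->NumberOfLayouts; i++) {
		const MPI2_FLASH_LAYOUT *layout = (const void *)p;

		/* layout->Region[0 .. RegionsPerLayout-1] live here */
		(void)layout;

		p += sizeof(MPI2_FLASH_LAYOUT) - sizeof(MPI2_FLASH_REGION) +
		     (size_t)data->RegionsPerLayout * data->SizeOfRegion;
	}
}
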
+/*defines for the RegionType field */
+#define MPI2_FLASH_REGION_UNUSED (0x00)
+#define MPI2_FLASH_REGION_FIRMWARE (0x01)
+#define MPI2_FLASH_REGION_BIOS (0x02)
+#define MPI2_FLASH_REGION_NVDATA (0x03)
+#define MPI2_FLASH_REGION_FIRMWARE_BACKUP (0x05)
+#define MPI2_FLASH_REGION_MFG_INFORMATION (0x06)
+#define MPI2_FLASH_REGION_CONFIG_1 (0x07)
+#define MPI2_FLASH_REGION_CONFIG_2 (0x08)
+#define MPI2_FLASH_REGION_MEGARAID (0x09)
+#define MPI2_FLASH_REGION_COMMON_BOOT_BLOCK (0x0A)
+#define MPI2_FLASH_REGION_INIT (MPI2_FLASH_REGION_COMMON_BOOT_BLOCK)
+#define MPI2_FLASH_REGION_CBB_BACKUP (0x0D)
+#define MPI2_FLASH_REGION_SBR (0x0E)
+#define MPI2_FLASH_REGION_SBR_BACKUP (0x0F)
+#define MPI2_FLASH_REGION_HIIM (0x10)
+#define MPI2_FLASH_REGION_HIIA (0x11)
+#define MPI2_FLASH_REGION_CTLR (0x12)
+#define MPI2_FLASH_REGION_IMR_FIRMWARE (0x13)
+#define MPI2_FLASH_REGION_MR_NVDATA (0x14)
+#define MPI2_FLASH_REGION_CPLD (0x15)
+#define MPI2_FLASH_REGION_PSOC (0x16)
+
+/*ImageRevision */
+#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00)
+
+/*Supported Devices Extended Image Data */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumberOfDevices at runtime.
+ */
+#ifndef MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES
+#define MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES (1)
+#endif
+
+typedef struct _MPI2_SUPPORTED_DEVICE {
+ U16 DeviceID; /*0x00 */
+ U16 VendorID; /*0x02 */
+ U16 DeviceIDMask; /*0x04 */
+ U16 Reserved1; /*0x06 */
+ U8 LowPCIRev; /*0x08 */
+ U8 HighPCIRev; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U32 Reserved3; /*0x0C */
+} MPI2_SUPPORTED_DEVICE, *PTR_MPI2_SUPPORTED_DEVICE,
+ Mpi2SupportedDevice_t, *pMpi2SupportedDevice_t;
+
+typedef struct _MPI2_SUPPORTED_DEVICES_DATA {
+ U8 ImageRevision; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 NumberOfDevices; /*0x02 */
+ U8 Reserved2; /*0x03 */
+ U32 Reserved3; /*0x04 */
+ MPI2_SUPPORTED_DEVICE
+ SupportedDevice[MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES];/*0x08 */
+} MPI2_SUPPORTED_DEVICES_DATA, *PTR_MPI2_SUPPORTED_DEVICES_DATA,
+ Mpi2SupportedDevicesData_t, *pMpi2SupportedDevicesData_t;
+
+/*ImageRevision */
+#define MPI2_SUPPORTED_DEVICES_IMAGE_REVISION (0x00)
+
+/*Init Extended Image Data */
+
+typedef struct _MPI2_INIT_IMAGE_FOOTER {
+ U32 BootFlags; /*0x00 */
+ U32 ImageSize; /*0x04 */
+ U32 Signature0; /*0x08 */
+ U32 Signature1; /*0x0C */
+ U32 Signature2; /*0x10 */
+ U32 ResetVector; /*0x14 */
+} MPI2_INIT_IMAGE_FOOTER, *PTR_MPI2_INIT_IMAGE_FOOTER,
+ Mpi2InitImageFooter_t, *pMpi2InitImageFooter_t;
+
+/*defines for the BootFlags field */
+#define MPI2_INIT_IMAGE_BOOTFLAGS_OFFSET (0x00)
+
+/*defines for the ImageSize field */
+#define MPI2_INIT_IMAGE_IMAGESIZE_OFFSET (0x04)
+
+/*defines for the Signature0 field */
+#define MPI2_INIT_IMAGE_SIGNATURE0_OFFSET (0x08)
+#define MPI2_INIT_IMAGE_SIGNATURE0 (0x5AA55AEA)
+
+/*defines for the Signature1 field */
+#define MPI2_INIT_IMAGE_SIGNATURE1_OFFSET (0x0C)
+#define MPI2_INIT_IMAGE_SIGNATURE1 (0xA55AEAA5)
+
+/*defines for the Signature2 field */
+#define MPI2_INIT_IMAGE_SIGNATURE2_OFFSET (0x10)
+#define MPI2_INIT_IMAGE_SIGNATURE2 (0x5AEAA55A)
+
+/*Signature fields as individual bytes */
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_0 (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_1 (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_2 (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_3 (0x5A)
+
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_4 (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_5 (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_6 (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_7 (0xA5)
+
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_8 (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_9 (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_A (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_B (0x5A)
+
+/*defines for the ResetVector field */
+#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14)
+
+
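The per-byte defines exist so an image can be validated in stored (little-endian) byte order without assembling whole words; they are just the LE bytes of the three 32-bit signatures above. For example (illustrative; p points at the footer's Signature0 field):

static int ex_init_sig0_ok(const unsigned char *p)
{
	return p[0] == MPI2_INIT_IMAGE_SIGNATURE_BYTE_0 &&	/* 0xEA */
	       p[1] == MPI2_INIT_IMAGE_SIGNATURE_BYTE_1 &&	/* 0x5A */
	       p[2] == MPI2_INIT_IMAGE_SIGNATURE_BYTE_2 &&	/* 0xA5 */
	       p[3] == MPI2_INIT_IMAGE_SIGNATURE_BYTE_3;	/* 0x5A */
}
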
+/* Encrypted Hash Extended Image Data */
+
+typedef struct _MPI25_ENCRYPTED_HASH_ENTRY {
+ U8 HashImageType; /*0x00 */
+ U8 HashAlgorithm; /*0x01 */
+ U8 EncryptionAlgorithm; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U32 Reserved2; /*0x04 */
+ U32 EncryptedHash[1]; /*0x08 */ /* variable length */
+} MPI25_ENCRYPTED_HASH_ENTRY, *PTR_MPI25_ENCRYPTED_HASH_ENTRY,
+Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t;
+
+/* values for HashImageType */
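MPI25_ENCRYPTED_HASH_ENTRY ends in a [1]-sized stand-in for a flexible array, as the "variable length" comment notes, so a real entry occupies the fixed header plus however many EncryptedHash words the hash needs. The usual sizing helper for such structs (sketch):

static size_t ex_hash_entry_bytes(unsigned int hash_dwords)
{
	/* sizeof() already counts one U32 of EncryptedHash */
	return sizeof(MPI25_ENCRYPTED_HASH_ENTRY) +
	       ((size_t)hash_dwords - 1) * sizeof(U32);
}
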
+#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00)
+#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01)
+#define MPI25_HASH_IMAGE_TYPE_BIOS (0x02)
+
+#define MPI26_HASH_IMAGE_TYPE_UNUSED (0x00)
+#define MPI26_HASH_IMAGE_TYPE_FIRMWARE (0x01)
+#define MPI26_HASH_IMAGE_TYPE_BIOS (0x02)
+#define MPI26_HASH_IMAGE_TYPE_KEY_HASH (0x03)
+
+/* values for HashAlgorithm */
+#define MPI25_HASH_ALGORITHM_UNUSED (0x00)
+#define MPI25_HASH_ALGORITHM_SHA256 (0x01)
+
+#define MPI26_HASH_ALGORITHM_VERSION_MASK (0xE0)
+#define MPI26_HASH_ALGORITHM_VERSION_NONE (0x00)
+#define MPI26_HASH_ALGORITHM_VERSION_SHA1 (0x20)
+#define MPI26_HASH_ALGORITHM_VERSION_SHA2 (0x40)
+#define MPI26_HASH_ALGORITHM_VERSION_SHA3 (0x60)
+#define MPI26_HASH_ALGORITHM_SIZE_MASK (0x1F)
+#define MPI26_HASH_ALGORITHM_SIZE_256 (0x01)
+#define MPI26_HASH_ALGORITHM_SIZE_512 (0x02)
+
+
+/* values for EncryptionAlgorithm */
+#define MPI25_ENCRYPTION_ALG_UNUSED (0x00)
+#define MPI25_ENCRYPTION_ALG_RSA256 (0x01)
+
+#define MPI26_ENCRYPTION_ALG_UNUSED (0x00)
+#define MPI26_ENCRYPTION_ALG_RSA256 (0x01)
+#define MPI26_ENCRYPTION_ALG_RSA512 (0x02)
+#define MPI26_ENCRYPTION_ALG_RSA1024 (0x03)
+#define MPI26_ENCRYPTION_ALG_RSA2048 (0x04)
+#define MPI26_ENCRYPTION_ALG_RSA4096 (0x05)
+
+typedef struct _MPI25_ENCRYPTED_HASH_DATA {
+ U8 ImageVersion; /*0x00 */
+ U8 NumHash; /*0x01 */
+ U16 Reserved1; /*0x02 */
+ U32 Reserved2; /*0x04 */
+ MPI25_ENCRYPTED_HASH_ENTRY EncryptedHashEntry[1]; /*0x08 */
+} MPI25_ENCRYPTED_HASH_DATA, *PTR_MPI25_ENCRYPTED_HASH_DATA,
+Mpi25EncryptedHashData_t, *pMpi25EncryptedHashData_t;
+
+
+#endif /* MPI2_IMAGE_H */
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
index 6213ce6791ac..8f1b903fe0a9 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright 2000-2015 Avago Technologies. All rights reserved.
+ * Copyright 2000-2020 Broadcom Inc. All rights reserved.
*
*
* Name: mpi2_init.h
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index 1faec3a93e69..68ea408cd5c5 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -1,13 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright 2000-2015 Avago Technologies. All rights reserved.
+ * Copyright 2000-2020 Broadcom Inc. All rights reserved.
*
*
* Name: mpi2_ioc.h
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: October 11, 2006
*
- * mpi2_ioc.h Version: 02.00.34
+ * mpi2_ioc.h Version: 02.00.37
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -171,6 +171,10 @@
* 09-29-17 02.00.34 Added MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED
* to the ReasonCode field in PCIe Device Status Change
* Event Data.
+ * 07-22-18 02.00.35 Added FW_DOWNLOAD_ITYPE_CPLD and _PSOC.
+ * Moved FW image definitions into new mpi2_image.h
+ * 08-14-18 02.00.36 Fixed definition of MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16)
+ * 09-07-18 02.00.37 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES
* --------------------------------------------------------------------------
*/
@@ -1255,6 +1259,7 @@ typedef struct _MPI26_EVENT_PCIE_TOPO_PORT_ENTRY {
#define MPI26_EVENT_PCIE_TOPO_PI_2_LANES (0x20)
#define MPI26_EVENT_PCIE_TOPO_PI_4_LANES (0x30)
#define MPI26_EVENT_PCIE_TOPO_PI_8_LANES (0x40)
+#define MPI26_EVENT_PCIE_TOPO_PI_16_LANES (0x50)
#define MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK (0x0F)
#define MPI26_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN (0x00)
@@ -1450,7 +1455,11 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST {
#define MPI2_FW_DOWNLOAD_ITYPE_CTLR (0x12)
#define MPI2_FW_DOWNLOAD_ITYPE_IMR_FIRMWARE (0x13)
#define MPI2_FW_DOWNLOAD_ITYPE_MR_NVDATA (0x14)
+/*MPI v2.6 and newer */
+#define MPI2_FW_DOWNLOAD_ITYPE_CPLD (0x15)
+#define MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16)
#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
+#define MPI2_FW_DOWNLOAD_ITYPE_TERMINATE (0xFF)
/*MPI v2.0 FWDownload TransactionContext Element */
typedef struct _MPI2_FW_DOWNLOAD_TCSGE {
@@ -1597,352 +1606,6 @@ typedef struct _MPI2_FW_UPLOAD_REPLY {
} MPI2_FW_UPLOAD_REPLY, *PTR_MPI2_FW_UPLOAD_REPLY,
Mpi2FWUploadReply_t, *pMPi2FWUploadReply_t;
-/*FW Image Header */
-typedef struct _MPI2_FW_IMAGE_HEADER {
- U32 Signature; /*0x00 */
- U32 Signature0; /*0x04 */
- U32 Signature1; /*0x08 */
- U32 Signature2; /*0x0C */
- MPI2_VERSION_UNION MPIVersion; /*0x10 */
- MPI2_VERSION_UNION FWVersion; /*0x14 */
- MPI2_VERSION_UNION NVDATAVersion; /*0x18 */
- MPI2_VERSION_UNION PackageVersion; /*0x1C */
- U16 VendorID; /*0x20 */
- U16 ProductID; /*0x22 */
- U16 ProtocolFlags; /*0x24 */
- U16 Reserved26; /*0x26 */
- U32 IOCCapabilities; /*0x28 */
- U32 ImageSize; /*0x2C */
- U32 NextImageHeaderOffset; /*0x30 */
- U32 Checksum; /*0x34 */
- U32 Reserved38; /*0x38 */
- U32 Reserved3C; /*0x3C */
- U32 Reserved40; /*0x40 */
- U32 Reserved44; /*0x44 */
- U32 Reserved48; /*0x48 */
- U32 Reserved4C; /*0x4C */
- U32 Reserved50; /*0x50 */
- U32 Reserved54; /*0x54 */
- U32 Reserved58; /*0x58 */
- U32 Reserved5C; /*0x5C */
- U32 BootFlags; /*0x60 */
- U32 FirmwareVersionNameWhat; /*0x64 */
- U8 FirmwareVersionName[32]; /*0x68 */
- U32 VendorNameWhat; /*0x88 */
- U8 VendorName[32]; /*0x8C */
- U32 PackageNameWhat; /*0x88 */
- U8 PackageName[32]; /*0x8C */
- U32 ReservedD0; /*0xD0 */
- U32 ReservedD4; /*0xD4 */
- U32 ReservedD8; /*0xD8 */
- U32 ReservedDC; /*0xDC */
- U32 ReservedE0; /*0xE0 */
- U32 ReservedE4; /*0xE4 */
- U32 ReservedE8; /*0xE8 */
- U32 ReservedEC; /*0xEC */
- U32 ReservedF0; /*0xF0 */
- U32 ReservedF4; /*0xF4 */
- U32 ReservedF8; /*0xF8 */
- U32 ReservedFC; /*0xFC */
-} MPI2_FW_IMAGE_HEADER, *PTR_MPI2_FW_IMAGE_HEADER,
- Mpi2FWImageHeader_t, *pMpi2FWImageHeader_t;
-
-/*Signature field */
-#define MPI2_FW_HEADER_SIGNATURE_OFFSET (0x00)
-#define MPI2_FW_HEADER_SIGNATURE_MASK (0xFF000000)
-#define MPI2_FW_HEADER_SIGNATURE (0xEA000000)
-#define MPI26_FW_HEADER_SIGNATURE (0xEB000000)
-
-/*Signature0 field */
-#define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04)
-#define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A)
-/* Last byte is defined by architecture */
-#define MPI26_FW_HEADER_SIGNATURE0_BASE (0x5AEAA500)
-#define MPI26_FW_HEADER_SIGNATURE0_ARC_0 (0x5A)
-#define MPI26_FW_HEADER_SIGNATURE0_ARC_1 (0x00)
-#define MPI26_FW_HEADER_SIGNATURE0_ARC_2 (0x01)
-/* legacy (0x5AEAA55A) */
-#define MPI26_FW_HEADER_SIGNATURE0_ARC_3 (0x02)
-#define MPI26_FW_HEADER_SIGNATURE0 \
- (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_0)
-#define MPI26_FW_HEADER_SIGNATURE0_3516 \
- (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_1)
-#define MPI26_FW_HEADER_SIGNATURE0_4008 \
- (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_3)
-
-/*Signature1 field */
-#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08)
-#define MPI2_FW_HEADER_SIGNATURE1 (0xA55AFAA5)
-#define MPI26_FW_HEADER_SIGNATURE1 (0xA55AEAA5)
-
-/*Signature2 field */
-#define MPI2_FW_HEADER_SIGNATURE2_OFFSET (0x0C)
-#define MPI2_FW_HEADER_SIGNATURE2 (0x5AA55AFA)
-#define MPI26_FW_HEADER_SIGNATURE2 (0x5AA55AEA)
-
-/*defines for using the ProductID field */
-#define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000)
-#define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000)
-
-#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00)
-#define MPI2_FW_HEADER_PID_PROD_A (0x0000)
-#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200)
-#define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700)
-
-#define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF)
-/*SAS ProductID Family bits */
-#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013)
-#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014)
-#define MPI25_FW_HEADER_PID_FAMILY_3108_SAS (0x0021)
-#define MPI26_FW_HEADER_PID_FAMILY_3324_SAS (0x0028)
-#define MPI26_FW_HEADER_PID_FAMILY_3516_SAS (0x0031)
-
-/*use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */
-
-/*use MPI2_IOCFACTS_CAPABILITY_ defines for IOCCapabilities field */
-
-#define MPI2_FW_HEADER_IMAGESIZE_OFFSET (0x2C)
-#define MPI2_FW_HEADER_NEXTIMAGE_OFFSET (0x30)
-#define MPI26_FW_HEADER_BOOTFLAGS_OFFSET (0x60)
-#define MPI2_FW_HEADER_VERNMHWAT_OFFSET (0x64)
-
-#define MPI2_FW_HEADER_WHAT_SIGNATURE (0x29232840)
-
-#define MPI2_FW_HEADER_SIZE (0x100)
-
-/*Extended Image Header */
-typedef struct _MPI2_EXT_IMAGE_HEADER {
- U8 ImageType; /*0x00 */
- U8 Reserved1; /*0x01 */
- U16 Reserved2; /*0x02 */
- U32 Checksum; /*0x04 */
- U32 ImageSize; /*0x08 */
- U32 NextImageHeaderOffset; /*0x0C */
- U32 PackageVersion; /*0x10 */
- U32 Reserved3; /*0x14 */
- U32 Reserved4; /*0x18 */
- U32 Reserved5; /*0x1C */
- U8 IdentifyString[32]; /*0x20 */
-} MPI2_EXT_IMAGE_HEADER, *PTR_MPI2_EXT_IMAGE_HEADER,
- Mpi2ExtImageHeader_t, *pMpi2ExtImageHeader_t;
-
-/*useful offsets */
-#define MPI2_EXT_IMAGE_IMAGETYPE_OFFSET (0x00)
-#define MPI2_EXT_IMAGE_IMAGESIZE_OFFSET (0x08)
-#define MPI2_EXT_IMAGE_NEXTIMAGE_OFFSET (0x0C)
-
-#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40)
-
-/*defines for the ImageType field */
-#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
-#define MPI2_EXT_IMAGE_TYPE_FW (0x01)
-#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03)
-#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
-#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05)
-#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06)
-#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
-#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
-#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09)
-#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
-#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF)
-
-#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC)
-
-/*FLASH Layout Extended Image Data */
-
-/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check RegionsPerLayout at runtime.
- */
-#ifndef MPI2_FLASH_NUMBER_OF_REGIONS
-#define MPI2_FLASH_NUMBER_OF_REGIONS (1)
-#endif
-
-/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check NumberOfLayouts at runtime.
- */
-#ifndef MPI2_FLASH_NUMBER_OF_LAYOUTS
-#define MPI2_FLASH_NUMBER_OF_LAYOUTS (1)
-#endif
-
-typedef struct _MPI2_FLASH_REGION {
- U8 RegionType; /*0x00 */
- U8 Reserved1; /*0x01 */
- U16 Reserved2; /*0x02 */
- U32 RegionOffset; /*0x04 */
- U32 RegionSize; /*0x08 */
- U32 Reserved3; /*0x0C */
-} MPI2_FLASH_REGION, *PTR_MPI2_FLASH_REGION,
- Mpi2FlashRegion_t, *pMpi2FlashRegion_t;
-
-typedef struct _MPI2_FLASH_LAYOUT {
- U32 FlashSize; /*0x00 */
- U32 Reserved1; /*0x04 */
- U32 Reserved2; /*0x08 */
- U32 Reserved3; /*0x0C */
- MPI2_FLASH_REGION Region[MPI2_FLASH_NUMBER_OF_REGIONS]; /*0x10 */
-} MPI2_FLASH_LAYOUT, *PTR_MPI2_FLASH_LAYOUT,
- Mpi2FlashLayout_t, *pMpi2FlashLayout_t;
-
-typedef struct _MPI2_FLASH_LAYOUT_DATA {
- U8 ImageRevision; /*0x00 */
- U8 Reserved1; /*0x01 */
- U8 SizeOfRegion; /*0x02 */
- U8 Reserved2; /*0x03 */
- U16 NumberOfLayouts; /*0x04 */
- U16 RegionsPerLayout; /*0x06 */
- U16 MinimumSectorAlignment; /*0x08 */
- U16 Reserved3; /*0x0A */
- U32 Reserved4; /*0x0C */
- MPI2_FLASH_LAYOUT Layout[MPI2_FLASH_NUMBER_OF_LAYOUTS]; /*0x10 */
-} MPI2_FLASH_LAYOUT_DATA, *PTR_MPI2_FLASH_LAYOUT_DATA,
- Mpi2FlashLayoutData_t, *pMpi2FlashLayoutData_t;
-
-/*defines for the RegionType field */
-#define MPI2_FLASH_REGION_UNUSED (0x00)
-#define MPI2_FLASH_REGION_FIRMWARE (0x01)
-#define MPI2_FLASH_REGION_BIOS (0x02)
-#define MPI2_FLASH_REGION_NVDATA (0x03)
-#define MPI2_FLASH_REGION_FIRMWARE_BACKUP (0x05)
-#define MPI2_FLASH_REGION_MFG_INFORMATION (0x06)
-#define MPI2_FLASH_REGION_CONFIG_1 (0x07)
-#define MPI2_FLASH_REGION_CONFIG_2 (0x08)
-#define MPI2_FLASH_REGION_MEGARAID (0x09)
-#define MPI2_FLASH_REGION_COMMON_BOOT_BLOCK (0x0A)
-#define MPI2_FLASH_REGION_INIT (MPI2_FLASH_REGION_COMMON_BOOT_BLOCK)
-#define MPI2_FLASH_REGION_CBB_BACKUP (0x0D)
-#define MPI2_FLASH_REGION_SBR (0x0E)
-#define MPI2_FLASH_REGION_SBR_BACKUP (0x0F)
-#define MPI2_FLASH_REGION_HIIM (0x10)
-#define MPI2_FLASH_REGION_HIIA (0x11)
-#define MPI2_FLASH_REGION_CTLR (0x12)
-#define MPI2_FLASH_REGION_IMR_FIRMWARE (0x13)
-#define MPI2_FLASH_REGION_MR_NVDATA (0x14)
-
-/*ImageRevision */
-#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00)
-
-/*Supported Devices Extended Image Data */
-
-/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check NumberOfDevices at runtime.
- */
-#ifndef MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES
-#define MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES (1)
-#endif
-
-typedef struct _MPI2_SUPPORTED_DEVICE {
- U16 DeviceID; /*0x00 */
- U16 VendorID; /*0x02 */
- U16 DeviceIDMask; /*0x04 */
- U16 Reserved1; /*0x06 */
- U8 LowPCIRev; /*0x08 */
- U8 HighPCIRev; /*0x09 */
- U16 Reserved2; /*0x0A */
- U32 Reserved3; /*0x0C */
-} MPI2_SUPPORTED_DEVICE, *PTR_MPI2_SUPPORTED_DEVICE,
- Mpi2SupportedDevice_t, *pMpi2SupportedDevice_t;
-
-typedef struct _MPI2_SUPPORTED_DEVICES_DATA {
- U8 ImageRevision; /*0x00 */
- U8 Reserved1; /*0x01 */
- U8 NumberOfDevices; /*0x02 */
- U8 Reserved2; /*0x03 */
- U32 Reserved3; /*0x04 */
- MPI2_SUPPORTED_DEVICE
- SupportedDevice[MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES];/*0x08 */
-} MPI2_SUPPORTED_DEVICES_DATA, *PTR_MPI2_SUPPORTED_DEVICES_DATA,
- Mpi2SupportedDevicesData_t, *pMpi2SupportedDevicesData_t;
-
-/*ImageRevision */
-#define MPI2_SUPPORTED_DEVICES_IMAGE_REVISION (0x00)
-
-/*Init Extended Image Data */
-
-typedef struct _MPI2_INIT_IMAGE_FOOTER {
- U32 BootFlags; /*0x00 */
- U32 ImageSize; /*0x04 */
- U32 Signature0; /*0x08 */
- U32 Signature1; /*0x0C */
- U32 Signature2; /*0x10 */
- U32 ResetVector; /*0x14 */
-} MPI2_INIT_IMAGE_FOOTER, *PTR_MPI2_INIT_IMAGE_FOOTER,
- Mpi2InitImageFooter_t, *pMpi2InitImageFooter_t;
-
-/*defines for the BootFlags field */
-#define MPI2_INIT_IMAGE_BOOTFLAGS_OFFSET (0x00)
-
-/*defines for the ImageSize field */
-#define MPI2_INIT_IMAGE_IMAGESIZE_OFFSET (0x04)
-
-/*defines for the Signature0 field */
-#define MPI2_INIT_IMAGE_SIGNATURE0_OFFSET (0x08)
-#define MPI2_INIT_IMAGE_SIGNATURE0 (0x5AA55AEA)
-
-/*defines for the Signature1 field */
-#define MPI2_INIT_IMAGE_SIGNATURE1_OFFSET (0x0C)
-#define MPI2_INIT_IMAGE_SIGNATURE1 (0xA55AEAA5)
-
-/*defines for the Signature2 field */
-#define MPI2_INIT_IMAGE_SIGNATURE2_OFFSET (0x10)
-#define MPI2_INIT_IMAGE_SIGNATURE2 (0x5AEAA55A)
-
-/*Signature fields as individual bytes */
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_0 (0xEA)
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_1 (0x5A)
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_2 (0xA5)
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_3 (0x5A)
-
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_4 (0xA5)
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_5 (0xEA)
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_6 (0x5A)
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_7 (0xA5)
-
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_8 (0x5A)
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_9 (0xA5)
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_A (0xEA)
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_B (0x5A)
-
-/*defines for the ResetVector field */
-#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14)
-
-
-/* Encrypted Hash Extended Image Data */
-
-typedef struct _MPI25_ENCRYPTED_HASH_ENTRY {
- U8 HashImageType; /* 0x00 */
- U8 HashAlgorithm; /* 0x01 */
- U8 EncryptionAlgorithm; /* 0x02 */
- U8 Reserved1; /* 0x03 */
- U32 Reserved2; /* 0x04 */
- U32 EncryptedHash[1]; /* 0x08 */ /* variable length */
-} MPI25_ENCRYPTED_HASH_ENTRY, *PTR_MPI25_ENCRYPTED_HASH_ENTRY,
-Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t;
-
-/* values for HashImageType */
-#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00)
-#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01)
-#define MPI25_HASH_IMAGE_TYPE_BIOS (0x02)
-
-/* values for HashAlgorithm */
-#define MPI25_HASH_ALGORITHM_UNUSED (0x00)
-#define MPI25_HASH_ALGORITHM_SHA256 (0x01)
-
-/* values for EncryptionAlgorithm */
-#define MPI25_ENCRYPTION_ALG_UNUSED (0x00)
-#define MPI25_ENCRYPTION_ALG_RSA256 (0x01)
-
-typedef struct _MPI25_ENCRYPTED_HASH_DATA {
- U8 ImageVersion; /* 0x00 */
- U8 NumHash; /* 0x01 */
- U16 Reserved1; /* 0x02 */
- U32 Reserved2; /* 0x04 */
- MPI25_ENCRYPTED_HASH_ENTRY EncryptedHashEntry[1]; /* 0x08 */
-} MPI25_ENCRYPTED_HASH_DATA, *PTR_MPI25_ENCRYPTED_HASH_DATA,
-Mpi25EncryptedHashData_t, *pMpi25EncryptedHashData_t;
-
/****************************************************************************
* PowerManagementControl message
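
For reference, host code consumes the layout being relocated above (per the
changelog, into mpi2_image.h) by validating the first header's Signature and
then chasing NextImageHeaderOffset through the extended image headers. A
minimal sketch, assuming the image already sits in a caller-provided buffer
(buf and len are hypothetical; checksum handling is elided):

static int walk_fw_image(const u8 *buf, u32 len)
{
	const Mpi2FWImageHeader_t *hdr = (const Mpi2FWImageHeader_t *)buf;
	u32 offset;

	/* MPI v2.0 images carry 0xEA in the signature's top byte;
	 * MPI v2.6 images use MPI26_FW_HEADER_SIGNATURE instead.
	 */
	if ((le32_to_cpu(hdr->Signature) & MPI2_FW_HEADER_SIGNATURE_MASK) !=
	    MPI2_FW_HEADER_SIGNATURE)
		return -EINVAL;

	offset = le32_to_cpu(hdr->NextImageHeaderOffset);
	while (offset && offset + MPI2_EXT_IMAGE_HEADER_SIZE <= len) {
		const Mpi2ExtImageHeader_t *ext =
			(const Mpi2ExtImageHeader_t *)(buf + offset);

		/* ext->ImageType selects FW, NVDATA, FLASH_LAYOUT, ... */
		offset = le32_to_cpu(ext->NextImageHeaderOffset);
	}
	return 0;
}
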
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_pci.h b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h
index f0281f943ec9..63a09509d7d1 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_pci.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h
@@ -1,12 +1,12 @@
/*
- * Copyright 2012-2015 Avago Technologies. All rights reserved.
+ * Copyright 2000-2020 Broadcom Inc. All rights reserved.
*
*
* Name: mpi2_pci.h
* Title: MPI PCIe Attached Devices structures and definitions.
* Creation Date: October 9, 2012
*
- * mpi2_pci.h Version: 02.00.02
+ * mpi2_pci.h Version: 02.00.03
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -23,6 +23,7 @@
* Removed SOP support.
* 07-01-16 02.00.02 Added MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP to
* NVME Encapsulated Request.
+ * 07-22-18 02.00.03 Updated flags field for NVME Encapsulated request
* --------------------------------------------------------------------------
*/
@@ -75,10 +76,10 @@ typedef struct _MPI26_NVME_ENCAPSULATED_REQUEST {
#define MPI26_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0010)
/*Error Response Address Space */
#define MPI26_NVME_FLAGS_MASK_ERROR_RSP_ADDR (0x000C)
+#define MPI26_NVME_FLAGS_MASK_ERROR_RSP_ADDR_MASK (0x000C)
#define MPI26_NVME_FLAGS_SYSTEM_RSP_ADDR (0x0000)
-#define MPI26_NVME_FLAGS_IOCPLB_RSP_ADDR (0x0008)
-#define MPI26_NVME_FLAGS_IOCPLBNTA_RSP_ADDR (0x000C)
-/*Data Direction*/
+#define MPI26_NVME_FLAGS_IOCCTL_RSP_ADDR (0x0008)
+/* Data Direction*/
#define MPI26_NVME_FLAGS_DATADIRECTION_MASK (0x0003)
#define MPI26_NVME_FLAGS_NODATATRANSFER (0x0000)
#define MPI26_NVME_FLAGS_WRITE (0x0001)
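
The masks above are disjoint bit-fields of the NVMe Encapsulated request's
Flags word. A hedged decoding sketch, using only values visible in this
header (flags is a hypothetical host-order copy of the field):

static void decode_nvme_flags(u16 flags)
{
	/* Error Response Address Space: system memory or IOC controlled */
	if ((flags & MPI26_NVME_FLAGS_MASK_ERROR_RSP_ADDR) ==
	    MPI26_NVME_FLAGS_IOCCTL_RSP_ADDR)
		pr_info("error responses land in IOC controlled memory\n");

	switch (flags & MPI26_NVME_FLAGS_DATADIRECTION_MASK) {
	case MPI26_NVME_FLAGS_NODATATRANSFER:
		pr_info("no data transfer\n");
		break;
	case MPI26_NVME_FLAGS_WRITE:
		pr_info("host-to-IOC data transfer\n");
		break;
	}
}
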
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
index b9bb1c178f12..b770eb516c14 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright 2000-2014 Avago Technologies. All rights reserved.
+ * Copyright 2000-2020 Broadcom Inc. All rights reserved.
*
*
* Name: mpi2_raid.h
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
index afa17ff246b4..16c922a8a02b 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright 2000-2015 Avago Technologies. All rights reserved.
+ * Copyright 2000-2020 Broadcom Inc. All rights reserved.
*
*
* Name: mpi2_sas.h
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
index 629296ee9236..3f966b6796b3 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
@@ -1,13 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright 2000-2014 Avago Technologies. All rights reserved.
+ * Copyright 2000-2020 Broadcom Inc. All rights reserved.
*
*
* Name: mpi2_tool.h
* Title: MPI diagnostic tool structures and definitions
* Creation Date: March 26, 2007
*
- * mpi2_tool.h Version: 02.00.14
+ * mpi2_tool.h Version: 02.00.15
*
* Version History
* ---------------
@@ -38,6 +38,8 @@
* 11-18-14 02.00.13 Updated copyright information.
* 08-25-16 02.00.14 Added new values for the Flags field of Toolbox Clean
* Tool Request Message.
+ * 07-22-18 02.00.15 Added defines for new TOOLBOX_PCIE_LANE_MARGINING tool.
+ * Added option for DeviceInfo field in ISTWI tool.
* --------------------------------------------------------------------------
*/
@@ -58,6 +60,7 @@
#define MPI2_TOOLBOX_BEACON_TOOL (0x05)
#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06)
#define MPI2_TOOLBOX_TEXT_DISPLAY_TOOL (0x07)
+#define MPI26_TOOLBOX_BACKEND_PCIE_LANE_MARGIN (0x08)
/****************************************************************************
* Toolbox reply
@@ -226,6 +229,13 @@ typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
#define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE (0x80)
#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07)
+/*MPI26 TOOLBOX Request MsgFlags defines */
+#define MPI26_TOOLBOX_REQ_MSGFLAGS_ADDRESSING_MASK (0x01)
+/*Request uses Man Page 43 device index addressing */
+#define MPI26_TOOLBOX_REQ_MSGFLAGS_ADDRESSING_DEVINDEX (0x00)
+/*Request uses Man Page 43 device info struct addressing */
+#define MPI26_TOOLBOX_REQ_MSGFLAGS_ADDRESSING_DEVINFO (0x01)
+
/*Toolbox ISTWI Read Write Tool reply message */
typedef struct _MPI2_TOOLBOX_ISTWI_REPLY {
U8 Tool; /*0x00 */
@@ -387,6 +397,64 @@ Mpi2ToolboxTextDisplayRequest_t,
#define MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP (0x01)
+/***************************************************************************
+ * Toolbox Backend Lane Margining Tool
+ ***************************************************************************
+ */
+
+/*Toolbox Backend Lane Margining Tool request message */
+typedef struct _MPI26_TOOLBOX_LANE_MARGINING_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U8 Command; /*0x0C */
+ U8 SwitchPort; /*0x0D */
+ U16 DevHandle; /*0x0E */
+ U8 RegisterOffset; /*0x10 */
+ U8 Reserved5; /*0x11 */
+ U16 DataLength; /*0x12 */
+ MPI25_SGE_IO_UNION SGL; /*0x14 */
+} MPI26_TOOLBOX_LANE_MARGINING_REQUEST,
+ *PTR_MPI2_TOOLBOX_LANE_MARGINING_REQUEST,
+ Mpi26ToolboxLaneMarginingRequest_t,
+ *pMpi2ToolboxLaneMarginingRequest_t;
+
+/* defines for the Command field */
+#define MPI26_TOOL_MARGIN_COMMAND_ENTER_MARGIN_MODE (0x01)
+#define MPI26_TOOL_MARGIN_COMMAND_READ_REGISTER_DATA (0x02)
+#define MPI26_TOOL_MARGIN_COMMAND_WRITE_REGISTER_DATA (0x03)
+#define MPI26_TOOL_MARGIN_COMMAND_EXIT_MARGIN_MODE (0x04)
+
+
+/*Toolbox Backend Lane Margining Tool reply message */
+typedef struct _MPI26_TOOLBOX_LANE_MARGINING_REPLY {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U16 ReturnedDataLength; /*0x14 */
+ U16 Reserved6; /*0x16 */
+} MPI26_TOOLBOX_LANE_MARGINING_REPLY,
+ *PTR_MPI26_TOOLBOX_LANE_MARGINING_REPLY,
+ Mpi26ToolboxLaneMarginingReply_t,
+ *pMpi26ToolboxLaneMarginingReply_t;
+
/*****************************************************************************
*
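
Nothing in this patch exercises the new lane margining tool yet; a sketch of
how a caller might fill the request, assuming the usual mpt3sas smid and
message-frame plumbing (handle and data_sz are hypothetical, SGL setup is
elided):

	Mpi26ToolboxLaneMarginingRequest_t *mpi_request =
		mpt3sas_base_get_msg_frame(ioc, smid);

	memset(mpi_request, 0, ioc->request_sz);
	mpi_request->Tool = MPI26_TOOLBOX_BACKEND_PCIE_LANE_MARGIN;
	mpi_request->Function = MPI2_FUNCTION_TOOLBOX;
	mpi_request->Command = MPI26_TOOL_MARGIN_COMMAND_READ_REGISTER_DATA;
	mpi_request->DevHandle = cpu_to_le16(handle);	/* hypothetical */
	mpi_request->RegisterOffset = 0;		/* illustrative */
	mpi_request->DataLength = cpu_to_le16(data_sz);	/* hypothetical */
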
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 2500377d0723..0a6cb8f0680c 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -157,6 +157,32 @@ module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
param_get_int, &mpt3sas_fwfault_debug, 0644);
/**
+ * _base_readl_aero - retry readl up to three times
+ * @addr: MPT Fusion system interface register address
+ *
+ * Retry the readl() up to three times if it returns zero while
+ * reading the system interface register.
+ */
+static inline u32
+_base_readl_aero(const volatile void __iomem *addr)
+{
+ u32 i = 0, ret_val;
+
+ do {
+ ret_val = readl(addr);
+ i++;
+ } while (ret_val == 0 && i < 3);
+
+ return ret_val;
+}
+
+static inline u32
+_base_readl(const volatile void __iomem *addr)
+{
+ return readl(addr);
+}
+
+/**
* _base_clone_reply_to_sys_mem - copies reply to reply free iomem
* in BAR0 space.
*
@@ -716,7 +742,7 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
dump_stack();
- doorbell = readl(&ioc->chip->Doorbell);
+ doorbell = ioc->base_readl(&ioc->chip->Doorbell);
if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
mpt3sas_base_fault_info(ioc , doorbell);
else {
@@ -1325,10 +1351,10 @@ _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
u32 him_register;
ioc->mask_interrupts = 1;
- him_register = readl(&ioc->chip->HostInterruptMask);
+ him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
writel(him_register, &ioc->chip->HostInterruptMask);
- readl(&ioc->chip->HostInterruptMask);
+ ioc->base_readl(&ioc->chip->HostInterruptMask);
}
/**
@@ -1342,7 +1368,7 @@ _base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
u32 him_register;
- him_register = readl(&ioc->chip->HostInterruptMask);
+ him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
him_register &= ~MPI2_HIM_RIM;
writel(him_register, &ioc->chip->HostInterruptMask);
ioc->mask_interrupts = 0;
@@ -3319,8 +3345,9 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
+ wmb();
__raw_writeq(b, addr);
- mmiowb();
+ barrier();
}
#else
static inline void
@@ -4060,7 +4087,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
* flag unset in NVDATA.
*/
mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
- if (ioc->manu_pg11.EEDPTagMode == 0) {
+ if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
ioc->name);
ioc->manu_pg11.EEDPTagMode &= ~0x3;
@@ -4854,7 +4881,7 @@ mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
{
u32 s, sc;
- s = readl(&ioc->chip->Doorbell);
+ s = ioc->base_readl(&ioc->chip->Doorbell);
sc = s & MPI2_IOC_STATE_MASK;
return cooked ? sc : s;
}
@@ -4910,7 +4937,7 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
count = 0;
cntdn = 1000 * timeout;
do {
- int_status = readl(&ioc->chip->HostInterruptStatus);
+ int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
dhsprintk(ioc,
ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
@@ -4936,7 +4963,7 @@ _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
count = 0;
cntdn = 2000 * timeout;
do {
- int_status = readl(&ioc->chip->HostInterruptStatus);
+ int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
dhsprintk(ioc,
ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
@@ -4974,14 +5001,14 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
count = 0;
cntdn = 1000 * timeout;
do {
- int_status = readl(&ioc->chip->HostInterruptStatus);
+ int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
dhsprintk(ioc,
ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
__func__, count, timeout));
return 0;
} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
- doorbell = readl(&ioc->chip->Doorbell);
+ doorbell = ioc->base_readl(&ioc->chip->Doorbell);
if ((doorbell & MPI2_IOC_STATE_MASK) ==
MPI2_IOC_STATE_FAULT) {
mpt3sas_base_fault_info(ioc , doorbell);
@@ -5016,7 +5043,7 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
count = 0;
cntdn = 1000 * timeout;
do {
- doorbell_reg = readl(&ioc->chip->Doorbell);
+ doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
dhsprintk(ioc,
ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
@@ -5078,6 +5105,39 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
}
/**
+ * mpt3sas_wait_for_ioc - wait for the IOC to become operational
+ * @ioc: per adapter object
+ * @timeout: timeout in seconds
+ *
+ * Waits up to timeout seconds for the IOC to become operational.
+ *
+ * Return: 0 if the IOC is present and operational; otherwise -EFAULT.
+ */
+
+int
+mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
+{
+ int wait_state_count = 0;
+ u32 ioc_state;
+
+ do {
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
+ break;
+ ssleep(1);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, ++wait_state_count);
+ } while (--timeout);
+ if (!timeout) {
+ ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
+ return -EFAULT;
+ }
+ if (wait_state_count)
+ ioc_info(ioc, "ioc is operational\n");
+ return 0;
+}
+
+/**
* _base_handshake_req_reply_wait - send request thru doorbell interface
* @ioc: per adapter object
* @request_bytes: request length
@@ -5098,13 +5158,13 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
__le32 *mfp;
/* make sure doorbell is not in use */
- if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
+ if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
return -EFAULT;
}
/* clear pending doorbell interrupts from previous state changes */
- if (readl(&ioc->chip->HostInterruptStatus) &
+ if (ioc->base_readl(&ioc->chip->HostInterruptStatus) &
MPI2_HIS_IOC2SYS_DB_STATUS)
writel(0, &ioc->chip->HostInterruptStatus);
@@ -5147,7 +5207,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
}
/* read the first two 16-bits, it gives the total length of the reply */
- reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+ reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
if ((_base_wait_for_doorbell_int(ioc, 5))) {
@@ -5155,7 +5215,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
__LINE__);
return -EFAULT;
}
- reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+ reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
@@ -5166,9 +5226,10 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
return -EFAULT;
}
if (i >= reply_bytes/2) /* overflow case */
- readl(&ioc->chip->Doorbell);
+ ioc->base_readl(&ioc->chip->Doorbell);
else
- reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+ reply[i] = le16_to_cpu(
+ ioc->base_readl(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
}
@@ -5211,11 +5272,9 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
Mpi2SasIoUnitControlRequest_t *mpi_request)
{
u16 smid;
- u32 ioc_state;
u8 issue_reset = 0;
int rc;
void *request;
- u16 wait_state_count;
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -5227,20 +5286,9 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
goto out;
}
- wait_state_count = 0;
- ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
- if (wait_state_count++ == 10) {
- ioc_err(ioc, "%s: failed due to ioc not operational\n",
- __func__);
- rc = -EFAULT;
- goto out;
- }
- ssleep(1);
- ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
- __func__, wait_state_count);
- }
+ rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
+ if (rc)
+ goto out;
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
if (!smid) {
@@ -5306,11 +5354,9 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
{
u16 smid;
- u32 ioc_state;
u8 issue_reset = 0;
int rc;
void *request;
- u16 wait_state_count;
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -5322,20 +5368,9 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
goto out;
}
- wait_state_count = 0;
- ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
- if (wait_state_count++ == 10) {
- ioc_err(ioc, "%s: failed due to ioc not operational\n",
- __func__);
- rc = -EFAULT;
- goto out;
- }
- ssleep(1);
- ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
- __func__, wait_state_count);
- }
+ rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
+ if (rc)
+ goto out;
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
if (!smid) {
@@ -6020,14 +6055,14 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
if (count++ > 20)
goto out;
- host_diagnostic = readl(&ioc->chip->HostDiagnostic);
+ host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
drsprintk(ioc,
ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
count, host_diagnostic));
} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
- hcb_size = readl(&ioc->chip->HCBSize);
+ hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
@@ -6040,7 +6075,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
for (count = 0; count < (300000000 /
MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
- host_diagnostic = readl(&ioc->chip->HostDiagnostic);
+ host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
if (host_diagnostic == 0xFFFFFFFF)
goto out;
@@ -6391,6 +6426,10 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->rdpq_array_enable_assigned = 0;
ioc->dma_mask = 0;
+ if (ioc->is_aero_ioc)
+ ioc->base_readl = &_base_readl_aero;
+ else
+ ioc->base_readl = &_base_readl;
r = mpt3sas_base_map_resources(ioc);
if (r)
goto out_free_resources;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 8f1d6b071b39..800351932cc3 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -55,6 +55,7 @@
#include "mpi/mpi2_tool.h"
#include "mpi/mpi2_sas.h"
#include "mpi/mpi2_pci.h"
+#include "mpi/mpi2_image.h"
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -74,9 +75,9 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "26.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 26
-#define MPT3SAS_MINOR_VERSION 100
+#define MPT3SAS_DRIVER_VERSION "27.101.00.00"
+#define MPT3SAS_MAJOR_VERSION 27
+#define MPT3SAS_MINOR_VERSION 101
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -139,6 +140,9 @@
#define DEFAULT_NUM_FWCHAIN_ELEMTS 8
#define FW_IMG_HDR_READ_TIMEOUT 15
+
+#define IOC_OPERATIONAL_WAIT_COUNT 10
+
/*
* NVMe defines
*/
@@ -908,6 +912,7 @@ typedef void (*NVME_BUILD_PRP)(struct MPT3SAS_ADAPTER *ioc, u16 smid,
typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 funcdep);
typedef void (*PUT_SMID_DEFAULT) (struct MPT3SAS_ADAPTER *ioc, u16 smid);
+typedef u32 (*BASE_READ_REG) (const volatile void __iomem *addr);
/* IOC Facts and Port Facts converted from little endian to cpu */
union mpi3_version_union {
@@ -1388,6 +1393,7 @@ struct MPT3SAS_ADAPTER {
u8 hide_drives;
spinlock_t diag_trigger_lock;
u8 diag_trigger_active;
+ BASE_READ_REG base_readl;
struct SL_WH_MASTER_TRIGGER_T diag_trigger_master;
struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
@@ -1395,6 +1401,7 @@ struct MPT3SAS_ADAPTER {
void *device_remove_in_progress;
u16 device_remove_in_progress_sz;
u8 is_gen35_ioc;
+ u8 is_aero_ioc;
PUT_SMID_IO_FP_HIP put_smid_scsi_io;
};
@@ -1487,6 +1494,7 @@ mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc);
u8 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
u8 status, void *mpi_request, int sz);
+int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout);
/* scsih shared API */
struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 02209447f4ef..fb0a17252f86 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -119,7 +119,7 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
desc = "raid_volume";
break;
case MPI2_CONFIG_PAGETYPE_MANUFACTURING:
- desc = "manufaucturing";
+ desc = "manufacturing";
break;
case MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK:
desc = "physdisk";
@@ -300,11 +300,9 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
void *config_page, u16 config_page_sz)
{
u16 smid;
- u32 ioc_state;
Mpi2ConfigRequest_t *config_request;
int r;
u8 retry_count, issue_host_reset = 0;
- u16 wait_state_count;
struct config_request mem;
u32 ioc_status = UINT_MAX;
@@ -361,23 +359,10 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
ioc_info(ioc, "%s: attempting retry (%d)\n",
__func__, retry_count);
}
- wait_state_count = 0;
- ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
- if (wait_state_count++ == MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT) {
- ioc_err(ioc, "%s: failed due to ioc not operational\n",
- __func__);
- ioc->config_cmds.status = MPT3_CMD_NOT_USED;
- r = -EFAULT;
- goto free_mem;
- }
- ssleep(1);
- ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
- __func__, wait_state_count);
- }
- if (wait_state_count)
- ioc_info(ioc, "%s: ioc is operational\n", __func__);
+
+ r = mpt3sas_wait_for_ioc(ioc, MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT);
+ if (r)
+ goto free_mem;
smid = mpt3sas_base_get_smid(ioc, ioc->config_cb_idx);
if (!smid) {
@@ -673,10 +658,6 @@ mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
r = _config_request(ioc, &mpi_request, mpi_reply,
MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
sizeof(*config_page));
- mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
- r = _config_request(ioc, &mpi_request, mpi_reply,
- MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
out:
return r;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 4afa597cbfba..b2bb47c14d35 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -641,7 +641,6 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
MPI2DefaultReply_t *mpi_reply;
Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
struct _pcie_device *pcie_device = NULL;
- u32 ioc_state;
u16 smid;
u8 timeout;
u8 issue_reset;
@@ -654,7 +653,6 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
dma_addr_t data_in_dma = 0;
size_t data_in_sz = 0;
long ret;
- u16 wait_state_count;
u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
u8 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
@@ -666,22 +664,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
goto out;
}
- wait_state_count = 0;
- ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
- if (wait_state_count++ == 10) {
- ioc_err(ioc, "%s: failed due to ioc not operational\n",
- __func__);
- ret = -EFAULT;
- goto out;
- }
- ssleep(1);
- ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
- __func__, wait_state_count);
- }
- if (wait_state_count)
- ioc_info(ioc, "%s: ioc is operational\n", __func__);
+ ret = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
+ if (ret)
+ goto out;
mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
if (!mpi_request) {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 03c52847ed07..6be39dc27103 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3748,6 +3748,40 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
return _scsih_check_for_pending_tm(ioc, smid);
}
+/**
+ * _scsih_allow_scmd_to_device - check whether the scmd can be issued to the IOC
+ * @ioc: per adapter object
+ * @scmd: pointer to scsi command object
+ *
+ * Return: true if the scmd can be issued to the IOC, otherwise false.
+ */
+inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd)
+{
+
+ if (ioc->pci_error_recovery)
+ return false;
+
+ if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
+ if (ioc->remove_host)
+ return false;
+
+ return true;
+ }
+
+ if (ioc->remove_host) {
+
+ switch (scmd->cmnd[0]) {
+ case SYNCHRONIZE_CACHE:
+ case START_STOP:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
/**
* _scsih_sas_control_complete - completion routine
@@ -4571,7 +4605,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
return 0;
}
- if (ioc->pci_error_recovery || ioc->remove_host) {
+ if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
return 0;
@@ -9641,6 +9675,7 @@ static void scsih_remove(struct pci_dev *pdev)
/* release all the volumes */
_scsih_ir_shutdown(ioc);
+ sas_remove_host(shost);
list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
list) {
if (raid_device->starget) {
@@ -9682,7 +9717,6 @@ static void scsih_remove(struct pci_dev *pdev)
ioc->sas_hba.num_phys = 0;
}
- sas_remove_host(shost);
mpt3sas_base_detach(ioc);
spin_lock(&gioc_lock);
list_del(&ioc->list);
@@ -10139,7 +10173,6 @@ static struct scsi_host_template mpt2sas_driver_template = {
.sg_tablesize = MPT2SAS_SG_DEPTH,
.max_sectors = 32767,
.cmd_per_lun = 7,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = mpt3sas_host_attrs,
.sdev_attrs = mpt3sas_dev_attrs,
.track_queue_depth = 1,
@@ -10178,7 +10211,6 @@ static struct scsi_host_template mpt3sas_driver_template = {
.sg_tablesize = MPT3SAS_SG_DEPTH,
.max_sectors = 32767,
.cmd_per_lun = 7,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = mpt3sas_host_attrs,
.sdev_attrs = mpt3sas_dev_attrs,
.track_queue_depth = 1,
@@ -10250,6 +10282,10 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
case MPI26_MFGPAGE_DEVID_SAS3516_1:
case MPI26_MFGPAGE_DEVID_SAS3416:
case MPI26_MFGPAGE_DEVID_SAS3616:
+ case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
+ case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
+ case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
+ case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
return MPI26_VERSION;
}
return 0;
@@ -10337,8 +10373,17 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case MPI26_MFGPAGE_DEVID_SAS3616:
ioc->is_gen35_ioc = 1;
break;
+ case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
+ case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
+ dev_info(&pdev->dev,
+ "HBA is in Configurable Secure mode\n");
+ /* fall through */
+ case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
+ case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
+ ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
+ break;
default:
- ioc->is_gen35_ioc = 0;
+ ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
}
if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
@@ -10795,6 +10840,23 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
/* Mercator ~ 3616*/
{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
PCI_ANY_ID, PCI_ANY_ID },
+
+ /* Aero SI 0x00E1 Configurable Secure
+ * 0x00E2 Hard Secure
+ */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
+ PCI_ANY_ID, PCI_ANY_ID },
+
+ /* Sea SI 0x00E5 Configurable Secure
+ * 0x00E6 Hard Secure
+ */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
+ PCI_ANY_ID, PCI_ANY_ID },
+
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 6a8a3c09b4b1..60ae2d0feb2b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -296,7 +296,6 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
struct rep_manu_request *manufacture_request;
int rc;
u16 smid;
- u32 ioc_state;
void *psge;
u8 issue_reset = 0;
void *data_out = NULL;
@@ -304,7 +303,6 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
dma_addr_t data_in_dma;
size_t data_in_sz;
size_t data_out_sz;
- u16 wait_state_count;
if (ioc->shost_recovery || ioc->pci_error_recovery) {
ioc_info(ioc, "%s: host reset in progress!\n", __func__);
@@ -320,22 +318,9 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
}
ioc->transport_cmds.status = MPT3_CMD_PENDING;
- wait_state_count = 0;
- ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
- if (wait_state_count++ == 10) {
- ioc_err(ioc, "%s: failed due to ioc not operational\n",
- __func__);
- rc = -EFAULT;
- goto out;
- }
- ssleep(1);
- ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
- __func__, wait_state_count);
- }
- if (wait_state_count)
- ioc_info(ioc, "%s: ioc is operational\n", __func__);
+ rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
+ if (rc)
+ goto out;
smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
if (!smid) {
@@ -821,10 +806,13 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
mpt3sas_port->remote_identify.sas_address,
mpt3sas_phy->phy_id);
mpt3sas_phy->phy_belongs_to_port = 0;
- sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy);
+ if (!ioc->remove_host)
+ sas_port_delete_phy(mpt3sas_port->port,
+ mpt3sas_phy->phy);
list_del(&mpt3sas_phy->port_siblings);
}
- sas_port_delete(mpt3sas_port->port);
+ if (!ioc->remove_host)
+ sas_port_delete(mpt3sas_port->port);
kfree(mpt3sas_port);
}
@@ -1076,13 +1064,11 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
struct phy_error_log_reply *phy_error_log_reply;
int rc;
u16 smid;
- u32 ioc_state;
void *psge;
u8 issue_reset = 0;
void *data_out = NULL;
dma_addr_t data_out_dma;
u32 sz;
- u16 wait_state_count;
if (ioc->shost_recovery || ioc->pci_error_recovery) {
ioc_info(ioc, "%s: host reset in progress!\n", __func__);
@@ -1098,22 +1084,9 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
}
ioc->transport_cmds.status = MPT3_CMD_PENDING;
- wait_state_count = 0;
- ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
- if (wait_state_count++ == 10) {
- ioc_err(ioc, "%s: failed due to ioc not operational\n",
- __func__);
- rc = -EFAULT;
- goto out;
- }
- ssleep(1);
- ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
- __func__, wait_state_count);
- }
- if (wait_state_count)
- ioc_info(ioc, "%s: ioc is operational\n", __func__);
+ rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
+ if (rc)
+ goto out;
smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
if (!smid) {
@@ -1381,13 +1354,11 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
struct phy_control_reply *phy_control_reply;
int rc;
u16 smid;
- u32 ioc_state;
void *psge;
u8 issue_reset = 0;
void *data_out = NULL;
dma_addr_t data_out_dma;
u32 sz;
- u16 wait_state_count;
if (ioc->shost_recovery || ioc->pci_error_recovery) {
ioc_info(ioc, "%s: host reset in progress!\n", __func__);
@@ -1403,22 +1374,9 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
}
ioc->transport_cmds.status = MPT3_CMD_PENDING;
- wait_state_count = 0;
- ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
- if (wait_state_count++ == 10) {
- ioc_err(ioc, "%s: failed due to ioc not operational\n",
- __func__);
- rc = -EFAULT;
- goto out;
- }
- ssleep(1);
- ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
- __func__, wait_state_count);
- }
- if (wait_state_count)
- ioc_info(ioc, "%s: ioc is operational\n", __func__);
+ rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
+ if (rc)
+ goto out;
smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
if (!smid) {
@@ -1880,7 +1838,6 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
Mpi2SmpPassthroughReply_t *mpi_reply;
int rc;
u16 smid;
- u32 ioc_state;
void *psge;
dma_addr_t dma_addr_in;
dma_addr_t dma_addr_out;
@@ -1888,7 +1845,6 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
void *addr_out = NULL;
size_t dma_len_in;
size_t dma_len_out;
- u16 wait_state_count;
unsigned int reslen = 0;
if (ioc->shost_recovery || ioc->pci_error_recovery) {
@@ -1924,22 +1880,9 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
if (rc)
goto unmap_out;
- wait_state_count = 0;
- ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
- if (wait_state_count++ == 10) {
- ioc_err(ioc, "%s: failed due to ioc not operational\n",
- __func__);
- rc = -EFAULT;
- goto unmap_in;
- }
- ssleep(1);
- ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
- __func__, wait_state_count);
- }
- if (wait_state_count)
- ioc_info(ioc, "%s: ioc is operational\n", __func__);
+ rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
+ if (rc)
+ goto unmap_in;
smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
if (!smid) {
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
index 7d1ab414b78f..ca96d6d9c350 100644
--- a/drivers/scsi/mvme147.c
+++ b/drivers/scsi/mvme147.c
@@ -78,7 +78,6 @@ static struct scsi_host_template mvme147_host_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = CMD_PER_LUN,
- .use_clustering = ENABLE_CLUSTERING
};
static struct Scsi_Host *mvme147_shost;
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 3ac34373746c..030d911ee374 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -59,7 +59,6 @@ static struct scsi_host_template mvs_sht = {
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
- .use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 2458974d1af6..dbe753fba486 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2197,6 +2197,7 @@ static struct scsi_host_template mvumi_template = {
.eh_timed_out = mvumi_timed_out,
.eh_host_reset_handler = mvumi_host_reset,
.bios_param = mvumi_bios_param,
+ .dma_boundary = PAGE_SIZE - 1,
.this_id = -1,
};
@@ -2620,7 +2621,7 @@ static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
}
ret = mvumi_pci_set_master(pdev);
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
goto fail;
ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index 0642f2d0a3bb..539ac8ce4fcd 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1528,6 +1528,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
scmd->scsi_done(scmd);
return 0;
}
+ /* fall through */
case WRITE_6:
lba = (((scmd->cmnd[1] & 0x1F) << 16) |
(scmd->cmnd[2] << 8) |
@@ -1544,6 +1545,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
scmd->scsi_done(scmd);
return 0;
}
+ /* fall through */
case WRITE_10:
case VERIFY: /* 0x2F */
case WRITE_VERIFY: /* 0x2E */
@@ -1560,6 +1562,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
scmd->scsi_done(scmd);
return 0;
}
+ /* fall through */
case WRITE_12:
case VERIFY_12: /* 0xAF */
case WRITE_VERIFY_12: /* 0xAE */
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 6cd3e289ef99..1a236a3dfd51 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -8313,7 +8313,6 @@ struct Scsi_Host * __init ncr_attach(struct scsi_host_template *tpnt,
tpnt->this_id = 7;
tpnt->sg_tablesize = SCSI_NCR_SG_TABLESIZE;
tpnt->cmd_per_lun = SCSI_NCR_CMD_PER_LUN;
- tpnt->use_clustering = ENABLE_CLUSTERING;
if (device->differential)
driver_setup.diff_support = device->differential;
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 5aac3e801903..00e3cbee55b8 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -274,7 +274,7 @@ static struct scsi_host_template nsp32_template = {
.sg_tablesize = NSP32_SG_SIZE,
.max_sectors = 128,
.this_id = NSP32_HOST_SCSIID,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
.eh_abort_handler = nsp32_eh_abort,
.eh_host_reset_handler = nsp32_eh_host_reset,
/* .highmem_io = 1, */
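
These template edits all follow one rule from the DMA rework:
".use_clustering = ENABLE_CLUSTERING" lines are dropped outright, since
segment merging is now the default, while templates that had
DISABLE_CLUSTERING keep the old never-merge-across-pages behavior by
declaring a page-sized DMA boundary instead. A hypothetical template
showing the mechanical conversion:

static struct scsi_host_template example_sht = {
	.name		= "example",
	/* was: .use_clustering = DISABLE_CLUSTERING, */
	.dma_boundary	= PAGE_SIZE - 1, /* keeps segments within a page */
};
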
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index f3230494a8c9..1bd6825a4f14 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -86,7 +86,7 @@ static struct scsi_host_template nsp_driver_template = {
.can_queue = 1,
.this_id = NSP_INITIATOR_ID,
.sg_tablesize = SG_ALL,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
};
static nsp_hw_data nsp_data_base; /* attach <-> detect glue */
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index 173351a8554b..828d53faf09a 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -72,7 +72,7 @@ static struct scsi_host_template qlogicfas_driver_template = {
.can_queue = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
};
/*====================================================================*/
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index a3b63bea0e50..d1e98a6ea28f 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -680,7 +680,6 @@ static struct scsi_host_template sym53c500_driver_template = {
.can_queue = 1,
.this_id = 7,
.sg_tablesize = 32,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = SYM53C500_shost_attrs
};
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index d71e7e4ec29c..a36060c23b37 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -84,7 +84,6 @@ static struct scsi_host_template pm8001_sht = {
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
- .use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 4e86994e10e8..7c4673308f5b 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -846,16 +846,9 @@ static void pmcraid_erp_done(struct pmcraid_cmd *cmd)
cmd->ioa_cb->ioarcb.cdb[0], ioasc);
}
- /* if we had allocated sense buffers for request sense, copy the sense
- * release the buffers
- */
- if (cmd->sense_buffer != NULL) {
- memcpy(scsi_cmd->sense_buffer,
- cmd->sense_buffer,
- SCSI_SENSE_BUFFERSIZE);
- pci_free_consistent(pinstance->pdev,
- SCSI_SENSE_BUFFERSIZE,
- cmd->sense_buffer, cmd->sense_buffer_dma);
+ if (cmd->sense_buffer) {
+ dma_unmap_single(&pinstance->pdev->dev, cmd->sense_buffer_dma,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
cmd->sense_buffer = NULL;
cmd->sense_buffer_dma = 0;
}
@@ -2444,13 +2437,12 @@ static void pmcraid_request_sense(struct pmcraid_cmd *cmd)
{
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
+ struct device *dev = &cmd->drv_inst->pdev->dev;
- /* allocate DMAable memory for sense buffers */
- cmd->sense_buffer = pci_alloc_consistent(cmd->drv_inst->pdev,
- SCSI_SENSE_BUFFERSIZE,
- &cmd->sense_buffer_dma);
-
- if (cmd->sense_buffer == NULL) {
+ cmd->sense_buffer = cmd->scsi_cmd->sense_buffer;
+ cmd->sense_buffer_dma = dma_map_single(dev, cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, cmd->sense_buffer_dma)) {
pmcraid_err
("couldn't allocate sense buffer for request sense\n");
pmcraid_erp_done(cmd);
@@ -2491,17 +2483,15 @@ static void pmcraid_request_sense(struct pmcraid_cmd *cmd)
/**
* pmcraid_cancel_all - cancel all outstanding IOARCBs as part of error recovery
* @cmd: command that failed
- * @sense: true if request_sense is required after cancel all
+ * @need_sense: true if request_sense is required after cancel all
*
* This function sends a cancel all to a device to clear the queue.
*/
-static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, u32 sense)
+static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, bool need_sense)
{
struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
- void (*cmd_done) (struct pmcraid_cmd *) = sense ? pmcraid_erp_done
- : pmcraid_request_sense;
memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
ioarcb->request_flags0 = SYNC_OVERRIDE;
@@ -2519,7 +2509,8 @@ static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, u32 sense)
/* writing to IOARRIN must be protected by host_lock, as mid-layer
* schedule queuecommand while we are doing this
*/
- pmcraid_send_cmd(cmd, cmd_done,
+ pmcraid_send_cmd(cmd, need_sense ?
+ pmcraid_erp_done : pmcraid_request_sense,
PMCRAID_REQUEST_SENSE_TIMEOUT,
pmcraid_timeout_handler);
}
@@ -2612,7 +2603,7 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
u32 ioasc = le32_to_cpu(ioasa->ioasc);
u32 masked_ioasc = ioasc & PMCRAID_IOASC_SENSE_MASK;
- u32 sense_copied = 0;
+ bool sense_copied = false;
if (!res) {
pmcraid_info("resource pointer is NULL\n");
@@ -2684,7 +2675,7 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
memcpy(scsi_cmd->sense_buffer,
ioasa->sense_data,
data_size);
- sense_copied = 1;
+ sense_copied = true;
}
if (RES_IS_GSCSI(res->cfg_entry))
@@ -3523,7 +3514,7 @@ static int pmcraid_build_passthrough_ioadls(
return -ENOMEM;
}
- sglist->num_dma_sg = pci_map_sg(cmd->drv_inst->pdev,
+ sglist->num_dma_sg = dma_map_sg(&cmd->drv_inst->pdev->dev,
sglist->scatterlist,
sglist->num_sg, direction);
@@ -3572,7 +3563,7 @@ static void pmcraid_release_passthrough_ioadls(
struct pmcraid_sglist *sglist = cmd->sglist;
if (buflen > 0) {
- pci_unmap_sg(cmd->drv_inst->pdev,
+ dma_unmap_sg(&cmd->drv_inst->pdev->dev,
sglist->scatterlist,
sglist->num_sg,
direction);
@@ -4158,7 +4149,6 @@ static struct scsi_host_template pmcraid_host_template = {
.max_sectors = PMCRAID_IOA_MAX_SECTORS,
.no_write_same = 1,
.cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = pmcraid_host_attrs,
.proc_name = PMCRAID_DRIVER_NAME,
};
@@ -4708,9 +4698,9 @@ static void
pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex)
{
int i;
- for (i = 0; i < maxindex; i++) {
- pci_free_consistent(pinstance->pdev,
+ for (i = 0; i < maxindex; i++) {
+ dma_free_coherent(&pinstance->pdev->dev,
HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD,
pinstance->hrrq_start[i],
pinstance->hrrq_start_bus_addr[i]);
@@ -4737,11 +4727,9 @@ static int pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
for (i = 0; i < pinstance->num_hrrq; i++) {
pinstance->hrrq_start[i] =
- pci_alloc_consistent(
- pinstance->pdev,
- buffer_size,
- &(pinstance->hrrq_start_bus_addr[i]));
-
+ dma_alloc_coherent(&pinstance->pdev->dev, buffer_size,
+ &pinstance->hrrq_start_bus_addr[i],
+ GFP_KERNEL);
if (!pinstance->hrrq_start[i]) {
pmcraid_err("pci_alloc failed for hrrq vector : %d\n",
i);
@@ -4770,7 +4758,7 @@ static int pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
static void pmcraid_release_hcams(struct pmcraid_instance *pinstance)
{
if (pinstance->ccn.msg != NULL) {
- pci_free_consistent(pinstance->pdev,
+ dma_free_coherent(&pinstance->pdev->dev,
PMCRAID_AEN_HDR_SIZE +
sizeof(struct pmcraid_hcam_ccn_ext),
pinstance->ccn.msg,
@@ -4782,7 +4770,7 @@ static void pmcraid_release_hcams(struct pmcraid_instance *pinstance)
}
if (pinstance->ldn.msg != NULL) {
- pci_free_consistent(pinstance->pdev,
+ dma_free_coherent(&pinstance->pdev->dev,
PMCRAID_AEN_HDR_SIZE +
sizeof(struct pmcraid_hcam_ldn),
pinstance->ldn.msg,
@@ -4803,17 +4791,15 @@ static void pmcraid_release_hcams(struct pmcraid_instance *pinstance)
*/
static int pmcraid_allocate_hcams(struct pmcraid_instance *pinstance)
{
- pinstance->ccn.msg = pci_alloc_consistent(
- pinstance->pdev,
+ pinstance->ccn.msg = dma_alloc_coherent(&pinstance->pdev->dev,
PMCRAID_AEN_HDR_SIZE +
sizeof(struct pmcraid_hcam_ccn_ext),
- &(pinstance->ccn.baddr));
+ &pinstance->ccn.baddr, GFP_KERNEL);
- pinstance->ldn.msg = pci_alloc_consistent(
- pinstance->pdev,
+ pinstance->ldn.msg = dma_alloc_coherent(&pinstance->pdev->dev,
PMCRAID_AEN_HDR_SIZE +
sizeof(struct pmcraid_hcam_ldn),
- &(pinstance->ldn.baddr));
+ &pinstance->ldn.baddr, GFP_KERNEL);
if (pinstance->ldn.msg == NULL || pinstance->ccn.msg == NULL) {
pmcraid_release_hcams(pinstance);
@@ -4841,7 +4827,7 @@ static void pmcraid_release_config_buffers(struct pmcraid_instance *pinstance)
{
if (pinstance->cfg_table != NULL &&
pinstance->cfg_table_bus_addr != 0) {
- pci_free_consistent(pinstance->pdev,
+ dma_free_coherent(&pinstance->pdev->dev,
sizeof(struct pmcraid_config_table),
pinstance->cfg_table,
pinstance->cfg_table_bus_addr);
@@ -4886,10 +4872,10 @@ static int pmcraid_allocate_config_buffers(struct pmcraid_instance *pinstance)
list_add_tail(&pinstance->res_entries[i].queue,
&pinstance->free_res_q);
- pinstance->cfg_table =
- pci_alloc_consistent(pinstance->pdev,
+ pinstance->cfg_table = dma_alloc_coherent(&pinstance->pdev->dev,
sizeof(struct pmcraid_config_table),
- &pinstance->cfg_table_bus_addr);
+ &pinstance->cfg_table_bus_addr,
+ GFP_KERNEL);
if (NULL == pinstance->cfg_table) {
pmcraid_err("couldn't alloc DMA memory for config table\n");
@@ -4954,7 +4940,7 @@ static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
if (pinstance->inq_data != NULL) {
- pci_free_consistent(pinstance->pdev,
+ dma_free_coherent(&pinstance->pdev->dev,
sizeof(struct pmcraid_inquiry_data),
pinstance->inq_data,
pinstance->inq_data_baddr);
@@ -4964,7 +4950,7 @@ static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
}
if (pinstance->timestamp_data != NULL) {
- pci_free_consistent(pinstance->pdev,
+ dma_free_coherent(&pinstance->pdev->dev,
sizeof(struct pmcraid_timestamp_data),
pinstance->timestamp_data,
pinstance->timestamp_data_baddr);
@@ -4981,8 +4967,8 @@ static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
* This routine pre-allocates memory based on the type of block as below:
* cmdblocks(PMCRAID_MAX_CMD): kernel memory using kernel's slab_allocator,
* IOARCBs(PMCRAID_MAX_CMD) : DMAable memory, using pci pool allocator
- * config-table entries : DMAable memory using pci_alloc_consistent
- * HostRRQs : DMAable memory, using pci_alloc_consistent
+ * config-table entries : DMAable memory using dma_alloc_coherent
+ * HostRRQs : DMAable memory, using dma_alloc_coherent
*
* Return Value
* 0 in case all of the blocks are allocated, -ENOMEM otherwise.
@@ -5019,11 +5005,9 @@ static int pmcraid_init_buffers(struct pmcraid_instance *pinstance)
}
/* allocate DMAable memory for page D0 INQUIRY buffer */
- pinstance->inq_data = pci_alloc_consistent(
- pinstance->pdev,
+ pinstance->inq_data = dma_alloc_coherent(&pinstance->pdev->dev,
sizeof(struct pmcraid_inquiry_data),
- &pinstance->inq_data_baddr);
-
+ &pinstance->inq_data_baddr, GFP_KERNEL);
if (pinstance->inq_data == NULL) {
pmcraid_err("couldn't allocate DMA memory for INQUIRY\n");
pmcraid_release_buffers(pinstance);
@@ -5031,11 +5015,10 @@ static int pmcraid_init_buffers(struct pmcraid_instance *pinstance)
}
/* allocate DMAable memory for set timestamp data buffer */
- pinstance->timestamp_data = pci_alloc_consistent(
- pinstance->pdev,
+ pinstance->timestamp_data = dma_alloc_coherent(&pinstance->pdev->dev,
sizeof(struct pmcraid_timestamp_data),
- &pinstance->timestamp_data_baddr);
-
+ &pinstance->timestamp_data_baddr,
+ GFP_KERNEL);
if (pinstance->timestamp_data == NULL) {
pmcraid_err("couldn't allocate DMA memory for \
set time_stamp \n");
@@ -5324,12 +5307,12 @@ static int pmcraid_resume(struct pci_dev *pdev)
pci_set_master(pdev);
- if ((sizeof(dma_addr_t) == 4) ||
- pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (sizeof(dma_addr_t) == 4 ||
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (rc == 0)
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (rc != 0) {
dev_err(&pdev->dev, "resume: Failed to set PCI DMA mask\n");
@@ -5733,19 +5716,19 @@ static int pmcraid_probe(struct pci_dev *pdev,
/* Firmware requires the system bus address of IOARCB to be within
* 32-bit addressable range though it has 64-bit IOARRIN register.
* However, firmware supports 64-bit streaming DMA buffers, whereas
- * coherent buffers are to be 32-bit. Since pci_alloc_consistent always
+ * coherent buffers are to be 32-bit. Since dma_alloc_coherent always
* returns memory within 4GB (if not, change this logic), coherent
* buffers are within firmware acceptable address ranges.
*/
- if ((sizeof(dma_addr_t) == 4) ||
- pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (sizeof(dma_addr_t) == 4 ||
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
/* firmware expects 32-bit DMA addresses for IOARRIN register; set 32
- * bit mask for pci_alloc_consistent to return addresses within 4GB
+ * bit mask for dma_alloc_coherent to return addresses within 4GB
*/
if (rc == 0)
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (rc != 0) {
dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index ee86a0c62dbf..c182b5458f98 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -978,7 +978,6 @@ static struct scsi_host_template ppa_template = {
.bios_param = ppa_biosparam,
.this_id = -1,
.sg_tablesize = SG_ALL,
- .use_clustering = ENABLE_CLUSTERING,
.can_queue = 1,
.slave_alloc = ppa_adjust_queue,
};
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index 4924424d20fe..8d769138c01c 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -349,7 +349,6 @@ static struct scsi_host_template ps3rom_host_template = {
.sg_tablesize = SG_ALL,
.emulated = 1, /* only sg driver uses this */
.max_sectors = PS3ROM_MAX_SECTORS,
- .use_clustering = ENABLE_CLUSTERING,
.module = THIS_MODULE,
};
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index d5a4f17fce51..edcaf4b0cb0b 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -785,7 +785,6 @@ static struct scsi_host_template qedf_host_template = {
.name = QEDF_MODULE_NAME,
.this_id = -1,
.cmd_per_lun = 32,
- .use_clustering = ENABLE_CLUSTERING,
.max_sectors = 0xffff,
.queuecommand = qedf_queuecommand,
.shost_attrs = qedf_host_attrs,
@@ -2935,8 +2934,7 @@ static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf)
qedf_free_global_queues(qedf);
- if (qedf->global_queues)
- kfree(qedf->global_queues);
+ kfree(qedf->global_queues);
}
/*
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index a6f96b35e971..a26bb5066b90 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -45,7 +45,7 @@ struct qedi_endpoint;
#define QEDI_MAX_TASK_NUM 0x0FFF
#define QEDI_MAX_ISCSI_CONNS_PER_HBA 1024
#define QEDI_ISCSI_MAX_BDS_PER_CMD 255 /* Firmware max BDs is 255 */
-#define MAX_OUSTANDING_TASKS_PER_CON 1024
+#define MAX_OUTSTANDING_TASKS_PER_CON 1024
#define QEDI_MAX_BD_LEN 0xffff
#define QEDI_BD_SPLIT_SZ 0x1000
@@ -63,12 +63,9 @@ struct qedi_endpoint;
#define QEDI_LOCAL_PORT_INVALID 0xffff
#define TX_RX_RING 16
#define RX_RING (TX_RX_RING - 1)
-#define LL2_SINGLE_BUF_SIZE 0x400
-#define QEDI_PAGE_SIZE 4096
#define QEDI_PAGE_ALIGN(addr) ALIGN(addr, QEDI_PAGE_SIZE)
#define QEDI_PAGE_MASK (~((QEDI_PAGE_SIZE) - 1))
-#define QEDI_PAGE_SIZE 4096
#define QEDI_HW_DMA_BOUNDARY 0xfff
#define QEDI_PATH_HANDLE 0xFE0000000UL
@@ -146,7 +143,7 @@ struct skb_work_list {
};
/* Queue sizes in number of elements */
-#define QEDI_SQ_SIZE MAX_OUSTANDING_TASKS_PER_CON
+#define QEDI_SQ_SIZE MAX_OUTSTANDING_TASKS_PER_CON
#define QEDI_CQ_SIZE 2048
#define QEDI_CMDQ_SIZE QEDI_MAX_ISCSI_TASK
#define QEDI_PROTO_CQ_PROD_IDX 0
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 2f0a4f2c5ff8..4da660c1c431 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -61,7 +61,6 @@ struct scsi_host_template qedi_host_template = {
.max_sectors = 0xffff,
.dma_boundary = QEDI_HW_DMA_BOUNDARY,
.cmd_per_lun = 128,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = qedi_shost_attrs,
};
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 311eb22068e1..5c53409a8cea 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -44,6 +44,11 @@ module_param(qedi_io_tracing, uint, 0644);
MODULE_PARM_DESC(qedi_io_tracing,
" Enable logging of SCSI requests/completions into trace buffer. (default off).");
+uint qedi_ll2_buf_size = 0x400;
+module_param(qedi_ll2_buf_size, uint, 0644);
+MODULE_PARM_DESC(qedi_ll2_buf_size,
+ "parameter to set ping packet size, default - 0x400, Jumbo packets - 0x2400.");
+
const struct qed_iscsi_ops *qedi_ops;
static struct scsi_transport_template *qedi_scsi_transport;
static struct pci_driver qedi_pci_driver;
@@ -228,7 +233,7 @@ static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev)
}
/* Allocating memory for Tx/Rx pkt buffer */
- udev->ll2_buf_size = TX_RX_RING * LL2_SINGLE_BUF_SIZE;
+ udev->ll2_buf_size = TX_RX_RING * qedi_ll2_buf_size;
udev->ll2_buf_size = QEDI_PAGE_ALIGN(udev->ll2_buf_size);
udev->ll2_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP |
__GFP_ZERO, 2);
@@ -283,7 +288,7 @@ static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
qedi->udev = udev;
udev->tx_pkt = udev->ll2_buf;
- udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE;
+ udev->rx_pkt = udev->ll2_buf + qedi_ll2_buf_size;
return 0;
err_uctrl:
@@ -658,7 +663,7 @@ static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
struct qedi_uio_dev *udev;
struct qedi_uio_ctrl *uctrl;
struct skb_work_list *work;
- u32 prod;
+ struct ethhdr *eh;
if (!qedi) {
QEDI_ERR(NULL, "qedi is NULL\n");
@@ -672,6 +677,29 @@ static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
return 0;
}
+ eh = (struct ethhdr *)skb->data;
+ /* Undo VLAN encapsulation */
+ if (eh->h_proto == htons(ETH_P_8021Q)) {
+ memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
+ eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
+ skb_reset_mac_header(skb);
+ }
+
+	/* Filter out non-ARP/IP/IPv6 frames here to free them faster */
+ if (eh->h_proto != htons(ETH_P_ARP) &&
+ eh->h_proto != htons(ETH_P_IP) &&
+ eh->h_proto != htons(ETH_P_IPV6)) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2,
+ "Dropping frame ethertype [0x%x] len [0x%x].\n",
+ eh->h_proto, skb->len);
+ kfree_skb(skb);
+ return 0;
+ }
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2,
+ "Allowed frame ethertype [0x%x] len [0x%x].\n",
+ eh->h_proto, skb->len);
+
udev = qedi->udev;
uctrl = udev->uctrl;
@@ -694,17 +722,10 @@ static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
spin_lock_bh(&qedi->ll2_lock);
list_add_tail(&work->list, &qedi->ll2_skb_list);
+ spin_unlock_bh(&qedi->ll2_lock);
- ++uctrl->hw_rx_prod_cnt;
- prod = (uctrl->hw_rx_prod + 1) % RX_RING;
- if (prod != uctrl->host_rx_cons) {
- uctrl->hw_rx_prod = prod;
- spin_unlock_bh(&qedi->ll2_lock);
- wake_up_process(qedi->ll2_recv_thread);
- return 0;
- }
+ wake_up_process(qedi->ll2_recv_thread);
- spin_unlock_bh(&qedi->ll2_lock);
return 0;
}
@@ -719,6 +740,7 @@ static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb,
u32 rx_bd_prod;
void *pkt;
int len = 0;
+ u32 prod;
if (!qedi) {
QEDI_ERR(NULL, "qedi is NULL\n");
@@ -727,12 +749,16 @@ static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb,
udev = qedi->udev;
uctrl = udev->uctrl;
- pkt = udev->rx_pkt + (uctrl->hw_rx_prod * LL2_SINGLE_BUF_SIZE);
- len = min_t(u32, skb->len, (u32)LL2_SINGLE_BUF_SIZE);
+
+ ++uctrl->hw_rx_prod_cnt;
+ prod = (uctrl->hw_rx_prod + 1) % RX_RING;
+
+ pkt = udev->rx_pkt + (prod * qedi_ll2_buf_size);
+ len = min_t(u32, skb->len, (u32)qedi_ll2_buf_size);
memcpy(pkt, skb->data, len);
memset(&rxbd, 0, sizeof(rxbd));
- rxbd.rx_pkt_index = uctrl->hw_rx_prod;
+ rxbd.rx_pkt_index = prod;
rxbd.rx_pkt_len = len;
rxbd.vlan_id = vlan_id;
@@ -743,6 +769,16 @@ static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb,
memcpy(p_rxbd, &rxbd, sizeof(rxbd));
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2,
+ "hw_rx_prod [%d] prod [%d] hw_rx_bd_prod [%d] rx_pkt_idx [%d] rx_len [%d].\n",
+ uctrl->hw_rx_prod, prod, uctrl->hw_rx_bd_prod,
+ rxbd.rx_pkt_index, rxbd.rx_pkt_len);
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2,
+ "host_rx_cons [%d] hw_rx_bd_cons [%d].\n",
+ uctrl->host_rx_cons, uctrl->host_rx_bd_cons);
+
+ uctrl->hw_rx_prod = prod;
+
/* notify the iscsiuio about new packet */
uio_event_notify(&udev->qedi_uinfo);
@@ -795,7 +831,7 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
int rval = 0;
- num_sq_pages = (MAX_OUSTANDING_TASKS_PER_CON * 8) / PAGE_SIZE;
+ num_sq_pages = (MAX_OUTSTANDING_TASKS_PER_CON * 8) / QEDI_PAGE_SIZE;
qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi);
@@ -833,7 +869,7 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
qedi->pf_params.iscsi_pf_params.max_fin_rt = 2;
for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) {
- if ((1 << log_page_size) == PAGE_SIZE)
+ if ((1 << log_page_size) == QEDI_PAGE_SIZE)
break;
}
qedi->pf_params.iscsi_pf_params.log_page_size = log_page_size;
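The log_page_size loop above linearly probes for log2(QEDI_PAGE_SIZE); because the page size is a power of two, the kernel's ilog2() yields the same value directly. A sketch of the equivalent one-liner (assuming QEDI_PAGE_SIZE remains a compile-time power of two):

#include <linux/log2.h>

	/* Same result as the 32-iteration search above. */
	qedi->pf_params.iscsi_pf_params.log_page_size = ilog2(QEDI_PAGE_SIZE);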
@@ -951,6 +987,9 @@ static int qedi_find_boot_info(struct qedi_ctx *qedi,
cls_sess = iscsi_conn_to_session(cls_conn);
sess = cls_sess->dd_data;
+ if (!iscsi_is_session_online(cls_sess))
+ continue;
+
if (pri_ctrl_flags) {
if (!strcmp(pri_tgt->iscsi_name, sess->targetname) &&
!strcmp(pri_tgt->ip_addr, ep_ip_addr)) {
@@ -1297,7 +1336,7 @@ static int qedi_request_msix_irq(struct qedi_ctx *qedi)
int i, rc, cpu;
cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < MIN_NUM_CPUS_MSIX(qedi); i++) {
+ for (i = 0; i < qedi->int_info.msix_cnt; i++) {
rc = request_irq(qedi->int_info.msix[i].vector,
qedi_msix_handler, 0, "qedi",
&qedi->fp_array[i]);
@@ -1375,7 +1414,7 @@ static void qedi_free_bdq(struct qedi_ctx *qedi)
int i;
if (qedi->bdq_pbl_list)
- dma_free_coherent(&qedi->pdev->dev, PAGE_SIZE,
+ dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
qedi->bdq_pbl_list, qedi->bdq_pbl_list_dma);
if (qedi->bdq_pbl)
@@ -1436,7 +1475,7 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
/* Alloc dma memory for BDQ page buffer list */
qedi->bdq_pbl_mem_size = QEDI_BDQ_NUM * sizeof(struct scsi_bd);
- qedi->bdq_pbl_mem_size = ALIGN(qedi->bdq_pbl_mem_size, PAGE_SIZE);
+ qedi->bdq_pbl_mem_size = ALIGN(qedi->bdq_pbl_mem_size, QEDI_PAGE_SIZE);
qedi->rq_num_entries = qedi->bdq_pbl_mem_size / sizeof(struct scsi_bd);
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "rq_num_entries = %d.\n",
@@ -1471,7 +1510,8 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
}
/* Allocate list of PBL pages */
- qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev, PAGE_SIZE,
+ qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev,
+ QEDI_PAGE_SIZE,
&qedi->bdq_pbl_list_dma,
GFP_KERNEL);
if (!qedi->bdq_pbl_list) {
@@ -1484,13 +1524,14 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
* Now populate PBL list with pages that contain pointers to the
* individual buffers.
*/
- qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size / PAGE_SIZE;
+ qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size /
+ QEDI_PAGE_SIZE;
list = (u64 *)qedi->bdq_pbl_list;
page = qedi->bdq_pbl_list_dma;
for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) {
*list = qedi->bdq_pbl_dma;
list++;
- page += PAGE_SIZE;
+ page += QEDI_PAGE_SIZE;
}
return 0;
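The qedi_main.c rework above also moves producer-index advancement out of the receive callback qedi_ll2_rx() and into the thread-side qedi_ll2_process_skb(), so the index the consumer sees is published only after the packet bytes and buffer descriptor are fully written. A stripped-down sketch of that fill-then-publish ordering, with simplified stand-in types rather than the driver's uio structures:

#include <linux/string.h>
#include <linux/minmax.h>

struct ex_ring_ctrl {
	u32 hw_rx_prod;		/* last slot index published to the consumer */
	u32 hw_rx_prod_cnt;	/* monotonic producer counter */
};

static void ex_ring_publish(struct ex_ring_ctrl *uctrl, void *rx_base,
			    const void *data, u32 len,
			    u32 buf_size, u32 ring_size)
{
	u32 prod = (uctrl->hw_rx_prod + 1) % ring_size;
	void *slot = rx_base + prod * buf_size;

	uctrl->hw_rx_prod_cnt++;
	memcpy(slot, data, min(len, buf_size));	/* fill first ... */
	uctrl->hw_rx_prod = prod;		/* ... publish last */
}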
diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h
index 8a0e523fc089..41bcbbafebd4 100644
--- a/drivers/scsi/qedi/qedi_version.h
+++ b/drivers/scsi/qedi/qedi_version.h
@@ -7,8 +7,8 @@
* this source tree.
*/
-#define QEDI_MODULE_VERSION "8.33.0.20"
+#define QEDI_MODULE_VERSION "8.33.0.21"
#define QEDI_DRIVER_MAJOR_VER 8
#define QEDI_DRIVER_MINOR_VER 33
#define QEDI_DRIVER_REV_VER 0
-#define QEDI_DRIVER_ENG_VER 20
+#define QEDI_DRIVER_ENG_VER 21
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 15a50cc7e4b3..a414f51302b7 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -383,20 +383,10 @@
#include "qla1280.h"
-#ifndef BITS_PER_LONG
-#error "BITS_PER_LONG not defined!"
-#endif
-#if (BITS_PER_LONG == 64) || defined CONFIG_HIGHMEM
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
#define QLA_64BIT_PTR 1
#endif
-#ifdef QLA_64BIT_PTR
-#define pci_dma_hi32(a) ((a >> 16) >> 16)
-#else
-#define pci_dma_hi32(a) 0
-#endif
-#define pci_dma_lo32(a) (a & 0xffffffff)
-
#define NVRAM_DELAY() udelay(500) /* 2 microseconds */
#if defined(__ia64__) && !defined(ia64_platform_is)
@@ -1790,8 +1780,8 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
mb[4] = cnt;
mb[3] = ha->request_dma & 0xffff;
mb[2] = (ha->request_dma >> 16) & 0xffff;
- mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
- mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
+ mb[7] = upper_32_bits(ha->request_dma) & 0xffff;
+ mb[6] = upper_32_bits(ha->request_dma) >> 16;
dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
__func__, mb[0],
(void *)(long)ha->request_dma,
@@ -1810,8 +1800,8 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
mb[4] = cnt;
mb[3] = p_tbuf & 0xffff;
mb[2] = (p_tbuf >> 16) & 0xffff;
- mb[7] = pci_dma_hi32(p_tbuf) & 0xffff;
- mb[6] = pci_dma_hi32(p_tbuf) >> 16;
+ mb[7] = upper_32_bits(p_tbuf) & 0xffff;
+ mb[6] = upper_32_bits(p_tbuf) >> 16;
err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
BIT_1 | BIT_0, mb);
@@ -1933,8 +1923,8 @@ qla1280_init_rings(struct scsi_qla_host *ha)
mb[3] = ha->request_dma & 0xffff;
mb[2] = (ha->request_dma >> 16) & 0xffff;
mb[4] = 0;
- mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
- mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
+ mb[7] = upper_32_bits(ha->request_dma) & 0xffff;
+ mb[6] = upper_32_bits(ha->request_dma) >> 16;
if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
BIT_3 | BIT_2 | BIT_1 | BIT_0,
&mb[0]))) {
@@ -1947,8 +1937,8 @@ qla1280_init_rings(struct scsi_qla_host *ha)
mb[3] = ha->response_dma & 0xffff;
mb[2] = (ha->response_dma >> 16) & 0xffff;
mb[5] = 0;
- mb[7] = pci_dma_hi32(ha->response_dma) & 0xffff;
- mb[6] = pci_dma_hi32(ha->response_dma) >> 16;
+ mb[7] = upper_32_bits(ha->response_dma) & 0xffff;
+ mb[6] = upper_32_bits(ha->response_dma) >> 16;
status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
BIT_3 | BIT_2 | BIT_1 | BIT_0,
&mb[0]);
@@ -2914,13 +2904,13 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
SCSI_BUS_32(cmd));
#endif
*dword_ptr++ =
- cpu_to_le32(pci_dma_lo32(dma_handle));
+ cpu_to_le32(lower_32_bits(dma_handle));
*dword_ptr++ =
- cpu_to_le32(pci_dma_hi32(dma_handle));
+ cpu_to_le32(upper_32_bits(dma_handle));
*dword_ptr++ = cpu_to_le32(sg_dma_len(s));
dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
- cpu_to_le32(pci_dma_hi32(dma_handle)),
- cpu_to_le32(pci_dma_lo32(dma_handle)),
+ cpu_to_le32(upper_32_bits(dma_handle)),
+ cpu_to_le32(lower_32_bits(dma_handle)),
cpu_to_le32(sg_dma_len(sg_next(s))));
remseg--;
}
@@ -2976,14 +2966,14 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
SCSI_BUS_32(cmd));
#endif
*dword_ptr++ =
- cpu_to_le32(pci_dma_lo32(dma_handle));
+ cpu_to_le32(lower_32_bits(dma_handle));
*dword_ptr++ =
- cpu_to_le32(pci_dma_hi32(dma_handle));
+ cpu_to_le32(upper_32_bits(dma_handle));
*dword_ptr++ =
cpu_to_le32(sg_dma_len(s));
dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
- cpu_to_le32(pci_dma_hi32(dma_handle)),
- cpu_to_le32(pci_dma_lo32(dma_handle)),
+ cpu_to_le32(upper_32_bits(dma_handle)),
+ cpu_to_le32(lower_32_bits(dma_handle)),
cpu_to_le32(sg_dma_len(s)));
}
remseg -= cnt;
@@ -3178,10 +3168,10 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
if (cnt == 4)
break;
*dword_ptr++ =
- cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
+ cpu_to_le32(lower_32_bits(sg_dma_address(s)));
*dword_ptr++ = cpu_to_le32(sg_dma_len(s));
dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
- (pci_dma_lo32(sg_dma_address(s))),
+ (lower_32_bits(sg_dma_address(s))),
(sg_dma_len(s)));
remseg--;
}
@@ -3224,13 +3214,13 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
if (cnt == 7)
break;
*dword_ptr++ =
- cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
+ cpu_to_le32(lower_32_bits(sg_dma_address(s)));
*dword_ptr++ =
cpu_to_le32(sg_dma_len(s));
dprintk(1,
"S/G Segment Cont. phys_addr=0x%x, "
"len=0x%x\n",
- cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
+ cpu_to_le32(lower_32_bits(sg_dma_address(s))),
cpu_to_le32(sg_dma_len(s)));
}
remseg -= cnt;
@@ -4213,7 +4203,6 @@ static struct scsi_host_template qla1280_driver_template = {
.can_queue = MAX_OUTSTANDING_COMMANDS,
.this_id = -1,
.sg_tablesize = SG_ALL,
- .use_clustering = ENABLE_CLUSTERING,
};
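qla1280's private pci_dma_hi32()/pci_dma_lo32() macros duplicated helpers <linux/kernel.h> already provides: upper_32_bits() performs the same double 16-bit shift (well defined even where dma_addr_t is only 32 bits wide, unlike a single >> 32), and lower_32_bits() masks the low word. A sketch of splitting a DMA handle into descriptor words the way the converted code does:

#include <linux/kernel.h>
#include <asm/byteorder.h>

/* Store a 64-bit bus address as two little-endian 32-bit words plus a length. */
static void ex_fill_sg_entry(__le32 *dword_ptr, dma_addr_t dma_handle, u32 len)
{
	*dword_ptr++ = cpu_to_le32(lower_32_bits(dma_handle));
	*dword_ptr++ = cpu_to_le32(upper_32_bits(dma_handle));
	*dword_ptr = cpu_to_le32(len);
}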
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 0bb9ac6ece92..00444dc79756 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2712,6 +2712,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
msleep(1000);
+ qla_nvme_delete(vha);
+
qla24xx_disable_vp(vha);
qla2x00_wait_for_sess_deletion(vha);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index eb59c796a795..364bb52ed2a6 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -237,15 +237,13 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
sp->done = qla2x00_async_login_sp_done;
- if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
+ if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport))
lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
- } else {
+ else
lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
- if (fcport->fc4f_nvme)
- lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
-
- }
+ if (fcport->fc4f_nvme)
+ lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
ql_dbg(ql_dbg_disc, vha, 0x2072,
"Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index d620f4bebcd0..099d8e9851cb 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -507,6 +507,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
qla2x00_start_timer(vha, WATCH_INTERVAL);
vha->req = base_vha->req;
+ vha->flags.nvme_enabled = base_vha->flags.nvme_enabled;
host->can_queue = base_vha->req->length + 128;
host->cmd_per_lun = 3;
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index fccc733145fc..39d892bbd219 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -463,21 +463,10 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
int rval = -ENODEV;
srb_t *sp;
struct qla_qpair *qpair = hw_queue_handle;
- struct nvme_private *priv;
+ struct nvme_private *priv = fd->private;
struct qla_nvme_rport *qla_rport = rport->private;
- if (!fd || !qpair) {
- ql_log(ql_log_warn, NULL, 0x2134,
- "NO NVMe request or Queue Handle\n");
- return rval;
- }
-
- priv = fd->private;
fcport = qla_rport->fcport;
- if (!fcport) {
- ql_log(ql_log_warn, NULL, 0x210e, "No fcport ptr\n");
- return rval;
- }
vha = fcport->vha;
@@ -506,6 +495,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
sp->name = "nvme_cmd";
sp->done = qla_nvme_sp_done;
sp->qpair = qpair;
+ sp->vha = vha;
nvme = &sp->u.iocb_cmd;
nvme->u.nvme.desc = fd;
@@ -553,7 +543,7 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
schedule_work(&fcport->free_work);
}
- fcport->nvme_flag &= ~(NVME_FLAG_REGISTERED | NVME_FLAG_DELETING);
+ fcport->nvme_flag &= ~NVME_FLAG_DELETING;
ql_log(ql_log_info, fcport->vha, 0x2110,
"remoteport_delete of %p completed.\n", fcport);
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f92196ec5489..ea69dafc9774 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -328,7 +328,6 @@ struct scsi_host_template qla2xxx_driver_template = {
.map_queues = qla2xxx_map_queues,
.this_id = -1,
.cmd_per_lun = 3,
- .use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = SG_ALL,
.max_sectors = 0xFFFF,
@@ -1742,10 +1741,53 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
return QLA_SUCCESS;
}
+static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
+ unsigned long *flags)
+ __releases(qp->qp_lock_ptr)
+ __acquires(qp->qp_lock_ptr)
+{
+ scsi_qla_host_t *vha = qp->vha;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS) {
+ if (!sp_get(sp)) {
+ /* got sp */
+ spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
+ qla_nvme_abort(ha, sp, res);
+ spin_lock_irqsave(qp->qp_lock_ptr, *flags);
+ }
+ } else if (GET_CMD_SP(sp) && !ha->flags.eeh_busy &&
+ !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
+ !qla2x00_isp_reg_stat(ha) && sp->type == SRB_SCSI_CMD) {
+ /*
+ * Don't abort commands in adapter during EEH recovery as it's
+ * not accessible/responding.
+ *
+ * Get a reference to the sp and drop the lock. The reference
+ * ensures this sp->done() call and not the call in
+ * qla2xxx_eh_abort() ends the SCSI cmd (with result 'res').
+ */
+ if (!sp_get(sp)) {
+ int status;
+
+ spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
+ status = qla2xxx_eh_abort(GET_CMD_SP(sp));
+ spin_lock_irqsave(qp->qp_lock_ptr, *flags);
+ /*
+ * Get rid of extra reference caused
+ * by early exit from qla2xxx_eh_abort
+ */
+ if (status == FAST_IO_FAIL)
+ atomic_dec(&sp->ref_count);
+ }
+ }
+ sp->done(sp, res);
+}
+
static void
__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{
- int cnt, status;
+ int cnt;
unsigned long flags;
srb_t *sp;
scsi_qla_host_t *vha = qp->vha;
@@ -1764,50 +1806,7 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
req->outstanding_cmds[cnt] = NULL;
switch (sp->cmd_type) {
case TYPE_SRB:
- if (sp->type == SRB_NVME_CMD ||
- sp->type == SRB_NVME_LS) {
- if (!sp_get(sp)) {
- /* got sp */
- spin_unlock_irqrestore
- (qp->qp_lock_ptr,
- flags);
- qla_nvme_abort(ha, sp, res);
- spin_lock_irqsave
- (qp->qp_lock_ptr, flags);
- }
- } else if (GET_CMD_SP(sp) &&
- !ha->flags.eeh_busy &&
- (!test_bit(ABORT_ISP_ACTIVE,
- &vha->dpc_flags)) &&
- !qla2x00_isp_reg_stat(ha) &&
- (sp->type == SRB_SCSI_CMD)) {
- /*
- * Don't abort commands in adapter
- * during EEH recovery as it's not
- * accessible/responding.
- *
- * Get a reference to the sp and drop
- * the lock. The reference ensures this
- * sp->done() call and not the call in
- * qla2xxx_eh_abort() ends the SCSI cmd
- * (with result 'res').
- */
- if (!sp_get(sp)) {
- spin_unlock_irqrestore
- (qp->qp_lock_ptr, flags);
- status = qla2xxx_eh_abort(
- GET_CMD_SP(sp));
- spin_lock_irqsave
- (qp->qp_lock_ptr, flags);
- /*
- * Get rid of extra reference caused
- * by early exit from qla2xxx_eh_abort
- */
- if (status == FAST_IO_FAIL)
- atomic_dec(&sp->ref_count);
- }
- }
- sp->done(sp, res);
+ qla2x00_abort_srb(qp, sp, res, &flags);
break;
case TYPE_TGT_CMD:
if (!vha->hw->tgt.tgt_ops || !tgt ||
@@ -3558,6 +3557,8 @@ qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
spin_unlock_irqrestore(&ha->vport_slock, flags);
mutex_unlock(&ha->vport_lock);
+ qla_nvme_delete(vha);
+
fc_vport_terminate(vha->fc_vport);
scsi_host_put(vha->host);
@@ -4179,12 +4180,10 @@ fail_free_nvram:
kfree(ha->nvram);
ha->nvram = NULL;
fail_free_ctx_mempool:
- if (ha->ctx_mempool)
- mempool_destroy(ha->ctx_mempool);
+ mempool_destroy(ha->ctx_mempool);
ha->ctx_mempool = NULL;
fail_free_srb_mempool:
- if (ha->srb_mempool)
- mempool_destroy(ha->srb_mempool);
+ mempool_destroy(ha->srb_mempool);
ha->srb_mempool = NULL;
fail_free_gid_list:
dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
@@ -4486,8 +4485,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
ha->mctp_dump_dma);
- if (ha->srb_mempool)
- mempool_destroy(ha->srb_mempool);
+ mempool_destroy(ha->srb_mempool);
if (ha->dcbx_tlv)
dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
@@ -4519,8 +4517,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
if (ha->async_pd)
dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
- if (ha->s_dma_pool)
- dma_pool_destroy(ha->s_dma_pool);
+ dma_pool_destroy(ha->s_dma_pool);
if (ha->gid_list)
dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
@@ -4541,14 +4538,11 @@ qla2x00_mem_free(struct qla_hw_data *ha)
}
}
- if (ha->dl_dma_pool)
- dma_pool_destroy(ha->dl_dma_pool);
+ dma_pool_destroy(ha->dl_dma_pool);
- if (ha->fcp_cmnd_dma_pool)
- dma_pool_destroy(ha->fcp_cmnd_dma_pool);
+ dma_pool_destroy(ha->fcp_cmnd_dma_pool);
- if (ha->ctx_mempool)
- mempool_destroy(ha->ctx_mempool);
+ mempool_destroy(ha->ctx_mempool);
qlt_mem_free(ha);
@@ -7095,8 +7089,7 @@ qla2x00_module_exit(void)
qla2x00_release_firmware();
kmem_cache_destroy(srb_cachep);
qlt_exit();
- if (ctx_cachep)
- kmem_cache_destroy(ctx_cachep);
+ kmem_cache_destroy(ctx_cachep);
fc_release_transport(qla2xxx_transport_template);
fc_release_transport(qla2xxx_transport_vport_template);
}
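The __qla2x00_abort_all_cmds() refactor above pulls the lock-dropping body into qla2x00_abort_srb(), passing the saved IRQ flags by pointer and documenting the temporary unlock for sparse with __releases()/__acquires(). The idiom in isolation, with illustrative ex_* names:

#include <linux/spinlock.h>

/* Called with *lock held; legally drops it around slow work. */
static void ex_do_unlocked_work(spinlock_t *lock, unsigned long *flags)
	__releases(lock)
	__acquires(lock)
{
	spin_unlock_irqrestore(lock, *flags);
	/* ... work that must not run under the lock ... */
	spin_lock_irqsave(lock, *flags);
}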
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index c4504740f0e2..510337eac106 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -2379,20 +2379,20 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
}
if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
- if (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
- ELS_LOGO ||
- mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
- ELS_PRLO ||
- mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
- ELS_TPRLO) {
+ switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) {
+ case ELS_LOGO:
+ case ELS_PRLO:
+ case ELS_TPRLO:
ql_dbg(ql_dbg_disc, vha, 0x2106,
"TM response logo %phC status %#x state %#x",
mcmd->sess->port_name, mcmd->fc_tm_rsp,
mcmd->flags);
qlt_schedule_sess_for_deletion(mcmd->sess);
- } else {
+ break;
+ default:
qlt_send_notify_ack(vha->hw->base_qpair,
&mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
+ break;
}
} else {
if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
@@ -2660,9 +2660,9 @@ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
cnt++, prm->seg_cnt--) {
*dword_ptr++ =
- cpu_to_le32(pci_dma_lo32
+ cpu_to_le32(lower_32_bits
(sg_dma_address(prm->sg)));
- *dword_ptr++ = cpu_to_le32(pci_dma_hi32
+ *dword_ptr++ = cpu_to_le32(upper_32_bits
(sg_dma_address(prm->sg)));
*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
@@ -2704,9 +2704,9 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm)
(cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
cnt++, prm->seg_cnt--) {
*dword_ptr++ =
- cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
+ cpu_to_le32(lower_32_bits(sg_dma_address(prm->sg)));
- *dword_ptr++ = cpu_to_le32(pci_dma_hi32(
+ *dword_ptr++ = cpu_to_le32(upper_32_bits(
sg_dma_address(prm->sg)));
*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 721da593b1bc..577e1786a3f1 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -771,14 +771,6 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
#define FC_TM_REJECT 4
#define FC_TM_FAILED 5
-#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
-#define pci_dma_lo32(a) (a & 0xffffffff)
-#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
-#else
-#define pci_dma_lo32(a) (a & 0xffffffff)
-#define pci_dma_hi32(a) 0
-#endif
-
#define QLA_TGT_SENSE_VALID(sense) ((sense != NULL) && \
(((const uint8_t *)(sense))[0] & 0x70) == 0x70)
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 12bafff71a1a..ca7945cb959b 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.00.00.11-k"
+#define QLA2XXX_VERSION "10.00.00.12-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 65053c066680..283e6b80abb5 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -108,11 +108,6 @@ static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
}
-static char *tcm_qla2xxx_get_fabric_name(void)
-{
- return "qla2xxx";
-}
-
/*
* From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
*/
@@ -178,11 +173,6 @@ static int tcm_qla2xxx_npiv_parse_wwn(
return 0;
}
-static char *tcm_qla2xxx_npiv_get_fabric_name(void)
-{
- return "qla2xxx_npiv";
-}
-
static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
{
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
@@ -964,38 +954,14 @@ static ssize_t tcm_qla2xxx_tpg_enable_show(struct config_item *item,
atomic_read(&tpg->lport_tpg_enabled));
}
-static void tcm_qla2xxx_depend_tpg(struct work_struct *work)
-{
- struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
- struct tcm_qla2xxx_tpg, tpg_base_work);
- struct se_portal_group *se_tpg = &base_tpg->se_tpg;
- struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
-
- if (!target_depend_item(&se_tpg->tpg_group.cg_item)) {
- atomic_set(&base_tpg->lport_tpg_enabled, 1);
- qlt_enable_vha(base_vha);
- }
- complete(&base_tpg->tpg_base_comp);
-}
-
-static void tcm_qla2xxx_undepend_tpg(struct work_struct *work)
-{
- struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
- struct tcm_qla2xxx_tpg, tpg_base_work);
- struct se_portal_group *se_tpg = &base_tpg->se_tpg;
- struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
-
- if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) {
- atomic_set(&base_tpg->lport_tpg_enabled, 0);
- target_undepend_item(&se_tpg->tpg_group.cg_item);
- }
- complete(&base_tpg->tpg_base_comp);
-}
-
static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item,
const char *page, size_t count)
{
struct se_portal_group *se_tpg = to_tpg(item);
+ struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+ struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct scsi_qla_host *vha = lport->qla_vha;
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
unsigned long op;
@@ -1014,24 +980,16 @@ static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item,
if (atomic_read(&tpg->lport_tpg_enabled))
return -EEXIST;
- INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_depend_tpg);
+ atomic_set(&tpg->lport_tpg_enabled, 1);
+ qlt_enable_vha(vha);
} else {
if (!atomic_read(&tpg->lport_tpg_enabled))
return count;
- INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_undepend_tpg);
+ atomic_set(&tpg->lport_tpg_enabled, 0);
+ qlt_stop_phase1(vha->vha_tgt.qla_tgt);
}
- init_completion(&tpg->tpg_base_comp);
- schedule_work(&tpg->tpg_base_work);
- wait_for_completion(&tpg->tpg_base_comp);
- if (op) {
- if (!atomic_read(&tpg->lport_tpg_enabled))
- return -ENODEV;
- } else {
- if (atomic_read(&tpg->lport_tpg_enabled))
- return -EPERM;
- }
return count;
}
@@ -1920,14 +1878,13 @@ static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
.module = THIS_MODULE,
- .name = "qla2xxx",
+ .fabric_name = "qla2xxx",
.node_acl_size = sizeof(struct tcm_qla2xxx_nacl),
/*
* XXX: Limit assumes single page per scatter-gather-list entry.
* Current maximum is ~4.9 MB per se_cmd->t_data_sg with PAGE_SIZE=4096
*/
.max_data_sg_nents = 1200,
- .get_fabric_name = tcm_qla2xxx_get_fabric_name,
.tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
.tpg_get_tag = tcm_qla2xxx_get_tag,
.tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
@@ -1969,9 +1926,8 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.module = THIS_MODULE,
- .name = "qla2xxx_npiv",
+ .fabric_name = "qla2xxx_npiv",
.node_acl_size = sizeof(struct tcm_qla2xxx_nacl),
- .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name,
.tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
.tpg_get_tag = tcm_qla2xxx_get_tag,
.tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
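The tcm_qla2xxx hunks track a target-core API change included in this merge: the get_fabric_name() callback is gone, and fabric modules now hand the string to the core as .fabric_name. A sketch of the resulting ops-table shape (remaining callbacks elided):

#include <linux/module.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

static const struct target_core_fabric_ops ex_fabric_ops = {
	.module      = THIS_MODULE,
	.fabric_name = "ex_fabric",	/* replaces .name + .get_fabric_name() */
	/* ... tpg_*, queue_*, and ACL callbacks as before ... */
};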
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 7550ba2831c3..147cf6c90366 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -48,9 +48,6 @@ struct tcm_qla2xxx_tpg {
struct tcm_qla2xxx_tpg_attrib tpg_attrib;
/* Returned by tcm_qla2xxx_make_tpg() */
struct se_portal_group se_tpg;
- /* Items for dealing with configfs_depend_item */
- struct completion tpg_base_comp;
- struct work_struct tpg_base_work;
};
struct tcm_qla2xxx_fc_loopid {
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 051164f755a4..949e186cc5d7 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -205,7 +205,6 @@ static struct scsi_host_template qla4xxx_driver_template = {
.this_id = -1,
.cmd_per_lun = 3,
- .use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = SG_ALL,
.max_sectors = 0xFFFF,
@@ -4160,20 +4159,16 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
ha->fw_dump_size = 0;
/* Free srb pool. */
- if (ha->srb_mempool)
- mempool_destroy(ha->srb_mempool);
-
+ mempool_destroy(ha->srb_mempool);
ha->srb_mempool = NULL;
- if (ha->chap_dma_pool)
- dma_pool_destroy(ha->chap_dma_pool);
+ dma_pool_destroy(ha->chap_dma_pool);
if (ha->chap_list)
vfree(ha->chap_list);
ha->chap_list = NULL;
- if (ha->fw_ddb_dma_pool)
- dma_pool_destroy(ha->fw_ddb_dma_pool);
+ dma_pool_destroy(ha->fw_ddb_dma_pool);
/* release io space registers */
if (is_qla8022(ha)) {
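The qedf, qla2xxx and qla4xxx hunks above all drop `if (ptr)` guards in teardown paths; mempool_destroy() and dma_pool_destroy(), like kfree(), have long accepted NULL, so the checks were dead weight. The simplified release pattern, sketched with a hypothetical context struct:

#include <linux/mempool.h>
#include <linux/dmapool.h>
#include <linux/slab.h>

struct ex_ctx {			/* hypothetical driver context */
	mempool_t *srb_mempool;
	struct dma_pool *chap_dma_pool;
	void *cfg_buf;
};

static void ex_free_resources(struct ex_ctx *ctx)
{
	/* All three destructors accept NULL; no guards required. */
	mempool_destroy(ctx->srb_mempool);
	dma_pool_destroy(ctx->chap_dma_pool);
	kfree(ctx->cfg_buf);
}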
diff --git a/drivers/scsi/qlogicfas.c b/drivers/scsi/qlogicfas.c
index 95431d605c24..8f709002f746 100644
--- a/drivers/scsi/qlogicfas.c
+++ b/drivers/scsi/qlogicfas.c
@@ -193,7 +193,7 @@ static struct scsi_host_template qlogicfas_driver_template = {
.can_queue = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
};
static __init int qlogicfas_init(void)
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 9d09228eee28..e35ce762d454 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1287,7 +1287,6 @@ static struct scsi_host_template qpti_template = {
.can_queue = QLOGICPTI_REQ_QUEUE_LEN,
.this_id = 7,
.sg_tablesize = QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN),
- .use_clustering = ENABLE_CLUSTERING,
};
static const struct of_device_id qpti_match[];
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 4740f1e9dd17..661512bec3ac 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -3973,7 +3973,6 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
return 1; /* no resources, will be marked offline */
}
sdp->hostdata = devip;
- blk_queue_max_segment_size(sdp->request_queue, -1U);
if (sdebug_no_uld)
sdp->no_uld_attach = 1;
config_cdb_len(sdp);
@@ -5851,7 +5850,7 @@ static struct scsi_host_template sdebug_driver_template = {
.sg_tablesize = SG_MAX_SEGMENTS,
.cmd_per_lun = DEF_CMD_PER_LUN,
.max_sectors = -1U,
- .use_clustering = DISABLE_CLUSTERING,
+ .max_segment_size = -1U,
.module = THIS_MODULE,
.track_queue_depth = 1,
};
@@ -5866,8 +5865,9 @@ static int sdebug_driver_probe(struct device *dev)
sdbg_host = to_sdebug_host(dev);
sdebug_driver_template.can_queue = sdebug_max_queue;
- if (sdebug_clustering)
- sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
+ if (!sdebug_clustering)
+ sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
+
hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
if (NULL == hpnt) {
pr_err("scsi_host_alloc failed\n");
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0dbf25512778..b13cc9288ba0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1842,10 +1842,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
blk_queue_segment_boundary(q, shost->dma_boundary);
dma_set_seg_boundary(dev, shost->dma_boundary);
- blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
-
- if (!shost->use_clustering)
- q->limits.cluster = 0;
+ blk_queue_max_segment_size(q,
+ min(shost->max_segment_size, dma_get_max_seg_size(dev)));
/*
* Set a reasonable default alignment: The larger of 32-byte (dword),
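This scsi_lib.c hunk is the heart of the DISABLE_CLUSTERING removal the merge text mentions: rather than a boolean use_clustering toggle, the queue's segment-size cap is now simply the minimum of the template's max_segment_size and the DMA layer's limit, and hosts that truly cannot merge pages say so with a dma_boundary of PAGE_SIZE - 1 (as qlogicfas above and sgiwd93 below do). The two replacement template styles, sketched with hypothetical drivers:

#include <scsi/scsi_host.h>

/* Hardware that cannot cross a page boundary inside one segment. */
static struct scsi_host_template ex_no_merge_template = {
	.name		= "ex_no_merge",
	.sg_tablesize	= SG_ALL,
	.dma_boundary	= PAGE_SIZE - 1,	/* was DISABLE_CLUSTERING */
};

/* Hardware with a hard per-segment byte limit. */
static struct scsi_host_template ex_seg_limit_template = {
	.name		  = "ex_seg_limit",
	.sg_tablesize	  = SG_ALL,
	.max_segment_size = 0x10000,	/* 64k per scatter/gather element */
};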
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index ff123023e5a5..0508831d6fb9 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -37,6 +37,18 @@
#define ISCSI_TRANSPORT_VERSION "2.0-870"
+#define CREATE_TRACE_POINTS
+#include <trace/events/iscsi.h>
+
+/*
+ * Export tracepoint symbols to be used by other modules.
+ */
+EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_conn);
+EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_eh);
+EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_session);
+EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_tcp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_sw_tcp);
+
static int dbg_session;
module_param_named(debug_session, dbg_session, int,
S_IRUGO | S_IWUSR);
@@ -59,6 +71,9 @@ MODULE_PARM_DESC(debug_conn,
iscsi_cls_session_printk(KERN_INFO, _session, \
"%s: " dbg_fmt, \
__func__, ##arg); \
+ iscsi_dbg_trace(trace_iscsi_dbg_trans_session, \
+ &(_session)->dev, \
+ "%s " dbg_fmt, __func__, ##arg); \
} while (0);
#define ISCSI_DBG_TRANS_CONN(_conn, dbg_fmt, arg...) \
@@ -66,7 +81,10 @@ MODULE_PARM_DESC(debug_conn,
if (dbg_conn) \
iscsi_cls_conn_printk(KERN_INFO, _conn, \
"%s: " dbg_fmt, \
- __func__, ##arg); \
+ __func__, ##arg); \
+ iscsi_dbg_trace(trace_iscsi_dbg_trans_conn, \
+ &(_conn)->dev, \
+ "%s " dbg_fmt, __func__, ##arg); \
} while (0);
struct iscsi_internal {
@@ -4494,6 +4512,20 @@ int iscsi_unregister_transport(struct iscsi_transport *tt)
}
EXPORT_SYMBOL_GPL(iscsi_unregister_transport);
+void iscsi_dbg_trace(void (*trace)(struct device *dev, struct va_format *),
+ struct device *dev, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ trace(dev, &vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL_GPL(iscsi_dbg_trace);
+
static __init int iscsi_transport_init(void)
{
int err;
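iscsi_dbg_trace() above leans on struct va_format so a single varargs call site can feed both printk-style logging and a tracepoint without pre-rendering into a buffer; consumers expand it with the %pV format specifier. The wrapper pattern in isolation — the callback here stands in for the generated trace_iscsi_dbg_* functions:

#include <linux/device.h>
#include <linux/kernel.h>
#include <stdarg.h>

static void ex_dbg_trace(void (*emit)(struct device *dev,
				      struct va_format *vaf),
			 struct device *dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	emit(dev, &vaf);	/* consumer formats via "%pV", &vaf */
	va_end(args);
}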
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 5ed696dc9bbd..713bce998b0e 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -208,7 +208,7 @@ static struct scsi_host_template sgiwd93_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 8,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
};
static int sgiwd93_probe(struct platform_device *pdev)
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index e97bf2670315..af962368818b 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -21,6 +21,9 @@
#if !defined(_SMARTPQI_H)
#define _SMARTPQI_H
+#include <scsi/scsi_host.h>
+#include <linux/bsg-lib.h>
+
#pragma pack(1)
#define PQI_DEVICE_SIGNATURE "PQI DREG"
@@ -97,6 +100,12 @@ struct pqi_ctrl_registers {
struct pqi_device_registers pqi_registers; /* 4000h */
};
+#if ((HZ) < 1000)
+#define PQI_HZ 1000
+#else
+#define PQI_HZ (HZ)
+#endif
+
#define PQI_DEVICE_REGISTERS_OFFSET 0x4000
enum pqi_io_path {
@@ -347,6 +356,10 @@ struct pqi_event_config {
#define PQI_MAX_EVENT_DESCRIPTORS 255
+#define PQI_EVENT_OFA_MEMORY_ALLOCATION 0x0
+#define PQI_EVENT_OFA_QUIESCE 0x1
+#define PQI_EVENT_OFA_CANCELLED 0x2
+
struct pqi_event_response {
struct pqi_iu_header header;
u8 event_type;
@@ -354,7 +367,17 @@ struct pqi_event_response {
u8 request_acknowlege : 1;
__le16 event_id;
__le32 additional_event_id;
- u8 data[16];
+ union {
+ struct {
+ __le32 bytes_requested;
+ u8 reserved[12];
+ } ofa_memory_allocation;
+
+ struct {
+ __le16 reason; /* reason for cancellation */
+ u8 reserved[14];
+ } ofa_cancelled;
+ } data;
};
struct pqi_event_acknowledge_request {
@@ -389,6 +412,54 @@ struct pqi_task_management_response {
u8 response_code;
};
+struct pqi_vendor_general_request {
+ struct pqi_iu_header header;
+ __le16 request_id;
+ __le16 function_code;
+ union {
+ struct {
+ __le16 first_section;
+ __le16 last_section;
+ u8 reserved[48];
+ } config_table_update;
+
+ struct {
+ __le64 buffer_address;
+ __le32 buffer_length;
+ u8 reserved[40];
+ } ofa_memory_allocation;
+ } data;
+};
+
+struct pqi_vendor_general_response {
+ struct pqi_iu_header header;
+ __le16 request_id;
+ __le16 function_code;
+ __le16 status;
+ u8 reserved[2];
+};
+
+#define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE 0
+#define PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE 1
+
+#define PQI_OFA_VERSION 1
+#define PQI_OFA_SIGNATURE "OFA_QRM"
+#define PQI_OFA_MAX_SG_DESCRIPTORS 64
+
+#define PQI_OFA_MEMORY_DESCRIPTOR_LENGTH \
+ (offsetof(struct pqi_ofa_memory, sg_descriptor) + \
+ (PQI_OFA_MAX_SG_DESCRIPTORS * sizeof(struct pqi_sg_descriptor)))
+
+struct pqi_ofa_memory {
+ __le64 signature; /* "OFA_QRM" */
+ __le16 version; /* version of this struct(1 = 1st version) */
+ u8 reserved[62];
+ __le32 bytes_allocated; /* total allocated memory in bytes */
+ __le16 num_memory_descriptors;
+ u8 reserved1[2];
+ struct pqi_sg_descriptor sg_descriptor[1];
+};
+
struct pqi_aio_error_info {
u8 status;
u8 service_response;
@@ -419,6 +490,7 @@ struct pqi_raid_error_info {
#define PQI_REQUEST_IU_GENERAL_ADMIN 0x60
#define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG 0x72
#define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG 0x73
+#define PQI_REQUEST_IU_VENDOR_GENERAL 0x75
#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6
#define PQI_RESPONSE_IU_GENERAL_MANAGEMENT 0x81
@@ -430,6 +502,7 @@ struct pqi_raid_error_info {
#define PQI_RESPONSE_IU_AIO_PATH_IO_ERROR 0xf3
#define PQI_RESPONSE_IU_AIO_PATH_DISABLED 0xf4
#define PQI_RESPONSE_IU_VENDOR_EVENT 0xf5
+#define PQI_RESPONSE_IU_VENDOR_GENERAL 0xf7
#define PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY 0x0
#define PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ 0x10
@@ -492,6 +565,7 @@ struct pqi_raid_error_info {
#define PQI_EVENT_TYPE_HARDWARE 0x2
#define PQI_EVENT_TYPE_PHYSICAL_DEVICE 0x4
#define PQI_EVENT_TYPE_LOGICAL_DEVICE 0x5
+#define PQI_EVENT_TYPE_OFA 0xfb
#define PQI_EVENT_TYPE_AIO_STATE_CHANGE 0xfd
#define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE 0xfe
@@ -556,6 +630,7 @@ typedef u32 pqi_index_t;
#define SOP_TASK_ATTRIBUTE_ACA 4
#define SOP_TMF_COMPLETE 0x0
+#define SOP_TMF_REJECTED 0x4
#define SOP_TMF_FUNCTION_SUCCEEDED 0x8
/* additional CDB bytes usage field codes */
@@ -644,11 +719,13 @@ struct pqi_encryption_info {
#define PQI_CONFIG_TABLE_MAX_LENGTH ((u16)~0)
/* configuration table section IDs */
+#define PQI_CONFIG_TABLE_ALL_SECTIONS (-1)
#define PQI_CONFIG_TABLE_SECTION_GENERAL_INFO 0
#define PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES 1
#define PQI_CONFIG_TABLE_SECTION_FIRMWARE_ERRATA 2
#define PQI_CONFIG_TABLE_SECTION_DEBUG 3
#define PQI_CONFIG_TABLE_SECTION_HEARTBEAT 4
+#define PQI_CONFIG_TABLE_SECTION_SOFT_RESET 5
struct pqi_config_table {
u8 signature[8]; /* "CFGTABLE" */
@@ -680,6 +757,18 @@ struct pqi_config_table_general_info {
/* command */
};
+struct pqi_config_table_firmware_features {
+ struct pqi_config_table_section_header header;
+ __le16 num_elements;
+ u8 features_supported[];
+/* u8 features_requested_by_host[]; */
+/* u8 features_enabled[]; */
+};
+
+#define PQI_FIRMWARE_FEATURE_OFA 0
+#define PQI_FIRMWARE_FEATURE_SMP 1
+#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11
+
struct pqi_config_table_debug {
struct pqi_config_table_section_header header;
__le32 scratchpad;
@@ -690,6 +779,22 @@ struct pqi_config_table_heartbeat {
__le32 heartbeat_counter;
};
+struct pqi_config_table_soft_reset {
+ struct pqi_config_table_section_header header;
+ u8 soft_reset_status;
+};
+
+#define PQI_SOFT_RESET_INITIATE 0x1
+#define PQI_SOFT_RESET_ABORT 0x2
+
+enum pqi_soft_reset_status {
+ RESET_INITIATE_FIRMWARE,
+ RESET_INITIATE_DRIVER,
+ RESET_ABORT,
+ RESET_NORESPONSE,
+ RESET_TIMEDOUT
+};
+
union pqi_reset_register {
struct {
u32 reset_type : 3;
@@ -808,8 +913,10 @@ struct pqi_scsi_dev {
u8 scsi3addr[8];
__be64 wwid;
u8 volume_id[16];
+ u8 unique_id[16];
u8 is_physical_device : 1;
u8 is_external_raid_device : 1;
+ u8 is_expander_smp_device : 1;
u8 target_lun_valid : 1;
u8 device_gone : 1;
u8 new_device : 1;
@@ -817,6 +924,7 @@ struct pqi_scsi_dev {
u8 volume_offline : 1;
bool aio_enabled; /* only valid for physical disks */
bool in_reset;
+ bool in_remove;
bool device_offline;
u8 vendor[8]; /* bytes 8-15 of inquiry data */
u8 model[16]; /* bytes 16-31 of inquiry data */
@@ -854,6 +962,8 @@ struct pqi_scsi_dev {
#define CISS_VPD_LV_DEVICE_GEOMETRY 0xc1 /* vendor-specific page */
#define CISS_VPD_LV_BYPASS_STATUS 0xc2 /* vendor-specific page */
#define CISS_VPD_LV_STATUS 0xc3 /* vendor-specific page */
+#define SCSI_VPD_HEADER_SZ 4
+#define SCSI_VPD_DEVICE_ID_IDX 8 /* Index of page id in page */
#define VPD_PAGE (1 << 8)
@@ -916,6 +1026,7 @@ struct pqi_sas_node {
struct pqi_sas_port {
struct list_head port_list_entry;
u64 sas_address;
+ struct pqi_scsi_dev *device;
struct sas_port *port;
int next_phy_index;
struct list_head phy_list_head;
@@ -947,13 +1058,15 @@ struct pqi_io_request {
struct list_head request_list_entry;
};
-#define PQI_NUM_SUPPORTED_EVENTS 6
+#define PQI_NUM_SUPPORTED_EVENTS 7
struct pqi_event {
bool pending;
u8 event_type;
__le16 event_id;
__le32 additional_event_id;
+ __le32 ofa_bytes_requested;
+ __le16 ofa_cancel_reason;
};
#define PQI_RESERVED_IO_SLOTS_LUN_RESET 1
@@ -1014,12 +1127,16 @@ struct pqi_ctrl_info {
struct mutex scan_mutex;
struct mutex lun_reset_mutex;
+ struct mutex ofa_mutex; /* serialize ofa */
bool controller_online;
bool block_requests;
+ bool in_shutdown;
+ bool in_ofa;
u8 inbound_spanning_supported : 1;
u8 outbound_spanning_supported : 1;
u8 pqi_mode_enabled : 1;
u8 pqi_reset_quiesce_supported : 1;
+ u8 soft_reset_handshake_supported : 1;
struct list_head scsi_device_list;
spinlock_t scsi_device_list_lock;
@@ -1040,6 +1157,7 @@ struct pqi_ctrl_info {
int previous_num_interrupts;
u32 previous_heartbeat_count;
__le32 __iomem *heartbeat_counter;
+ u8 __iomem *soft_reset_status;
struct timer_list heartbeat_timer;
struct work_struct ctrl_offline_work;
@@ -1051,6 +1169,10 @@ struct pqi_ctrl_info {
struct list_head raid_bypass_retry_list;
spinlock_t raid_bypass_retry_list_lock;
struct work_struct raid_bypass_retry_work;
+
+ struct pqi_ofa_memory *pqi_ofa_mem_virt_addr;
+ dma_addr_t pqi_ofa_mem_dma_handle;
+ void **pqi_ofa_chunk_virt_addr;
};
enum pqi_ctrl_mode {
@@ -1080,8 +1202,13 @@ enum pqi_ctrl_mode {
#define BMIC_WRITE 0x27
#define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64
#define BMIC_SENSE_SUBSYSTEM_INFORMATION 0x66
+#define BMIC_CSMI_PASSTHRU 0x68
#define BMIC_WRITE_HOST_WELLNESS 0xa5
#define BMIC_FLUSH_CACHE 0xc2
+#define BMIC_SET_DIAG_OPTIONS 0xf4
+#define BMIC_SENSE_DIAG_OPTIONS 0xf5
+
+#define CSMI_CC_SAS_SMP_PASSTHRU 0X17
#define SA_FLUSH_CACHE 0x1
@@ -1109,6 +1236,10 @@ struct bmic_identify_controller {
u8 reserved3[32];
};
+#define SA_EXPANDER_SMP_DEVICE 0x05
+/*SCSI Invalid Device Type for SAS devices*/
+#define PQI_SAS_SCSI_INVALID_DEVTYPE 0xff
+
struct bmic_identify_physical_device {
u8 scsi_bus; /* SCSI Bus number on controller */
u8 scsi_id; /* SCSI ID on this bus */
@@ -1189,6 +1320,50 @@ struct bmic_identify_physical_device {
u8 padding_to_multiple_of_512[9];
};
+struct bmic_smp_request {
+ u8 frame_type;
+ u8 function;
+ u8 allocated_response_length;
+ u8 request_length;
+ u8 additional_request_bytes[1016];
+};
+
+struct bmic_smp_response {
+ u8 frame_type;
+ u8 function;
+ u8 function_result;
+ u8 response_length;
+ u8 additional_response_bytes[1016];
+};
+
+struct bmic_csmi_ioctl_header {
+ __le32 header_length;
+ u8 signature[8];
+ __le32 timeout;
+ __le32 control_code;
+ __le32 return_code;
+ __le32 length;
+};
+
+struct bmic_csmi_smp_passthru {
+ u8 phy_identifier;
+ u8 port_identifier;
+ u8 connection_rate;
+ u8 reserved;
+ __be64 destination_sas_address;
+ __le32 request_length;
+ struct bmic_smp_request request;
+ u8 connection_status;
+ u8 reserved1[3];
+ __le32 response_length;
+ struct bmic_smp_response response;
+};
+
+struct bmic_csmi_smp_passthru_buffer {
+ struct bmic_csmi_ioctl_header ioctl_header;
+ struct bmic_csmi_smp_passthru parameters;
+};
+
struct bmic_flush_cache {
u8 disable_flag;
u8 system_power_action;
@@ -1206,8 +1381,42 @@ enum bmic_flush_cache_shutdown_event {
RESTART = 4
};
+struct bmic_diag_options {
+ __le32 options;
+};
+
#pragma pack()
+static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
+{
+ void *hostdata = shost_priv(shost);
+
+ return *((struct pqi_ctrl_info **)hostdata);
+}
+
+static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
+{
+ return !ctrl_info->controller_online;
+}
+
+static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
+{
+ atomic_inc(&ctrl_info->num_busy_threads);
+}
+
+static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
+{
+ atomic_dec(&ctrl_info->num_busy_threads);
+}
+
+static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+ return ctrl_info->block_requests;
+}
+
+void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
+ struct sas_rphy *rphy);
+
int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info);
void pqi_delete_sas_host(struct pqi_ctrl_info *ctrl_info);
int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
@@ -1216,6 +1425,9 @@ void pqi_remove_sas_device(struct pqi_scsi_dev *device);
struct pqi_scsi_dev *pqi_find_device_by_sas_rphy(
struct pqi_ctrl_info *ctrl_info, struct sas_rphy *rphy);
void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd);
+int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
+ struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
+ struct pqi_raid_error_info *error_info);
extern struct sas_function_template pqi_sas_transport_functions;
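PQI_OFA_MEMORY_DESCRIPTOR_LENGTH above sizes struct pqi_ofa_memory by adding the bytes of the trailing sg_descriptor run to offsetof() of that member — the standard arithmetic for structs ending in a variable-length array, which deliberately ignores the sg_descriptor[1] placeholder. The same calculation on a simplified, hypothetical struct:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/stddef.h>

struct ex_sg_desc { __le64 addr; __le32 len; __le32 flags; };

struct ex_ofa_mem {
	__le64 signature;
	__le16 num_descriptors;
	struct ex_sg_desc sg[1];	/* really num_descriptors entries */
};

static struct ex_ofa_mem *ex_ofa_alloc(unsigned int nr_sg)
{
	/* offsetof() stops before sg[], so the [1] placeholder is not
	 * counted twice; nr_sg == 64 mirrors the driver's
	 * PQI_OFA_MAX_SG_DESCRIPTORS sizing.
	 */
	size_t len = offsetof(struct ex_ofa_mem, sg) +
		     nr_sg * sizeof(struct ex_sg_desc);

	return kzalloc(len, GFP_KERNEL);
}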
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index bac084260d80..e2fa3f476227 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -40,11 +40,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "1.1.4-130"
+#define DRIVER_VERSION "1.2.4-070"
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 1
+#define DRIVER_MINOR 2
#define DRIVER_RELEASE 4
-#define DRIVER_REVISION 130
+#define DRIVER_REVISION 70
#define DRIVER_NAME "Microsemi PQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -74,6 +74,15 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
unsigned int cdb_length, struct pqi_queue_group *queue_group,
struct pqi_encryption_info *encryption_info, bool raid_bypass);
+static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
+static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
+static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
+static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
+ u32 bytes_requested);
+static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
+static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
+static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device, unsigned long timeout_secs);
/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
@@ -113,6 +122,7 @@ static unsigned int pqi_supported_event_types[] = {
PQI_EVENT_TYPE_HARDWARE,
PQI_EVENT_TYPE_PHYSICAL_DEVICE,
PQI_EVENT_TYPE_LOGICAL_DEVICE,
+ PQI_EVENT_TYPE_OFA,
PQI_EVENT_TYPE_AIO_STATE_CHANGE,
PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};
@@ -176,16 +186,14 @@ static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
scmd->scsi_done(scmd);
}
-static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
+static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
- return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
+ sdev->no_write_same = 1;
}
-static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
+static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
- void *hostdata = shost_priv(shost);
-
- return *((struct pqi_ctrl_info **)hostdata);
+ return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}
static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
@@ -198,11 +206,6 @@ static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
return scsi3addr[2] != 0;
}
-static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
-{
- return !ctrl_info->controller_online;
-}
-
static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
if (ctrl_info->controller_online)
@@ -241,11 +244,6 @@ static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
scsi_unblock_requests(ctrl_info->scsi_host);
}
-static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
-{
- return ctrl_info->block_requests;
-}
-
static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
unsigned long timeout_msecs)
{
@@ -275,16 +273,6 @@ static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
return remaining_msecs;
}
-static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
-{
- atomic_inc(&ctrl_info->num_busy_threads);
-}
-
-static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
-{
- atomic_dec(&ctrl_info->num_busy_threads);
-}
-
static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
while (atomic_read(&ctrl_info->num_busy_threads) >
@@ -312,11 +300,39 @@ static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
return device->in_reset;
}
+static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->in_ofa = true;
+}
+
+static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->in_ofa = false;
+}
+
+static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
+{
+ return ctrl_info->in_ofa;
+}
+
+static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
+{
+ device->in_remove = true;
+}
+
+static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+{
+	return device->in_remove && !ctrl_info->in_shutdown;
+}
+
static inline void pqi_schedule_rescan_worker_with_delay(
struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
if (pqi_ctrl_offline(ctrl_info))
return;
+ if (pqi_ctrl_in_ofa(ctrl_info))
+ return;
schedule_delayed_work(&ctrl_info->rescan_work, delay);
}
@@ -326,7 +342,7 @@ static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}
-#define PQI_RESCAN_WORK_DELAY (10 * HZ)
+#define PQI_RESCAN_WORK_DELAY (10 * PQI_HZ)
static inline void pqi_schedule_rescan_worker_delayed(
struct pqi_ctrl_info *ctrl_info)
@@ -347,6 +363,27 @@ static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
return readl(ctrl_info->heartbeat_counter);
}
+static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
+{
+ if (!ctrl_info->soft_reset_status)
+ return 0;
+
+ return readb(ctrl_info->soft_reset_status);
+}
+
+static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
+ u8 clear)
+{
+ u8 status;
+
+ if (!ctrl_info->soft_reset_status)
+ return;
+
+ status = pqi_read_soft_reset_status(ctrl_info);
+ status &= ~clear;
+ writeb(status, ctrl_info->soft_reset_status);
+}
+
static int pqi_map_single(struct pci_dev *pci_dev,
struct pqi_sg_descriptor *sg_descriptor, void *buffer,
size_t buffer_length, enum dma_data_direction data_direction)
@@ -390,6 +427,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
u16 vpd_page, enum dma_data_direction *dir)
{
u8 *cdb;
+ size_t cdb_length = buffer_length;
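+ /*
+ * cdb_length normally mirrors buffer_length, but the diag-options
+ * cases below zero it before falling through, so only the length
+ * embedded in the CDB changes while buffer_length still sizes the
+ * mapped data buffer.
+ */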
memset(request, 0, sizeof(*request));
@@ -412,7 +450,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
cdb[1] = 0x1;
cdb[2] = (u8)vpd_page;
}
- cdb[4] = (u8)buffer_length;
+ cdb[4] = (u8)cdb_length;
break;
case CISS_REPORT_LOG:
case CISS_REPORT_PHYS:
@@ -422,32 +460,45 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
cdb[1] = CISS_REPORT_PHYS_EXTENDED;
else
cdb[1] = CISS_REPORT_LOG_EXTENDED;
- put_unaligned_be32(buffer_length, &cdb[6]);
+ put_unaligned_be32(cdb_length, &cdb[6]);
break;
case CISS_GET_RAID_MAP:
request->data_direction = SOP_READ_FLAG;
cdb[0] = CISS_READ;
cdb[1] = CISS_GET_RAID_MAP;
- put_unaligned_be32(buffer_length, &cdb[6]);
+ put_unaligned_be32(cdb_length, &cdb[6]);
break;
case SA_FLUSH_CACHE:
request->data_direction = SOP_WRITE_FLAG;
cdb[0] = BMIC_WRITE;
cdb[6] = BMIC_FLUSH_CACHE;
- put_unaligned_be16(buffer_length, &cdb[7]);
+ put_unaligned_be16(cdb_length, &cdb[7]);
break;
+ case BMIC_SENSE_DIAG_OPTIONS:
+ cdb_length = 0;
+ /* fall through */
case BMIC_IDENTIFY_CONTROLLER:
case BMIC_IDENTIFY_PHYSICAL_DEVICE:
request->data_direction = SOP_READ_FLAG;
cdb[0] = BMIC_READ;
cdb[6] = cmd;
- put_unaligned_be16(buffer_length, &cdb[7]);
+ put_unaligned_be16(cdb_length, &cdb[7]);
break;
+ case BMIC_SET_DIAG_OPTIONS:
+ cdb_length = 0;
+ /* fall through */
case BMIC_WRITE_HOST_WELLNESS:
request->data_direction = SOP_WRITE_FLAG;
cdb[0] = BMIC_WRITE;
cdb[6] = cmd;
- put_unaligned_be16(buffer_length, &cdb[7]);
+ put_unaligned_be16(cdb_length, &cdb[7]);
+ break;
+ case BMIC_CSMI_PASSTHRU:
+ request->data_direction = SOP_BIDIRECTIONAL;
+ cdb[0] = BMIC_WRITE;
+ cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
+ cdb[6] = cmd;
+ put_unaligned_be16(cdb_length, &cdb[7]);
break;
default:
dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
@@ -509,43 +560,130 @@ static void pqi_free_io_request(struct pqi_io_request *io_request)
atomic_dec(&io_request->refcount);
}
-static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
- struct bmic_identify_controller *buffer)
+static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
+ u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
+ struct pqi_raid_error_info *error_info,
+ unsigned long timeout_msecs)
{
int rc;
enum dma_data_direction dir;
struct pqi_raid_path_request request;
rc = pqi_build_raid_path_request(ctrl_info, &request,
- BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
- sizeof(*buffer), 0, &dir);
+ cmd, scsi3addr, buffer,
+ buffer_length, vpd_page, &dir);
if (rc)
return rc;
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
- NULL, NO_TIMEOUT);
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+ 0, error_info, timeout_msecs);
pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
return rc;
}
-static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
+/* Helper functions for pqi_send_scsi_raid_request */
+
+static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
+ u8 cmd, void *buffer, size_t buffer_length)
+{
+ return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
+ buffer, buffer_length, 0, NULL, NO_TIMEOUT);
+}
+
+static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
+ u8 cmd, void *buffer, size_t buffer_length,
+ struct pqi_raid_error_info *error_info)
+{
+ return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
+ buffer, buffer_length, 0, error_info, NO_TIMEOUT);
+}
+
+static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
+ struct bmic_identify_controller *buffer)
+{
+ return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
+ buffer, sizeof(*buffer));
+}
+
+static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
+ return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
+ buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
+}
+
+static bool pqi_vpd_page_supported(struct pqi_ctrl_info *ctrl_info,
+ u8 *scsi3addr, u16 vpd_page)
+{
int rc;
- enum dma_data_direction dir;
- struct pqi_raid_path_request request;
+ int i;
+ int pages;
+ unsigned char *buf, bufsize;
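+ /*
+ * INQUIRY VPD page 0x00 lists the supported page codes: byte 3
+ * holds the list length and the codes follow, so 256 bytes covers
+ * the worst case of a full-length list.
+ */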
- rc = pqi_build_raid_path_request(ctrl_info, &request,
- INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
- &dir);
- if (rc)
- return rc;
+ buf = kzalloc(256, GFP_KERNEL);
+ if (!buf)
+ return false;
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
- NULL, NO_TIMEOUT);
+ /* Get the size of the page list first */
+ rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
+ VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
+ buf, SCSI_VPD_HEADER_SZ);
+ if (rc != 0)
+ goto exit_unsupported;
+
+ pages = buf[3];
+ if ((pages + SCSI_VPD_HEADER_SZ) <= 255)
+ bufsize = pages + SCSI_VPD_HEADER_SZ;
+ else
+ bufsize = 255;
+
+ /* Get the whole VPD page list */
+ rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
+ VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
+ buf, bufsize);
+ if (rc != 0)
+ goto exit_unsupported;
+
+ pages = buf[3];
+ for (i = 1; i <= pages; i++)
+ if (buf[3 + i] == vpd_page)
+ goto exit_supported;
+
+exit_unsupported:
+ kfree(buf);
+ return false;
+
+exit_supported:
+ kfree(buf);
+ return true;
+}
+
+static int pqi_get_device_id(struct pqi_ctrl_info *ctrl_info,
+ u8 *scsi3addr, u8 *device_id, int buflen)
+{
+ int rc;
+ unsigned char *buf;
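+ /*
+ * The unique ID comes from the device-identification VPD page;
+ * SCSI_VPD_DEVICE_ID_IDX locates the 16-byte identifier within
+ * the 64-byte response.
+ */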
+
+ if (!pqi_vpd_page_supported(ctrl_info, scsi3addr, SCSI_VPD_DEVICE_ID))
+ return 1; /* function not supported */
+
+ buf = kzalloc(64, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
+ VPD_PAGE | SCSI_VPD_DEVICE_ID,
+ buf, 64);
+ if (rc == 0) {
+ if (buflen > 16)
+ buflen = 16;
+ memcpy(device_id, &buf[SCSI_VPD_DEVICE_ID_IDX], buflen);
+ }
+
+ kfree(buf);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
return rc;
}
@@ -580,9 +718,7 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
enum bmic_flush_cache_shutdown_event shutdown_event)
{
int rc;
- struct pqi_raid_path_request request;
struct bmic_flush_cache *flush_cache;
- enum dma_data_direction dir;
/*
* Don't bother trying to flush the cache if the controller is
@@ -597,42 +733,55 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
flush_cache->shutdown_event = shutdown_event;
- rc = pqi_build_raid_path_request(ctrl_info, &request,
- SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache,
- sizeof(*flush_cache), 0, &dir);
- if (rc)
- goto out;
-
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
- 0, NULL, NO_TIMEOUT);
+ rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
+ sizeof(*flush_cache));
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
-out:
kfree(flush_cache);
return rc;
}
-static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
- void *buffer, size_t buffer_length)
+int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
+ struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
+ struct pqi_raid_error_info *error_info)
+{
+ return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
+ buffer, buffer_length, error_info);
+}
+
+#define PQI_FETCH_PTRAID_DATA (1UL<<31)
+
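+/*
+ * Read-modify-write of the BMIC diagnostic options: sense the current
+ * options, set the PTRAID-data fetch bit, and write them back; the init
+ * path logs a failure here as "error enabling multi-lun rescan".
+ */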
+static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
int rc;
- struct pqi_raid_path_request request;
- enum dma_data_direction dir;
+ struct bmic_diag_options *diag;
- rc = pqi_build_raid_path_request(ctrl_info, &request,
- BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
- buffer_length, 0, &dir);
+ diag = kzalloc(sizeof(*diag), GFP_KERNEL);
+ if (!diag)
+ return -ENOMEM;
+
+ rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
+ diag, sizeof(*diag));
if (rc)
- return rc;
+ goto out;
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
- 0, NULL, NO_TIMEOUT);
+ diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
+
+ rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS,
+ diag, sizeof(*diag));
+out:
+ kfree(diag);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
return rc;
}
+static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
+ void *buffer, size_t buffer_length)
+{
+ return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
+ buffer, buffer_length);
+}
+
#pragma pack(1)
struct bmic_host_wellness_driver_version {
@@ -640,6 +789,7 @@ struct bmic_host_wellness_driver_version {
u8 driver_version_tag[2];
__le16 driver_version_length;
char driver_version[32];
+ u8 dont_write_tag[2];
u8 end_tag[2];
};
@@ -669,6 +819,8 @@ static int pqi_write_driver_version_to_host_wellness(
strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
sizeof(buffer->driver_version) - 1);
buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
+ buffer->dont_write_tag[0] = 'D';
+ buffer->dont_write_tag[1] = 'W';
buffer->end_tag[0] = 'Z';
buffer->end_tag[1] = 'Z';
@@ -742,7 +894,7 @@ static int pqi_write_current_time_to_host_wellness(
return rc;
}
-#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
+#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * PQI_HZ)
static void pqi_update_time_worker(struct work_struct *work)
{
@@ -776,23 +928,11 @@ static inline void pqi_cancel_update_time_worker(
cancel_delayed_work_sync(&ctrl_info->update_time_work);
}
-static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
+static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
void *buffer, size_t buffer_length)
{
- int rc;
- enum dma_data_direction dir;
- struct pqi_raid_path_request request;
-
- rc = pqi_build_raid_path_request(ctrl_info, &request,
- cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &dir);
- if (rc)
- return rc;
-
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
- NULL, NO_TIMEOUT);
-
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
- return rc;
+ return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
+ buffer_length);
}
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
@@ -1010,8 +1150,6 @@ static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
char *err_msg;
u32 raid_map_size;
u32 r5or6_blocks_per_row;
- unsigned int num_phys_disks;
- unsigned int num_raid_map_entries;
raid_map_size = get_unaligned_le32(&raid_map->structure_size);
@@ -1020,22 +1158,6 @@ static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
goto bad_raid_map;
}
- if (raid_map_size > sizeof(*raid_map)) {
- err_msg = "RAID map too large";
- goto bad_raid_map;
- }
-
- num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
- (get_unaligned_le16(&raid_map->data_disks_per_row) +
- get_unaligned_le16(&raid_map->metadata_disks_per_row));
- num_raid_map_entries = num_phys_disks *
- get_unaligned_le16(&raid_map->row_cnt);
-
- if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
- err_msg = "invalid number of map entries in RAID map";
- goto bad_raid_map;
- }
-
if (device->raid_level == SA_RAID_1) {
if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
err_msg = "invalid RAID-1 map";
@@ -1074,27 +1196,45 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
{
int rc;
- enum dma_data_direction dir;
- struct pqi_raid_path_request request;
+ u32 raid_map_size;
struct raid_map *raid_map;
raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
if (!raid_map)
return -ENOMEM;
- rc = pqi_build_raid_path_request(ctrl_info, &request,
- CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
- sizeof(*raid_map), 0, &dir);
+ rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
+ device->scsi3addr, raid_map, sizeof(*raid_map),
+ 0, NULL, NO_TIMEOUT);
+
if (rc)
goto error;
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
- NULL, NO_TIMEOUT);
+ raid_map_size = get_unaligned_le32(&raid_map->structure_size);
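+
+ /*
+ * Firmware may report a structure_size larger than the default
+ * allocation; if so, reallocate at the reported size, fetch the
+ * map again, and verify the size is stable across the two reads.
+ */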
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
+ if (raid_map_size > sizeof(*raid_map)) {
- if (rc)
- goto error;
+ kfree(raid_map);
+
+ raid_map = kmalloc(raid_map_size, GFP_KERNEL);
+ if (!raid_map)
+ return -ENOMEM;
+
+ rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
+ device->scsi3addr, raid_map, raid_map_size,
+ 0, NULL, NO_TIMEOUT);
+ if (rc)
+ goto error;
+
+ if (get_unaligned_le32(&raid_map->structure_size)
+ != raid_map_size) {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "Requested %d bytes, received %d bytes",
+ raid_map_size,
+ get_unaligned_le32(&raid_map->structure_size));
+ goto error;
+ }
+ }
rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
if (rc)
@@ -1165,6 +1305,9 @@ static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
if (rc)
goto out;
+ if (vpd->page_code != CISS_VPD_LV_STATUS)
+ goto out;
+
page_length = offsetof(struct ciss_vpd_logical_volume_status,
volume_status) + vpd->page_length;
if (page_length < sizeof(*vpd))
@@ -1190,6 +1333,9 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
u8 *buffer;
unsigned int retries;
+ if (device->is_expander_smp_device)
+ return 0;
+
buffer = kmalloc(64, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -1225,6 +1371,14 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
}
}
+ if (pqi_get_device_id(ctrl_info, device->scsi3addr,
+ device->unique_id, sizeof(device->unique_id)) < 0)
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "Can't get device id for scsi %d:%d:%d:%d\n",
+ ctrl_info->scsi_host->host_no,
+ device->bus, device->target,
+ device->lun);
+
out:
kfree(buffer);
@@ -1387,9 +1541,24 @@ static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
return rc;
}
+#define PQI_PENDING_IO_TIMEOUT_SECS 20
+
static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
{
+ int rc;
+
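+ /*
+ * Marking the device in_remove makes the queuecommand path fail
+ * new I/O with DID_NO_CONNECT; then give outstanding commands a
+ * bounded window to drain before tearing the device down.
+ */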
+ pqi_device_remove_start(device);
+
+ rc = pqi_device_wait_for_pending_io(ctrl_info, device,
+ PQI_PENDING_IO_TIMEOUT_SECS);
+ if (rc)
+ dev_err(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d removing device with %d outstanding commands\n",
+ ctrl_info->scsi_host->host_no, device->bus,
+ device->target, device->lun,
+ atomic_read(&device->scsi_cmds_outstanding));
+
if (pqi_is_logical_device(device))
scsi_remove_device(device->sdev);
else
@@ -1454,6 +1623,14 @@ static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
return DEVICE_NOT_FOUND;
}
+static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
+{
+ if (device->is_expander_smp_device)
+ return "Enclosure SMP ";
+
+ return scsi_device_type(device->devtype);
+}
+
#define PQI_DEV_INFO_BUFFER_LENGTH 128
static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
@@ -1489,7 +1666,7 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
" %s %.8s %.16s ",
- scsi_device_type(device->devtype),
+ pqi_device_type(device),
device->vendor,
device->model);
@@ -1534,6 +1711,8 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
existing_device->is_physical_device = new_device->is_physical_device;
existing_device->is_external_raid_device =
new_device->is_external_raid_device;
+ existing_device->is_expander_smp_device =
+ new_device->is_expander_smp_device;
existing_device->aio_enabled = new_device->aio_enabled;
memcpy(existing_device->vendor, new_device->vendor,
sizeof(existing_device->vendor));
@@ -1558,6 +1737,7 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
new_device->raid_bypass_configured;
existing_device->raid_bypass_enabled =
new_device->raid_bypass_enabled;
+ existing_device->device_offline = false;
/* To prevent this from being freed later. */
new_device->raid_map = NULL;
@@ -1589,6 +1769,14 @@ static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
device->keep_device = false;
}
+static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
+{
+ if (device->is_expander_smp_device)
+ return device->sas_port != NULL;
+
+ return device->sdev != NULL;
+}
+
static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
@@ -1674,6 +1862,9 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ if (pqi_ctrl_in_ofa(ctrl_info))
+ pqi_ctrl_ofa_done(ctrl_info);
+
/* Remove all devices that have gone away. */
list_for_each_entry_safe(device, next, &delete_list,
delete_list_entry) {
@@ -1683,7 +1874,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
} else {
pqi_dev_info(ctrl_info, "removed", device);
}
- if (device->sdev)
+ if (pqi_is_device_added(device))
pqi_remove_device(ctrl_info, device);
list_del(&device->delete_list_entry);
pqi_free_device(device);
@@ -1705,7 +1896,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
/* Expose any new devices. */
list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
- if (!device->sdev) {
+ if (!pqi_is_device_added(device)) {
pqi_dev_info(ctrl_info, "added", device);
rc = pqi_add_device(ctrl_info, device);
if (rc) {
@@ -1722,7 +1913,12 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
{
- bool is_supported = false;
+ bool is_supported;
+
+ if (device->is_expander_smp_device)
+ return true;
+
+ is_supported = false;
switch (device->devtype) {
case TYPE_DISK:
@@ -1756,6 +1952,30 @@ static inline bool pqi_skip_device(u8 *scsi3addr)
return false;
}
+static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
+{
+ if (!device->is_physical_device)
+ return false;
+
+ if (device->is_expander_smp_device)
+ return true;
+
+ switch (device->devtype) {
+ case TYPE_DISK:
+ case TYPE_ZBC:
+ case TYPE_ENCLOSURE:
+ return true;
+ }
+
+ return false;
+}
+
+static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
+{
+ return !device->is_physical_device ||
+ !pqi_skip_device(device->scsi3addr);
+}
+
static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
int i;
@@ -1864,9 +2084,14 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
device->is_physical_device = is_physical_device;
- if (!is_physical_device)
+ if (is_physical_device) {
+ if (phys_lun_ext_entry->device_type ==
+ SA_EXPANDER_SMP_DEVICE)
+ device->is_expander_smp_device = true;
+ } else {
device->is_external_raid_device =
pqi_is_external_raid_addr(scsi3addr);
+ }
/* Gather information about the device. */
rc = pqi_get_device_info(ctrl_info, device);
@@ -1899,30 +2124,23 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
device->wwid = phys_lun_ext_entry->wwid;
if ((phys_lun_ext_entry->device_flags &
REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
- phys_lun_ext_entry->aio_handle)
+ phys_lun_ext_entry->aio_handle) {
device->aio_enabled = true;
+ device->aio_handle =
+ phys_lun_ext_entry->aio_handle;
+ }
+ if (device->devtype == TYPE_DISK ||
+ device->devtype == TYPE_ZBC) {
+ pqi_get_physical_disk_info(ctrl_info,
+ device, id_phys);
+ }
} else {
memcpy(device->volume_id, log_lun_ext_entry->volume_id,
sizeof(device->volume_id));
}
- switch (device->devtype) {
- case TYPE_DISK:
- case TYPE_ZBC:
- case TYPE_ENCLOSURE:
- if (device->is_physical_device) {
- device->sas_address =
- get_unaligned_be64(&device->wwid);
- if (device->devtype == TYPE_DISK ||
- device->devtype == TYPE_ZBC) {
- device->aio_handle =
- phys_lun_ext_entry->aio_handle;
- pqi_get_physical_disk_info(ctrl_info,
- device, id_phys);
- }
- }
- break;
- }
+ if (pqi_is_device_with_sas_address(device))
+ device->sas_address = get_unaligned_be64(&device->wwid);
new_device_list[num_valid_devices++] = device;
}
@@ -1965,7 +2183,7 @@ static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
if (!device)
break;
- if (device->sdev)
+ if (pqi_is_device_added(device))
pqi_remove_device(ctrl_info, device);
pqi_free_device(device);
}
@@ -1991,7 +2209,13 @@ static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
static void pqi_scan_start(struct Scsi_Host *shost)
{
- pqi_scan_scsi_devices(shost_to_hba(shost));
+ struct pqi_ctrl_info *ctrl_info;
+
+ ctrl_info = shost_to_hba(shost);
+ if (pqi_ctrl_in_ofa(ctrl_info))
+ return;
+
+ pqi_scan_scsi_devices(ctrl_info);
}
/* Returns TRUE if scan is finished. */
@@ -2018,6 +2242,12 @@ static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
mutex_unlock(&ctrl_info->lun_reset_mutex);
}
+static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
+{
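+ /* Taking and releasing ofa_mutex simply waits out any OFA in flight. */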
+ mutex_lock(&ctrl_info->ofa_mutex);
+ mutex_unlock(&ctrl_info->ofa_mutex);
+}
+
static inline void pqi_set_encryption_info(
struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
u64 first_block)
@@ -2325,9 +2555,6 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
(map_row * total_disks_per_row) + first_column;
}
- if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
- return PQI_RAID_BYPASS_INELIGIBLE;
-
aio_handle = raid_map->disk_data[map_index].aio_handle;
disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
first_row * strip_size +
@@ -2397,7 +2624,7 @@ static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
u8 status;
pqi_registers = ctrl_info->pqi_registers;
- timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
+ timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;
while (1) {
signature = readq(&pqi_registers->signature);
@@ -2458,10 +2685,9 @@ static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
return;
device->device_offline = true;
- scsi_device_set_state(sdev, SDEV_OFFLINE);
ctrl_info = shost_to_hba(sdev->host);
pqi_schedule_rescan_worker(ctrl_info);
- dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n",
+ dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
path, ctrl_info->scsi_host->host_no, device->bus,
device->target, device->lun);
}
@@ -2665,6 +2891,9 @@ static int pqi_interpret_task_management_response(
case SOP_TMF_FUNCTION_SUCCEEDED:
rc = 0;
break;
+ case SOP_TMF_REJECTED:
+ rc = -EAGAIN;
+ break;
default:
rc = -EIO;
break;
@@ -2704,8 +2933,17 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
switch (response->header.iu_type) {
case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
+ if (io_request->scmd)
+ io_request->scmd->result = 0;
+ /* fall through */
case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
break;
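+ /*
+ * Vendor-general responses return a status word; the driver uses
+ * them for config table updates and OFA host memory registration.
+ */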
+ case PQI_RESPONSE_IU_VENDOR_GENERAL:
+ io_request->status =
+ get_unaligned_le16(
+ &((struct pqi_vendor_general_response *)
+ response)->status);
+ break;
case PQI_RESPONSE_IU_TASK_MANAGEMENT:
io_request->status =
pqi_interpret_task_management_response(
@@ -2825,6 +3063,111 @@ static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
pqi_send_event_ack(ctrl_info, &request, sizeof(request));
}
+#define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30
+#define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1
+
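+/*
+ * After the OFA quiesce event is acknowledged, the firmware posts its
+ * decision in the soft reset status byte: INITIATE means the driver
+ * performs the reset itself, ABORT cancels the activation, and silence
+ * until the timeout (or a dead firmware) maps to RESET_TIMEDOUT or
+ * RESET_NORESPONSE.
+ */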
+static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
+ struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned long timeout;
+ u8 status;
+
+ timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
+
+ while (1) {
+ status = pqi_read_soft_reset_status(ctrl_info);
+ if (status & PQI_SOFT_RESET_INITIATE)
+ return RESET_INITIATE_DRIVER;
+
+ if (status & PQI_SOFT_RESET_ABORT)
+ return RESET_ABORT;
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "timed out waiting for soft reset status\n");
+ return RESET_TIMEDOUT;
+ }
+
+ if (!sis_is_firmware_running(ctrl_info))
+ return RESET_NORESPONSE;
+
+ ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
+ }
+}
+
+static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
+ enum pqi_soft_reset_status reset_status)
+{
+ int rc;
+
+ switch (reset_status) {
+ case RESET_INITIATE_DRIVER:
+ /* fall through */
+ case RESET_TIMEDOUT:
+ dev_info(&ctrl_info->pci_dev->dev,
+ "resetting controller %u\n", ctrl_info->ctrl_id);
+ sis_soft_reset(ctrl_info);
+ /* fall through */
+ case RESET_INITIATE_FIRMWARE:
+ rc = pqi_ofa_ctrl_restart(ctrl_info);
+ pqi_ofa_free_host_buffer(ctrl_info);
+ dev_info(&ctrl_info->pci_dev->dev,
+ "Online Firmware Activation for controller %u: %s\n",
+ ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED");
+ break;
+ case RESET_ABORT:
+ pqi_ofa_ctrl_unquiesce(ctrl_info);
+ dev_info(&ctrl_info->pci_dev->dev,
+ "Online Firmware Activation for controller %u: %s\n",
+ ctrl_info->ctrl_id, "ABORTED");
+ break;
+ case RESET_NORESPONSE:
+ pqi_ofa_free_host_buffer(ctrl_info);
+ pqi_take_ctrl_offline(ctrl_info);
+ break;
+ }
+}
+
+static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_event *event)
+{
+ u16 event_id;
+ enum pqi_soft_reset_status status;
+
+ event_id = get_unaligned_le16(&event->event_id);
+
+ mutex_lock(&ctrl_info->ofa_mutex);
+
+ if (event_id == PQI_EVENT_OFA_QUIESCE) {
+ dev_info(&ctrl_info->pci_dev->dev,
+ "Received Online Firmware Activation quiesce event for controller %u\n",
+ ctrl_info->ctrl_id);
+ pqi_ofa_ctrl_quiesce(ctrl_info);
+ pqi_acknowledge_event(ctrl_info, event);
+ if (ctrl_info->soft_reset_handshake_supported) {
+ status = pqi_poll_for_soft_reset_status(ctrl_info);
+ pqi_process_soft_reset(ctrl_info, status);
+ } else {
+ pqi_process_soft_reset(ctrl_info,
+ RESET_INITIATE_FIRMWARE);
+ }
+
+ } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
+ pqi_acknowledge_event(ctrl_info, event);
+ pqi_ofa_setup_host_buffer(ctrl_info,
+ le32_to_cpu(event->ofa_bytes_requested));
+ pqi_ofa_host_memory_update(ctrl_info);
+ } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
+ pqi_ofa_free_host_buffer(ctrl_info);
+ pqi_acknowledge_event(ctrl_info, event);
+ dev_info(&ctrl_info->pci_dev->dev,
+ "Online Firmware Activation(%u) cancel reason : %u\n",
+ ctrl_info->ctrl_id, event->ofa_cancel_reason);
+ }
+
+ mutex_unlock(&ctrl_info->ofa_mutex);
+}
+
static void pqi_event_worker(struct work_struct *work)
{
unsigned int i;
@@ -2844,6 +3187,11 @@ static void pqi_event_worker(struct work_struct *work)
for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
if (event->pending) {
event->pending = false;
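+ /*
+ * OFA handling quiesces the controller and may block
+ * for a long time, so drop the busy count and handle
+ * it outside the normal acknowledge loop.
+ */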
+ if (event->event_type == PQI_EVENT_TYPE_OFA) {
+ pqi_ctrl_unbusy(ctrl_info);
+ pqi_ofa_process_event(ctrl_info, event);
+ return;
+ }
pqi_acknowledge_event(ctrl_info, event);
}
event++;
@@ -2853,7 +3201,7 @@ out:
pqi_ctrl_unbusy(ctrl_info);
}
-#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
+#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ)
static void pqi_heartbeat_timer_handler(struct timer_list *t)
{
@@ -2922,6 +3270,24 @@ static inline bool pqi_is_supported_event(unsigned int event_type)
return pqi_event_type_to_event_index(event_type) != -1;
}
+static void pqi_ofa_capture_event_payload(struct pqi_event *event,
+ struct pqi_event_response *response)
+{
+ u16 event_id;
+
+ event_id = get_unaligned_le16(&event->event_id);
+
+ if (event->event_type == PQI_EVENT_TYPE_OFA) {
+ if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
+ event->ofa_bytes_requested =
+ response->data.ofa_memory_allocation.bytes_requested;
+ } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
+ event->ofa_cancel_reason =
+ response->data.ofa_cancelled.reason;
+ }
+ }
+}
+
static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
{
unsigned int num_events;
@@ -2956,6 +3322,7 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
event->event_id = response->event_id;
event->additional_event_id =
response->additional_event_id;
+ pqi_ofa_capture_event_payload(event, response);
}
}
@@ -3389,7 +3756,7 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
return 0;
}
-#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
+#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ
#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
@@ -3482,7 +3849,7 @@ static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
admin_queues = &ctrl_info->admin_queues;
oq_ci = admin_queues->oq_ci_copy;
- timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
+ timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;
while (1) {
oq_pi = readl(admin_queues->oq_pi);
@@ -3597,7 +3964,7 @@ static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
while (1) {
if (wait_for_completion_io_timeout(wait,
- PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
+ PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
rc = 0;
break;
}
@@ -4927,7 +5294,17 @@ void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
{
struct pqi_scsi_dev *device;
+ if (!scmd->device) {
+ set_host_byte(scmd, DID_NO_CONNECT);
+ return;
+ }
+
device = scmd->device->hostdata;
+ if (!device) {
+ set_host_byte(scmd, DID_NO_CONNECT);
+ return;
+ }
+
atomic_dec(&device->scsi_cmds_outstanding);
}
@@ -4944,16 +5321,24 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
device = scmd->device->hostdata;
ctrl_info = shost_to_hba(shost);
+ if (!device) {
+ set_host_byte(scmd, DID_NO_CONNECT);
+ pqi_scsi_done(scmd);
+ return 0;
+ }
+
atomic_inc(&device->scsi_cmds_outstanding);
- if (pqi_ctrl_offline(ctrl_info)) {
+ if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(ctrl_info,
+ device)) {
set_host_byte(scmd, DID_NO_CONNECT);
pqi_scsi_done(scmd);
return 0;
}
pqi_ctrl_busy(ctrl_info);
- if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) {
+ if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
+ pqi_ctrl_in_ofa(ctrl_info)) {
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
@@ -5098,25 +5483,75 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
}
}
+static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned int i;
+ unsigned int path;
+ struct pqi_queue_group *queue_group;
+ unsigned long flags;
+ struct pqi_io_request *io_request;
+ struct pqi_io_request *next;
+ struct scsi_cmnd *scmd;
+
+ for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+ queue_group = &ctrl_info->queue_groups[i];
+
+ for (path = 0; path < 2; path++) {
+ spin_lock_irqsave(&queue_group->submit_lock[path],
+ flags);
+
+ list_for_each_entry_safe(io_request, next,
+ &queue_group->request_list[path],
+ request_list_entry) {
+
+ scmd = io_request->scmd;
+ if (!scmd)
+ continue;
+
+ list_del(&io_request->request_list_entry);
+ set_host_byte(scmd, DID_RESET);
+ pqi_scsi_done(scmd);
+ }
+
+ spin_unlock_irqrestore(
+ &queue_group->submit_lock[path], flags);
+ }
+ }
+}
+
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+ struct pqi_scsi_dev *device, unsigned long timeout_secs)
{
+ unsigned long timeout;
+
+ timeout = (timeout_secs * PQI_HZ) + jiffies;
+
while (atomic_read(&device->scsi_cmds_outstanding)) {
pqi_check_ctrl_health(ctrl_info);
if (pqi_ctrl_offline(ctrl_info))
return -ENXIO;
+ if (timeout_secs != NO_TIMEOUT) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "timed out waiting for pending IO\n");
+ return -ETIMEDOUT;
+ }
+ }
usleep_range(1000, 2000);
}
return 0;
}
-static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
+static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
+ unsigned long timeout_secs)
{
bool io_pending;
unsigned long flags;
+ unsigned long timeout;
struct pqi_scsi_dev *device;
+ timeout = (timeout_secs * PQI_HZ) + jiffies;
while (1) {
io_pending = false;
@@ -5138,6 +5573,13 @@ static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
if (pqi_ctrl_offline(ctrl_info))
return -ENXIO;
+ if (timeout_secs != NO_TIMEOUT) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "timed out waiting for pending IO\n");
+ return -ETIMEDOUT;
+ }
+ }
usleep_range(1000, 2000);
}
@@ -5161,7 +5603,7 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
while (1) {
if (wait_for_completion_io_timeout(wait,
- PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
+ PQI_LUN_RESET_TIMEOUT_SECS * PQI_HZ)) {
rc = 0;
break;
}
@@ -5212,20 +5654,56 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
return rc;
}
+#define PQI_LUN_RESET_RETRIES 3
+#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000
/* Performs a reset at the LUN level. */
-static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
+static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
{
int rc;
+ unsigned int retries;
+ unsigned long timeout_secs;
- rc = pqi_lun_reset(ctrl_info, device);
- if (rc == 0)
- rc = pqi_device_wait_for_pending_io(ctrl_info, device);
+ for (retries = 0;;) {
+ rc = pqi_lun_reset(ctrl_info, device);
+ if (rc != -EAGAIN ||
+ ++retries > PQI_LUN_RESET_RETRIES)
+ break;
+ msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
+ }
+ timeout_secs = rc ? PQI_LUN_RESET_TIMEOUT_SECS : NO_TIMEOUT;
+
+ rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);
return rc == 0 ? SUCCESS : FAILED;
}
+static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+{
+ int rc;
+
+ mutex_lock(&ctrl_info->lun_reset_mutex);
+
+ pqi_ctrl_block_requests(ctrl_info);
+ pqi_ctrl_wait_until_quiesced(ctrl_info);
+ pqi_fail_io_queued_for_device(ctrl_info, device);
+ rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
+ pqi_device_reset_start(device);
+ pqi_ctrl_unblock_requests(ctrl_info);
+
+ if (rc)
+ rc = FAILED;
+ else
+ rc = _pqi_device_reset(ctrl_info, device);
+
+ pqi_device_reset_done(device);
+
+ mutex_unlock(&ctrl_info->lun_reset_mutex);
+ return rc;
+}
+
static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
{
int rc;
@@ -5243,28 +5721,16 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
pqi_check_ctrl_health(ctrl_info);
if (pqi_ctrl_offline(ctrl_info)) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "controller %u offlined - cannot send device reset\n",
+ ctrl_info->ctrl_id);
rc = FAILED;
goto out;
}
- mutex_lock(&ctrl_info->lun_reset_mutex);
-
- pqi_ctrl_block_requests(ctrl_info);
- pqi_ctrl_wait_until_quiesced(ctrl_info);
- pqi_fail_io_queued_for_device(ctrl_info, device);
- rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
- pqi_device_reset_start(device);
- pqi_ctrl_unblock_requests(ctrl_info);
-
- if (rc)
- rc = FAILED;
- else
- rc = pqi_device_reset(ctrl_info, device);
-
- pqi_device_reset_done(device);
-
- mutex_unlock(&ctrl_info->lun_reset_mutex);
+ pqi_wait_until_ofa_finished(ctrl_info);
+ rc = pqi_device_reset(ctrl_info, device);
out:
dev_err(&ctrl_info->pci_dev->dev,
"reset of scsi %d:%d:%d:%d: %s\n",
@@ -5308,6 +5774,10 @@ static int pqi_slave_alloc(struct scsi_device *sdev)
scsi_change_queue_depth(sdev,
device->advertised_queue_depth);
}
+ if (pqi_is_logical_device(device))
+ pqi_disable_write_same(sdev);
+ else
+ sdev->allow_restart = 1;
}
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -5580,6 +6050,9 @@ static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_in_ofa(ctrl_info))
+ return -EBUSY;
+
switch (cmd) {
case CCISS_DEREGDISK:
case CCISS_REGNEWDISK:
@@ -5684,6 +6157,150 @@ static struct device_attribute *pqi_shost_attrs[] = {
NULL
};
+static ssize_t pqi_unique_id_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ unsigned char uid[16];
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
+ flags);
+ return -ENODEV;
+ }
+ memcpy(uid, device->unique_id, sizeof(uid));
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return snprintf(buffer, PAGE_SIZE,
+ "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
+ uid[0], uid[1], uid[2], uid[3],
+ uid[4], uid[5], uid[6], uid[7],
+ uid[8], uid[9], uid[10], uid[11],
+ uid[12], uid[13], uid[14], uid[15]);
+}
+
+static ssize_t pqi_lunid_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ u8 lunid[8];
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
+ flags);
+ return -ENODEV;
+ }
+ memcpy(lunid, device->scsi3addr, sizeof(lunid));
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
+}
+
+#define MAX_PATHS 8
+static ssize_t pqi_path_info_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ int i;
+ int output_len = 0;
+ u8 box;
+ u8 bay;
+ u8 path_map_index = 0;
+ char *active;
+ unsigned char phys_connector[2];
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
+ flags);
+ return -ENODEV;
+ }
+
+ bay = device->bay;
+ for (i = 0; i < MAX_PATHS; i++) {
+ path_map_index = 1<<i;
+ if (i == device->active_path_index)
+ active = "Active";
+ else if (device->path_map & path_map_index)
+ active = "Inactive";
+ else
+ continue;
+
+ output_len += scnprintf(buf + output_len,
+ PAGE_SIZE - output_len,
+ "[%d:%d:%d:%d] %20.20s ",
+ ctrl_info->scsi_host->host_no,
+ device->bus, device->target,
+ device->lun,
+ scsi_device_type(device->devtype));
+
+ if (device->devtype == TYPE_RAID ||
+ pqi_is_logical_device(device))
+ goto end_buffer;
+
+ memcpy(&phys_connector, &device->phys_connector[i],
+ sizeof(phys_connector));
+ if (phys_connector[0] < '0')
+ phys_connector[0] = '0';
+ if (phys_connector[1] < '0')
+ phys_connector[1] = '0';
+
+ output_len += scnprintf(buf + output_len,
+ PAGE_SIZE - output_len,
+ "PORT: %.2s ", phys_connector);
+
+ box = device->box[i];
+ if (box != 0 && box != 0xFF)
+ output_len += scnprintf(buf + output_len,
+ PAGE_SIZE - output_len,
+ "BOX: %hhu ", box);
+
+ if ((device->devtype == TYPE_DISK ||
+ device->devtype == TYPE_ZBC) &&
+ pqi_expose_device(device))
+ output_len += scnprintf(buf + output_len,
+ PAGE_SIZE - output_len,
+ "BAY: %hhu ", bay);
+
+end_buffer:
+ output_len += scnprintf(buf + output_len,
+ PAGE_SIZE - output_len,
+ "%s\n", active);
+ }
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return output_len;
+}
+
static ssize_t pqi_sas_address_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
@@ -5760,12 +6377,18 @@ static ssize_t pqi_raid_level_show(struct device *dev,
return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
}
+static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
+static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
+static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
pqi_ssd_smart_path_enabled_show, NULL);
static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
static struct device_attribute *pqi_sdev_attrs[] = {
+ &dev_attr_lunid,
+ &dev_attr_unique_id,
+ &dev_attr_path_info,
&dev_attr_sas_address,
&dev_attr_ssd_smart_path_enabled,
&dev_attr_raid_level,
@@ -5780,7 +6403,6 @@ static struct scsi_host_template pqi_driver_template = {
.scan_start = pqi_scan_start,
.scan_finished = pqi_scan_finished,
.this_id = -1,
- .use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = pqi_eh_device_reset_handler,
.ioctl = pqi_ioctl,
.slave_alloc = pqi_slave_alloc,
@@ -5948,6 +6570,244 @@ out:
return rc;
}
+struct pqi_config_table_section_info {
+ struct pqi_ctrl_info *ctrl_info;
+ void *section;
+ u32 section_offset;
+ void __iomem *section_iomem_addr;
+};
+
+static inline bool pqi_is_firmware_feature_supported(
+ struct pqi_config_table_firmware_features *firmware_features,
+ unsigned int bit_position)
+{
+ unsigned int byte_index;
+
+ byte_index = bit_position / BITS_PER_BYTE;
+
+ if (byte_index >= le16_to_cpu(firmware_features->num_elements))
+ return false;
+
+ return firmware_features->features_supported[byte_index] &
+ (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
+}
+
+static inline bool pqi_is_firmware_feature_enabled(
+ struct pqi_config_table_firmware_features *firmware_features,
+ void __iomem *firmware_features_iomem_addr,
+ unsigned int bit_position)
+{
+ unsigned int byte_index;
+ u8 __iomem *features_enabled_iomem_addr;
+
+ byte_index = (bit_position / BITS_PER_BYTE) +
+ (le16_to_cpu(firmware_features->num_elements) * 2);
+
+ features_enabled_iomem_addr = firmware_features_iomem_addr +
+ offsetof(struct pqi_config_table_firmware_features,
+ features_supported) + byte_index;
+
+ return *((__force u8 *)features_enabled_iomem_addr) &
+ (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
+}
+
+static inline void pqi_request_firmware_feature(
+ struct pqi_config_table_firmware_features *firmware_features,
+ unsigned int bit_position)
+{
+ unsigned int byte_index;
+
+ byte_index = (bit_position / BITS_PER_BYTE) +
+ le16_to_cpu(firmware_features->num_elements);
+
+ firmware_features->features_supported[byte_index] |=
+ (1 << (bit_position % BITS_PER_BYTE));
+}
+
+static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
+ u16 first_section, u16 last_section)
+{
+ struct pqi_vendor_general_request request;
+
+ memset(&request, 0, sizeof(request));
+
+ request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
+ put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
+ &request.header.iu_length);
+ put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
+ &request.function_code);
+ put_unaligned_le16(first_section,
+ &request.data.config_table_update.first_section);
+ put_unaligned_le16(last_section,
+ &request.data.config_table_update.last_section);
+
+ return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+ 0, NULL, NO_TIMEOUT);
+}
+
+static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_config_table_firmware_features *firmware_features,
+ void __iomem *firmware_features_iomem_addr)
+{
+ void *features_requested;
+ void __iomem *features_requested_iomem_addr;
+
+ features_requested = firmware_features->features_supported +
+ le16_to_cpu(firmware_features->num_elements);
+
+ features_requested_iomem_addr = firmware_features_iomem_addr +
+ (features_requested - (void *)firmware_features);
+
+ memcpy_toio(features_requested_iomem_addr, features_requested,
+ le16_to_cpu(firmware_features->num_elements));
+
+ return pqi_config_table_update(ctrl_info,
+ PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
+ PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
+}
+
+struct pqi_firmware_feature {
+ char *feature_name;
+ unsigned int feature_bit;
+ bool supported;
+ bool enabled;
+ void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_firmware_feature *firmware_feature);
+};
+
+static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_firmware_feature *firmware_feature)
+{
+ if (!firmware_feature->supported) {
+ dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
+ firmware_feature->feature_name);
+ return;
+ }
+
+ if (firmware_feature->enabled) {
+ dev_info(&ctrl_info->pci_dev->dev,
+ "%s enabled\n", firmware_feature->feature_name);
+ return;
+ }
+
+ dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
+ firmware_feature->feature_name);
+}
+
+static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_firmware_feature *firmware_feature)
+{
+ if (firmware_feature->feature_status)
+ firmware_feature->feature_status(ctrl_info, firmware_feature);
+}
+
+static DEFINE_MUTEX(pqi_firmware_features_mutex);
+
+static struct pqi_firmware_feature pqi_firmware_features[] = {
+ {
+ .feature_name = "Online Firmware Activation",
+ .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "Serial Management Protocol",
+ .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "New Soft Reset Handshake",
+ .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
+ .feature_status = pqi_firmware_feature_status,
+ },
+};
+
+static void pqi_process_firmware_features(
+ struct pqi_config_table_section_info *section_info)
+{
+ int rc;
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_config_table_firmware_features *firmware_features;
+ void __iomem *firmware_features_iomem_addr;
+ unsigned int i;
+ unsigned int num_features_supported;
+
+ ctrl_info = section_info->ctrl_info;
+ firmware_features = section_info->section;
+ firmware_features_iomem_addr = section_info->section_iomem_addr;
+
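+ /*
+ * Negotiate: record which driver-known features the firmware
+ * supports, request all of them through a config table update,
+ * then read back which ones the firmware actually enabled.
+ */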
+ for (i = 0, num_features_supported = 0;
+ i < ARRAY_SIZE(pqi_firmware_features); i++) {
+ if (pqi_is_firmware_feature_supported(firmware_features,
+ pqi_firmware_features[i].feature_bit)) {
+ pqi_firmware_features[i].supported = true;
+ num_features_supported++;
+ } else {
+ pqi_firmware_feature_update(ctrl_info,
+ &pqi_firmware_features[i]);
+ }
+ }
+
+ if (num_features_supported == 0)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
+ if (!pqi_firmware_features[i].supported)
+ continue;
+ pqi_request_firmware_feature(firmware_features,
+ pqi_firmware_features[i].feature_bit);
+ }
+
+ rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
+ firmware_features_iomem_addr);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "failed to enable firmware features in PQI configuration table\n");
+ for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
+ if (!pqi_firmware_features[i].supported)
+ continue;
+ pqi_firmware_feature_update(ctrl_info,
+ &pqi_firmware_features[i]);
+ }
+ return;
+ }
+
+ ctrl_info->soft_reset_handshake_supported = false;
+ for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
+ if (!pqi_firmware_features[i].supported)
+ continue;
+ if (pqi_is_firmware_feature_enabled(firmware_features,
+ firmware_features_iomem_addr,
+ pqi_firmware_features[i].feature_bit)) {
+ pqi_firmware_features[i].enabled = true;
+ if (pqi_firmware_features[i].feature_bit ==
+ PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE)
+ ctrl_info->soft_reset_handshake_supported =
+ true;
+ }
+ pqi_firmware_feature_update(ctrl_info,
+ &pqi_firmware_features[i]);
+ }
+}
+
+static void pqi_init_firmware_features(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
+ pqi_firmware_features[i].supported = false;
+ pqi_firmware_features[i].enabled = false;
+ }
+}
+
+static void pqi_process_firmware_features_section(
+ struct pqi_config_table_section_info *section_info)
+{
+ mutex_lock(&pqi_firmware_features_mutex);
+ pqi_init_firmware_features();
+ pqi_process_firmware_features(section_info);
+ mutex_unlock(&pqi_firmware_features_mutex);
+}
+
static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
{
u32 table_length;
@@ -5955,8 +6815,11 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
void __iomem *table_iomem_addr;
struct pqi_config_table *config_table;
struct pqi_config_table_section_header *section;
+ struct pqi_config_table_section_info section_info;
table_length = ctrl_info->config_table_length;
+ if (table_length == 0)
+ return 0;
config_table = kmalloc(table_length, GFP_KERNEL);
if (!config_table) {
@@ -5973,13 +6836,22 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
ctrl_info->config_table_offset;
memcpy_fromio(config_table, table_iomem_addr, table_length);
+ section_info.ctrl_info = ctrl_info;
section_offset =
get_unaligned_le32(&config_table->first_section_offset);
while (section_offset) {
section = (void *)config_table + section_offset;
+ section_info.section = section;
+ section_info.section_offset = section_offset;
+ section_info.section_iomem_addr =
+ table_iomem_addr + section_offset;
+
switch (get_unaligned_le16(&section->section_id)) {
+ case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
+ pqi_process_firmware_features_section(&section_info);
+ break;
case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
if (pqi_disable_heartbeat)
dev_warn(&ctrl_info->pci_dev->dev,
@@ -5992,6 +6864,13 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
struct pqi_config_table_heartbeat,
heartbeat_counter);
break;
+ case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
+ ctrl_info->soft_reset_status =
+ table_iomem_addr +
+ section_offset +
+ offsetof(struct pqi_config_table_soft_reset,
+ soft_reset_status);
+ break;
}
section_offset =
@@ -6124,10 +7003,6 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
ctrl_info->pqi_mode_enabled = true;
pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
- rc = pqi_process_config_table(ctrl_info);
- if (rc)
- return rc;
-
rc = pqi_alloc_admin_queues(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
@@ -6189,6 +7064,11 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
ctrl_info->controller_online = true;
+
+ rc = pqi_process_config_table(ctrl_info);
+ if (rc)
+ return rc;
+
pqi_start_heartbeat_timer(ctrl_info);
rc = pqi_enable_events(ctrl_info);
@@ -6210,6 +7090,13 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
return rc;
}
+ rc = pqi_set_diag_rescan(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error enabling multi-lun rescan\n");
+ return rc;
+ }
+
rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
@@ -6267,6 +7154,24 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
return rc;
/*
+ * Get the controller properties. This allows us to determine
+ * whether or not it supports PQI mode.
+ */
+ rc = sis_get_ctrl_properties(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error obtaining controller properties\n");
+ return rc;
+ }
+
+ rc = sis_get_pqi_capabilities(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error obtaining controller capabilities\n");
+ return rc;
+ }
+
+ /*
* If the function we are about to call succeeds, the
* controller will transition from legacy SIS mode
* into PQI mode.
@@ -6306,9 +7211,14 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
ctrl_info->controller_online = true;
- pqi_start_heartbeat_timer(ctrl_info);
pqi_ctrl_unblock_requests(ctrl_info);
+ rc = pqi_process_config_table(ctrl_info);
+ if (rc)
+ return rc;
+
+ pqi_start_heartbeat_timer(ctrl_info);
+
rc = pqi_enable_events(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
@@ -6316,6 +7226,20 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
return rc;
}
+ rc = pqi_get_ctrl_firmware_version(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error obtaining firmware version\n");
+ return rc;
+ }
+
+ rc = pqi_set_diag_rescan(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error enabling multi-lun rescan\n");
+ return rc;
+ }
+
rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
@@ -6426,6 +7350,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
mutex_init(&ctrl_info->scan_mutex);
mutex_init(&ctrl_info->lun_reset_mutex);
+ mutex_init(&ctrl_info->ofa_mutex);
INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
spin_lock_init(&ctrl_info->scsi_device_list_lock);
@@ -6502,6 +7427,217 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
pqi_free_ctrl_resources(ctrl_info);
}
+static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
+{
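+ /*
+ * Quiesce order: stop background workers, wait out any scan or
+ * LUN reset in flight, block and drain I/O, fail what is still
+ * queued, then drop back to SIS mode for the activation.
+ */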
+ pqi_cancel_update_time_worker(ctrl_info);
+ pqi_cancel_rescan_worker(ctrl_info);
+ pqi_wait_until_lun_reset_finished(ctrl_info);
+ pqi_wait_until_scan_finished(ctrl_info);
+ pqi_ctrl_ofa_start(ctrl_info);
+ pqi_ctrl_block_requests(ctrl_info);
+ pqi_ctrl_wait_until_quiesced(ctrl_info);
+ pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS);
+ pqi_fail_io_queued_for_all_devices(ctrl_info);
+ pqi_wait_until_inbound_queues_empty(ctrl_info);
+ pqi_stop_heartbeat_timer(ctrl_info);
+ ctrl_info->pqi_mode_enabled = false;
+ pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
+}
+
+static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
+{
+ pqi_ofa_free_host_buffer(ctrl_info);
+ ctrl_info->pqi_mode_enabled = true;
+ pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
+ ctrl_info->controller_online = true;
+ pqi_ctrl_unblock_requests(ctrl_info);
+ pqi_start_heartbeat_timer(ctrl_info);
+ pqi_schedule_update_time_worker(ctrl_info);
+ pqi_clear_soft_reset_status(ctrl_info,
+ PQI_SOFT_RESET_ABORT);
+ pqi_scan_scsi_devices(ctrl_info);
+}
+
+static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
+ u32 total_size, u32 chunk_size)
+{
+ u32 sg_count;
+ u32 size;
+ int i;
+ struct pqi_sg_descriptor *mem_descriptor = NULL;
+ struct device *dev;
+ struct pqi_ofa_memory *ofap;
+
+ dev = &ctrl_info->pci_dev->dev;
+
+ sg_count = (total_size + chunk_size - 1);
+ sg_count /= chunk_size;
+
+ ofap = ctrl_info->pqi_ofa_mem_virt_addr;
+
+ if (sg_count * chunk_size < total_size)
+ goto out;
+
+ ctrl_info->pqi_ofa_chunk_virt_addr =
+ kcalloc(sg_count, sizeof(void *), GFP_KERNEL);
+ if (!ctrl_info->pqi_ofa_chunk_virt_addr)
+ goto out;
+
+ for (size = 0, i = 0; size < total_size; size += chunk_size, i++) {
+ dma_addr_t dma_handle;
+
+ ctrl_info->pqi_ofa_chunk_virt_addr[i] =
+ dma_zalloc_coherent(dev, chunk_size, &dma_handle,
+ GFP_KERNEL);
+
+ if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
+ break;
+
+ mem_descriptor = &ofap->sg_descriptor[i];
+ put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
+ put_unaligned_le32(chunk_size, &mem_descriptor->length);
+ }
+
+ if (!size || size < total_size)
+ goto out_free_chunks;
+
+ put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
+ put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
+ put_unaligned_le32(size, &ofap->bytes_allocated);
+
+ return 0;
+
+out_free_chunks:
+ while (--i >= 0) {
+ mem_descriptor = &ofap->sg_descriptor[i];
+ dma_free_coherent(dev, chunk_size,
+ ctrl_info->pqi_ofa_chunk_virt_addr[i],
+ get_unaligned_le64(&mem_descriptor->address));
+ }
+ kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
+
+out:
+ put_unaligned_le32(0, &ofap->bytes_allocated);
+ return -ENOMEM;
+}
+
+static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
+{
+ u32 total_size;
+ u32 min_chunk_size;
+ u32 chunk_sz;
+
+ total_size = le32_to_cpu(
+ ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
+ min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS;
+
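+ /*
+ * Try progressively smaller chunks, halving from a single chunk
+ * of total_size down to the minimum that still fits within the
+ * available SG descriptors, until the DMA allocations succeed.
+ */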
+ for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
+ if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
+ return 0;
+
+ return -ENOMEM;
+}
+
+static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
+ u32 bytes_requested)
+{
+ struct pqi_ofa_memory *pqi_ofa_memory;
+ struct device *dev;
+
+ dev = &ctrl_info->pci_dev->dev;
+ pqi_ofa_memory = dma_zalloc_coherent(dev,
+ PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
+ &ctrl_info->pqi_ofa_mem_dma_handle,
+ GFP_KERNEL);
+
+ if (!pqi_ofa_memory)
+ return;
+
+ put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
+ memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
+ sizeof(pqi_ofa_memory->signature));
+ pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);
+
+ ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;
+
+ if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
+ dev_err(dev, "Failed to allocate host buffer of size = %u",
+ bytes_requested);
+ }
+}
+
+static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
+{
+ int i;
+ struct pqi_sg_descriptor *mem_descriptor;
+ struct pqi_ofa_memory *ofap;
+
+ ofap = ctrl_info->pqi_ofa_mem_virt_addr;
+
+ if (!ofap)
+ return;
+
+ if (!ofap->bytes_allocated)
+ goto out;
+
+ mem_descriptor = ofap->sg_descriptor;
+
+ for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
+ i++) {
+ dma_free_coherent(&ctrl_info->pci_dev->dev,
+ get_unaligned_le32(&mem_descriptor[i].length),
+ ctrl_info->pqi_ofa_chunk_virt_addr[i],
+ get_unaligned_le64(&mem_descriptor[i].address));
+ }
+ kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
+
+out:
+ dma_free_coherent(&ctrl_info->pci_dev->dev,
+ PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
+ ctrl_info->pqi_ofa_mem_dma_handle);
+ ctrl_info->pqi_ofa_mem_virt_addr = NULL;
+}
+
+static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
+{
+ struct pqi_vendor_general_request request;
+ size_t size;
+ struct pqi_ofa_memory *ofap;
+
+ memset(&request, 0, sizeof(request));
+
+ ofap = ctrl_info->pqi_ofa_mem_virt_addr;
+
+ request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
+ put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
+ &request.header.iu_length);
+ put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
+ &request.function_code);
+
+ if (ofap) {
+ size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
+ get_unaligned_le16(&ofap->num_memory_descriptors) *
+ sizeof(struct pqi_sg_descriptor);
+
+ put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
+ &request.data.ofa_memory_allocation.buffer_address);
+ put_unaligned_le32(size,
+ &request.data.ofa_memory_allocation.buffer_length);
+
+ }
+
+ return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+ 0, NULL, NO_TIMEOUT);
+}
+
+#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
+
+static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
+{
+ msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
+ return pqi_ctrl_init_resume(ctrl_info);
+}
+
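The OFA (Online Firmware Activation) host buffer above is carved into equal DMA-coherent chunks: pqi_ofa_alloc_mem() rounds the chunk count up, and pqi_ofa_alloc_host_buffer() retries with the chunk size halved until an allocation succeeds or the chunk would need more than PQI_OFA_MAX_SG_DESCRIPTORS entries. A minimal userspace sketch of that strategy, with malloc() standing in for dma_zalloc_coherent() and all names illustrative:

#include <stdio.h>
#include <stdlib.h>

#define MAX_SG_DESCRIPTORS 64	/* stand-in for PQI_OFA_MAX_SG_DESCRIPTORS */

static int alloc_chunks(unsigned int total_size, unsigned int chunk_size)
{
	unsigned int count = (total_size + chunk_size - 1) / chunk_size;
	unsigned int i, got = 0;
	void **chunks;
	int rc = -1;

	if (count > MAX_SG_DESCRIPTORS)
		return -1;
	chunks = calloc(count, sizeof(*chunks));
	if (!chunks)
		return -1;
	for (i = 0; i < count; i++) {
		chunks[i] = malloc(chunk_size);	/* dma_zalloc_coherent() in the driver */
		if (!chunks[i])
			break;
		got++;
	}
	if (got == count) {
		printf("allocated %u chunks of %u bytes\n", count, chunk_size);
		rc = 0;
	}
	while (got--)			/* the driver keeps these on success */
		free(chunks[got]);
	free(chunks);
	return rc;
}

int main(void)
{
	unsigned int total = 1024 * 1024, chunk;

	/* halve the chunk size until the allocation fits, as
	 * pqi_ofa_alloc_host_buffer() does */
	for (chunk = total; chunk >= total / MAX_SG_DESCRIPTORS; chunk /= 2)
		if (!alloc_chunks(total, chunk))
			return 0;
	return 1;
}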
static void pqi_perform_lockup_action(void)
{
switch (pqi_lockup_action) {
@@ -6600,7 +7736,7 @@ static int pqi_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
int rc;
- int node;
+ int node, cp_node;
struct pqi_ctrl_info *ctrl_info;
pqi_print_ctrl_info(pci_dev, id);
@@ -6618,8 +7754,12 @@ static int pqi_pci_probe(struct pci_dev *pci_dev,
"controller device ID matched using wildcards\n");
node = dev_to_node(&pci_dev->dev);
- if (node == NUMA_NO_NODE)
- set_dev_node(&pci_dev->dev, 0);
+ if (node == NUMA_NO_NODE) {
+ cp_node = cpu_to_node(0);
+ if (cp_node == NUMA_NO_NODE)
+ cp_node = 0;
+ set_dev_node(&pci_dev->dev, cp_node);
+ }
ctrl_info = pqi_alloc_ctrl_info(node);
if (!ctrl_info) {
@@ -6654,6 +7794,8 @@ static void pqi_pci_remove(struct pci_dev *pci_dev)
if (!ctrl_info)
return;
+ ctrl_info->in_shutdown = true;
+
pqi_remove_ctrl(ctrl_info);
}
@@ -6671,6 +7813,7 @@ static void pqi_shutdown(struct pci_dev *pci_dev)
* storage.
*/
rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
+ pqi_free_interrupts(ctrl_info);
pqi_reset(ctrl_info);
if (rc == 0)
return;
@@ -6715,11 +7858,12 @@ static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t stat
pqi_cancel_rescan_worker(ctrl_info);
pqi_wait_until_scan_finished(ctrl_info);
pqi_wait_until_lun_reset_finished(ctrl_info);
+ pqi_wait_until_ofa_finished(ctrl_info);
pqi_flush_cache(ctrl_info, SUSPEND);
pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
pqi_wait_until_inbound_queues_empty(ctrl_info);
- pqi_ctrl_wait_for_pending_io(ctrl_info);
+ pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
pqi_stop_heartbeat_timer(ctrl_info);
if (state.event == PM_EVENT_FREEZE)
@@ -6805,6 +7949,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0xc460)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0xc461)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x193d, 0xf460)
},
{
@@ -6841,6 +7993,30 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x19e5, 0xd227)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x19e5, 0xd228)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x19e5, 0xd229)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x19e5, 0xd22a)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x19e5, 0xd22b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x19e5, 0xd22c)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x0110)
},
{
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index b209a35e482e..0e4ef215115f 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -17,9 +17,11 @@
*/
#include <linux/kernel.h>
+#include <linux/bsg-lib.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_transport_sas.h>
+#include <asm/unaligned.h>
#include "smartpqi.h"
static struct pqi_sas_phy *pqi_alloc_sas_phy(struct pqi_sas_port *pqi_sas_port)
@@ -97,14 +99,32 @@ static int pqi_sas_port_add_rphy(struct pqi_sas_port *pqi_sas_port,
identify = &rphy->identify;
identify->sas_address = pqi_sas_port->sas_address;
- identify->initiator_port_protocols = SAS_PROTOCOL_STP;
- identify->target_port_protocols = SAS_PROTOCOL_STP;
+
+ if (pqi_sas_port->device &&
+ pqi_sas_port->device->is_expander_smp_device) {
+ identify->initiator_port_protocols = SAS_PROTOCOL_SMP;
+ identify->target_port_protocols = SAS_PROTOCOL_SMP;
+ } else {
+ identify->initiator_port_protocols = SAS_PROTOCOL_STP;
+ identify->target_port_protocols = SAS_PROTOCOL_STP;
+ }
return sas_rphy_add(rphy);
}
+static struct sas_rphy *pqi_sas_rphy_alloc(struct pqi_sas_port *pqi_sas_port)
+{
+ if (pqi_sas_port->device &&
+ pqi_sas_port->device->is_expander_smp_device)
+ return sas_expander_alloc(pqi_sas_port->port,
+ SAS_FANOUT_EXPANDER_DEVICE);
+
+ return sas_end_device_alloc(pqi_sas_port->port);
+}
+
static struct pqi_sas_port *pqi_alloc_sas_port(
- struct pqi_sas_node *pqi_sas_node, u64 sas_address)
+ struct pqi_sas_node *pqi_sas_node, u64 sas_address,
+ struct pqi_scsi_dev *device)
{
int rc;
struct pqi_sas_port *pqi_sas_port;
@@ -127,6 +147,7 @@ static struct pqi_sas_port *pqi_alloc_sas_port(
pqi_sas_port->port = port;
pqi_sas_port->sas_address = sas_address;
+ pqi_sas_port->device = device;
list_add_tail(&pqi_sas_port->port_list_entry,
&pqi_sas_node->port_list_head);
@@ -146,7 +167,7 @@ static void pqi_free_sas_port(struct pqi_sas_port *pqi_sas_port)
struct pqi_sas_phy *next;
list_for_each_entry_safe(pqi_sas_phy, next,
- &pqi_sas_port->phy_list_head, phy_list_entry)
+ &pqi_sas_port->phy_list_head, phy_list_entry)
pqi_free_sas_phy(pqi_sas_phy);
sas_port_delete(pqi_sas_port->port);
@@ -176,7 +197,7 @@ static void pqi_free_sas_node(struct pqi_sas_node *pqi_sas_node)
return;
list_for_each_entry_safe(pqi_sas_port, next,
- &pqi_sas_node->port_list_head, port_list_entry)
+ &pqi_sas_node->port_list_head, port_list_entry)
pqi_free_sas_port(pqi_sas_port);
kfree(pqi_sas_node);
@@ -206,13 +227,14 @@ int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info)
struct pqi_sas_port *pqi_sas_port;
struct pqi_sas_phy *pqi_sas_phy;
- parent_dev = &shost->shost_gendev;
+ parent_dev = &shost->shost_dev;
pqi_sas_node = pqi_alloc_sas_node(parent_dev);
if (!pqi_sas_node)
return -ENOMEM;
- pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node, ctrl_info->sas_address);
+ pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node,
+ ctrl_info->sas_address, NULL);
if (!pqi_sas_port) {
rc = -ENODEV;
goto free_sas_node;
@@ -254,11 +276,12 @@ int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
struct pqi_sas_port *pqi_sas_port;
struct sas_rphy *rphy;
- pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node, device->sas_address);
+ pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node,
+ device->sas_address, device);
if (!pqi_sas_port)
return -ENOMEM;
- rphy = sas_end_device_alloc(pqi_sas_port->port);
+ rphy = pqi_sas_rphy_alloc(pqi_sas_port);
if (!rphy) {
rc = -ENODEV;
goto free_sas_port;
@@ -329,6 +352,128 @@ static int pqi_sas_phy_speed(struct sas_phy *phy,
return -EINVAL;
}
+#define CSMI_IOCTL_TIMEOUT 60
+#define SMP_CRC_FIELD_LENGTH 4
+
+static struct bmic_csmi_smp_passthru_buffer *
+pqi_build_csmi_smp_passthru_buffer(struct sas_rphy *rphy,
+ struct bsg_job *job)
+{
+ struct bmic_csmi_smp_passthru_buffer *smp_buf;
+ struct bmic_csmi_ioctl_header *ioctl_header;
+ struct bmic_csmi_smp_passthru *parameters;
+ u32 req_size;
+ u32 resp_size;
+
+ smp_buf = kzalloc(sizeof(*smp_buf), GFP_KERNEL);
+ if (!smp_buf)
+ return NULL;
+
+ req_size = job->request_payload.payload_len;
+ resp_size = job->reply_payload.payload_len;
+
+ ioctl_header = &smp_buf->ioctl_header;
+ put_unaligned_le32(sizeof(smp_buf->ioctl_header),
+ &ioctl_header->header_length);
+ put_unaligned_le32(CSMI_IOCTL_TIMEOUT, &ioctl_header->timeout);
+ put_unaligned_le32(CSMI_CC_SAS_SMP_PASSTHRU,
+ &ioctl_header->control_code);
+ put_unaligned_le32(sizeof(smp_buf->parameters), &ioctl_header->length);
+
+ parameters = &smp_buf->parameters;
+ parameters->phy_identifier = rphy->identify.phy_identifier;
+ parameters->port_identifier = 0;
+ parameters->connection_rate = 0;
+ put_unaligned_be64(rphy->identify.sas_address,
+ &parameters->destination_sas_address);
+
+ if (req_size > SMP_CRC_FIELD_LENGTH)
+ req_size -= SMP_CRC_FIELD_LENGTH;
+
+ put_unaligned_le32(req_size, &parameters->request_length);
+
+ put_unaligned_le32(resp_size, &parameters->response_length);
+
+	sg_copy_to_buffer(job->request_payload.sg_list,
+		job->request_payload.sg_cnt, &parameters->request,
+		req_size);
+
+ return smp_buf;
+}
+
+static unsigned int pqi_build_sas_smp_handler_reply(
+ struct bmic_csmi_smp_passthru_buffer *smp_buf, struct bsg_job *job,
+ struct pqi_raid_error_info *error_info)
+{
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt, &smp_buf->parameters.response,
+ le32_to_cpu(smp_buf->parameters.response_length));
+
+ job->reply_len = le16_to_cpu(error_info->sense_data_length);
+ memcpy(job->reply, error_info->data,
+ le16_to_cpu(error_info->sense_data_length));
+
+ return job->reply_payload.payload_len -
+ get_unaligned_le32(&error_info->data_in_transferred);
+}
+
+void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
+ struct sas_rphy *rphy)
+{
+ int rc;
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+ struct bmic_csmi_smp_passthru_buffer *smp_buf;
+ struct pqi_raid_error_info error_info;
+ unsigned int reslen = 0;
+
+ pqi_ctrl_busy(ctrl_info);
+
+ if (job->reply_payload.payload_len == 0) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (!rphy) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (rphy->identify.device_type != SAS_FANOUT_EXPANDER_DEVICE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (job->request_payload.sg_cnt > 1 || job->reply_payload.sg_cnt > 1) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (pqi_ctrl_offline(ctrl_info)) {
+ rc = -ENXIO;
+ goto out;
+ }
+
+ if (pqi_ctrl_blocked(ctrl_info)) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ smp_buf = pqi_build_csmi_smp_passthru_buffer(rphy, job);
+ if (!smp_buf) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = pqi_csmi_smp_passthru(ctrl_info, smp_buf, sizeof(*smp_buf),
+ &error_info);
+ if (rc)
+ goto out;
+
+ reslen = pqi_build_sas_smp_handler_reply(smp_buf, job, &error_info);
+out:
+ bsg_job_done(job, rc, reslen);
+ pqi_ctrl_unbusy(ctrl_info);
+}
struct sas_function_template pqi_sas_transport_functions = {
.get_linkerrors = pqi_sas_get_linkerrors,
.get_enclosure_identifier = pqi_sas_get_enclosure_identifier,
@@ -338,4 +483,5 @@ struct sas_function_template pqi_sas_transport_functions = {
.phy_setup = pqi_sas_phy_setup,
.phy_release = pqi_sas_phy_release,
.set_phy_speed = pqi_sas_phy_speed,
+ .smp_handler = pqi_sas_smp_handler,
};
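The CSMI passthru buffer above is populated with put_unaligned_le16/32/64() so every header field is stored little-endian and byte-wise, independent of host endianness and alignment. A self-contained userspace sketch of that packing style; the layout and the control-code value below are placeholders, not the CSMI ABI:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Userspace stand-in for the kernel's put_unaligned_le32(): byte-wise
 * little-endian store, valid at any alignment on any host. */
static void put_le32(uint32_t v, void *p)
{
	uint8_t b[4] = { v, v >> 8, v >> 16, v >> 24 };

	memcpy(p, b, sizeof(b));
}

int main(void)
{
	uint8_t hdr[12];	/* illustrative layout, not the CSMI ABI */

	put_le32(sizeof(hdr), &hdr[0]);	/* header_length */
	put_le32(60, &hdr[4]);		/* timeout, seconds */
	put_le32(0x0, &hdr[8]);		/* control code (placeholder value) */

	printf("length field bytes: %u %u %u %u\n",
	       hdr[0], hdr[1], hdr[2], hdr[3]);	/* 12 0 0 0 */
	return 0;
}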
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index ea91658c7060..dcd11c6418cc 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -34,6 +34,7 @@
#define SIS_REENABLE_SIS_MODE 0x1
#define SIS_ENABLE_MSIX 0x40
#define SIS_ENABLE_INTX 0x80
+#define SIS_SOFT_RESET 0x100
#define SIS_CMD_READY 0x200
#define SIS_TRIGGER_SHUTDOWN 0x800000
#define SIS_PQI_RESET_QUIESCE 0x1000000
@@ -59,7 +60,7 @@
#define SIS_CTRL_KERNEL_UP 0x80
#define SIS_CTRL_KERNEL_PANIC 0x100
-#define SIS_CTRL_READY_TIMEOUT_SECS 30
+#define SIS_CTRL_READY_TIMEOUT_SECS 180
#define SIS_CTRL_READY_RESUME_TIMEOUT_SECS 90
#define SIS_CTRL_READY_POLL_INTERVAL_MSECS 10
@@ -90,7 +91,7 @@ static int sis_wait_for_ctrl_ready_with_timeout(struct pqi_ctrl_info *ctrl_info,
unsigned long timeout;
u32 status;
- timeout = (timeout_secs * HZ) + jiffies;
+ timeout = (timeout_secs * PQI_HZ) + jiffies;
while (1) {
status = readl(&ctrl_info->registers->sis_firmware_status);
@@ -202,7 +203,7 @@ static int sis_send_sync_cmd(struct pqi_ctrl_info *ctrl_info,
* the top of the loop in order to give the controller time to start
* processing the command before we start polling.
*/
- timeout = (SIS_CMD_COMPLETE_TIMEOUT_SECS * HZ) + jiffies;
+ timeout = (SIS_CMD_COMPLETE_TIMEOUT_SECS * PQI_HZ) + jiffies;
while (1) {
msleep(SIS_CMD_COMPLETE_POLL_INTERVAL_MSECS);
doorbell = readl(&registers->sis_ctrl_to_host_doorbell);
@@ -348,7 +349,7 @@ static int sis_wait_for_doorbell_bit_to_clear(
u32 doorbell_register;
unsigned long timeout;
- timeout = (SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS * HZ) + jiffies;
+ timeout = (SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS * PQI_HZ) + jiffies;
while (1) {
doorbell_register =
@@ -420,6 +421,12 @@ u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info)
return readl(&ctrl_info->registers->sis_driver_scratch);
}
+void sis_soft_reset(struct pqi_ctrl_info *ctrl_info)
+{
+ writel(SIS_SOFT_RESET,
+ &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+}
+
static void __attribute__((unused)) verify_structures(void)
{
BUILD_BUG_ON(offsetof(struct sis_base_struct,
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index 2bf889dbf5ab..d018cb9c3f82 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -33,5 +33,6 @@ int sis_pqi_reset_quiesce(struct pqi_ctrl_info *ctrl_info);
int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info);
void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value);
u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info);
+void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
#endif /* _SMARTPQI_SIS_H */
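The SIS helpers poll hardware registers against a jiffies deadline (now scaled by PQI_HZ, with the controller-ready timeout raised from 30 to 180 seconds). A userspace analogue of the same poll-until-deadline shape, with check_ready() as a stub:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static bool check_ready(void)
{
	static int calls;

	return ++calls > 3;	/* pretend the controller needs a few polls */
}

static int wait_for_ready(unsigned int timeout_secs)
{
	struct timespec now, deadline;

	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_sec += timeout_secs;

	for (;;) {
		if (check_ready())
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec > deadline.tv_sec)
			return -1;	/* -ETIMEDOUT in the driver */
		usleep(10 * 1000);	/* poll interval, 10 ms */
	}
}

int main(void)
{
	printf("wait_for_ready: %d\n", wait_for_ready(180));
	return 0;
}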
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 5295277d6325..5e824fd6047a 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -127,7 +127,6 @@ static struct scsi_host_template snic_host_template = {
.this_id = -1,
.cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH,
.can_queue = SNIC_MAX_IO_REQ,
- .use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = SNIC_MAX_SG_DESC_CNT,
.max_sectors = 0x800,
.shost_attrs = snic_attrs,
diff --git a/drivers/scsi/snic/snic_trc.c b/drivers/scsi/snic/snic_trc.c
index fc60c933d6c0..458eaba24c78 100644
--- a/drivers/scsi/snic/snic_trc.c
+++ b/drivers/scsi/snic/snic_trc.c
@@ -126,7 +126,7 @@ snic_trc_init(void)
int tbuf_sz = 0, ret;
tbuf_sz = (snic_trace_max_pages * PAGE_SIZE);
- tbuf = vmalloc(tbuf_sz);
+ tbuf = vzalloc(tbuf_sz);
if (!tbuf) {
SNIC_ERR("Failed to Allocate Trace Buffer Size. %d\n", tbuf_sz);
SNIC_ERR("Trace Facility not enabled.\n");
@@ -135,7 +135,6 @@ snic_trc_init(void)
return ret;
}
- memset(tbuf, 0, tbuf_sz);
trc->buf = (struct snic_trc_data *) tbuf;
spin_lock_init(&trc->lock);
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 9b20643ab49d..f6bef7ad65e7 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1489,6 +1489,7 @@ static struct scsi_host_template driver_template = {
.eh_abort_handler = stex_abort,
.eh_host_reset_handler = stex_reset,
.this_id = -1,
+ .dma_boundary = PAGE_SIZE - 1,
};
static struct pci_device_id stex_pci_tbl[] = {
@@ -1617,19 +1618,6 @@ static struct st_card_info stex_card_info[] = {
},
};
-static int stex_set_dma_mask(struct pci_dev * pdev)
-{
- int ret;
-
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
- && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
- return 0;
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (!ret)
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- return ret;
-}
-
static int stex_request_irq(struct st_hba *hba)
{
struct pci_dev *pdev = hba->pdev;
@@ -1710,7 +1698,9 @@ static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_release_regions;
}
- err = stex_set_dma_mask(pdev);
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err)
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n",
pci_name(pdev));
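The stex conversion above replaces the driver-private mask helper with the generic DMA API: dma_set_mask_and_coherent() sets the streaming and coherent masks in one call, so the 64-bit attempt with a 32-bit fallback collapses to two statements. A kernel-style sketch of the idiom in isolation (probe context elided, not runnable standalone):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_set_dma_mask(struct pci_dev *pdev)
{
	int err;

	/* Prefer full 64-bit addressing for both streaming and
	 * coherent mappings ... */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		/* ... and fall back to 32 bits on older platforms. */
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return err;
}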
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 8f88348ebe42..84380bae20f1 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1698,7 +1698,6 @@ static struct scsi_host_template scsi_driver = {
.slave_configure = storvsc_device_configure,
.cmd_per_lun = 2048,
.this_id = -1,
- .use_clustering = ENABLE_CLUSTERING,
/* Make sure we dont get a sg segment crosses a page boundary */
.dma_boundary = PAGE_SIZE-1,
.no_write_same = 1,
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 9492638296c8..95a7ea7eefa0 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -500,7 +500,7 @@ static struct scsi_host_template sun3_scsi_template = {
.this_id = 7,
.sg_tablesize = SG_NONE,
.cmd_per_lun = 2,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
.cmd_size = NCR5380_CMD_SIZE,
};
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index a11efbcb7f8b..c71bd01fef94 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -529,11 +529,10 @@ static int esp_sbus_probe(struct platform_device *op)
int hme = 0;
int ret;
- if (dp->parent &&
- (!strcmp(dp->parent->name, "espdma") ||
- !strcmp(dp->parent->name, "dma")))
+ if (of_node_name_eq(dp->parent, "espdma") ||
+ of_node_name_eq(dp->parent, "dma"))
dma_node = dp->parent;
- else if (!strcmp(dp->name, "SUNW,fas")) {
+ else if (of_node_name_eq(dp, "SUNW,fas")) {
dma_node = op->dev.of_node;
hme = 1;
}
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 5f10aa9bad9b..57f6d63e4c40 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -1312,9 +1312,9 @@ static struct Scsi_Host *sym_attach(struct scsi_host_template *tpnt, int unit,
sprintf(np->s.inst_name, "sym%d", np->s.unit);
if ((SYM_CONF_DMA_ADDRESSING_MODE > 0) && (np->features & FE_DAC) &&
- !pci_set_dma_mask(pdev, DMA_DAC_MASK)) {
+ !dma_set_mask(&pdev->dev, DMA_DAC_MASK)) {
set_dac(np);
- } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
printf_warning("%s: No suitable DMA available\n", sym_name(np));
goto attach_failed;
}
@@ -1660,7 +1660,6 @@ static struct scsi_host_template sym2_template = {
.eh_bus_reset_handler = sym53c8xx_eh_bus_reset_handler,
.eh_host_reset_handler = sym53c8xx_eh_host_reset_handler,
.this_id = 7,
- .use_clustering = ENABLE_CLUSTERING,
.max_sectors = 0xFFFF,
#ifdef SYM_LINUX_PROC_INFO_SUPPORT
.show_info = sym_show_info,
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 2ddd426323e9..2ddbb26d9c26 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -80,6 +80,14 @@ config SCSI_UFSHCD_PLATFORM
If unsure, say N.
+config SCSI_UFS_CDNS_PLATFORM
+ tristate "Cadence UFS Controller platform driver"
+ depends on SCSI_UFSHCD_PLATFORM
+ help
+ This selects the Cadence-specific additions to UFSHCD platform driver.
+
+ If unsure, say N.
+
config SCSI_UFS_DWC_TC_PLATFORM
tristate "DesignWare platform support using a G210 Test Chip"
depends on SCSI_UFSHCD_PLATFORM
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index aca481329828..a3bd70c3652c 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -2,6 +2,7 @@
# UFSHCD makefile
obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
+obj-$(CONFIG_SCSI_UFS_CDNS_PLATFORM) += cdns-pltfrm.o
obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
ufshcd-core-y += ufshcd.o ufs-sysfs.o
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c
new file mode 100644
index 000000000000..4a37b4f57164
--- /dev/null
+++ b/drivers/scsi/ufs/cdns-pltfrm.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Platform UFS Host driver for Cadence controller
+ *
+ * Copyright (C) 2018 Cadence Design Systems, Inc.
+ *
+ * Authors:
+ * Jan Kotas <jank@cadence.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/time.h>
+
+#include "ufshcd-pltfrm.h"
+
+#define CDNS_UFS_REG_HCLKDIV 0xFC
+
+/**
+ * cdns_ufs_set_hclkdiv - set HCLKDIV register value based on the core_clk
+ * @hba: host controller instance
+ *
+ * Return zero for success and non-zero for failure
+ */
+static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba)
+{
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+ unsigned long core_clk_rate = 0;
+ u32 core_clk_div = 0;
+
+ if (list_empty(head))
+ return 0;
+
+ list_for_each_entry(clki, head, list) {
+ if (IS_ERR_OR_NULL(clki->clk))
+ continue;
+ if (!strcmp(clki->name, "core_clk"))
+ core_clk_rate = clk_get_rate(clki->clk);
+ }
+
+ if (!core_clk_rate) {
+ dev_err(hba->dev, "%s: unable to find core_clk rate\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ core_clk_div = core_clk_rate / USEC_PER_SEC;
+
+ ufshcd_writel(hba, core_clk_div, CDNS_UFS_REG_HCLKDIV);
+	/*
+	 * Make sure the register was updated; the UniPro layer
+	 * will not work with an incorrect value.
+	 */
+ mb();
+
+ return 0;
+}
+
+/**
+ * cdns_ufs_setup_clocks - set clocks used by the controller
+ * @hba: host controller instance
+ * @on: if true, enable clocks, otherwise disable
+ * @status: notify stage (pre, post change)
+ *
+ * Return zero for success and non-zero for failure
+ */
+static int cdns_ufs_setup_clocks(struct ufs_hba *hba, bool on,
+ enum ufs_notify_change_status status)
+{
+ if ((!on) || (status == PRE_CHANGE))
+ return 0;
+
+ return cdns_ufs_set_hclkdiv(hba);
+}
+
+static struct ufs_hba_variant_ops cdns_pltfm_hba_vops = {
+ .name = "cdns-ufs-pltfm",
+ .setup_clocks = cdns_ufs_setup_clocks,
+};
+
+/**
+ * cdns_ufs_pltfrm_probe - probe routine of the driver
+ * @pdev: pointer to platform device handle
+ *
+ * Return zero for success and non-zero for failure
+ */
+static int cdns_ufs_pltfrm_probe(struct platform_device *pdev)
+{
+ int err;
+ struct device *dev = &pdev->dev;
+
+ /* Perform generic probe */
+ err = ufshcd_pltfrm_init(pdev, &cdns_pltfm_hba_vops);
+ if (err)
+ dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
+
+ return err;
+}
+
+/**
+ * cdns_ufs_pltfrm_remove - removes the ufs driver
+ * @pdev: pointer to platform device handle
+ *
+ * Always returns 0
+ */
+static int cdns_ufs_pltfrm_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ ufshcd_remove(hba);
+ return 0;
+}
+
+static const struct of_device_id cdns_ufs_of_match[] = {
+ { .compatible = "cdns,ufshc" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, cdns_ufs_of_match);
+
+static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
+ .suspend = ufshcd_pltfrm_suspend,
+ .resume = ufshcd_pltfrm_resume,
+ .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
+ .runtime_resume = ufshcd_pltfrm_runtime_resume,
+ .runtime_idle = ufshcd_pltfrm_runtime_idle,
+};
+
+static struct platform_driver cdns_ufs_pltfrm_driver = {
+ .probe = cdns_ufs_pltfrm_probe,
+ .remove = cdns_ufs_pltfrm_remove,
+ .driver = {
+ .name = "cdns-ufshcd",
+ .pm = &cdns_ufs_dev_pm_ops,
+ .of_match_table = cdns_ufs_of_match,
+ },
+};
+
+module_platform_driver(cdns_ufs_pltfrm_driver);
+
+MODULE_AUTHOR("Jan Kotas <jank@cadence.com>");
+MODULE_DESCRIPTION("Cadence UFS host controller platform driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
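cdns_ufs_set_hclkdiv() programs HCLKDIV with the core clock expressed as cycles per microsecond: the rate in Hz divided by USEC_PER_SEC. For a 200 MHz core_clk that is 200000000 / 1000000 = 200. A self-contained check of the arithmetic:

#include <stdio.h>

#define USEC_PER_SEC 1000000UL

int main(void)
{
	unsigned long core_clk_rate = 200000000UL;	/* 200 MHz, example */
	unsigned int core_clk_div = core_clk_rate / USEC_PER_SEC;

	/* HCLKDIV holds cycles-per-microsecond: 200 for a 200 MHz clock */
	printf("HCLKDIV = %u\n", core_clk_div);
	return 0;
}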
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 58087d3916d0..dd65fea07687 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -46,7 +46,7 @@
#define QUERY_DESC_HDR_SIZE 2
#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
(sizeof(struct utp_upiu_header)))
-#define RESPONSE_UPIU_SENSE_DATA_LENGTH 18
+#define UFS_SENSE_SIZE 18
#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
@@ -378,6 +378,20 @@ enum query_opcode {
UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8,
};
+/* bRefClkFreq attribute values */
+enum ufs_ref_clk_freq {
+ REF_CLK_FREQ_19_2_MHZ = 0,
+ REF_CLK_FREQ_26_MHZ = 1,
+ REF_CLK_FREQ_38_4_MHZ = 2,
+ REF_CLK_FREQ_52_MHZ = 3,
+ REF_CLK_FREQ_INVAL = -1,
+};
+
+struct ufs_ref_clk {
+ unsigned long freq_hz;
+ enum ufs_ref_clk_freq val;
+};
+
/* Query response result code */
enum {
QUERY_RESULT_SUCCESS = 0x00,
@@ -444,7 +458,7 @@ struct utp_cmd_rsp {
__be32 residual_transfer_count;
__be32 reserved[4];
__be16 sense_data_len;
- u8 sense_data[RESPONSE_UPIU_SENSE_DATA_LENGTH];
+ u8 sense_data[UFS_SENSE_SIZE];
};
/**
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index f1c57cd33b5b..9ba7671b84f8 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -51,8 +51,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
-#define UFSHCD_REQ_SENSE_SIZE 18
-
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
UFSHCD_ERROR_MASK)
@@ -1551,6 +1549,7 @@ start:
* currently running. Hence, fall through to cancel gating
* work and to enable clocks.
*/
+ /* fallthrough */
case CLKS_OFF:
ufshcd_scsi_block_requests(hba);
hba->clk_gating.state = REQ_CLKS_ON;
@@ -1562,6 +1561,7 @@ start:
* fall through to check if we should wait for this
* work to be done or not.
*/
+ /* fallthrough */
case REQ_CLKS_ON:
if (async) {
rc = -EAGAIN;
@@ -1890,11 +1890,10 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
int len_to_copy;
len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
- len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
+ len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
- memcpy(lrbp->sense_buffer,
- lrbp->ucd_rsp_ptr->sr.sense_data,
- min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
+ memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
+ len_to_copy);
}
}
@@ -2456,7 +2455,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
WARN_ON(lrbp->cmd);
lrbp->cmd = cmd;
- lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
+ lrbp->sense_bufflen = UFS_SENSE_SIZE;
lrbp->sense_buffer = cmd->sense_buffer;
lrbp->task_tag = tag;
lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
@@ -4620,6 +4619,7 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
switch (scsi_status) {
case SAM_STAT_CHECK_CONDITION:
ufshcd_copy_sense_data(lrbp);
+ /* fallthrough */
case SAM_STAT_GOOD:
result |= DID_OK << 16 |
COMMAND_COMPLETE << 8 |
@@ -6701,6 +6701,74 @@ static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}
+static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
+ {19200000, REF_CLK_FREQ_19_2_MHZ},
+ {26000000, REF_CLK_FREQ_26_MHZ},
+ {38400000, REF_CLK_FREQ_38_4_MHZ},
+ {52000000, REF_CLK_FREQ_52_MHZ},
+ {0, REF_CLK_FREQ_INVAL},
+};
+
+static enum ufs_ref_clk_freq
+ufs_get_bref_clk_from_hz(unsigned long freq)
+{
+ int i;
+
+ for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
+ if (ufs_ref_clk_freqs[i].freq_hz == freq)
+ return ufs_ref_clk_freqs[i].val;
+
+ return REF_CLK_FREQ_INVAL;
+}
+
+void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
+{
+ unsigned long freq;
+
+ freq = clk_get_rate(refclk);
+
+	hba->dev_ref_clk_freq = ufs_get_bref_clk_from_hz(freq);
+
+ if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
+ dev_err(hba->dev,
+ "invalid ref_clk setting = %ld\n", freq);
+}
+
+static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
+{
+ int err;
+ u32 ref_clk;
+ u32 freq = hba->dev_ref_clk_freq;
+
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
+
+ if (err) {
+ dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
+ err);
+ goto out;
+ }
+
+ if (ref_clk == freq)
+ goto out; /* nothing to update */
+
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
+
+ if (err) {
+ dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
+ ufs_ref_clk_freqs[freq].freq_hz);
+ goto out;
+ }
+
+ dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
+ ufs_ref_clk_freqs[freq].freq_hz);
+
+out:
+ return err;
+}
+
/**
* ufshcd_probe_hba - probe hba to detect device and initialize
* @hba: per-adapter instance
@@ -6766,6 +6834,12 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
"%s: Failed getting max supported power mode\n",
__func__);
} else {
+ /*
+ * Set the right value to bRefClkFreq before attempting to
+ * switch to HS gears.
+ */
+ if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
+ ufshcd_set_dev_ref_clk(hba);
ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
if (ret) {
dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
@@ -6910,6 +6984,7 @@ static struct scsi_host_template ufshcd_driver_template = {
.max_host_blocked = 1,
.track_queue_depth = 1,
.sdev_groups = ufshcd_driver_groups,
+ .dma_boundary = PAGE_SIZE - 1,
};
static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
@@ -7252,6 +7327,14 @@ static int ufshcd_init_clocks(struct ufs_hba *hba)
goto out;
}
+ /*
+ * Parse device ref clk freq as per device tree "ref_clk".
+ * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
+ * in ufshcd_alloc_host().
+ */
+ if (!strcmp(clki->name, "ref_clk"))
+ ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
+
if (clki->max_freq) {
ret = clk_set_rate(clki->clk, clki->max_freq);
if (ret) {
@@ -7379,19 +7462,19 @@ ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
0,
0,
0,
- UFSHCD_REQ_SENSE_SIZE,
+ UFS_SENSE_SIZE,
0};
char *buffer;
int ret;
- buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
+ buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
goto out;
}
ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
- UFSHCD_REQ_SENSE_SIZE, NULL, NULL,
+ UFS_SENSE_SIZE, NULL, NULL,
msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
if (ret)
pr_err("%s: failed with err %d\n", __func__, ret);
@@ -8105,6 +8188,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
hba->host = host;
hba->dev = dev;
*hba_handle = hba;
+ hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
INIT_LIST_HEAD(&hba->clk_list_head);
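ufs_get_bref_clk_from_hz() maps a clock rate onto the bRefClkFreq attribute values through a sentinel-terminated table; only the four rates UFS defines are accepted, and anything else yields REF_CLK_FREQ_INVAL so the attribute is left untouched. A self-contained sketch of the same lookup:

#include <stdio.h>

enum ref_clk { REF_19_2 = 0, REF_26 = 1, REF_38_4 = 2, REF_52 = 3, REF_INVAL = -1 };

struct ref_clk_map {
	unsigned long freq_hz;
	enum ref_clk val;
};

/* A zero freq_hz terminates the table, as in ufs_ref_clk_freqs[]. */
static const struct ref_clk_map map[] = {
	{ 19200000, REF_19_2 },
	{ 26000000, REF_26 },
	{ 38400000, REF_38_4 },
	{ 52000000, REF_52 },
	{ 0, REF_INVAL },
};

static enum ref_clk lookup(unsigned long freq)
{
	int i;

	for (i = 0; map[i].freq_hz; i++)
		if (map[i].freq_hz == freq)
			return map[i].val;
	return REF_INVAL;
}

int main(void)
{
	printf("%d %d\n", lookup(26000000), lookup(33000000));	/* 1 -1 */
	return 0;
}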
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 1a1c2b487a4e..69ba7445d2b3 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -550,6 +550,7 @@ struct ufs_hba {
void *priv;
unsigned int irq;
bool is_irq_enabled;
+ enum ufs_ref_clk_freq dev_ref_clk_freq;
/* Interrupt aggregation support is broken */
#define UFSHCD_QUIRK_BROKEN_INTR_AGGR 0x1
@@ -768,6 +769,7 @@ void ufshcd_remove(struct ufs_hba *);
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
u32 val, unsigned long interval_us,
unsigned long timeout_ms, bool can_sleep);
+void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
static inline void check_upiu_size(void)
{
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index c3c95b314286..772b976e4ee4 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -68,33 +68,6 @@ struct virtio_scsi_vq {
struct virtqueue *vq;
};
-/*
- * Per-target queue state.
- *
- * This struct holds the data needed by the queue steering policy. When a
- * target is sent multiple requests, we need to drive them to the same queue so
- * that FIFO processing order is kept. However, if a target was idle, we can
- * choose a queue arbitrarily. In this case the queue is chosen according to
- * the current VCPU, so the driver expects the number of request queues to be
- * equal to the number of VCPUs. This makes it easy and fast to select the
- * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
- * (each virtqueue's affinity is set to the CPU that "owns" the queue).
- *
- * tgt_seq is held to serialize reading and writing req_vq.
- *
- * Decrements of reqs are never concurrent with writes of req_vq: before the
- * decrement reqs will be != 0; after the decrement the virtqueue completion
- * routine will not use the req_vq so it can be changed by a new request.
- * Thus they can happen outside the tgt_seq, provided of course we make reqs
- * an atomic_t.
- */
-struct virtio_scsi_target_state {
- seqcount_t tgt_seq;
-
- /* Currently active virtqueue for requests sent to this target. */
- struct virtio_scsi_vq *req_vq;
-};
-
/* Driver instance state */
struct virtio_scsi {
struct virtio_device *vdev;
@@ -693,29 +666,6 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
return virtscsi_tmf(vscsi, cmd);
}
-static int virtscsi_target_alloc(struct scsi_target *starget)
-{
- struct Scsi_Host *sh = dev_to_shost(starget->dev.parent);
- struct virtio_scsi *vscsi = shost_priv(sh);
-
- struct virtio_scsi_target_state *tgt =
- kmalloc(sizeof(*tgt), GFP_KERNEL);
- if (!tgt)
- return -ENOMEM;
-
- seqcount_init(&tgt->tgt_seq);
- tgt->req_vq = &vscsi->req_vqs[0];
-
- starget->hostdata = tgt;
- return 0;
-}
-
-static void virtscsi_target_destroy(struct scsi_target *starget)
-{
- struct virtio_scsi_target_state *tgt = starget->hostdata;
- kfree(tgt);
-}
-
static int virtscsi_map_queues(struct Scsi_Host *shost)
{
struct virtio_scsi *vscsi = shost_priv(shost);
@@ -748,9 +698,6 @@ static struct scsi_host_template virtscsi_host_template = {
.slave_alloc = virtscsi_device_alloc,
.dma_boundary = UINT_MAX,
- .use_clustering = ENABLE_CLUSTERING,
- .target_alloc = virtscsi_target_alloc,
- .target_destroy = virtscsi_target_destroy,
.map_queues = virtscsi_map_queues,
.track_queue_depth = 1,
.force_blk_mq = 1,
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 0d6b2a88fc8e..ecee4b3ff073 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1007,7 +1007,6 @@ static struct scsi_host_template pvscsi_template = {
.sg_tablesize = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
.dma_boundary = UINT_MAX,
.max_sectors = 0xffff,
- .use_clustering = ENABLE_CLUSTERING,
.change_queue_depth = pvscsi_change_queue_depth,
.eh_abort_handler = pvscsi_abort,
.eh_device_reset_handler = pvscsi_device_reset,
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index 974bfb3f30f4..e3310e9488d2 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -153,8 +153,6 @@ static int wd719x_direct_cmd(struct wd719x *wd, u8 opcode, u8 dev, u8 lun,
static void wd719x_destroy(struct wd719x *wd)
{
- struct wd719x_scb *scb;
-
/* stop the RISC */
if (wd719x_direct_cmd(wd, WD719X_CMD_SLEEP, 0, 0, 0, 0,
WD719X_WAIT_FOR_RISC))
@@ -162,37 +160,35 @@ static void wd719x_destroy(struct wd719x *wd)
/* disable RISC */
wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, 0);
- /* free all SCBs */
- list_for_each_entry(scb, &wd->active_scbs, list)
- pci_free_consistent(wd->pdev, sizeof(struct wd719x_scb), scb,
- scb->phys);
- list_for_each_entry(scb, &wd->free_scbs, list)
- pci_free_consistent(wd->pdev, sizeof(struct wd719x_scb), scb,
- scb->phys);
+ WARN_ON_ONCE(!list_empty(&wd->active_scbs));
+
/* free internal buffers */
- pci_free_consistent(wd->pdev, wd->fw_size, wd->fw_virt, wd->fw_phys);
+ dma_free_coherent(&wd->pdev->dev, wd->fw_size, wd->fw_virt,
+ wd->fw_phys);
wd->fw_virt = NULL;
- pci_free_consistent(wd->pdev, WD719X_HASH_TABLE_SIZE, wd->hash_virt,
- wd->hash_phys);
+ dma_free_coherent(&wd->pdev->dev, WD719X_HASH_TABLE_SIZE, wd->hash_virt,
+ wd->hash_phys);
wd->hash_virt = NULL;
- pci_free_consistent(wd->pdev, sizeof(struct wd719x_host_param),
- wd->params, wd->params_phys);
+ dma_free_coherent(&wd->pdev->dev, sizeof(struct wd719x_host_param),
+ wd->params, wd->params_phys);
wd->params = NULL;
free_irq(wd->pdev->irq, wd);
}
-/* finish a SCSI command, mark SCB (if any) as free, unmap buffers */
-static void wd719x_finish_cmd(struct scsi_cmnd *cmd, int result)
+/* finish a SCSI command, unmap buffers */
+static void wd719x_finish_cmd(struct wd719x_scb *scb, int result)
{
+ struct scsi_cmnd *cmd = scb->cmd;
struct wd719x *wd = shost_priv(cmd->device->host);
- struct wd719x_scb *scb = (struct wd719x_scb *) cmd->host_scribble;
- if (scb) {
- list_move(&scb->list, &wd->free_scbs);
- dma_unmap_single(&wd->pdev->dev, cmd->SCp.dma_handle,
- SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
- scsi_dma_unmap(cmd);
- }
+ list_del(&scb->list);
+
+ dma_unmap_single(&wd->pdev->dev, scb->phys,
+ sizeof(struct wd719x_scb), DMA_BIDIRECTIONAL);
+ scsi_dma_unmap(cmd);
+ dma_unmap_single(&wd->pdev->dev, cmd->SCp.dma_handle,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+
cmd->result = result << 16;
cmd->scsi_done(cmd);
}
@@ -202,36 +198,10 @@ static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
int i, count_sg;
unsigned long flags;
- struct wd719x_scb *scb;
+ struct wd719x_scb *scb = scsi_cmd_priv(cmd);
struct wd719x *wd = shost_priv(sh);
- dma_addr_t phys;
- cmd->host_scribble = NULL;
-
- /* get a free SCB - either from existing ones or allocate a new one */
- spin_lock_irqsave(wd->sh->host_lock, flags);
- scb = list_first_entry_or_null(&wd->free_scbs, struct wd719x_scb, list);
- if (scb) {
- list_del(&scb->list);
- phys = scb->phys;
- } else {
- spin_unlock_irqrestore(wd->sh->host_lock, flags);
- scb = pci_alloc_consistent(wd->pdev, sizeof(struct wd719x_scb),
- &phys);
- spin_lock_irqsave(wd->sh->host_lock, flags);
- if (!scb) {
- dev_err(&wd->pdev->dev, "unable to allocate SCB\n");
- wd719x_finish_cmd(cmd, DID_ERROR);
- spin_unlock_irqrestore(wd->sh->host_lock, flags);
- return 0;
- }
- }
- memset(scb, 0, sizeof(struct wd719x_scb));
- list_add(&scb->list, &wd->active_scbs);
-
- scb->phys = phys;
scb->cmd = cmd;
- cmd->host_scribble = (char *) scb;
scb->CDB_tag = 0; /* Tagged queueing not supported yet */
scb->devid = cmd->device->id;
@@ -240,10 +210,19 @@ static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
/* copy the command */
memcpy(scb->CDB, cmd->cmnd, cmd->cmd_len);
+ /* map SCB */
+ scb->phys = dma_map_single(&wd->pdev->dev, scb, sizeof(*scb),
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(&wd->pdev->dev, scb->phys))
+ goto out_error;
+
/* map sense buffer */
scb->sense_buf_length = SCSI_SENSE_BUFFERSIZE;
cmd->SCp.dma_handle = dma_map_single(&wd->pdev->dev, cmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&wd->pdev->dev, cmd->SCp.dma_handle))
+ goto out_unmap_scb;
scb->sense_buf = cpu_to_le32(cmd->SCp.dma_handle);
/* request autosense */
@@ -258,11 +237,8 @@ static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
/* Scather/gather */
count_sg = scsi_dma_map(cmd);
- if (count_sg < 0) {
- wd719x_finish_cmd(cmd, DID_ERROR);
- spin_unlock_irqrestore(wd->sh->host_lock, flags);
- return 0;
- }
+ if (count_sg < 0)
+ goto out_unmap_sense;
BUG_ON(count_sg > WD719X_SG);
if (count_sg) {
@@ -283,19 +259,33 @@ static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
scb->data_p = 0;
}
+ spin_lock_irqsave(wd->sh->host_lock, flags);
+
/* check if the Command register is free */
if (wd719x_readb(wd, WD719X_AMR_COMMAND) != WD719X_CMD_READY) {
spin_unlock_irqrestore(wd->sh->host_lock, flags);
return SCSI_MLQUEUE_HOST_BUSY;
}
+ list_add(&scb->list, &wd->active_scbs);
+
/* write pointer to the AMR */
wd719x_writel(wd, WD719X_AMR_SCB_IN, scb->phys);
/* send SCB opcode */
wd719x_writeb(wd, WD719X_AMR_COMMAND, WD719X_CMD_PROCESS_SCB);
spin_unlock_irqrestore(wd->sh->host_lock, flags);
+ return 0;
+out_unmap_sense:
+ dma_unmap_single(&wd->pdev->dev, cmd->SCp.dma_handle,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+out_unmap_scb:
+ dma_unmap_single(&wd->pdev->dev, scb->phys, sizeof(*scb),
+ DMA_BIDIRECTIONAL);
+out_error:
+ cmd->result = DID_ERROR << 16;
+ cmd->scsi_done(cmd);
return 0;
}
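The wd719x queuecommand path now maps the SCB and the sense buffer with dma_map_single(), verifies each mapping with dma_mapping_error(), and unwinds already-made mappings in reverse order through labelled exits. A kernel-style sketch of that map/check/unwind pattern with stand-in parameters (not the driver's real context):

#include <linux/dma-mapping.h>

static int example_map(struct device *dev, void *a, void *b, size_t len,
		       dma_addr_t *da, dma_addr_t *db)
{
	*da = dma_map_single(dev, a, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *da))
		goto out_error;

	*db = dma_map_single(dev, b, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *db))
		goto out_unmap_a;

	return 0;

out_unmap_a:
	/* undo only what succeeded, in reverse order */
	dma_unmap_single(dev, *da, len, DMA_BIDIRECTIONAL);
out_error:
	return -ENOMEM;
}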
@@ -327,8 +317,8 @@ static int wd719x_chip_init(struct wd719x *wd)
wd->fw_size = ALIGN(fw_wcs->size, 4) + fw_risc->size;
if (!wd->fw_virt)
- wd->fw_virt = pci_alloc_consistent(wd->pdev, wd->fw_size,
- &wd->fw_phys);
+ wd->fw_virt = dma_alloc_coherent(&wd->pdev->dev, wd->fw_size,
+ &wd->fw_phys, GFP_KERNEL);
if (!wd->fw_virt) {
ret = -ENOMEM;
goto wd719x_init_end;
@@ -464,7 +454,7 @@ static int wd719x_abort(struct scsi_cmnd *cmd)
{
int action, result;
unsigned long flags;
- struct wd719x_scb *scb = (struct wd719x_scb *)cmd->host_scribble;
+ struct wd719x_scb *scb = scsi_cmd_priv(cmd);
struct wd719x *wd = shost_priv(cmd->device->host);
dev_info(&wd->pdev->dev, "abort command, tag: %x\n", cmd->tag);
@@ -526,10 +516,8 @@ static int wd719x_host_reset(struct scsi_cmnd *cmd)
result = FAILED;
/* flush all SCBs */
- list_for_each_entry_safe(scb, tmp, &wd->active_scbs, list) {
- struct scsi_cmnd *tmp_cmd = scb->cmd;
- wd719x_finish_cmd(tmp_cmd, result);
- }
+ list_for_each_entry_safe(scb, tmp, &wd->active_scbs, list)
+ wd719x_finish_cmd(scb, result);
spin_unlock_irqrestore(wd->sh->host_lock, flags);
return result;
@@ -555,7 +543,6 @@ static inline void wd719x_interrupt_SCB(struct wd719x *wd,
union wd719x_regs regs,
struct wd719x_scb *scb)
{
- struct scsi_cmnd *cmd;
int result;
/* now have to find result from card */
@@ -643,9 +630,8 @@ static inline void wd719x_interrupt_SCB(struct wd719x *wd,
result = DID_ERROR;
break;
}
- cmd = scb->cmd;
- wd719x_finish_cmd(cmd, result);
+ wd719x_finish_cmd(scb, result);
}
static irqreturn_t wd719x_interrupt(int irq, void *dev_id)
@@ -809,7 +795,6 @@ static int wd719x_board_found(struct Scsi_Host *sh)
int ret;
INIT_LIST_HEAD(&wd->active_scbs);
- INIT_LIST_HEAD(&wd->free_scbs);
sh->base = pci_resource_start(wd->pdev, 0);
@@ -820,17 +805,18 @@ static int wd719x_board_found(struct Scsi_Host *sh)
wd->fw_virt = NULL;
/* memory area for host (EEPROM) parameters */
- wd->params = pci_alloc_consistent(wd->pdev,
- sizeof(struct wd719x_host_param),
- &wd->params_phys);
+ wd->params = dma_alloc_coherent(&wd->pdev->dev,
+ sizeof(struct wd719x_host_param),
+ &wd->params_phys, GFP_KERNEL);
if (!wd->params) {
dev_warn(&wd->pdev->dev, "unable to allocate parameter buffer\n");
return -ENOMEM;
}
/* memory area for the RISC for hash table of outstanding requests */
- wd->hash_virt = pci_alloc_consistent(wd->pdev, WD719X_HASH_TABLE_SIZE,
- &wd->hash_phys);
+ wd->hash_virt = dma_alloc_coherent(&wd->pdev->dev,
+ WD719X_HASH_TABLE_SIZE,
+ &wd->hash_phys, GFP_KERNEL);
if (!wd->hash_virt) {
dev_warn(&wd->pdev->dev, "unable to allocate hash buffer\n");
ret = -ENOMEM;
@@ -862,10 +848,10 @@ static int wd719x_board_found(struct Scsi_Host *sh)
fail_free_irq:
free_irq(wd->pdev->irq, wd);
fail_free_hash:
- pci_free_consistent(wd->pdev, WD719X_HASH_TABLE_SIZE, wd->hash_virt,
+ dma_free_coherent(&wd->pdev->dev, WD719X_HASH_TABLE_SIZE, wd->hash_virt,
wd->hash_phys);
fail_free_params:
- pci_free_consistent(wd->pdev, sizeof(struct wd719x_host_param),
+ dma_free_coherent(&wd->pdev->dev, sizeof(struct wd719x_host_param),
wd->params, wd->params_phys);
return ret;
@@ -874,6 +860,7 @@ fail_free_params:
static struct scsi_host_template wd719x_template = {
.module = THIS_MODULE,
.name = "Western Digital 719x",
+ .cmd_size = sizeof(struct wd719x_scb),
.queuecommand = wd719x_queuecommand,
.eh_abort_handler = wd719x_abort,
.eh_device_reset_handler = wd719x_dev_reset,
@@ -884,7 +871,6 @@ static struct scsi_host_template wd719x_template = {
.can_queue = 255,
.this_id = 7,
.sg_tablesize = WD719X_SG,
- .use_clustering = ENABLE_CLUSTERING,
};
static int wd719x_pci_probe(struct pci_dev *pdev, const struct pci_device_id *d)
@@ -897,7 +883,7 @@ static int wd719x_pci_probe(struct pci_dev *pdev, const struct pci_device_id *d)
if (err)
goto fail;
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
dev_warn(&pdev->dev, "Unable to set 32-bit DMA mask\n");
goto disable_device;
}
diff --git a/drivers/scsi/wd719x.h b/drivers/scsi/wd719x.h
index 0455b1633ca7..abaabd419a54 100644
--- a/drivers/scsi/wd719x.h
+++ b/drivers/scsi/wd719x.h
@@ -74,7 +74,6 @@ struct wd719x {
void *hash_virt; /* hash table CPU address */
dma_addr_t hash_phys; /* hash table bus address */
struct list_head active_scbs;
- struct list_head free_scbs;
};
/* timeout delays in microsecs */
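Dropping the free_scbs pool works because the host template now sets .cmd_size, so the SCSI midlayer allocates a zeroed struct wd719x_scb alongside every command and scsi_cmd_priv() returns it. A minimal sketch of the convention (queuecommand context elided, names illustrative):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct example_scb {
	u32 tag;	/* per-command driver state lives here */
};

/* With .cmd_size = sizeof(struct example_scb) in the host template,
 * the midlayer allocates this area with each command and
 * scsi_cmd_priv() returns it, replacing hand-rolled SCB pools. */
static int example_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
	struct example_scb *scb = scsi_cmd_priv(cmd);

	scb->tag = 0;
	return 0;
}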
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 61389bdc7926..f0068e96a177 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -696,7 +696,6 @@ static struct scsi_host_template scsifront_sht = {
.this_id = -1,
.cmd_size = sizeof(struct vscsifrnt_shadow),
.sg_tablesize = VSCSIIF_SG_TABLESIZE,
- .use_clustering = DISABLE_CLUSTERING,
.proc_name = "scsifront",
};
@@ -1112,7 +1111,7 @@ static void scsifront_backend_changed(struct xenbus_device *dev,
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* Missed the backend's Closing state -- fallthrough */
+ /* fall through - Missed the backend's Closing state */
case XenbusStateClosing:
scsifront_disconnect(info);
break;
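The fall-through comment rewording matters because GCC's -Wimplicit-fallthrough only silences the warning when a recognized marker sits immediately before the next case label. A self-contained example of the annotated switch:

#include <stdio.h>

enum state { CLOSING, CLOSED };

static void backend_changed(enum state s, int already_closed)
{
	switch (s) {
	case CLOSED:
		if (already_closed)
			break;
		/* fall through - missed the Closing state */
	case CLOSING:
		printf("disconnect\n");
		break;
	}
}

int main(void)
{
	backend_changed(CLOSED, 0);
	return 0;
}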
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 69e6abe14abf..c57d66a7405f 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -237,12 +237,6 @@ static struct scsi_host_template rtsx_host_template = {
/* limit the total size of a transfer to 120 KB */
.max_sectors = 240,
- /* merge commands... this seems to help performance, but
- * periodically someone should test to see which setting is more
- * optimal.
- */
- .use_clustering = 1,
-
/* emulated HBA */
.emulated = 1,
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index 4fc521c51c0e..5cf93e8eb77c 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -645,7 +645,6 @@ static struct scsi_host_template visorhba_driver_template = {
.this_id = -1,
.slave_alloc = visorhba_slave_alloc,
.slave_destroy = visorhba_slave_destroy,
- .use_clustering = ENABLE_CLUSTERING,
};
/*
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index c1d5a173553d..984941e036c8 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1493,8 +1493,6 @@ __iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
iscsit_stop_dataout_timer(cmd);
- transport_check_aborted_status(se_cmd,
- (hdr->flags & ISCSI_FLAG_CMD_FINAL));
return iscsit_dump_data_payload(conn, payload_length, 1);
}
} else {
@@ -1509,12 +1507,9 @@ __iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
* TASK_ABORTED status.
*/
if (se_cmd->transport_state & CMD_T_ABORTED) {
- if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
- if (--cmd->outstanding_r2ts < 1) {
- iscsit_stop_dataout_timer(cmd);
- transport_check_aborted_status(
- se_cmd, 1);
- }
+ if (hdr->flags & ISCSI_FLAG_CMD_FINAL &&
+ --cmd->outstanding_r2ts < 1)
+ iscsit_stop_dataout_timer(cmd);
return iscsit_dump_data_payload(conn, payload_length, 1);
}
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 95d0a22b2ad6..a5481dfeae8d 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1343,11 +1343,6 @@ static struct configfs_attribute *lio_target_discovery_auth_attrs[] = {
/* Start functions for target_core_fabric_ops */
-static char *iscsi_get_fabric_name(void)
-{
- return "iSCSI";
-}
-
static int iscsi_get_cmd_state(struct se_cmd *se_cmd)
{
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
@@ -1549,9 +1544,9 @@ static void lio_release_cmd(struct se_cmd *se_cmd)
const struct target_core_fabric_ops iscsi_ops = {
.module = THIS_MODULE,
- .name = "iscsi",
+ .fabric_alias = "iscsi",
+ .fabric_name = "iSCSI",
.node_acl_size = sizeof(struct iscsi_node_acl),
- .get_fabric_name = iscsi_get_fabric_name,
.tpg_get_wwn = lio_tpg_get_endpoint_wwn,
.tpg_get_tag = lio_tpg_get_tag,
.tpg_get_default_depth = lio_tpg_get_default_depth,
@@ -1596,4 +1591,6 @@ const struct target_core_fabric_ops iscsi_ops = {
.tfc_tpg_nacl_attrib_attrs = lio_target_nacl_attrib_attrs,
.tfc_tpg_nacl_auth_attrs = lio_target_nacl_auth_attrs,
.tfc_tpg_nacl_param_attrs = lio_target_nacl_param_attrs,
+
+ .write_pending_must_be_called = true,
};
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index a211e8154f4c..1b54a9c70851 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -943,20 +943,8 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
return 0;
}
spin_unlock_bh(&cmd->istate_lock);
- /*
- * Determine if delayed TASK_ABORTED status for WRITEs
- * should be sent now if no unsolicited data out
- * payloads are expected, or if the delayed status
- * should be sent after unsolicited data out with
- * ISCSI_FLAG_CMD_FINAL set in iscsi_handle_data_out()
- */
- if (transport_check_aborted_status(se_cmd,
- (cmd->unsolicited_data == 0)) != 0)
+ if (cmd->se_cmd.transport_state & CMD_T_ABORTED)
return 0;
- /*
- * Otherwise send CHECK_CONDITION and sense for
- * exception
- */
return transport_send_check_condition_and_sense(se_cmd,
cmd->sense_reason, 0);
}
@@ -974,13 +962,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
if (!(cmd->cmd_flags &
ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
- /*
- * Send the delayed TASK_ABORTED status for
- * WRITEs if no more unsolicitied data is
- * expected.
- */
- if (transport_check_aborted_status(se_cmd, 1)
- != 0)
+ if (cmd->se_cmd.transport_state & CMD_T_ABORTED)
return 0;
iscsit_set_dataout_sequence_values(cmd);
@@ -995,11 +977,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
if ((cmd->data_direction == DMA_TO_DEVICE) &&
!(cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
- /*
- * Send the delayed TASK_ABORTED status for WRITEs if
- * no more nsolicitied data is expected.
- */
- if (transport_check_aborted_status(se_cmd, 1) != 0)
+ if (cmd->se_cmd.transport_state & CMD_T_ABORTED)
return 0;
iscsit_set_unsoliticed_dataout(cmd);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index bc8918f382e4..7bd7c0c0db6f 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -324,7 +324,7 @@ static struct scsi_host_template tcm_loop_driver_template = {
.sg_tablesize = 256,
.cmd_per_lun = 1024,
.max_sectors = 0xFFFF,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
.slave_alloc = tcm_loop_slave_alloc,
.module = THIS_MODULE,
.track_queue_depth = 1,
@@ -460,11 +460,6 @@ static void tcm_loop_release_core_bus(void)
pr_debug("Releasing TCM Loop Core BUS\n");
}
-static char *tcm_loop_get_fabric_name(void)
-{
- return "loopback";
-}
-
static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
{
return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
@@ -1149,8 +1144,7 @@ static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
static const struct target_core_fabric_ops loop_ops = {
.module = THIS_MODULE,
- .name = "loopback",
- .get_fabric_name = tcm_loop_get_fabric_name,
+ .fabric_name = "loopback",
.tpg_get_wwn = tcm_loop_get_endpoint_wwn,
.tpg_get_tag = tcm_loop_get_tag,
.tpg_check_demo_mode = tcm_loop_check_demo_mode,
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 3d10189ecedc..08cee13dfb9a 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1694,11 +1694,6 @@ static int sbp_check_false(struct se_portal_group *se_tpg)
return 0;
}
-static char *sbp_get_fabric_name(void)
-{
- return "sbp";
-}
-
static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
{
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
@@ -2323,8 +2318,7 @@ static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
static const struct target_core_fabric_ops sbp_ops = {
.module = THIS_MODULE,
- .name = "sbp",
- .get_fabric_name = sbp_get_fabric_name,
+ .fabric_name = "sbp",
.tpg_get_wwn = sbp_get_fabric_wwn,
.tpg_get_tag = sbp_get_tag,
.tpg_check_demo_mode = sbp_check_true,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 4f134b0c3e29..6b0d9beacf90 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -451,7 +451,7 @@ static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
pr_debug("[%s]: ALUA TG Port not available, "
"SenseKey: NOT_READY, ASC/ASCQ: "
"0x04/0x%02x\n",
- cmd->se_tfo->get_fabric_name(), alua_ascq);
+ cmd->se_tfo->fabric_name, alua_ascq);
cmd->scsi_asc = 0x04;
cmd->scsi_ascq = alua_ascq;
@@ -1229,13 +1229,13 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
- db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
+ db_root, se_tpg->se_tpg_tfo->fabric_name,
se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
lun->unpacked_lun);
} else {
path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
- db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
+ db_root, se_tpg->se_tpg_tfo->fabric_name,
se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
lun->unpacked_lun);
}
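Replacing the get_fabric_name() callback with a const fabric_name string (plus an optional fabric_alias) lets configfs keep matching the historic directory name, e.g. "iscsi", while the fabric reports itself as "iSCSI". A simplified stand-alone sketch of that lookup; the struct below is a stand-in for target_core_fabric_ops, not the real type:

#include <stdio.h>
#include <string.h>

struct fabric_ops {
	const char *fabric_alias;	/* optional configfs name, e.g. "iscsi" */
	const char *fabric_name;	/* reported name, e.g. "iSCSI" */
};

static int fabric_matches(const struct fabric_ops *ops, const char *name)
{
	const char *cmp_name = ops->fabric_alias;

	if (!cmp_name)
		cmp_name = ops->fabric_name;
	return !strcmp(cmp_name, name);
}

int main(void)
{
	struct fabric_ops iscsi = { .fabric_alias = "iscsi", .fabric_name = "iSCSI" };
	struct fabric_ops loop = { .fabric_name = "loopback" };

	printf("%d %d\n", fabric_matches(&iscsi, "iscsi"),
	       fabric_matches(&loop, "loopback"));	/* 1 1 */
	return 0;
}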
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index f6b1549f4142..72016d0dfca5 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -172,7 +172,10 @@ static struct target_fabric_configfs *target_core_get_fabric(
mutex_lock(&g_tf_lock);
list_for_each_entry(tf, &g_tf_list, tf_list) {
- if (!strcmp(tf->tf_ops->name, name)) {
+		const char *cmp_name = tf->tf_ops->fabric_alias;
+
+ if (!cmp_name)
+ cmp_name = tf->tf_ops->fabric_name;
+ if (!strcmp(cmp_name, name)) {
atomic_inc(&tf->tf_access_cnt);
mutex_unlock(&g_tf_lock);
return tf;
@@ -249,7 +252,7 @@ static struct config_group *target_core_register_fabric(
return ERR_PTR(-EINVAL);
}
pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
- " %s\n", tf->tf_ops->name);
+ " %s\n", tf->tf_ops->fabric_name);
/*
* On a successful target_core_get_fabric() look, the returned
* struct target_fabric_configfs *tf will contain a usage reference.
@@ -282,7 +285,7 @@ static void target_core_deregister_fabric(
" tf list\n", config_item_name(item));
pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
- " %s\n", tf->tf_ops->name);
+ " %s\n", tf->tf_ops->fabric_name);
atomic_dec(&tf->tf_access_cnt);
pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
@@ -342,17 +345,20 @@ EXPORT_SYMBOL(target_undepend_item);
static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
{
- if (!tfo->name) {
- pr_err("Missing tfo->name\n");
- return -EINVAL;
+ if (tfo->fabric_alias) {
+ if (strlen(tfo->fabric_alias) >= TARGET_FABRIC_NAME_SIZE) {
+ pr_err("Passed alias: %s exceeds "
+ "TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_alias);
+ return -EINVAL;
+ }
}
- if (strlen(tfo->name) >= TARGET_FABRIC_NAME_SIZE) {
- pr_err("Passed name: %s exceeds TARGET_FABRIC"
- "_NAME_SIZE\n", tfo->name);
+ if (!tfo->fabric_name) {
+ pr_err("Missing tfo->fabric_name\n");
return -EINVAL;
}
- if (!tfo->get_fabric_name) {
- pr_err("Missing tfo->get_fabric_name()\n");
+ if (strlen(tfo->fabric_name) >= TARGET_FABRIC_NAME_SIZE) {
+ pr_err("Passed name: %s exceeds "
+ "TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_name);
return -EINVAL;
}
if (!tfo->tpg_get_wwn) {
@@ -486,7 +492,7 @@ void target_unregister_template(const struct target_core_fabric_ops *fo)
mutex_lock(&g_tf_lock);
list_for_each_entry(t, &g_tf_list, tf_list) {
- if (!strcmp(t->tf_ops->name, fo->name)) {
+ if (!strcmp(t->tf_ops->fabric_name, fo->fabric_name)) {
BUG_ON(atomic_read(&t->tf_access_cnt));
list_del(&t->tf_list);
mutex_unlock(&g_tf_lock);
@@ -532,9 +538,9 @@ DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpu);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpws);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_caw);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_3pc);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_pr);
DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_type);
DEF_CONFIGFS_ATTRIB_SHOW(hw_pi_prot_type);
-DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_format);
DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_verify);
DEF_CONFIGFS_ATTRIB_SHOW(enforce_pr_isids);
DEF_CONFIGFS_ATTRIB_SHOW(is_nonrot);
@@ -592,6 +598,7 @@ static ssize_t _name##_store(struct config_item *item, const char *page, \
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_fua_write);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_3pc);
+DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_pr);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(enforce_pr_isids);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(is_nonrot);
@@ -613,12 +620,17 @@ static void dev_set_t10_wwn_model_alias(struct se_device *dev)
const char *configname;
configname = config_item_name(&dev->dev_group.cg_item);
- if (strlen(configname) >= 16) {
+ if (strlen(configname) >= INQUIRY_MODEL_LEN) {
pr_warn("dev[%p]: Backstore name '%s' is too long for "
- "INQUIRY_MODEL, truncating to 16 bytes\n", dev,
+ "INQUIRY_MODEL, truncating to 15 characters\n", dev,
configname);
}
- snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
+ /*
+ * XXX We can't use sizeof(dev->t10_wwn.model) (INQUIRY_MODEL_LEN + 1)
+ * here without potentially breaking existing setups, so continue to
+ * truncate one byte shorter than what can be carried in INQUIRY.
+ */
+ strlcpy(dev->t10_wwn.model, configname, INQUIRY_MODEL_LEN);
}
static ssize_t emulate_model_alias_store(struct config_item *item,
@@ -640,11 +652,12 @@ static ssize_t emulate_model_alias_store(struct config_item *item,
if (ret < 0)
return ret;
+ BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
if (flag) {
dev_set_t10_wwn_model_alias(dev);
} else {
- strncpy(&dev->t10_wwn.model[0],
- dev->transport->inquiry_prod, 16);
+ strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
+ sizeof(dev->t10_wwn.model));
}
da->emulate_model_alias = flag;
return count;
@@ -1116,9 +1129,10 @@ CONFIGFS_ATTR(, emulate_tpu);
CONFIGFS_ATTR(, emulate_tpws);
CONFIGFS_ATTR(, emulate_caw);
CONFIGFS_ATTR(, emulate_3pc);
+CONFIGFS_ATTR(, emulate_pr);
CONFIGFS_ATTR(, pi_prot_type);
CONFIGFS_ATTR_RO(, hw_pi_prot_type);
-CONFIGFS_ATTR(, pi_prot_format);
+CONFIGFS_ATTR_WO(, pi_prot_format);
CONFIGFS_ATTR(, pi_prot_verify);
CONFIGFS_ATTR(, enforce_pr_isids);
CONFIGFS_ATTR(, is_nonrot);
@@ -1156,6 +1170,7 @@ struct configfs_attribute *sbc_attrib_attrs[] = {
&attr_emulate_tpws,
&attr_emulate_caw,
&attr_emulate_3pc,
+ &attr_emulate_pr,
&attr_pi_prot_type,
&attr_hw_pi_prot_type,
&attr_pi_prot_format,
@@ -1211,6 +1226,74 @@ static struct t10_wwn *to_t10_wwn(struct config_item *item)
}
/*
+ * STANDARD and VPD page 0x83 T10 Vendor Identification
+ */
+static ssize_t target_wwn_vendor_id_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%s\n", &to_t10_wwn(item)->vendor[0]);
+}
+
+static ssize_t target_wwn_vendor_id_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct t10_wwn *t10_wwn = to_t10_wwn(item);
+ struct se_device *dev = t10_wwn->t10_dev;
+ /* +2 to allow for a trailing (stripped) '\n' and null-terminator */
+ unsigned char buf[INQUIRY_VENDOR_LEN + 2];
+ char *stripped = NULL;
+ size_t len;
+ int i;
+
+ len = strlcpy(buf, page, sizeof(buf));
+ if (len < sizeof(buf)) {
+ /* Strip any newline added from userspace. */
+ stripped = strstrip(buf);
+ len = strlen(stripped);
+ }
+ if (len > INQUIRY_VENDOR_LEN) {
+ pr_err("Emulated T10 Vendor Identification exceeds"
+ " INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN)
+ "\n");
+ return -EOVERFLOW;
+ }
+
+ /*
+ * SPC 4.3.1:
+ * ASCII data fields shall contain only ASCII printable characters (i.e.,
+ * code values 20h to 7Eh) and may be terminated with one or more ASCII
+ * null (00h) characters.
+ */
+ for (i = 0; i < len; i++) {
+ if ((stripped[i] < 0x20) || (stripped[i] > 0x7E)) {
+ pr_err("Emulated T10 Vendor Identification contains"
+ " non-ASCII-printable characters\n");
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Check to see if any active exports exist. If they do exist, fail
+ * here as changing this information on the fly (underneath the
+ * initiator side OS dependent multipath code) could cause negative
+ * effects.
+ */
+ if (dev->export_count) {
+ pr_err("Unable to set T10 Vendor Identification while"
+ " active %d exports exist\n", dev->export_count);
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON(sizeof(dev->t10_wwn.vendor) != INQUIRY_VENDOR_LEN + 1);
+ strlcpy(dev->t10_wwn.vendor, stripped, sizeof(dev->t10_wwn.vendor));
+
+ pr_debug("Target_Core_ConfigFS: Set emulated T10 Vendor Identification:"
+ " %s\n", dev->t10_wwn.vendor);
+
+ return count;
+}
+
+/*
* VPD page 0x80 Unit serial
*/
static ssize_t target_wwn_vpd_unit_serial_show(struct config_item *item,
@@ -1356,6 +1439,7 @@ DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
/* VPD page 0x83 Association: SCSI Target Device */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
+CONFIGFS_ATTR(target_wwn_, vendor_id);
CONFIGFS_ATTR(target_wwn_, vpd_unit_serial);
CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit);
@@ -1363,6 +1447,7 @@ CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_target_port);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device);
static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
+ &target_wwn_attr_vendor_id,
&target_wwn_attr_vpd_unit_serial,
&target_wwn_attr_vpd_protocol_identifier,
&target_wwn_attr_vpd_assoc_logical_unit,
@@ -1400,7 +1485,7 @@ static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
- se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+ se_nacl->se_tpg->se_tpg_tfo->fabric_name,
se_nacl->initiatorname, i_buf);
}
@@ -1414,7 +1499,7 @@ static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
if (se_nacl) {
len = sprintf(page,
"SPC-2 Reservation: %s Initiator: %s\n",
- se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+ se_nacl->se_tpg->se_tpg_tfo->fabric_name,
se_nacl->initiatorname);
} else {
len = sprintf(page, "No SPC-2 Reservation holder\n");
@@ -1427,6 +1512,9 @@ static ssize_t target_pr_res_holder_show(struct config_item *item, char *page)
struct se_device *dev = pr_to_dev(item);
int ret;
+ if (!dev->dev_attrib.emulate_pr)
+ return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
+
if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
return sprintf(page, "Passthrough\n");
@@ -1489,13 +1577,13 @@ static ssize_t target_pr_res_pr_holder_tg_port_show(struct config_item *item,
tfo = se_tpg->se_tpg_tfo;
len += sprintf(page+len, "SPC-3 Reservation: %s"
- " Target Node Endpoint: %s\n", tfo->get_fabric_name(),
+ " Target Node Endpoint: %s\n", tfo->fabric_name,
tfo->tpg_get_wwn(se_tpg));
len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
" Identifier Tag: %hu %s Portal Group Tag: %hu"
" %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi,
- tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
- tfo->get_fabric_name(), pr_reg->pr_aptpl_target_lun);
+ tfo->fabric_name, tfo->tpg_get_tag(se_tpg),
+ tfo->fabric_name, pr_reg->pr_aptpl_target_lun);
out_unlock:
spin_unlock(&dev->dev_reservation_lock);
@@ -1526,7 +1614,7 @@ static ssize_t target_pr_res_pr_registered_i_pts_show(struct config_item *item,
core_pr_dump_initiator_port(pr_reg, i_buf,
PR_REG_ISID_ID_LEN);
sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
- tfo->get_fabric_name(),
+ tfo->fabric_name,
pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key,
pr_reg->pr_res_generation);
@@ -1567,12 +1655,14 @@ static ssize_t target_pr_res_type_show(struct config_item *item, char *page)
{
struct se_device *dev = pr_to_dev(item);
+ if (!dev->dev_attrib.emulate_pr)
+ return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
return sprintf(page, "SPC_PASSTHROUGH\n");
- else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
return sprintf(page, "SPC2_RESERVATIONS\n");
- else
- return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
+
+ return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
}
static ssize_t target_pr_res_aptpl_active_show(struct config_item *item,
@@ -1580,7 +1670,8 @@ static ssize_t target_pr_res_aptpl_active_show(struct config_item *item,
{
struct se_device *dev = pr_to_dev(item);
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
+ if (!dev->dev_attrib.emulate_pr ||
+ (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
return 0;
return sprintf(page, "APTPL Bit Status: %s\n",
@@ -1592,7 +1683,8 @@ static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item,
{
struct se_device *dev = pr_to_dev(item);
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
+ if (!dev->dev_attrib.emulate_pr ||
+ (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
return 0;
return sprintf(page, "Ready to process PR APTPL metadata..\n");
@@ -1638,7 +1730,8 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
u16 tpgt = 0;
u8 type = 0;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
+ if (!dev->dev_attrib.emulate_pr ||
+ (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
return count;
if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
return count;
@@ -2746,7 +2839,7 @@ static ssize_t target_tg_pt_gp_members_show(struct config_item *item,
struct se_portal_group *tpg = lun->lun_tpg;
cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
- "/%s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ "/%s\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_wwn(tpg),
tpg->se_tpg_tfo->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item));
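
The vendor_id store added above follows a common configfs-store pattern: bound the input, strip the trailing newline userspace tends to append, validate against SPC 4.3.1's printable-ASCII rule (20h-7Eh), and only then copy into the fixed-width field. Below is a minimal userspace sketch of that validate-and-copy logic, assuming only the 8-byte INQUIRY_VENDOR_LEN and the errno values from the hunk; the helper name is illustrative, not the kernel function:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #define INQUIRY_VENDOR_LEN 8    /* fixed-width T10 Vendor Identification */

    /* Mirror of the checks in target_wwn_vendor_id_store(): strip a trailing
     * newline, bound the length, allow only printable ASCII (20h-7Eh). */
    static int set_vendor_id(char dst[INQUIRY_VENDOR_LEN + 1], const char *src)
    {
        char buf[INQUIRY_VENDOR_LEN + 2];   /* +2: '\n' and NUL, as above */
        size_t len;

        if (snprintf(buf, sizeof(buf), "%s", src) >= (int)sizeof(buf))
            return -EOVERFLOW;              /* too long even before stripping */

        buf[strcspn(buf, "\n")] = '\0';     /* strip a trailing newline */
        len = strlen(buf);
        if (len > INQUIRY_VENDOR_LEN)
            return -EOVERFLOW;

        for (size_t i = 0; i < len; i++)
            if (buf[i] < 0x20 || buf[i] > 0x7E)
                return -EINVAL;             /* SPC 4.3.1: printable ASCII only */

        memcpy(dst, buf, len + 1);
        return 0;
    }

    int main(void)
    {
        char vendor[INQUIRY_VENDOR_LEN + 1] = "LIO-ORG";

        printf("%d\n", set_vendor_id(vendor, "ACME\n"));        /* 0 */
        printf("%s\n", vendor);                                 /* ACME */
        printf("%d\n", set_vendor_id(vendor, "WAY-TOO-LONG"));  /* -EOVERFLOW */
        return 0;
    }
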
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 47b5ef153135..93c56f4a9911 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -95,7 +95,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
deve->lun_access_ro) {
pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
" Access for 0x%08llx\n",
- se_cmd->se_tfo->get_fabric_name(),
+ se_cmd->se_tfo->fabric_name,
unpacked_lun);
rcu_read_unlock();
ret = TCM_WRITE_PROTECTED;
@@ -114,7 +114,7 @@ out_unlock:
if (unpacked_lun != 0) {
pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
" Access for 0x%08llx\n",
- se_cmd->se_tfo->get_fabric_name(),
+ se_cmd->se_tfo->fabric_name,
unpacked_lun);
return TCM_NON_EXISTENT_LUN;
}
@@ -188,7 +188,7 @@ out_unlock:
if (!se_lun) {
pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
" Access for 0x%08llx\n",
- se_cmd->se_tfo->get_fabric_name(),
+ se_cmd->se_tfo->fabric_name,
unpacked_lun);
return -ENODEV;
}
@@ -237,7 +237,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
if (!lun) {
pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
- tpg->se_tpg_tfo->get_fabric_name());
+ tpg->se_tpg_tfo->fabric_name);
continue;
}
if (lun->lun_rtpi != rtpi)
@@ -571,9 +571,9 @@ int core_dev_add_lun(
return rc;
pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
- " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
+ " CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
- tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
+ tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
/*
* Update LUN maps for dynamically added initiators when
* generate_node_acl is enabled.
@@ -604,9 +604,9 @@ void core_dev_del_lun(
struct se_lun *lun)
{
pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
- " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
+ " device object\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
- tpg->se_tpg_tfo->get_fabric_name());
+ tpg->se_tpg_tfo->fabric_name);
core_tpg_remove_lun(tpg, lun);
}
@@ -621,7 +621,7 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
pr_err("%s InitiatorName exceeds maximum size.\n",
- tpg->se_tpg_tfo->get_fabric_name());
+ tpg->se_tpg_tfo->fabric_name);
*ret = -EOVERFLOW;
return NULL;
}
@@ -664,7 +664,7 @@ int core_dev_add_initiator_node_lun_acl(
return -EINVAL;
pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
- " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ " InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
lun_access_ro ? "RO" : "RW",
nacl->initiatorname);
@@ -697,7 +697,7 @@ int core_dev_del_initiator_node_lun_acl(
pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
" InitiatorNode: %s Mapped LUN: %llu\n",
- tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
nacl->initiatorname, lacl->mapped_lun);
@@ -709,9 +709,9 @@ void core_dev_free_initiator_node_lun_acl(
struct se_lun_acl *lacl)
{
pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
- " Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(),
+ " Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg),
- tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->fabric_name,
lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);
kfree(lacl);
@@ -720,36 +720,17 @@ void core_dev_free_initiator_node_lun_acl(
static void scsi_dump_inquiry(struct se_device *dev)
{
struct t10_wwn *wwn = &dev->t10_wwn;
- char buf[17];
- int i, device_type;
+ int device_type = dev->transport->get_device_type(dev);
+
/*
* Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
*/
- for (i = 0; i < 8; i++)
- if (wwn->vendor[i] >= 0x20)
- buf[i] = wwn->vendor[i];
- else
- buf[i] = ' ';
- buf[i] = '\0';
- pr_debug(" Vendor: %s\n", buf);
-
- for (i = 0; i < 16; i++)
- if (wwn->model[i] >= 0x20)
- buf[i] = wwn->model[i];
- else
- buf[i] = ' ';
- buf[i] = '\0';
- pr_debug(" Model: %s\n", buf);
-
- for (i = 0; i < 4; i++)
- if (wwn->revision[i] >= 0x20)
- buf[i] = wwn->revision[i];
- else
- buf[i] = ' ';
- buf[i] = '\0';
- pr_debug(" Revision: %s\n", buf);
-
- device_type = dev->transport->get_device_type(dev);
+ pr_debug(" Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
+ wwn->vendor);
+ pr_debug(" Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
+ wwn->model);
+ pr_debug(" Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
+ wwn->revision);
pr_debug(" Type: %s ", scsi_device_type(device_type));
}
@@ -805,6 +786,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
+ dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
@@ -822,13 +804,19 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
xcopy_lun = &dev->xcopy_lun;
rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
- init_completion(&xcopy_lun->lun_ref_comp);
init_completion(&xcopy_lun->lun_shutdown_comp);
INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
xcopy_lun->lun_tpg = &xcopy_pt_tpg;
+ /* Preload the default INQUIRY const values */
+ strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
+ strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
+ sizeof(dev->t10_wwn.model));
+ strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
+ sizeof(dev->t10_wwn.revision));
+
return dev;
}
@@ -987,35 +975,10 @@ int target_configure_device(struct se_device *dev)
goto out_destroy_device;
/*
- * Startup the struct se_device processing thread
- */
- dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
- dev->transport->name);
- if (!dev->tmr_wq) {
- pr_err("Unable to create tmr workqueue for %s\n",
- dev->transport->name);
- ret = -ENOMEM;
- goto out_free_alua;
- }
-
- /*
* Setup work_queue for QUEUE_FULL
*/
INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
- /*
- * Preload the initial INQUIRY const values if we are doing
- * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
- * passthrough because this is being provided by the backend LLD.
- */
- if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
- strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
- strncpy(&dev->t10_wwn.model[0],
- dev->transport->inquiry_prod, 16);
- strncpy(&dev->t10_wwn.revision[0],
- dev->transport->inquiry_rev, 4);
- }
-
scsi_dump_inquiry(dev);
spin_lock(&hba->device_lock);
@@ -1026,8 +989,6 @@ int target_configure_device(struct se_device *dev)
return 0;
-out_free_alua:
- core_alua_free_lu_gp_mem(dev);
out_destroy_device:
dev->transport->destroy_device(dev);
out_free_index:
@@ -1046,8 +1007,6 @@ void target_free_device(struct se_device *dev)
WARN_ON(!list_empty(&dev->dev_sep_list));
if (target_dev_configured(dev)) {
- destroy_workqueue(dev->tmr_wq);
-
dev->transport->destroy_device(dev);
mutex_lock(&device_mutex);
@@ -1159,6 +1118,18 @@ passthrough_parse_cdb(struct se_cmd *cmd,
}
/*
+ * With emulate_pr disabled, all reservation requests should fail,
+ * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
+ */
+ if (!dev->dev_attrib.emulate_pr &&
+ ((cdb[0] == PERSISTENT_RESERVE_IN) ||
+ (cdb[0] == PERSISTENT_RESERVE_OUT) ||
+ (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
+ (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ /*
* For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
* emulate the response, since tcmu does not have the information
* required to process these commands.
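
Several hunks in this file and the previous one replace strncpy() with strlcpy(). The difference matters for the fixed-width t10_wwn fields: strncpy() leaves the destination unterminated when the source fills it, while strlcpy() always NUL-terminates and returns the full source length so truncation is detectable. strlcpy() is a kernel/BSD API, so this userspace sketch carries a local stand-in; the 17-byte buffer matches sizeof(dev->t10_wwn.model) in the patch:

    #include <stdio.h>
    #include <string.h>

    /* Local stand-in for the kernel's strlcpy(): copy at most size-1 bytes,
     * always NUL-terminate, return strlen(src) so callers can detect
     * truncation (return value >= size). */
    static size_t local_strlcpy(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size) {
            size_t n = len < size - 1 ? len : size - 1;
            memcpy(dst, src, n);
            dst[n] = '\0';
        }
        return len;
    }

    int main(void)
    {
        char model[17];        /* INQUIRY_MODEL_LEN + 1, as in the patch */

        strncpy(model, "0123456789ABCDEF-XYZ", sizeof(model));
        /* model is now NOT NUL-terminated: all 17 bytes were overwritten. */

        if (local_strlcpy(model, "0123456789ABCDEF-XYZ", sizeof(model))
            >= sizeof(model))
            puts("truncated, but safely terminated");
        printf("%s\n", model);    /* 0123456789ABCDEF */
        return 0;
    }
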
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index aa2f4f632ebe..9a6e20a2af7d 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -203,7 +203,7 @@ static ssize_t target_fabric_mappedlun_write_protect_store(
pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
" Mapped LUN: %llu Write Protect bit to %s\n",
- se_tpg->se_tpg_tfo->get_fabric_name(),
+ se_tpg->se_tpg_tfo->fabric_name,
se_nacl->initiatorname, lacl->mapped_lun, (wp) ? "ON" : "OFF");
return count;
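
This file's only change is the rename that runs through the whole series: the tfo->get_fabric_name() callback becomes a tfo->fabric_name string member. A constant that every fabric driver used to return from a trivial function can simply live in the ops structure and be validated once at template registration. A hedged sketch of the before/after shape (names shortened; this is not the real target_core_fabric_ops layout):

    #include <stdio.h>

    /* Old shape: every driver supplied a trivial callback. */
    struct old_ops {
        const char *(*get_fabric_name)(void);
    };

    static const char *iscsi_get_fabric_name(void) { return "iSCSI"; }

    /* New shape: the name is plain data, checked once at registration. */
    struct new_ops {
        const char *fabric_name;
    };

    int main(void)
    {
        struct old_ops o = { .get_fabric_name = iscsi_get_fabric_name };
        struct new_ops n = { .fabric_name = "iSCSI" };

        printf("%s %s\n", o.get_fabric_name(), n.fabric_name);
        return 0;
    }
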
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 0c6635587930..853344415963 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -138,7 +138,6 @@ int init_se_kmem_caches(void);
void release_se_kmem_caches(void);
u32 scsi_get_new_index(scsi_index_t);
void transport_subsystem_check_init(void);
-int transport_cmd_finish_abort(struct se_cmd *);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
void transport_dump_dev_state(struct se_device *, char *, int *);
void transport_dump_dev_info(struct se_device *, struct se_lun *,
@@ -148,7 +147,6 @@ int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
void transport_clear_lun_ref(struct se_lun *);
-void transport_send_task_abort(struct se_cmd *);
sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
void target_qf_do_work(struct work_struct *work);
bool target_check_wce(struct se_device *dev);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 10db5656fd5d..397f38cb7f4e 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -235,7 +235,7 @@ target_scsi2_reservation_release(struct se_cmd *cmd)
tpg = sess->se_tpg;
pr_debug("SCSI-2 Released reservation for %s LUN: %llu ->"
" MAPPED LUN: %llu for %s\n",
- tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->fabric_name,
cmd->se_lun->unpacked_lun, cmd->orig_fe_lun,
sess->se_node_acl->initiatorname);
@@ -278,7 +278,7 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
if (dev->dev_reserved_node_acl &&
(dev->dev_reserved_node_acl != sess->se_node_acl)) {
pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
- tpg->se_tpg_tfo->get_fabric_name());
+ tpg->se_tpg_tfo->fabric_name);
pr_err("Original reserver LUN: %llu %s\n",
cmd->se_lun->unpacked_lun,
dev->dev_reserved_node_acl->initiatorname);
@@ -297,7 +297,7 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS_WITH_ISID;
}
pr_debug("SCSI-2 Reserved %s LUN: %llu -> MAPPED LUN: %llu"
- " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ " for %s\n", tpg->se_tpg_tfo->fabric_name,
cmd->se_lun->unpacked_lun, cmd->orig_fe_lun,
sess->se_node_acl->initiatorname);
@@ -914,11 +914,11 @@ static void core_scsi3_aptpl_reserve(
pr_debug("SPC-3 PR [%s] Service Action: APTPL RESERVE created"
" new reservation holder TYPE: %s ALL_TG_PT: %d\n",
- tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->fabric_name,
core_scsi3_pr_dump_type(pr_reg->pr_res_type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
- tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname,
+ tpg->se_tpg_tfo->fabric_name, node_acl->initiatorname,
i_buf);
}
@@ -1036,19 +1036,19 @@ static void __core_scsi3_dump_registration(
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
- " Node: %s%s\n", tfo->get_fabric_name(), (register_type == REGISTER_AND_MOVE) ?
+ " Node: %s%s\n", tfo->fabric_name, (register_type == REGISTER_AND_MOVE) ?
"_AND_MOVE" : (register_type == REGISTER_AND_IGNORE_EXISTING_KEY) ?
"_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname,
i_buf);
pr_debug("SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
- tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg),
+ tfo->fabric_name, tfo->tpg_get_wwn(se_tpg),
tfo->tpg_get_tag(se_tpg));
pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
- " Port(s)\n", tfo->get_fabric_name(),
+ " Port(s)\n", tfo->fabric_name,
(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
dev->transport->name);
pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
- " 0x%08x APTPL: %d\n", tfo->get_fabric_name(),
+ " 0x%08x APTPL: %d\n", tfo->fabric_name,
pr_reg->pr_res_key, pr_reg->pr_res_generation,
pr_reg->pr_reg_aptpl);
}
@@ -1329,7 +1329,7 @@ static void __core_scsi3_free_registration(
*/
while (atomic_read(&pr_reg->pr_res_holders) != 0) {
pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n",
- tfo->get_fabric_name());
+ tfo->fabric_name);
cpu_relax();
}
@@ -1341,15 +1341,15 @@ static void __core_scsi3_free_registration(
spin_lock(&pr_tmpl->registration_lock);
pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
- " Node: %s%s\n", tfo->get_fabric_name(),
+ " Node: %s%s\n", tfo->fabric_name,
pr_reg->pr_reg_nacl->initiatorname,
i_buf);
pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
- " Port(s)\n", tfo->get_fabric_name(),
+ " Port(s)\n", tfo->fabric_name,
(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
dev->transport->name);
pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
- " 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key,
+ " 0x%08x\n", tfo->fabric_name, pr_reg->pr_res_key,
pr_reg->pr_res_generation);
if (!preempt_and_abort_list) {
@@ -1645,7 +1645,7 @@ core_scsi3_decode_spec_i_port(
dest_tpg = tmp_tpg;
pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node:"
" %s Port RTPI: %hu\n",
- dest_tpg->se_tpg_tfo->get_fabric_name(),
+ dest_tpg->se_tpg_tfo->fabric_name,
dest_node_acl->initiatorname, dest_rtpi);
spin_lock(&dev->se_port_lock);
@@ -1662,7 +1662,7 @@ core_scsi3_decode_spec_i_port(
pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
" tid_len: %d for %s + %s\n",
- dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length,
+ dest_tpg->se_tpg_tfo->fabric_name, cmd->data_length,
tpdl, tid_len, i_str, iport_ptr);
if (tid_len > tpdl) {
@@ -1683,7 +1683,7 @@ core_scsi3_decode_spec_i_port(
if (!dest_se_deve) {
pr_err("Unable to locate %s dest_se_deve"
" from destination RTPI: %hu\n",
- dest_tpg->se_tpg_tfo->get_fabric_name(),
+ dest_tpg->se_tpg_tfo->fabric_name,
dest_rtpi);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
@@ -1704,7 +1704,7 @@ core_scsi3_decode_spec_i_port(
pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s"
" dest_se_deve mapped_lun: %llu\n",
- dest_tpg->se_tpg_tfo->get_fabric_name(),
+ dest_tpg->se_tpg_tfo->fabric_name,
dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
/*
@@ -1815,7 +1815,7 @@ core_scsi3_decode_spec_i_port(
pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully"
" registered Transport ID for Node: %s%s Mapped LUN:"
- " %llu\n", dest_tpg->se_tpg_tfo->get_fabric_name(),
+ " %llu\n", dest_tpg->se_tpg_tfo->fabric_name,
dest_node_acl->initiatorname, i_buf, (dest_se_deve) ?
dest_se_deve->mapped_lun : 0);
@@ -1913,7 +1913,7 @@ static int core_scsi3_update_aptpl_buf(
"res_holder=1\nres_type=%02x\n"
"res_scope=%02x\nres_all_tg_pt=%d\n"
"mapped_lun=%llu\n", reg_count,
- tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->fabric_name,
pr_reg->pr_reg_nacl->initiatorname, isid_buf,
pr_reg->pr_res_key, pr_reg->pr_res_type,
pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt,
@@ -1923,7 +1923,7 @@ static int core_scsi3_update_aptpl_buf(
"initiator_fabric=%s\ninitiator_node=%s\n%s"
"sa_res_key=%llu\nres_holder=0\n"
"res_all_tg_pt=%d\nmapped_lun=%llu\n",
- reg_count, tpg->se_tpg_tfo->get_fabric_name(),
+ reg_count, tpg->se_tpg_tfo->fabric_name,
pr_reg->pr_reg_nacl->initiatorname, isid_buf,
pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt,
pr_reg->pr_res_mapped_lun);
@@ -1942,7 +1942,7 @@ static int core_scsi3_update_aptpl_buf(
*/
snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n"
"tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%llu\nPR_REG_END:"
- " %d\n", tpg->se_tpg_tfo->get_fabric_name(),
+ " %d\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_wwn(tpg),
tpg->se_tpg_tfo->tpg_get_tag(tpg),
pr_reg->tg_pt_sep_rtpi, pr_reg->pr_aptpl_target_lun,
@@ -2168,7 +2168,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
pr_reg->pr_res_key = sa_res_key;
pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
" Key for %s to: 0x%016Lx PRgeneration:"
- " 0x%08x\n", cmd->se_tfo->get_fabric_name(),
+ " 0x%08x\n", cmd->se_tfo->fabric_name,
(register_type == REGISTER_AND_IGNORE_EXISTING_KEY) ? "_AND_IGNORE_EXISTING_KEY" : "",
pr_reg->pr_reg_nacl->initiatorname,
pr_reg->pr_res_key, pr_reg->pr_res_generation);
@@ -2356,9 +2356,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
pr_err("SPC-3 PR: Attempted RESERVE from"
" [%s]: %s while reservation already held by"
" [%s]: %s, returning RESERVATION_CONFLICT\n",
- cmd->se_tfo->get_fabric_name(),
+ cmd->se_tfo->fabric_name,
se_sess->se_node_acl->initiatorname,
- pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+ pr_res_nacl->se_tpg->se_tpg_tfo->fabric_name,
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -2379,9 +2379,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
" [%s]: %s trying to change TYPE and/or SCOPE,"
" while reservation already held by [%s]: %s,"
" returning RESERVATION_CONFLICT\n",
- cmd->se_tfo->get_fabric_name(),
+ cmd->se_tfo->fabric_name,
se_sess->se_node_acl->initiatorname,
- pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+ pr_res_nacl->se_tpg->se_tpg_tfo->fabric_name,
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -2414,10 +2414,10 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
pr_debug("SPC-3 PR [%s] Service Action: RESERVE created new"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
- cmd->se_tfo->get_fabric_name(), core_scsi3_pr_dump_type(type),
+ cmd->se_tfo->fabric_name, core_scsi3_pr_dump_type(type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
- cmd->se_tfo->get_fabric_name(),
+ cmd->se_tfo->fabric_name,
se_sess->se_node_acl->initiatorname,
i_buf);
spin_unlock(&dev->dev_reservation_lock);
@@ -2506,12 +2506,12 @@ out:
if (!dev->dev_pr_res_holder) {
pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
- tfo->get_fabric_name(), (explicit) ? "explicit" :
+ tfo->fabric_name, (explicit) ? "explicit" :
"implicit", core_scsi3_pr_dump_type(pr_res_type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
}
pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
- tfo->get_fabric_name(), se_nacl->initiatorname,
+ tfo->fabric_name, se_nacl->initiatorname,
i_buf);
/*
* Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE
@@ -2609,9 +2609,9 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
" reservation from [%s]: %s with different TYPE "
"and/or SCOPE while reservation already held by"
" [%s]: %s, returning RESERVATION_CONFLICT\n",
- cmd->se_tfo->get_fabric_name(),
+ cmd->se_tfo->fabric_name,
se_sess->se_node_acl->initiatorname,
- pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+ pr_res_nacl->se_tpg->se_tpg_tfo->fabric_name,
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -2752,7 +2752,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
spin_unlock(&pr_tmpl->registration_lock);
pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n",
- cmd->se_tfo->get_fabric_name());
+ cmd->se_tfo->fabric_name);
core_scsi3_update_and_write_aptpl(cmd->se_dev, false);
@@ -2791,11 +2791,11 @@ static void __core_scsi3_complete_pro_preempt(
pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
- tfo->get_fabric_name(), (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "",
+ tfo->fabric_name, (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "",
core_scsi3_pr_dump_type(type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
- tfo->get_fabric_name(), (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "",
+ tfo->fabric_name, (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "",
nacl->initiatorname, i_buf);
/*
* For PREEMPT_AND_ABORT, add the preempting reservation's
@@ -3282,7 +3282,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
" proto_ident: 0x%02x does not match ident: 0x%02x"
" from fabric: %s\n", proto_ident,
dest_se_tpg->proto_id,
- dest_tf_ops->get_fabric_name());
+ dest_tf_ops->fabric_name);
ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
@@ -3299,7 +3299,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
buf = NULL;
pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s"
- " %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ?
+ " %s\n", dest_tf_ops->fabric_name, (iport_ptr != NULL) ?
"port" : "device", initiator_str, (iport_ptr != NULL) ?
iport_ptr : "");
/*
@@ -3344,7 +3344,7 @@ after_iport_check:
if (!dest_node_acl) {
pr_err("Unable to locate %s dest_node_acl for"
- " TransportID%s\n", dest_tf_ops->get_fabric_name(),
+ " TransportID%s\n", dest_tf_ops->fabric_name,
initiator_str);
ret = TCM_INVALID_PARAMETER_LIST;
goto out;
@@ -3360,7 +3360,7 @@ after_iport_check:
}
pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
- " %s from TransportID\n", dest_tf_ops->get_fabric_name(),
+ " %s from TransportID\n", dest_tf_ops->fabric_name,
dest_node_acl->initiatorname);
/*
@@ -3370,7 +3370,7 @@ after_iport_check:
dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi);
if (!dest_se_deve) {
pr_err("Unable to locate %s dest_se_deve from RTPI:"
- " %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
+ " %hu\n", dest_tf_ops->fabric_name, rtpi);
ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
@@ -3385,7 +3385,7 @@ after_iport_check:
pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
" ACL for dest_se_deve->mapped_lun: %llu\n",
- dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
+ dest_tf_ops->fabric_name, dest_node_acl->initiatorname,
dest_se_deve->mapped_lun);
/*
@@ -3501,13 +3501,13 @@ after_iport_check:
pr_debug("SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
" created new reservation holder TYPE: %s on object RTPI:"
- " %hu PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(),
+ " %hu PRGeneration: 0x%08x\n", dest_tf_ops->fabric_name,
core_scsi3_pr_dump_type(type), rtpi,
dest_pr_reg->pr_res_generation);
pr_debug("SPC-3 PR Successfully moved reservation from"
" %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n",
- tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname,
- i_buf, dest_tf_ops->get_fabric_name(),
+ tf_ops->fabric_name, pr_reg_nacl->initiatorname,
+ i_buf, dest_tf_ops->fabric_name,
dest_node_acl->initiatorname, (iport_ptr != NULL) ?
iport_ptr : "");
/*
@@ -4095,6 +4095,8 @@ target_check_reservation(struct se_cmd *cmd)
return 0;
if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
return 0;
+ if (!dev->dev_attrib.emulate_pr)
+ return 0;
if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
return 0;
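
The new emulate_pr attribute is enforced consistently: target_check_reservation() returns early when it is clear, and (as shown in target_core_device.c and target_core_spc.c) every reservation opcode is rejected up front in CDB parsing. A userspace sketch of that opcode filter, assuming the standard SCSI opcode values; the helper and the bool flag are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    /* Standard SCSI opcodes (SPC; cf. include/scsi/scsi_proto.h). */
    #define RESERVE                 0x16
    #define RELEASE                 0x17
    #define RESERVE_10              0x56
    #define RELEASE_10              0x57
    #define PERSISTENT_RESERVE_IN   0x5e
    #define PERSISTENT_RESERVE_OUT  0x5f

    /* Mirror of the new guard: with PR emulation off, all reservation
     * opcodes are unsupported, regardless of passthrough PGR flags. */
    static bool reservation_opcode_rejected(bool emulate_pr, unsigned char op)
    {
        if (emulate_pr)
            return false;
        switch (op) {
        case PERSISTENT_RESERVE_IN:
        case PERSISTENT_RESERVE_OUT:
        case RELEASE:
        case RELEASE_10:
        case RESERVE:
        case RESERVE_10:
            return true;
        default:
            return false;
        }
    }

    int main(void)
    {
        printf("%d\n", reservation_opcode_rejected(false, RESERVE)); /* 1 */
        printf("%d\n", reservation_opcode_rejected(true, RESERVE));  /* 0 */
        printf("%d\n", reservation_opcode_rejected(false, 0x28));    /* 0: READ(10) */
        return 0;
    }
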
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index c062d363dce3..b5388a106567 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -179,20 +179,20 @@ out_free:
static void
pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn)
{
- unsigned char *buf;
-
if (sdev->inquiry_len < INQUIRY_LEN)
return;
-
- buf = sdev->inquiry;
- if (!buf)
- return;
/*
- * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev()
+ * Use sdev->inquiry data from drivers/scsi/scsi_scan.c:scsi_add_lun()
*/
- memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor));
- memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model));
- memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision));
+ BUILD_BUG_ON(sizeof(wwn->vendor) != INQUIRY_VENDOR_LEN + 1);
+ snprintf(wwn->vendor, sizeof(wwn->vendor),
+ "%." __stringify(INQUIRY_VENDOR_LEN) "s", sdev->vendor);
+ BUILD_BUG_ON(sizeof(wwn->model) != INQUIRY_MODEL_LEN + 1);
+ snprintf(wwn->model, sizeof(wwn->model),
+ "%." __stringify(INQUIRY_MODEL_LEN) "s", sdev->model);
+ BUILD_BUG_ON(sizeof(wwn->revision) != INQUIRY_REVISION_LEN + 1);
+ snprintf(wwn->revision, sizeof(wwn->revision),
+ "%." __stringify(INQUIRY_REVISION_LEN) "s", sdev->rev);
}
static int
@@ -811,7 +811,6 @@ static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
struct scsi_device *sd = pdv->pdv_sd;
unsigned char host_id[16];
ssize_t bl;
- int i;
if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
snprintf(host_id, 16, "%d", pdv->pdv_host_id);
@@ -824,29 +823,12 @@ static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
host_id);
if (sd) {
- bl += sprintf(b + bl, " ");
- bl += sprintf(b + bl, "Vendor: ");
- for (i = 0; i < 8; i++) {
- if (ISPRINT(sd->vendor[i])) /* printable character? */
- bl += sprintf(b + bl, "%c", sd->vendor[i]);
- else
- bl += sprintf(b + bl, " ");
- }
- bl += sprintf(b + bl, " Model: ");
- for (i = 0; i < 16; i++) {
- if (ISPRINT(sd->model[i])) /* printable character ? */
- bl += sprintf(b + bl, "%c", sd->model[i]);
- else
- bl += sprintf(b + bl, " ");
- }
- bl += sprintf(b + bl, " Rev: ");
- for (i = 0; i < 4; i++) {
- if (ISPRINT(sd->rev[i])) /* printable character ? */
- bl += sprintf(b + bl, "%c", sd->rev[i]);
- else
- bl += sprintf(b + bl, " ");
- }
- bl += sprintf(b + bl, "\n");
+ bl += sprintf(b + bl, " Vendor: %."
+ __stringify(INQUIRY_VENDOR_LEN) "s", sd->vendor);
+ bl += sprintf(b + bl, " Model: %."
+ __stringify(INQUIRY_MODEL_LEN) "s", sd->model);
+ bl += sprintf(b + bl, " Rev: %."
+ __stringify(INQUIRY_REVISION_LEN) "s\n", sd->rev);
}
return bl;
}
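
pscsi_set_inquiry_info() and pscsi_show_configfs_dev_params() now lean on adjacent string-literal concatenation: __stringify(INQUIRY_VENDOR_LEN) expands to the literal "8", so "%." __stringify(INQUIRY_VENDOR_LEN) "s" becomes "%.8s" at compile time. The "%.Ns" precision is what makes it safe to print sdev->vendor, a fixed-width field with no NUL terminator. A runnable sketch with a local __stringify:

    #include <stdio.h>

    /* Two-level expansion, as in include/linux/stringify.h. */
    #define __stringify_1(x)  #x
    #define __stringify(x)    __stringify_1(x)

    #define INQUIRY_VENDOR_LEN 8

    int main(void)
    {
        /* Fixed-width INQUIRY field: exactly 8 bytes, no NUL terminator. */
        const char vendor[INQUIRY_VENDOR_LEN] =
            { 'L', 'I', 'O', '-', 'O', 'R', 'G', ' ' };

        /* "%." "8" "s" concatenates to "%.8s": read at most 8 bytes. */
        printf("%." __stringify(INQUIRY_VENDOR_LEN) "s\n", vendor);
        return 0;
    }
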
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index f459118bc11b..47094ae01c04 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -108,12 +108,19 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
buf[7] = 0x2; /* CmdQue=1 */
- memcpy(&buf[8], "LIO-ORG ", 8);
- memset(&buf[16], 0x20, 16);
+ /*
+ * ASCII data fields described as being left-aligned shall have any
+ * unused bytes at the end of the field (i.e., highest offset) and the
+ * unused bytes shall be filled with ASCII space characters (20h).
+ */
+ memset(&buf[8], 0x20,
+ INQUIRY_VENDOR_LEN + INQUIRY_MODEL_LEN + INQUIRY_REVISION_LEN);
+ memcpy(&buf[8], dev->t10_wwn.vendor,
+ strnlen(dev->t10_wwn.vendor, INQUIRY_VENDOR_LEN));
memcpy(&buf[16], dev->t10_wwn.model,
- min_t(size_t, strlen(dev->t10_wwn.model), 16));
+ strnlen(dev->t10_wwn.model, INQUIRY_MODEL_LEN));
memcpy(&buf[32], dev->t10_wwn.revision,
- min_t(size_t, strlen(dev->t10_wwn.revision), 4));
+ strnlen(dev->t10_wwn.revision, INQUIRY_REVISION_LEN));
buf[4] = 31; /* Set additional length to 31 */
return 0;
@@ -251,7 +258,10 @@ check_t10_vend_desc:
buf[off] = 0x2; /* ASCII */
buf[off+1] = 0x1; /* T10 Vendor ID */
buf[off+2] = 0x0;
- memcpy(&buf[off+4], "LIO-ORG", 8);
+ /* left align Vendor ID and pad with spaces */
+ memset(&buf[off+4], 0x20, INQUIRY_VENDOR_LEN);
+ memcpy(&buf[off+4], dev->t10_wwn.vendor,
+ strnlen(dev->t10_wwn.vendor, INQUIRY_VENDOR_LEN));
/* Extra Byte for NULL Terminator */
id_len++;
/* Identifier Length */
@@ -1281,6 +1291,14 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
struct se_device *dev = cmd->se_dev;
unsigned char *cdb = cmd->t_task_cdb;
+ if (!dev->dev_attrib.emulate_pr &&
+ ((cdb[0] == PERSISTENT_RESERVE_IN) ||
+ (cdb[0] == PERSISTENT_RESERVE_OUT) ||
+ (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
+ (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
switch (cdb[0]) {
case MODE_SELECT:
*size = cdb[4];
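
spc_emulate_inquiry_std() now space-fills the whole vendor/model/revision region first, then copies in only strnlen() bytes of each field, which is exactly the SPC rule quoted in the new comment: left-aligned ASCII, unused bytes filled with 20h. A small sketch of building that region of a standard INQUIRY response (offsets 8-35 as in the hunk; the helper and the 36-byte minimal buffer are illustrative):

    #define _POSIX_C_SOURCE 200809L    /* for strnlen() */
    #include <stdio.h>
    #include <string.h>

    #define INQUIRY_VENDOR_LEN    8
    #define INQUIRY_MODEL_LEN     16
    #define INQUIRY_REVISION_LEN  4

    static void fill_std_inquiry(unsigned char *buf, const char *vendor,
                                 const char *model, const char *rev)
    {
        /* SPC: left-aligned ASCII fields, unused bytes = 20h (space). */
        memset(&buf[8], 0x20, INQUIRY_VENDOR_LEN + INQUIRY_MODEL_LEN +
               INQUIRY_REVISION_LEN);
        memcpy(&buf[8], vendor, strnlen(vendor, INQUIRY_VENDOR_LEN));
        memcpy(&buf[16], model, strnlen(model, INQUIRY_MODEL_LEN));
        memcpy(&buf[32], rev, strnlen(rev, INQUIRY_REVISION_LEN));
    }

    int main(void)
    {
        unsigned char buf[36] = { 0 };    /* minimal standard INQUIRY data */

        fill_std_inquiry(buf, "LIO-ORG", "IBLOCK", "4.0");
        printf("[%.28s]\n", (const char *)&buf[8]); /* space-padded fields */
        return 0;
    }
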
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index f0db91ebd735..8d9ceedfd455 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -246,43 +246,25 @@ static ssize_t target_stat_lu_lu_name_show(struct config_item *item, char *page)
static ssize_t target_stat_lu_vend_show(struct config_item *item, char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
- int i;
- char str[sizeof(dev->t10_wwn.vendor)+1];
- /* scsiLuVendorId */
- for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++)
- str[i] = ISPRINT(dev->t10_wwn.vendor[i]) ?
- dev->t10_wwn.vendor[i] : ' ';
- str[i] = '\0';
- return snprintf(page, PAGE_SIZE, "%s\n", str);
+ return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_VENDOR_LEN)
+ "s\n", dev->t10_wwn.vendor);
}
static ssize_t target_stat_lu_prod_show(struct config_item *item, char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
- int i;
- char str[sizeof(dev->t10_wwn.model)+1];
- /* scsiLuProductId */
- for (i = 0; i < sizeof(dev->t10_wwn.model); i++)
- str[i] = ISPRINT(dev->t10_wwn.model[i]) ?
- dev->t10_wwn.model[i] : ' ';
- str[i] = '\0';
- return snprintf(page, PAGE_SIZE, "%s\n", str);
+ return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_MODEL_LEN)
+ "s\n", dev->t10_wwn.model);
}
static ssize_t target_stat_lu_rev_show(struct config_item *item, char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
- int i;
- char str[sizeof(dev->t10_wwn.revision)+1];
- /* scsiLuRevisionId */
- for (i = 0; i < sizeof(dev->t10_wwn.revision); i++)
- str[i] = ISPRINT(dev->t10_wwn.revision[i]) ?
- dev->t10_wwn.revision[i] : ' ';
- str[i] = '\0';
- return snprintf(page, PAGE_SIZE, "%s\n", str);
+ return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_REVISION_LEN)
+ "s\n", dev->t10_wwn.revision);
}
static ssize_t target_stat_lu_dev_type_show(struct config_item *item, char *page)
@@ -612,7 +594,7 @@ static ssize_t target_stat_tgt_port_name_show(struct config_item *item,
dev = rcu_dereference(lun->lun_se_dev);
if (dev)
ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
- tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->fabric_name,
lun->lun_rtpi);
rcu_read_unlock();
return ret;
@@ -767,7 +749,7 @@ static ssize_t target_stat_transport_device_show(struct config_item *item,
if (dev) {
/* scsiTransportType */
ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
- tpg->se_tpg_tfo->get_fabric_name());
+ tpg->se_tpg_tfo->fabric_name);
}
rcu_read_unlock();
return ret;
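
The stat handlers above use the width form "%-" __stringify(INQUIRY_VENDOR_LEN) "s", i.e. "%-8s": left-justify and pad to the field width. Note the asymmetry with the "%.8s" precision form used in target_core_pscsi.c: a width pads short strings but never truncates long ones, while a precision truncates but never pads. That is why these t10_wwn buffers must already be bounded and NUL-terminated, which the strlcpy()/BUILD_BUG_ON changes earlier in the series guarantee. A three-line illustration:

    #include <stdio.h>

    int main(void)
    {
        printf("[%-8s]\n", "abc");        /* [abc     ]  width pads          */
        printf("[%-8s]\n", "0123456789"); /* [0123456789] width never trims  */
        printf("[%.8s]\n", "0123456789"); /* [01234567]  precision truncates */
        return 0;
    }
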
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 6d1179a7f043..ad0061e09d4c 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -163,18 +163,23 @@ void core_tmr_abort_task(
continue;
printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
- se_cmd->se_tfo->get_fabric_name(), ref_tag);
+ se_cmd->se_tfo->fabric_name, ref_tag);
- if (!__target_check_io_state(se_cmd, se_sess, 0))
+ if (!__target_check_io_state(se_cmd, se_sess,
+ dev->dev_attrib.emulate_tas))
continue;
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- cancel_work_sync(&se_cmd->work);
- transport_wait_for_tasks(se_cmd);
+ /*
+ * Ensure that this ABORT request is visible to the LU RESET
+ * code.
+ */
+ if (!tmr->tmr_dev)
+ WARN_ON_ONCE(transport_lookup_tmr_lun(tmr->task_cmd,
+ se_cmd->orig_fe_lun) < 0);
- if (!transport_cmd_finish_abort(se_cmd))
- target_put_sess_cmd(se_cmd);
+ target_put_cmd_and_wait(se_cmd);
printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
" ref_tag: %llu\n", ref_tag);
@@ -268,14 +273,28 @@ static void core_tmr_drain_tmr_list(
(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
tmr_p->function, tmr_p->response, cmd->t_state);
- cancel_work_sync(&cmd->work);
- transport_wait_for_tasks(cmd);
-
- if (!transport_cmd_finish_abort(cmd))
- target_put_sess_cmd(cmd);
+ target_put_cmd_and_wait(cmd);
}
}
+/**
+ * core_tmr_drain_state_list() - abort SCSI commands associated with a device
+ *
+ * @dev: Device for which to abort outstanding SCSI commands.
+ * @prout_cmd: Pointer to the SCSI PREEMPT AND ABORT if this function is called
+ * to realize the PREEMPT AND ABORT functionality.
+ * @tmr_sess: Session through which the LUN RESET has been received.
+ * @tas: Task Aborted Status (TAS) bit from the SCSI control mode page.
+ * A quote from SPC-4, paragraph "7.5.10 Control mode page":
+ * "A task aborted status (TAS) bit set to zero specifies that
+ * aborted commands shall be terminated by the device server
+ * without any response to the application client. A TAS bit set
+ * to one specifies that commands aborted by the actions of an I_T
+ * nexus other than the I_T nexus on which the command was
+ * received shall be completed with TASK ABORTED status."
+ * @preempt_and_abort_list: For the PREEMPT AND ABORT functionality, a list
+ * with registrations that will be preempted.
+ */
static void core_tmr_drain_state_list(
struct se_device *dev,
struct se_cmd *prout_cmd,
@@ -350,18 +369,7 @@ static void core_tmr_drain_state_list(
cmd->tag, (preempt_and_abort_list) ? "preempt" : "",
cmd->pr_res_key);
- /*
- * If the command may be queued onto a workqueue cancel it now.
- *
- * This is equivalent to removal from the execute queue in the
- * loop above, but we do it down here given that
- * cancel_work_sync may block.
- */
- cancel_work_sync(&cmd->work);
- transport_wait_for_tasks(cmd);
-
- if (!transport_cmd_finish_abort(cmd))
- target_put_sess_cmd(cmd);
+ target_put_cmd_and_wait(cmd);
}
}
@@ -398,7 +406,7 @@ int core_tmr_lun_reset(
if (tmr_nacl && tmr_tpg) {
pr_debug("LUN_RESET: TMR caller fabric: %s"
" initiator port %s\n",
- tmr_tpg->se_tpg_tfo->get_fabric_name(),
+ tmr_tpg->se_tpg_tfo->fabric_name,
tmr_nacl->initiatorname);
}
}
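
All three abort paths above collapse the old cancel_work_sync() + transport_wait_for_tasks() + transport_cmd_finish_abort() sequence into a single target_put_cmd_and_wait(): register an on-stack completion, drop the caller's reference, and sleep until the final reference release fires the completion. Below is a hedged userspace analogue of that drop-then-wait idiom using pthreads (the kernel version uses kref and struct completion; all names here are illustrative; compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    struct cmd {
        int refcount;          /* stand-in for the kernel's kref */
        pthread_mutex_t lock;
        pthread_cond_t done;   /* stand-in for the on-stack completion */
    };

    static void cmd_put(struct cmd *c)
    {
        pthread_mutex_lock(&c->lock);
        if (--c->refcount == 0)
            pthread_cond_signal(&c->done);    /* final put completes */
        pthread_mutex_unlock(&c->lock);
    }

    /* Analogue of target_put_cmd_and_wait(): drop our reference, then
     * sleep until whoever holds the last reference drops it too. */
    static void cmd_put_and_wait(struct cmd *c)
    {
        pthread_mutex_lock(&c->lock);
        --c->refcount;
        while (c->refcount > 0)
            pthread_cond_wait(&c->done, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    static void *frontend(void *arg)    /* e.g. the fabric driver */
    {
        usleep(10000);                  /* still processing the command */
        cmd_put(arg);                   /* its reference drops last */
        return NULL;
    }

    int main(void)
    {
        struct cmd c = { .refcount = 2,
                         .lock = PTHREAD_MUTEX_INITIALIZER,
                         .done = PTHREAD_COND_INITIALIZER };
        pthread_t t;

        pthread_create(&t, NULL, frontend, &c);
        cmd_put_and_wait(&c);           /* the ABORT path blocks here */
        puts("command fully released");
        pthread_join(t, NULL);
        return 0;
    }
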
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 02e8a5d86658..e2ace1059437 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -151,7 +151,7 @@ void core_tpg_add_node_to_devs(
pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
" access for LUN in Demo Mode\n",
- tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
lun_access_ro ? "READ-ONLY" : "READ-WRITE");
@@ -176,7 +176,7 @@ target_set_nacl_queue_depth(struct se_portal_group *tpg,
if (!acl->queue_depth) {
pr_warn("Queue depth for %s Initiator Node: %s is 0,"
- "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
+ "defaulting to 1.\n", tpg->se_tpg_tfo->fabric_name,
acl->initiatorname);
acl->queue_depth = 1;
}
@@ -227,11 +227,11 @@ static void target_add_node_acl(struct se_node_acl *acl)
pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
" Initiator Node: %s\n",
- tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg),
acl->dynamic_node_acl ? "DYNAMIC" : "",
acl->queue_depth,
- tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->fabric_name,
acl->initiatorname);
}
@@ -313,7 +313,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0;
pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
- " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ " for %s\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
mutex_unlock(&tpg->acl_node_mutex);
return acl;
@@ -321,7 +321,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
pr_err("ACL entry for %s Initiator"
" Node %s already exists for TPG %u, ignoring"
- " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
+ " request.\n", tpg->se_tpg_tfo->fabric_name,
initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
mutex_unlock(&tpg->acl_node_mutex);
return ERR_PTR(-EEXIST);
@@ -380,9 +380,9 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
core_free_device_list_for_node(acl, tpg);
pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
- " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ " Initiator Node: %s\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
- tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);
+ tpg->se_tpg_tfo->fabric_name, acl->initiatorname);
kfree(acl);
}
@@ -418,7 +418,7 @@ int core_tpg_set_initiator_node_queue_depth(
pr_debug("Successfully changed queue depth to: %d for Initiator"
" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
- acl->initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
+ acl->initiatorname, tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg));
return 0;
@@ -512,7 +512,7 @@ int core_tpg_register(
spin_unlock_bh(&tpg_lock);
pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
- "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->get_fabric_name(),
+ "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name,
se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
@@ -528,7 +528,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
LIST_HEAD(node_list);
pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
- "Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(),
+ "Proto: %d, Portal Tag: %u\n", tfo->fabric_name,
tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));
@@ -577,7 +577,6 @@ struct se_lun *core_tpg_alloc_lun(
}
lun->unpacked_lun = unpacked_lun;
atomic_set(&lun->lun_acl_count, 0);
- init_completion(&lun->lun_ref_comp);
init_completion(&lun->lun_shutdown_comp);
INIT_LIST_HEAD(&lun->lun_deve_list);
INIT_LIST_HEAD(&lun->lun_dev_link);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 2cfd61d62e97..ef9e75b359d4 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -224,19 +224,28 @@ void transport_subsystem_check_init(void)
sub_api_initialized = 1;
}
+static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
+{
+ struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);
+
+ wake_up(&sess->cmd_list_wq);
+}
+
/**
* transport_init_session - initialize a session object
* @se_sess: Session object pointer.
*
* The caller must have zero-initialized @se_sess before calling this function.
*/
-void transport_init_session(struct se_session *se_sess)
+int transport_init_session(struct se_session *se_sess)
{
INIT_LIST_HEAD(&se_sess->sess_list);
INIT_LIST_HEAD(&se_sess->sess_acl_list);
INIT_LIST_HEAD(&se_sess->sess_cmd_list);
spin_lock_init(&se_sess->sess_cmd_lock);
init_waitqueue_head(&se_sess->cmd_list_wq);
+ return percpu_ref_init(&se_sess->cmd_count,
+ target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
}
EXPORT_SYMBOL(transport_init_session);
@@ -247,6 +256,7 @@ EXPORT_SYMBOL(transport_init_session);
struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
{
struct se_session *se_sess;
+ int ret;
se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
if (!se_sess) {
@@ -254,7 +264,11 @@ struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
" se_sess_cache\n");
return ERR_PTR(-ENOMEM);
}
- transport_init_session(se_sess);
+ ret = transport_init_session(se_sess);
+ if (ret < 0) {
+ kmem_cache_free(se_sess_cache, se_sess);
+ return ERR_PTR(ret);
+ }
se_sess->sup_prot_ops = sup_prot_ops;
return se_sess;
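
transport_init_session() changes from void to int here because percpu_ref_init() can fail with -ENOMEM, and transport_alloc_session() must now unwind the kmem_cache allocation when it does. A userspace sketch of that void-to-int init migration and the caller-side unwind; every name in it is illustrative:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct session { int *cmd_count; };

    /* Init can now fail (the kernel's percpu_ref_init() may return
     * -ENOMEM), so it returns int instead of void. */
    static int session_init(struct session *s)
    {
        s->cmd_count = calloc(1, sizeof(*s->cmd_count));
        return s->cmd_count ? 0 : -ENOMEM;
    }

    static struct session *session_alloc(void)
    {
        struct session *s = calloc(1, sizeof(*s));

        if (!s)
            return NULL;
        if (session_init(s) < 0) {
            free(s);    /* unwind the partial allocation */
            return NULL;
        }
        return s;
    }

    int main(void)
    {
        struct session *s = session_alloc();

        if (!s) {
            puts("allocation failed");
            return 1;
        }
        puts("session ready");
        free(s->cmd_count);
        free(s);
        return 0;
    }
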
@@ -273,14 +287,11 @@ int transport_alloc_session_tags(struct se_session *se_sess,
{
int rc;
- se_sess->sess_cmd_map = kcalloc(tag_size, tag_num,
- GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
+ se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!se_sess->sess_cmd_map) {
- se_sess->sess_cmd_map = vzalloc(array_size(tag_size, tag_num));
- if (!se_sess->sess_cmd_map) {
- pr_err("Unable to allocate se_sess->sess_cmd_map\n");
- return -ENOMEM;
- }
+ pr_err("Unable to allocate se_sess->sess_cmd_map\n");
+ return -ENOMEM;
}
rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
@@ -397,7 +408,7 @@ void __transport_register_session(
list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
- se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
+ se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);
@@ -581,6 +592,7 @@ void transport_free_session(struct se_session *se_sess)
sbitmap_queue_free(&se_sess->sess_tag_pool);
kvfree(se_sess->sess_cmd_map);
}
+ percpu_ref_exit(&se_sess->cmd_count);
kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);
@@ -602,7 +614,7 @@ void transport_deregister_session(struct se_session *se_sess)
spin_unlock_irqrestore(&se_tpg->session_lock, flags);
pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
- se_tpg->se_tpg_tfo->get_fabric_name());
+ se_tpg->se_tpg_tfo->fabric_name);
/*
* If last kref is dropping now for an explicit NodeACL, awake sleeping
* ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
@@ -695,32 +707,6 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
percpu_ref_put(&lun->lun_ref);
}
-int transport_cmd_finish_abort(struct se_cmd *cmd)
-{
- bool send_tas = cmd->transport_state & CMD_T_TAS;
- bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
- int ret = 0;
-
- if (send_tas)
- transport_send_task_abort(cmd);
-
- if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
- transport_lun_remove_cmd(cmd);
- /*
- * Allow the fabric driver to unmap any resources before
- * releasing the descriptor via TFO->release_cmd()
- */
- if (!send_tas)
- cmd->se_tfo->aborted_task(cmd);
-
- if (transport_cmd_check_stop_to_fabric(cmd))
- return 1;
- if (!send_tas && ack_kref)
- ret = target_put_sess_cmd(cmd);
-
- return ret;
-}
-
static void target_complete_failure_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -770,12 +756,88 @@ void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
}
EXPORT_SYMBOL(transport_copy_sense_to_cmd);
+static void target_handle_abort(struct se_cmd *cmd)
+{
+ bool tas = cmd->transport_state & CMD_T_TAS;
+ bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF;
+ int ret;
+
+ pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas);
+
+ if (tas) {
+ if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+ cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+ pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
+ cmd->t_task_cdb[0], cmd->tag);
+ trace_target_cmd_complete(cmd);
+ ret = cmd->se_tfo->queue_status(cmd);
+ if (ret) {
+ transport_handle_queue_full(cmd, cmd->se_dev,
+ ret, false);
+ return;
+ }
+ } else {
+ cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
+ cmd->se_tfo->queue_tm_rsp(cmd);
+ }
+ } else {
+ /*
+ * Allow the fabric driver to unmap any resources before
+ * releasing the descriptor via TFO->release_cmd().
+ */
+ cmd->se_tfo->aborted_task(cmd);
+ if (ack_kref)
+ WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0);
+ /*
+ * To do: establish a unit attention condition on the I_T
+ * nexus associated with cmd. See also the paragraph "Aborting
+ * commands" in SAM.
+ */
+ }
+
+ WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);
+
+ transport_lun_remove_cmd(cmd);
+
+ transport_cmd_check_stop_to_fabric(cmd);
+}
+
+static void target_abort_work(struct work_struct *work)
+{
+ struct se_cmd *cmd = container_of(work, struct se_cmd, work);
+
+ target_handle_abort(cmd);
+}
+
+static bool target_cmd_interrupted(struct se_cmd *cmd)
+{
+ int post_ret;
+
+ if (cmd->transport_state & CMD_T_ABORTED) {
+ if (cmd->transport_complete_callback)
+ cmd->transport_complete_callback(cmd, false, &post_ret);
+ INIT_WORK(&cmd->work, target_abort_work);
+ queue_work(target_completion_wq, &cmd->work);
+ return true;
+ } else if (cmd->transport_state & CMD_T_STOP) {
+ if (cmd->transport_complete_callback)
+ cmd->transport_complete_callback(cmd, false, &post_ret);
+ complete_all(&cmd->t_transport_stop_comp);
+ return true;
+ }
+
+ return false;
+}
+
+/* May be called from interrupt context so must not sleep. */
void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
- struct se_device *dev = cmd->se_dev;
int success;
unsigned long flags;
+ if (target_cmd_interrupted(cmd))
+ return;
+
cmd->scsi_status = scsi_status;
spin_lock_irqsave(&cmd->t_state_lock, flags);
@@ -791,34 +853,12 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
break;
}
- /*
- * Check for case where an explicit ABORT_TASK has been received
- * and transport_wait_for_tasks() will be waiting for completion..
- */
- if (cmd->transport_state & CMD_T_ABORTED ||
- cmd->transport_state & CMD_T_STOP) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- /*
- * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(),
- * release se_device->caw_sem obtained by sbc_compare_and_write()
- * since target_complete_ok_work() or target_complete_failure_work()
- * won't be called to invoke the normal CAW completion callbacks.
- */
- if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
- up(&dev->caw_sem);
- }
- complete_all(&cmd->t_transport_stop_comp);
- return;
- } else if (!success) {
- INIT_WORK(&cmd->work, target_complete_failure_work);
- } else {
- INIT_WORK(&cmd->work, target_complete_ok_work);
- }
-
cmd->t_state = TRANSPORT_COMPLETE;
cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ INIT_WORK(&cmd->work, success ? target_complete_ok_work :
+ target_complete_failure_work);
if (cmd->se_cmd_flags & SCF_USE_CPUID)
queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
else
@@ -880,7 +920,7 @@ void target_qf_do_work(struct work_struct *work)
atomic_dec_mb(&dev->dev_qf_count);
pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
- " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
+ " context: %s\n", cmd->se_tfo->fabric_name, cmd,
(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
: "UNKNOWN");
@@ -1244,7 +1284,7 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
} else if (size != cmd->data_length) {
pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
" %u does not match SCSI CDB Length: %u for SAM Opcode:"
- " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
+ " 0x%02x\n", cmd->se_tfo->fabric_name,
cmd->data_length, size, cmd->t_task_cdb[0]);
if (cmd->data_direction == DMA_TO_DEVICE) {
@@ -1316,7 +1356,8 @@ void transport_init_se_cmd(
INIT_LIST_HEAD(&cmd->se_cmd_list);
INIT_LIST_HEAD(&cmd->state_list);
init_completion(&cmd->t_transport_stop_comp);
- cmd->compl = NULL;
+ cmd->free_compl = NULL;
+ cmd->abrt_compl = NULL;
spin_lock_init(&cmd->t_state_lock);
INIT_WORK(&cmd->work, NULL);
kref_init(&cmd->cmd_kref);
@@ -1396,7 +1437,7 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
ret = dev->transport->parse_cdb(cmd);
if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
- cmd->se_tfo->get_fabric_name(),
+ cmd->se_tfo->fabric_name,
cmd->se_sess->se_node_acl->initiatorname,
cmd->t_task_cdb[0]);
if (ret)
@@ -1792,8 +1833,11 @@ void transport_generic_request_failure(struct se_cmd *cmd,
if (cmd->transport_complete_callback)
cmd->transport_complete_callback(cmd, false, &post_ret);
- if (transport_check_aborted_status(cmd, 1))
+ if (cmd->transport_state & CMD_T_ABORTED) {
+ INIT_WORK(&cmd->work, target_abort_work);
+ queue_work(target_completion_wq, &cmd->work);
return;
+ }
switch (sense_reason) {
case TCM_NON_EXISTENT_LUN:
@@ -1999,8 +2043,6 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
return true;
}
-static int __transport_check_aborted_status(struct se_cmd *, int);
-
void target_execute_cmd(struct se_cmd *cmd)
{
/*
@@ -2009,20 +2051,10 @@ void target_execute_cmd(struct se_cmd *cmd)
*
* If the received CDB has already been aborted stop processing it here.
*/
- spin_lock_irq(&cmd->t_state_lock);
- if (__transport_check_aborted_status(cmd, 1)) {
- spin_unlock_irq(&cmd->t_state_lock);
- return;
- }
- if (cmd->transport_state & CMD_T_STOP) {
- pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
- __func__, __LINE__, cmd->tag);
-
- spin_unlock_irq(&cmd->t_state_lock);
- complete_all(&cmd->t_transport_stop_comp);
+ if (target_cmd_interrupted(cmd))
return;
- }
+ spin_lock_irq(&cmd->t_state_lock);
cmd->t_state = TRANSPORT_PROCESSING;
cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
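
Both target_complete_cmd() and target_execute_cmd() now funnel their abort/stop handling through the new target_cmd_interrupted() helper, whose definition is added elsewhere in this series. A simplified reconstruction of its shape (a sketch, not the verbatim hunk; the real body also deals with cmd->transport_complete_callback and locking):

    /* Sketch: returns true if the command should not be processed further. */
    static bool demo_cmd_interrupted(struct se_cmd *cmd)
    {
            if (cmd->transport_state & CMD_T_ABORTED) {
                    /* Hand the command to the abort path asynchronously. */
                    INIT_WORK(&cmd->work, target_abort_work);
                    queue_work(target_completion_wq, &cmd->work);
                    return true;
            }
            if (cmd->transport_state & CMD_T_STOP) {
                    /* Wake up transport_wait_for_tasks(). */
                    complete_all(&cmd->t_transport_stop_comp);
                    return true;
            }
            return false;
    }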
@@ -2571,7 +2603,8 @@ transport_generic_new_cmd(struct se_cmd *cmd)
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
*/
- if (cmd->transport_state & CMD_T_STOP) {
+ if (cmd->transport_state & CMD_T_STOP &&
+ !cmd->se_tfo->write_pending_must_be_called) {
pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
__func__, __LINE__, cmd->tag);
@@ -2635,13 +2668,29 @@ static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
}
/*
+ * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has
+ * finished.
+ */
+void target_put_cmd_and_wait(struct se_cmd *cmd)
+{
+ DECLARE_COMPLETION_ONSTACK(compl);
+
+ WARN_ON_ONCE(cmd->abrt_compl);
+ cmd->abrt_compl = &compl;
+ target_put_sess_cmd(cmd);
+ wait_for_completion(&compl);
+}
+
+/*
* This function is called by frontend drivers after processing of a command
* has finished.
*
- * The protocol for ensuring that either the regular flow or the TMF
- * code drops one reference is as follows:
+ * The protocol for ensuring that either the regular frontend command
+ * processing flow or the target_handle_abort() code drops one reference is as
+ * follows:
* - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause
- * the frontend driver to drop one reference, synchronously or asynchronously.
+ * the frontend driver to call this function synchronously or asynchronously.
+ * That will cause one reference to be dropped.
* - During regular command processing the target core sets CMD_T_COMPLETE
* before invoking one of the .queue_*() functions.
* - The code that aborts commands skips commands and TMFs for which
@@ -2653,7 +2702,7 @@ static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
* - For aborted commands for which CMD_T_TAS has been set .queue_status() will
* be called and will drop a reference.
* - For aborted commands for which CMD_T_TAS has not been set .aborted_task()
- * will be called. transport_cmd_finish_abort() will drop the final reference.
+ * will be called. target_handle_abort() will drop the final reference.
*/
int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
@@ -2677,9 +2726,8 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
transport_lun_remove_cmd(cmd);
}
if (aborted)
- cmd->compl = &compl;
- if (!aborted || tas)
- ret = target_put_sess_cmd(cmd);
+ cmd->free_compl = &compl;
+ ret = target_put_sess_cmd(cmd);
if (aborted) {
pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
wait_for_completion(&compl);
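
The new target_put_cmd_and_wait() and the free_compl path in transport_generic_free_cmd() are two instances of the same kref-plus-on-stack-completion handshake: park a completion pointer in the object, drop a reference, and block until the release callback fires. A self-contained sketch of the idiom (demo_* names hypothetical):

    #include <linux/kref.h>
    #include <linux/completion.h>
    #include <linux/slab.h>

    struct demo_obj {
            struct kref kref;
            struct completion *done;        /* set by the waiter, may be NULL */
    };

    static void demo_release(struct kref *kref)
    {
            struct demo_obj *obj = container_of(kref, struct demo_obj, kref);
            struct completion *done = obj->done;

            kfree(obj);             /* obj must not be touched after this */
            if (done)
                    complete(done);
    }

    /* Drop our reference and wait until the final reference is gone. */
    static void demo_put_and_wait(struct demo_obj *obj)
    {
            DECLARE_COMPLETION_ONSTACK(done);

            obj->done = &done;
            kref_put(&obj->kref, demo_release);
            wait_for_completion(&done);
    }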
@@ -2719,6 +2767,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
}
se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
+ percpu_ref_get(&se_sess->cmd_count);
out:
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
@@ -2743,21 +2792,24 @@ static void target_release_cmd_kref(struct kref *kref)
{
struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
struct se_session *se_sess = se_cmd->se_sess;
- struct completion *compl = se_cmd->compl;
+ struct completion *free_compl = se_cmd->free_compl;
+ struct completion *abrt_compl = se_cmd->abrt_compl;
unsigned long flags;
if (se_sess) {
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
list_del_init(&se_cmd->se_cmd_list);
- if (se_sess->sess_tearing_down && list_empty(&se_sess->sess_cmd_list))
- wake_up(&se_sess->cmd_list_wq);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
target_free_cmd_mem(se_cmd);
se_cmd->se_tfo->release_cmd(se_cmd);
- if (compl)
- complete(compl);
+ if (free_compl)
+ complete(free_compl);
+ if (abrt_compl)
+ complete(abrt_compl);
+
+ percpu_ref_put(&se_sess->cmd_count);
}
/**
@@ -2886,6 +2938,8 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
se_sess->sess_tearing_down = 1;
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+ percpu_ref_kill(&se_sess->cmd_count);
}
EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
@@ -2900,52 +2954,24 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
WARN_ON_ONCE(!se_sess->sess_tearing_down);
- spin_lock_irq(&se_sess->sess_cmd_lock);
do {
- ret = wait_event_lock_irq_timeout(
- se_sess->cmd_list_wq,
- list_empty(&se_sess->sess_cmd_list),
- se_sess->sess_cmd_lock, 180 * HZ);
+ ret = wait_event_timeout(se_sess->cmd_list_wq,
+ percpu_ref_is_zero(&se_sess->cmd_count),
+ 180 * HZ);
list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
target_show_cmd("session shutdown: still waiting for ",
cmd);
} while (ret <= 0);
- spin_unlock_irq(&se_sess->sess_cmd_lock);
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
-static void target_lun_confirm(struct percpu_ref *ref)
-{
- struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
-
- complete(&lun->lun_ref_comp);
-}
-
+/*
+ * Prevent new percpu_ref_tryget_live() calls from succeeding and wait until
+ * all references to the LUN have been released. Called during LUN shutdown.
+ */
void transport_clear_lun_ref(struct se_lun *lun)
{
- /*
- * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop
- * the initial reference and schedule confirm kill to be
- * executed after one full RCU grace period has completed.
- */
- percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm);
- /*
- * The first completion waits for percpu_ref_switch_to_atomic_rcu()
- * to call target_lun_confirm after lun->lun_ref has been marked
- * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t
- * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref
- * fails for all new incoming I/O.
- */
- wait_for_completion(&lun->lun_ref_comp);
- /*
- * The second completion waits for percpu_ref_put_many() to
- * invoke ->release() after lun->lun_ref has switched to
- * atomic_t mode, and lun->lun_ref.count has reached zero.
- *
- * At this point all target-core lun->lun_ref references have
- * been dropped via transport_lun_remove_cmd(), and it's safe
- * to proceed with the remaining LUN shutdown.
- */
+ percpu_ref_kill(&lun->lun_ref);
wait_for_completion(&lun->lun_shutdown_comp);
}
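
The percpu_ref changes above share one shutdown protocol: take one reference per in-flight object, call percpu_ref_kill() to make all further percpu_ref_tryget_live() calls fail, then wait for the count to drain; the session's cmd_count waits via wait_event_timeout() on percpu_ref_is_zero(), while lun_ref waits on a completion fired from the ref's release callback. A hedged sketch of both variants (demo_* names hypothetical; the lun ref would be set up with percpu_ref_init(&lun->ref, demo_lun_release, 0, GFP_KERNEL)):

    #include <linux/percpu-refcount.h>
    #include <linux/completion.h>
    #include <linux/wait.h>

    struct demo_lun {
            struct percpu_ref ref;
            struct completion shutdown_comp;
    };

    /* Release callback: runs exactly once, when the count reaches zero. */
    static void demo_lun_release(struct percpu_ref *ref)
    {
            struct demo_lun *lun = container_of(ref, struct demo_lun, ref);

            complete(&lun->shutdown_comp);
    }

    /* Variant 1 (lun_ref): kill, then wait for the release callback. */
    static void demo_clear_lun_ref(struct demo_lun *lun)
    {
            percpu_ref_kill(&lun->ref);
            wait_for_completion(&lun->shutdown_comp);
    }

    /* Variant 2 (cmd_count): kill, then wait for zero, retrying on timeout. */
    static void demo_drain_session(struct percpu_ref *cmd_count,
                                   wait_queue_head_t *wq)
    {
            percpu_ref_kill(cmd_count);
            while (wait_event_timeout(*wq, percpu_ref_is_zero(cmd_count),
                                      180 * HZ) <= 0)
                    ;       /* log stuck commands here, as the patch does */
    }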
@@ -3229,6 +3255,8 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
{
unsigned long flags;
+ WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
+
spin_lock_irqsave(&cmd->t_state_lock, flags);
if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -3245,114 +3273,15 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
-static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
- __releases(&cmd->t_state_lock)
- __acquires(&cmd->t_state_lock)
-{
- int ret;
-
- assert_spin_locked(&cmd->t_state_lock);
- WARN_ON_ONCE(!irqs_disabled());
-
- if (!(cmd->transport_state & CMD_T_ABORTED))
- return 0;
- /*
- * If cmd has been aborted but either no status is to be sent or it has
- * already been sent, just return
- */
- if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
- if (send_status)
- cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
- return 1;
- }
-
- pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
- " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
-
- cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
- cmd->scsi_status = SAM_STAT_TASK_ABORTED;
- trace_target_cmd_complete(cmd);
-
- spin_unlock_irq(&cmd->t_state_lock);
- ret = cmd->se_tfo->queue_status(cmd);
- if (ret)
- transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
- spin_lock_irq(&cmd->t_state_lock);
-
- return 1;
-}
-
-int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
-{
- int ret;
-
- spin_lock_irq(&cmd->t_state_lock);
- ret = __transport_check_aborted_status(cmd, send_status);
- spin_unlock_irq(&cmd->t_state_lock);
-
- return ret;
-}
-EXPORT_SYMBOL(transport_check_aborted_status);
-
-void transport_send_task_abort(struct se_cmd *cmd)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
- }
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- /*
- * If there are still expected incoming fabric WRITEs, we wait
- * until until they have completed before sending a TASK_ABORTED
- * response. This response with TASK_ABORTED status will be
- * queued back to fabric module by transport_check_aborted_status().
- */
- if (cmd->data_direction == DMA_TO_DEVICE) {
- if (cmd->se_tfo->write_pending_status(cmd) != 0) {
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- goto send_abort;
- }
- cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
- }
- }
-send_abort:
- cmd->scsi_status = SAM_STAT_TASK_ABORTED;
-
- transport_lun_remove_cmd(cmd);
-
- pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
- cmd->t_task_cdb[0], cmd->tag);
-
- trace_target_cmd_complete(cmd);
- ret = cmd->se_tfo->queue_status(cmd);
- if (ret)
- transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
-}
-
static void target_tmr_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
struct se_device *dev = cmd->se_dev;
struct se_tmr_req *tmr = cmd->se_tmr_req;
- unsigned long flags;
int ret;
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (cmd->transport_state & CMD_T_ABORTED) {
- tmr->response = TMR_FUNCTION_REJECTED;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- goto check_stop;
- }
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ if (cmd->transport_state & CMD_T_ABORTED)
+ goto aborted;
switch (tmr->function) {
case TMR_ABORT_TASK:
@@ -3386,18 +3315,16 @@ static void target_tmr_work(struct work_struct *work)
break;
}
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (cmd->transport_state & CMD_T_ABORTED) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- goto check_stop;
- }
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ if (cmd->transport_state & CMD_T_ABORTED)
+ goto aborted;
cmd->se_tfo->queue_tm_rsp(cmd);
-check_stop:
- transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
+ return;
+
+aborted:
+ target_handle_abort(cmd);
}
int transport_generic_handle_tmr(
@@ -3416,16 +3343,15 @@ int transport_generic_handle_tmr(
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
if (aborted) {
- pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
- "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
- cmd->se_tmr_req->ref_task_tag, cmd->tag);
- transport_lun_remove_cmd(cmd);
- transport_cmd_check_stop_to_fabric(cmd);
+ pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n",
+ cmd->se_tmr_req->function,
+ cmd->se_tmr_req->ref_task_tag, cmd->tag);
+ target_handle_abort(cmd);
return 0;
}
INIT_WORK(&cmd->work, target_tmr_work);
- queue_work(cmd->se_dev->tmr_wq, &cmd->work);
+ schedule_work(&cmd->work);
return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);
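
With the dedicated dev->tmr_wq gone, TMR processing now rides the system workqueue. The queueing step reduces to the standard pattern below (sketch):

    #include <linux/workqueue.h>

    /* schedule_work() is shorthand for queue_work(system_wq, work). */
    static void demo_queue_tmr(struct se_cmd *cmd)
    {
            INIT_WORK(&cmd->work, target_tmr_work);
            schedule_work(&cmd->work);
    }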
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index c8ac242ce888..ced1c10364eb 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -266,7 +266,7 @@ bool core_scsi3_ua_for_check_condition(struct se_cmd *cmd, u8 *key, u8 *asc,
pr_debug("[%s]: %s UNIT ATTENTION condition with"
" INTLCK_CTRL: %d, mapped LUN: %llu, got CDB: 0x%02x"
" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
- nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+ nacl->se_tpg->se_tpg_tfo->fabric_name,
(dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
"Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl,
cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
@@ -327,7 +327,7 @@ int core_scsi3_ua_clear_for_request_sense(
pr_debug("[%s]: Released UNIT ATTENTION condition, mapped"
" LUN: %llu, got REQUEST_SENSE reported ASC: 0x%02x,"
- " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+ " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->fabric_name,
cmd->orig_fe_lun, *asc, *ascq);
return (head) ? -EPERM : 0;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9cd404acdb82..1e6d24943565 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -958,7 +958,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
* 0 success
* 1 internally queued to wait for ring memory to free.
*/
-static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
+static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
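
The tcmu hunk above is a type fix: per the comment in context, the function returns a plain int status (0 = queued, 1 = internally queued to wait for ring memory), while the SCSI failure reason travels through the out-parameter, so the two types in the old signature were swapped. A runnable userspace illustration of the corrected convention (types reduced to stand-ins; the constant values are hypothetical):

    #include <stdio.h>

    typedef int sense_reason_t;             /* stand-in for the kernel typedef */
    #define TCM_NO_SENSE            0       /* hypothetical values */
    #define TCM_OUT_OF_RESOURCES    1

    /* Returns 0 (queued) or 1 (deferred); *scsi_err reports why on failure. */
    static int queue_cmd_demo(int ring_space, sense_reason_t *scsi_err)
    {
            *scsi_err = TCM_NO_SENSE;
            if (!ring_space) {
                    *scsi_err = TCM_OUT_OF_RESOURCES;
                    return 1;       /* wait for ring memory to free */
            }
            return 0;
    }

    int main(void)
    {
            sense_reason_t err;

            printf("%d\n", queue_cmd_demo(0, &err));        /* prints 1 */
            return 0;
    }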
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 70adcfdca8d1..c2e1fc927fdf 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -399,11 +399,6 @@ struct se_portal_group xcopy_pt_tpg;
static struct se_session xcopy_pt_sess;
static struct se_node_acl xcopy_pt_nacl;
-static char *xcopy_pt_get_fabric_name(void)
-{
- return "xcopy-pt";
-}
-
static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
{
return 0;
@@ -463,7 +458,7 @@ static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
}
static const struct target_core_fabric_ops xcopy_pt_tfo = {
- .get_fabric_name = xcopy_pt_get_fabric_name,
+ .fabric_name = "xcopy-pt",
.get_cmd_state = xcopy_pt_get_cmd_state,
.release_cmd = xcopy_pt_release_cmd,
.check_stop_free = xcopy_pt_check_stop_free,
@@ -479,6 +474,8 @@ static const struct target_core_fabric_ops xcopy_pt_tfo = {
int target_xcopy_setup_pt(void)
{
+ int ret;
+
xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
if (!xcopy_wq) {
pr_err("Unable to allocate xcopy_wq\n");
@@ -496,7 +493,9 @@ int target_xcopy_setup_pt(void)
INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
- transport_init_session(&xcopy_pt_sess);
+ ret = transport_init_session(&xcopy_pt_sess);
+ if (ret < 0)
+ return ret;
xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index e55c4d537592..1ce49518d440 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -392,11 +392,6 @@ static inline struct ft_tpg *ft_tpg(struct se_portal_group *se_tpg)
return container_of(se_tpg, struct ft_tpg, se_tpg);
}
-static char *ft_get_fabric_name(void)
-{
- return "fc";
-}
-
static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
{
return ft_tpg(se_tpg)->lport_wwn->name;
@@ -427,9 +422,8 @@ static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
static const struct target_core_fabric_ops ft_fabric_ops = {
.module = THIS_MODULE,
- .name = "fc",
+ .fabric_name = "fc",
.node_acl_size = sizeof(struct ft_node_acl),
- .get_fabric_name = ft_get_fabric_name,
.tpg_get_wwn = ft_get_fabric_wwn,
.tpg_get_tag = ft_get_tag,
.tpg_check_demo_mode = ft_check_false,
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 106988a6661a..34f5982cab78 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1256,11 +1256,6 @@ static int usbg_check_false(struct se_portal_group *se_tpg)
return 0;
}
-static char *usbg_get_fabric_name(void)
-{
- return "usb_gadget";
-}
-
static char *usbg_get_fabric_wwn(struct se_portal_group *se_tpg)
{
struct usbg_tpg *tpg = container_of(se_tpg,
@@ -1718,8 +1713,7 @@ static int usbg_check_stop_free(struct se_cmd *se_cmd)
static const struct target_core_fabric_ops usbg_ops = {
.module = THIS_MODULE,
- .name = "usb_gadget",
- .get_fabric_name = usbg_get_fabric_name,
+ .fabric_name = "usb_gadget",
.tpg_get_wwn = usbg_get_fabric_wwn,
.tpg_get_tag = usbg_get_tag,
.tpg_check_demo_mode = usbg_check_true,
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 9f2f563c82ed..607be1f4fe27 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -632,7 +632,6 @@ static struct scsi_host_template mts_scsi_host_template = {
.sg_tablesize = SG_ALL,
.can_queue = 1,
.this_id = -1,
- .use_clustering = 1,
.emulated = 1,
.slave_alloc = mts_slave_alloc,
.slave_configure = mts_slave_configure,
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index e227bb5b794f..fde2e71a6ade 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -639,13 +639,6 @@ static const struct scsi_host_template usb_stor_host_template = {
*/
.max_sectors = 240,
- /*
- * merge commands... this seems to help performance, but
- * periodically someone should test to see which setting is more
- * optimal.
- */
- .use_clustering = 1,
-
/* emulated HBA */
.emulated = 1,
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 1f7b401c4d04..36742e8e7edc 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -879,6 +879,7 @@ static struct scsi_host_template uas_host_template = {
.this_id = -1,
.sg_tablesize = SG_NONE,
.skip_settle_delay = 1,
+ .dma_boundary = PAGE_SIZE - 1,
};
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 50dffe83714c..a08472ae5b1b 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -285,11 +285,6 @@ static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
return 0;
}
-static char *vhost_scsi_get_fabric_name(void)
-{
- return "vhost";
-}
-
static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
{
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
@@ -2289,8 +2284,7 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
static const struct target_core_fabric_ops vhost_scsi_ops = {
.module = THIS_MODULE,
- .name = "vhost",
- .get_fabric_name = vhost_scsi_get_fabric_name,
+ .fabric_name = "vhost",
.tpg_get_wwn = vhost_scsi_get_fabric_wwn,
.tpg_get_tag = vhost_scsi_get_tpgt,
.tpg_check_demo_mode = vhost_scsi_check_true,
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 14a3d4cbc2a7..c9e23a126218 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1712,11 +1712,6 @@ static struct configfs_attribute *scsiback_wwn_attrs[] = {
NULL,
};
-static char *scsiback_get_fabric_name(void)
-{
- return "xen-pvscsi";
-}
-
static int scsiback_port_link(struct se_portal_group *se_tpg,
struct se_lun *lun)
{
@@ -1810,8 +1805,7 @@ static int scsiback_check_false(struct se_portal_group *se_tpg)
static const struct target_core_fabric_ops scsiback_ops = {
.module = THIS_MODULE,
- .name = "xen-pvscsi",
- .get_fabric_name = scsiback_get_fabric_name,
+ .fabric_name = "xen-pvscsi",
.tpg_get_wwn = scsiback_get_fabric_wwn,
.tpg_get_tag = scsiback_get_tag,
.tpg_check_demo_mode = scsiback_check_true,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 45552e6eae1e..338604dff7d0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -340,7 +340,6 @@ struct queue_limits {
unsigned char misaligned;
unsigned char discard_misaligned;
- unsigned char cluster;
unsigned char raid_partial_stripes_expensive;
enum blk_zoned_model zoned;
};
@@ -658,11 +657,6 @@ static inline bool queue_is_mq(struct request_queue *q)
return q->mq_ops;
}
-static inline unsigned int blk_queue_cluster(struct request_queue *q)
-{
- return q->limits.cluster;
-}
-
static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 38c95d66ab12..68133842e6d7 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -135,7 +135,6 @@ enum {
ATA_SHT_EMULATED = 1,
ATA_SHT_THIS_ID = -1,
- ATA_SHT_USE_CLUSTERING = 1,
/* struct ata_taskfile flags */
ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */
@@ -1360,7 +1359,6 @@ extern struct device_attribute *ata_common_sdev_attrs[];
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
.this_id = ATA_SHT_THIS_ID, \
.emulated = ATA_SHT_EMULATED, \
- .use_clustering = ATA_SHT_USE_CLUSTERING, \
.proc_name = drv_name, \
.slave_configure = ata_scsi_slave_config, \
.slave_destroy = ata_scsi_slave_destroy, \
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index aa760df8c6b3..6ca954e9f752 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -42,9 +42,6 @@ struct scsi_transport_template;
#define MODE_INITIATOR 0x01
#define MODE_TARGET 0x02
-#define DISABLE_CLUSTERING 0
-#define ENABLE_CLUSTERING 1
-
struct scsi_host_template {
struct module *module;
const char *name;
@@ -364,6 +361,11 @@ struct scsi_host_template {
unsigned int max_sectors;
/*
+ * Maximum size in bytes of a single segment.
+ */
+ unsigned int max_segment_size;
+
+ /*
* DMA scatter gather segment boundary limit. A segment crossing this
* boundary will be split in two.
*/
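
max_segment_size joins the existing dma_boundary as a second per-host segment constraint: a scatter-gather segment may neither exceed max_segment_size bytes nor cross a (dma_boundary + 1)-aligned address boundary. Together these replace the coarse use_clustering on/off flag removed below; uas, for example, now expresses its old no-clustering behaviour as .dma_boundary = PAGE_SIZE - 1. The arithmetic, as a runnable userspace illustration:

    #include <stdio.h>

    /*
     * How many bytes of a buffer at addr can land in one segment, given a
     * maximum segment size and a boundary mask (e.g. PAGE_SIZE - 1)?
     */
    static unsigned long segment_span(unsigned long addr, unsigned long len,
                                      unsigned long max_seg,
                                      unsigned long boundary_mask)
    {
            unsigned long to_boundary = boundary_mask + 1 - (addr & boundary_mask);
            unsigned long span = len < max_seg ? len : max_seg;

            return span < to_boundary ? span : to_boundary;
    }

    int main(void)
    {
            /* 8 KiB buffer at offset 0xE00 with a 4 KiB boundary mask */
            printf("%lu\n", segment_span(0xE00, 8192, 65536, 0xFFF)); /* 512 */
            return 0;
    }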
@@ -413,16 +415,6 @@ struct scsi_host_template {
unsigned unchecked_isa_dma:1;
/*
- * True if this host adapter can make good use of clustering.
- * I originally thought that if the tablesize was large that it
- * was a waste of CPU cycles to prepare a cluster list, but
- * it works out that the Buslogic is faster if you use a smaller
- * number of segments (i.e. use clustering). I guess it is
- * inefficient.
- */
- unsigned use_clustering:1;
-
- /*
* True for emulated SCSI host adapters (e.g. ATAPI).
*/
unsigned emulated:1;
@@ -596,6 +588,7 @@ struct Scsi_Host {
short unsigned int sg_tablesize;
short unsigned int sg_prot_tablesize;
unsigned int max_sectors;
+ unsigned int max_segment_size;
unsigned long dma_boundary;
/*
* In scsi-mq mode, the number of hardware queues supported by the LLD.
@@ -613,7 +606,6 @@ struct Scsi_Host {
unsigned active_mode:2;
unsigned unchecked_isa_dma:1;
- unsigned use_clustering:1;
/*
* Host has requested that no further requests come through for the
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index e3bdb0550a59..69b7b955902c 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -46,6 +46,10 @@
/* Used by transport_get_inquiry_vpd_device_ident() */
#define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN 254
+#define INQUIRY_VENDOR_LEN 8
+#define INQUIRY_MODEL_LEN 16
+#define INQUIRY_REVISION_LEN 4
+
/* Attempts before moving from SHORT to LONG */
#define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD 3
#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT 3 /* In milliseconds */
@@ -87,6 +91,8 @@
#define DA_EMULATE_3PC 1
/* No Emulation for PSCSI by default */
#define DA_EMULATE_ALUA 0
+/* Emulate SCSI2 RESERVE/RELEASE and Persistent Reservations by default */
+#define DA_EMULATE_PR 1
/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
#define DA_ENFORCE_PR_ISIDS 1
/* Force SPC-3 PR Activate Persistence across Target Power Loss */
@@ -134,7 +140,6 @@ enum se_cmd_flags_table {
SCF_SENT_CHECK_CONDITION = 0x00000800,
SCF_OVERFLOW_BIT = 0x00001000,
SCF_UNDERFLOW_BIT = 0x00002000,
- SCF_SEND_DELAYED_TAS = 0x00004000,
SCF_ALUA_NON_OPTIMIZED = 0x00008000,
SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
SCF_COMPARE_AND_WRITE = 0x00080000,
@@ -314,9 +319,13 @@ struct t10_vpd {
};
struct t10_wwn {
- char vendor[8];
- char model[16];
- char revision[4];
+ /*
+ * SCSI left aligned strings may not be null terminated. +1 to ensure a
+ * null terminator is always present.
+ */
+ char vendor[INQUIRY_VENDOR_LEN + 1];
+ char model[INQUIRY_MODEL_LEN + 1];
+ char revision[INQUIRY_REVISION_LEN + 1];
char unit_serial[INQUIRY_VPD_SERIAL_LEN];
spinlock_t t10_vpd_lock;
struct se_device *t10_dev;
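
The t10_wwn change works because INQUIRY strings are fixed-width, space-padded, and not NUL-terminated on the wire; the extra byte guarantees that a full-width copy still leaves room for a terminator. Runnable userspace illustration (the vendor value is made up):

    #include <stdio.h>
    #include <string.h>

    #define INQUIRY_VENDOR_LEN 8

    int main(void)
    {
            /* exactly 8 bytes, space-padded, no NUL, as on the wire */
            const char wire_vendor[INQUIRY_VENDOR_LEN] =
                    { 'L', 'I', 'O', '-', 'O', 'R', 'G', ' ' };
            char vendor[INQUIRY_VENDOR_LEN + 1] = "";

            memcpy(vendor, wire_vendor, INQUIRY_VENDOR_LEN);
            vendor[INQUIRY_VENDOR_LEN] = '\0';      /* safe: index 8 of 9 */
            printf("'%s'\n", vendor);               /* 'LIO-ORG ' */
            return 0;
    }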
@@ -474,7 +483,8 @@ struct se_cmd {
struct se_session *se_sess;
struct se_tmr_req *se_tmr_req;
struct list_head se_cmd_list;
- struct completion *compl;
+ struct completion *free_compl;
+ struct completion *abrt_compl;
const struct target_core_fabric_ops *se_tfo;
sense_reason_t (*execute_cmd)(struct se_cmd *);
sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *);
@@ -601,6 +611,7 @@ struct se_session {
struct se_node_acl *se_node_acl;
struct se_portal_group *se_tpg;
void *fabric_sess_ptr;
+ struct percpu_ref cmd_count;
struct list_head sess_list;
struct list_head sess_acl_list;
struct list_head sess_cmd_list;
@@ -664,7 +675,7 @@ struct se_dev_attrib {
int emulate_tpws;
int emulate_caw;
int emulate_3pc;
- int pi_prot_format;
+ int emulate_pr;
enum target_prot_type pi_prot_type;
enum target_prot_type hw_pi_prot_type;
int pi_prot_verify;
@@ -731,7 +742,6 @@ struct se_lun {
struct scsi_port_stats lun_stats;
struct config_group lun_group;
struct se_port_stat_grps port_stat_grps;
- struct completion lun_ref_comp;
struct completion lun_shutdown_comp;
struct percpu_ref lun_ref;
struct list_head lun_dev_link;
@@ -794,7 +804,6 @@ struct se_device {
struct t10_pr_registration *dev_pr_res_holder;
struct list_head dev_sep_list;
struct list_head dev_tmr_list;
- struct workqueue_struct *tmr_wq;
struct work_struct qf_work_queue;
struct list_head delayed_cmd_list;
struct list_head state_list;
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index f4147b398431..ee5ddd81cd8d 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -8,7 +8,18 @@
struct target_core_fabric_ops {
struct module *module;
- const char *name;
+ /*
+ * XXX: Special case for iscsi/iSCSI...
+ * If non-null, fabric_alias is used for matching target/$fabric
+ * ConfigFS paths. If null, fabric_name is used for this (see below).
+ */
+ const char *fabric_alias;
+ /*
+ * fabric_name is used for matching target/$fabric ConfigFS paths
+ * without a fabric_alias (see above). It's also used for the ALUA state
+ * path and is stored on disk with PR state.
+ */
+ const char *fabric_name;
size_t node_acl_size;
/*
* Limits number of scatterlist entries per SCF_SCSI_DATA_CDB payload.
@@ -23,7 +34,6 @@ struct target_core_fabric_ops {
* XXX: Currently assumes single PAGE_SIZE per scatterlist entry
*/
u32 max_data_sg_nents;
- char *(*get_fabric_name)(void);
char *(*tpg_get_wwn)(struct se_portal_group *);
u16 (*tpg_get_tag)(struct se_portal_group *);
u32 (*tpg_get_default_depth)(struct se_portal_group *);
@@ -101,6 +111,13 @@ struct target_core_fabric_ops {
struct configfs_attribute **tfc_tpg_nacl_attrib_attrs;
struct configfs_attribute **tfc_tpg_nacl_auth_attrs;
struct configfs_attribute **tfc_tpg_nacl_param_attrs;
+
+ /*
+ * Set this member variable to true if the SCSI transport protocol
+ * (e.g. iSCSI) requires that the Data-Out buffer be transferred in
+ * its entirety before a command is aborted.
+ */
+ bool write_pending_must_be_called;
};
int target_register_template(const struct target_core_fabric_ops *fo);
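
After this series a fabric driver advertises its name as data rather than through a callback. A sketch of the converted shape (demo names and the WWN value are hypothetical; only the name-related fields are shown):

    #include <linux/module.h>
    #include <target/target_core_fabric.h>

    static char *demo_get_wwn(struct se_portal_group *se_tpg)
    {
            return "naa.6001405000000001";  /* hypothetical WWN */
    }

    static const struct target_core_fabric_ops demo_ops = {
            .module      = THIS_MODULE,
            .fabric_name = "demo",  /* replaces .name and .get_fabric_name() */
            .tpg_get_wwn = demo_get_wwn,
            /* remaining mandatory callbacks elided */
    };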
@@ -116,7 +133,7 @@ struct se_session *target_setup_session(struct se_portal_group *,
struct se_session *, void *));
void target_remove_session(struct se_session *);
-void transport_init_session(struct se_session *);
+int transport_init_session(struct se_session *se_sess);
struct se_session *transport_alloc_session(enum target_prot_op);
int transport_alloc_session_tags(struct se_session *, unsigned int,
unsigned int);
@@ -149,12 +166,12 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
int transport_handle_cdb_direct(struct se_cmd *);
sense_reason_t transport_generic_new_cmd(struct se_cmd *);
+void target_put_cmd_and_wait(struct se_cmd *cmd);
void target_execute_cmd(struct se_cmd *cmd);
int transport_generic_free_cmd(struct se_cmd *, int);
bool transport_wait_for_tasks(struct se_cmd *);
-int transport_check_aborted_status(struct se_cmd *, int);
int transport_send_check_condition_and_sense(struct se_cmd *,
sense_reason_t, int);
int target_get_sess_cmd(struct se_cmd *, bool);
diff --git a/include/trace/events/iscsi.h b/include/trace/events/iscsi.h
new file mode 100644
index 000000000000..87408faf6e4e
--- /dev/null
+++ b/include/trace/events/iscsi.h
@@ -0,0 +1,107 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iscsi
+
+#if !defined(_TRACE_ISCSI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ISCSI_H
+
+#include <linux/tracepoint.h>
+
+/* max debug message length */
+#define ISCSI_MSG_MAX 256
+
+/*
+ * Declare tracepoint helper function.
+ */
+void iscsi_dbg_trace(void (*trace)(struct device *dev, struct va_format *),
+ struct device *dev, const char *fmt, ...);
+
+/*
+ * Declare event class for iscsi debug messages.
+ */
+DECLARE_EVENT_CLASS(iscsi_log_msg,
+
+ TP_PROTO(struct device *dev, struct va_format *vaf),
+
+ TP_ARGS(dev, vaf),
+
+ TP_STRUCT__entry(
+ __string(dname, dev_name(dev) )
+ __dynamic_array(char, msg, ISCSI_MSG_MAX )
+ ),
+
+ TP_fast_assign(
+ __assign_str(dname, dev_name(dev));
+ vsnprintf(__get_str(msg), ISCSI_MSG_MAX, vaf->fmt, *vaf->va);
+ ),
+
+	TP_printk("%s: %s", __get_str(dname), __get_str(msg)
+ )
+);
+
+/*
+ * Define event to capture iscsi connection debug messages.
+ */
+DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_conn,
+ TP_PROTO(struct device *dev, struct va_format *vaf),
+
+ TP_ARGS(dev, vaf)
+);
+
+/*
+ * Define event to capture iscsi session debug messages.
+ */
+DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_session,
+ TP_PROTO(struct device *dev, struct va_format *vaf),
+
+ TP_ARGS(dev, vaf)
+);
+
+/*
+ * Define event to capture iscsi error handling debug messages.
+ */
+DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_eh,
+ TP_PROTO(struct device *dev, struct va_format *vaf),
+
+ TP_ARGS(dev, vaf)
+);
+
+/*
+ * Define event to capture iscsi tcp debug messages.
+ */
+DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_tcp,
+ TP_PROTO(struct device *dev, struct va_format *vaf),
+
+ TP_ARGS(dev, vaf)
+);
+
+/*
+ * Define event to capture iscsi sw tcp debug messages.
+ */
+DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_sw_tcp,
+ TP_PROTO(struct device *dev, struct va_format *vaf),
+
+ TP_ARGS(dev, vaf)
+);
+
+/*
+ * Define event to capture iscsi transport session debug messages.
+ */
+DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_trans_session,
+ TP_PROTO(struct device *dev, struct va_format *vaf),
+
+ TP_ARGS(dev, vaf)
+);
+
+/*
+ * Define event to capture iscsi transport connection debug messages.
+ */
+DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_trans_conn,
+ TP_PROTO(struct device *dev, struct va_format *vaf),
+
+ TP_ARGS(dev, vaf)
+);
+
+#endif /* _TRACE_ISCSI_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
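
The header above only declares iscsi_dbg_trace(). A plausible implementation of the helper, assuming the usual va_format pattern (a sketch; the actual body is added to the iscsi code by the tracepoints patch in this pull), expands the varargs once and hands them to whichever trace event the caller selected:

    #include <linux/kernel.h>
    #include <linux/device.h>

    #define CREATE_TRACE_POINTS
    #include <trace/events/iscsi.h>

    void iscsi_dbg_trace(void (*trace)(struct device *dev, struct va_format *),
                         struct device *dev, const char *fmt, ...)
    {
            struct va_format vaf;
            va_list args;

            va_start(args, fmt);
            vaf.fmt = fmt;
            vaf.va = &args;
            trace(dev, &vaf);       /* e.g. trace_iscsi_dbg_conn */
            va_end(args);
    }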