author    Linus Torvalds <torvalds@linux-foundation.org>  2014-10-08 05:29:18 +0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-10-08 05:29:18 +0400
commit    9a50aaefc1b896e734bf7faf3d085f71a360ce97 (patch)
tree      82fd9003d6c1d1a7dcf107f46d77b92f11d3f697 /drivers/scsi/qla2xxx
parent    1e345ac6869cd2f2d5d4b780fc5d5332dd1e8905 (diff)
parent    19ac97ff701ae3c0eda09dd88f590806f1c4eb2d (diff)
download  linux-9a50aaefc1b896e734bf7faf3d085f71a360ce97.tar.xz
Merge tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This patch set consists of the usual driver updates (megaraid_sas,
  arcmsr, be2iscsi, lpfc, mpt2sas, mpt3sas, qla2xxx, ufs) plus several
  assorted fixes and miscellaneous updates (including the
  pci_enable_msix_range() changes that have been pending for a while)"

* tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (202 commits)
  scsi: add a CONFIG_SCSI_MQ_DEFAULT option
  ufs: definitions for phy interface
  ufs: tune bkops while power managment events
  ufs: Add support for clock scaling using devfreq framework
  ufs: Add freq-table-hz property for UFS device
  ufs: Add support for clock gating
  ufs: refactor configuring power mode
  ufs: add UFS power management support
  ufs: introduce well known logical unit in ufs
  ufs: manually add well known logical units
  ufs: Active Power Mode - configuring bActiveICCLevel
  ufs: improve init sequence
  ufs: refactor query descriptor API support
  ufs: add voting support for host controller power
  ufs: Add clock initialization support
  ufs: Add regulator enable support
  ufs: Allow vendor specific initialization
  scsi: don't add scsi_device if its already visible
  scsi: fix the type for well known LUs
  scsi: fix comment in struct Scsi_Host definition
  ...
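Note: for readers unfamiliar with the pci_enable_msix_range() API referenced in the pull
message, the sketch below shows the usual calling convention a PCI driver uses to request
a range of MSI-X vectors. It is illustrative only and not part of this merge; the helper
name example_setup_msix and its variables are made up, while pci_enable_msix_range() and
struct msix_entry are the real kernel interfaces of this era.

    #include <linux/pci.h>

    /* Hypothetical helper: request between 1 and 4 MSI-X vectors.
     * pci_enable_msix_range() returns the number of vectors actually
     * allocated (at least minvec) or a negative errno on failure.
     */
    static int example_setup_msix(struct pci_dev *pdev, struct msix_entry *entries)
    {
            int i, nvec;

            for (i = 0; i < 4; i++)
                    entries[i].entry = i;   /* index of each requested vector */

            nvec = pci_enable_msix_range(pdev, entries, 1, 4);
            if (nvec < 0)
                    return nvec;            /* could not allocate even minvec vectors */

            /* entries[i].vector now holds the Linux IRQ number for each of the
             * nvec vectors that were granted. */
            return nvec;
    }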
Diffstat (limited to 'drivers/scsi/qla2xxx')
 -rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c     |  16
 -rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c      |   2
 -rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c      |  34
 -rw-r--r--  drivers/scsi/qla2xxx/qla_def.h      | 191
 -rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h       |   8
 -rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h      |   9
 -rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c       | 943
 -rw-r--r--  drivers/scsi/qla2xxx/qla_init.c     |  41
 -rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h   |   8
 -rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c     |  17
 -rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c      | 133
 -rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c      |  43
 -rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c      |   2
 -rw-r--r--  drivers/scsi/qla2xxx/qla_mr.c       |  16
 -rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c       |   8
 -rw-r--r--  drivers/scsi/qla2xxx/qla_os.c       | 132
 -rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c      |   7
 -rw-r--r--  drivers/scsi/qla2xxx/qla_target.c   | 693
 -rw-r--r--  drivers/scsi/qla2xxx/qla_target.h   |  31
 -rw-r--r--  drivers/scsi/qla2xxx/qla_tmpl.c     | 106
 -rw-r--r--  drivers/scsi/qla2xxx/qla_tmpl.h     |   8
 -rw-r--r--  drivers/scsi/qla2xxx/qla_version.h  |   2
 -rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c  |  45
23 files changed, 1943 insertions, 552 deletions
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 16fe5196e6d9..82b92c414a9c 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -484,7 +484,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
start == (ha->flt_region_fw * 4))
valid = 1;
else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)
- || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
+ || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)
+ || IS_QLA27XX(ha))
valid = 1;
if (!valid) {
ql_log(ql_log_warn, vha, 0x7065,
@@ -987,6 +988,8 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
continue;
if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
continue;
+ if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))
+ continue;
sysfs_remove_bin_file(&host->shost_gendev.kobj,
iter->attr);
@@ -1014,7 +1017,7 @@ qla2x00_fw_version_show(struct device *dev,
char fw_str[128];
return scnprintf(buf, PAGE_SIZE, "%s\n",
- ha->isp_ops->fw_version_str(vha, fw_str));
+ ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str)));
}
static ssize_t
@@ -1440,7 +1443,7 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
int rval = QLA_FUNCTION_FAILED;
- uint16_t state[5];
+ uint16_t state[6];
uint32_t pstate;
if (IS_QLAFX00(vha->hw)) {
@@ -1456,8 +1459,8 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
if (rval != QLA_SUCCESS)
memset(state, -1, sizeof(state));
- return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
- state[1], state[2], state[3], state[4]);
+ return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ state[0], state[1], state[2], state[3], state[4], state[5]);
}
static ssize_t
@@ -1924,7 +1927,8 @@ qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
{
scsi_qla_host_t *vha = shost_priv(shost);
- qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
+ qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost),
+ sizeof(fc_host_symbolic_name(shost)));
}
static void
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 524f9eb7fcd1..2e2bb6f45ce6 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1390,7 +1390,7 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
start == (ha->flt_region_fw * 4))
valid = 1;
else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
- IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
+ IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
valid = 1;
if (!valid) {
ql_log(ql_log_warn, vha, 0x7058,
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index c72ee97bf3f7..d77fe43793b6 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,19 +11,15 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x017d | 0x004b,0x0141 |
- * | | | 0x0144,0x0146 |
+ * | Module Init and Probe | 0x017d | 0x0144,0x0146 |
* | | | 0x015b-0x0160 |
* | | | 0x016e-0x0170 |
- * | Mailbox commands | 0x118d | 0x1018-0x1019 |
- * | | | 0x10ca |
- * | | | 0x1115-0x1116 |
- * | | | 0x111a-0x111b |
- * | | | 0x1155-0x1158 |
- * | Device Discovery | 0x2095 | 0x2020-0x2022, |
+ * | Mailbox commands | 0x118d | 0x1115-0x1116 |
+ * | | | 0x111a-0x111b |
+ * | Device Discovery | 0x2016 | 0x2020-0x2022, |
* | | | 0x2011-0x2012, |
- * | | | 0x2016 |
- * | Queue Command and IO tracing | 0x3059 | 0x3006-0x300b |
+ * | | | 0x2099-0x20a4 |
+ * | Queue Command and IO tracing | 0x3059 | 0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
* | | | 0x302d,0x3033 |
@@ -31,10 +27,10 @@
* | | | 0x303a |
* | DPC Thread | 0x4023 | 0x4002,0x4013 |
* | Async Events | 0x5087 | 0x502b-0x502f |
- * | | | 0x5047,0x5052 |
+ * | | | 0x5047 |
* | | | 0x5084,0x5075 |
* | | | 0x503d,0x5044 |
- * | | | 0x507b |
+ * | | | 0x507b,0x505f |
* | Timer Routines | 0x6012 | |
* | User Space Interactions | 0x70e2 | 0x7018,0x702e |
* | | | 0x7020,0x7024 |
@@ -64,13 +60,15 @@
* | | | 0xb13c-0xb140 |
* | | | 0xb149 |
* | MultiQ | 0xc00c | |
- * | Misc | 0xd212 | 0xd017-0xd019 |
- * | | | 0xd020 |
- * | | | 0xd030-0xd0ff |
+ * | Misc | 0xd213 | 0xd011-0xd017 |
+ * | | | 0xd021,0xd024 |
+ * | | | 0xd025,0xd029 |
+ * | | | 0xd02a,0xd02e |
+ * | | | 0xd031-0xd0ff |
* | | | 0xd101-0xd1fe |
- * | | | 0xd213-0xd2fe |
- * | Target Mode | 0xe078 | |
- * | Target Mode Management | 0xf072 | 0xf002-0xf003 |
+ * | | | 0xd214-0xd2fe |
+ * | Target Mode | 0xe079 | |
+ * | Target Mode Management | 0xf072 | 0xf002 |
* | | | 0xf046-0xf049 |
* | Target Mode Task Management | 0x1000b | |
* ----------------------------------------------------------------------
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index b64399153135..5f6b2960cccb 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -191,6 +191,11 @@
* reset-recovery completion is
* second
*/
+/* ISP2031: Values for laser on/off */
+#define PORT_0_2031 0x00201340
+#define PORT_1_2031 0x00201350
+#define LASER_ON_2031 0x01800100
+#define LASER_OFF_2031 0x01800180
/*
* The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in an
@@ -261,6 +266,7 @@
#define REQUEST_ENTRY_CNT_2100 128 /* Number of request entries. */
#define REQUEST_ENTRY_CNT_2200 2048 /* Number of request entries. */
#define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */
+#define REQUEST_ENTRY_CNT_83XX 8192 /* Number of request entries. */
#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
@@ -803,6 +809,7 @@ struct mbx_cmd_32 {
#define MBA_FW_RESTART_CMPLT 0x8060 /* Firmware restart complete */
#define MBA_INIT_REQUIRED 0x8061 /* Initialization required */
#define MBA_SHUTDOWN_REQUESTED 0x8062 /* Shutdown Requested */
+#define MBA_DPORT_DIAGNOSTICS 0x8080 /* D-port Diagnostics */
#define MBA_FW_INIT_FAILURE 0x8401 /* Firmware initialization failure */
#define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change
Notification */
@@ -948,6 +955,7 @@ struct mbx_cmd_32 {
#define MBC_WRITE_SFP 0x30 /* Write SFP Data. */
#define MBC_READ_SFP 0x31 /* Read SFP Data. */
#define MBC_SET_TIMEOUT_PARAMS 0x32 /* Set FW timeouts. */
+#define MBC_DPORT_DIAGNOSTICS 0x47 /* D-Port Diagnostics */
#define MBC_MID_INITIALIZE_FIRMWARE 0x48 /* MID Initialize firmware. */
#define MBC_MID_GET_VP_DATABASE 0x49 /* MID Get VP Database. */
#define MBC_MID_GET_VP_ENTRY 0x4a /* MID Get VP Entry. */
@@ -2016,6 +2024,8 @@ typedef struct fc_port {
unsigned long last_ramp_up;
uint16_t port_id;
+
+ unsigned long retry_delay_timestamp;
} fc_port_t;
#include "qla_mr.h"
@@ -2056,10 +2066,21 @@ static const char * const port_state_str[] = {
#define CT_REJECT_RESPONSE 0x8001
#define CT_ACCEPT_RESPONSE 0x8002
-#define CT_REASON_INVALID_COMMAND_CODE 0x01
-#define CT_REASON_CANNOT_PERFORM 0x09
-#define CT_REASON_COMMAND_UNSUPPORTED 0x0b
-#define CT_EXPL_ALREADY_REGISTERED 0x10
+#define CT_REASON_INVALID_COMMAND_CODE 0x01
+#define CT_REASON_CANNOT_PERFORM 0x09
+#define CT_REASON_COMMAND_UNSUPPORTED 0x0b
+#define CT_EXPL_ALREADY_REGISTERED 0x10
+#define CT_EXPL_HBA_ATTR_NOT_REGISTERED 0x11
+#define CT_EXPL_MULTIPLE_HBA_ATTR 0x12
+#define CT_EXPL_INVALID_HBA_BLOCK_LENGTH 0x13
+#define CT_EXPL_MISSING_REQ_HBA_ATTR 0x14
+#define CT_EXPL_PORT_NOT_REGISTERED_ 0x15
+#define CT_EXPL_MISSING_HBA_ID_PORT_LIST 0x16
+#define CT_EXPL_HBA_NOT_REGISTERED 0x17
+#define CT_EXPL_PORT_ATTR_NOT_REGISTERED 0x20
+#define CT_EXPL_PORT_NOT_REGISTERED 0x21
+#define CT_EXPL_MULTIPLE_PORT_ATTR 0x22
+#define CT_EXPL_INVALID_PORT_BLOCK_LENGTH 0x23
#define NS_N_PORT_TYPE 0x01
#define NS_NL_PORT_TYPE 0x02
@@ -2116,33 +2137,40 @@ static const char * const port_state_str[] = {
* HBA attribute types.
*/
#define FDMI_HBA_ATTR_COUNT 9
-#define FDMI_HBA_NODE_NAME 1
-#define FDMI_HBA_MANUFACTURER 2
-#define FDMI_HBA_SERIAL_NUMBER 3
-#define FDMI_HBA_MODEL 4
-#define FDMI_HBA_MODEL_DESCRIPTION 5
-#define FDMI_HBA_HARDWARE_VERSION 6
-#define FDMI_HBA_DRIVER_VERSION 7
-#define FDMI_HBA_OPTION_ROM_VERSION 8
-#define FDMI_HBA_FIRMWARE_VERSION 9
+#define FDMIV2_HBA_ATTR_COUNT 17
+#define FDMI_HBA_NODE_NAME 0x1
+#define FDMI_HBA_MANUFACTURER 0x2
+#define FDMI_HBA_SERIAL_NUMBER 0x3
+#define FDMI_HBA_MODEL 0x4
+#define FDMI_HBA_MODEL_DESCRIPTION 0x5
+#define FDMI_HBA_HARDWARE_VERSION 0x6
+#define FDMI_HBA_DRIVER_VERSION 0x7
+#define FDMI_HBA_OPTION_ROM_VERSION 0x8
+#define FDMI_HBA_FIRMWARE_VERSION 0x9
#define FDMI_HBA_OS_NAME_AND_VERSION 0xa
#define FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH 0xb
+#define FDMI_HBA_NODE_SYMBOLIC_NAME 0xc
+#define FDMI_HBA_VENDOR_ID 0xd
+#define FDMI_HBA_NUM_PORTS 0xe
+#define FDMI_HBA_FABRIC_NAME 0xf
+#define FDMI_HBA_BOOT_BIOS_NAME 0x10
+#define FDMI_HBA_TYPE_VENDOR_IDENTIFIER 0xe0
struct ct_fdmi_hba_attr {
uint16_t type;
uint16_t len;
union {
uint8_t node_name[WWN_SIZE];
- uint8_t manufacturer[32];
- uint8_t serial_num[8];
+ uint8_t manufacturer[64];
+ uint8_t serial_num[32];
uint8_t model[16];
uint8_t model_desc[80];
- uint8_t hw_version[16];
+ uint8_t hw_version[32];
uint8_t driver_version[32];
uint8_t orom_version[16];
- uint8_t fw_version[16];
+ uint8_t fw_version[32];
uint8_t os_version[128];
- uint8_t max_ct_len[4];
+ uint32_t max_ct_len;
} a;
};
@@ -2151,16 +2179,56 @@ struct ct_fdmi_hba_attributes {
struct ct_fdmi_hba_attr entry[FDMI_HBA_ATTR_COUNT];
};
+struct ct_fdmiv2_hba_attr {
+ uint16_t type;
+ uint16_t len;
+ union {
+ uint8_t node_name[WWN_SIZE];
+ uint8_t manufacturer[32];
+ uint8_t serial_num[32];
+ uint8_t model[16];
+ uint8_t model_desc[80];
+ uint8_t hw_version[16];
+ uint8_t driver_version[32];
+ uint8_t orom_version[16];
+ uint8_t fw_version[32];
+ uint8_t os_version[128];
+ uint32_t max_ct_len;
+ uint8_t sym_name[256];
+ uint32_t vendor_id;
+ uint32_t num_ports;
+ uint8_t fabric_name[WWN_SIZE];
+ uint8_t bios_name[32];
+ uint8_t vendor_indentifer[8];
+ } a;
+};
+
+struct ct_fdmiv2_hba_attributes {
+ uint32_t count;
+ struct ct_fdmiv2_hba_attr entry[FDMIV2_HBA_ATTR_COUNT];
+};
+
/*
* Port attribute types.
*/
#define FDMI_PORT_ATTR_COUNT 6
-#define FDMI_PORT_FC4_TYPES 1
-#define FDMI_PORT_SUPPORT_SPEED 2
-#define FDMI_PORT_CURRENT_SPEED 3
-#define FDMI_PORT_MAX_FRAME_SIZE 4
-#define FDMI_PORT_OS_DEVICE_NAME 5
-#define FDMI_PORT_HOST_NAME 6
+#define FDMIV2_PORT_ATTR_COUNT 16
+#define FDMI_PORT_FC4_TYPES 0x1
+#define FDMI_PORT_SUPPORT_SPEED 0x2
+#define FDMI_PORT_CURRENT_SPEED 0x3
+#define FDMI_PORT_MAX_FRAME_SIZE 0x4
+#define FDMI_PORT_OS_DEVICE_NAME 0x5
+#define FDMI_PORT_HOST_NAME 0x6
+#define FDMI_PORT_NODE_NAME 0x7
+#define FDMI_PORT_NAME 0x8
+#define FDMI_PORT_SYM_NAME 0x9
+#define FDMI_PORT_TYPE 0xa
+#define FDMI_PORT_SUPP_COS 0xb
+#define FDMI_PORT_FABRIC_NAME 0xc
+#define FDMI_PORT_FC4_TYPE 0xd
+#define FDMI_PORT_STATE 0x101
+#define FDMI_PORT_COUNT 0x102
+#define FDMI_PORT_ID 0x103
#define FDMI_PORT_SPEED_1GB 0x1
#define FDMI_PORT_SPEED_2GB 0x2
@@ -2171,7 +2239,11 @@ struct ct_fdmi_hba_attributes {
#define FDMI_PORT_SPEED_32GB 0x40
#define FDMI_PORT_SPEED_UNKNOWN 0x8000
-struct ct_fdmi_port_attr {
+#define FC_CLASS_2 0x04
+#define FC_CLASS_3 0x08
+#define FC_CLASS_2_3 0x0C
+
+struct ct_fdmiv2_port_attr {
uint16_t type;
uint16_t len;
union {
@@ -2181,12 +2253,40 @@ struct ct_fdmi_port_attr {
uint32_t max_frame_size;
uint8_t os_dev_name[32];
uint8_t host_name[32];
+ uint8_t node_name[WWN_SIZE];
+ uint8_t port_name[WWN_SIZE];
+ uint8_t port_sym_name[128];
+ uint32_t port_type;
+ uint32_t port_supported_cos;
+ uint8_t fabric_name[WWN_SIZE];
+ uint8_t port_fc4_type[32];
+ uint32_t port_state;
+ uint32_t num_ports;
+ uint32_t port_id;
} a;
};
/*
* Port Attribute Block.
*/
+struct ct_fdmiv2_port_attributes {
+ uint32_t count;
+ struct ct_fdmiv2_port_attr entry[FDMIV2_PORT_ATTR_COUNT];
+};
+
+struct ct_fdmi_port_attr {
+ uint16_t type;
+ uint16_t len;
+ union {
+ uint8_t fc4_types[32];
+ uint32_t sup_speed;
+ uint32_t cur_speed;
+ uint32_t max_frame_size;
+ uint8_t os_dev_name[32];
+ uint8_t host_name[32];
+ } a;
+};
+
struct ct_fdmi_port_attributes {
uint32_t count;
struct ct_fdmi_port_attr entry[FDMI_PORT_ATTR_COUNT];
@@ -2286,6 +2386,13 @@ struct ct_sns_req {
struct {
uint8_t hba_identifier[8];
+ uint32_t entry_count;
+ uint8_t port_name[8];
+ struct ct_fdmiv2_hba_attributes attrs;
+ } rhba2;
+
+ struct {
+ uint8_t hba_identifier[8];
struct ct_fdmi_hba_attributes attrs;
} rhat;
@@ -2296,6 +2403,11 @@ struct ct_sns_req {
struct {
uint8_t port_name[8];
+ struct ct_fdmiv2_port_attributes attrs;
+ } rpa2;
+
+ struct {
+ uint8_t port_name[8];
} dhba;
struct {
@@ -2522,7 +2634,7 @@ struct isp_operations {
int (*load_risc) (struct scsi_qla_host *, uint32_t *);
char * (*pci_info_str) (struct scsi_qla_host *, char *);
- char * (*fw_version_str) (struct scsi_qla_host *, char *);
+ char * (*fw_version_str)(struct scsi_qla_host *, char *, size_t);
irq_handler_t intr_handler;
void (*enable_intrs) (struct qla_hw_data *);
@@ -2664,6 +2776,9 @@ struct qla_statistics {
uint32_t control_requests;
uint64_t jiffies_at_last_reset;
+ uint32_t stat_max_pend_cmds;
+ uint32_t stat_max_qfull_cmds_alloc;
+ uint32_t stat_max_qfull_cmds_dropped;
};
struct bidi_statistics {
@@ -2786,8 +2901,22 @@ struct qlt_hw_data {
uint8_t saved_add_firmware_options[2];
uint8_t tgt_node_name[WWN_SIZE];
+
+ struct list_head q_full_list;
+ uint32_t num_pend_cmds;
+ uint32_t num_qfull_cmds_alloc;
+ uint32_t num_qfull_cmds_dropped;
+ spinlock_t q_full_lock;
+ uint32_t leak_exchg_thresh_hold;
};
+#define MAX_QFULL_CMDS_ALLOC 8192
+#define Q_FULL_THRESH_HOLD_PERCENT 90
+#define Q_FULL_THRESH_HOLD(ha) \
+ ((ha->fw_xcb_count/100) * Q_FULL_THRESH_HOLD_PERCENT)
+
+#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */
+
/*
* Qlogic host adapter specific data structure.
*/
@@ -2834,7 +2963,8 @@ struct qla_hw_data {
uint32_t mr_reset_hdlr_active:1;
uint32_t mr_intr_valid:1;
- /* 34 bits */
+ uint32_t fawwpn_enabled:1;
+ /* 35 bits */
} flags;
/* This spinlock is used to protect "io transactions", you must
@@ -3032,6 +3162,7 @@ struct qla_hw_data {
#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha))
#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length)
#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha))
+#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
/* HBA serial number */
uint8_t serial0;
@@ -3333,6 +3464,7 @@ struct qla_hw_data {
struct work_struct board_disable;
struct mr_data_fx00 mr;
+ uint32_t chip_reset;
struct qlt_hw_data tgt;
int allow_cna_fw_dump;
@@ -3402,6 +3534,11 @@ typedef struct scsi_qla_host {
#define FX00_CRITEMP_RECOVERY 25
#define FX00_HOST_INFO_RESEND 26
+ unsigned long pci_flags;
+#define PFLG_DISCONNECTED 0 /* PCI device removed */
+#define PFLG_DRIVER_REMOVING 1 /* PCI driver .remove */
+#define PFLG_DRIVER_PROBING 2 /* PCI driver .probe */
+
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
#define DFLG_NO_CABLE BIT_1
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index eb8f57249f1d..42bb357bf56b 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -91,7 +91,7 @@ struct nvram_24xx {
/* Firmware Initialization Control Block. */
uint16_t version;
uint16_t reserved_1;
- uint16_t frame_payload_size;
+ __le16 frame_payload_size;
uint16_t execution_throttle;
uint16_t exchange_count;
uint16_t hard_address;
@@ -317,8 +317,8 @@ struct init_cb_24xx {
* BIT 3 = Reserved
* BIT 4 = Enable Target Mode
* BIT 5 = Disable Initiator Mode
- * BIT 6 = Reserved
- * BIT 7 = Reserved
+ * BIT 6 = Acquire FA-WWN
+ * BIT 7 = Enable D-port Diagnostics
*
* BIT 8 = Reserved
* BIT 9 = Non Participating LIP
@@ -567,7 +567,7 @@ struct sts_entry_24xx {
#define SF_TRANSFERRED_DATA BIT_11
#define SF_FCP_RSP_DMA BIT_0
- uint16_t reserved_2;
+ uint16_t retry_delay;
uint16_t scsi_status; /* SCSI status. */
#define SS_CONFIRMATION_REQ BIT_12
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index d646540db3ac..b1865a72ce59 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -72,6 +72,7 @@ extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
+extern void *qla2x00_alloc_iocbs_ready(struct scsi_qla_host *, srb_t *);
extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
extern fc_port_t *
@@ -475,7 +476,8 @@ extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
uint32_t);
extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t);
-bool qla2x00_check_reg_for_disconnect(scsi_qla_host_t *, uint32_t);
+bool qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *, uint32_t);
+bool qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *, uint16_t);
extern int qla2x00_beacon_on(struct scsi_qla_host *);
extern int qla2x00_beacon_off(struct scsi_qla_host *);
@@ -561,7 +563,7 @@ extern void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
extern int qla2x00_fdmi_register(scsi_qla_host_t *);
extern int qla2x00_gfpn_id(scsi_qla_host_t *, sw_info_t *);
extern int qla2x00_gpsc(scsi_qla_host_t *, sw_info_t *);
-extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
+extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
/*
* Global Function Prototypes in qla_attr.c source file.
@@ -613,7 +615,7 @@ extern void qlafx00_soft_reset(scsi_qla_host_t *);
extern int qlafx00_chip_diag(scsi_qla_host_t *);
extern void qlafx00_config_rings(struct scsi_qla_host *);
extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *);
-extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *);
+extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *, size_t);
extern irqreturn_t qlafx00_intr_handler(int, void *);
extern void qlafx00_enable_intrs(struct qla_hw_data *);
extern void qlafx00_disable_intrs(struct qla_hw_data *);
@@ -765,4 +767,5 @@ extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t);
extern int qla8044_abort_isp(scsi_qla_host_t *);
extern int qla8044_check_fw_alive(struct scsi_qla_host *);
+extern void qlt_host_reset_handler(struct qla_hw_data *ha);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index a0df3b1b3823..dccc4dcc39c8 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -6,6 +6,7 @@
*/
#include "qla_def.h"
#include "qla_target.h"
+#include <linux/utsname.h>
static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
@@ -143,10 +144,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
if (ct_rsp->header.response !=
__constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
- "%s failed rejected request on port_id: "
- "%02x%02x%02x.\n", routine,
- vha->d_id.b.domain, vha->d_id.b.area,
- vha->d_id.b.al_pa);
+ "%s failed rejected request on port_id: %02x%02x%02x Compeltion status 0x%x, response 0x%x\n",
+ routine, vha->d_id.b.domain,
+ vha->d_id.b.area, vha->d_id.b.al_pa,
+ comp_status, ct_rsp->header.response);
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
0x2078, (uint8_t *)&ct_rsp->header,
sizeof(struct ct_rsp_hdr));
@@ -622,15 +623,16 @@ qla2x00_rnn_id(scsi_qla_host_t *vha)
}
void
-qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn)
+qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
{
struct qla_hw_data *ha = vha->hw;
if (IS_QLAFX00(ha))
- sprintf(snn, "%s FW:v%s DVR:v%s", ha->model_number,
+ snprintf(snn, size, "%s FW:v%s DVR:v%s", ha->model_number,
ha->mr.fw_version, qla2x00_version_str);
else
- sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
+ snprintf(snn, size,
+ "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
ha->fw_major_version, ha->fw_minor_version,
ha->fw_subminor_version, qla2x00_version_str);
}
@@ -670,7 +672,8 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
/* Prepare the Symbolic Node Name */
- qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name);
+ qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
+ sizeof(ct_req->req.rsnn_nn.sym_node_name));
/* Calculate SNN length */
ct_req->req.rsnn_nn.name_len =
@@ -1263,7 +1266,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
- uint8_t *entries;
+ void *entries;
struct ct_fdmi_hba_attr *eiter;
struct qla_hw_data *ha = vha->hw;
@@ -1288,7 +1291,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
entries = ct_req->req.rhba.hba_identifier;
/* Nodename. */
- eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+ eiter = entries + size;
eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME);
eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE);
memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
@@ -1298,11 +1301,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
"NodeName = %8phN.\n", eiter->a.node_name);
/* Manufacturer. */
- eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+ eiter = entries + size;
eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER);
alen = strlen(QLA2XXX_MANUFACTURER);
- strncpy(eiter->a.manufacturer, QLA2XXX_MANUFACTURER, alen + 1);
- alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+ snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
+ "%s", "QLogic Corporation");
+ alen += 4 - (alen & 3);
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
@@ -1310,12 +1314,19 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
"Manufacturer = %s.\n", eiter->a.manufacturer);
/* Serial number. */
- eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+ eiter = entries + size;
eiter->type = __constant_cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
- sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
- sprintf(eiter->a.serial_num, "%c%05d", 'A' + sn / 100000, sn % 100000);
+ if (IS_FWI2_CAPABLE(ha))
+ qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
+ sizeof(eiter->a.serial_num));
+ else {
+ sn = ((ha->serial0 & 0x1f) << 16) |
+ (ha->serial2 << 8) | ha->serial1;
+ snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
+ "%c%05d", 'A' + sn / 100000, sn % 100000);
+ }
alen = strlen(eiter->a.serial_num);
- alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+ alen += 4 - (alen & 3);
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
@@ -1323,11 +1334,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
"Serial no. = %s.\n", eiter->a.serial_num);
/* Model name. */
- eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+ eiter = entries + size;
eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL);
- strcpy(eiter->a.model, ha->model_number);
+ snprintf(eiter->a.model, sizeof(eiter->a.model),
+ "%s", ha->model_number);
alen = strlen(eiter->a.model);
- alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+ alen += 4 - (alen & 3);
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
@@ -1335,11 +1347,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
"Model Name = %s.\n", eiter->a.model);
/* Model description. */
- eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+ eiter = entries + size;
eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
- strncpy(eiter->a.model_desc, ha->model_desc, 80);
+ snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
+ "%s", ha->model_desc);
alen = strlen(eiter->a.model_desc);
- alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+ alen += 4 - (alen & 3);
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
@@ -1347,11 +1360,23 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
"Model Desc = %s.\n", eiter->a.model_desc);
/* Hardware version. */
- eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+ eiter = entries + size;
eiter->type = __constant_cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
- strcpy(eiter->a.hw_version, ha->adapter_id);
+ if (!IS_FWI2_CAPABLE(ha)) {
+ snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
+ "HW:%s", ha->adapter_id);
+ } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
+ sizeof(eiter->a.hw_version))) {
+ ;
+ } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
+ sizeof(eiter->a.hw_version))) {
+ ;
+ } else {
+ snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
+ "HW:%s", ha->adapter_id);
+ }
alen = strlen(eiter->a.hw_version);
- alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+ alen += 4 - (alen & 3);
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
@@ -1359,11 +1384,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
"Hardware ver = %s.\n", eiter->a.hw_version);
/* Driver version. */
- eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+ eiter = entries + size;
eiter->type = __constant_cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
- strcpy(eiter->a.driver_version, qla2x00_version_str);
+ snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
+ "%s", qla2x00_version_str);
alen = strlen(eiter->a.driver_version);
- alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+ alen += 4 - (alen & 3);
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
@@ -1371,11 +1397,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
"Driver ver = %s.\n", eiter->a.driver_version);
/* Option ROM version. */
- eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+ eiter = entries + size;
eiter->type = __constant_cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
- strcpy(eiter->a.orom_version, "0.00");
+ snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
+ "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
alen = strlen(eiter->a.orom_version);
- alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+ alen += 4 - (alen & 3);
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
@@ -1383,11 +1410,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
"Optrom vers = %s.\n", eiter->a.orom_version);
/* Firmware version */
- eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+ eiter = entries + size;
eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
- ha->isp_ops->fw_version_str(vha, eiter->a.fw_version);
+ ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
+ sizeof(eiter->a.fw_version));
alen = strlen(eiter->a.fw_version);
- alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+ alen += 4 - (alen & 3);
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
@@ -1419,6 +1447,11 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_disc, vha, 0x2034,
"HBA already registered.\n");
rval = QLA_ALREADY_REGISTERED;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20ad,
+ "RHBA FDMI registration failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
+ ct_rsp->header.reason_code,
+ ct_rsp->header.explanation_code);
}
} else {
ql_dbg(ql_dbg_disc, vha, 0x2035,
@@ -1429,6 +1462,534 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
}
/**
+ * qla2x00_fdmi_rpa() -
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
+{
+ int rval, alen;
+ uint32_t size;
+ struct qla_hw_data *ha = vha->hw;
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+ void *entries;
+ struct ct_fdmi_port_attr *eiter;
+ struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
+ struct new_utsname *p_sysid = NULL;
+
+ /* Issue RPA */
+ /* Prepare common MS IOCB */
+ /* Request size adjusted after CT preparation */
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
+
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD,
+ RPA_RSP_SIZE);
+ ct_rsp = &ha->ct_sns->p.rsp;
+
+ /* Prepare FDMI command arguments -- attribute block, attributes. */
+ memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
+ size = WWN_SIZE + 4;
+
+ /* Attributes */
+ ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT);
+ entries = ct_req->req.rpa.port_name;
+
+ /* FC4 types. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
+ eiter->len = cpu_to_be16(4 + 32);
+ eiter->a.fc4_types[2] = 0x01;
+ size += 4 + 32;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2039,
+ "FC4_TYPES=%02x %02x.\n",
+ eiter->a.fc4_types[2],
+ eiter->a.fc4_types[1]);
+
+ /* Supported speed. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
+ eiter->len = cpu_to_be16(4 + 4);
+ if (IS_CNA_CAPABLE(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_10GB);
+ else if (IS_QLA27XX(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_32GB|
+ FDMI_PORT_SPEED_16GB|
+ FDMI_PORT_SPEED_8GB);
+ else if (IS_QLA2031(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_16GB|
+ FDMI_PORT_SPEED_8GB|
+ FDMI_PORT_SPEED_4GB);
+ else if (IS_QLA25XX(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_8GB|
+ FDMI_PORT_SPEED_4GB|
+ FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_1GB);
+ else if (IS_QLA24XX_TYPE(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_4GB|
+ FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_1GB);
+ else if (IS_QLA23XX(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_1GB);
+ else
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_1GB);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x203a,
+ "Supported_Speed=%x.\n", eiter->a.sup_speed);
+
+ /* Current speed. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
+ eiter->len = cpu_to_be16(4 + 4);
+ switch (ha->link_data_rate) {
+ case PORT_SPEED_1GB:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_1GB);
+ break;
+ case PORT_SPEED_2GB:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_2GB);
+ break;
+ case PORT_SPEED_4GB:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_4GB);
+ break;
+ case PORT_SPEED_8GB:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_8GB);
+ break;
+ case PORT_SPEED_10GB:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_10GB);
+ break;
+ case PORT_SPEED_16GB:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_16GB);
+ break;
+ case PORT_SPEED_32GB:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_32GB);
+ break;
+ default:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
+ break;
+ }
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x203b,
+ "Current_Speed=%x.\n", eiter->a.cur_speed);
+
+ /* Max frame size. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
+ eiter->len = cpu_to_be16(4 + 4);
+ eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
+ le16_to_cpu(icb24->frame_payload_size) :
+ le16_to_cpu(ha->init_cb->frame_payload_size);
+ eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x203c,
+ "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
+
+ /* OS device name. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
+ snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
+ "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
+ alen = strlen(eiter->a.os_dev_name);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x204b,
+ "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
+
+ /* Hostname. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
+ p_sysid = utsname();
+ if (p_sysid) {
+ snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
+ "%s", p_sysid->nodename);
+ } else {
+ snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
+ "%s", fc_host_system_hostname(vha->host));
+ }
+ alen = strlen(eiter->a.host_name);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x203d, "HostName=%s.\n", eiter->a.host_name);
+
+ /* Update MS request size. */
+ qla2x00_update_ms_fdmi_iocb(vha, size + 16);
+
+ ql_dbg(ql_dbg_disc, vha, 0x203e,
+ "RPA portname %016llx, size = %d.\n",
+ wwn_to_u64(ct_req->req.rpa.port_name), size);
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
+ entries, size);
+
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(ms_iocb_entry_t));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x2040,
+ "RPA issue IOCB failed (%d).\n", rval);
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
+ QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
+ if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
+ ct_rsp->header.explanation_code ==
+ CT_EXPL_ALREADY_REGISTERED) {
+ ql_dbg(ql_dbg_disc, vha, 0x20cd,
+ "RPA already registered.\n");
+ rval = QLA_ALREADY_REGISTERED;
+ }
+
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2041,
+ "RPA exiting normally.\n");
+ }
+
+ return rval;
+}
+
+/**
+ * qla2x00_fdmiv2_rhba() -
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
+{
+ int rval, alen;
+ uint32_t size, sn;
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+ void *entries;
+ struct ct_fdmiv2_hba_attr *eiter;
+ struct qla_hw_data *ha = vha->hw;
+ struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
+ struct new_utsname *p_sysid = NULL;
+
+ /* Issue RHBA */
+ /* Prepare common MS IOCB */
+ /* Request size adjusted after CT preparation */
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
+
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD,
+ RHBA_RSP_SIZE);
+ ct_rsp = &ha->ct_sns->p.rsp;
+
+ /* Prepare FDMI command arguments -- attribute block, attributes. */
+ memcpy(ct_req->req.rhba2.hba_identifier, vha->port_name, WWN_SIZE);
+ ct_req->req.rhba2.entry_count = cpu_to_be32(1);
+ memcpy(ct_req->req.rhba2.port_name, vha->port_name, WWN_SIZE);
+ size = 2 * WWN_SIZE + 4 + 4;
+
+ /* Attributes */
+ ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT);
+ entries = ct_req->req.rhba2.hba_identifier;
+
+ /* Nodename. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
+ eiter->len = cpu_to_be16(4 + WWN_SIZE);
+ memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
+ size += 4 + WWN_SIZE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x207d,
+ "NodeName = %016llx.\n", wwn_to_u64(eiter->a.node_name));
+
+ /* Manufacturer. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
+ snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
+ "%s", "QLogic Corporation");
+ eiter->a.manufacturer[strlen("QLogic Corporation")] = '\0';
+ alen = strlen(eiter->a.manufacturer);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20a5,
+ "Manufacturer = %s.\n", eiter->a.manufacturer);
+
+ /* Serial number. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
+ if (IS_FWI2_CAPABLE(ha))
+ qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
+ sizeof(eiter->a.serial_num));
+ else {
+ sn = ((ha->serial0 & 0x1f) << 16) |
+ (ha->serial2 << 8) | ha->serial1;
+ snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
+ "%c%05d", 'A' + sn / 100000, sn % 100000);
+ }
+ alen = strlen(eiter->a.serial_num);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20a6,
+ "Serial no. = %s.\n", eiter->a.serial_num);
+
+ /* Model name. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
+ snprintf(eiter->a.model, sizeof(eiter->a.model),
+ "%s", ha->model_number);
+ alen = strlen(eiter->a.model);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20a7,
+ "Model Name = %s.\n", eiter->a.model);
+
+ /* Model description. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
+ snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
+ "%s", ha->model_desc);
+ alen = strlen(eiter->a.model_desc);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20a8,
+ "Model Desc = %s.\n", eiter->a.model_desc);
+
+ /* Hardware version. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
+ if (!IS_FWI2_CAPABLE(ha)) {
+ snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
+ "HW:%s", ha->adapter_id);
+ } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
+ sizeof(eiter->a.hw_version))) {
+ ;
+ } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
+ sizeof(eiter->a.hw_version))) {
+ ;
+ } else {
+ snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
+ "HW:%s", ha->adapter_id);
+ }
+ alen = strlen(eiter->a.hw_version);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20a9,
+ "Hardware ver = %s.\n", eiter->a.hw_version);
+
+ /* Driver version. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
+ snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
+ "%s", qla2x00_version_str);
+ alen = strlen(eiter->a.driver_version);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20aa,
+ "Driver ver = %s.\n", eiter->a.driver_version);
+
+ /* Option ROM version. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
+ snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
+ "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
+ alen = strlen(eiter->a.orom_version);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha , 0x20ab,
+ "Optrom version = %d.%02d.\n", eiter->a.orom_version[1],
+ eiter->a.orom_version[0]);
+
+ /* Firmware version */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
+ ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
+ sizeof(eiter->a.fw_version));
+ alen = strlen(eiter->a.fw_version);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20ac,
+ "Firmware vers = %s.\n", eiter->a.fw_version);
+
+ /* OS Name and Version */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
+ p_sysid = utsname();
+ if (p_sysid) {
+ snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
+ "%s %s %s",
+ p_sysid->sysname, p_sysid->release, p_sysid->version);
+ } else {
+ snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
+ "%s %s", "Linux", fc_host_system_hostname(vha->host));
+ }
+ alen = strlen(eiter->a.os_version);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20ae,
+ "OS Name and Version = %s.\n", eiter->a.os_version);
+
+ /* MAX CT Payload Length */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
+ eiter->a.max_ct_len = IS_FWI2_CAPABLE(ha) ?
+ le16_to_cpu(icb24->frame_payload_size) :
+ le16_to_cpu(ha->init_cb->frame_payload_size);
+ eiter->a.max_ct_len = cpu_to_be32(eiter->a.max_ct_len);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20af,
+ "CT Payload Length = 0x%x.\n", eiter->a.max_ct_len);
+
+ /* Node Symbolic Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
+ qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
+ sizeof(eiter->a.sym_name));
+ alen = strlen(eiter->a.sym_name);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20b0,
+ "Symbolic Name = %s.\n", eiter->a.sym_name);
+
+ /* Vendor Id */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_ID);
+ eiter->a.vendor_id = cpu_to_be32(0x1077);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20b1,
+ "Vendor Id = %x.\n", eiter->a.vendor_id);
+
+ /* Num Ports */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
+ eiter->a.num_ports = cpu_to_be32(1);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20b2,
+ "Port Num = %x.\n", eiter->a.num_ports);
+
+ /* Fabric Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
+ memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
+ eiter->len = cpu_to_be16(4 + WWN_SIZE);
+ size += 4 + WWN_SIZE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20b3,
+ "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
+
+ /* BIOS Version */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
+ snprintf(eiter->a.bios_name, sizeof(eiter->a.bios_name),
+ "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
+ alen = strlen(eiter->a.bios_name);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20b4,
+ "BIOS Name = %s\n", eiter->a.bios_name);
+
+ /* Vendor Identifier */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER);
+ snprintf(eiter->a.vendor_indentifer, sizeof(eiter->a.vendor_indentifer),
+ "%s", "QLGC");
+ alen = strlen(eiter->a.vendor_indentifer);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20b1,
+ "Vendor Identifier = %s.\n", eiter->a.vendor_indentifer);
+
+ /* Update MS request size. */
+ qla2x00_update_ms_fdmi_iocb(vha, size + 16);
+
+ ql_dbg(ql_dbg_disc, vha, 0x20b5,
+ "RHBA identifier = %016llx.\n",
+ wwn_to_u64(ct_req->req.rhba2.hba_identifier));
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20b6,
+ entries, size);
+
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(ms_iocb_entry_t));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x20b7,
+ "RHBA issue IOCB failed (%d).\n", rval);
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
+ QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
+
+ if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
+ ct_rsp->header.explanation_code ==
+ CT_EXPL_ALREADY_REGISTERED) {
+ ql_dbg(ql_dbg_disc, vha, 0x20b8,
+ "HBA already registered.\n");
+ rval = QLA_ALREADY_REGISTERED;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2016,
+ "RHBA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
+ ct_rsp->header.reason_code,
+ ct_rsp->header.explanation_code);
+ }
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20b9,
+ "RHBA FDMI V2 exiting normally.\n");
+ }
+
+ return rval;
+}
+
+/**
* qla2x00_fdmi_dhba() -
* @ha: HA context
*
@@ -1477,23 +2038,24 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
}
/**
- * qla2x00_fdmi_rpa() -
+ * qla2x00_fdmiv2_rpa() -
* @ha: HA context
*
* Returns 0 on success.
*/
static int
-qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
+qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
{
int rval, alen;
- uint32_t size, max_frame_size;
+ uint32_t size;
struct qla_hw_data *ha = vha->hw;
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
- uint8_t *entries;
- struct ct_fdmi_port_attr *eiter;
+ void *entries;
+ struct ct_fdmiv2_port_attr *eiter;
struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
+ struct new_utsname *p_sysid = NULL;
/* Issue RPA */
/* Prepare common MS IOCB */
@@ -1505,147 +2067,258 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
ct_rsp = &ha->ct_sns->p.rsp;
/* Prepare FDMI command arguments -- attribute block, attributes. */
- memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
+ memcpy(ct_req->req.rpa2.port_name, vha->port_name, WWN_SIZE);
size = WWN_SIZE + 4;
/* Attributes */
- ct_req->req.rpa.attrs.count =
- __constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT - 1);
- entries = ct_req->req.rpa.port_name;
+ ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT);
+ entries = ct_req->req.rpa2.port_name;
/* FC4 types. */
- eiter = (struct ct_fdmi_port_attr *) (entries + size);
- eiter->type = __constant_cpu_to_be16(FDMI_PORT_FC4_TYPES);
- eiter->len = __constant_cpu_to_be16(4 + 32);
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
+ eiter->len = cpu_to_be16(4 + 32);
eiter->a.fc4_types[2] = 0x01;
size += 4 + 32;
- ql_dbg(ql_dbg_disc, vha, 0x2039,
+ ql_dbg(ql_dbg_disc, vha, 0x20ba,
"FC4_TYPES=%02x %02x.\n",
eiter->a.fc4_types[2],
eiter->a.fc4_types[1]);
/* Supported speed. */
- eiter = (struct ct_fdmi_port_attr *) (entries + size);
- eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
- eiter->len = __constant_cpu_to_be16(4 + 4);
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
+ eiter->len = cpu_to_be16(4 + 4);
if (IS_CNA_CAPABLE(ha))
- eiter->a.sup_speed = __constant_cpu_to_be32(
+ eiter->a.sup_speed = cpu_to_be32(
FDMI_PORT_SPEED_10GB);
else if (IS_QLA27XX(ha))
- eiter->a.sup_speed = __constant_cpu_to_be32(
- FDMI_PORT_SPEED_32GB|FDMI_PORT_SPEED_16GB|
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_32GB|
+ FDMI_PORT_SPEED_16GB|
FDMI_PORT_SPEED_8GB);
+ else if (IS_QLA2031(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_16GB|
+ FDMI_PORT_SPEED_8GB|
+ FDMI_PORT_SPEED_4GB);
else if (IS_QLA25XX(ha))
- eiter->a.sup_speed = __constant_cpu_to_be32(
- FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
- FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_8GB);
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_8GB|
+ FDMI_PORT_SPEED_4GB|
+ FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_1GB);
else if (IS_QLA24XX_TYPE(ha))
- eiter->a.sup_speed = __constant_cpu_to_be32(
- FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
- FDMI_PORT_SPEED_4GB);
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_4GB|
+ FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_1GB);
else if (IS_QLA23XX(ha))
- eiter->a.sup_speed =__constant_cpu_to_be32(
- FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB);
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_1GB);
else
- eiter->a.sup_speed = __constant_cpu_to_be32(
+ eiter->a.sup_speed = cpu_to_be32(
FDMI_PORT_SPEED_1GB);
size += 4 + 4;
- ql_dbg(ql_dbg_disc, vha, 0x203a,
- "Supported_Speed=%x.\n", eiter->a.sup_speed);
+ ql_dbg(ql_dbg_disc, vha, 0x20bb,
+ "Supported Port Speed = %x.\n", eiter->a.sup_speed);
/* Current speed. */
- eiter = (struct ct_fdmi_port_attr *) (entries + size);
- eiter->type = __constant_cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
- eiter->len = __constant_cpu_to_be16(4 + 4);
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
+ eiter->len = cpu_to_be16(4 + 4);
switch (ha->link_data_rate) {
case PORT_SPEED_1GB:
- eiter->a.cur_speed =
- __constant_cpu_to_be32(FDMI_PORT_SPEED_1GB);
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_1GB);
break;
case PORT_SPEED_2GB:
- eiter->a.cur_speed =
- __constant_cpu_to_be32(FDMI_PORT_SPEED_2GB);
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_2GB);
break;
case PORT_SPEED_4GB:
- eiter->a.cur_speed =
- __constant_cpu_to_be32(FDMI_PORT_SPEED_4GB);
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_4GB);
break;
case PORT_SPEED_8GB:
- eiter->a.cur_speed =
- __constant_cpu_to_be32(FDMI_PORT_SPEED_8GB);
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_8GB);
break;
case PORT_SPEED_10GB:
- eiter->a.cur_speed =
- __constant_cpu_to_be32(FDMI_PORT_SPEED_10GB);
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_10GB);
break;
case PORT_SPEED_16GB:
- eiter->a.cur_speed =
- __constant_cpu_to_be32(FDMI_PORT_SPEED_16GB);
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_16GB);
break;
case PORT_SPEED_32GB:
- eiter->a.cur_speed =
- __constant_cpu_to_be32(FDMI_PORT_SPEED_32GB);
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_32GB);
break;
default:
- eiter->a.cur_speed =
- __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
break;
}
size += 4 + 4;
- ql_dbg(ql_dbg_disc, vha, 0x203b,
- "Current_Speed=%x.\n", eiter->a.cur_speed);
+ ql_dbg(ql_dbg_disc, vha, 0x20bc,
+ "Current_Speed = %x.\n", eiter->a.cur_speed);
/* Max frame size. */
- eiter = (struct ct_fdmi_port_attr *) (entries + size);
- eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
- eiter->len = __constant_cpu_to_be16(4 + 4);
- max_frame_size = IS_FWI2_CAPABLE(ha) ?
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
+ eiter->len = cpu_to_be16(4 + 4);
+ eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
le16_to_cpu(icb24->frame_payload_size):
le16_to_cpu(ha->init_cb->frame_payload_size);
- eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
+ eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
size += 4 + 4;
- ql_dbg(ql_dbg_disc, vha, 0x203c,
- "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
+ ql_dbg(ql_dbg_disc, vha, 0x20bc,
+ "Max_Frame_Size = %x.\n", eiter->a.max_frame_size);
/* OS device name. */
- eiter = (struct ct_fdmi_port_attr *) (entries + size);
- eiter->type = __constant_cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
alen = strlen(QLA2XXX_DRIVER_NAME);
- strncpy(eiter->a.os_dev_name, QLA2XXX_DRIVER_NAME, alen + 1);
- alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+ snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
+ "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
+ alen += 4 - (alen & 3);
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- ql_dbg(ql_dbg_disc, vha, 0x204b,
- "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
+ ql_dbg(ql_dbg_disc, vha, 0x20be,
+ "OS_Device_Name = %s.\n", eiter->a.os_dev_name);
/* Hostname. */
- if (strlen(fc_host_system_hostname(vha->host))) {
- ct_req->req.rpa.attrs.count =
- __constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT);
- eiter = (struct ct_fdmi_port_attr *) (entries + size);
- eiter->type = __constant_cpu_to_be16(FDMI_PORT_HOST_NAME);
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
+ p_sysid = utsname();
+ if (p_sysid) {
+ snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
+ "%s", p_sysid->nodename);
+ } else {
snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
"%s", fc_host_system_hostname(vha->host));
- alen = strlen(eiter->a.host_name);
- alen += (alen & 3) ? (4 - (alen & 3)) : 4;
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x203d,
- "HostName=%s.\n", eiter->a.host_name);
}
+ alen = strlen(eiter->a.host_name);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x203d,
+ "HostName=%s.\n", eiter->a.host_name);
+
+ /* Node Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
+ memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
+ eiter->len = cpu_to_be16(4 + WWN_SIZE);
+ size += 4 + WWN_SIZE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c0,
+ "Node Name = %016llx.\n", wwn_to_u64(eiter->a.node_name));
+
+ /* Port Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_NAME);
+ memcpy(eiter->a.port_name, vha->port_name, WWN_SIZE);
+ eiter->len = cpu_to_be16(4 + WWN_SIZE);
+ size += 4 + WWN_SIZE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c1,
+ "Port Name = %016llx.\n", wwn_to_u64(eiter->a.port_name));
+
+ /* Port Symbolic Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
+ qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
+ sizeof(eiter->a.port_sym_name));
+ alen = strlen(eiter->a.port_sym_name);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c2,
+ "port symbolic name = %s\n", eiter->a.port_sym_name);
+
+ /* Port Type */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
+ eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c3,
+ "Port Type = %x.\n", eiter->a.port_type);
+
+ /* Class of Service */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
+ eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c4,
+ "Supported COS = %08x\n", eiter->a.port_supported_cos);
+
+ /* Port Fabric Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
+ memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
+ eiter->len = cpu_to_be16(4 + WWN_SIZE);
+ size += 4 + WWN_SIZE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c5,
+ "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
+
+ /* FC4_type */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
+ eiter->a.port_fc4_type[0] = 0;
+ eiter->a.port_fc4_type[1] = 0;
+ eiter->a.port_fc4_type[2] = 1;
+ eiter->a.port_fc4_type[3] = 0;
+ eiter->len = cpu_to_be16(4 + 32);
+ size += 4 + 32;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c6,
+ "Port Active FC4 Type = %02x %02x.\n",
+ eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]);
+
+ /* Port State */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_STATE);
+ eiter->a.port_state = cpu_to_be32(1);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c7,
+ "Port State = %x.\n", eiter->a.port_state);
+
+ /* Number of Ports */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
+ eiter->a.num_ports = cpu_to_be32(1);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c8,
+ "Number of ports = %x.\n", eiter->a.num_ports);
+
+ /* Port Id */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_ID);
+ eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c8,
+ "Port Id = %x.\n", eiter->a.port_id);
/* Update MS request size. */
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
ql_dbg(ql_dbg_disc, vha, 0x203e,
"RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
- ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ca,
entries, size);
/* Execute MS IOCB */
@@ -1653,14 +2326,26 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x2040,
- "RPA issue IOCB failed (%d).\n", rval);
+ ql_dbg(ql_dbg_disc, vha, 0x20cb,
+ "RPA FDMI v2 issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
+ if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
+ ct_rsp->header.explanation_code ==
+ CT_EXPL_ALREADY_REGISTERED) {
+ ql_dbg(ql_dbg_disc, vha, 0x20ce,
+ "RPA FDMI v2 already registered\n");
+ rval = QLA_ALREADY_REGISTERED;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2020,
+ "RPA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
+ ct_rsp->header.reason_code,
+ ct_rsp->header.explanation_code);
+ }
} else {
- ql_dbg(ql_dbg_disc, vha, 0x2041,
- "RPA exiting nornally.\n");
+ ql_dbg(ql_dbg_disc, vha, 0x20cc,
+ "RPA FDMI V2 exiting normally.\n");
}
return rval;
@@ -1675,8 +2360,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
int
qla2x00_fdmi_register(scsi_qla_host_t *vha)
{
- int rval;
- struct qla_hw_data *ha = vha->hw;
+ int rval = QLA_FUNCTION_FAILED;
+ struct qla_hw_data *ha = vha->hw;
if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
IS_QLAFX00(ha))
@@ -1686,6 +2371,26 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha)
if (rval)
return rval;
+ rval = qla2x00_fdmiv2_rhba(vha);
+ if (rval) {
+ if (rval != QLA_ALREADY_REGISTERED)
+ goto try_fdmi;
+
+ rval = qla2x00_fdmi_dhba(vha);
+ if (rval)
+ goto try_fdmi;
+
+ rval = qla2x00_fdmiv2_rhba(vha);
+ if (rval)
+ goto try_fdmi;
+ }
+ rval = qla2x00_fdmiv2_rpa(vha);
+ if (rval)
+ goto try_fdmi;
+
+ goto out;
+
+try_fdmi:
rval = qla2x00_fdmi_rhba(vha);
if (rval) {
if (rval != QLA_ALREADY_REGISTERED)
@@ -1700,7 +2405,7 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha)
return rval;
}
rval = qla2x00_fdmi_rpa(vha);
-
+out:
return rval;
}
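The string-valued FDMI attributes registered above pad each payload to a 4-byte boundary before adding the 4-byte type/length header, and the rounding deliberately adds a full pad word when the length is already aligned. A minimal stand-alone sketch of the same length math (fdmi_str_attr_len is a made-up name, not a driver function):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* FDMI attribute payloads stay 4-byte aligned: round strlen() up to the
 * next multiple of four (a full pad word is added when already aligned),
 * then account for the 4-byte type/length header. */
static uint16_t fdmi_str_attr_len(const char *s)
{
    uint16_t alen = strlen(s);

    alen += 4 - (alen & 3);     /* pad to 4-byte boundary */
    return 4 + alen;            /* header + padded payload */
}

int main(void)
{
    /* "QLOGIC" -> 6 chars + 2 pad + 4 header = 12 bytes on the wire */
    printf("%u\n", (unsigned)fdmi_str_attr_len("QLOGIC"));
    return 0;
}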
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 46990f4ceb40..a4dde7e80dbd 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1848,7 +1848,9 @@ enable_82xx_npiv:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
- if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
+ if (IS_QLA27XX(ha))
+ ha->flags.fac_supported = 1;
+ else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
uint32_t size;
rval = qla81xx_fac_get_sector_size(vha, &size);
@@ -2196,6 +2198,15 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
mid_init_cb->init_cb.execution_throttle =
cpu_to_le16(ha->fw_xcb_count);
+ /* D-Port Status */
+ if (IS_DPORT_CAPABLE(ha))
+ mid_init_cb->init_cb.firmware_options_1 |=
+ cpu_to_le16(BIT_7);
+ /* Enable FA-WWPN */
+ ha->flags.fawwpn_enabled =
+ (mid_init_cb->init_cb.firmware_options_1 & BIT_6) ? 1 : 0;
+ ql_dbg(ql_dbg_init, vha, 0x0141, "FA-WWPN Support: %s.\n",
+ (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
}
rval = qla2x00_init_firmware(vha, ha->init_cb_size);
@@ -2224,7 +2235,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
unsigned long wtime, mtime, cs84xx_time;
uint16_t min_wait; /* Minimum wait time if loop is down */
uint16_t wait_time; /* Wait time if loop is coming ready */
- uint16_t state[5];
+ uint16_t state[6];
struct qla_hw_data *ha = vha->hw;
if (IS_QLAFX00(vha->hw))
@@ -2329,8 +2340,8 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
} while (1);
ql_dbg(ql_dbg_taskm, vha, 0x803a,
- "fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0],
- state[1], state[2], state[3], state[4], jiffies);
+ "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
+ state[1], state[2], state[3], state[4], state[5], jiffies);
if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
ql_log(ql_log_warn, vha, 0x803b,
@@ -2596,18 +2607,18 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
nv->firmware_options[1] = BIT_7 | BIT_5;
nv->add_firmware_options[0] = BIT_5;
nv->add_firmware_options[1] = BIT_5 | BIT_4;
- nv->frame_payload_size = __constant_cpu_to_le16(2048);
+ nv->frame_payload_size = 2048;
nv->special_options[1] = BIT_7;
} else if (IS_QLA2200(ha)) {
nv->firmware_options[0] = BIT_2 | BIT_1;
nv->firmware_options[1] = BIT_7 | BIT_5;
nv->add_firmware_options[0] = BIT_5;
nv->add_firmware_options[1] = BIT_5 | BIT_4;
- nv->frame_payload_size = __constant_cpu_to_le16(1024);
+ nv->frame_payload_size = 1024;
} else if (IS_QLA2100(ha)) {
nv->firmware_options[0] = BIT_3 | BIT_1;
nv->firmware_options[1] = BIT_5;
- nv->frame_payload_size = __constant_cpu_to_le16(1024);
+ nv->frame_payload_size = 1024;
}
nv->max_iocb_allocation = __constant_cpu_to_le16(256);
@@ -2643,7 +2654,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
* are valid.
*/
if (ia64_platform_is("sn2")) {
- nv->frame_payload_size = __constant_cpu_to_le16(2048);
+ nv->frame_payload_size = 2048;
if (IS_QLA23XX(ha))
nv->special_options[1] = BIT_7;
}
@@ -3192,7 +3203,7 @@ static void
qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
int rval;
- uint16_t mb[4];
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
struct qla_hw_data *ha = vha->hw;
if (!IS_IIDMA_CAPABLE(ha))
@@ -4564,6 +4575,10 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
/* Requeue all commands in outstanding command list. */
qla2x00_abort_all_cmds(vha, DID_RESET << 16);
}
+
+ ha->chip_reset++;
+ /* memory barrier */
+ wmb();
}
/*
@@ -4958,7 +4973,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
memset(nv, 0, ha->nvram_size);
nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
nv->version = __constant_cpu_to_le16(ICB_VERSION);
- nv->frame_payload_size = __constant_cpu_to_le16(2048);
+ nv->frame_payload_size = 2048;
nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
nv->exchange_count = __constant_cpu_to_le16(0);
nv->hard_address = __constant_cpu_to_le16(124);
@@ -5225,7 +5240,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
ql_log(ql_log_fatal, vha, 0x008f,
"Failed to load segment %d of firmware.\n",
fragment);
- break;
+ return QLA_FUNCTION_FAILED;
}
faddr += dlen;
@@ -5528,7 +5543,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
ql_log(ql_log_fatal, vha, 0x0098,
"Failed to load segment %d of firmware.\n",
fragment);
- break;
+ return QLA_FUNCTION_FAILED;
}
fwcode += dlen;
@@ -5905,7 +5920,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
memset(nv, 0, ha->nvram_size);
nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
nv->version = __constant_cpu_to_le16(ICB_VERSION);
- nv->frame_payload_size = __constant_cpu_to_le16(2048);
+ nv->frame_payload_size = 2048;
nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
nv->exchange_count = __constant_cpu_to_le16(0);
nv->port_name[0] = 0x21;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index b3b1d6fc2d6c..fee9eb7c8a60 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -279,3 +279,11 @@ qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
complete(&ha->mbx_intr_comp);
}
}
+
+static inline void
+qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t retry_delay)
+{
+ if (retry_delay)
+ fcport->retry_delay_timestamp = jiffies +
+ (retry_delay * HZ / 10);
+}
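qla2x00_set_retry_delay_timestamp() above converts the firmware-supplied retry delay into an absolute jiffies deadline; the HZ / 10 factor treats the value as tenths of a second. A self-contained sketch of the same conversion (retry_deadline and the fixed HZ value are assumptions for illustration, not driver code):

#include <stdio.h>

#define HZ 1000                     /* assumed tick rate for the example */

static unsigned long jiffies;       /* stand-in for the kernel counter */

/* Convert a status IOCB retry delay (tenth-of-a-second units, per the
 * HZ / 10 scaling above) into an absolute jiffies deadline. */
static unsigned long retry_deadline(unsigned short retry_delay)
{
    return jiffies + (unsigned long)retry_delay * HZ / 10;
}

int main(void)
{
    jiffies = 5000;
    /* 0x14 == 20 tenths -> deadline two seconds from "now" (7000) */
    printf("%lu\n", retry_deadline(0x14));
    return 0;
}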
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 150529d98db4..f0edb07f3198 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1858,6 +1858,17 @@ static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
}
/* Generic Control-SRB manipulation functions. */
+
+/* hardware_lock assumed to be held. */
+void *
+qla2x00_alloc_iocbs_ready(scsi_qla_host_t *vha, srb_t *sp)
+{
+ if (qla2x00_reset_active(vha))
+ return NULL;
+
+ return qla2x00_alloc_iocbs(vha, sp);
+}
+
void *
qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
{
@@ -1901,7 +1912,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
skip_cmd_array:
/* Check for room on request queue. */
- if (req->cnt < req_cnt) {
+ if (req->cnt < req_cnt + 2) {
if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
else if (IS_P3P_TYPE(ha))
@@ -1920,7 +1931,7 @@ skip_cmd_array:
req->cnt = req->length -
(req->ring_index - cnt);
}
- if (req->cnt < req_cnt)
+ if (req->cnt < req_cnt + 2)
goto queuing_error;
/* Prep packet */
@@ -2648,7 +2659,7 @@ queuing_error:
return QLA_FUNCTION_FAILED;
}
-void
+static void
qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
{
struct srb_iocb *aio = &sp->u.iocb_cmd;
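The request-ring checks above now reserve two extra entries (req_cnt + 2) and re-derive the free count from the hardware out pointer, so the producer index can never completely catch up to the consumer. A stand-alone sketch of that accounting (struct req_ring and ring_reserve are invented names):

#include <stdio.h>
#include <stdint.h>

/* Free-slot accounting for a circular request ring: when the cached count
 * looks too small, recompute it from the hardware consumer index, and keep
 * two slots permanently in reserve so the ring is never driven full. */
struct req_ring {
    uint16_t length;        /* total entries in the ring */
    uint16_t ring_index;    /* next entry the driver will write */
    uint16_t cnt;           /* cached free-entry count */
};

static int ring_reserve(struct req_ring *req, uint16_t req_cnt, uint16_t hw_out)
{
    if (req->cnt < req_cnt + 2) {
        if (req->ring_index < hw_out)
            req->cnt = hw_out - req->ring_index;
        else
            req->cnt = req->length - (req->ring_index - hw_out);
    }
    if (req->cnt < req_cnt + 2)
        return -1;          /* queuing error: not enough room */
    req->cnt -= req_cnt;
    return 0;
}

int main(void)
{
    struct req_ring req = { .length = 2048, .ring_index = 2040, .cnt = 1 };

    /* recomputed free space: 2048 - (2040 - 10) = 18, minus 4 reserved */
    printf("%d free=%u\n", ring_reserve(&req, 4, 10), (unsigned)req.cnt);
    return 0;
}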
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 550a4a31f51a..a04a1b1f7f32 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -56,16 +56,8 @@ qla2100_intr_handler(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
hccr = RD_REG_WORD(&reg->hccr);
- /* Check for PCI disconnection */
- if (hccr == 0xffff) {
- /*
- * Schedule this on the default system workqueue so that
- * all the adapter workqueues and the DPC thread can be
- * shutdown cleanly.
- */
- schedule_work(&ha->board_disable);
+ if (qla2x00_check_reg16_for_disconnect(vha, hccr))
break;
- }
if (hccr & HCCR_RISC_PAUSE) {
if (pci_channel_offline(ha->pdev))
break;
@@ -121,21 +113,31 @@ qla2100_intr_handler(int irq, void *dev_id)
}
bool
-qla2x00_check_reg_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
+qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
/* Check for PCI disconnection */
if (reg == 0xffffffff) {
- /*
- * Schedule this on the default system workqueue so that all the
- * adapter workqueues and the DPC thread can be shutdown
- * cleanly.
- */
- schedule_work(&vha->hw->board_disable);
+ if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
+ !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
+ !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
+ /*
+ * Schedule this (only once) on the default system
+ * workqueue so that all the adapter workqueues and the
+ * DPC thread can be shutdown cleanly.
+ */
+ schedule_work(&vha->hw->board_disable);
+ }
return true;
} else
return false;
}
+bool
+qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
+{
+ return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
+}
+
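The disconnect helpers above rely on the fact that a surprise-removed PCIe function returns all-ones on MMIO reads; the new 16-bit variant simply widens the value so one 32-bit comparison covers both register sizes, and the flag bits ensure board_disable is scheduled only once. A self-contained sketch of the register check itself (function names here are made up):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* A surprise-removed PCIe function reads back as all-ones, so a register
 * value of 0xffffffff (or 0xffff for 16-bit registers, widened as above)
 * is treated as "board gone". */
static bool reg32_disconnected(uint32_t reg)
{
    return reg == 0xffffffff;
}

static bool reg16_disconnected(uint16_t reg)
{
    return reg32_disconnected(0xffff0000 | reg);
}

int main(void)
{
    printf("%d %d\n", reg16_disconnected(0xffff), reg16_disconnected(0x1234));
    return 0;
}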
/**
* qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
* @irq:
@@ -174,7 +176,7 @@ qla2300_intr_handler(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
- if (qla2x00_check_reg_for_disconnect(vha, stat))
+ if (qla2x00_check_reg32_for_disconnect(vha, stat))
break;
if (stat & HSR_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev)))
@@ -573,8 +575,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
- uint32_t rscn_entry, host_pid;
+ uint32_t rscn_entry, host_pid, tmp_pid;
unsigned long flags;
+ fc_port_t *fcport = NULL;
/* Setup to process RIO completion. */
handle_cnt = 0;
@@ -730,7 +733,7 @@ skip_rio:
else
ha->link_data_rate = mb[1];
- ql_dbg(ql_dbg_async, vha, 0x500a,
+ ql_log(ql_log_info, vha, 0x500a,
"LOOP UP detected (%s Gbps).\n",
qla2x00_get_link_speed_str(ha, ha->link_data_rate));
@@ -743,13 +746,23 @@ skip_rio:
? RD_REG_WORD(&reg24->mailbox4) : 0;
mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
: mbx;
- ql_dbg(ql_dbg_async, vha, 0x500b,
+ ql_log(ql_log_info, vha, 0x500b,
"LOOP DOWN detected (%x %x %x %x).\n",
mb[1], mb[2], mb[3], mbx);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+			/*
+			 * On loop down, restore the WWPN from NVRAM
+			 * for FA-WWPN capable ISPs.
+			 */
+ if (ha->flags.fawwpn_enabled) {
+ void *wwpn = ha->init_cb->port_name;
+
+ memcpy(vha->port_name, wwpn, WWN_SIZE);
+ }
+
vha->device_flags |= DFLG_NO_CABLE;
qla2x00_mark_all_devices_lost(vha, 1);
}
@@ -908,7 +921,8 @@ skip_rio:
* it. Otherwise ignore it and Wait for RSCN to come in.
*/
atomic_set(&vha->loop_down_timer, 0);
- if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
+ atomic_read(&vha->loop_state) != LOOP_DEAD) {
ql_dbg(ql_dbg_async, vha, 0x5011,
"Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
mb[1], mb[2], mb[3]);
@@ -920,9 +934,6 @@ skip_rio:
ql_dbg(ql_dbg_async, vha, 0x5012,
"Port database changed %04x %04x %04x.\n",
mb[1], mb[2], mb[3]);
- ql_log(ql_log_warn, vha, 0x505f,
- "Link is operational (%s Gbps).\n",
- qla2x00_get_link_speed_str(ha, ha->link_data_rate));
/*
* Mark all devices as missing so we will login again.
@@ -969,6 +980,20 @@ skip_rio:
if (qla2x00_is_a_vp_did(vha, rscn_entry))
break;
+ /*
+ * Search for the rport related to this RSCN entry and mark it
+ * as lost.
+ */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (atomic_read(&fcport->state) != FCS_ONLINE)
+ continue;
+ tmp_pid = fcport->d_id.b24;
+ if (fcport->d_id.b24 == rscn_entry) {
+ qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ break;
+ }
+ }
+
atomic_set(&vha->loop_down_timer, 0);
vha->flags.management_server_logged_in = 0;
@@ -1086,6 +1111,14 @@ skip_rio:
qla83xx_handle_8200_aen(vha, mb);
break;
+ case MBA_DPORT_DIAGNOSTICS:
+ ql_dbg(ql_dbg_async, vha, 0x5052,
+ "D-Port Diagnostics: %04x %04x=%s\n", mb[0], mb[1],
+ mb[1] == 0 ? "start" :
+ mb[1] == 1 ? "done (ok)" :
+ mb[1] == 2 ? "done (error)" : "other");
+ break;
+
default:
ql_dbg(ql_dbg_async, vha, 0x5057,
"Unknown AEN:%04x %04x %04x %04x\n",
@@ -1975,6 +2008,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
int logit = 1;
int res = 0;
uint16_t state_flags = 0;
+ uint16_t retry_delay = 0;
sts = (sts_entry_t *) pkt;
sts24 = (struct sts_entry_24xx *) pkt;
@@ -2068,6 +2102,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
host_to_fcp_swap(sts24->data, sizeof(sts24->data));
ox_id = le16_to_cpu(sts24->ox_id);
par_sense_len = sizeof(sts24->data);
+ /* Valid values of the retry delay timer are 0x1-0xffef */
+ if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1)
+ retry_delay = sts24->retry_delay;
} else {
if (scsi_status & SS_SENSE_LEN_VALID)
sense_len = le16_to_cpu(sts->req_sense_length);
@@ -2102,6 +2139,14 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
comp_status = CS_DATA_OVERRUN;
/*
+ * Check retry_delay_timer value if we receive a busy or
+ * queue full.
+ */
+ if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
+ lscsi_status == SAM_STAT_BUSY)
+ qla2x00_set_retry_delay_timestamp(fcport, retry_delay);
+
+ /*
* Based on Host and scsi status generate status code for Linux
*/
switch (comp_status) {
@@ -2633,7 +2678,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->host_status);
- if (qla2x00_check_reg_for_disconnect(vha, stat))
+ if (qla2x00_check_reg32_for_disconnect(vha, stat))
break;
if (stat & HSRX_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev)))
@@ -2723,7 +2768,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
* we process the response queue.
*/
stat = RD_REG_DWORD(&reg->host_status);
- if (qla2x00_check_reg_for_disconnect(vha, stat))
+ if (qla2x00_check_reg32_for_disconnect(vha, stat))
goto out;
qla24xx_process_response_queue(vha, rsp);
if (!ha->flags.disable_msix_handshake) {
@@ -2763,7 +2808,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
hccr = RD_REG_DWORD_RELAXED(&reg->hccr);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
- if (qla2x00_check_reg_for_disconnect(vha, hccr))
+ if (qla2x00_check_reg32_for_disconnect(vha, hccr))
goto out;
queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
@@ -2798,7 +2843,7 @@ qla24xx_msix_default(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
do {
stat = RD_REG_DWORD(&reg->host_status);
- if (qla2x00_check_reg_for_disconnect(vha, stat))
+ if (qla2x00_check_reg32_for_disconnect(vha, stat))
break;
if (stat & HSRX_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev)))
@@ -2923,27 +2968,22 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
for (i = 0; i < ha->msix_count; i++)
entries[i].entry = i;
- ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
- if (ret) {
- if (ret < MIN_MSIX_COUNT)
- goto msix_failed;
-
+ ret = pci_enable_msix_range(ha->pdev,
+ entries, MIN_MSIX_COUNT, ha->msix_count);
+ if (ret < 0) {
+ ql_log(ql_log_fatal, vha, 0x00c7,
+ "MSI-X: Failed to enable support, "
+ "giving up -- %d/%d.\n",
+ ha->msix_count, ret);
+ goto msix_out;
+ } else if (ret < ha->msix_count) {
ql_log(ql_log_warn, vha, 0x00c6,
"MSI-X: Failed to enable support "
"-- %d/%d\n Retry with %d vectors.\n",
ha->msix_count, ret, ret);
- ha->msix_count = ret;
- ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
- if (ret) {
-msix_failed:
- ql_log(ql_log_fatal, vha, 0x00c7,
- "MSI-X: Failed to enable support, "
- "giving up -- %d/%d.\n",
- ha->msix_count, ret);
- goto msix_out;
- }
- ha->max_rsp_queues = ha->msix_count - 1;
}
+ ha->msix_count = ret;
+ ha->max_rsp_queues = ha->msix_count - 1;
ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
ha->msix_count, GFP_KERNEL);
if (!ha->msix_entries) {
@@ -3103,10 +3143,11 @@ skip_msi:
}
clear_risc_ints:
+ if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
+ goto fail;
spin_lock_irq(&ha->hardware_lock);
- if (!IS_FWI2_CAPABLE(ha))
- WRT_REG_WORD(&reg->isp.semaphore, 0);
+ WRT_REG_WORD(&reg->isp.semaphore, 0);
spin_unlock_irq(&ha->hardware_lock);
fail:
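The MSI-X hunk above replaces the enable/retry loop with pci_enable_msix_range(), which allocates anywhere between a minimum and a desired vector count in one call and returns either the number granted or a negative errno. A stripped-down, kernel-context sketch of that pattern (enable_msix_sketch is a hypothetical helper, not part of the driver):

#include <linux/pci.h>

/* Ask for up to @want vectors but accept anything >= @min; the return
 * value is the count actually granted, or a negative errno.  No manual
 * retry loop is needed any more. */
static int enable_msix_sketch(struct pci_dev *pdev, struct msix_entry *entries,
                              int min, int want)
{
    int i, got;

    for (i = 0; i < want; i++)
        entries[i].entry = i;

    got = pci_enable_msix_range(pdev, entries, min, want);
    /* got < 0:           not even @min vectors were available
     * min <= got < want: partial allocation; shrink queue count to @got
     * got == want:       full allocation */
    return got;
}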
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index d9aafc003be2..72971daa2552 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -117,7 +117,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
command = mcp->mb[0];
mboxes = mcp->out_mb;
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111,
+ ql_dbg(ql_dbg_mbx, vha, 0x1111,
"Mailbox registers (OUT):\n");
for (cnt = 0; cnt < ha->mbx_count; cnt++) {
if (IS_QLA2200(ha) && cnt == 8)
@@ -373,7 +373,7 @@ premature_exit:
mbx_done:
if (rval) {
- ql_log(ql_log_warn, base_vha, 0x1020,
+ ql_dbg(ql_dbg_disc, base_vha, 0x1020,
"**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
} else {
@@ -1085,6 +1085,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
if (IS_CNA_CAPABLE(vha->hw))
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
+ if (IS_FWI2_CAPABLE(vha->hw))
+ mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -1118,6 +1120,22 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
}
+ /* If FA-WWN supported */
+ if (mcp->mb[7] & BIT_14) {
+ vha->port_name[0] = MSB(mcp->mb[16]);
+ vha->port_name[1] = LSB(mcp->mb[16]);
+ vha->port_name[2] = MSB(mcp->mb[17]);
+ vha->port_name[3] = LSB(mcp->mb[17]);
+ vha->port_name[4] = MSB(mcp->mb[18]);
+ vha->port_name[5] = LSB(mcp->mb[18]);
+ vha->port_name[6] = MSB(mcp->mb[19]);
+ vha->port_name[7] = LSB(mcp->mb[19]);
+ fc_host_port_name(vha->host) =
+ wwn_to_u64(vha->port_name);
+ ql_dbg(ql_dbg_mbx, vha, 0x10ca,
+ "FA-WWN acquired %016llx\n",
+ wwn_to_u64(vha->port_name));
+ }
}
return rval;
@@ -1546,7 +1564,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
mcp->out_mb = MBX_0;
if (IS_FWI2_CAPABLE(vha->hw))
- mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
else
mcp->in_mb = MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
@@ -1560,6 +1578,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
states[2] = mcp->mb[3];
states[3] = mcp->mb[4];
states[4] = mcp->mb[5];
+ states[5] = mcp->mb[6]; /* DPORT status */
}
if (rval != QLA_SUCCESS) {
@@ -3328,8 +3347,24 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
+ /* FA-WWN is only for physical port */
+ if (!vp_idx) {
+ void *wwpn = ha->init_cb->port_name;
+
+ if (!MSB(stat)) {
+ if (rptid_entry->vp_idx_map[1] & BIT_6)
+ wwpn = rptid_entry->reserved_4 + 8;
+ }
+ memcpy(vha->port_name, wwpn, WWN_SIZE);
+ fc_host_port_name(vha->host) =
+ wwn_to_u64(vha->port_name);
+ ql_dbg(ql_dbg_mbx, vha, 0x1018,
+ "FA-WWN portname %016llx (%x)\n",
+ fc_host_port_name(vha->host), MSB(stat));
+ }
+
vp = vha;
- if (vp_idx == 0 && (MSB(stat) != 1))
+ if (vp_idx == 0)
goto reg_needed;
if (MSB(stat) != 0 && MSB(stat) != 2) {
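The FA-WWN hunks above rebuild the 8-byte port name from mailbox registers 16-19, taking the high byte of each 16-bit word first. A self-contained version of the same unpacking (wwn_from_mbx is an invented helper; the sample register values are arbitrary):

#include <stdio.h>
#include <stdint.h>

/* Assemble an 8-byte WWN from four 16-bit mailbox words, MSB first. */
static void wwn_from_mbx(uint8_t wwn[8], const uint16_t mb[4])
{
    int i;

    for (i = 0; i < 4; i++) {
        wwn[2 * i]     = mb[i] >> 8;    /* MSB */
        wwn[2 * i + 1] = mb[i] & 0xff;  /* LSB */
    }
}

int main(void)
{
    const uint16_t mb[4] = { 0x2100, 0x00e0, 0x8bab, 0xcdef };
    uint8_t wwn[8];
    int i;

    wwn_from_mbx(wwn, mb);
    for (i = 0; i < 8; i++)
        printf("%02x", wwn[i]);         /* 210000e08babcdef */
    printf("\n");
    return 0;
}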
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 89998244f48d..5c2e0317f1c0 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -702,6 +702,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
req->req_q_in = &reg->isp25mq.req_q_in;
req->req_q_out = &reg->isp25mq.req_q_out;
req->max_q_depth = ha->req_q_map[0]->max_q_depth;
+ req->out_ptr = (void *)(req->ring + req->length);
mutex_unlock(&ha->vport_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
"ring_ptr=%p ring_index=%d, "
@@ -811,6 +812,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
reg = ISP_QUE_REG(ha, que_id);
rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
+ rsp->in_ptr = (void *)(rsp->ring + rsp->length);
mutex_unlock(&ha->vport_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
"options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 4775baa8b6a0..80867599527d 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -695,11 +695,11 @@ qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
}
char *
-qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str)
+qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
struct qla_hw_data *ha = vha->hw;
- sprintf(str, "%s", ha->mr.fw_version);
+ snprintf(str, size, "%s", ha->mr.fw_version);
return str;
}
@@ -1551,7 +1551,10 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
ha->mr.fw_reset_timer_tick =
QLAFX00_MAX_RESET_INTERVAL;
}
- ha->mr.old_aenmbx0_state = aenmbx0;
+ if (ha->mr.old_aenmbx0_state != aenmbx0) {
+ ha->mr.old_aenmbx0_state = aenmbx0;
+ ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+ }
ha->mr.fw_reset_timer_tick--;
}
if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) {
@@ -1675,17 +1678,16 @@ qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id)
fc_port_t *fcport;
/* Check for matching device in remote port list. */
- fcport = NULL;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->tgt_id == tgt_id) {
ql_dbg(ql_dbg_async, vha, 0x5072,
"Matching fcport(%p) found with TGT-ID: 0x%x "
"and Remote TGT_ID: 0x%x\n",
fcport, fcport->tgt_id, tgt_id);
- break;
+ return fcport;
}
}
- return fcport;
+ return NULL;
}
static void
@@ -2924,7 +2926,7 @@ qlafx00_intr_handler(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; clr_intr = 0) {
stat = QLAFX00_RD_INTR_REG(ha);
- if (qla2x00_check_reg_for_disconnect(vha, stat))
+ if (qla2x00_check_reg32_for_disconnect(vha, stat))
break;
intr_stat = stat & QLAFX00_HST_INT_STS_BITS;
if (!intr_stat)
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 58f3c912d96e..54cb2ac9339b 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -857,7 +857,7 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
break;
if (timeout >= qla82xx_rom_lock_timeout) {
lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
- ql_log(ql_log_warn, vha, 0xb157,
+ ql_dbg(ql_dbg_p3p, vha, 0xb157,
"%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d",
__func__, ha->portnum, lock_owner);
return -1;
@@ -2123,7 +2123,7 @@ qla82xx_msix_default(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
do {
host_int = RD_REG_DWORD(&reg->host_int);
- if (qla2x00_check_reg_for_disconnect(vha, host_int))
+ if (qla2x00_check_reg32_for_disconnect(vha, host_int))
break;
if (host_int) {
stat = RD_REG_DWORD(&reg->host_status);
@@ -2184,7 +2184,7 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
host_int = RD_REG_DWORD(&reg->host_int);
- if (qla2x00_check_reg_for_disconnect(vha, host_int))
+ if (qla2x00_check_reg32_for_disconnect(vha, host_int))
goto out;
qla24xx_process_response_queue(vha, rsp);
WRT_REG_DWORD(&reg->host_int, 0);
@@ -2219,7 +2219,7 @@ qla82xx_poll(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
host_int = RD_REG_DWORD(&reg->host_int);
- if (qla2x00_check_reg_for_disconnect(vha, host_int))
+ if (qla2x00_check_reg32_for_disconnect(vha, host_int))
goto out;
if (host_int) {
stat = RD_REG_DWORD(&reg->host_status);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index be9698d920c2..dabd25429c58 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -105,7 +105,7 @@ MODULE_PARM_DESC(ql2xshiftctondsd,
"based on total number of SG elements.");
int ql2xfdmienable=1;
-module_param(ql2xfdmienable, int, S_IRUGO);
+module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable,
"Enables FDMI registrations. "
"0 - no FDMI. Default is 1 - perform FDMI.");
@@ -238,7 +238,9 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
static int qla2x00_change_queue_depth(struct scsi_device *, int, int);
static int qla2x00_change_queue_type(struct scsi_device *, int);
+static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
+static void qla83xx_disable_laser(scsi_qla_host_t *vha);
struct scsi_host_template qla2xxx_driver_template = {
.module = THIS_MODULE,
@@ -547,14 +549,13 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
}
static char *
-qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str)
+qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
char un_str[10];
struct qla_hw_data *ha = vha->hw;
- sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
- ha->fw_minor_version,
- ha->fw_subminor_version);
+ snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version,
+ ha->fw_minor_version, ha->fw_subminor_version);
if (ha->fw_attributes & BIT_9) {
strcat(str, "FLX");
@@ -586,11 +587,11 @@ qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str)
}
static char *
-qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
+qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
struct qla_hw_data *ha = vha->hw;
- sprintf(str, "%d.%02d.%02d (%x)", ha->fw_major_version,
+ snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version,
ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
return str;
}
@@ -730,6 +731,15 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto qc24_target_busy;
}
+ /*
+ * Return target busy if we've received a non-zero retry_delay_timer
+ * in a FCP_RSP.
+ */
+ if (time_after(jiffies, fcport->retry_delay_timestamp))
+ fcport->retry_delay_timestamp = 0;
+ else
+ goto qc24_target_busy;
+
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
if (!sp)
goto qc24_host_busy;
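The queuecommand change above gates new I/O on the per-port retry delay deadline with time_after(), which stays correct across jiffies wrap-around because it compares the signed difference. A stand-alone sketch of the same gating (the names and the local time_after_ul helper are illustrative only):

#include <stdio.h>
#include <stdbool.h>

static unsigned long jiffies;           /* stand-in for the kernel counter */

/* Wrap-safe "a is after b" check, same idea as the kernel's time_after(). */
static bool time_after_ul(unsigned long a, unsigned long b)
{
    return (long)(b - a) < 0;
}

static bool may_queue(unsigned long *retry_deadline)
{
    if (time_after_ul(jiffies, *retry_deadline)) {
        *retry_deadline = 0;            /* delay window elapsed, clear it */
        return true;
    }
    return false;                       /* still inside the delay window */
}

int main(void)
{
    unsigned long deadline = 1000;

    jiffies = 500;
    printf("%d\n", may_queue(&deadline));   /* 0: report target busy */
    jiffies = 1500;
    printf("%d\n", may_queue(&deadline));   /* 1: deadline has passed */
    return 0;
}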
@@ -860,8 +870,10 @@ qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
- while ((!(vha->flags.online) || ha->dpc_active ||
- ha->flags.mbox_busy))
+ while (((qla2x00_reset_active(vha)) || ha->dpc_active ||
+ ha->flags.mbox_busy) ||
+ test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
+ test_bit(FX00_TARGET_SCAN, &vha->dpc_flags))
msleep(1000);
}
@@ -1351,6 +1363,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
+ qlt_host_reset_handler(ha);
+
spin_lock_irqsave(&ha->hardware_lock, flags);
for (que = 0; que < ha->max_req_queues; que++) {
req = ha->req_q_map[que];
@@ -2384,6 +2398,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
"Memory allocated for ha=%p.\n", ha);
ha->pdev = pdev;
ha->tgt.enable_class_2 = ql2xenableclass2;
+ INIT_LIST_HEAD(&ha->tgt.q_full_list);
+ spin_lock_init(&ha->tgt.q_full_lock);
/* Clear our data area */
ha->bars = bars;
@@ -2527,7 +2543,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->portnum = PCI_FUNC(ha->pdev->devfn);
ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
- req_length = REQUEST_ENTRY_CNT_24XX;
+ req_length = REQUEST_ENTRY_CNT_83XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
@@ -2631,6 +2647,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
pci_set_drvdata(pdev, base_vha);
+ set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
host = base_vha->host;
base_vha->req = req;
@@ -2923,10 +2940,11 @@ skip_dpc:
pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info),
pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
base_vha->host_no,
- ha->isp_ops->fw_version_str(base_vha, fw_str));
+ ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str)));
qlt_add_target(ha, base_vha);
+ clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
return 0;
probe_init_failed:
@@ -2954,16 +2972,8 @@ probe_failed:
scsi_host_put(base_vha->host);
probe_hw_failed:
- if (IS_QLA82XX(ha)) {
- qla82xx_idc_lock(ha);
- qla82xx_clear_drv_active(ha);
- qla82xx_idc_unlock(ha);
- }
- if (IS_QLA8044(ha)) {
- qla8044_idc_lock(ha);
- qla8044_clear_drv_active(ha);
- qla8044_idc_unlock(ha);
- }
+ qla2x00_clear_drv_active(ha);
+
iospace_config_failed:
if (IS_P3P_TYPE(ha)) {
if (!ha->nx_pcibase)
@@ -3026,6 +3036,9 @@ qla2x00_shutdown(struct pci_dev *pdev)
qla2x00_free_irqs(vha);
qla2x00_free_fw_dump(ha);
+
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
}
/* Deletes all the virtual ports for a given ha */
@@ -3119,10 +3132,8 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha)
}
static void
-qla2x00_clear_drv_active(scsi_qla_host_t *vha)
+qla2x00_clear_drv_active(struct qla_hw_data *ha)
{
- struct qla_hw_data *ha = vha->hw;
-
if (IS_QLA8044(ha)) {
qla8044_idc_lock(ha);
qla8044_clear_drv_active(ha);
@@ -3140,15 +3151,25 @@ qla2x00_remove_one(struct pci_dev *pdev)
scsi_qla_host_t *base_vha;
struct qla_hw_data *ha;
+ base_vha = pci_get_drvdata(pdev);
+ ha = base_vha->hw;
+
+ /* Indicate device removal to prevent future board_disable and wait
+ * until any pending board_disable has completed. */
+ set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
+ cancel_work_sync(&ha->board_disable);
+
/*
- * If the PCI device is disabled that means that probe failed and any
- * resources should be have cleaned up on probe exit.
+ * If the PCI device is disabled then there was a PCI-disconnect and
+ * qla2x00_disable_board_on_pci_error has taken care of most of the
+ * resources.
*/
- if (!atomic_read(&pdev->enable_cnt))
+ if (!atomic_read(&pdev->enable_cnt)) {
+ scsi_host_put(base_vha->host);
+ kfree(ha);
+ pci_set_drvdata(pdev, NULL);
return;
-
- base_vha = pci_get_drvdata(pdev);
- ha = base_vha->hw;
+ }
qla2x00_wait_for_hba_ready(base_vha);
@@ -3173,6 +3194,10 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla84xx_put_chip(base_vha);
+ /* Laser should be disabled only for ISP2031 */
+ if (IS_QLA2031(ha))
+ qla83xx_disable_laser(base_vha);
+
/* Disable timer */
if (base_vha->timer_active)
qla2x00_stop_timer(base_vha);
@@ -3191,9 +3216,9 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla2x00_free_device(base_vha);
- scsi_host_put(base_vha->host);
+ qla2x00_clear_drv_active(ha);
- qla2x00_clear_drv_active(base_vha);
+ scsi_host_put(base_vha->host);
qla2x00_unmap_iobases(ha);
@@ -4808,18 +4833,15 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
qla82xx_md_free(base_vha);
qla2x00_free_queues(ha);
- scsi_host_put(base_vha->host);
-
qla2x00_unmap_iobases(ha);
pci_release_selected_regions(ha->pdev, ha->bars);
- kfree(ha);
- ha = NULL;
-
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
+ /*
+ * Let qla2x00_remove_one cleanup qla_hw_data on device removal.
+ */
}
/**************************************************************************
@@ -5192,13 +5214,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
*/
if (!pci_channel_offline(ha->pdev)) {
pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
- if (w == 0xffff)
- /*
- * Schedule this on the default system workqueue so that
- * all the adapter workqueues and the DPC thread can be
- * shutdown cleanly.
- */
- schedule_work(&ha->board_disable);
+ qla2x00_check_reg16_for_disconnect(vha, w);
}
/* Make sure qla82xx_watchdog is run only for physical port */
@@ -5706,6 +5722,32 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
ha->flags.eeh_busy = 0;
}
+static void
+qla83xx_disable_laser(scsi_qla_host_t *vha)
+{
+ uint32_t reg, data, fn;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *isp_reg = &ha->iobase->isp24;
+
+ /* pci func #/port # */
+ ql_dbg(ql_dbg_init, vha, 0x004b,
+ "Disabling Laser for hba: %p\n", vha);
+
+ fn = (RD_REG_DWORD(&isp_reg->ctrl_status) &
+ (BIT_15|BIT_14|BIT_13|BIT_12));
+
+ fn = (fn >> 12);
+
+ if (fn & 1)
+ reg = PORT_1_2031;
+ else
+ reg = PORT_0_2031;
+
+ data = LASER_OFF_2031;
+
+ qla83xx_wr_reg(vha, reg, data);
+}
+
static const struct pci_error_handlers qla2xxx_err_handler = {
.error_detected = qla2xxx_pci_error_detected,
.mmio_enabled = qla2xxx_pci_mmio_enabled,
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index bca173e56f16..b656a05613e8 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2580,7 +2580,8 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
uint32_t faddr, left, burst;
struct qla_hw_data *ha = vha->hw;
- if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA27XX(ha))
+ if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+ IS_QLA27XX(ha))
goto try_fast;
if (offset & 0xfff)
goto slow_read;
@@ -3091,7 +3092,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
ha->fw_revision[2] = dcode[2];
ha->fw_revision[3] = dcode[3];
ql_dbg(ql_dbg_init, vha, 0x0060,
- "Firmware revision %d.%d.%d.%d.\n",
+ "Firmware revision %d.%d.%d (%x).\n",
ha->fw_revision[0], ha->fw_revision[1],
ha->fw_revision[2], ha->fw_revision[3]);
}
@@ -3162,7 +3163,7 @@ qla2xxx_get_vpd_field(scsi_qla_host_t *vha, char *key, char *str, size_t size)
}
if (pos < end - len && *pos != 0x78)
- return snprintf(str, size, "%.*s", len, pos + 3);
+ return scnprintf(str, size, "%.*s", len, pos + 3);
return 0;
}
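scnprintf() is used above because the return value is handed back to the caller: snprintf() reports the length the output would have had, which can exceed the buffer, while the kernel's scnprintf() returns only what was actually stored. A userspace approximation for comparison (scnprintf_like is a hypothetical stand-in):

#include <stdio.h>

/* Return what was actually written to @buf rather than the would-be
 * length, mirroring the kernel scnprintf() contract. */
static int scnprintf_like(char *buf, size_t size, const char *src)
{
    int n = snprintf(buf, size, "%s", src);

    if (n < 0)
        return 0;
    return (size_t)n >= size ? (int)size - 1 : n;
}

int main(void)
{
    char buf[8];

    printf("snprintf=%d\n", snprintf(buf, sizeof(buf), "%s", "0123456789"));
    printf("scnprintf-like=%d\n",
           scnprintf_like(buf, sizeof(buf), "0123456789"));
    return 0;
}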
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index e632e14180cf..829752cfd73f 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -42,6 +42,11 @@
#include "qla_def.h"
#include "qla_target.h"
+static int ql2xtgt_tape_enable;
+module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xtgt_tape_enable,
+ "Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");
+
static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
@@ -54,6 +59,8 @@ MODULE_PARM_DESC(qlini_mode,
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+static int temp_sam_status = SAM_STAT_BUSY;
+
/*
* From scsi/fc/fc_fcp.h
*/
@@ -101,6 +108,10 @@ static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
*cmd, struct atio_from_isp *atio, int ha_locked);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
struct qla_tgt_srr_imm *imm, int ha_lock);
+static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
+ struct qla_tgt_cmd *cmd);
+static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio, uint16_t status, int qfull);
/*
* Global Variables
*/
@@ -178,6 +189,27 @@ struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
return NULL;
}
+static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
+
+ vha->hw->tgt.num_pend_cmds++;
+ if (vha->hw->tgt.num_pend_cmds > vha->hw->qla_stats.stat_max_pend_cmds)
+ vha->hw->qla_stats.stat_max_pend_cmds =
+ vha->hw->tgt.num_pend_cmds;
+ spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
+}
+static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
+ vha->hw->tgt.num_pend_cmds--;
+ spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
+}
+
void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
struct atio_from_isp *atio)
{
@@ -1008,6 +1040,8 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
"qla_target(%d): Sending 24xx Notify Ack %d\n",
vha->vp_idx, nack->u.isp24.status);
+ /* Memory Barrier */
+ wmb();
qla2x00_start_iocbs(vha, vha->req);
}
@@ -1031,7 +1065,7 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
return;
- resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+ resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
if (!resp) {
ql_dbg(ql_dbg_tgt, vha, 0xe04a,
"qla_target(%d): %s failed: unable to allocate "
@@ -1085,6 +1119,8 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
vha->vha_tgt.qla_tgt->abts_resp_expected++;
+ /* Memory Barrier */
+ wmb();
qla2x00_start_iocbs(vha, vha->req);
}
@@ -1102,7 +1138,7 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
return;
- ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+ ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
if (ctio == NULL) {
ql_dbg(ql_dbg_tgt, vha, 0xe04b,
"qla_target(%d): %s failed: unable to allocate "
@@ -1130,6 +1166,8 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
CTIO7_FLAGS_TERMINATE);
ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
+ /* Memory Barrier */
+ wmb();
qla2x00_start_iocbs(vha, vha->req);
qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
@@ -1178,6 +1216,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
mcmd->sess = sess;
memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
+ mcmd->reset_count = vha->hw->chip_reset;
rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
abts->exchange_addr_to_abort);
@@ -1300,6 +1339,8 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
ctio->u.status1.response_len = __constant_cpu_to_le16(8);
ctio->u.status1.sense_data[0] = resp_code;
+ /* Memory Barrier */
+ wmb();
qla2x00_start_iocbs(ha, ha->req);
}
@@ -1321,6 +1362,21 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
mcmd, mcmd->fc_tm_rsp, mcmd->flags);
spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (qla2x00_reset_active(vha) || mcmd->reset_count != ha->chip_reset) {
+ /*
+ * Either a chip reset is active or this request was from
+ * previous life, just abort the processing.
+ */
+ ql_dbg(ql_dbg_async, vha, 0xe100,
+ "RESET-TMR active/old-count/new-count = %d/%d/%d.\n",
+ qla2x00_reset_active(vha), mcmd->reset_count,
+ ha->chip_reset);
+ ha->tgt.tgt_ops->free_mcmd(mcmd);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
+ }
+
if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
0, 0, 0, 0, 0, 0);
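The reset_count checks introduced above implement a simple generation counter: qla2x00_abort_isp_cleanup() bumps ha->chip_reset, each command or TMR snapshots the value when it is created, and completion paths drop anything whose snapshot no longer matches. A stand-alone sketch of that staleness test (the struct names here are invented):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct hw   { uint32_t chip_reset; };   /* bumped on every ISP abort */
struct tcmd { uint32_t reset_count; };  /* generation seen at allocation */

static void cmd_init(struct tcmd *cmd, const struct hw *ha)
{
    cmd->reset_count = ha->chip_reset;  /* remember current generation */
}

static bool cmd_is_stale(const struct tcmd *cmd, const struct hw *ha)
{
    return cmd->reset_count != ha->chip_reset;
}

int main(void)
{
    struct hw ha = { .chip_reset = 7 };
    struct tcmd cmd;

    cmd_init(&cmd, &ha);
    ha.chip_reset++;                    /* an ISP abort happened */
    printf("%d\n", cmd_is_stale(&cmd, &ha));    /* 1: drop this command */
    return 0;
}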
@@ -1397,8 +1453,6 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
}
}
- ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
- prm->seg_cnt, prm->req_cnt);
return 0;
out_err:
@@ -1431,17 +1485,12 @@ static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
uint32_t req_cnt)
{
- struct qla_hw_data *ha = vha->hw;
- device_reg_t __iomem *reg = ha->iobase;
- uint32_t cnt;
+ uint32_t cnt, cnt_in;
if (vha->req->cnt < (req_cnt + 2)) {
- cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);
+ cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
+ cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in);
- ql_dbg(ql_dbg_tgt, vha, 0xe00a,
- "Request ring circled: cnt=%d, vha->->ring_index=%d, "
- "vha->req->cnt=%d, req_cnt=%d\n", cnt,
- vha->req->ring_index, vha->req->cnt, req_cnt);
if (vha->req->ring_index < cnt)
vha->req->cnt = cnt - vha->req->ring_index;
else
@@ -1450,11 +1499,10 @@ static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
}
if (unlikely(vha->req->cnt < (req_cnt + 2))) {
- ql_dbg(ql_dbg_tgt, vha, 0xe00b,
- "qla_target(%d): There is no room in the "
- "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
- "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
- vha->req->cnt, req_cnt);
+ ql_dbg(ql_dbg_io, vha, 0x305a,
+ "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
+ vha->vp_idx, vha->req->ring_index,
+ vha->req->cnt, req_cnt, cnt, cnt_in, vha->req->length);
return -EAGAIN;
}
vha->req->cnt -= req_cnt;
@@ -1491,7 +1539,7 @@ static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
if (h > DEFAULT_OUTSTANDING_COMMANDS)
h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
if (h == ha->tgt.current_handle) {
- ql_dbg(ql_dbg_tgt, vha, 0xe04e,
+ ql_dbg(ql_dbg_io, vha, 0x305b,
"qla_target(%d): Ran out of "
"empty cmd slots in ha %p\n", vha->vp_idx, ha);
h = QLA_TGT_NULL_HANDLE;
@@ -1548,9 +1596,6 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
pkt->u.status0.ox_id = cpu_to_le16(temp);
pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
- ql_dbg(ql_dbg_tgt, vha, 0xe00c,
- "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
- vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, temp);
return 0;
}
@@ -1608,14 +1653,6 @@ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
}
*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
- ql_dbg(ql_dbg_tgt, vha, 0xe00d,
- "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
- (long long unsigned int)
- pci_dma_hi32(sg_dma_address(prm->sg)),
- (long long unsigned int)
- pci_dma_lo32(sg_dma_address(prm->sg)),
- (int)sg_dma_len(prm->sg));
-
prm->sg = sg_next(prm->sg);
}
}
@@ -1633,11 +1670,6 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm,
int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
- ql_dbg(ql_dbg_tgt, vha, 0xe00e,
- "iocb->scsi_status=%x, iocb->flags=%x\n",
- le16_to_cpu(pkt24->u.status0.scsi_status),
- le16_to_cpu(pkt24->u.status0.flags));
-
pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
/* Setup packet address segment pointer */
@@ -1655,7 +1687,6 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm,
}
/* If scatter gather */
- ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");
/* Load command entry data segments */
for (cnt = 0;
@@ -1670,14 +1701,6 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm,
}
*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
- ql_dbg(ql_dbg_tgt, vha, 0xe010,
- "S/G Segment phys_addr=%llx:%llx, len=%d\n",
- (long long unsigned int)pci_dma_hi32(sg_dma_address(
- prm->sg)),
- (long long unsigned int)pci_dma_lo32(sg_dma_address(
- prm->sg)),
- (int)sg_dma_len(prm->sg));
-
prm->sg = sg_next(prm->sg);
}
@@ -1708,6 +1731,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
se_cmd, cmd->tag);
cmd->state = QLA_TGT_STATE_ABORTED;
+ cmd->cmd_flags |= BIT_6;
qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
@@ -1715,10 +1739,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
}
- ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u ox_id %04x\n",
- vha->vp_idx, cmd->tag,
- be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
-
prm->cmd = cmd;
prm->tgt = tgt;
prm->rq_result = scsi_status;
@@ -1729,15 +1749,10 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
prm->req_cnt = 1;
prm->add_status_pkt = 0;
- ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
- prm->rq_result, xmit_type);
-
/* Send marker if required */
if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
return -EFAULT;
- ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);
-
if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
if (qlt_pci_map_calc_cnt(prm) != 0)
return -EAGAIN;
@@ -1747,7 +1762,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
prm->residual = se_cmd->residual_count;
- ql_dbg(ql_dbg_tgt, vha, 0xe014,
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c,
"Residual underflow: %d (tag %d, "
"op %x, bufflen %d, rq_result %x)\n", prm->residual,
cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
@@ -1755,7 +1770,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
prm->rq_result |= SS_RESIDUAL_UNDER;
} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
prm->residual = se_cmd->residual_count;
- ql_dbg(ql_dbg_tgt, vha, 0xe015,
+ ql_dbg(ql_dbg_io, vha, 0x305d,
"Residual overflow: %d (tag %d, "
"op %x, bufflen %d, rq_result %x)\n", prm->residual,
cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
@@ -1778,10 +1793,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
}
}
- ql_dbg(ql_dbg_tgt, vha, 0xe016,
- "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
- prm->req_cnt, *full_req_cnt, prm->add_status_pkt);
-
return 0;
}
@@ -2310,6 +2321,21 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
+ /*
+ * Either a chip reset is active or this request was from
+ * previous life, just abort the processing.
+ */
+ cmd->state = QLA_TGT_STATE_PROCESSED;
+ qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+ ql_dbg(ql_dbg_async, vha, 0xe101,
+ "RESET-RSP active/old-count/new-count = %d/%d/%d.\n",
+ qla2x00_reset_active(vha), cmd->reset_count,
+ ha->chip_reset);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return 0;
+ }
+
/* Does F/W have an IOCBs for this request */
res = qlt_check_reserve_free_req(vha, full_req_cnt);
if (unlikely(res))
@@ -2358,8 +2384,9 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
struct ctio7_to_24xx *ctio =
(struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
- ql_dbg(ql_dbg_tgt, vha, 0xe019,
- "Building additional status packet\n");
+ ql_dbg(ql_dbg_io, vha, 0x305e,
+ "Building additional status packet 0x%p.\n",
+ ctio);
/*
* T10Dif: ctio_crc2_to_fw overlay ontop of
@@ -2391,11 +2418,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
+ cmd->cmd_sent_to_fw = 1;
- ql_dbg(ql_dbg_tgt, vha, 0xe01a,
- "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n",
- pkt, scsi_status);
-
+ /* Memory Barrier */
+ wmb();
qla2x00_start_iocbs(vha, vha->req);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2430,17 +2456,27 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
return -EIO;
- ql_dbg(ql_dbg_tgt, vha, 0xe01b,
- "%s: CTIO_start: vha(%d) se_cmd %p ox_id %04x\n",
- __func__, (int)vha->vp_idx, &cmd->se_cmd,
- be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
-
/* Calculate number of entries and segments required */
if (qlt_pci_map_calc_cnt(&prm) != 0)
return -EAGAIN;
spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
+ /*
+ * Either a chip reset is active or this request was from
+ * previous life, just abort the processing.
+ */
+ cmd->state = QLA_TGT_STATE_NEED_DATA;
+ qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+ ql_dbg(ql_dbg_async, vha, 0xe102,
+ "RESET-XFR active/old-count/new-count = %d/%d/%d.\n",
+ qla2x00_reset_active(vha), cmd->reset_count,
+ ha->chip_reset);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return 0;
+ }
+
/* Does F/W have an IOCBs for this request */
res = qlt_check_reserve_free_req(vha, prm.req_cnt);
if (res != 0)
@@ -2460,7 +2496,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
qlt_load_data_segments(&prm, vha);
cmd->state = QLA_TGT_STATE_NEED_DATA;
+ cmd->cmd_sent_to_fw = 1;
+ /* Memory Barrier */
+ wmb();
qla2x00_start_iocbs(vha, vha->req);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2503,7 +2542,7 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
"iocb(s) %p Returned STATUS.\n", sts);
ql_dbg(ql_dbg_tgt, vha, 0xf075,
- "dif check TGT cdb 0x%x lba 0x%llu: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
+ "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
@@ -2626,7 +2665,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
- pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+ pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
if (pkt == NULL) {
ql_dbg(ql_dbg_tgt, vha, 0xe050,
"qla_target(%d): %s failed: unable to allocate "
@@ -2669,6 +2708,8 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
if (ctio24->u.status1.residual != 0)
ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
+ /* Memory Barrier */
+ wmb();
qla2x00_start_iocbs(vha, vha->req);
return ret;
}
@@ -2684,24 +2725,19 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
if (ha_locked) {
rc = __qlt_send_term_exchange(vha, cmd, atio);
+ if (rc == -ENOMEM)
+ qlt_alloc_qfull_cmd(vha, atio, 0, 0);
goto done;
}
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
rc = __qlt_send_term_exchange(vha, cmd, atio);
+ if (rc == -ENOMEM)
+ qlt_alloc_qfull_cmd(vha, atio, 0, 0);
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
done:
- /*
- * Terminate exchange will tell fw to release any active CTIO
- * that's in FW posession and cleanup the exchange.
- *
- * "cmd->state == QLA_TGT_STATE_ABORTED" means CTIO is still
- * down at FW. Free the cmd later when CTIO comes back later
- * w/aborted(0x2) status.
- *
- * "cmd->state != QLA_TGT_STATE_ABORTED" means CTIO is already
- * back w/some err. Free the cmd now.
- */
- if ((rc == 1) && (cmd->state != QLA_TGT_STATE_ABORTED)) {
+ if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
+ !cmd->cmd_sent_to_fw)) {
if (!ha_locked && !in_interrupt())
msleep(250); /* just in case */
@@ -2712,6 +2748,53 @@ done:
return;
}
+static void qlt_init_term_exchange(struct scsi_qla_host *vha)
+{
+ struct list_head free_list;
+ struct qla_tgt_cmd *cmd, *tcmd;
+
+ vha->hw->tgt.leak_exchg_thresh_hold =
+ (vha->hw->fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
+
+ cmd = tcmd = NULL;
+ if (!list_empty(&vha->hw->tgt.q_full_list)) {
+ INIT_LIST_HEAD(&free_list);
+ list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
+
+ list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
+ list_del(&cmd->cmd_list);
+ /* This cmd was never sent to TCM. There is no need
+ * to schedule free or call free_cmd
+ */
+ qlt_free_cmd(cmd);
+ vha->hw->tgt.num_qfull_cmds_alloc--;
+ }
+ }
+ vha->hw->tgt.num_qfull_cmds_dropped = 0;
+}
+
+static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
+{
+ uint32_t total_leaked;
+
+ total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
+
+ if (vha->hw->tgt.leak_exchg_thresh_hold &&
+ (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe079,
+ "Chip reset due to exchange starvation: %d/%d.\n",
+ total_leaked, vha->hw->fw_xcb_count);
+
+ if (IS_P3P_TYPE(vha->hw))
+ set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+ else
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+
+}
+
void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
struct qla_tgt_sess *sess = cmd->sess;
@@ -2721,7 +2804,13 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
__func__, &cmd->se_cmd,
be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
+ BUG_ON(cmd->cmd_in_wq);
+
+ if (!cmd->q_full)
+ qlt_decr_num_pend_cmds(cmd->vha);
+
BUG_ON(cmd->sg_mapped);
+ cmd->jiffies_at_free = get_jiffies_64();
if (unlikely(cmd->free_sg))
kfree(cmd->sg);
@@ -2729,6 +2818,7 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
WARN_ON(1);
return;
}
+ cmd->jiffies_at_free = get_jiffies_64();
percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
}
EXPORT_SYMBOL(qlt_free_cmd);
@@ -2742,6 +2832,7 @@ static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
struct qla_tgt_srr_imm *imm;
tgt->ctio_srr_id++;
+ cmd->cmd_flags |= BIT_15;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
"qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
@@ -2863,11 +2954,9 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
CTIO_INTERMEDIATE_HANDLE_MARK);
if (handle != QLA_TGT_NULL_HANDLE) {
- if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) {
- ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s",
- "SKIP_HANDLE CTIO\n");
+ if (unlikely(handle == QLA_TGT_SKIP_HANDLE))
return NULL;
- }
+
/* handle-1 is actually used */
if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
ql_dbg(ql_dbg_tgt, vha, 0xe052,
@@ -2894,6 +2983,81 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
return cmd;
}
+/* hardware_lock should be held by caller. */
+static void
+qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t handle;
+
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
+
+ handle = qlt_make_handle(vha);
+
+ /* TODO: fix debug message type and ids. */
+ if (cmd->state == QLA_TGT_STATE_PROCESSED) {
+ ql_dbg(ql_dbg_io, vha, 0xff00,
+ "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle);
+ } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+ cmd->write_data_transferred = 0;
+ cmd->state = QLA_TGT_STATE_DATA_IN;
+
+ ql_dbg(ql_dbg_io, vha, 0xff01,
+ "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle);
+
+ ha->tgt.tgt_ops->handle_data(cmd);
+ return;
+ } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
+ ql_dbg(ql_dbg_io, vha, 0xff02,
+ "HOST-ABORT: handle=%d, state=ABORTED.\n", handle);
+ } else {
+ ql_dbg(ql_dbg_io, vha, 0xff03,
+ "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
+ cmd->state);
+ dump_stack();
+ }
+
+ cmd->cmd_flags |= BIT_12;
+ ha->tgt.tgt_ops->free_cmd(cmd);
+}
+
+void
+qlt_host_reset_handler(struct qla_hw_data *ha)
+{
+ struct qla_tgt_cmd *cmd;
+ unsigned long flags;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ scsi_qla_host_t *vha = NULL;
+ struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt;
+ uint32_t i;
+
+ if (!base_vha->hw->tgt.tgt_ops)
+ return;
+
+ if (!tgt || qla_ini_mode_enabled(base_vha)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
+ "Target mode disabled\n");
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10,
+ "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n",
+ base_vha->dpc_flags);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) {
+ cmd = qlt_get_cmd(base_vha, i);
+ if (!cmd)
+ continue;
+ /* ha->tgt.cmds entry is cleared by qlt_get_cmd. */
+ vha = cmd->vha;
+ qlt_abort_cmd_on_host_reset(vha, cmd);
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
@@ -2905,10 +3069,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
struct target_core_fabric_ops *tfo;
struct qla_tgt_cmd *cmd;
- ql_dbg(ql_dbg_tgt, vha, 0xe01e,
- "qla_target(%d): handle(ctio %p status %#x) <- %08x\n",
- vha->vp_idx, ctio, status, handle);
-
if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
/* That could happen only in case of an error/reset/abort */
if (status != CTIO_SUCCESS) {
@@ -2925,6 +3085,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
se_cmd = &cmd->se_cmd;
tfo = se_cmd->se_tfo;
+ cmd->cmd_sent_to_fw = 0;
if (cmd->sg_mapped)
qlt_unmap_sg(vha, cmd);
@@ -3011,7 +3172,8 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
* level.
*/
if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
- (cmd->state != QLA_TGT_STATE_ABORTED)) {
+ (cmd->state != QLA_TGT_STATE_ABORTED)) {
+ cmd->cmd_flags |= BIT_13;
if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
return;
}
@@ -3019,7 +3181,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
skip_term:
if (cmd->state == QLA_TGT_STATE_PROCESSED) {
- ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
+ ;
} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
int rx_status = 0;
@@ -3030,10 +3192,6 @@ skip_term:
else
cmd->write_data_transferred = 1;
- ql_dbg(ql_dbg_tgt, vha, 0xe020,
- "Data received, context %x, rx_status %d\n",
- 0x0, rx_status);
-
ha->tgt.tgt_ops->handle_data(cmd);
return;
} else if (cmd->state == QLA_TGT_STATE_ABORTED) {
@@ -3051,6 +3209,7 @@ skip_term:
dump_stack();
}
+
ha->tgt.tgt_ops->free_cmd(cmd);
}
@@ -3103,6 +3262,8 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
uint32_t data_length;
int ret, fcp_task_attr, data_dir, bidi = 0;
+ cmd->cmd_in_wq = 0;
+ cmd->cmd_flags |= BIT_1;
if (tgt->tgt_stop)
goto out_term;
@@ -3128,11 +3289,6 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
&atio->u.isp24.fcp_cmnd.add_cdb[
atio->u.isp24.fcp_cmnd.add_cdb_len]));
- ql_dbg(ql_dbg_tgt, vha, 0xe022,
- "qla_target: START qla cmd: %p se_cmd %p lun: 0x%04x (tag %d) len(%d) ox_id %x\n",
- cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length,
- cmd->atio.u.isp24.fcp_hdr.ox_id);
-
ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
fcp_task_attr, data_dir, bidi);
if (ret != 0)
@@ -3146,13 +3302,16 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
return;
out_term:
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
+ ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
/*
* cmd has not sent to target yet, so pass NULL as the second
* argument to qlt_send_term_exchange() and free the memory here.
*/
+ cmd->cmd_flags |= BIT_2;
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
+
+ qlt_decr_num_pend_cmds(vha);
percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3183,6 +3342,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
memcpy(&cmd->atio, atio, sizeof(*atio));
cmd->state = QLA_TGT_STATE_NEW;
cmd->tgt = vha->vha_tgt.qla_tgt;
+ qlt_incr_num_pend_cmds(vha);
cmd->vha = vha;
cmd->se_cmd.map_tag = tag;
cmd->sess = sess;
@@ -3264,7 +3424,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
struct qla_tgt_cmd *cmd;
if (unlikely(tgt->tgt_stop)) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
+ ql_dbg(ql_dbg_io, vha, 0x3061,
"New command while device %p is shutting down\n", tgt);
return -EFAULT;
}
@@ -3277,6 +3437,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
return -ENOMEM;
memcpy(&op->atio, atio, sizeof(*atio));
+ op->vha = vha;
INIT_WORK(&op->work, qlt_create_sess_from_atio);
queue_work(qla_tgt_wq, &op->work);
return 0;
@@ -3288,12 +3449,19 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
cmd = qlt_get_tag(vha, sess, atio);
if (!cmd) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
+ ql_dbg(ql_dbg_io, vha, 0x3062,
"qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
ha->tgt.tgt_ops->put_sess(sess);
return -ENOMEM;
}
+ cmd->cmd_flags = 0;
+ cmd->jiffies_at_alloc = get_jiffies_64();
+
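+ /*
+ * Remember the chip reset generation so commands issued before a
+ * chip reset can be recognized afterwards.
+ */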
+ cmd->reset_count = vha->hw->chip_reset;
+
+ cmd->cmd_in_wq = 1;
+ cmd->cmd_flags |= BIT_0;
INIT_WORK(&cmd->work, qlt_do_work);
queue_work(qla_tgt_wq, &cmd->work);
return 0;
@@ -3327,6 +3495,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
}
mcmd->tmr_func = fn;
mcmd->flags = flags;
+ mcmd->reset_count = vha->hw->chip_reset;
switch (fn) {
case QLA_TGT_CLEAR_ACA:
@@ -3462,6 +3631,7 @@ static int __qlt_abort_task(struct scsi_qla_host *vha,
lun = a->u.isp24.fcp_cmnd.lun;
unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+ mcmd->reset_count = vha->hw->chip_reset;
rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
le16_to_cpu(iocb->u.isp2x.seq_id));
@@ -3753,8 +3923,10 @@ static void qlt_handle_srr(struct scsi_qla_host *vha,
qlt_send_notify_ack(vha, ntfy,
0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (xmit_type & QLA_TGT_XMIT_DATA)
+ if (xmit_type & QLA_TGT_XMIT_DATA) {
+ cmd->cmd_flags |= BIT_8;
qlt_rdy_to_xfer(cmd);
+ }
} else {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
"qla_target(%d): SRR for out data for cmd "
@@ -3772,8 +3944,10 @@ static void qlt_handle_srr(struct scsi_qla_host *vha,
}
/* Transmit response in case of status and data-in cases */
- if (resp)
+ if (resp) {
+ cmd->cmd_flags |= BIT_7;
qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
+ }
return;
@@ -3786,8 +3960,10 @@ out_reject:
if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
cmd->state = QLA_TGT_STATE_DATA_IN;
dump_stack();
- } else
+ } else {
+ cmd->cmd_flags |= BIT_9;
qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+ }
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -3901,7 +4077,7 @@ static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
tgt->imm_srr_id++;
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n",
+ ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n",
vha->vp_idx);
imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
@@ -4121,7 +4297,7 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
* This function sends busy to ISP 2xxx or 24xx.
*/
-static void qlt_send_busy(struct scsi_qla_host *vha,
+static int __qlt_send_busy(struct scsi_qla_host *vha,
struct atio_from_isp *atio, uint16_t status)
{
struct ctio7_to_24xx *ctio24;
@@ -4133,16 +4309,16 @@ static void qlt_send_busy(struct scsi_qla_host *vha,
atio->u.isp24.fcp_hdr.s_id);
if (!sess) {
qlt_send_term_exchange(vha, NULL, atio, 1);
- return;
+ return 0;
}
/* Sending marker isn't necessary, since we called from ISR */
pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
if (!pkt) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
+ ql_dbg(ql_dbg_io, vha, 0x3063,
"qla_target(%d): %s failed: unable to allocate "
"request packet", vha->vp_idx, __func__);
- return;
+ return -ENOMEM;
}
pkt->entry_count = 1;
@@ -4167,13 +4343,192 @@ static void qlt_send_busy(struct scsi_qla_host *vha,
*/
ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
ctio24->u.status1.scsi_status = cpu_to_le16(status);
- ctio24->u.status1.residual = get_unaligned((uint32_t *)
- &atio->u.isp24.fcp_cmnd.add_cdb[
- atio->u.isp24.fcp_cmnd.add_cdb_len]);
- if (ctio24->u.status1.residual != 0)
- ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
-
+ /* Memory Barrier */
+ wmb();
qla2x00_start_iocbs(vha, vha->req);
+ return 0;
+}
+
+/*
+ * This routine is used to allocate a command for either a QFull condition
+ * (i.e. reply SAM_STAT_BUSY) or to terminate an exchange that did not go
+ * out previously.
+ */
+static void
+qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio, uint16_t status, int qfull)
+{
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess;
+ struct se_session *se_sess;
+ struct qla_tgt_cmd *cmd;
+ int tag;
+
+ if (unlikely(tgt->tgt_stop)) {
+ ql_dbg(ql_dbg_io, vha, 0x300a,
+ "New command while device %p is shutting down\n", tgt);
+ return;
+ }
+
+ if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
+ vha->hw->tgt.num_qfull_cmds_dropped++;
+ if (vha->hw->tgt.num_qfull_cmds_dropped >
+ vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
+ vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
+ vha->hw->tgt.num_qfull_cmds_dropped;
+
+ ql_dbg(ql_dbg_io, vha, 0x3068,
+ "qla_target(%d): %s: QFull CMD dropped[%d]\n",
+ vha->vp_idx, __func__,
+ vha->hw->tgt.num_qfull_cmds_dropped);
+
+ qlt_chk_exch_leak_thresh_hold(vha);
+ return;
+ }
+
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id
+ (vha, atio->u.isp24.fcp_hdr.s_id);
+ if (!sess)
+ return;
+
+ se_sess = sess->se_sess;
+
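+ /* Borrow a preallocated command descriptor from the session's tag pool. */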
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ if (tag < 0)
+ return;
+
+ cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
+ if (!cmd) {
+ ql_dbg(ql_dbg_io, vha, 0x3009,
+ "qla_target(%d): %s: Allocation of cmd failed\n",
+ vha->vp_idx, __func__);
+
+ vha->hw->tgt.num_qfull_cmds_dropped++;
+ if (vha->hw->tgt.num_qfull_cmds_dropped >
+ vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
+ vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
+ vha->hw->tgt.num_qfull_cmds_dropped;
+
+ qlt_chk_exch_leak_thresh_hold(vha);
+ return;
+ }
+
+ memset(cmd, 0, sizeof(struct qla_tgt_cmd));
+
+ qlt_incr_num_pend_cmds(vha);
+ INIT_LIST_HEAD(&cmd->cmd_list);
+ memcpy(&cmd->atio, atio, sizeof(*atio));
+
+ cmd->tgt = vha->vha_tgt.qla_tgt;
+ cmd->vha = vha;
+ cmd->reset_count = vha->hw->chip_reset;
+ cmd->q_full = 1;
+
+ if (qfull) {
+ cmd->q_full = 1;
+ /* NOTE: borrowing the state field to carry the status */
+ cmd->state = status;
+ } else
+ cmd->term_exchg = 1;
+
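+ /* Park the command on the q_full list; qlt_free_qfull_cmds() sends it later. */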
+ list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
+
+ vha->hw->tgt.num_qfull_cmds_alloc++;
+ if (vha->hw->tgt.num_qfull_cmds_alloc >
+ vha->hw->qla_stats.stat_max_qfull_cmds_alloc)
+ vha->hw->qla_stats.stat_max_qfull_cmds_alloc =
+ vha->hw->tgt.num_qfull_cmds_alloc;
+}
+
+int
+qlt_free_qfull_cmds(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+ struct qla_tgt_cmd *cmd, *tcmd;
+ struct list_head free_list;
+ int rc = 0;
+
+ if (list_empty(&ha->tgt.q_full_list))
+ return 0;
+
+ INIT_LIST_HEAD(&free_list);
+
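+ /*
+ * Under hardware_lock, retry the queued busy/term responses; anything
+ * successfully issued is moved to free_list and released once the
+ * lock is dropped.
+ */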
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+
+ if (list_empty(&ha->tgt.q_full_list)) {
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+ return 0;
+ }
+
+ list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) {
+ if (cmd->q_full)
+ /* cmd->state is a borrowed field to hold status */
+ rc = __qlt_send_busy(vha, &cmd->atio, cmd->state);
+ else if (cmd->term_exchg)
+ rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio);
+
+ if (rc == -ENOMEM)
+ break;
+
+ if (cmd->q_full)
+ ql_dbg(ql_dbg_io, vha, 0x3006,
+ "%s: busy sent for ox_id[%04x]\n", __func__,
+ be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
+ else if (cmd->term_exchg)
+ ql_dbg(ql_dbg_io, vha, 0x3007,
+ "%s: Term exchg sent for ox_id[%04x]\n", __func__,
+ be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
+ else
+ ql_dbg(ql_dbg_io, vha, 0x3008,
+ "%s: Unexpected cmd in QFull list %p\n", __func__,
+ cmd);
+
+ list_del(&cmd->cmd_list);
+ list_add_tail(&cmd->cmd_list, &free_list);
+
+ /* piggy back on hardware_lock for protection */
+ vha->hw->tgt.num_qfull_cmds_alloc--;
+ }
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
+ cmd = NULL;
+
+ list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
+ list_del(&cmd->cmd_list);
+ /* This cmd was never sent to TCM. There is no need
+ * to schedule free or call free_cmd
+ */
+ qlt_free_cmd(cmd);
+ }
+ return rc;
+}
+
+static void
+qlt_send_busy(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio, uint16_t status)
+{
+ int rc = 0;
+
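+ /* If no IOCB space is available, queue the busy reply for a later retry. */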
+ rc = __qlt_send_busy(vha, atio, status);
+ if (rc == -ENOMEM)
+ qlt_alloc_qfull_cmd(vha, atio, status, 1);
+}
+
+static int
+qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t status;
+
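+ /* Reply busy once the number of pending commands crosses the threshold. */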
+ if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
+ return 0;
+
+ status = temp_sam_status;
+ qlt_send_busy(vha, atio, status);
+ return 1;
}
/* ha->hardware_lock supposed to be held on entry */
@@ -4186,14 +4541,10 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
int rc;
if (unlikely(tgt == NULL)) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039,
+ ql_dbg(ql_dbg_io, vha, 0x3064,
"ATIO pkt, but no tgt (ha %p)", ha);
return;
}
- ql_dbg(ql_dbg_tgt, vha, 0xe02c,
- "qla_target(%d): ATIO pkt %p: type %02x count %02x",
- vha->vp_idx, atio, atio->u.raw.entry_type,
- atio->u.raw.entry_count);
/*
 * In tgt_stop mode we should also allow all requests to pass.
 * Otherwise, some commands can get stuck.
@@ -4203,33 +4554,28 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
switch (atio->u.raw.entry_type) {
case ATIO_TYPE7:
- ql_dbg(ql_dbg_tgt, vha, 0xe02d,
- "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, cdb %x, add_cdb_len %x, data_length %04x, s_id %02x%02x%02x\n",
- vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
- atio->u.isp24.fcp_cmnd.rddata,
- atio->u.isp24.fcp_cmnd.wrdata,
- atio->u.isp24.fcp_cmnd.cdb[0],
- atio->u.isp24.fcp_cmnd.add_cdb_len,
- be32_to_cpu(get_unaligned((uint32_t *)
- &atio->u.isp24.fcp_cmnd.add_cdb[
- atio->u.isp24.fcp_cmnd.add_cdb_len])),
- atio->u.isp24.fcp_hdr.s_id[0],
- atio->u.isp24.fcp_hdr.s_id[1],
- atio->u.isp24.fcp_hdr.s_id[2]);
-
if (unlikely(atio->u.isp24.exchange_addr ==
ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
- ql_dbg(ql_dbg_tgt, vha, 0xe058,
+ ql_dbg(ql_dbg_io, vha, 0x3065,
"qla_target(%d): ATIO_TYPE7 "
"received with UNKNOWN exchange address, "
"sending QUEUE_FULL\n", vha->vp_idx);
qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
break;
}
- if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0))
+
+ if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
+ rc = qlt_chk_qfull_thresh_hold(vha, atio);
+ if (rc != 0) {
+ tgt->irq_cmd_count--;
+ return;
+ }
rc = qlt_handle_cmd_for_atio(vha, atio);
- else
+ } else {
rc = qlt_handle_task_mgmt(vha, atio);
+ }
if (unlikely(rc != 0)) {
if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
@@ -4293,11 +4639,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
return;
}
- ql_dbg(ql_dbg_tgt, vha, 0xe02f,
- "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
- "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
- pkt->entry_count, pkt->entry_status, pkt->handle);
-
/*
 * In tgt_stop mode we should also allow all requests to pass.
 * Otherwise, some commands can get stuck.
@@ -4310,9 +4651,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
case CTIO_TYPE7:
{
struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
- ql_dbg(ql_dbg_tgt, vha, 0xe030,
- "CTIO[0x%x] 12/CTIO7 7A/CRC2: instance %d\n",
- entry->entry_type, vha->vp_idx);
qlt_do_ctio_completion(vha, entry->handle,
le16_to_cpu(entry->status)|(pkt->entry_status << 16),
entry);
@@ -4323,15 +4661,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
{
struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
int rc;
- ql_dbg(ql_dbg_tgt, vha, 0xe031,
- "ACCEPT_TGT_IO instance %d status %04x "
- "lun %04x read/write %d data_length %04x "
- "target_id %02x rx_id %04x\n ", vha->vp_idx,
- le16_to_cpu(atio->u.isp2x.status),
- le16_to_cpu(atio->u.isp2x.lun),
- atio->u.isp2x.execution_codes,
- le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha,
- atio), atio->u.isp2x.rx_id);
if (atio->u.isp2x.status !=
__constant_cpu_to_le16(ATIO_CDB_VALID)) {
ql_dbg(ql_dbg_tgt, vha, 0xe05e,
@@ -4340,10 +4669,12 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
le16_to_cpu(atio->u.isp2x.status));
break;
}
- ql_dbg(ql_dbg_tgt, vha, 0xe032,
- "FCP CDB: 0x%02x, sizeof(cdb): %lu",
- atio->u.isp2x.cdb[0], (unsigned long
- int)sizeof(atio->u.isp2x.cdb));
+
+ rc = qlt_chk_qfull_thresh_hold(vha, atio);
+ if (rc != 0) {
+ tgt->irq_cmd_count--;
+ return;
+ }
rc = qlt_handle_cmd_for_atio(vha, atio);
if (unlikely(rc != 0)) {
@@ -4376,8 +4707,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
case CONTINUE_TGT_IO_TYPE:
{
struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
- ql_dbg(ql_dbg_tgt, vha, 0xe033,
- "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx);
qlt_do_ctio_completion(vha, entry->handle,
le16_to_cpu(entry->status)|(pkt->entry_status << 16),
entry);
@@ -4387,8 +4716,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
case CTIO_A64_TYPE:
{
struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
- ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n",
- vha->vp_idx);
qlt_do_ctio_completion(vha, entry->handle,
le16_to_cpu(entry->status)|(pkt->entry_status << 16),
entry);
@@ -4492,11 +4819,6 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
int login_code;
- ql_dbg(ql_dbg_tgt, vha, 0xe039,
- "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
- vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
- ha->operating_mode, ha->current_topology);
-
if (!ha->tgt.tgt_ops)
return;
@@ -4573,11 +4895,6 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
break;
default:
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
- "qla_target(%d): Async event %#x occurred: "
- "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
- code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
break;
}
@@ -4598,8 +4915,6 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
return NULL;
}
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id);
-
fcport->loop_id = loop_id;
rc = qla2x00_get_port_database(vha, fcport, 0);
@@ -4898,6 +5213,10 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
qlt_release(vha->vha_tgt.qla_tgt);
return 0;
}
+
+ /* free left over qfull cmds */
+ qlt_init_term_exchange(vha);
+
mutex_lock(&qla_tgt_mutex);
list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
mutex_unlock(&qla_tgt_mutex);
@@ -5295,8 +5614,13 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
/* Enable initial LIP */
nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
- /* Enable FC tapes support */
- nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
+ if (ql2xtgt_tape_enable)
+ /* Enable FC Tape support */
+ nv->firmware_options_2 |= cpu_to_le32(BIT_12);
+ else
+ /* Disable FC Tape support */
+ nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
+
/* Disable Full Login after LIP */
nv->host_p &= __constant_cpu_to_le32(~BIT_10);
/* Enable target PRLI control */
@@ -5378,8 +5702,13 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
/* Enable initial LIP */
nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
- /* Enable FC tapes support */
- nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
+ if (ql2xtgt_tape_enable)
+ /* Enable FC tape support */
+ nv->firmware_options_2 |= cpu_to_le32(BIT_12);
+ else
+ /* Disable FC tape support */
+ nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
+
/* Disable Full Login after LIP */
nv->host_p &= __constant_cpu_to_le32(~BIT_10);
/* Enable target PRLI control */
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index d1d24fb0160a..8ff330f7d6f5 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -915,6 +915,10 @@ struct qla_tgt_cmd {
unsigned int aborted:1; /* Needed in case of SRR */
unsigned int write_data_transferred:1;
unsigned int ctx_dsd_alloced:1;
+ unsigned int q_full:1;
+ unsigned int term_exchg:1;
+ unsigned int cmd_sent_to_fw:1;
+ unsigned int cmd_in_wq:1;
struct scatterlist *sg; /* cmd data buffer SG vector */
int sg_cnt; /* SG segments count */
@@ -923,10 +927,12 @@ struct qla_tgt_cmd {
uint32_t tag;
uint32_t unpacked_lun;
enum dma_data_direction dma_data_direction;
+ uint32_t reset_count;
uint16_t loop_id; /* to save extra sess dereferences */
struct qla_tgt *tgt; /* to save extra sess dereferences */
struct scsi_qla_host *vha;
+ struct list_head cmd_list;
struct atio_from_isp atio;
/* t10dif */
@@ -935,6 +941,29 @@ struct qla_tgt_cmd {
uint32_t blk_sz;
struct crc_context *ctx;
+ uint64_t jiffies_at_alloc;
+ uint64_t jiffies_at_free;
+ /* BIT_0 - Atio Arrival / schedule to work
+ * BIT_1 - qlt_do_work
+ * BIT_2 - qlt_do_work failed
+ * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending
+ * BIT_4 - read response/tcm_qla2xxx_queue_data_in
+ * BIT_5 - status response / tcm_qla2xxx_queue_status
+ * BIT_6 - tcm request to abort/Term exchange.
+ * pre_xmit_response->qlt_send_term_exchange
+ * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
+ * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
+ * BIT_9 - SRR received (qlt_handle_srr->qlt_send_term_exchange)
+ * BIT_10 - Data in - handle_data->tcm_qla2xxx_handle_data
+ * BIT_11 - Data actually going to TCM : tcm_qla2xxx_handle_data_work
+ * BIT_12 - good completion - qlt_do_ctio_completion --> free_cmd
+ * BIT_13 - Bad completion -
+ * qlt_do_ctio_completion --> qlt_term_ctio_exchange
+ * BIT_14 - Back end data received/sent.
+ * BIT_15 - SRR prepare ctio
+ * BIT_16 - complete free
+ */
+ uint32_t cmd_flags;
};
struct qla_tgt_sess_work_param {
@@ -958,6 +987,7 @@ struct qla_tgt_mgmt_cmd {
struct se_cmd se_cmd;
struct work_struct free_work;
unsigned int flags;
+ uint32_t reset_count;
#define QLA24XX_MGMT_SEND_NACK 1
union {
struct atio_from_isp atio;
@@ -1089,5 +1119,6 @@ extern int qlt_stop_phase1(struct qla_tgt *);
extern void qlt_stop_phase2(struct qla_tgt *);
extern irqreturn_t qla83xx_msix_atio_q(int, void *);
extern void qlt_83xx_iospace_config(struct qla_hw_data *);
+extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index cb9a0c4bc419..a8c0c7362e48 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -128,18 +128,10 @@ qla27xx_insert32(uint32_t value, void *buf, ulong *len)
static inline void
qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
{
- ulong cnt = size;
- if (buf && mem) {
+ if (buf && mem && size) {
buf += *len;
- while (cnt >= sizeof(uint32_t)) {
- *(__le32 *)buf = cpu_to_le32p(mem);
- buf += sizeof(uint32_t);
- mem += sizeof(uint32_t);
- cnt -= sizeof(uint32_t);
- }
- if (cnt)
- memcpy(buf, mem, cnt);
+ memcpy(buf, mem, size);
}
*len += size;
}
@@ -151,8 +143,6 @@ qla27xx_read8(void *window, void *buf, ulong *len)
if (buf) {
value = RD_REG_BYTE((__iomem void *)window);
- ql_dbg(ql_dbg_misc, NULL, 0xd011,
- "%s: -> %x\n", __func__, value);
}
qla27xx_insert32(value, buf, len);
}
@@ -164,8 +154,6 @@ qla27xx_read16(void *window, void *buf, ulong *len)
if (buf) {
value = RD_REG_WORD((__iomem void *)window);
- ql_dbg(ql_dbg_misc, NULL, 0xd012,
- "%s: -> %x\n", __func__, value);
}
qla27xx_insert32(value, buf, len);
}
@@ -177,8 +165,6 @@ qla27xx_read32(void *window, void *buf, ulong *len)
if (buf) {
value = RD_REG_DWORD((__iomem void *)window);
- ql_dbg(ql_dbg_misc, NULL, 0xd013,
- "%s: -> %x\n", __func__, value);
}
qla27xx_insert32(value, buf, len);
}
@@ -197,10 +183,6 @@ qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
{
void *window = (void *)reg + offset;
- if (buf) {
- ql_dbg(ql_dbg_misc, NULL, 0xd014,
- "%s: @%x\n", __func__, offset);
- }
qla27xx_read32(window, buf, len);
}
@@ -211,8 +193,6 @@ qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
__iomem void *window = reg + offset;
if (buf) {
- ql_dbg(ql_dbg_misc, NULL, 0xd015,
- "%s: @%x <- %x\n", __func__, offset, data);
WRT_REG_DWORD(window, data);
}
}
@@ -225,11 +205,6 @@ qla27xx_read_window(__iomem struct device_reg_24xx *reg,
void *window = (void *)reg + offset;
void (*readn)(void *, void *, ulong *) = qla27xx_read_vector(width);
- if (buf) {
- ql_dbg(ql_dbg_misc, NULL, 0xd016,
- "%s: base=%x offset=%x count=%x width=%x\n",
- __func__, addr, offset, count, width);
- }
qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
while (count--) {
qla27xx_insert32(addr, buf, len);
@@ -380,14 +355,9 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
ent->t262.start_addr = start;
ent->t262.end_addr = end;
}
- } else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) {
- ql_dbg(ql_dbg_misc, vha, 0xd021,
- "%s: unsupported ddr ram\n", __func__);
- qla27xx_skip_entry(ent, buf);
- goto done;
} else {
ql_dbg(ql_dbg_misc, vha, 0xd022,
- "%s: unknown area %u\n", __func__, ent->t262.ram_area);
+ "%s: unknown area %x\n", __func__, ent->t262.ram_area);
qla27xx_skip_entry(ent, buf);
goto done;
}
@@ -402,8 +372,6 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
dwords = end - start + 1;
if (buf) {
- ql_dbg(ql_dbg_misc, vha, 0xd024,
- "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
buf += *len;
qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
}
@@ -448,13 +416,9 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
count++;
}
}
- } else if (ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
- ql_dbg(ql_dbg_misc, vha, 0xd025,
- "%s: unsupported atio queue\n", __func__);
- qla27xx_skip_entry(ent, buf);
} else {
ql_dbg(ql_dbg_misc, vha, 0xd026,
- "%s: unknown queue %u\n", __func__, ent->t263.queue_type);
+ "%s: unknown queue %x\n", __func__, ent->t263.queue_type);
qla27xx_skip_entry(ent, buf);
}
@@ -549,17 +513,9 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
"%s: missing eft\n", __func__);
qla27xx_skip_entry(ent, buf);
}
- } else if (ent->t268.buf_type == T268_BUF_TYPE_EXCH_BUFOFF) {
- ql_dbg(ql_dbg_misc, vha, 0xd029,
- "%s: unsupported exchange offload buffer\n", __func__);
- qla27xx_skip_entry(ent, buf);
- } else if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_LOGIN) {
- ql_dbg(ql_dbg_misc, vha, 0xd02a,
- "%s: unsupported extended login buffer\n", __func__);
- qla27xx_skip_entry(ent, buf);
} else {
ql_dbg(ql_dbg_misc, vha, 0xd02b,
- "%s: unknown buf %x\n", __func__, ent->t268.buf_type);
+ "%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
qla27xx_skip_entry(ent, buf);
}
@@ -695,13 +651,9 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
count++;
}
}
- } else if (ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
- ql_dbg(ql_dbg_misc, vha, 0xd02e,
- "%s: unsupported atio queue\n", __func__);
- qla27xx_skip_entry(ent, buf);
} else {
ql_dbg(ql_dbg_misc, vha, 0xd02f,
- "%s: unknown queue %u\n", __func__, ent->t274.queue_type);
+ "%s: unknown queue %x\n", __func__, ent->t274.queue_type);
qla27xx_skip_entry(ent, buf);
}
@@ -715,6 +667,32 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
}
static int
+qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ulong offset = offsetof(typeof(*ent), t275.buffer);
+
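+ /* Entry type 275 carries an inline buffer; copy it verbatim into the dump. */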
+ ql_dbg(ql_dbg_misc, vha, 0xd213,
+ "%s: buffer(%x) [%lx]\n", __func__, ent->t275.length, *len);
+ if (!ent->t275.length) {
+ ql_dbg(ql_dbg_misc, vha, 0xd020,
+ "%s: buffer zero length\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ goto done;
+ }
+ if (offset + ent->t275.length > ent->hdr.entry_size) {
+ ql_dbg(ql_dbg_misc, vha, 0xd030,
+ "%s: buffer overflow\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ goto done;
+ }
+
+ qla27xx_insertbuf(ent->t275.buffer, ent->t275.length, buf, len);
+done:
+ return false;
+}
+
+static int
qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -726,7 +704,7 @@ qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
}
struct qla27xx_fwdt_entry_call {
- int type;
+ uint type;
int (*call)(
struct scsi_qla_host *,
struct qla27xx_fwdt_entry *,
@@ -756,18 +734,21 @@ static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = {
{ ENTRY_TYPE_RDREMRAM , qla27xx_fwdt_entry_t272 } ,
{ ENTRY_TYPE_PCICFG , qla27xx_fwdt_entry_t273 } ,
{ ENTRY_TYPE_GET_SHADOW , qla27xx_fwdt_entry_t274 } ,
+ { ENTRY_TYPE_WRITE_BUF , qla27xx_fwdt_entry_t275 } ,
{ -1 , qla27xx_fwdt_entry_other }
};
-static inline int (*qla27xx_find_entry(int type))
+static inline int (*qla27xx_find_entry(uint type))
(struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *)
{
struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list;
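+ /*
+ * The list is kept in ascending type order and terminated by (uint)-1,
+ * so the scan stops at the first entry not smaller than the wanted type.
+ */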
- while (list->type != -1 && list->type != type)
+ while (list->type < type)
list++;
- return list->call;
+ if (list->type == type)
+ return list->call;
+ return qla27xx_fwdt_entry_other;
}
static inline void *
@@ -792,6 +773,15 @@ qla27xx_walk_template(struct scsi_qla_host *vha,
break;
ent = qla27xx_next_entry(ent);
}
+
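+ /* Warn when entries remain unconsumed or the template lacks an END entry. */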
+ if (count)
+ ql_dbg(ql_dbg_misc, vha, 0xd018,
+ "%s: residual count (%lx)\n", __func__, count);
+
+ if (ent->hdr.entry_type != ENTRY_TYPE_TMP_END)
+ ql_dbg(ql_dbg_misc, vha, 0xd019,
+ "%s: missing end (%lx)\n", __func__, count);
+
ql_dbg(ql_dbg_misc, vha, 0xd01b,
"%s: len=%lx\n", __func__, *len);
}
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
index 1967424c8e64..141c1c5e73f4 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.h
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -53,6 +53,7 @@ struct __packed qla27xx_fwdt_template {
#define ENTRY_TYPE_RDREMRAM 272
#define ENTRY_TYPE_PCICFG 273
#define ENTRY_TYPE_GET_SHADOW 274
+#define ENTRY_TYPE_WRITE_BUF 275
#define CAPTURE_FLAG_PHYS_ONLY BIT_0
#define CAPTURE_FLAG_PHYS_VIRT BIT_1
@@ -193,6 +194,11 @@ struct __packed qla27xx_fwdt_entry {
uint8_t queue_type;
uint8_t reserved[3];
} t274;
+
+ struct __packed {
+ uint32_t length;
+ uint8_t buffer[];
+ } t275;
};
};
@@ -208,6 +214,8 @@ struct __packed qla27xx_fwdt_entry {
#define T268_BUF_TYPE_EXTD_TRACE 1
#define T268_BUF_TYPE_EXCH_BUFOFF 2
#define T268_BUF_TYPE_EXTD_LOGIN 3
+#define T268_BUF_TYPE_REQ_MIRROR 4
+#define T268_BUF_TYPE_RSP_MIRROR 5
#define T274_QUEUE_TYPE_REQ_SHAD 1
#define T274_QUEUE_TYPE_RSP_SHAD 2
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 4d2c98cbec4f..d88b86214ec5 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.07.00.08-k"
+#define QLA2XXX_VERSION "8.07.00.16-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 7
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index e2beab962096..031b2961c6b7 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -50,8 +50,12 @@
#include "qla_target.h"
#include "tcm_qla2xxx.h"
-struct workqueue_struct *tcm_qla2xxx_free_wq;
-struct workqueue_struct *tcm_qla2xxx_cmd_wq;
+static struct workqueue_struct *tcm_qla2xxx_free_wq;
+static struct workqueue_struct *tcm_qla2xxx_cmd_wq;
+
+/* Local pointer to allocated TCM configfs fabric module */
+static struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
+static struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
/*
* Parse WWN.
@@ -386,6 +390,11 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+ cmd->cmd_in_wq = 0;
+
+ WARN_ON(cmd->cmd_flags & BIT_16);
+
+ cmd->cmd_flags |= BIT_16;
transport_generic_free_cmd(&cmd->se_cmd, 0);
}
@@ -396,6 +405,7 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work)
*/
static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
{
+ cmd->cmd_in_wq = 1;
INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}
@@ -405,6 +415,13 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
*/
static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
{
+ struct qla_tgt_cmd *cmd;
+
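+ /* TMR commands are not backed by a qla_tgt_cmd; flag only fabric I/O commands. */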
+ if ((se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) == 0) {
+ cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+ cmd->cmd_flags |= BIT_14;
+ }
+
return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}
@@ -511,8 +528,13 @@ static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
{
- struct qla_tgt_cmd *cmd = container_of(se_cmd,
- struct qla_tgt_cmd, se_cmd);
+ struct qla_tgt_cmd *cmd;
+
+ /* check for task mgmt cmd */
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+ return 0xffffffff;
+
+ cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
return cmd->tag;
}
@@ -562,6 +584,8 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
* Ensure that the complete FCP WRITE payload has been received.
* Otherwise return an exception via CHECK_CONDITION status.
*/
+ cmd->cmd_in_wq = 0;
+ cmd->cmd_flags |= BIT_11;
if (!cmd->write_data_transferred) {
/*
* Check if se_cmd has already been aborted via LUN_RESET, and
@@ -590,6 +614,8 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
*/
static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
{
+ cmd->cmd_flags |= BIT_10;
+ cmd->cmd_in_wq = 1;
INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}
@@ -633,6 +659,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
+ cmd->cmd_flags |= BIT_4;
cmd->bufflen = se_cmd->data_length;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
@@ -640,6 +667,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
cmd->sg_cnt = se_cmd->t_data_nents;
cmd->sg = se_cmd->t_data_sg;
cmd->offset = 0;
+ cmd->cmd_flags |= BIT_3;
cmd->prot_sg_cnt = se_cmd->t_prot_nents;
cmd->prot_sg = se_cmd->t_prot_sg;
@@ -665,6 +693,11 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
cmd->offset = 0;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+ if (cmd->cmd_flags & BIT_5) {
+ pr_crit("Bit_5 already set for cmd = %p.\n", cmd);
+ dump_stack();
+ }
+ cmd->cmd_flags |= BIT_5;
if (se_cmd->data_direction == DMA_FROM_DEVICE) {
/*
@@ -734,10 +767,6 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
cmd->sg_mapped = 0;
}
-/* Local pointer to allocated TCM configfs fabric module */
-struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
-struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
-
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
/*