Diffstat (limited to 'drivers/scsi/libiscsi.c')
-rw-r--r--  drivers/scsi/libiscsi.c | 178
1 file changed, 113 insertions(+), 65 deletions(-)
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index cee1dbaa1b93..3d74fdd9f31a 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -523,16 +523,6 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
task->state = state;
- spin_lock_bh(&conn->taskqueuelock);
- if (!list_empty(&task->running)) {
- pr_debug_once("%s while task on list", __func__);
- list_del_init(&task->running);
- }
- spin_unlock_bh(&conn->taskqueuelock);
-
- if (conn->task == task)
- conn->task = NULL;
-
if (READ_ONCE(conn->ping_task) == task)
WRITE_ONCE(conn->ping_task, NULL);
@@ -564,9 +554,40 @@ void iscsi_complete_scsi_task(struct iscsi_task *task,
}
EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task);
+/*
+ * Must be called with back and frwd lock
+ */
+static bool cleanup_queued_task(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ bool early_complete = false;
+
+ /* Bad target might have completed task while it was still running */
+ if (task->state == ISCSI_TASK_COMPLETED)
+ early_complete = true;
+
+ if (!list_empty(&task->running)) {
+ list_del_init(&task->running);
+ /*
+ * If it's on a list but still running, this could be from
+ * a bad target sending a rsp early, cleanup from a TMF, or
+ * session recovery.
+ */
+ if (task->state == ISCSI_TASK_RUNNING ||
+ task->state == ISCSI_TASK_COMPLETED)
+ __iscsi_put_task(task);
+ }
+
+ if (conn->task == task) {
+ conn->task = NULL;
+ __iscsi_put_task(task);
+ }
+
+ return early_complete;
+}
/*
- * session back_lock must be held and if not called for a task that is
+ * session frwd_lock must be held and if not called for a task that is
* still pending or from the xmit thread, then xmit thread must
* be suspended.
*/
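
The helper added above encodes two reference drops: one for queue membership and one for conn->task ownership. Below is a minimal user-space model of those rules; model_task/model_conn and the plain int refcount are invented stand-ins for illustration, not the kernel's iscsi_task/iscsi_conn.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

enum model_state { MODEL_PENDING, MODEL_RUNNING, MODEL_COMPLETED };

struct model_task {
	int refcount;            /* base ref + 1 while queued + 1 while it is conn->task */
	bool queued;             /* models !list_empty(&task->running) */
	enum model_state state;
};

struct model_conn {
	struct model_task *task; /* models conn->task */
};

static void model_put(struct model_task *t)
{
	assert(t->refcount > 0);
	t->refcount--;
}

/* Returns true when the target already completed the task (early rsp). */
bool model_cleanup_queued_task(struct model_conn *c, struct model_task *t)
{
	bool early_complete = (t->state == MODEL_COMPLETED);

	if (t->queued) {
		t->queued = false;
		/* In this model a task that made it past PENDING sits on a
		 * queue with its own reference (e.g. a requeued R2T or an
		 * early response), so drop it here; a still-PENDING task is
		 * released by the caller's normal fail/complete path. */
		if (t->state == MODEL_RUNNING || t->state == MODEL_COMPLETED)
			model_put(t);
	}

	if (c->task == t) {
		c->task = NULL;  /* conn->task held its own reference too */
		model_put(t);
	}

	return early_complete;
}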
@@ -585,6 +606,13 @@ static void fail_scsi_task(struct iscsi_task *task, int err)
if (!sc)
return;
+ /* regular RX path uses back_lock */
+ spin_lock_bh(&conn->session->back_lock);
+ if (cleanup_queued_task(task)) {
+ spin_unlock_bh(&conn->session->back_lock);
+ return;
+ }
+
if (task->state == ISCSI_TASK_PENDING) {
/*
* cmd never made it to the xmit thread, so we should not count
@@ -600,9 +628,6 @@ static void fail_scsi_task(struct iscsi_task *task, int err)
sc->result = err << 16;
scsi_set_resid(sc, scsi_bufflen(sc));
-
- /* regular RX path uses back_lock */
- spin_lock_bh(&conn->session->back_lock);
iscsi_complete_task(task, state);
spin_unlock_bh(&conn->session->back_lock);
}
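
fail_scsi_task() now takes back_lock up front, with frwd_lock already held by its callers, so cleanup_queued_task() runs under both locks. The nesting order this relies on is sketched below with pthread mutexes standing in for the session locks; the names and the empty bodies are illustrative only.

#include <pthread.h>

static pthread_mutex_t frwd_lock = PTHREAD_MUTEX_INITIALIZER; /* TX/EH side    */
static pthread_mutex_t back_lock = PTHREAD_MUTEX_INITIALIZER; /* RX/completion */

static void model_fail_scsi_task(void)
{
	/* callers (TMF/eh paths) already hold frwd_lock here */
	pthread_mutex_lock(&back_lock);   /* "regular RX path uses back_lock" */
	/* cleanup + completion of the task would run here */
	pthread_mutex_unlock(&back_lock);
}

int main(void)
{
	pthread_mutex_lock(&frwd_lock);   /* frwd_lock is always the outer lock */
	model_fail_scsi_task();
	pthread_mutex_unlock(&frwd_lock);
	return 0;
}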
@@ -748,9 +773,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (session->tt->xmit_task(task))
goto free_task;
} else {
- spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&task->running, &conn->mgmtqueue);
- spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}
@@ -1411,31 +1434,61 @@ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
return 0;
}
-static int iscsi_xmit_task(struct iscsi_conn *conn)
+static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
+ bool was_requeue)
{
- struct iscsi_task *task = conn->task;
int rc;
- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx))
- return -ENODATA;
-
spin_lock_bh(&conn->session->back_lock);
- if (conn->task == NULL) {
+
+ if (!conn->task) {
+ /* Take a ref so we can access it after xmit_task() */
+ __iscsi_get_task(task);
+ } else {
+ /* Already have a ref from when we failed to send it last call */
+ conn->task = NULL;
+ }
+
+ /*
+ * If this was a requeue for a R2T we have an extra ref on the task in
+ * case a bad target sends a cmd rsp before we have handled the task.
+ */
+ if (was_requeue)
+ __iscsi_put_task(task);
+
+ /*
+ * Do this after dropping the extra ref because if this was a requeue
+ * it's removed from that list and cleanup_queued_task would miss it.
+ */
+ if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+ /*
+ * Save the task and ref in case we weren't cleaning up this
+ * task and get woken up again.
+ */
+ conn->task = task;
spin_unlock_bh(&conn->session->back_lock);
return -ENODATA;
}
- __iscsi_get_task(task);
spin_unlock_bh(&conn->session->back_lock);
+
spin_unlock_bh(&conn->session->frwd_lock);
rc = conn->session->tt->xmit_task(task);
spin_lock_bh(&conn->session->frwd_lock);
if (!rc) {
/* done with this task */
task->last_xfer = jiffies;
- conn->task = NULL;
}
/* regular RX path uses back_lock */
spin_lock(&conn->session->back_lock);
+ if (rc && task->state == ISCSI_TASK_RUNNING) {
+ /*
+ * get an extra ref that is released next time we access it
+ * as conn->task above.
+ */
+ __iscsi_get_task(task);
+ conn->task = task;
+ }
+
__iscsi_put_task(task);
spin_unlock(&conn->session->back_lock);
return rc;
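
The comments in the reworked iscsi_xmit_task() describe a reference hand-off: a transmit that fails (or is suspended) parks one reference in conn->task so the next worker run can retry, while the reference taken for the current call is always dropped. A compact user-space model of that lifecycle follows; the types are stand-ins and the requeue-specific extra reference is left out for brevity.

#include <assert.h>
#include <stdbool.h>

struct xmit_task {
	int refcount;
	bool running;               /* models task->state == ISCSI_TASK_RUNNING */
};

struct xmit_conn {
	struct xmit_task *task;     /* parked task awaiting a retry */
	bool suspend_tx;            /* models ISCSI_SUSPEND_BIT     */
};

static void task_get(struct xmit_task *t) { t->refcount++; }
static void task_put(struct xmit_task *t) { assert(t->refcount > 0); t->refcount--; }

/* llt_rc stands in for the session->tt->xmit_task() return value. */
int model_xmit_task(struct xmit_conn *c, struct xmit_task *t, int llt_rc)
{
	if (!c->task)
		task_get(t);        /* reference for the duration of this call    */
	else
		c->task = NULL;     /* reuse the reference parked by the last try */

	if (c->suspend_tx) {
		c->task = t;        /* keep the reference; retry when unsuspended */
		return -1;          /* models -ENODATA                            */
	}

	if (llt_rc && t->running) {
		task_get(t);        /* extra reference that rides in conn->task   */
		c->task = t;
	}
	task_put(t);                /* drop this call's reference                 */
	return llt_rc;
}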
@@ -1445,9 +1498,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
* iscsi_requeue_task - requeue task to run from session workqueue
* @task: task to requeue
*
- * LLDs that need to run a task from the session workqueue should call
- * this. The session frwd_lock must be held. This should only be called
- * by software drivers.
+ * Callers must have taken a ref to the task that is going to be requeued.
*/
void iscsi_requeue_task(struct iscsi_task *task)
{
@@ -1457,11 +1508,18 @@ void iscsi_requeue_task(struct iscsi_task *task)
* this may be on the requeue list already if the xmit_task callout
* is handling the r2ts while we are adding new ones
*/
- spin_lock_bh(&conn->taskqueuelock);
- if (list_empty(&task->running))
+ spin_lock_bh(&conn->session->frwd_lock);
+ if (list_empty(&task->running)) {
list_add_tail(&task->running, &conn->requeue);
- spin_unlock_bh(&conn->taskqueuelock);
+ } else {
+ /*
+ * Don't need the extra ref since it's already requeued and
+ * has a ref.
+ */
+ iscsi_put_task(task);
+ }
iscsi_conn_queue_work(conn);
+ spin_unlock_bh(&conn->session->frwd_lock);
}
EXPORT_SYMBOL_GPL(iscsi_requeue_task);
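
With the new contract, the LLD hands iscsi_requeue_task() a reference it already took; the requeue list keeps that reference, or drops it as redundant if the task is still sitting there from an earlier R2T. A stand-in model of that rule (not the exported API):

#include <assert.h>
#include <stdbool.h>

struct rq_task {
	int refcount;
	bool queued;                /* models !list_empty(&task->running) */
};

static void rq_put(struct rq_task *t) { assert(t->refcount > 0); t->refcount--; }

void model_requeue_task(struct rq_task *t)
{
	/* caller already took a reference on t before calling in */
	if (!t->queued)
		t->queued = true;   /* conn->requeue now owns that reference      */
	else
		rq_put(t);          /* already queued with a reference; drop ours */
	/* then kick the xmit worker, i.e. iscsi_conn_queue_work(conn) */
}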
@@ -1487,7 +1545,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
}
if (conn->task) {
- rc = iscsi_xmit_task(conn);
+ rc = iscsi_xmit_task(conn, conn->task, false);
if (rc)
goto done;
}
@@ -1497,49 +1555,41 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
* only have one nop-out as a ping from us and targets should not
* overflow us with nop-ins
*/
- spin_lock_bh(&conn->taskqueuelock);
check_mgmt:
while (!list_empty(&conn->mgmtqueue)) {
- conn->task = list_entry(conn->mgmtqueue.next,
- struct iscsi_task, running);
- list_del_init(&conn->task->running);
- spin_unlock_bh(&conn->taskqueuelock);
- if (iscsi_prep_mgmt_task(conn, conn->task)) {
+ task = list_entry(conn->mgmtqueue.next, struct iscsi_task,
+ running);
+ list_del_init(&task->running);
+ if (iscsi_prep_mgmt_task(conn, task)) {
/* regular RX path uses back_lock */
spin_lock_bh(&conn->session->back_lock);
- __iscsi_put_task(conn->task);
+ __iscsi_put_task(task);
spin_unlock_bh(&conn->session->back_lock);
- conn->task = NULL;
- spin_lock_bh(&conn->taskqueuelock);
continue;
}
- rc = iscsi_xmit_task(conn);
+ rc = iscsi_xmit_task(conn, task, false);
if (rc)
goto done;
- spin_lock_bh(&conn->taskqueuelock);
}
/* process pending command queue */
while (!list_empty(&conn->cmdqueue)) {
- conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
- running);
- list_del_init(&conn->task->running);
- spin_unlock_bh(&conn->taskqueuelock);
+ task = list_entry(conn->cmdqueue.next, struct iscsi_task,
+ running);
+ list_del_init(&task->running);
if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
- fail_scsi_task(conn->task, DID_IMM_RETRY);
- spin_lock_bh(&conn->taskqueuelock);
+ fail_scsi_task(task, DID_IMM_RETRY);
continue;
}
- rc = iscsi_prep_scsi_cmd_pdu(conn->task);
+ rc = iscsi_prep_scsi_cmd_pdu(task);
if (rc) {
if (rc == -ENOMEM || rc == -EACCES)
- fail_scsi_task(conn->task, DID_IMM_RETRY);
+ fail_scsi_task(task, DID_IMM_RETRY);
else
- fail_scsi_task(conn->task, DID_ABORT);
- spin_lock_bh(&conn->taskqueuelock);
+ fail_scsi_task(task, DID_ABORT);
continue;
}
- rc = iscsi_xmit_task(conn);
+ rc = iscsi_xmit_task(conn, task, false);
if (rc)
goto done;
/*
@@ -1547,7 +1597,6 @@ check_mgmt:
* we need to check the mgmt queue for nops that need to
* be sent to aviod starvation
*/
- spin_lock_bh(&conn->taskqueuelock);
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
}
@@ -1561,21 +1610,17 @@ check_mgmt:
task = list_entry(conn->requeue.next, struct iscsi_task,
running);
+
if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
break;
- conn->task = task;
- list_del_init(&conn->task->running);
- conn->task->state = ISCSI_TASK_RUNNING;
- spin_unlock_bh(&conn->taskqueuelock);
- rc = iscsi_xmit_task(conn);
+ list_del_init(&task->running);
+ rc = iscsi_xmit_task(conn, task, true);
if (rc)
goto done;
- spin_lock_bh(&conn->taskqueuelock);
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
}
- spin_unlock_bh(&conn->taskqueuelock);
spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
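
Stripped of the per-task details, the reworked transmit loop walks the three queues in priority order under frwd_lock alone (taskqueuelock is gone), jumping back to the management queue between commands so nop-outs are not starved. Here is a compilable outline of that shape; the pthread mutex, queue stubs, and llt_xmit() are placeholders for the session lock, the list handling, and the LLD transmit, and the TMF restriction check is omitted.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

enum { MGMT, CMD, REQUEUE };

static pthread_mutex_t frwd_lock = PTHREAD_MUTEX_INITIALIZER;

/* Trivial stubs standing in for list_empty()/list_entry() and xmit_task(). */
static bool queue_empty(int q) { (void)q; return true; }
static void *queue_pop(int q)  { (void)q; return NULL; }
static int llt_xmit(void *t)   { (void)t; return 0; }   /* nonzero: retry later;
							   the real path drops
							   frwd_lock around the I/O */

int model_data_xmit(void)
{
	pthread_mutex_lock(&frwd_lock);
check_mgmt:
	while (!queue_empty(MGMT)) {              /* nop-outs and TMFs first */
		if (llt_xmit(queue_pop(MGMT)))
			goto done;
	}
	while (!queue_empty(CMD)) {               /* then new SCSI commands */
		if (llt_xmit(queue_pop(CMD)))
			goto done;
		if (!queue_empty(MGMT))
			goto check_mgmt;          /* don't starve nops behind cmds */
	}
	while (!queue_empty(REQUEUE)) {           /* finally requeued R2T data-outs */
		if (llt_xmit(queue_pop(REQUEUE)))
			goto done;
		if (!queue_empty(MGMT))
			goto check_mgmt;
	}
	pthread_mutex_unlock(&frwd_lock);
	return -1;                                /* models -ENODATA: queues drained */
done:
	pthread_mutex_unlock(&frwd_lock);
	return 0;
}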
@@ -1741,9 +1786,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
goto prepd_reject;
}
} else {
- spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&task->running, &conn->cmdqueue);
- spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}
@@ -2914,7 +2957,6 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
INIT_LIST_HEAD(&conn->mgmtqueue);
INIT_LIST_HEAD(&conn->cmdqueue);
INIT_LIST_HEAD(&conn->requeue);
- spin_lock_init(&conn->taskqueuelock);
INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
/* allocate login_task used for the login/text sequences */
@@ -3080,10 +3122,16 @@ fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
ISCSI_DBG_SESSION(conn->session,
"failing mgmt itt 0x%x state %d\n",
task->itt, task->state);
+
+ spin_lock_bh(&session->back_lock);
+ if (cleanup_queued_task(task)) {
+ spin_unlock_bh(&session->back_lock);
+ continue;
+ }
+
state = ISCSI_TASK_ABRT_SESS_RECOV;
if (task->state == ISCSI_TASK_PENDING)
state = ISCSI_TASK_COMPLETED;
- spin_lock_bh(&session->back_lock);
iscsi_complete_task(task, state);
spin_unlock_bh(&session->back_lock);
}