author     Linus Torvalds <torvalds@linux-foundation.org>  2021-04-29 03:22:10 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-04-29 03:22:10 +0300
commit     d72cd4ad4174cfd2257c426ad51e4f53bcfde9c9 (patch)
tree       b291d1c28bbf6ce61edc3bdf022ea857414230f6 /drivers/target
parent     238da4d004856ac5f832899f6f3fa27c0102381f (diff)
parent     7a3beeae289385f7be9f61a33a6e4f6c7e2400d3 (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This consists of the usual driver updates (ufs, target, tcmu,
  smartpqi, lpfc, zfcp, qla2xxx, mpt3sas, pm80xx). The major core
  change is using a sbitmap instead of an atomic for queue tracking"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (412 commits)
  scsi: target: tcm_fc: Fix a kernel-doc header
  scsi: target: Shorten ALUA error messages
  scsi: target: Fix two format specifiers
  scsi: target: Compare explicitly with SAM_STAT_GOOD
  scsi: sd: Introduce a new local variable in sd_check_events()
  scsi: dc395x: Open-code status_byte(u8) calls
  scsi: 53c700: Open-code status_byte(u8) calls
  scsi: smartpqi: Remove unused functions
  scsi: qla4xxx: Remove an unused function
  scsi: myrs: Remove unused functions
  scsi: myrb: Remove unused functions
  scsi: mpt3sas: Fix two kernel-doc headers
  scsi: fcoe: Suppress a compiler warning
  scsi: libfc: Fix a format specifier
  scsi: aacraid: Remove an unused function
  scsi: core: Introduce enum scsi_disposition
  scsi: core: Modify the scsi_send_eh_cmnd() return value for the SDEV_BLOCK case
  scsi: core: Rename scsi_softirq_done() into scsi_complete()
  scsi: core: Remove an incorrect comment
  scsi: core: Make the scsi_alloc_sgtables() documentation more accurate
  ...
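The core target change in this pull is a split of command submission into separate init/prep/submit steps (visible in the target_core_transport.c diff below). A minimal sketch of the new fabric-driver flow, using only functions introduced in this series; the fabric_cmd structure and its fields are illustrative, not part of the kernel API:

	/*
	 * Sketch of the split submission API added by this merge. Step 1
	 * can only fail for drivers that use target_stop_session(); step 2
	 * failures are completed through the fabric's queue_status callback.
	 */
	static void fabric_queue_cmd(struct fabric_cmd *fc)	/* illustrative */
	{
		struct se_cmd *se_cmd = &fc->se_cmd;

		if (target_init_cmd(se_cmd, fc->se_sess, fc->sense_buf,
				    fc->unpacked_lun, fc->data_length,
				    TCM_SIMPLE_TAG, fc->data_dir, 0))
			return;		/* active I/O shutdown */

		if (target_submit_prep(se_cmd, fc->cdb, fc->sgl, fc->sgl_count,
				       NULL, 0, NULL, 0, GFP_KERNEL))
			return;		/* LIO has already queued status */

		target_submit(se_cmd);	/* direct submission, process context */
		/* or: target_queue_submission(se_cmd) to run the command
		 * from the per-CPU LIO submission workqueue instead */
	}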
Diffstat (limited to 'drivers/target')
-rw-r--r--  drivers/target/iscsi/iscsi_target.c            20
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c    3
-rw-r--r--  drivers/target/iscsi/iscsi_target_nego.c        1
-rw-r--r--  drivers/target/iscsi/iscsi_target_stat.c        1
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c       17
-rw-r--r--  drivers/target/loopback/tcm_loop.c             60
-rw-r--r--  drivers/target/loopback/tcm_loop.h              1
-rw-r--r--  drivers/target/sbp/sbp_target.c                 8
-rw-r--r--  drivers/target/target_core_configfs.c          36
-rw-r--r--  drivers/target/target_core_device.c            12
-rw-r--r--  drivers/target/target_core_fabric_configfs.c   58
-rw-r--r--  drivers/target/target_core_file.c               3
-rw-r--r--  drivers/target/target_core_iblock.c            81
-rw-r--r--  drivers/target/target_core_iblock.h            10
-rw-r--r--  drivers/target/target_core_internal.h           2
-rw-r--r--  drivers/target/target_core_pr.c                42
-rw-r--r--  drivers/target/target_core_pscsi.c              7
-rw-r--r--  drivers/target/target_core_rd.c                27
-rw-r--r--  drivers/target/target_core_rd.h                 1
-rw-r--r--  drivers/target/target_core_sbc.c                4
-rw-r--r--  drivers/target/target_core_spc.c                6
-rw-r--r--  drivers/target/target_core_stat.c               3
-rw-r--r--  drivers/target/target_core_tmr.c                4
-rw-r--r--  drivers/target/target_core_transport.c        300
-rw-r--r--  drivers/target/target_core_user.c             440
-rw-r--r--  drivers/target/target_core_xcopy.c             10
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c                14
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c                2
28 files changed, 741 insertions(+), 432 deletions(-)
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index e5c443bfbdf9..2c54c5d8412d 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1154,10 +1154,10 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
- transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
- conn->sess->se_sess, be32_to_cpu(hdr->data_length),
- cmd->data_direction, sam_task_attr,
- cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun));
+ __target_init_cmd(&cmd->se_cmd, &iscsi_ops,
+ conn->sess->se_sess, be32_to_cpu(hdr->data_length),
+ cmd->data_direction, sam_task_attr,
+ cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun));
pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
@@ -1167,7 +1167,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
target_get_sess_cmd(&cmd->se_cmd, true);
cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
- cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb);
+ cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb,
+ GFP_KERNEL);
+
if (cmd->sense_reason) {
if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
return iscsit_add_reject_cmd(cmd,
@@ -2012,10 +2014,10 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
buf);
}
- transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
- conn->sess->se_sess, 0, DMA_NONE,
- TCM_SIMPLE_TAG, cmd->sense_buffer + 2,
- scsilun_to_int(&hdr->lun));
+ __target_init_cmd(&cmd->se_cmd, &iscsi_ops,
+ conn->sess->se_sess, 0, DMA_NONE,
+ TCM_SIMPLE_TAG, cmd->sense_buffer + 2,
+ scsilun_to_int(&hdr->lun));
target_get_sess_cmd(&cmd->se_cmd, true);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 0fa1d57b26fa..f4a24fa5058e 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -161,14 +161,13 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
char *str, *str2, *ip_str, *port_str;
struct sockaddr_storage sockaddr = { };
int ret;
- char buf[MAX_PORTAL_LEN + 1];
+ char buf[MAX_PORTAL_LEN + 1] = { };
if (strlen(name) > MAX_PORTAL_LEN) {
pr_err("strlen(name): %d exceeds MAX_PORTAL_LEN: %d\n",
(int)strlen(name), MAX_PORTAL_LEN);
return ERR_PTR(-EOVERFLOW);
}
- memset(buf, 0, MAX_PORTAL_LEN + 1);
snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name);
str = strstr(buf, "[");
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 8b40f10976ff..151e2949bb75 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -28,7 +28,6 @@
#include "iscsi_target_auth.h"
#define MAX_LOGIN_PDUS 7
-#define TEXT_LEN 4096
void convert_null_to_semi(char *buf, int len)
{
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index 35e75a3569c9..cce3a827059e 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -28,7 +28,6 @@
/* Instance Attributes Table */
#define ISCSI_INST_NUM_NODES 1
#define ISCSI_INST_DESCR "Storage Engine Target"
-#define ISCSI_INST_LAST_FAILURE_TYPE 0
#define ISCSI_DISCONTINUITY_TIME 0
#define ISCSI_NODE_INDEX 1
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 9468b017b4a7..6dd5810e2af1 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -28,23 +28,6 @@
#include "iscsi_target_util.h"
#include "iscsi_target.h"
-#define PRINT_BUFF(buff, len) \
-{ \
- int zzz; \
- \
- pr_debug("%d:\n", __LINE__); \
- for (zzz = 0; zzz < len; zzz++) { \
- if (zzz % 16 == 0) { \
- if (zzz) \
- pr_debug("\n"); \
- pr_debug("%4i: ", zzz); \
- } \
- pr_debug("%02x ", (unsigned char) (buff)[zzz]); \
- } \
- if ((len + 1) % 16) \
- pr_debug("\n"); \
-}
-
extern struct list_head g_tiqn_list;
extern spinlock_t tiqn_lock;
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index badba437e5f9..2687fd7d45db 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -39,7 +39,6 @@
#define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev)
-static struct workqueue_struct *tcm_loop_workqueue;
static struct kmem_cache *tcm_loop_cmd_cache;
static int tcm_loop_hba_no_cnt;
@@ -67,8 +66,12 @@ static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);
+ struct scsi_cmnd *sc = tl_cmd->sc;
- kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+ kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+ else
+ sc->scsi_done(sc);
}
static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
@@ -102,10 +105,8 @@ static struct device_driver tcm_loop_driverfs = {
*/
static struct device *tcm_loop_primary;
-static void tcm_loop_submission_work(struct work_struct *work)
+static void tcm_loop_target_queue_cmd(struct tcm_loop_cmd *tl_cmd)
{
- struct tcm_loop_cmd *tl_cmd =
- container_of(work, struct tcm_loop_cmd, work);
struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
struct scsi_cmnd *sc = tl_cmd->sc;
struct tcm_loop_nexus *tl_nexus;
@@ -113,7 +114,6 @@ static void tcm_loop_submission_work(struct work_struct *work)
struct tcm_loop_tpg *tl_tpg;
struct scatterlist *sgl_bidi = NULL;
u32 sgl_bidi_count = 0, transfer_length;
- int rc;
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
@@ -151,21 +151,20 @@ static void tcm_loop_submission_work(struct work_struct *work)
}
se_cmd->tag = tl_cmd->sc_cmd_tag;
- rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
- &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
- transfer_length, TCM_SIMPLE_TAG,
- sc->sc_data_direction, 0,
- scsi_sglist(sc), scsi_sg_count(sc),
- sgl_bidi, sgl_bidi_count,
- scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
- if (rc < 0) {
- set_host_byte(sc, DID_NO_CONNECT);
- goto out_done;
- }
+ target_init_cmd(se_cmd, tl_nexus->se_sess, &tl_cmd->tl_sense_buf[0],
+ tl_cmd->sc->device->lun, transfer_length,
+ TCM_SIMPLE_TAG, sc->sc_data_direction, 0);
+
+ if (target_submit_prep(se_cmd, sc->cmnd, scsi_sglist(sc),
+ scsi_sg_count(sc), sgl_bidi, sgl_bidi_count,
+ scsi_prot_sglist(sc), scsi_prot_sg_count(sc),
+ GFP_ATOMIC))
+ return;
+
+ target_queue_submission(se_cmd);
return;
out_done:
- kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
sc->scsi_done(sc);
}
@@ -175,24 +174,18 @@ out_done:
*/
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
- struct tcm_loop_cmd *tl_cmd;
+ struct tcm_loop_cmd *tl_cmd = scsi_cmd_priv(sc);
pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n",
__func__, sc->device->host->host_no, sc->device->id,
sc->device->channel, sc->device->lun, sc->cmnd[0],
scsi_bufflen(sc));
- tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
- if (!tl_cmd) {
- set_host_byte(sc, DID_ERROR);
- sc->scsi_done(sc);
- return 0;
- }
-
+ memset(tl_cmd, 0, sizeof(*tl_cmd));
tl_cmd->sc = sc;
tl_cmd->sc_cmd_tag = sc->request->tag;
- INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
- queue_work(tcm_loop_workqueue, &tl_cmd->work);
+
+ tcm_loop_target_queue_cmd(tl_cmd);
return 0;
}
@@ -320,6 +313,7 @@ static struct scsi_host_template tcm_loop_driver_template = {
.dma_boundary = PAGE_SIZE - 1,
.module = THIS_MODULE,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct tcm_loop_cmd),
};
static int tcm_loop_driver_probe(struct device *dev)
@@ -580,7 +574,6 @@ static int tcm_loop_queue_data_or_status(const char *func,
if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
(se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
scsi_set_resid(sc, se_cmd->residual_count);
- sc->scsi_done(sc);
return 0;
}
@@ -1164,17 +1157,13 @@ static int __init tcm_loop_fabric_init(void)
{
int ret = -ENOMEM;
- tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
- if (!tcm_loop_workqueue)
- goto out;
-
tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
sizeof(struct tcm_loop_cmd),
__alignof__(struct tcm_loop_cmd),
0, NULL);
if (!tcm_loop_cmd_cache) {
pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n");
- goto out_destroy_workqueue;
+ goto out;
}
ret = tcm_loop_alloc_core_bus();
@@ -1191,8 +1180,6 @@ out_release_core_bus:
tcm_loop_release_core_bus();
out_destroy_cache:
kmem_cache_destroy(tcm_loop_cmd_cache);
-out_destroy_workqueue:
- destroy_workqueue(tcm_loop_workqueue);
out:
return ret;
}
@@ -1202,7 +1189,6 @@ static void __exit tcm_loop_fabric_exit(void)
target_unregister_template(&loop_ops);
tcm_loop_release_core_bus();
kmem_cache_destroy(tcm_loop_cmd_cache);
- destroy_workqueue(tcm_loop_workqueue);
}
MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
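The tcm_loop rework above drops the private kmem_cache allocation from queuecommand in favor of midlayer-managed per-command memory. A minimal sketch of that pattern, with my_cmd/my_queuecommand as illustrative names: setting .cmd_size in the scsi_host_template makes the SCSI midlayer reserve the driver's struct behind every scsi_cmnd, retrievable via scsi_cmd_priv():

	struct my_cmd {				/* illustrative */
		struct scsi_cmnd *sc;
	};

	static int my_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
	{
		struct my_cmd *cmd = scsi_cmd_priv(sc);

		/* the priv area is reused across commands, not re-zeroed */
		memset(cmd, 0, sizeof(*cmd));
		cmd->sc = sc;
		/* ... build and submit the backend command ... */
		return 0;
	}

	static struct scsi_host_template my_template = {
		.queuecommand	= my_queuecommand,
		.cmd_size	= sizeof(struct my_cmd),
		/* ... */
	};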
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index d3110909a213..437663b3905c 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -16,7 +16,6 @@ struct tcm_loop_cmd {
struct scsi_cmnd *sc;
/* The TCM I/O descriptor that is accessed via container_of() */
struct se_cmd tl_se_cmd;
- struct work_struct work;
struct completion tmr_done;
/* Sense buffer that will be mapped into outgoing status */
unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER];
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 2a6165febd3b..ce84f93c183a 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1218,11 +1218,9 @@ static void sbp_handle_command(struct sbp_target_request *req)
/* only used for printk until we do TMRs */
req->se_cmd.tag = req->orb_pointer;
- if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
- req->sense_buf, unpacked_lun, data_length,
- TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF))
- goto err;
-
+ target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
+ req->sense_buf, unpacked_lun, data_length,
+ TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF);
return;
err:
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index f04352285155..4b2e49341ad6 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1494,7 +1494,7 @@ static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item,
{
struct t10_wwn *t10_wwn = to_t10_wwn(item);
struct se_device *dev = t10_wwn->t10_dev;
- unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
+ unsigned char buf[INQUIRY_VPD_SERIAL_LEN] = { };
/*
* If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
@@ -1536,7 +1536,6 @@ static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item,
* Also, strip any newline added from the userspace
* echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
*/
- memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
"%s", strstrip(buf));
@@ -1556,11 +1555,9 @@ static ssize_t target_wwn_vpd_protocol_identifier_show(struct config_item *item,
{
struct t10_wwn *t10_wwn = to_t10_wwn(item);
struct t10_vpd *vpd;
- unsigned char buf[VPD_TMP_BUF_SIZE];
+ unsigned char buf[VPD_TMP_BUF_SIZE] = { };
ssize_t len = 0;
- memset(buf, 0, VPD_TMP_BUF_SIZE);
-
spin_lock(&t10_wwn->t10_vpd_lock);
list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
if (!vpd->protocol_identifier_set)
@@ -1663,9 +1660,7 @@ static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
{
struct se_node_acl *se_nacl;
struct t10_pr_registration *pr_reg;
- char i_buf[PR_REG_ISID_ID_LEN];
-
- memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+ char i_buf[PR_REG_ISID_ID_LEN] = { };
pr_reg = dev->dev_pr_res_holder;
if (!pr_reg)
@@ -2286,7 +2281,7 @@ static ssize_t target_dev_alua_lu_gp_store(struct config_item *item,
struct se_hba *hba = dev->se_hba;
struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
struct t10_alua_lu_gp_member *lu_gp_mem;
- unsigned char buf[LU_GROUP_NAME_BUF];
+ unsigned char buf[LU_GROUP_NAME_BUF] = { };
int move = 0;
lu_gp_mem = dev->dev_alua_lu_gp_mem;
@@ -2297,7 +2292,6 @@ static ssize_t target_dev_alua_lu_gp_store(struct config_item *item,
pr_err("ALUA LU Group Alias too large!\n");
return -EINVAL;
}
- memset(buf, 0, LU_GROUP_NAME_BUF);
memcpy(buf, page, count);
/*
* Any ALUA logical unit alias besides "NULL" means we will be
@@ -2615,9 +2609,7 @@ static ssize_t target_lu_gp_members_show(struct config_item *item, char *page)
struct se_hba *hba;
struct t10_alua_lu_gp_member *lu_gp_mem;
ssize_t len = 0, cur_len;
- unsigned char buf[LU_GROUP_NAME_BUF];
-
- memset(buf, 0, LU_GROUP_NAME_BUF);
+ unsigned char buf[LU_GROUP_NAME_BUF] = { };
spin_lock(&lu_gp->lu_gp_lock);
list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
@@ -2753,8 +2745,7 @@ static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
int new_state, ret;
if (!tg_pt_gp->tg_pt_gp_valid_id) {
- pr_err("Unable to do implicit ALUA on non valid"
- " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
+ pr_err("Unable to do implicit ALUA on invalid tg_pt_gp ID\n");
return -EINVAL;
}
if (!target_dev_configured(dev)) {
@@ -2805,9 +2796,7 @@ static ssize_t target_tg_pt_gp_alua_access_status_store(
int new_status, ret;
if (!tg_pt_gp->tg_pt_gp_valid_id) {
- pr_err("Unable to do set ALUA access status on non"
- " valid tg_pt_gp ID: %hu\n",
- tg_pt_gp->tg_pt_gp_valid_id);
+ pr_err("Unable to set ALUA access status on invalid tg_pt_gp ID\n");
return -EINVAL;
}
@@ -2860,9 +2849,7 @@ static ssize_t target_tg_pt_gp_alua_support_##_name##_store( \
int ret; \
\
if (!t->tg_pt_gp_valid_id) { \
- pr_err("Unable to do set " #_name " ALUA state on non" \
- " valid tg_pt_gp ID: %hu\n", \
- t->tg_pt_gp_valid_id); \
+ pr_err("Unable to set " #_name " ALUA state on invalid tg_pt_gp ID\n"); \
return -EINVAL; \
} \
\
@@ -3020,9 +3007,7 @@ static ssize_t target_tg_pt_gp_members_show(struct config_item *item,
struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
struct se_lun *lun;
ssize_t len = 0, cur_len;
- unsigned char buf[TG_PT_GROUP_NAME_BUF];
-
- memset(buf, 0, TG_PT_GROUP_NAME_BUF);
+ unsigned char buf[TG_PT_GROUP_NAME_BUF] = { };
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
@@ -3409,11 +3394,10 @@ static struct config_group *target_core_call_addhbatotarget(
{
char *se_plugin_str, *str, *str2;
struct se_hba *hba;
- char buf[TARGET_CORE_NAME_MAX_LEN];
+ char buf[TARGET_CORE_NAME_MAX_LEN] = { };
unsigned long plugin_dep_id = 0;
int ret;
- memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
pr_err("Passed *name strlen(): %d exceeds"
" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 7787c527aad3..a8df9f0a82fa 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -735,8 +735,14 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->queue_cnt = nr_cpu_ids;
for (i = 0; i < dev->queue_cnt; i++) {
- INIT_LIST_HEAD(&dev->queues[i].state_list);
- spin_lock_init(&dev->queues[i].lock);
+ struct se_device_queue *q;
+
+ q = &dev->queues[i];
+ INIT_LIST_HEAD(&q->state_list);
+ spin_lock_init(&q->lock);
+
+ init_llist_head(&q->sq.cmd_list);
+ INIT_WORK(&q->sq.work, target_queued_submit_work);
}
dev->se_hba = hba;
@@ -1029,7 +1035,7 @@ int core_dev_setup_virtual_lun0(void)
{
struct se_hba *hba;
struct se_device *dev;
- char buf[] = "rd_pages=8,rd_nullio=1";
+ char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
int ret;
hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
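The queue-init loop above assumes each se_device_queue now embeds a submission queue (q->sq) holding a lockless list and a work item. The actual definition lives in include/target/target_core_base.h and is not part of this diff; a hedged reconstruction:

	/* reconstruction of the assumed layout, not taken from this diff */
	struct se_cmd_queue {
		struct llist_head	cmd_list;	/* lockless producer side */
		struct work_struct	work;		/* drained by target_queued_submit_work() */
	};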
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index ee85602213f7..fc7edc04ee09 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -892,6 +892,7 @@ static void target_fabric_release_wwn(struct config_item *item)
struct target_fabric_configfs *tf = wwn->wwn_tf;
configfs_remove_default_groups(&wwn->fabric_stat_group);
+ configfs_remove_default_groups(&wwn->param_group);
tf->tf_ops->fabric_drop_wwn(wwn);
}
@@ -918,6 +919,57 @@ TF_CIT_SETUP(wwn_fabric_stats, NULL, NULL, NULL);
/* End of tfc_wwn_fabric_stats_cit */
+static ssize_t
+target_fabric_wwn_cmd_completion_affinity_show(struct config_item *item,
+ char *page)
+{
+ struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn,
+ param_group);
+ return sprintf(page, "%d\n",
+ wwn->cmd_compl_affinity == WORK_CPU_UNBOUND ?
+ SE_COMPL_AFFINITY_CURR_CPU : wwn->cmd_compl_affinity);
+}
+
+static ssize_t
+target_fabric_wwn_cmd_completion_affinity_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn,
+ param_group);
+ int compl_val;
+
+ if (kstrtoint(page, 0, &compl_val))
+ return -EINVAL;
+
+ switch (compl_val) {
+ case SE_COMPL_AFFINITY_CPUID:
+ wwn->cmd_compl_affinity = compl_val;
+ break;
+ case SE_COMPL_AFFINITY_CURR_CPU:
+ wwn->cmd_compl_affinity = WORK_CPU_UNBOUND;
+ break;
+ default:
+ if (compl_val < 0 || compl_val >= nr_cpu_ids ||
+ !cpu_online(compl_val)) {
+ pr_err("Command completion value must be between %d and %d or an online CPU.\n",
+ SE_COMPL_AFFINITY_CPUID,
+ SE_COMPL_AFFINITY_CURR_CPU);
+ return -EINVAL;
+ }
+ wwn->cmd_compl_affinity = compl_val;
+ }
+
+ return count;
+}
+CONFIGFS_ATTR(target_fabric_wwn_, cmd_completion_affinity);
+
+static struct configfs_attribute *target_fabric_wwn_param_attrs[] = {
+ &target_fabric_wwn_attr_cmd_completion_affinity,
+ NULL,
+};
+
+TF_CIT_SETUP(wwn_param, NULL, NULL, target_fabric_wwn_param_attrs);
+
/* Start of tfc_wwn_cit */
static struct config_group *target_fabric_make_wwn(
@@ -937,6 +989,7 @@ static struct config_group *target_fabric_make_wwn(
if (!wwn || IS_ERR(wwn))
return ERR_PTR(-EINVAL);
+ wwn->cmd_compl_affinity = SE_COMPL_AFFINITY_CPUID;
wwn->wwn_tf = tf;
config_group_init_type_name(&wwn->wwn_group, name, &tf->tf_tpg_cit);
@@ -945,6 +998,10 @@ static struct config_group *target_fabric_make_wwn(
&tf->tf_wwn_fabric_stats_cit);
configfs_add_default_group(&wwn->fabric_stat_group, &wwn->wwn_group);
+ config_group_init_type_name(&wwn->param_group, "param",
+ &tf->tf_wwn_param_cit);
+ configfs_add_default_group(&wwn->param_group, &wwn->wwn_group);
+
if (tf->tf_ops->add_wwn_groups)
tf->tf_ops->add_wwn_groups(wwn);
return &wwn->wwn_group;
@@ -974,6 +1031,7 @@ int target_fabric_setup_cits(struct target_fabric_configfs *tf)
target_fabric_setup_discovery_cit(tf);
target_fabric_setup_wwn_cit(tf);
target_fabric_setup_wwn_fabric_stats_cit(tf);
+ target_fabric_setup_wwn_param_cit(tf);
target_fabric_setup_tpg_cit(tf);
target_fabric_setup_tpg_base_cit(tf);
target_fabric_setup_tpg_port_cit(tf);
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 5a66854def95..ef4a8e189fba 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -498,6 +498,7 @@ fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
prot_length = nolb * se_dev->prot_length;
+ memset(buf, 0xff, bufsize);
for (prot = 0; prot < prot_length;) {
sector_t len = min_t(sector_t, bufsize, prot_length - prot);
ssize_t ret = kernel_write(prot_fd, buf, len, &pos);
@@ -523,7 +524,6 @@ fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
pr_err("Unable to allocate FILEIO prot buf\n");
return -ENOMEM;
}
- memset(buf, 0xff, PAGE_SIZE);
rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);
@@ -882,7 +882,6 @@ static int fd_format_prot(struct se_device *dev)
(unsigned long long)(dev->transport->get_blocks(dev) + 1) *
dev->prot_length);
- memset(buf, 0xff, unit_size);
ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
buf, unit_size);
vfree(buf);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index ee3d52061281..d6fdd1c61f90 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -61,9 +61,18 @@ static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *nam
return NULL;
}
+ ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),
+ GFP_KERNEL);
+ if (!ib_dev->ibd_plug)
+ goto free_dev;
+
pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
return &ib_dev->dev;
+
+free_dev:
+ kfree(ib_dev);
+ return NULL;
}
static int iblock_configure_device(struct se_device *dev)
@@ -171,6 +180,7 @@ static void iblock_dev_call_rcu(struct rcu_head *p)
struct se_device *dev = container_of(p, struct se_device, rcu_head);
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ kfree(ib_dev->ibd_plug);
kfree(ib_dev);
}
@@ -188,6 +198,33 @@ static void iblock_destroy_device(struct se_device *dev)
bioset_exit(&ib_dev->ibd_bio_set);
}
+static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(se_dev);
+ struct iblock_dev_plug *ib_dev_plug;
+
+ /*
+ * Each se_device has a per-CPU work item this can be run from. We
+ * shouldn't have multiple threads on the same CPU calling this
+ * at the same time.
+ */
+ ib_dev_plug = &ib_dev->ibd_plug[smp_processor_id()];
+ if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
+ return NULL;
+
+ blk_start_plug(&ib_dev_plug->blk_plug);
+ return &ib_dev_plug->se_plug;
+}
+
+static void iblock_unplug_device(struct se_dev_plug *se_plug)
+{
+ struct iblock_dev_plug *ib_dev_plug = container_of(se_plug,
+ struct iblock_dev_plug, se_plug);
+
+ blk_finish_plug(&ib_dev_plug->blk_plug);
+ clear_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags);
+}
+
static unsigned long long iblock_emulate_read_cap_with_block_size(
struct se_device *dev,
struct block_device *bd,
@@ -304,9 +341,8 @@ static void iblock_bio_done(struct bio *bio)
iblock_complete_cmd(cmd);
}
-static struct bio *
-iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
- int op_flags)
+static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
+ unsigned int opf)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
struct bio *bio;
@@ -326,7 +362,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
bio->bi_private = cmd;
bio->bi_end_io = &iblock_bio_done;
bio->bi_iter.bi_sector = lba;
- bio_set_op_attrs(bio, op, op_flags);
+ bio->bi_opf = opf;
return bio;
}
@@ -335,7 +371,10 @@ static void iblock_submit_bios(struct bio_list *list)
{
struct blk_plug plug;
struct bio *bio;
-
+ /*
+ * The block layer handles nested plugs, so just plug/unplug to handle
+ * fabric drivers that didn't support batching and multi bio cmds.
+ */
blk_start_plug(&plug);
while ((bio = bio_list_pop(list)))
submit_bio(bio);
@@ -477,7 +516,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
goto fail;
cmd->priv = ibr;
- bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE, 0);
+ bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
if (!bio)
goto fail_free_ibr;
@@ -490,8 +529,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
- bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE,
- 0);
+ bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
if (!bio)
goto fail_put_bios;
@@ -685,9 +723,11 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct bio_list list;
struct scatterlist *sg;
u32 sg_num = sgl_nents;
+ unsigned int opf;
unsigned bio_cnt;
- int i, rc, op, op_flags = 0;
+ int i, rc;
struct sg_mapping_iter prot_miter;
+ unsigned int miter_dir;
if (data_direction == DMA_TO_DEVICE) {
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@@ -696,15 +736,17 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
* Force writethrough using REQ_FUA if a volatile write cache
* is not enabled, or if initiator set the Force Unit Access bit.
*/
- op = REQ_OP_WRITE;
+ opf = REQ_OP_WRITE;
+ miter_dir = SG_MITER_TO_SG;
if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
if (cmd->se_cmd_flags & SCF_FUA)
- op_flags = REQ_FUA;
+ opf |= REQ_FUA;
else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
- op_flags = REQ_FUA;
+ opf |= REQ_FUA;
}
} else {
- op = REQ_OP_READ;
+ opf = REQ_OP_READ;
+ miter_dir = SG_MITER_FROM_SG;
}
ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
@@ -718,7 +760,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
return 0;
}
- bio = iblock_get_bio(cmd, block_lba, sgl_nents, op, op_flags);
+ bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf);
if (!bio)
goto fail_free_ibr;
@@ -730,8 +772,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
- op == REQ_OP_READ ? SG_MITER_FROM_SG :
- SG_MITER_TO_SG);
+ miter_dir);
for_each_sg(sgl, sg, sgl_nents, i) {
/*
@@ -752,8 +793,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
bio_cnt = 0;
}
- bio = iblock_get_bio(cmd, block_lba, sg_num, op,
- op_flags);
+ bio = iblock_get_bio(cmd, block_lba, sg_num, opf);
if (!bio)
goto fail_put_bios;
@@ -813,7 +853,8 @@ static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct block_device *bd = ib_dev->ibd_bd;
- int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
+ unsigned int logs_per_phys =
+ bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
return ilog2(logs_per_phys);
}
@@ -867,6 +908,8 @@ static const struct target_backend_ops iblock_ops = {
.configure_device = iblock_configure_device,
.destroy_device = iblock_destroy_device,
.free_device = iblock_free_device,
+ .plug_device = iblock_plug_device,
+ .unplug_device = iblock_unplug_device,
.parse_cdb = iblock_parse_cdb,
.set_configfs_dev_params = iblock_set_configfs_dev_params,
.show_configfs_dev_params = iblock_show_configfs_dev_params,
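The plug_device/unplug_device callouts added above give the backend one blk_plug per CPU, with the IBD_PLUGF_PLUGGED bit ensuring a queue is plugged at most once per batch. A simplified sketch of how the core side brackets a batch with these callouts (condensed from target_queued_submit_work() in the target_core_transport.c diff below; the real code also takes a config_group reference on the device):

	static void submit_batch(struct se_device *se_dev,
				 struct se_cmd **cmds, int n)	/* illustrative */
	{
		struct se_dev_plug *se_plug = NULL;
		int i;

		if (se_dev->transport->plug_device)
			se_plug = se_dev->transport->plug_device(se_dev);

		for (i = 0; i < n; i++)
			target_submit(cmds[i]);

		if (se_plug)
			se_dev->transport->unplug_device(se_plug);
	}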
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index cefc641145b3..8c55375d2f75 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -4,6 +4,7 @@
#include <linux/atomic.h>
#include <linux/refcount.h>
+#include <linux/blkdev.h>
#include <target/target_core_base.h>
#define IBLOCK_VERSION "4.0"
@@ -17,6 +18,14 @@ struct iblock_req {
#define IBDF_HAS_UDEV_PATH 0x01
+#define IBD_PLUGF_PLUGGED 0x01
+
+struct iblock_dev_plug {
+ struct se_dev_plug se_plug;
+ struct blk_plug blk_plug;
+ unsigned long flags;
+};
+
struct iblock_dev {
struct se_device dev;
unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
@@ -24,6 +33,7 @@ struct iblock_dev {
struct bio_set ibd_bio_set;
struct block_device *ibd_bd;
bool ibd_readonly;
+ struct iblock_dev_plug *ibd_plug;
} ____cacheline_aligned;
#endif /* TARGET_CORE_IBLOCK_H */
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index e7b3c6e5d574..a343bcfa2180 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -34,6 +34,7 @@ struct target_fabric_configfs {
struct config_item_type tf_discovery_cit;
struct config_item_type tf_wwn_cit;
struct config_item_type tf_wwn_fabric_stats_cit;
+ struct config_item_type tf_wwn_param_cit;
struct config_item_type tf_tpg_cit;
struct config_item_type tf_tpg_base_cit;
struct config_item_type tf_tpg_lun_cit;
@@ -153,6 +154,7 @@ void target_qf_do_work(struct work_struct *work);
bool target_check_wce(struct se_device *dev);
bool target_check_fua(struct se_device *dev);
void __target_execute_cmd(struct se_cmd *, bool);
+void target_queued_submit_work(struct work_struct *work);
/* target_core_stat.c */
void target_stat_setup_dev_default_groups(struct se_device *);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index d4cc43afe05b..6fd5fec95539 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -896,9 +896,8 @@ static void core_scsi3_aptpl_reserve(
struct se_node_acl *node_acl,
struct t10_pr_registration *pr_reg)
{
- char i_buf[PR_REG_ISID_ID_LEN];
+ char i_buf[PR_REG_ISID_ID_LEN] = { };
- memset(i_buf, 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
spin_lock(&dev->dev_reservation_lock);
@@ -928,12 +927,10 @@ static int __core_scsi3_check_aptpl_registration(
{
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
- unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
- unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
+ unsigned char i_port[PR_APTPL_MAX_IPORT_LEN] = { };
+ unsigned char t_port[PR_APTPL_MAX_TPORT_LEN] = { };
u16 tpgt;
- memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
- memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
/*
* Copy Initiator Port information from struct se_node_acl
*/
@@ -1023,9 +1020,8 @@ static void __core_scsi3_dump_registration(
enum register_type register_type)
{
struct se_portal_group *se_tpg = nacl->se_tpg;
- char i_buf[PR_REG_ISID_ID_LEN];
+ char i_buf[PR_REG_ISID_ID_LEN] = { };
- memset(&i_buf[0], 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
@@ -1204,10 +1200,10 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg(
struct se_session *sess)
{
struct se_portal_group *tpg = nacl->se_tpg;
- unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+ unsigned char buf[PR_REG_ISID_LEN] = { };
+ unsigned char *isid_ptr = NULL;
if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
- memset(&buf[0], 0, PR_REG_ISID_LEN);
tpg->se_tpg_tfo->sess_get_initiator_sid(sess, &buf[0],
PR_REG_ISID_LEN);
isid_ptr = &buf[0];
@@ -1285,11 +1281,10 @@ static void __core_scsi3_free_registration(
struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
struct se_dev_entry *deve;
- char i_buf[PR_REG_ISID_ID_LEN];
+ char i_buf[PR_REG_ISID_ID_LEN] = { };
lockdep_assert_held(&pr_tmpl->registration_lock);
- memset(i_buf, 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
if (!list_empty(&pr_reg->pr_reg_list))
@@ -1642,8 +1637,7 @@ core_scsi3_decode_spec_i_port(
}
dest_tpg = tmp_tpg;
- pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node:"
- " %s Port RTPI: %hu\n",
+ pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s Port RTPI: %u\n",
dest_tpg->se_tpg_tfo->fabric_name,
dest_node_acl->initiatorname, dest_rtpi);
@@ -1680,8 +1674,7 @@ core_scsi3_decode_spec_i_port(
dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl,
dest_rtpi);
if (!dest_se_deve) {
- pr_err("Unable to locate %s dest_se_deve"
- " from destination RTPI: %hu\n",
+ pr_err("Unable to locate %s dest_se_deve from destination RTPI: %u\n",
dest_tpg->se_tpg_tfo->fabric_name,
dest_rtpi);
@@ -2059,7 +2052,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
- unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+ unsigned char isid_buf[PR_REG_ISID_LEN] = { };
+ unsigned char *isid_ptr = NULL;
sense_reason_t ret = TCM_NO_SENSE;
int pr_holder = 0, type;
@@ -2070,7 +2064,6 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
se_tpg = se_sess->se_tpg;
if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) {
- memset(&isid_buf[0], 0, PR_REG_ISID_LEN);
se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, &isid_buf[0],
PR_REG_ISID_LEN);
isid_ptr = &isid_buf[0];
@@ -2282,11 +2275,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
struct se_lun *se_lun = cmd->se_lun;
struct t10_pr_registration *pr_reg, *pr_res_holder;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
- char i_buf[PR_REG_ISID_ID_LEN];
+ char i_buf[PR_REG_ISID_ID_LEN] = { };
sense_reason_t ret;
- memset(i_buf, 0, PR_REG_ISID_ID_LEN);
-
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -2457,12 +2448,11 @@ static void __core_scsi3_complete_pro_release(
int unreg)
{
const struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
- char i_buf[PR_REG_ISID_ID_LEN];
+ char i_buf[PR_REG_ISID_ID_LEN] = { };
int pr_res_type = 0, pr_res_scope = 0;
lockdep_assert_held(&dev->dev_reservation_lock);
- memset(i_buf, 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
/*
* Go ahead and release the current PR reservation holder.
@@ -2768,11 +2758,10 @@ static void __core_scsi3_complete_pro_preempt(
{
struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
- char i_buf[PR_REG_ISID_ID_LEN];
+ char i_buf[PR_REG_ISID_ID_LEN] = { };
lockdep_assert_held(&dev->dev_reservation_lock);
- memset(i_buf, 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
/*
* Do an implicit RELEASE of the existing reservation.
@@ -3158,7 +3147,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
const unsigned char *initiator_str;
- char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
+ char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN] = { };
u32 tid_len, tmp_tid_len;
int new_reg = 0, type, scope, matching_iname;
sense_reason_t ret;
@@ -3170,7 +3159,6 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- memset(i_buf, 0, PR_REG_ISID_ID_LEN);
se_tpg = se_sess->se_tpg;
tf_ops = se_tpg->se_tpg_tfo;
/*
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index d64c3ffdb52e..f2a11414366d 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -34,8 +34,6 @@
#include "target_core_internal.h"
#include "target_core_pscsi.h"
-#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
-
static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
{
return container_of(dev, struct pscsi_dev_virt, dev);
@@ -620,8 +618,9 @@ static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
unsigned char *buf;
buf = transport_kmap_data_sg(cmd);
- if (!buf)
+ if (!buf) {
; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
+ }
if (cdb[0] == MODE_SENSE_10) {
if (!(buf[3] & 0x80))
@@ -1047,7 +1046,7 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
int result = scsi_req(req)->result;
u8 scsi_status = status_byte(result) << 1;
- if (scsi_status) {
+ if (scsi_status != SAM_STAT_GOOD) {
pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
" 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
result);
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index bf936bbeccfe..6648c1c90e19 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -530,12 +530,13 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
}
enum {
- Opt_rd_pages, Opt_rd_nullio, Opt_err
+ Opt_rd_pages, Opt_rd_nullio, Opt_rd_dummy, Opt_err
};
static match_table_t tokens = {
{Opt_rd_pages, "rd_pages=%d"},
{Opt_rd_nullio, "rd_nullio=%d"},
+ {Opt_rd_dummy, "rd_dummy=%d"},
{Opt_err, NULL}
};
@@ -574,6 +575,14 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
rd_dev->rd_flags |= RDF_NULLIO;
break;
+ case Opt_rd_dummy:
+ match_int(args, &arg);
+ if (arg != 1)
+ break;
+
+ pr_debug("RAMDISK: Setting DUMMY flag: %d\n", arg);
+ rd_dev->rd_flags |= RDF_DUMMY;
+ break;
default:
break;
}
@@ -590,12 +599,22 @@ static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
rd_dev->rd_dev_id);
bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
- " SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count,
+ " SG_table_count: %u nullio: %d dummy: %d\n",
+ rd_dev->rd_page_count,
PAGE_SIZE, rd_dev->sg_table_count,
- !!(rd_dev->rd_flags & RDF_NULLIO));
+ !!(rd_dev->rd_flags & RDF_NULLIO),
+ !!(rd_dev->rd_flags & RDF_DUMMY));
return bl;
}
+static u32 rd_get_device_type(struct se_device *dev)
+{
+ if (RD_DEV(dev)->rd_flags & RDF_DUMMY)
+ return 0x3f; /* Unknown device type, not connected */
+ else
+ return sbc_get_device_type(dev);
+}
+
static sector_t rd_get_blocks(struct se_device *dev)
{
struct rd_dev *rd_dev = RD_DEV(dev);
@@ -647,7 +666,7 @@ static const struct target_backend_ops rd_mcp_ops = {
.parse_cdb = rd_parse_cdb,
.set_configfs_dev_params = rd_set_configfs_dev_params,
.show_configfs_dev_params = rd_show_configfs_dev_params,
- .get_device_type = sbc_get_device_type,
+ .get_device_type = rd_get_device_type,
.get_blocks = rd_get_blocks,
.init_prot = rd_init_prot,
.free_prot = rd_free_prot,
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 8b88f9b14c3f..9ffda5c4b584 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -28,6 +28,7 @@ struct rd_dev_sg_table {
#define RDF_HAS_PAGE_COUNT 0x01
#define RDF_NULLIO 0x02
+#define RDF_DUMMY 0x04
struct rd_dev {
struct se_device dev;
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index f7c527a826fd..7b07e557dc8d 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -448,7 +448,7 @@ compare_and_write_do_cmp(struct scatterlist *read_sgl, unsigned int read_nents,
sense_reason_t ret;
unsigned int offset;
size_t rc;
- int i;
+ int sg_cnt;
buf = kzalloc(cmp_len, GFP_KERNEL);
if (!buf) {
@@ -467,7 +467,7 @@ compare_and_write_do_cmp(struct scatterlist *read_sgl, unsigned int read_nents,
*/
offset = 0;
ret = TCM_NO_SENSE;
- for_each_sg(read_sgl, sg, read_nents, i) {
+ for_each_sg(read_sgl, sg, read_nents, sg_cnt) {
unsigned int len = min(sg->length, cmp_len);
unsigned char *addr = kmap_atomic(sg_page(sg));
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index ca5579ebc81d..70a661801cb9 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -701,7 +701,6 @@ static sense_reason_t
spc_emulate_inquiry(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- struct se_portal_group *tpg = cmd->se_lun->lun_tpg;
unsigned char *rbuf;
unsigned char *cdb = cmd->t_task_cdb;
unsigned char *buf;
@@ -715,10 +714,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- if (dev == rcu_access_pointer(tpg->tpg_virt_lun0->lun_se_dev))
- buf[0] = 0x3f; /* Not connected */
- else
- buf[0] = dev->transport->get_device_type(dev);
+ buf[0] = dev->transport->get_device_type(dev);
if (!(cdb[1] & 0x1)) {
if (cdb[2]) {
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 237309db4b33..62d15bcc3d93 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -31,9 +31,6 @@
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
#endif
-#define NONE "None"
-#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
-
#define SCSI_LU_INDEX 1
#define LU_COUNT 1
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 7347285471fa..e7fcbc09f9db 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -124,6 +124,8 @@ void core_tmr_abort_task(
int i;
for (i = 0; i < dev->queue_cnt; i++) {
+ flush_work(&dev->queues[i].sq.work);
+
spin_lock_irqsave(&dev->queues[i].lock, flags);
list_for_each_entry_safe(se_cmd, next, &dev->queues[i].state_list,
state_list) {
@@ -302,6 +304,8 @@ static void core_tmr_drain_state_list(
* in the Control Mode Page.
*/
for (i = 0; i < dev->queue_cnt; i++) {
+ flush_work(&dev->queues[i].sq.work);
+
spin_lock_irqsave(&dev->queues[i].lock, flags);
list_for_each_entry_safe(cmd, next, &dev->queues[i].state_list,
state_list) {
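The flush_work() calls added above close a race with the queued submission path: a command handed to target_queue_submission() only reaches dev->queues[i].state_list once the per-CPU work has run, so the TMR code flushes each queue before scanning. An illustrative sketch of the ordering this enforces:

	static void tmr_drain_queue(struct se_device *dev, int i)	/* illustrative */
	{
		/* run any still-queued submissions so they land on state_list */
		flush_work(&dev->queues[i].sq.work);

		/* only now is a scan of dev->queues[i].state_list complete */
	}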
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 5ecb9f18a53d..8fbfe75c5744 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -41,6 +41,7 @@
#include <trace/events/target.h>
static struct workqueue_struct *target_completion_wq;
+static struct workqueue_struct *target_submission_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
@@ -129,8 +130,15 @@ int init_se_kmem_caches(void)
if (!target_completion_wq)
goto out_free_lba_map_mem_cache;
+ target_submission_wq = alloc_workqueue("target_submission",
+ WQ_MEM_RECLAIM, 0);
+ if (!target_submission_wq)
+ goto out_free_completion_wq;
+
return 0;
+out_free_completion_wq:
+ destroy_workqueue(target_completion_wq);
out_free_lba_map_mem_cache:
kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
@@ -153,6 +161,7 @@ out:
void release_se_kmem_caches(void)
{
+ destroy_workqueue(target_submission_wq);
destroy_workqueue(target_completion_wq);
kmem_cache_destroy(se_sess_cache);
kmem_cache_destroy(se_ua_cache);
@@ -848,7 +857,8 @@ static bool target_cmd_interrupted(struct se_cmd *cmd)
/* May be called from interrupt context so must not sleep. */
void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
- int success;
+ struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn;
+ int success, cpu;
unsigned long flags;
if (target_cmd_interrupted(cmd))
@@ -875,7 +885,13 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
INIT_WORK(&cmd->work, success ? target_complete_ok_work :
target_complete_failure_work);
- queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
+
+ if (wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
+ cpu = cmd->cpuid;
+ else
+ cpu = wwn->cmd_compl_affinity;
+
+ queue_work_on(cpu, target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);
@@ -1304,7 +1320,7 @@ target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
* Compare the data buffer size from the CDB with the data buffer limit from the transport
* header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary.
*
- * Note: target drivers set @cmd->data_length by calling transport_init_se_cmd().
+ * Note: target drivers set @cmd->data_length by calling __target_init_cmd().
*
* Return: TCM_NO_SENSE
*/
@@ -1371,7 +1387,7 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
*
* Preserves the value of @cmd->tag.
*/
-void transport_init_se_cmd(
+void __target_init_cmd(
struct se_cmd *cmd,
const struct target_core_fabric_ops *tfo,
struct se_session *se_sess,
@@ -1382,7 +1398,6 @@ void transport_init_se_cmd(
{
INIT_LIST_HEAD(&cmd->se_delayed_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
- INIT_LIST_HEAD(&cmd->se_cmd_list);
INIT_LIST_HEAD(&cmd->state_list);
init_completion(&cmd->t_transport_stop_comp);
cmd->free_compl = NULL;
@@ -1391,6 +1406,7 @@ void transport_init_se_cmd(
INIT_WORK(&cmd->work, NULL);
kref_init(&cmd->cmd_kref);
+ cmd->t_task_cdb = &cmd->__t_task_cdb[0];
cmd->se_tfo = tfo;
cmd->se_sess = se_sess;
cmd->data_length = data_length;
@@ -1404,7 +1420,7 @@ void transport_init_se_cmd(
cmd->state_active = false;
}
-EXPORT_SYMBOL(transport_init_se_cmd);
+EXPORT_SYMBOL(__target_init_cmd);
static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
@@ -1428,11 +1444,10 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
}
sense_reason_t
-target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb)
+target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb, gfp_t gfp)
{
sense_reason_t ret;
- cmd->t_task_cdb = &cmd->__t_task_cdb[0];
/*
* Ensure that the received CDB is less than the max (252 + 8) bytes
* for VARIABLE_LENGTH_CMD
@@ -1450,8 +1465,7 @@ target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb)
* setup the pointer from __t_task_cdb to t_task_cdb.
*/
if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
- cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
- GFP_KERNEL);
+ cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp);
if (!cmd->t_task_cdb) {
pr_err("Unable to allocate cmd->t_task_cdb"
" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
@@ -1573,46 +1587,31 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
}
/**
- * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
- * se_cmd + use pre-allocated SGL memory.
- *
- * @se_cmd: command descriptor to submit
+ * target_init_cmd - initialize se_cmd
+ * @se_cmd: command descriptor to init
* @se_sess: associated se_sess for endpoint
- * @cdb: pointer to SCSI CDB
* @sense: pointer to SCSI sense buffer
* @unpacked_lun: unpacked LUN to reference for struct se_lun
* @data_length: fabric expected data transfer length
* @task_attr: SAM task attribute
* @data_dir: DMA data direction
* @flags: flags for command submission from target_sc_flags_tables
- * @sgl: struct scatterlist memory for unidirectional mapping
- * @sgl_count: scatterlist count for unidirectional mapping
- * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
- * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
- * @sgl_prot: struct scatterlist memory protection information
- * @sgl_prot_count: scatterlist count for protection information
*
* Task tags are supported if the caller has set @se_cmd->tag.
*
- * Returns non zero to signal active I/O shutdown failure. All other
- * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
- * but still return zero here.
+ * Returns:
+ * - less than zero to signal active I/O shutdown failure.
+ * - zero on success.
*
- * This may only be called from process context, and also currently
- * assumes internal allocation of fabric payload buffer by target-core.
+ * If the fabric driver calls target_stop_session, then it must check the
+ * return code and handle failures. This will never fail for other drivers,
+ * and the return code can be ignored.
*/
-int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
- unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
- u32 data_length, int task_attr, int data_dir, int flags,
- struct scatterlist *sgl, u32 sgl_count,
- struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
- struct scatterlist *sgl_prot, u32 sgl_prot_count)
+int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+ unsigned char *sense, u64 unpacked_lun,
+ u32 data_length, int task_attr, int data_dir, int flags)
{
struct se_portal_group *se_tpg;
- sense_reason_t rc;
- int ret;
-
- might_sleep();
se_tpg = se_sess->se_tpg;
BUG_ON(!se_tpg);
@@ -1621,52 +1620,71 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
if (flags & TARGET_SCF_USE_CPUID)
se_cmd->se_cmd_flags |= SCF_USE_CPUID;
/*
+ * Signal bidirectional data payloads to target-core
+ */
+ if (flags & TARGET_SCF_BIDI_OP)
+ se_cmd->se_cmd_flags |= SCF_BIDI;
+
+ if (flags & TARGET_SCF_UNKNOWN_SIZE)
+ se_cmd->unknown_data_length = 1;
+ /*
* Initialize se_cmd for target operation. From this point
* exceptions are handled by sending exception status via
* target_core_fabric_ops->queue_status() callback
*/
- transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
- data_length, data_dir, task_attr, sense,
- unpacked_lun);
+ __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length,
+ data_dir, task_attr, sense, unpacked_lun);
- if (flags & TARGET_SCF_UNKNOWN_SIZE)
- se_cmd->unknown_data_length = 1;
/*
* Obtain struct se_cmd->cmd_kref reference. A second kref_get here is
* necessary for fabrics using TARGET_SCF_ACK_KREF that expect a second
* kref_put() to happen during fabric packet acknowledgement.
*/
- ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
- if (ret)
- return ret;
- /*
- * Signal bidirectional data payloads to target-core
- */
- if (flags & TARGET_SCF_BIDI_OP)
- se_cmd->se_cmd_flags |= SCF_BIDI;
+ return target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
+}
+EXPORT_SYMBOL_GPL(target_init_cmd);
- rc = target_cmd_init_cdb(se_cmd, cdb);
- if (rc) {
- transport_send_check_condition_and_sense(se_cmd, rc, 0);
- target_put_sess_cmd(se_cmd);
- return 0;
- }
+/**
+ * target_submit_prep - prepare cmd for submission
+ * @se_cmd: command descriptor to prep
+ * @cdb: pointer to SCSI CDB
+ * @sgl: struct scatterlist memory for unidirectional mapping
+ * @sgl_count: scatterlist count for unidirectional mapping
+ * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
+ * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
+ * @sgl_prot: struct scatterlist memory protection information
+ * @sgl_prot_count: scatterlist count for protection information
+ * @gfp: gfp allocation type
+ *
+ * Returns:
+ * - less than zero to signal failure.
+ * - zero on success.
+ *
+ * If failure is returned, LIO will use the caller's queue_status to
+ * complete the cmd.
+ */
+int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb,
+ struct scatterlist *sgl, u32 sgl_count,
+ struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
+ struct scatterlist *sgl_prot, u32 sgl_prot_count,
+ gfp_t gfp)
+{
+ sense_reason_t rc;
+
+ rc = target_cmd_init_cdb(se_cmd, cdb, gfp);
+ if (rc)
+ goto send_cc_direct;
/*
* Locate se_lun pointer and attach it to struct se_cmd
*/
rc = transport_lookup_cmd_lun(se_cmd);
- if (rc) {
- transport_send_check_condition_and_sense(se_cmd, rc, 0);
- target_put_sess_cmd(se_cmd);
- return 0;
- }
+ if (rc)
+ goto send_cc_direct;
rc = target_cmd_parse_cdb(se_cmd);
- if (rc != 0) {
- transport_generic_request_failure(se_cmd, rc);
- return 0;
- }
+ if (rc != 0)
+ goto generic_fail;
/*
* Save pointers for SGLs containing protection information,
@@ -1686,6 +1704,41 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
if (sgl_count != 0) {
BUG_ON(!sgl);
+ rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
+ sgl_bidi, sgl_bidi_count);
+ if (rc != 0)
+ goto generic_fail;
+ }
+
+ return 0;
+
+send_cc_direct:
+ transport_send_check_condition_and_sense(se_cmd, rc, 0);
+ target_put_sess_cmd(se_cmd);
+ return -EIO;
+
+generic_fail:
+ transport_generic_request_failure(se_cmd, rc);
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(target_submit_prep);
+
+/**
+ * target_submit - perform final initialization and submit cmd to LIO core
+ * @se_cmd: command descriptor to submit
+ *
+ * target_submit_prep must have been called on the cmd, and this must be
+ * called from process context.
+ */
+void target_submit(struct se_cmd *se_cmd)
+{
+ struct scatterlist *sgl = se_cmd->t_data_sg;
+ unsigned char *buf = NULL;
+
+ might_sleep();
+
+ if (se_cmd->t_data_nents != 0) {
+ BUG_ON(!sgl);
/*
* A work-around for tcm_loop as some userspace code via
* scsi-generic do not memset their associated read buffers,
@@ -1696,8 +1749,6 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
*/
if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
se_cmd->data_direction == DMA_FROM_DEVICE) {
- unsigned char *buf = NULL;
-
if (sgl)
buf = kmap(sg_page(sgl)) + sgl->offset;
@@ -1707,12 +1758,6 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
}
}
- rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
- sgl_bidi, sgl_bidi_count);
- if (rc != 0) {
- transport_generic_request_failure(se_cmd, rc);
- return 0;
- }
}
/*
@@ -1722,9 +1767,8 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
core_alua_check_nonop_delay(se_cmd);
transport_handle_cdb_direct(se_cmd);
- return 0;
}
-EXPORT_SYMBOL(target_submit_cmd_map_sgls);
+EXPORT_SYMBOL_GPL(target_submit);
/**
* target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
@@ -1741,25 +1785,109 @@ EXPORT_SYMBOL(target_submit_cmd_map_sgls);
*
* Task tags are supported if the caller has set @se_cmd->tag.
*
- * Returns non zero to signal active I/O shutdown failure. All other
- * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
- * but still return zero here.
- *
* This may only be called from process context, and also currently
* assumes internal allocation of fabric payload buffer by target-core.
*
* It also assumes internal target core SGL memory allocation.
+ *
+ * This function must only be used by drivers that do their own
+ * sync during shutdown and does not use target_stop_session. If there
+ * is a failure this function will call into the fabric driver's
+ * queue_status with a CHECK_CONDITION.
*/
-int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
u32 data_length, int task_attr, int data_dir, int flags)
{
- return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
- unpacked_lun, data_length, task_attr, data_dir,
- flags, NULL, 0, NULL, 0, NULL, 0);
+ int rc;
+
+ rc = target_init_cmd(se_cmd, se_sess, sense, unpacked_lun, data_length,
+ task_attr, data_dir, flags);
+ WARN(rc, "Invalid target_submit_cmd use. Driver must not use target_stop_session or call target_init_cmd directly.\n");
+ if (rc)
+ return;
+
+ if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
+ GFP_KERNEL))
+ return;
+
+ target_submit(se_cmd);
}
EXPORT_SYMBOL(target_submit_cmd);
+
+static struct se_dev_plug *target_plug_device(struct se_device *se_dev)
+{
+ struct se_dev_plug *se_plug;
+
+ if (!se_dev->transport->plug_device)
+ return NULL;
+
+ se_plug = se_dev->transport->plug_device(se_dev);
+ if (!se_plug)
+ return NULL;
+
+ se_plug->se_dev = se_dev;
+ /*
+ * We have a ref to the lun at this point, but the cmds could
+ * complete before we unplug, so grab a ref to the se_device so we
+ * can call back into the backend.
+ */
+ config_group_get(&se_dev->dev_group);
+ return se_plug;
+}
+
+static void target_unplug_device(struct se_dev_plug *se_plug)
+{
+ struct se_device *se_dev = se_plug->se_dev;
+
+ se_dev->transport->unplug_device(se_plug);
+ config_group_put(&se_dev->dev_group);
+}
+
+void target_queued_submit_work(struct work_struct *work)
+{
+ struct se_cmd_queue *sq = container_of(work, struct se_cmd_queue, work);
+ struct se_cmd *se_cmd, *next_cmd;
+ struct se_dev_plug *se_plug = NULL;
+ struct se_device *se_dev = NULL;
+ struct llist_node *cmd_list;
+
+ cmd_list = llist_del_all(&sq->cmd_list);
+ if (!cmd_list)
+ /* Previous call took what we were queued to submit */
+ return;
+
+ cmd_list = llist_reverse_order(cmd_list);
+ llist_for_each_entry_safe(se_cmd, next_cmd, cmd_list, se_cmd_list) {
+ if (!se_dev) {
+ se_dev = se_cmd->se_dev;
+ se_plug = target_plug_device(se_dev);
+ }
+
+ target_submit(se_cmd);
+ }
+
+ if (se_plug)
+ target_unplug_device(se_plug);
+}
+
+/**
+ * target_queue_submission - queue the cmd to run on the LIO workqueue
+ * @se_cmd: command descriptor to submit
+ */
+void target_queue_submission(struct se_cmd *se_cmd)
+{
+ struct se_device *se_dev = se_cmd->se_dev;
+ int cpu = se_cmd->cpuid;
+ struct se_cmd_queue *sq;
+
+ sq = &se_dev->queues[cpu].sq;
+ llist_add(&se_cmd->se_cmd_list, &sq->cmd_list);
+ queue_work_on(cpu, target_submission_wq, &sq->work);
+}
+EXPORT_SYMBOL_GPL(target_queue_submission);
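
target_queue_submission() and target_queued_submit_work() above form a
lock-free producer/consumer pair: llist_add() pushes from any context,
llist_del_all() takes the whole batch, and because llist hands the batch
back in LIFO order the worker reverses it first. A minimal generic
sketch of the idiom, with an illustrative item type:

	#include <linux/llist.h>

	struct item {
		struct llist_node node;
	};

	static LLIST_HEAD(pending);

	static void push(struct item *it)
	{
		llist_add(&it->node, &pending);	/* lock-free, any context */
	}

	static void drain(void)
	{
		struct llist_node *batch = llist_del_all(&pending);
		struct item *it, *next;

		if (!batch)
			return;	/* a previous drain already took them */

		batch = llist_reverse_order(batch);	/* restore FIFO */
		llist_for_each_entry_safe(it, next, batch, node) {
			/* process 'it'; it may be freed in here */
		}
	}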
+
static void target_complete_tmr_failure(struct work_struct *work)
{
struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
@@ -1799,8 +1927,8 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
se_tpg = se_sess->se_tpg;
BUG_ON(!se_tpg);
- transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
- 0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
+ __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
+ 0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
/*
* FIXME: Currently expect caller to handle se_cmd->se_tmr_req
* allocation failure.
@@ -2778,9 +2906,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
* invocations before se_cmd descriptor release.
*/
if (ack_kref) {
- if (!kref_get_unless_zero(&se_cmd->cmd_kref))
- return -EINVAL;
-
+ kref_get(&se_cmd->cmd_kref);
se_cmd->se_cmd_flags |= SCF_ACK_KREF;
}
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index bf73cd5f4b04..eec2fd573e2b 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -8,13 +8,12 @@
#include <linux/spinlock.h>
#include <linux/module.h>
-#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
-#include <linux/radix-tree.h>
+#include <linux/xarray.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
@@ -61,25 +60,27 @@
#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
-/* For cmd area, the size is fixed 8MB */
-#define CMDR_SIZE (8 * 1024 * 1024)
+/* For mailbox plus cmd ring, the size is fixed 8MB */
+#define MB_CMDR_SIZE (8 * 1024 * 1024)
+/* Offset of cmd ring is size of mailbox */
+#define CMDR_OFF sizeof(struct tcmu_mailbox)
+#define CMDR_SIZE (MB_CMDR_SIZE - CMDR_OFF)
/*
- * For data area, the block size is PAGE_SIZE and
- * the total size is 256K * PAGE_SIZE.
+ * For data area, the default block size is PAGE_SIZE and
+ * the default total size is 256K * PAGE_SIZE.
*/
-#define DATA_BLOCK_SIZE PAGE_SIZE
-#define DATA_BLOCK_SHIFT PAGE_SHIFT
-#define DATA_BLOCK_BITS_DEF (256 * 1024)
+#define DATA_PAGES_PER_BLK_DEF 1
+#define DATA_AREA_PAGES_DEF (256 * 1024)
-#define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT))
-#define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT))
+#define TCMU_MBS_TO_PAGES(_mbs) ((size_t)_mbs << (20 - PAGE_SHIFT))
+#define TCMU_PAGES_TO_MBS(_pages) (_pages >> (20 - PAGE_SHIFT))
/*
* Default maximum number of global data pages (512K pages, i.e.
* 512K * PAGE_SIZE bytes) above which the unmap thread will be started.
*/
-#define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024)
+#define TCMU_GLOBAL_MAX_PAGES_DEF (512 * 1024)
static u8 tcmu_kern_cmd_reply_supported;
static u8 tcmu_netlink_blocked;
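
As a quick check of the page-based conversion macros above, assuming
PAGE_SHIFT == 12 (4 KiB pages), the shift is 20 - 12 = 8:

	/*
	 * Illustrative arithmetic only:
	 *
	 *   TCMU_MBS_TO_PAGES(1)          == 1 << 8    == 256 pages
	 *   TCMU_PAGES_TO_MBS(256 * 1024) == 256K >> 8 == 1024 MB
	 *                                    (default data area size)
	 *   TCMU_PAGES_TO_MBS(512 * 1024) == 512K >> 8 == 2048 MB
	 *                                    (default global reclaim limit)
	 */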
@@ -111,6 +112,7 @@ struct tcmu_dev {
struct kref kref;
struct se_device se_dev;
+ struct se_dev_plug se_plug;
char *name;
struct se_hba *hba;
@@ -119,22 +121,25 @@ struct tcmu_dev {
#define TCMU_DEV_BIT_BROKEN 1
#define TCMU_DEV_BIT_BLOCKED 2
#define TCMU_DEV_BIT_TMR_NOTIFY 3
+#define TCM_DEV_BIT_PLUGGED 4
unsigned long flags;
struct uio_info uio_info;
struct inode *inode;
- struct tcmu_mailbox *mb_addr;
uint64_t dev_size;
+
+ struct tcmu_mailbox *mb_addr;
+ void *cmdr;
u32 cmdr_size;
u32 cmdr_last_cleaned;
/* Offset of data area from start of mb */
/* Must add data_off and mb_addr to get the address */
size_t data_off;
- size_t data_size;
+ int data_area_mb;
uint32_t max_blocks;
- size_t ring_size;
+ size_t mmap_pages;
struct mutex cmdr_lock;
struct list_head qfull_queue;
@@ -143,9 +148,11 @@ struct tcmu_dev {
uint32_t dbi_max;
uint32_t dbi_thresh;
unsigned long *data_bitmap;
- struct radix_tree_root data_blocks;
+ struct xarray data_pages;
+ uint32_t data_pages_per_blk;
+ uint32_t data_blk_size;
- struct idr commands;
+ struct xarray commands;
struct timer_list cmd_timer;
unsigned int cmd_time_out;
@@ -165,8 +172,6 @@ struct tcmu_dev {
#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
-#define CMDR_OFF sizeof(struct tcmu_mailbox)
-
struct tcmu_cmd {
struct se_cmd *se_cmd;
struct tcmu_dev *tcmu_dev;
@@ -215,9 +220,9 @@ static LIST_HEAD(timed_out_udevs);
static struct kmem_cache *tcmu_cmd_cache;
-static atomic_t global_db_count = ATOMIC_INIT(0);
+static atomic_t global_page_count = ATOMIC_INIT(0);
static struct delayed_work tcmu_unmap_work;
-static int tcmu_global_max_blocks = TCMU_GLOBAL_MAX_BLOCKS_DEF;
+static int tcmu_global_max_pages = TCMU_GLOBAL_MAX_PAGES_DEF;
static int tcmu_set_global_max_data_area(const char *str,
const struct kernel_param *kp)
@@ -233,8 +238,8 @@ static int tcmu_set_global_max_data_area(const char *str,
return -EINVAL;
}
- tcmu_global_max_blocks = TCMU_MBS_TO_BLOCKS(max_area_mb);
- if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
+ tcmu_global_max_pages = TCMU_MBS_TO_PAGES(max_area_mb);
+ if (atomic_read(&global_page_count) > tcmu_global_max_pages)
schedule_delayed_work(&tcmu_unmap_work, 0);
else
cancel_delayed_work_sync(&tcmu_unmap_work);
@@ -245,7 +250,7 @@ static int tcmu_set_global_max_data_area(const char *str,
static int tcmu_get_global_max_data_area(char *buffer,
const struct kernel_param *kp)
{
- return sprintf(buffer, "%d\n", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
+ return sprintf(buffer, "%d\n", TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
}
static const struct kernel_param_ops tcmu_global_max_data_area_op = {
@@ -497,32 +502,39 @@ static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
struct tcmu_cmd *tcmu_cmd,
- int prev_dbi, int *iov_cnt)
+ int prev_dbi, int length, int *iov_cnt)
{
+ XA_STATE(xas, &udev->data_pages, 0);
struct page *page;
- int ret, dbi;
+ int i, cnt, dbi, dpi;
+ int page_cnt = DIV_ROUND_UP(length, PAGE_SIZE);
dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
if (dbi == udev->dbi_thresh)
return -1;
- page = radix_tree_lookup(&udev->data_blocks, dbi);
- if (!page) {
- if (atomic_add_return(1, &global_db_count) >
- tcmu_global_max_blocks)
- schedule_delayed_work(&tcmu_unmap_work, 0);
+ dpi = dbi * udev->data_pages_per_blk;
+ /* Count the number of already allocated pages */
+ xas_set(&xas, dpi);
+ for (cnt = 0; xas_next(&xas) && cnt < page_cnt;)
+ cnt++;
+ for (i = cnt; i < page_cnt; i++) {
/* try to get new page from the mm */
page = alloc_page(GFP_NOIO);
if (!page)
- goto err_alloc;
+ break;
- ret = radix_tree_insert(&udev->data_blocks, dbi, page);
- if (ret)
- goto err_insert;
+ if (xa_store(&udev->data_pages, dpi + i, page, GFP_NOIO)) {
+ __free_page(page);
+ break;
+ }
}
+ if (atomic_add_return(i - cnt, &global_page_count) >
+ tcmu_global_max_pages)
+ schedule_delayed_work(&tcmu_unmap_work, 0);
- if (dbi > udev->dbi_max)
+ if (i && dbi > udev->dbi_max)
udev->dbi_max = dbi;
set_bit(dbi, udev->data_bitmap);
@@ -531,35 +543,27 @@ static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
if (dbi != prev_dbi + 1)
*iov_cnt += 1;
- return dbi;
-err_insert:
- __free_page(page);
-err_alloc:
- atomic_dec(&global_db_count);
- return -1;
+ return i == page_cnt ? dbi : -1;
}
static int tcmu_get_empty_blocks(struct tcmu_dev *udev,
- struct tcmu_cmd *tcmu_cmd, int dbi_cnt)
+ struct tcmu_cmd *tcmu_cmd, int length)
{
/* start value of dbi + 1 must not be a valid dbi */
int dbi = -2;
- int i, iov_cnt = 0;
+ int blk_data_len, iov_cnt = 0;
+ uint32_t blk_size = udev->data_blk_size;
- for (i = 0; i < dbi_cnt; i++) {
- dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, &iov_cnt);
+ for (; length > 0; length -= blk_size) {
+ blk_data_len = min_t(uint32_t, length, blk_size);
+ dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len,
+ &iov_cnt);
if (dbi < 0)
return -1;
}
return iov_cnt;
}
-static inline struct page *
-tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
-{
- return radix_tree_lookup(&udev->data_blocks, dbi);
-}
-
static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
{
kfree(tcmu_cmd->dbi);
@@ -570,14 +574,15 @@ static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd)
{
int i, len;
struct se_cmd *se_cmd = cmd->se_cmd;
+ uint32_t blk_size = cmd->tcmu_dev->data_blk_size;
- cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
+ cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, blk_size);
if (se_cmd->se_cmd_flags & SCF_BIDI) {
BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
for (i = 0, len = 0; i < se_cmd->t_bidi_data_nents; i++)
len += se_cmd->t_bidi_data_sg[i].length;
- cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, DATA_BLOCK_SIZE);
+ cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, blk_size);
cmd->dbi_cnt += cmd->dbi_bidi_cnt;
cmd->data_len_bidi = len;
}
@@ -589,9 +594,8 @@ static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
/* Get the next dbi */
int dbi = tcmu_cmd_get_dbi(cmd);
- /* Do not add more than DATA_BLOCK_SIZE to iov */
- if (len > DATA_BLOCK_SIZE)
- len = DATA_BLOCK_SIZE;
+ /* Do not add more than udev->data_blk_size to iov */
+ len = min_t(int, len, udev->data_blk_size);
/*
* The following code will gather and map the blocks to the same iovec
@@ -603,7 +607,7 @@ static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
(*iov)++;
/* write offset relative to mb_addr */
(*iov)->iov_base = (void __user *)
- (udev->data_off + dbi * DATA_BLOCK_SIZE);
+ (udev->data_off + dbi * udev->data_blk_size);
}
(*iov)->iov_len += len;
@@ -617,7 +621,7 @@ static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
int dbi = -2;
/* We prepare the IOVs for DMA_FROM_DEVICE transfer direction */
- for (; data_length > 0; data_length -= DATA_BLOCK_SIZE)
+ for (; data_length > 0; data_length -= udev->data_blk_size)
dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length);
}
@@ -695,9 +699,11 @@ static inline void tcmu_copy_data(struct tcmu_dev *udev,
struct scatterlist *sg, unsigned int sg_nents,
struct iovec **iov, size_t data_len)
{
+ XA_STATE(xas, &udev->data_pages, 0);
/* start value of dbi + 1 must not be a valid dbi */
int dbi = -2;
- size_t block_remaining, cp_len;
+ size_t page_remaining, cp_len;
+ int page_cnt, page_inx;
struct sg_mapping_iter sg_iter;
unsigned int sg_flags;
struct page *page;
@@ -715,37 +721,48 @@ static inline void tcmu_copy_data(struct tcmu_dev *udev,
data_len);
else
dbi = tcmu_cmd_get_dbi(tcmu_cmd);
- page = tcmu_get_block_page(udev, dbi);
- if (direction == TCMU_DATA_AREA_TO_SG)
- flush_dcache_page(page);
- data_page_start = kmap_atomic(page);
- block_remaining = DATA_BLOCK_SIZE;
-
- while (block_remaining && data_len) {
- if (!sg_miter_next(&sg_iter)) {
- /* set length to 0 to abort outer loop */
- data_len = 0;
- pr_debug("tcmu_move_data: aborting data copy due to exhausted sg_list\n");
- break;
+
+ page_cnt = DIV_ROUND_UP(data_len, PAGE_SIZE);
+ if (page_cnt > udev->data_pages_per_blk)
+ page_cnt = udev->data_pages_per_blk;
+
+ xas_set(&xas, dbi * udev->data_pages_per_blk);
+ for (page_inx = 0; page_inx < page_cnt && data_len; page_inx++) {
+ page = xas_next(&xas);
+
+ if (direction == TCMU_DATA_AREA_TO_SG)
+ flush_dcache_page(page);
+ data_page_start = kmap_atomic(page);
+ page_remaining = PAGE_SIZE;
+
+ while (page_remaining && data_len) {
+ if (!sg_miter_next(&sg_iter)) {
+ /* set length to 0 to abort outer loop */
+ data_len = 0;
+ pr_debug("%s: aborting data copy due to exhausted sg_list\n",
+ __func__);
+ break;
+ }
+ cp_len = min3(sg_iter.length, page_remaining,
+ data_len);
+
+ data_addr = data_page_start +
+ PAGE_SIZE - page_remaining;
+ if (direction == TCMU_SG_TO_DATA_AREA)
+ memcpy(data_addr, sg_iter.addr, cp_len);
+ else
+ memcpy(sg_iter.addr, data_addr, cp_len);
+
+ data_len -= cp_len;
+ page_remaining -= cp_len;
+ sg_iter.consumed = cp_len;
}
- cp_len = min3(sg_iter.length, block_remaining, data_len);
+ sg_miter_stop(&sg_iter);
- data_addr = data_page_start +
- DATA_BLOCK_SIZE - block_remaining;
+ kunmap_atomic(data_page_start);
if (direction == TCMU_SG_TO_DATA_AREA)
- memcpy(data_addr, sg_iter.addr, cp_len);
- else
- memcpy(sg_iter.addr, data_addr, cp_len);
-
- data_len -= cp_len;
- block_remaining -= cp_len;
- sg_iter.consumed = cp_len;
+ flush_dcache_page(page);
}
- sg_miter_stop(&sg_iter);
-
- kunmap_atomic(data_page_start);
- if (direction == TCMU_SG_TO_DATA_AREA)
- flush_dcache_page(page);
}
}
@@ -844,9 +861,9 @@ static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
(udev->max_blocks - udev->dbi_thresh) + space;
if (blocks_left < cmd->dbi_cnt) {
- pr_debug("no data space: only %lu available, but ask for %lu\n",
- blocks_left * DATA_BLOCK_SIZE,
- cmd->dbi_cnt * DATA_BLOCK_SIZE);
+ pr_debug("no data space: only %lu available, but ask for %u\n",
+ blocks_left * udev->data_blk_size,
+ cmd->dbi_cnt * udev->data_blk_size);
return -1;
}
@@ -855,13 +872,12 @@ static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
udev->dbi_thresh = udev->max_blocks;
}
- iov_cnt = tcmu_get_empty_blocks(udev, cmd,
- cmd->dbi_cnt - cmd->dbi_bidi_cnt);
+ iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length);
if (iov_cnt < 0)
return -1;
if (cmd->dbi_bidi_cnt) {
- ret = tcmu_get_empty_blocks(udev, cmd, cmd->dbi_bidi_cnt);
+ ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi);
if (ret < 0)
return -1;
}
@@ -941,7 +957,7 @@ static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size)
if (head_to_end(cmd_head, udev->cmdr_size) < cmd_size) {
size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
- hdr = (void *) mb + CMDR_OFF + cmd_head;
+ hdr = udev->cmdr + cmd_head;
tcmu_hdr_set_op(&hdr->len_op, TCMU_OP_PAD);
tcmu_hdr_set_len(&hdr->len_op, pad_size);
hdr->cmd_id = 0; /* not used for PAD */
@@ -959,6 +975,25 @@ static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size)
return cmd_head;
}
+static void tcmu_unplug_device(struct se_dev_plug *se_plug)
+{
+ struct se_device *se_dev = se_plug->se_dev;
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
+
+ clear_bit(TCM_DEV_BIT_PLUGGED, &udev->flags);
+ uio_event_notify(&udev->uio_info);
+}
+
+static struct se_dev_plug *tcmu_plug_device(struct se_device *se_dev)
+{
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
+
+ if (!test_and_set_bit(TCM_DEV_BIT_PLUGGED, &udev->flags))
+ return &udev->se_plug;
+
+ return NULL;
+}
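
Paired with the TCM_DEV_BIT_PLUGGED check added to queue_cmd_ring()
below, this yields one uio doorbell per batch instead of one per
command. A rough sketch of the net effect for a batch of N commands
(the loop is illustrative; in this patch the core drives it through the
plug_device/unplug_device backend ops):

	se_plug = tcmu_plug_device(se_dev);	/* sets TCM_DEV_BIT_PLUGGED */

	for (i = 0; i < N; i++)
		target_submit(cmds[i]);	/* queue_cmd_ring() sees the bit
					 * set and skips uio_event_notify()
					 */

	if (se_plug)
		tcmu_unplug_device(se_plug);	/* clears the bit and rings
						 * the doorbell once
						 */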
+
/**
* queue_cmd_ring - queue cmd to ring or internally
* @tcmu_cmd: cmd to queue
@@ -977,11 +1012,12 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
struct tcmu_mailbox *mb = udev->mb_addr;
struct tcmu_cmd_entry *entry;
struct iovec *iov;
- int iov_cnt, iov_bidi_cnt, cmd_id;
- uint32_t cmd_head;
+ int iov_cnt, iov_bidi_cnt;
+ uint32_t cmd_id, cmd_head;
uint64_t cdb_off;
+ uint32_t blk_size = udev->data_blk_size;
/* size of data buffer needed */
- size_t data_length = (size_t)tcmu_cmd->dbi_cnt * DATA_BLOCK_SIZE;
+ size_t data_length = (size_t)tcmu_cmd->dbi_cnt * blk_size;
*scsi_err = TCM_NO_SENSE;
@@ -998,9 +1034,9 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
if (!list_empty(&udev->qfull_queue))
goto queue;
- if (data_length > udev->data_size) {
+ if (data_length > (size_t)udev->max_blocks * blk_size) {
pr_warn("TCMU: Request of size %zu is too big for %zu data area\n",
- data_length, udev->data_size);
+ data_length, (size_t)udev->max_blocks * blk_size);
*scsi_err = TCM_INVALID_CDB_FIELD;
return -1;
}
@@ -1031,8 +1067,8 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
*/
goto free_and_queue;
- cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
- if (cmd_id < 0) {
+ if (xa_alloc(&udev->commands, &cmd_id, tcmu_cmd, XA_LIMIT(1, 0xffff),
+ GFP_NOWAIT) < 0) {
pr_err("tcmu: Could not allocate cmd id.\n");
tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
@@ -1046,7 +1082,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
cmd_head = ring_insert_padding(udev, command_size);
- entry = (void *) mb + CMDR_OFF + cmd_head;
+ entry = udev->cmdr + cmd_head;
memset(entry, 0, command_size);
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
@@ -1086,8 +1122,8 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
- /* TODO: only if FLUSH and FUA? */
- uio_event_notify(&udev->uio_info);
+ if (!test_bit(TCM_DEV_BIT_PLUGGED, &udev->flags))
+ uio_event_notify(&udev->uio_info);
return 0;
@@ -1138,7 +1174,7 @@ queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr)
cmd_head = ring_insert_padding(udev, cmd_size);
- entry = (void *)mb + CMDR_OFF + cmd_head;
+ entry = udev->cmdr + cmd_head;
memset(entry, 0, cmd_size);
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_TMR);
tcmu_hdr_set_len(&entry->hdr.len_op, cmd_size);
@@ -1253,7 +1289,7 @@ tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n",
tcmu_tmr_type(tmf), udev->name, i, cmd_cnt);
- tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_KERNEL);
+ tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_NOIO);
if (!tmr)
goto unlock;
@@ -1393,7 +1429,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
- struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
+ struct tcmu_cmd_entry *entry = udev->cmdr + udev->cmdr_last_cleaned;
/*
* Flush max. up to end of cmd ring since current entry might
@@ -1415,7 +1451,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
}
WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
- cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
+ cmd = xa_erase(&udev->commands, entry->hdr.cmd_id);
if (!cmd) {
pr_err("cmd_id %u not found, ring is broken\n",
entry->hdr.cmd_id);
@@ -1432,8 +1468,8 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
if (free_space)
free_space = tcmu_run_tmr_queue(udev);
- if (atomic_read(&global_db_count) > tcmu_global_max_blocks &&
- idr_is_empty(&udev->commands) && list_empty(&udev->qfull_queue)) {
+ if (atomic_read(&global_page_count) > tcmu_global_max_pages &&
+ xa_empty(&udev->commands) && list_empty(&udev->qfull_queue)) {
/*
* Allocated blocks exceeded global block limit, currently no
* more pending or waiting commands so try to reclaim blocks.
@@ -1548,7 +1584,10 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
udev->cmd_time_out = TCMU_TIME_OUT;
udev->qfull_time_out = -1;
- udev->max_blocks = DATA_BLOCK_BITS_DEF;
+ udev->data_pages_per_blk = DATA_PAGES_PER_BLK_DEF;
+ udev->max_blocks = DATA_AREA_PAGES_DEF / udev->data_pages_per_blk;
+ udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF);
+
mutex_init(&udev->cmdr_lock);
INIT_LIST_HEAD(&udev->node);
@@ -1556,12 +1595,12 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
INIT_LIST_HEAD(&udev->qfull_queue);
INIT_LIST_HEAD(&udev->tmr_queue);
INIT_LIST_HEAD(&udev->inflight_queue);
- idr_init(&udev->commands);
+ xa_init_flags(&udev->commands, XA_FLAGS_ALLOC1);
timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
- INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
+ xa_init(&udev->data_pages);
return &udev->se_dev;
}
@@ -1585,19 +1624,24 @@ static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
return -EINVAL;
}
-static void tcmu_blocks_release(struct radix_tree_root *blocks,
- int start, int end)
+static u32 tcmu_blocks_release(struct tcmu_dev *udev, unsigned long first,
+ unsigned long last)
{
- int i;
+ XA_STATE(xas, &udev->data_pages, first * udev->data_pages_per_blk);
struct page *page;
+ u32 pages_freed = 0;
- for (i = start; i < end; i++) {
- page = radix_tree_delete(blocks, i);
- if (page) {
- __free_page(page);
- atomic_dec(&global_db_count);
- }
+ xas_lock(&xas);
+ xas_for_each(&xas, page, (last + 1) * udev->data_pages_per_blk - 1) {
+ xas_store(&xas, NULL);
+ __free_page(page);
+ pages_freed++;
}
+ xas_unlock(&xas);
+
+ atomic_sub(pages_freed, &global_page_count);
+
+ return pages_freed;
}
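
The XA_STATE cursor lets the release path walk the page range once
under the xa_lock rather than doing a lookup-and-delete per page. The
same idiom in isolation, as a hedged sketch over an illustrative xarray
of struct page pointers:

	static unsigned int erase_page_range(struct xarray *pages,
					     unsigned long first,
					     unsigned long last)
	{
		XA_STATE(xas, pages, first);
		struct page *page;
		unsigned int freed = 0;

		xas_lock(&xas);
		xas_for_each(&xas, page, last) {
			xas_store(&xas, NULL);	/* erase current slot */
			__free_page(page);
			freed++;
		}
		xas_unlock(&xas);

		return freed;
	}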
static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev)
@@ -1616,7 +1660,7 @@ static void tcmu_dev_kref_release(struct kref *kref)
struct se_device *dev = &udev->se_dev;
struct tcmu_cmd *cmd;
bool all_expired = true;
- int i;
+ unsigned long i;
vfree(udev->mb_addr);
udev->mb_addr = NULL;
@@ -1628,7 +1672,7 @@ static void tcmu_dev_kref_release(struct kref *kref)
/* Upper layer should drain all requests before calling this */
mutex_lock(&udev->cmdr_lock);
- idr_for_each_entry(&udev->commands, cmd, i) {
+ xa_for_each(&udev->commands, i, cmd) {
if (tcmu_check_and_free_pending_cmd(cmd) != 0)
all_expired = false;
}
@@ -1636,10 +1680,10 @@ static void tcmu_dev_kref_release(struct kref *kref)
tcmu_remove_all_queued_tmr(udev);
if (!list_empty(&udev->qfull_queue))
all_expired = false;
- idr_destroy(&udev->commands);
+ xa_destroy(&udev->commands);
WARN_ON(!all_expired);
- tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
+ tcmu_blocks_release(udev, 0, udev->dbi_max);
bitmap_free(udev->data_bitmap);
mutex_unlock(&udev->cmdr_lock);
@@ -1737,12 +1781,12 @@ static int tcmu_find_mem_index(struct vm_area_struct *vma)
return -1;
}
-static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
+static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi)
{
struct page *page;
mutex_lock(&udev->cmdr_lock);
- page = tcmu_get_block_page(udev, dbi);
+ page = xa_load(&udev->data_pages, dpi);
if (likely(page)) {
mutex_unlock(&udev->cmdr_lock);
return page;
@@ -1752,12 +1796,11 @@ static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
* Userspace messed up and passed in an address not in the
* data iov passed to it.
*/
- pr_err("Invalid addr to data block mapping (dbi %u) on device %s\n",
- dbi, udev->name);
- page = NULL;
+ pr_err("Invalid addr to data page mapping (dpi %u) on device %s\n",
+ dpi, udev->name);
mutex_unlock(&udev->cmdr_lock);
- return page;
+ return NULL;
}
static void tcmu_vma_open(struct vm_area_struct *vma)
@@ -1802,11 +1845,11 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
addr = (void *)(unsigned long)info->mem[mi].addr + offset;
page = vmalloc_to_page(addr);
} else {
- uint32_t dbi;
+ uint32_t dpi;
/* For the dynamically growing data area pages */
- dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
- page = tcmu_try_get_block_page(udev, dbi);
+ dpi = (offset - udev->data_off) / PAGE_SIZE;
+ page = tcmu_try_get_data_page(udev, dpi);
if (!page)
return VM_FAULT_SIGBUS;
}
@@ -1832,7 +1875,7 @@ static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
vma->vm_private_data = udev;
/* Ensure the mmap is exactly the right size */
- if (vma_pages(vma) != (udev->ring_size >> PAGE_SHIFT))
+ if (vma_pages(vma) != udev->mmap_pages)
return -EINVAL;
tcmu_vma_open(vma);
@@ -2065,6 +2108,7 @@ static int tcmu_configure_device(struct se_device *dev)
struct tcmu_dev *udev = TCMU_DEV(dev);
struct uio_info *info;
struct tcmu_mailbox *mb;
+ size_t data_size;
int ret = 0;
ret = tcmu_update_uio_info(udev);
@@ -2081,20 +2125,23 @@ static int tcmu_configure_device(struct se_device *dev)
goto err_bitmap_alloc;
}
- udev->mb_addr = vzalloc(CMDR_SIZE);
- if (!udev->mb_addr) {
+ mb = vzalloc(MB_CMDR_SIZE);
+ if (!mb) {
ret = -ENOMEM;
goto err_vzalloc;
}
/* mailbox fits in first part of CMDR space */
- udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
- udev->data_off = CMDR_SIZE;
- udev->data_size = udev->max_blocks * DATA_BLOCK_SIZE;
+ udev->mb_addr = mb;
+ udev->cmdr = (void *)mb + CMDR_OFF;
+ udev->cmdr_size = CMDR_SIZE;
+ udev->data_off = MB_CMDR_SIZE;
+ data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT;
+ udev->mmap_pages = (data_size + MB_CMDR_SIZE) >> PAGE_SHIFT;
+ udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE;
udev->dbi_thresh = 0; /* Default in Idle state */
/* Initialise the mailbox of the ring buffer */
- mb = udev->mb_addr;
mb->version = TCMU_MAILBOX_VERSION;
mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC |
TCMU_MAILBOX_FLAG_CAP_READ_LEN |
@@ -2103,14 +2150,13 @@ static int tcmu_configure_device(struct se_device *dev)
mb->cmdr_size = udev->cmdr_size;
WARN_ON(!PAGE_ALIGNED(udev->data_off));
- WARN_ON(udev->data_size % PAGE_SIZE);
- WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
+ WARN_ON(data_size % PAGE_SIZE);
info->version = __stringify(TCMU_MAILBOX_VERSION);
info->mem[0].name = "tcm-user command & data buffer";
info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
- info->mem[0].size = udev->ring_size = udev->data_size + CMDR_SIZE;
+ info->mem[0].size = data_size + MB_CMDR_SIZE;
info->mem[0].memtype = UIO_MEM_NONE;
info->irqcontrol = tcmu_irqcontrol;
@@ -2226,16 +2272,16 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
{
struct tcmu_mailbox *mb;
struct tcmu_cmd *cmd;
- int i;
+ unsigned long i;
mutex_lock(&udev->cmdr_lock);
- idr_for_each_entry(&udev->commands, cmd, i) {
+ xa_for_each(&udev->commands, i, cmd) {
pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
cmd->cmd_id, udev->name,
test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
- idr_remove(&udev->commands, i);
+ xa_erase(&udev->commands, i);
if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
WARN_ON(!cmd->se_cmd);
list_del_init(&cmd->queue_entry);
@@ -2285,7 +2331,8 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
enum {
Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
- Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_err,
+ Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_data_pages_per_blk,
+ Opt_err,
};
static match_table_t tokens = {
@@ -2295,6 +2342,7 @@ static match_table_t tokens = {
{Opt_hw_max_sectors, "hw_max_sectors=%d"},
{Opt_nl_reply_supported, "nl_reply_supported=%d"},
{Opt_max_data_area_mb, "max_data_area_mb=%d"},
+ {Opt_data_pages_per_blk, "data_pages_per_blk=%d"},
{Opt_err, NULL}
};
@@ -2321,6 +2369,7 @@ static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
{
int val, ret;
+ uint32_t pages_per_blk = udev->data_pages_per_blk;
ret = match_int(arg, &val);
if (ret < 0) {
@@ -2328,11 +2377,20 @@ static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
ret);
return ret;
}
-
if (val <= 0) {
pr_err("Invalid max_data_area %d.\n", val);
return -EINVAL;
}
+ if (val > TCMU_PAGES_TO_MBS(tcmu_global_max_pages)) {
+ pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
+ val, TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
+ val = TCMU_PAGES_TO_MBS(tcmu_global_max_pages);
+ }
+ if (TCMU_MBS_TO_PAGES(val) < pages_per_blk) {
+ pr_err("Invalid max_data_area %d (%zu pages): smaller than data_pages_per_blk (%u pages).\n",
+ val, TCMU_MBS_TO_PAGES(val), pages_per_blk);
+ return -EINVAL;
+ }
mutex_lock(&udev->cmdr_lock);
if (udev->data_bitmap) {
@@ -2341,13 +2399,42 @@ static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
goto unlock;
}
- udev->max_blocks = TCMU_MBS_TO_BLOCKS(val);
- if (udev->max_blocks > tcmu_global_max_blocks) {
- pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
- val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
- udev->max_blocks = tcmu_global_max_blocks;
+ udev->data_area_mb = val;
+ udev->max_blocks = TCMU_MBS_TO_PAGES(val) / pages_per_blk;
+
+unlock:
+ mutex_unlock(&udev->cmdr_lock);
+ return ret;
+}
+
+static int tcmu_set_data_pages_per_blk(struct tcmu_dev *udev, substring_t *arg)
+{
+ int val, ret;
+
+ ret = match_int(arg, &val);
+ if (ret < 0) {
+ pr_err("match_int() failed for data_pages_per_blk=. Error %d.\n",
+ ret);
+ return ret;
+ }
+
+ if (val > TCMU_MBS_TO_PAGES(udev->data_area_mb)) {
+ pr_err("Invalid data_pages_per_blk %d: greater than max_data_area_mb %d -> %zd pages).\n",
+ val, udev->data_area_mb,
+ TCMU_MBS_TO_PAGES(udev->data_area_mb));
+ return -EINVAL;
+ }
+
+ mutex_lock(&udev->cmdr_lock);
+ if (udev->data_bitmap) {
+ pr_err("Cannot set data_pages_per_blk after it has been enabled.\n");
+ ret = -EINVAL;
+ goto unlock;
}
+ udev->data_pages_per_blk = val;
+ udev->max_blocks = TCMU_MBS_TO_PAGES(udev->data_area_mb) / val;
+
unlock:
mutex_unlock(&udev->cmdr_lock);
return ret;
@@ -2404,6 +2491,9 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
case Opt_max_data_area_mb:
ret = tcmu_set_max_blocks_param(udev, &args[0]);
break;
+ case Opt_data_pages_per_blk:
+ ret = tcmu_set_data_pages_per_blk(udev, &args[0]);
+ break;
default:
break;
}
@@ -2424,8 +2514,8 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
bl = sprintf(b + bl, "Config: %s ",
udev->dev_config[0] ? udev->dev_config : "NULL");
bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
- bl += sprintf(b + bl, "MaxDataAreaMB: %u\n",
- TCMU_BLOCKS_TO_MBS(udev->max_blocks));
+ bl += sprintf(b + bl, "MaxDataAreaMB: %u ", udev->data_area_mb);
+ bl += sprintf(b + bl, "DataPagesPerBlk: %u\n", udev->data_pages_per_blk);
return bl;
}
@@ -2519,11 +2609,21 @@ static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
- return snprintf(page, PAGE_SIZE, "%u\n",
- TCMU_BLOCKS_TO_MBS(udev->max_blocks));
+ return snprintf(page, PAGE_SIZE, "%u\n", udev->data_area_mb);
}
CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);
+static ssize_t tcmu_data_pages_per_blk_show(struct config_item *item,
+ char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", udev->data_pages_per_blk);
+}
+CONFIGFS_ATTR_RO(tcmu_, data_pages_per_blk);
+
static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
@@ -2835,6 +2935,7 @@ static struct configfs_attribute *tcmu_attrib_attrs[] = {
&tcmu_attr_cmd_time_out,
&tcmu_attr_qfull_time_out,
&tcmu_attr_max_data_area_mb,
+ &tcmu_attr_data_pages_per_blk,
&tcmu_attr_dev_config,
&tcmu_attr_dev_size,
&tcmu_attr_emulate_write_cache,
@@ -2863,6 +2964,8 @@ static struct target_backend_ops tcmu_ops = {
.configure_device = tcmu_configure_device,
.destroy_device = tcmu_destroy_device,
.free_device = tcmu_free_device,
+ .unplug_device = tcmu_unplug_device,
+ .plug_device = tcmu_plug_device,
.parse_cdb = tcmu_parse_cdb,
.tmr_notify = tcmu_tmr_notify,
.set_configfs_dev_params = tcmu_set_configfs_dev_params,
@@ -2876,9 +2979,10 @@ static void find_free_blocks(void)
{
struct tcmu_dev *udev;
loff_t off;
- u32 start, end, block, total_freed = 0;
+ u32 pages_freed, total_pages_freed = 0;
+ u32 start, end, block, total_blocks_freed = 0;
- if (atomic_read(&global_db_count) <= tcmu_global_max_blocks)
+ if (atomic_read(&global_page_count) <= tcmu_global_max_pages)
return;
mutex_lock(&root_udev_mutex);
@@ -2919,20 +3023,22 @@ static void find_free_blocks(void)
}
/* Here will truncate the data area from off */
- off = udev->data_off + start * DATA_BLOCK_SIZE;
+ off = udev->data_off + (loff_t)start * udev->data_blk_size;
unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
/* Release the block pages */
- tcmu_blocks_release(&udev->data_blocks, start, end);
+ pages_freed = tcmu_blocks_release(udev, start, end - 1);
mutex_unlock(&udev->cmdr_lock);
- total_freed += end - start;
- pr_debug("Freed %u blocks (total %u) from %s.\n", end - start,
- total_freed, udev->name);
+ total_pages_freed += pages_freed;
+ total_blocks_freed += end - start;
+ pr_debug("Freed %u pages (total %u) from %u blocks (total %u) from %s.\n",
+ pages_freed, total_pages_freed, end - start,
+ total_blocks_freed, udev->name);
}
mutex_unlock(&root_udev_mutex);
- if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
+ if (atomic_read(&global_page_count) > tcmu_global_max_pages)
schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
}
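
One detail in the hunk above is the new (loff_t) cast: start and
udev->data_blk_size are both 32-bit, so the byte offset must be widened
before the multiply, not after. An illustrative overflow case:

	/*
	 * Illustrative values only:
	 *
	 *   start = 0x200000 blocks, data_blk_size = 0x1000 (4 KiB)
	 *
	 *   start * data_blk_size          -> 0x200000000 truncated to
	 *                                     32 bits == 0
	 *   (loff_t)start * data_blk_size  -> 0x200000000 == 8 GiB,
	 *                                     the intended offset
	 */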
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 66d6f1d06f21..d31ed071cb08 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -554,7 +554,7 @@ static int target_xcopy_setup_pt_cmd(
}
cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
- if (target_cmd_init_cdb(cmd, cdb))
+ if (target_cmd_init_cdb(cmd, cdb, GFP_KERNEL))
return -EINVAL;
cmd->tag = 0;
@@ -615,8 +615,8 @@ static int target_xcopy_read_source(
pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
(unsigned long long)src_lba, src_sectors, length);
- transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
- DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0);
+ __target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
+ DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0);
rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, src_dev, &cdb[0],
remote_port);
@@ -660,8 +660,8 @@ static int target_xcopy_write_destination(
pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
(unsigned long long)dst_lba, dst_sectors, length);
- transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
- DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0);
+ __target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
+ DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0);
rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, dst_dev, &cdb[0],
remote_port);
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 768f250680d9..410b723f9d79 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -543,16 +543,22 @@ static void ft_send_work(struct work_struct *work)
fc_seq_set_resp(cmd->seq, ft_recv_seq, cmd);
cmd->se_cmd.tag = fc_seq_exch(cmd->seq)->rxid;
+
/*
* Use a single se_cmd->cmd_kref as we expect to release se_cmd
* directly from ft_check_stop_free callback in response path.
*/
- if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
- &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
- ntohl(fcp->fc_dl), task_attr, data_dir,
- TARGET_SCF_ACK_KREF))
+ if (target_init_cmd(&cmd->se_cmd, cmd->sess->se_sess,
+ &cmd->ft_sense_buffer[0],
+ scsilun_to_int(&fcp->fc_lun), ntohl(fcp->fc_dl),
+ task_attr, data_dir, TARGET_SCF_ACK_KREF))
goto err;
+ if (target_submit_prep(&cmd->se_cmd, fcp->fc_cdb, NULL, 0, NULL, 0,
+ NULL, 0, GFP_KERNEL))
+ return;
+
+ target_submit(&cmd->se_cmd);
pr_debug("r_ctl %x target_submit_cmd %p\n", fh->fh_r_ctl, cmd);
return;
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 23ce506d5402..593540da9346 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -410,7 +410,7 @@ not_target:
}
/**
- * tcm_fcp_prli() - Handle incoming or outgoing PRLI for the FCP target
+ * ft_prli() - Handle incoming or outgoing PRLI for the FCP target
* @rdata: remote port private
* @spp_len: service parameter page length
* @rspp: received service parameter page (NULL for outgoing PRLI)