path: root/drivers/target/target_core_user.c
Diffstat (limited to 'drivers/target/target_core_user.c')
-rw-r--r--  drivers/target/target_core_user.c | 447
1 file changed, 397 insertions(+), 50 deletions(-)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index beb5f098f32d..80ee130f8253 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -87,6 +87,8 @@
/* Default maximum of the global data blocks(512K * PAGE_SIZE) */
#define TCMU_GLOBAL_MAX_BLOCKS (512 * 1024)
+static u8 tcmu_kern_cmd_reply_supported;
+
static struct device *tcmu_root_device;
struct tcmu_hba {
@@ -95,6 +97,13 @@ struct tcmu_hba {
#define TCMU_CONFIG_LEN 256
+struct tcmu_nl_cmd {
+ /* wake up thread waiting for reply */
+ struct completion complete;
+ int cmd;
+ int status;
+};
+
struct tcmu_dev {
struct list_head node;
struct kref kref;
@@ -135,6 +144,11 @@ struct tcmu_dev {
struct timer_list timeout;
unsigned int cmd_time_out;
+ spinlock_t nl_cmd_lock;
+ struct tcmu_nl_cmd curr_nl_cmd;
+ /* wake up threads waiting on curr_nl_cmd */
+ wait_queue_head_t nl_cmd_wq;
+
char dev_config[TCMU_CONFIG_LEN];
};
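
The three fields added above implement a one-outstanding-netlink-command-per-device handshake: a sender claims curr_nl_cmd under nl_cmd_lock, sleeps on its completion until the reply handler records a status, and any other sender waits on nl_cmd_wq until the slot is released. Below is a minimal userspace analog of that handshake, using pthreads in place of the kernel's spinlock, waitqueue and completion; all names are illustrative, not part of the driver.

/* Illustrative userspace analog of the curr_nl_cmd handshake.
 * Build with: cc -pthread handshake.c
 */
#include <pthread.h>
#include <stdio.h>

#define CMD_UNSPEC 0

struct nl_slot {
	pthread_mutex_t lock;      /* stands in for nl_cmd_lock */
	pthread_cond_t  slot_free; /* stands in for nl_cmd_wq */
	pthread_cond_t  done;      /* stands in for the completion */
	int cmd;                   /* outstanding command, or CMD_UNSPEC */
	int completed;
	int status;
};

/* Claim the single slot, blocking while another command is outstanding. */
static void nl_submit(struct nl_slot *s, int cmd)
{
	pthread_mutex_lock(&s->lock);
	while (s->cmd != CMD_UNSPEC)
		pthread_cond_wait(&s->slot_free, &s->lock);
	s->cmd = cmd;
	s->completed = 0;
	s->status = 0;
	pthread_mutex_unlock(&s->lock);
}

/* Reply path: record the status and wake the waiting sender. */
static void nl_complete(struct nl_slot *s, int status)
{
	pthread_mutex_lock(&s->lock);
	s->status = status;
	s->completed = 1;
	pthread_cond_signal(&s->done);
	pthread_mutex_unlock(&s->lock);
}

/* Sender side: wait for the reply, then release the slot for others. */
static int nl_wait(struct nl_slot *s)
{
	int ret;

	pthread_mutex_lock(&s->lock);
	while (!s->completed)
		pthread_cond_wait(&s->done, &s->lock);
	ret = s->status;
	s->cmd = CMD_UNSPEC;
	pthread_cond_broadcast(&s->slot_free);
	pthread_mutex_unlock(&s->lock);
	return ret;
}

static struct nl_slot slot = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.slot_free = PTHREAD_COND_INITIALIZER,
	.done = PTHREAD_COND_INITIALIZER,
	.cmd = CMD_UNSPEC,
};

/* Stands in for the genetlink reply handler acking with status 0. */
static void *replier(void *arg)
{
	nl_complete(&slot, 0);
	return NULL;
}

int main(void)
{
	pthread_t t;

	nl_submit(&slot, 1);
	pthread_create(&t, NULL, replier, NULL);
	printf("reply status %d\n", nl_wait(&slot));
	pthread_join(&t, NULL);
	return 0;
}
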
@@ -178,16 +192,128 @@ static const struct genl_multicast_group tcmu_mcgrps[] = {
[TCMU_MCGRP_CONFIG] = { .name = "config", },
};
+static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
+ [TCMU_ATTR_DEVICE] = { .type = NLA_STRING },
+ [TCMU_ATTR_MINOR] = { .type = NLA_U32 },
+ [TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 },
+ [TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 },
+ [TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
+};
+
+static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
+{
+ struct se_device *dev;
+ struct tcmu_dev *udev;
+ struct tcmu_nl_cmd *nl_cmd;
+ int dev_id, rc, ret = 0;
+ bool is_removed = (completed_cmd == TCMU_CMD_REMOVED_DEVICE);
+
+ if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
+ !info->attrs[TCMU_ATTR_DEVICE_ID]) {
+ printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
+ return -EINVAL;
+ }
+
+ dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
+ rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);
+
+ dev = target_find_device(dev_id, !is_removed);
+ if (!dev) {
+ printk(KERN_ERR "tcmu nl cmd %u/%u completion could not find device with dev id %u.\n",
+ completed_cmd, rc, dev_id);
+ return -ENODEV;
+ }
+ udev = TCMU_DEV(dev);
+
+ spin_lock(&udev->nl_cmd_lock);
+ nl_cmd = &udev->curr_nl_cmd;
+
+ pr_debug("genl cmd done got id %d curr %d done %d rc %d\n", dev_id,
+ nl_cmd->cmd, completed_cmd, rc);
+
+ if (nl_cmd->cmd != completed_cmd) {
+ printk(KERN_ERR "Mismatched commands (Expecting reply for %d. Current %d).\n",
+ completed_cmd, nl_cmd->cmd);
+ ret = -EINVAL;
+ } else {
+ nl_cmd->status = rc;
+ }
+
+ spin_unlock(&udev->nl_cmd_lock);
+ if (!is_removed)
+ target_undepend_item(&dev->dev_group.cg_item);
+ if (!ret)
+ complete(&nl_cmd->complete);
+ return ret;
+}
+
+static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
+{
+ return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
+}
+
+static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
+{
+ return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
+}
+
+static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
+}
+
+static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
+ tcmu_kern_cmd_reply_supported =
+ nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
+ printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
+ tcmu_kern_cmd_reply_supported);
+ }
+
+ return 0;
+}
+
+static const struct genl_ops tcmu_genl_ops[] = {
+ {
+ .cmd = TCMU_CMD_SET_FEATURES,
+ .flags = GENL_ADMIN_PERM,
+ .policy = tcmu_attr_policy,
+ .doit = tcmu_genl_set_features,
+ },
+ {
+ .cmd = TCMU_CMD_ADDED_DEVICE_DONE,
+ .flags = GENL_ADMIN_PERM,
+ .policy = tcmu_attr_policy,
+ .doit = tcmu_genl_add_dev_done,
+ },
+ {
+ .cmd = TCMU_CMD_REMOVED_DEVICE_DONE,
+ .flags = GENL_ADMIN_PERM,
+ .policy = tcmu_attr_policy,
+ .doit = tcmu_genl_rm_dev_done,
+ },
+ {
+ .cmd = TCMU_CMD_RECONFIG_DEVICE_DONE,
+ .flags = GENL_ADMIN_PERM,
+ .policy = tcmu_attr_policy,
+ .doit = tcmu_genl_reconfig_dev_done,
+ },
+};
+
/* Our generic netlink family */
static struct genl_family tcmu_genl_family __ro_after_init = {
.module = THIS_MODULE,
.hdrsize = 0,
.name = "TCM-USER",
- .version = 1,
+ .version = 2,
.maxattr = TCMU_ATTR_MAX,
.mcgrps = tcmu_mcgrps,
.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
.netnsok = true,
+ .ops = tcmu_genl_ops,
+ .n_ops = ARRAY_SIZE(tcmu_genl_ops),
};
#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
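
Bumping the family to version 2 and registering tcmu_genl_ops means userspace can (a) announce reply support once via TCMU_CMD_SET_FEATURES with TCMU_ATTR_SUPP_KERN_CMD_REPLY and (b) acknowledge each add/remove/reconfig event with the matching *_DONE command carrying TCMU_ATTR_DEVICE_ID and TCMU_ATTR_CMD_STATUS. A rough sketch of that send side with libnl-3, assuming libnl-genl-3 and a uapi <linux/target_core_user.h> that defines these enums; the device id 3 is a placeholder and error handling is trimmed.

/* Rough userspace sketch (libnl-3): advertise reply support, then ack
 * one device-added event.  The ops use GENL_ADMIN_PERM, so this needs
 * CAP_NET_ADMIN.  Not production code.
 */
#include <stdint.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/target_core_user.h>

static int send_cmd(struct nl_sock *sk, int family, uint8_t cmd,
		    uint32_t dev_id, int32_t status, int set_features)
{
	struct nl_msg *msg = nlmsg_alloc();
	int ret;

	if (!msg)
		return -1;
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0, cmd, 2);
	if (set_features) {
		nla_put_u8(msg, TCMU_ATTR_SUPP_KERN_CMD_REPLY, 1);
	} else {
		nla_put_u32(msg, TCMU_ATTR_DEVICE_ID, dev_id);
		nla_put_s32(msg, TCMU_ATTR_CMD_STATUS, status);
	}
	ret = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	return ret < 0 ? -1 : 0;
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "TCM-USER");

	/* 1) tell the kernel we will reply to its netlink events */
	send_cmd(sk, family, TCMU_CMD_SET_FEATURES, 0, 0, 1);

	/* 2) later, after handling an added-device event for (hypothetical)
	 *    device id 3, report success (status 0) back to the kernel   */
	send_cmd(sk, family, TCMU_CMD_ADDED_DEVICE_DONE, 3, 0, 0);

	nl_socket_free(sk);
	return 0;
}
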
@@ -216,7 +342,6 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
page = radix_tree_lookup(&udev->data_blocks, dbi);
if (!page) {
-
if (atomic_add_return(1, &global_db_count) >
TCMU_GLOBAL_MAX_BLOCKS) {
atomic_dec(&global_db_count);
@@ -226,14 +351,11 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
/* try to get new page from the mm */
page = alloc_page(GFP_KERNEL);
if (!page)
- return false;
+ goto err_alloc;
ret = radix_tree_insert(&udev->data_blocks, dbi, page);
- if (ret) {
- __free_page(page);
- return false;
- }
-
+ if (ret)
+ goto err_insert;
}
if (dbi > udev->dbi_max)
@@ -243,6 +365,11 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
tcmu_cmd_set_dbi(tcmu_cmd, dbi);
return true;
+err_insert:
+ __free_page(page);
+err_alloc:
+ atomic_dec(&global_db_count);
+ return false;
}
static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
@@ -401,7 +528,7 @@ static inline size_t get_block_offset_user(struct tcmu_dev *dev,
DATA_BLOCK_SIZE - remaining;
}
-static inline size_t iov_tail(struct tcmu_dev *udev, struct iovec *iov)
+static inline size_t iov_tail(struct iovec *iov)
{
return (size_t)iov->iov_base + iov->iov_len;
}
@@ -437,10 +564,10 @@ static int scatter_data_area(struct tcmu_dev *udev,
to_offset = get_block_offset_user(udev, dbi,
block_remaining);
offset = DATA_BLOCK_SIZE - block_remaining;
- to = (void *)(unsigned long)to + offset;
+ to += offset;
if (*iov_cnt != 0 &&
- to_offset == iov_tail(udev, *iov)) {
+ to_offset == iov_tail(*iov)) {
(*iov)->iov_len += copy_bytes;
} else {
new_iov(iov, iov_cnt, udev);
@@ -510,7 +637,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
copy_bytes = min_t(size_t, sg_remaining,
block_remaining);
offset = DATA_BLOCK_SIZE - block_remaining;
- from = (void *)(unsigned long)from + offset;
+ from += offset;
tcmu_flush_dcache_range(from, copy_bytes);
memcpy(to + sg->length - sg_remaining, from,
copy_bytes);
@@ -596,10 +723,7 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
}
}
- if (!tcmu_get_empty_blocks(udev, cmd))
- return false;
-
- return true;
+ return tcmu_get_empty_blocks(udev, cmd);
}
static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
@@ -699,25 +823,24 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
entry = (void *) mb + CMDR_OFF + cmd_head;
- tcmu_flush_dcache_range(entry, sizeof(*entry));
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
entry->hdr.cmd_id = 0; /* not used for PAD */
entry->hdr.kflags = 0;
entry->hdr.uflags = 0;
+ tcmu_flush_dcache_range(entry, sizeof(*entry));
UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
+ tcmu_flush_dcache_range(mb, sizeof(*mb));
cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
WARN_ON(cmd_head != 0);
}
entry = (void *) mb + CMDR_OFF + cmd_head;
- tcmu_flush_dcache_range(entry, sizeof(*entry));
+ memset(entry, 0, command_size);
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
entry->hdr.cmd_id = tcmu_cmd->cmd_id;
- entry->hdr.kflags = 0;
- entry->hdr.uflags = 0;
/* Handle allocating space from the data area */
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
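
The hunk above changes the publication order in the command ring: the pad or command entry is now fully written (and its cache range flushed) before the mailbox cmd_head that makes it visible to userspace is updated and flushed. The same producer pattern is shown below with C11 release/acquire atomics, as an illustrative userspace analog rather than the driver's actual flush calls.

/* Minimal single-producer/single-consumer analog of "fill the entry,
 * then publish the head".  C11 atomics stand in for
 * tcmu_flush_dcache_range(); illustrative only.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

#define RING_ENTRIES 8

struct entry { uint16_t cmd_id; uint32_t len; };

struct ring {
	struct entry slots[RING_ENTRIES];
	_Atomic uint32_t head;	/* producer publishes here */
	_Atomic uint32_t tail;	/* consumer advances here  */
};

/* Producer: the entry must be complete before the head update makes it
 * visible, hence the release store (the kernel gets the same effect by
 * flushing the entry before flushing the updated mailbox head).        */
static int ring_post(struct ring *r, uint16_t cmd_id, uint32_t len)
{
	uint32_t head = atomic_load_explicit(&r->head, memory_order_relaxed);
	uint32_t tail = atomic_load_explicit(&r->tail, memory_order_acquire);
	struct entry *e;

	if (head - tail == RING_ENTRIES)
		return -1;			/* ring full */

	e = &r->slots[head % RING_ENTRIES];
	memset(e, 0, sizeof(*e));		/* mirror the memset of the entry */
	e->cmd_id = cmd_id;
	e->len = len;

	atomic_store_explicit(&r->head, head + 1, memory_order_release);
	return 0;
}

/* Consumer: acquire pairs with the producer's release, so the entry
 * contents are guaranteed to be visible once the new head is seen.   */
static int ring_poll(struct ring *r, struct entry *out)
{
	uint32_t tail = atomic_load_explicit(&r->tail, memory_order_relaxed);

	if (atomic_load_explicit(&r->head, memory_order_acquire) == tail)
		return -1;			/* empty */

	*out = r->slots[tail % RING_ENTRIES];
	atomic_store_explicit(&r->tail, tail + 1, memory_order_release);
	return 0;
}
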
@@ -736,11 +859,10 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
entry->req.iov_cnt = iov_cnt;
- entry->req.iov_dif_cnt = 0;
/* Handle BIDI commands */
+ iov_cnt = 0;
if (se_cmd->se_cmd_flags & SCF_BIDI) {
- iov_cnt = 0;
iov++;
ret = scatter_data_area(udev, tcmu_cmd,
se_cmd->t_bidi_data_sg,
@@ -753,8 +875,8 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
pr_err("tcmu: alloc and scatter bidi data failed\n");
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- entry->req.iov_bidi_cnt = iov_cnt;
}
+ entry->req.iov_bidi_cnt = iov_cnt;
/*
* Recalculate the command's base size and size according
@@ -830,8 +952,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
cmd->se_cmd);
entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
} else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
- memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
- se_cmd->scsi_sense_length);
+ transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
/* Get Data-In buffer before clean up */
gather_data_area(udev, cmd, true);
@@ -989,6 +1110,9 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
setup_timer(&udev->timeout, tcmu_device_timedout,
(unsigned long)udev);
+ init_waitqueue_head(&udev->nl_cmd_wq);
+ spin_lock_init(&udev->nl_cmd_lock);
+
return &udev->se_dev;
}
@@ -1140,6 +1264,7 @@ static int tcmu_open(struct uio_info *info, struct inode *inode)
return -EBUSY;
udev->inode = inode;
+ kref_get(&udev->kref);
pr_debug("open\n");
@@ -1171,12 +1296,59 @@ static int tcmu_release(struct uio_info *info, struct inode *inode)
clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
pr_debug("close\n");
- /* release ref from configure */
+ /* release ref from open */
kref_put(&udev->kref, tcmu_dev_kref_release);
return 0;
}
-static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int minor)
+static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
+{
+ struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
+
+ if (!tcmu_kern_cmd_reply_supported)
+ return;
+relock:
+ spin_lock(&udev->nl_cmd_lock);
+
+ if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
+ spin_unlock(&udev->nl_cmd_lock);
+ pr_debug("sleeping for open nl cmd\n");
+ wait_event(udev->nl_cmd_wq, (nl_cmd->cmd == TCMU_CMD_UNSPEC));
+ goto relock;
+ }
+
+ memset(nl_cmd, 0, sizeof(*nl_cmd));
+ nl_cmd->cmd = cmd;
+ init_completion(&nl_cmd->complete);
+
+ spin_unlock(&udev->nl_cmd_lock);
+}
+
+static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
+{
+ struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
+ int ret;
+ DEFINE_WAIT(__wait);
+
+ if (!tcmu_kern_cmd_reply_supported)
+ return 0;
+
+ pr_debug("sleeping for nl reply\n");
+ wait_for_completion(&nl_cmd->complete);
+
+ spin_lock(&udev->nl_cmd_lock);
+ nl_cmd->cmd = TCMU_CMD_UNSPEC;
+ ret = nl_cmd->status;
+ nl_cmd->status = 0;
+ spin_unlock(&udev->nl_cmd_lock);
+
+ wake_up_all(&udev->nl_cmd_wq);
+
+ return ret;
+}
+
+static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd,
+ int reconfig_attr, const void *reconfig_data)
{
struct sk_buff *skb;
void *msg_header;
@@ -1190,22 +1362,51 @@ static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int mino
if (!msg_header)
goto free_skb;
- ret = nla_put_string(skb, TCMU_ATTR_DEVICE, name);
+ ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
+ if (ret < 0)
+ goto free_skb;
+
+ ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
if (ret < 0)
goto free_skb;
- ret = nla_put_u32(skb, TCMU_ATTR_MINOR, minor);
+ ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
if (ret < 0)
goto free_skb;
+ if (cmd == TCMU_CMD_RECONFIG_DEVICE) {
+ switch (reconfig_attr) {
+ case TCMU_ATTR_DEV_CFG:
+ ret = nla_put_string(skb, reconfig_attr, reconfig_data);
+ break;
+ case TCMU_ATTR_DEV_SIZE:
+ ret = nla_put_u64_64bit(skb, reconfig_attr,
+ *((u64 *)reconfig_data),
+ TCMU_ATTR_PAD);
+ break;
+ case TCMU_ATTR_WRITECACHE:
+ ret = nla_put_u8(skb, reconfig_attr,
+ *((u8 *)reconfig_data));
+ break;
+ default:
+ BUG();
+ }
+
+ if (ret < 0)
+ goto free_skb;
+ }
+
genlmsg_end(skb, msg_header);
+ tcmu_init_genl_cmd_reply(udev, cmd);
+
ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
TCMU_MCGRP_CONFIG, GFP_KERNEL);
-
/* We don't care if no one is listening */
if (ret == -ESRCH)
ret = 0;
+ if (!ret)
+ ret = tcmu_wait_genl_cmd_reply(udev);
return ret;
free_skb:
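
For TCMU_CMD_RECONFIG_DEVICE the kernel attaches exactly one of TCMU_ATTR_DEV_CFG (string), TCMU_ATTR_DEV_SIZE (u64) or TCMU_ATTR_WRITECACHE (u8) alongside the device name, minor and device id. A hedged sketch of how a daemon's event callback might pick such an event apart with libnl-3; socket setup, multicast subscription and sending the *_DONE reply are omitted (see the earlier sketch for the reply side).

/* Rough sketch of parsing one TCM-USER multicast event with libnl-3.
 * A real daemon would register this with
 * nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, handle_event, NULL).
 */
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/handlers.h>
#include <netlink/genl/genl.h>
#include <linux/genetlink.h>
#include <linux/target_core_user.h>

static int handle_event(struct nl_msg *msg, void *arg)
{
	struct nlattr *attrs[TCMU_ATTR_MAX + 1];
	struct genlmsghdr *gh = nlmsg_data(nlmsg_hdr(msg));

	if (genlmsg_parse(nlmsg_hdr(msg), 0, attrs, TCMU_ATTR_MAX, NULL) < 0)
		return NL_SKIP;

	if (gh->cmd == TCMU_CMD_RECONFIG_DEVICE) {
		if (attrs[TCMU_ATTR_DEV_CFG])
			printf("new config: %s\n",
			       nla_get_string(attrs[TCMU_ATTR_DEV_CFG]));
		else if (attrs[TCMU_ATTR_DEV_SIZE])
			printf("new size: %llu\n", (unsigned long long)
			       nla_get_u64(attrs[TCMU_ATTR_DEV_SIZE]));
		else if (attrs[TCMU_ATTR_WRITECACHE])
			printf("write cache: %u\n",
			       nla_get_u8(attrs[TCMU_ATTR_WRITECACHE]));
	}
	/* after acting on the event, the daemon sends the matching
	 * TCMU_CMD_*_DONE with TCMU_ATTR_DEVICE_ID and TCMU_ATTR_CMD_STATUS */
	return NL_OK;
}
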
@@ -1213,19 +1414,14 @@ free_skb:
return ret;
}
-static int tcmu_configure_device(struct se_device *dev)
+static int tcmu_update_uio_info(struct tcmu_dev *udev)
{
- struct tcmu_dev *udev = TCMU_DEV(dev);
struct tcmu_hba *hba = udev->hba->hba_ptr;
struct uio_info *info;
- struct tcmu_mailbox *mb;
- size_t size;
- size_t used;
- int ret = 0;
+ size_t size, used;
char *str;
info = &udev->uio_info;
-
size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
udev->dev_config);
size += 1; /* for \0 */
@@ -1234,12 +1430,27 @@ static int tcmu_configure_device(struct se_device *dev)
return -ENOMEM;
used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);
-
if (udev->dev_config[0])
snprintf(str + used, size - used, "/%s", udev->dev_config);
info->name = str;
+ return 0;
+}
+
+static int tcmu_configure_device(struct se_device *dev)
+{
+ struct tcmu_dev *udev = TCMU_DEV(dev);
+ struct uio_info *info;
+ struct tcmu_mailbox *mb;
+ int ret = 0;
+
+ ret = tcmu_update_uio_info(udev);
+ if (ret)
+ return ret;
+
+ info = &udev->uio_info;
+
udev->mb_addr = vzalloc(CMDR_SIZE);
if (!udev->mb_addr) {
ret = -ENOMEM;
@@ -1290,6 +1501,8 @@ static int tcmu_configure_device(struct se_device *dev)
/* Other attributes can be configured in userspace */
if (!dev->dev_attrib.hw_max_sectors)
dev->dev_attrib.hw_max_sectors = 128;
+ if (!dev->dev_attrib.emulate_write_cache)
+ dev->dev_attrib.emulate_write_cache = 0;
dev->dev_attrib.hw_queue_depth = 128;
/*
@@ -1298,8 +1511,7 @@ static int tcmu_configure_device(struct se_device *dev)
*/
kref_get(&udev->kref);
- ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
- udev->uio_info.uio_dev->minor);
+ ret = tcmu_netlink_event(udev, TCMU_CMD_ADDED_DEVICE, 0, NULL);
if (ret)
goto err_netlink;
@@ -1355,6 +1567,14 @@ static void tcmu_blocks_release(struct tcmu_dev *udev)
static void tcmu_free_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
+
+ /* release ref from init */
+ kref_put(&udev->kref, tcmu_dev_kref_release);
+}
+
+static void tcmu_destroy_device(struct se_device *dev)
+{
+ struct tcmu_dev *udev = TCMU_DEV(dev);
struct tcmu_cmd *cmd;
bool all_expired = true;
int i;
@@ -1379,14 +1599,11 @@ static void tcmu_free_device(struct se_device *dev)
tcmu_blocks_release(udev);
- if (tcmu_dev_configured(udev)) {
- tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
- udev->uio_info.uio_dev->minor);
+ tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);
- uio_unregister_device(&udev->uio_info);
- }
+ uio_unregister_device(&udev->uio_info);
- /* release ref from init */
+ /* release ref from configure */
kref_put(&udev->kref, tcmu_dev_kref_release);
}
@@ -1546,6 +1763,129 @@ static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *pag
}
CONFIGFS_ATTR(tcmu_, cmd_time_out);
+static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
+}
+
+static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+ int ret, len;
+
+ len = strlen(page);
+ if (!len || len > TCMU_CONFIG_LEN - 1)
+ return -EINVAL;
+
+ /* Check if device has been configured before */
+ if (tcmu_dev_configured(udev)) {
+ ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
+ TCMU_ATTR_DEV_CFG, page);
+ if (ret) {
+ pr_err("Unable to reconfigure device\n");
+ return ret;
+ }
+ strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
+
+ ret = tcmu_update_uio_info(udev);
+ if (ret)
+ return ret;
+ return count;
+ }
+ strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
+
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, dev_config);
+
+static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size);
+}
+
+static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+ u64 val;
+ int ret;
+
+ ret = kstrtou64(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ /* Check if device has been configured before */
+ if (tcmu_dev_configured(udev)) {
+ ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
+ TCMU_ATTR_DEV_SIZE, &val);
+ if (ret) {
+ pr_err("Unable to reconfigure device\n");
+ return ret;
+ }
+ }
+ udev->dev_size = val;
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, dev_size);
+
+static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
+ char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+
+ return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
+}
+
+static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+ u8 val;
+ int ret;
+
+ ret = kstrtou8(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ /* Check if device has been configured before */
+ if (tcmu_dev_configured(udev)) {
+ ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
+ TCMU_ATTR_WRITECACHE, &val);
+ if (ret) {
+ pr_err("Unable to reconfigure device\n");
+ return ret;
+ }
+ }
+
+ da->emulate_write_cache = val;
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, emulate_write_cache);
+
+static struct configfs_attribute *tcmu_attrib_attrs[] = {
+ &tcmu_attr_cmd_time_out,
+ &tcmu_attr_dev_config,
+ &tcmu_attr_dev_size,
+ &tcmu_attr_emulate_write_cache,
+ NULL,
+};
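
The new dev_config, dev_size and emulate_write_cache attributes appear under the backstore's configfs attrib/ directory next to cmd_time_out, and writing them on a configured device now triggers a TCMU_CMD_RECONFIG_DEVICE event that the daemon must acknowledge. A minimal sketch of driving one of them from C; the configfs path below is a placeholder for a real HBA/device name.

/* Write a new size to a tcmu backstore's configfs attribute.  The
 * "user_1/mydev" components are placeholders; substitute the HBA and
 * device names used when the backstore was created.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *attr =
		"/sys/kernel/config/target/core/user_1/mydev/attrib/dev_size";
	const char *val = "2147483648\n";	/* 2 GiB */
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* fails with the netlink status if the daemon rejects the reconfig */
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}
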
+
static struct configfs_attribute **tcmu_attrs;
static struct target_backend_ops tcmu_ops = {
@@ -1556,6 +1896,7 @@ static struct target_backend_ops tcmu_ops = {
.detach_hba = tcmu_detach_hba,
.alloc_device = tcmu_alloc_device,
.configure_device = tcmu_configure_device,
+ .destroy_device = tcmu_destroy_device,
.free_device = tcmu_free_device,
.parse_cdb = tcmu_parse_cdb,
.set_configfs_dev_params = tcmu_set_configfs_dev_params,
@@ -1573,7 +1914,7 @@ static int unmap_thread_fn(void *data)
struct page *page;
int i;
- while (1) {
+ while (!kthread_should_stop()) {
DEFINE_WAIT(__wait);
prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE);
@@ -1645,7 +1986,7 @@ static int unmap_thread_fn(void *data)
static int __init tcmu_module_init(void)
{
- int ret, i, len = 0;
+ int ret, i, k, len = 0;
BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
@@ -1670,7 +2011,10 @@ static int __init tcmu_module_init(void)
for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
len += sizeof(struct configfs_attribute *);
}
- len += sizeof(struct configfs_attribute *) * 2;
+ for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) {
+ len += sizeof(struct configfs_attribute *);
+ }
+ len += sizeof(struct configfs_attribute *);
tcmu_attrs = kzalloc(len, GFP_KERNEL);
if (!tcmu_attrs) {
@@ -1681,7 +2025,10 @@ static int __init tcmu_module_init(void)
for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
tcmu_attrs[i] = passthrough_attrib_attrs[i];
}
- tcmu_attrs[i] = &tcmu_attr_cmd_time_out;
+ for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) {
+ tcmu_attrs[i] = tcmu_attrib_attrs[k];
+ i++;
+ }
tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
ret = transport_backend_register(&tcmu_ops);
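
tcmu_module_init() now sizes tcmu_attrs as the number of passthrough attributes plus the number of tcmu attributes plus one NULL terminator, then copies both NULL-terminated lists into it. The same merge pattern is shown below in isolation, with illustrative attribute names standing in for the real configfs_attribute pointers.

/* Stand-alone illustration of the attribute-array merge done in
 * tcmu_module_init(): two NULL-terminated pointer arrays are counted,
 * one combined array (plus terminating NULL) is allocated and filled.
 */
#include <stdio.h>
#include <stdlib.h>

static const char *base_attrs[] = { "hw_max_sectors", "hw_queue_depth", NULL };
static const char *extra_attrs[] = { "cmd_time_out", "dev_config",
				     "dev_size", "emulate_write_cache", NULL };

int main(void)
{
	size_t len = 0;
	const char **merged;
	int i, k;

	for (i = 0; base_attrs[i]; i++)
		len += sizeof(const char *);
	for (i = 0; extra_attrs[i]; i++)
		len += sizeof(const char *);
	len += sizeof(const char *);		/* NULL terminator */

	merged = calloc(1, len);
	if (!merged)
		return 1;

	for (i = 0; base_attrs[i]; i++)
		merged[i] = base_attrs[i];
	for (k = 0; extra_attrs[k]; k++, i++)
		merged[i] = extra_attrs[k];
	/* merged[i] is already NULL thanks to calloc() */

	for (i = 0; merged[i]; i++)
		printf("%s\n", merged[i]);
	free(merged);
	return 0;
}
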