target/user: Only support full command pass-through

After much discussion, give up on passing only a subset of SCSI commands
to userspace and pass them all. Based on what pscsi is doing, make sure
to set SCF_SCSI_DATA_CDB for I/O ops, and define attributes identical to
pscsi's.
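
With the filter gone, userspace must be prepared to answer the non-I/O
commands (INQUIRY, TEST UNIT READY, MODE SENSE, and so on) itself; only
REPORT LUNS is still emulated in the kernel (see tcmu_parse_cdb below).
A handler roughly ends up dispatching as in the illustrative C sketch
below; the opcode/status constants are the standard SPC/SBC values, and
handle_cdb() is a made-up name, not part of this patch.

    /*
     * Illustrative only, not part of this commit: minimal CDB dispatch
     * for a userspace backend now that all commands are passed through.
     */
    #include <stdint.h>

    #define TEST_UNIT_READY			0x00
    #define INQUIRY				0x12
    #define READ_10				0x28
    #define WRITE_10			0x2a

    #define SAM_STAT_GOOD			0x00
    #define SAM_STAT_CHECK_CONDITION	0x02

    /* Returns a SAM status; a real handler would also fill in sense data. */
    static int handle_cdb(const uint8_t *cdb)
    {
            switch (cdb[0]) {
            case INQUIRY:
            case TEST_UNIT_READY:
                    /* No longer emulated by LIO: userspace must build the
                     * response itself. */
                    return SAM_STAT_GOOD;
            case READ_10:
            case WRITE_10:
                    /* I/O CDBs arrive with SCF_SCSI_DATA_CDB set on the
                     * kernel side; do the actual data transfer here. */
                    return SAM_STAT_GOOD;
            default:
                    /* Unknown opcode: CHECK CONDITION, with ILLEGAL REQUEST /
                     * INVALID COMMAND OPERATION CODE sense data. */
                    return SAM_STAT_CHECK_CONDITION;
            }
    }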

Make hw_block_size configurable via dev param.
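
Assuming the param is written to the backstore's configfs "control"
attribute alongside dev_config and dev_size, a userspace tool could set
a 4 KiB hardware block size roughly as in the sketch below (the HBA and
device names "user_0"/"myimg" and the dev_size value are placeholders):

    /* Illustrative only: set hw_block_size on a TCMU backstore via the
     * configfs "control" attribute. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char *ctl =
                    "/sys/kernel/config/target/core/user_0/myimg/control";
            const char *params = "dev_size=1073741824,hw_block_size=4096";
            int fd = open(ctl, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, params, strlen(params)) < 0)
                    perror("write");
            close(fd);
            return 0;
    }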

Remove mention of command filtering from tcmu-design.txt.

Signed-off-by: Andy Grover <agrover@redhat.com>
Reviewed-by: Ilias Tsitsimpis <iliastsi@arrikto.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Author:    Andy Grover, 2015-05-19 14:44:39 -07:00
Committer: Nicholas Bellinger
commit 9c1cd1b68c (parent cf87edc602)

2 changed files with 76 additions and 71 deletions

Documentation/target/tcmu-design.txt

@@ -15,8 +15,7 @@ Contents:
   a) Discovering and configuring TCMU uio devices
   b) Waiting for events on the device(s)
   c) Managing the command ring
-3) Command filtering
-4) A final note
+3) A final note
 
 TCM Userspace Design
@@ -364,24 +363,6 @@ int handle_device_events(int fd, void *map)
 }
 
-Command filtering
------------------
-
-Initial TCMU support is for a filtered commandset. Only IO-related
-commands are presented to userspace, and the rest are handled by LIO's
-in-kernel command emulation. The commands presented are all versions
-of:
-
-READ
-WRITE
-WRITE_VERIFY
-XDWRITEREAD
-WRITE_SAME
-COMPARE_AND_WRITE
-SYNCHRONIZE_CACHE
-UNMAP
-
 A final note
 ------------

drivers/target/target_core_user.c

@@ -938,12 +938,13 @@ static void tcmu_free_device(struct se_device *dev)
 }
 
 enum {
-        Opt_dev_config, Opt_dev_size, Opt_err,
+        Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err,
 };
 
 static match_table_t tokens = {
         {Opt_dev_config, "dev_config=%s"},
         {Opt_dev_size, "dev_size=%u"},
+        {Opt_hw_block_size, "hw_block_size=%u"},
         {Opt_err, NULL}
 };
@@ -954,6 +955,7 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
         char *orig, *ptr, *opts, *arg_p;
         substring_t args[MAX_OPT_ARGS];
         int ret = 0, token;
+        unsigned long tmp_ul;
 
         opts = kstrdup(page, GFP_KERNEL);
         if (!opts)
@@ -986,6 +988,24 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
                         if (ret < 0)
                                 pr_err("kstrtoul() failed for dev_size=\n");
                         break;
+                case Opt_hw_block_size:
+                        arg_p = match_strdup(&args[0]);
+                        if (!arg_p) {
+                                ret = -ENOMEM;
+                                break;
+                        }
+                        ret = kstrtoul(arg_p, 0, &tmp_ul);
+                        kfree(arg_p);
+                        if (ret < 0) {
+                                pr_err("kstrtoul() failed for hw_block_size=\n");
+                                break;
+                        }
+                        if (!tmp_ul) {
+                                pr_err("hw_block_size must be nonzero\n");
+                                break;
+                        }
+                        dev->dev_attrib.hw_block_size = tmp_ul;
+                        break;
                 default:
                         break;
                 }
@@ -1015,20 +1035,6 @@ static sector_t tcmu_get_blocks(struct se_device *dev)
                        dev->dev_attrib.block_size);
 }
 
-static sense_reason_t
-tcmu_execute_rw(struct se_cmd *se_cmd, struct scatterlist *sgl, u32 sgl_nents,
-                enum dma_data_direction data_direction)
-{
-        int ret;
-
-        ret = tcmu_queue_cmd(se_cmd);
-
-        if (ret != 0)
-                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-        else
-                return TCM_NO_SENSE;
-}
-
 static sense_reason_t
 tcmu_pass_op(struct se_cmd *se_cmd)
 {
@@ -1040,52 +1046,70 @@ tcmu_pass_op(struct se_cmd *se_cmd)
         return TCM_NO_SENSE;
 }
 
-static struct sbc_ops tcmu_sbc_ops = {
-        .execute_rw = tcmu_execute_rw,
-        .execute_sync_cache = tcmu_pass_op,
-        .execute_write_same = tcmu_pass_op,
-        .execute_write_same_unmap = tcmu_pass_op,
-        .execute_unmap = tcmu_pass_op,
-};
-
 static sense_reason_t
 tcmu_parse_cdb(struct se_cmd *cmd)
 {
-        return sbc_parse_cdb(cmd, &tcmu_sbc_ops);
+        unsigned char *cdb = cmd->t_task_cdb;
+
+        /*
+         * For REPORT LUNS we always need to emulate the response, for everything
+         * else, pass it up.
+         */
+        if (cdb[0] == REPORT_LUNS) {
+                cmd->execute_cmd = spc_emulate_report_luns;
+                return TCM_NO_SENSE;
+        }
+
+        /* Set DATA_CDB flag for ops that should have it */
+        switch (cdb[0]) {
+        case READ_6:
+        case READ_10:
+        case READ_12:
+        case READ_16:
+        case WRITE_6:
+        case WRITE_10:
+        case WRITE_12:
+        case WRITE_16:
+        case WRITE_VERIFY:
+        case WRITE_VERIFY_12:
+        case 0x8e: /* WRITE_VERIFY_16 */
+        case COMPARE_AND_WRITE:
+        case XDWRITEREAD_10:
+                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+                break;
+        case VARIABLE_LENGTH_CMD:
+                switch (get_unaligned_be16(&cdb[8])) {
+                case READ_32:
+                case WRITE_32:
+                case 0x0c: /* WRITE_VERIFY_32 */
+                case XDWRITEREAD_32:
+                        cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+                        break;
+                }
+        }
+
+        cmd->execute_cmd = tcmu_pass_op;
+
+        return TCM_NO_SENSE;
 }
 
-DEF_TB_DEFAULT_ATTRIBS(tcmu);
+DEF_TB_DEV_ATTRIB_RO(tcmu, hw_pi_prot_type);
+TB_DEV_ATTR_RO(tcmu, hw_pi_prot_type);
+
+DEF_TB_DEV_ATTRIB_RO(tcmu, hw_block_size);
+TB_DEV_ATTR_RO(tcmu, hw_block_size);
+
+DEF_TB_DEV_ATTRIB_RO(tcmu, hw_max_sectors);
+TB_DEV_ATTR_RO(tcmu, hw_max_sectors);
+
+DEF_TB_DEV_ATTRIB_RO(tcmu, hw_queue_depth);
+TB_DEV_ATTR_RO(tcmu, hw_queue_depth);
 
 static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
-        &tcmu_dev_attrib_emulate_model_alias.attr,
-        &tcmu_dev_attrib_emulate_dpo.attr,
-        &tcmu_dev_attrib_emulate_fua_write.attr,
-        &tcmu_dev_attrib_emulate_fua_read.attr,
-        &tcmu_dev_attrib_emulate_write_cache.attr,
-        &tcmu_dev_attrib_emulate_ua_intlck_ctrl.attr,
-        &tcmu_dev_attrib_emulate_tas.attr,
-        &tcmu_dev_attrib_emulate_tpu.attr,
-        &tcmu_dev_attrib_emulate_tpws.attr,
-        &tcmu_dev_attrib_emulate_caw.attr,
-        &tcmu_dev_attrib_emulate_3pc.attr,
-        &tcmu_dev_attrib_pi_prot_type.attr,
         &tcmu_dev_attrib_hw_pi_prot_type.attr,
-        &tcmu_dev_attrib_pi_prot_format.attr,
-        &tcmu_dev_attrib_enforce_pr_isids.attr,
-        &tcmu_dev_attrib_is_nonrot.attr,
-        &tcmu_dev_attrib_emulate_rest_reord.attr,
-        &tcmu_dev_attrib_force_pr_aptpl.attr,
         &tcmu_dev_attrib_hw_block_size.attr,
-        &tcmu_dev_attrib_block_size.attr,
         &tcmu_dev_attrib_hw_max_sectors.attr,
-        &tcmu_dev_attrib_optimal_sectors.attr,
         &tcmu_dev_attrib_hw_queue_depth.attr,
-        &tcmu_dev_attrib_queue_depth.attr,
-        &tcmu_dev_attrib_max_unmap_lba_count.attr,
-        &tcmu_dev_attrib_max_unmap_block_desc_count.attr,
-        &tcmu_dev_attrib_unmap_granularity.attr,
-        &tcmu_dev_attrib_unmap_granularity_alignment.attr,
-        &tcmu_dev_attrib_max_write_same_len.attr,
         NULL,
 };
@@ -1094,7 +1118,7 @@ static struct se_subsystem_api tcmu_template = {
         .inquiry_prod           = "USER",
         .inquiry_rev            = TCMU_VERSION,
         .owner                  = THIS_MODULE,
-        .transport_type         = TRANSPORT_PLUGIN_VHBA_PDEV,
+        .transport_type         = TRANSPORT_PLUGIN_PHBA_PDEV,
         .attach_hba             = tcmu_attach_hba,
         .detach_hba             = tcmu_detach_hba,
         .alloc_device           = tcmu_alloc_device,