Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target updates from Nicholas Bellinger:
 "The highlights this round include:

   - Introduce configfs support for unlocked configfs_depend_item()
      (krzysztof + andrzej)
   - Conversion of usb-gadget target driver to new function registration
     interface (andrzej + sebastian)
   - Enable qla2xxx FC target mode support for Extended Logins (himanshu +
     giridhar)
   - Enable qla2xxx FC target mode support for Exchange Offload (himanshu +
     giridhar)
   - Add qla2xxx FC target mode irq affinity notification + selective
     command queuing.  (quinn + himanshu)
   - Fix iscsi-target deadlock in se_node_acl configfs deletion (sagi +
     nab)
   - Convert se_node_acl configfs deletion + se_node_acl->queue_depth to
     proper se_session->sess_kref + target_get_session() usage.  (hch +
     sagi + nab)
   - Fix long-standing race between se_node_acl->acl_kref get and
     get_initiator_node_acl() lookup.  (hch + nab)
   - Fix target/user block-size handling, and make sure netlink reaches
     all network namespaces (sheng + andy)

  Note there is an outstanding bug-fix series for remote I_T nexus port
  TMR LUN_RESET that has been posted and is still being tested; it will
  likely become post -rc1 material at this point"
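For the two acl_kref items in the highlights above, here is a minimal
standalone sketch of the pattern (the structure names, the singly-linked
list, and the kref_get_unless_zero() stub are illustrative, not the real
target-core API): the reference is taken *inside* the lookup, under the
same lock the configfs delete path holds, so no window exists between
finding the ACL and pinning it.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for the target-core structures. */
struct kref { int refcount; };
struct node_acl {
	struct kref acl_kref;
	struct node_acl *next;
	const char *initiatorname;
};

static bool kref_get_unless_zero(struct kref *k)
{
	if (k->refcount == 0)
		return false;	/* ACL is already being torn down */
	k->refcount++;
	return true;
}

/* Take acl_kref during the lookup itself; locking is elided in this
 * single-threaded sketch but would cover both lookup and list unlink.
 */
static struct node_acl *get_initiator_node_acl(struct node_acl *list,
					       const char *name)
{
	struct node_acl *acl;

	for (acl = list; acl; acl = acl->next)
		if (!strcmp(acl->initiatorname, name) &&
		    kref_get_unless_zero(&acl->acl_kref))
			return acl;	/* caller now owns a reference */
	return NULL;
}

int main(void)
{
	struct node_acl acl = { { 1 }, NULL, "iqn.1994-05.com.example:init" };

	printf("%s\n", get_initiator_node_acl(&acl, acl.initiatorname) ?
	       "got reference" : "lookup failed");
	return 0;
}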

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (56 commits)
  scsi: qla2xxxx: avoid type mismatch in comparison
  target/user: Make sure netlink would reach all network namespaces
  target: Obtain se_node_acl->acl_kref during get_initiator_node_acl
  target: Convert ACL change queue_depth se_session reference usage
  iscsi-target: Fix potential dead-lock during node acl delete
  ib_srpt: Convert acl lookup to modern get_initiator_node_acl usage
  tcm_fc: Convert acl lookup to modern get_initiator_node_acl usage
  tcm_fc: Wait for command completion before freeing a session
  target: Fix a memory leak in target_dev_lba_map_store()
  target: Support aborting tasks with a 64-bit tag
  usb/gadget: Remove set-but-not-used variables
  target: Remove an unused variable
  target: Fix indentation in target_core_configfs.c
  target/user: Allow user to set block size before enabling device
  iser-target: Fix non negative ERR_PTR isert_device_get usage
  target/fcoe: Add tag support to tcm_fc
  qla2xxx: Check for online flag instead of active reset when transmitting responses
  qla2xxx: Set all queues to 4k
  qla2xxx: Disable ZIO at start time.
  qla2xxx: Move atioq to a different lock to reduce lock contention
  ...
Commit 71e4634e00 by Linus Torvalds, 2016-01-20 17:20:53 -08:00
53 changed files with 4511 additions and 2703 deletions


@ -0,0 +1,6 @@
What: /config/usb-gadget/gadget/functions/tcm.name
Date: Dec 2015
KernelVersion: 4.5
Description:
There are no attributes because all the configuration
is performed in the "target" subsystem of configfs.
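A hedged usage sketch for this entry (the configfs mount point, the
usb_gadget directory name, and the gadget instance "g1" are conventional
assumptions, not stated by the ABI text): instantiating the function is
just a mkdir; everything else is configured under the target subsystem.

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	/* Create a tcm function instance in an existing gadget tree. */
	if (mkdir("/sys/kernel/config/usb_gadget/g1/functions/tcm.0", 0755)) {
		perror("mkdir tcm.0");
		return 1;
	}
	/* No attributes to set here; LUNs, ACLs, etc. live under
	 * /sys/kernel/config/target/, as the entry above notes.
	 */
	return 0;
}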


@ -350,7 +350,7 @@ isert_create_device_ib_res(struct isert_device *device)
dev_attr = &device->dev_attr;
ret = isert_query_device(device->ib_device, dev_attr);
if (ret)
return ret;
goto out;
/* assign function handlers */
if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
@ -366,7 +366,7 @@ isert_create_device_ib_res(struct isert_device *device)
ret = isert_alloc_comps(device, dev_attr);
if (ret)
return ret;
goto out;
device->pd = ib_alloc_pd(device->ib_device);
if (IS_ERR(device->pd)) {
@ -384,6 +384,9 @@ isert_create_device_ib_res(struct isert_device *device)
out_cq:
isert_free_comps(device);
out:
if (ret > 0)
ret = -EINVAL;
return ret;
}
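The hunk above funnels early returns through a shared "out" label and
normalizes any positive return code to -EINVAL before propagating it. A
standalone sketch of that idiom, with stub helpers standing in for the
isert calls (names are illustrative only):

#include <errno.h>

static int query_device(void)            { return 3; /* bogus positive */ }
static int alloc_completion_queues(void) { return 0; }

static int setup_resources(void)
{
	int ret;

	ret = query_device();
	if (ret)
		goto out;	/* one exit path instead of early returns */

	ret = alloc_completion_queues();
	if (ret)
		goto out;

	return 0;
out:
	/* Normalize a stray positive value so callers see a real errno. */
	if (ret > 0)
		ret = -EINVAL;
	return ret;
}

int main(void)
{
	return setup_resources() == -EINVAL ? 0 : 1;
}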


@ -2370,31 +2370,6 @@ static void srpt_release_channel_work(struct work_struct *w)
kfree(ch);
}
static struct srpt_node_acl *__srpt_lookup_acl(struct srpt_port *sport,
u8 i_port_id[16])
{
struct srpt_node_acl *nacl;
list_for_each_entry(nacl, &sport->port_acl_list, list)
if (memcmp(nacl->i_port_id, i_port_id,
sizeof(nacl->i_port_id)) == 0)
return nacl;
return NULL;
}
static struct srpt_node_acl *srpt_lookup_acl(struct srpt_port *sport,
u8 i_port_id[16])
{
struct srpt_node_acl *nacl;
spin_lock_irq(&sport->port_acl_lock);
nacl = __srpt_lookup_acl(sport, i_port_id);
spin_unlock_irq(&sport->port_acl_lock);
return nacl;
}
/**
* srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
*
@ -2412,10 +2387,10 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
struct srp_login_rej *rej;
struct ib_cm_rep_param *rep_param;
struct srpt_rdma_ch *ch, *tmp_ch;
struct srpt_node_acl *nacl;
struct se_node_acl *se_acl;
u32 it_iu_len;
int i;
int ret = 0;
int i, ret = 0;
unsigned char *p;
WARN_ON_ONCE(irqs_disabled());
@ -2565,33 +2540,47 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
" RTR failed (error code = %d)\n", ret);
goto destroy_ib;
}
/*
* Use the initiator port identifier as the session name.
* Use the initiator port identifier as the session name; when
* checking against se_node_acl->initiatorname[], this can be
* with or without a preceding '0x'.
*/
snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
be64_to_cpu(*(__be64 *)ch->i_port_id),
be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
pr_debug("registering session %s\n", ch->sess_name);
nacl = srpt_lookup_acl(sport, ch->i_port_id);
if (!nacl) {
pr_info("Rejected login because no ACL has been"
" configured yet for initiator %s.\n", ch->sess_name);
rej->reason = cpu_to_be32(
SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
goto destroy_ib;
}
p = &ch->sess_name[0];
ch->sess = transport_init_session(TARGET_PROT_NORMAL);
if (IS_ERR(ch->sess)) {
rej->reason = cpu_to_be32(
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_debug("Failed to create session\n");
goto deregister_session;
goto destroy_ib;
}
ch->sess->se_node_acl = &nacl->nacl;
transport_register_session(&sport->port_tpg_1, &nacl->nacl, ch->sess, ch);
try_again:
se_acl = core_tpg_get_initiator_node_acl(&sport->port_tpg_1, p);
if (!se_acl) {
pr_info("Rejected login because no ACL has been"
" configured yet for initiator %s.\n", ch->sess_name);
/*
* XXX: Hack to retry ch->i_port_id without the leading '0x'
*/
if (p == &ch->sess_name[0]) {
p += 2;
goto try_again;
}
rej->reason = cpu_to_be32(
SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
transport_free_session(ch->sess);
goto destroy_ib;
}
ch->sess->se_node_acl = se_acl;
transport_register_session(&sport->port_tpg_1, se_acl, ch->sess, ch);
pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
ch->sess_name, ch->cm_id);
@ -2635,8 +2624,6 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
release_channel:
srpt_set_ch_state(ch, CH_RELEASING);
transport_deregister_session_configfs(ch->sess);
deregister_session:
transport_deregister_session(ch->sess);
ch->sess = NULL;
@ -3273,8 +3260,6 @@ static void srpt_add_one(struct ib_device *device)
sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
INIT_WORK(&sport->work, srpt_refresh_port_work);
INIT_LIST_HEAD(&sport->port_acl_list);
spin_lock_init(&sport->port_acl_lock);
if (srpt_refresh_port(sport)) {
pr_err("MAD registration failed for %s-%d.\n",
@ -3508,42 +3493,15 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
*/
static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
struct srpt_port *sport =
container_of(se_nacl->se_tpg, struct srpt_port, port_tpg_1);
struct srpt_node_acl *nacl =
container_of(se_nacl, struct srpt_node_acl, nacl);
u8 i_port_id[16];
if (srpt_parse_i_port_id(i_port_id, name) < 0) {
pr_err("invalid initiator port ID %s\n", name);
return -EINVAL;
}
memcpy(&nacl->i_port_id[0], &i_port_id[0], 16);
nacl->sport = sport;
spin_lock_irq(&sport->port_acl_lock);
list_add_tail(&nacl->list, &sport->port_acl_list);
spin_unlock_irq(&sport->port_acl_lock);
return 0;
}
/*
* configfs callback function invoked for
* rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
*/
static void srpt_cleanup_nodeacl(struct se_node_acl *se_nacl)
{
struct srpt_node_acl *nacl =
container_of(se_nacl, struct srpt_node_acl, nacl);
struct srpt_port *sport = nacl->sport;
spin_lock_irq(&sport->port_acl_lock);
list_del(&nacl->list);
spin_unlock_irq(&sport->port_acl_lock);
}
static ssize_t srpt_tpg_attrib_srp_max_rdma_size_show(struct config_item *item,
char *page)
{
@ -3820,7 +3778,6 @@ static const struct target_core_fabric_ops srpt_template = {
.fabric_make_tpg = srpt_make_tpg,
.fabric_drop_tpg = srpt_drop_tpg,
.fabric_init_nodeacl = srpt_init_nodeacl,
.fabric_cleanup_nodeacl = srpt_cleanup_nodeacl,
.tfc_wwn_attrs = srpt_wwn_attrs,
.tfc_tpg_base_attrs = srpt_tpg_attrs,


@ -364,11 +364,9 @@ struct srpt_port {
u16 sm_lid;
u16 lid;
union ib_gid gid;
spinlock_t port_acl_lock;
struct work_struct work;
struct se_portal_group port_tpg_1;
struct se_wwn port_wwn;
struct list_head port_acl_list;
struct srpt_port_attrib port_attrib;
};
@ -409,15 +407,9 @@ struct srpt_device {
/**
* struct srpt_node_acl - Per-initiator ACL data (managed via configfs).
* @nacl: Target core node ACL information.
* @i_port_id: 128-bit SRP initiator port ID.
* @sport: port information.
* @list: Element of the per-HCA ACL list.
*/
struct srpt_node_acl {
struct se_node_acl nacl;
u8 i_port_id[16];
struct srpt_port *sport;
struct list_head list;
};
#endif /* IB_SRPT_H */


@ -823,6 +823,41 @@ static struct bin_attribute sysfs_reset_attr = {
.write = qla2x00_sysfs_write_reset,
};
static ssize_t
qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
int type;
int rval = 0;
port_id_t did;
type = simple_strtol(buf, NULL, 10);
did.b.domain = (type & 0x00ff0000) >> 16;
did.b.area = (type & 0x0000ff00) >> 8;
did.b.al_pa = (type & 0x000000ff);
ql_log(ql_log_info, vha, 0x70e3, "portid=%02x%02x%02x done\n",
did.b.domain, did.b.area, did.b.al_pa);
ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
rval = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
return count;
}
static struct bin_attribute sysfs_issue_logo_attr = {
.attr = {
.name = "issue_logo",
.mode = S_IWUSR,
},
.size = 0,
.write = qla2x00_issue_logo,
};
static ssize_t
qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
@ -937,6 +972,7 @@ static struct sysfs_entry {
{ "vpd", &sysfs_vpd_attr, 1 },
{ "sfp", &sysfs_sfp_attr, 1 },
{ "reset", &sysfs_reset_attr, },
{ "issue_logo", &sysfs_issue_logo_attr, },
{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
{ NULL },
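A hedged user-space sketch for the issue_logo attribute added above (the
sysfs path is an assumption based on where qla2xxx registers its bin
attributes): qla2x00_issue_logo() parses the written value as a decimal
24-bit D_ID, i.e. domain << 16 | area << 8 | al_pa.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* LOGO the remote port with D_ID 01:02:03 -> 0x010203 = 66051. */
	unsigned int did = (0x01 << 16) | (0x02 << 8) | 0x03;
	char buf[16];
	int fd = open("/sys/class/scsi_host/host0/device/issue_logo",
		      O_WRONLY);	/* path is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	snprintf(buf, sizeof(buf), "%u", did);
	if (write(fd, buf, strlen(buf)) < 0)
		perror("write");
	close(fd);
	return 0;
}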


@ -14,25 +14,24 @@
* | Module Init and Probe | 0x017f | 0x0146 |
* | | | 0x015b-0x0160 |
* | | | 0x016e-0x0170 |
* | Mailbox commands | 0x118d | 0x1115-0x1116 |
* | | | 0x111a-0x111b |
* | Mailbox commands | 0x1192 | |
* | | | |
* | Device Discovery | 0x2016 | 0x2020-0x2022, |
* | | | 0x2011-0x2012, |
* | | | 0x2099-0x20a4 |
* | Queue Command and IO tracing | 0x3075 | 0x300b |
* | Queue Command and IO tracing | 0x3074 | 0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
* | | | 0x302d,0x3033 |
* | | | 0x3036,0x3038 |
* | | | 0x303a |
* | DPC Thread | 0x4023 | 0x4002,0x4013 |
* | Async Events | 0x508a | 0x502b-0x502f |
* | | | 0x5047 |
* | Async Events | 0x5089 | 0x502b-0x502f |
* | | | 0x5084,0x5075 |
* | | | 0x503d,0x5044 |
* | | | 0x507b,0x505f |
* | Timer Routines | 0x6012 | |
* | User Space Interactions | 0x70e2 | 0x7018,0x702e |
* | User Space Interactions | 0x70e6 | 0x7018,0x702e |
* | | | 0x7020,0x7024 |
* | | | 0x7039,0x7045 |
* | | | 0x7073-0x7075 |
@ -60,15 +59,11 @@
* | | | 0xb13c-0xb140 |
* | | | 0xb149 |
* | MultiQ | 0xc00c | |
* | Misc | 0xd300 | 0xd016-0xd017 |
* | | | 0xd021,0xd024 |
* | | | 0xd025,0xd029 |
* | | | 0xd02a,0xd02e |
* | | | 0xd031-0xd0ff |
* | Misc | 0xd301 | 0xd031-0xd0ff |
* | | | 0xd101-0xd1fe |
* | | | 0xd214-0xd2fe |
* | Target Mode | 0xe080 | |
* | Target Mode Management | 0xf096 | 0xf002 |
* | Target Mode Management | 0xf09b | 0xf002 |
* | | | 0xf046-0xf049 |
* | Target Mode Task Management | 0x1000d | |
* ----------------------------------------------------------------------


@ -259,7 +259,7 @@
#define LOOP_DOWN_TIME 255 /* 240 */
#define LOOP_DOWN_RESET (LOOP_DOWN_TIME - 30)
#define DEFAULT_OUTSTANDING_COMMANDS 1024
#define DEFAULT_OUTSTANDING_COMMANDS 4096
#define MIN_OUTSTANDING_COMMANDS 128
/* ISP request and response entry counts (37-65535) */
@ -267,11 +267,13 @@
#define REQUEST_ENTRY_CNT_2200 2048 /* Number of request entries. */
#define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */
#define REQUEST_ENTRY_CNT_83XX 8192 /* Number of request entries. */
#define RESPONSE_ENTRY_CNT_83XX 4096 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */
#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
#define EXTENDED_EXCH_ENTRY_CNT 32768 /* Entries for offload case */
struct req_que;
struct qla_tgt_sess;
@ -309,6 +311,14 @@ struct srb_cmd {
/* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
#define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID)
struct els_logo_payload {
uint8_t opcode;
uint8_t rsvd[3];
uint8_t s_id[3];
uint8_t rsvd1[1];
uint8_t wwpn[WWN_SIZE];
};
/*
* SRB extensions.
*/
@ -322,6 +332,15 @@ struct srb_iocb {
uint16_t data[2];
} logio;
struct {
#define ELS_DCMD_TIMEOUT 20
#define ELS_DCMD_LOGO 0x5
uint32_t flags;
uint32_t els_cmd;
struct completion comp;
struct els_logo_payload *els_logo_pyld;
dma_addr_t els_logo_pyld_dma;
} els_logo;
struct {
/*
* Values for flags field below are as
* defined in tsk_mgmt_entry struct
@ -382,7 +401,7 @@ struct srb_iocb {
#define SRB_FXIOCB_DCMD 10
#define SRB_FXIOCB_BCMD 11
#define SRB_ABT_CMD 12
#define SRB_ELS_DCMD 13
typedef struct srb {
atomic_t ref_count;
@ -891,6 +910,7 @@ struct mbx_cmd_32 {
#define MBC_DISABLE_VI 0x24 /* Disable VI operation. */
#define MBC_ENABLE_VI 0x25 /* Enable VI operation. */
#define MBC_GET_FIRMWARE_OPTION 0x28 /* Get Firmware Options. */
#define MBC_GET_MEM_OFFLOAD_CNTRL_STAT 0x34 /* Memory Offload ctrl/Stat*/
#define MBC_SET_FIRMWARE_OPTION 0x38 /* Set Firmware Options. */
#define MBC_LOOP_PORT_BYPASS 0x40 /* Loop Port Bypass. */
#define MBC_LOOP_PORT_ENABLE 0x41 /* Loop Port Enable. */
@ -2695,11 +2715,16 @@ struct isp_operations {
struct scsi_qla_host;
#define QLA83XX_RSPQ_MSIX_ENTRY_NUMBER 1 /* refer to qla83xx_msix_entries */
struct qla_msix_entry {
int have_irq;
uint32_t vector;
uint16_t entry;
struct rsp_que *rsp;
struct irq_affinity_notify irq_notify;
int cpuid;
};
#define WATCH_INTERVAL 1 /* number of seconds */
@ -2910,12 +2935,15 @@ struct qlt_hw_data {
uint32_t num_qfull_cmds_dropped;
spinlock_t q_full_lock;
uint32_t leak_exchg_thresh_hold;
spinlock_t sess_lock;
int rspq_vector_cpuid;
spinlock_t atio_lock ____cacheline_aligned;
};
#define MAX_QFULL_CMDS_ALLOC 8192
#define Q_FULL_THRESH_HOLD_PERCENT 90
#define Q_FULL_THRESH_HOLD(ha) \
((ha->fw_xcb_count/100) * Q_FULL_THRESH_HOLD_PERCENT)
((ha->cur_fw_xcb_count/100) * Q_FULL_THRESH_HOLD_PERCENT)
#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */
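As a worked example of the threshold macro above (the exchange count is
assumed): with cur_fw_xcb_count = 4096, Q_FULL_THRESH_HOLD evaluates to
(4096 / 100) * 90 = 3690, so queue-full handling engages once outstanding
exchanges exceed roughly 90% of the firmware's current exchange count.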
@ -2962,10 +2990,12 @@ struct qla_hw_data {
uint32_t isp82xx_no_md_cap:1;
uint32_t host_shutting_down:1;
uint32_t idc_compl_status:1;
uint32_t mr_reset_hdlr_active:1;
uint32_t mr_intr_valid:1;
uint32_t fawwpn_enabled:1;
uint32_t exlogins_enabled:1;
uint32_t exchoffld_enabled:1;
/* 35 bits */
} flags;
@ -3237,6 +3267,21 @@ struct qla_hw_data {
void *async_pd;
dma_addr_t async_pd_dma;
#define ENABLE_EXTENDED_LOGIN BIT_7
/* Extended Logins */
void *exlogin_buf;
dma_addr_t exlogin_buf_dma;
int exlogin_size;
#define ENABLE_EXCHANGE_OFFLD BIT_2
/* Exchange Offload */
void *exchoffld_buf;
dma_addr_t exchoffld_buf_dma;
int exchoffld_size;
int exchoffld_count;
void *swl;
/* These are used by mailbox operations. */
@ -3279,8 +3324,14 @@ struct qla_hw_data {
#define RISC_START_ADDRESS_2100 0x1000
#define RISC_START_ADDRESS_2300 0x800
#define RISC_START_ADDRESS_2400 0x100000
uint16_t fw_xcb_count;
uint16_t fw_iocb_count;
uint16_t orig_fw_tgt_xcb_count;
uint16_t cur_fw_tgt_xcb_count;
uint16_t orig_fw_xcb_count;
uint16_t cur_fw_xcb_count;
uint16_t orig_fw_iocb_count;
uint16_t cur_fw_iocb_count;
uint16_t fw_max_fcf_count;
uint32_t fw_shared_ram_start;
uint32_t fw_shared_ram_end;
@ -3323,6 +3374,9 @@ struct qla_hw_data {
uint32_t chain_offset;
struct dentry *dfs_dir;
struct dentry *dfs_fce;
struct dentry *dfs_tgt_counters;
struct dentry *dfs_fw_resource_cnt;
dma_addr_t fce_dma;
void *fce;
uint32_t fce_bufs;
@ -3480,6 +3534,18 @@ struct qla_hw_data {
int allow_cna_fw_dump;
};
struct qla_tgt_counters {
uint64_t qla_core_sbt_cmd;
uint64_t core_qla_que_buf;
uint64_t qla_core_ret_ctio;
uint64_t core_qla_snd_status;
uint64_t qla_core_ret_sta_ctio;
uint64_t core_qla_free_cmd;
uint64_t num_q_full_sent;
uint64_t num_alloc_iocb_failed;
uint64_t num_term_xchg_sent;
};
/*
* Qlogic scsi host structure
*/
@ -3595,6 +3661,10 @@ typedef struct scsi_qla_host {
atomic_t generation_tick;
/* Time when global fcport update has been scheduled */
int total_fcport_update_gen;
/* List of pending LOGOs, protected by tgt_mutex */
struct list_head logo_list;
/* List of pending PLOGI acks, protected by hw lock */
struct list_head plogi_ack_list;
uint32_t vp_abort_cnt;
@ -3632,6 +3702,7 @@ typedef struct scsi_qla_host {
atomic_t vref_count;
struct qla8044_reset_template reset_tmplt;
struct qla_tgt_counters tgt_counters;
} scsi_qla_host_t;
#define SET_VP_IDX 1


@ -12,6 +12,85 @@
static struct dentry *qla2x00_dfs_root;
static atomic_t qla2x00_dfs_root_count;
static int
qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
{
struct scsi_qla_host *vha = s->private;
struct qla_hw_data *ha = vha->hw;
seq_puts(s, "FW Resource count\n\n");
seq_printf(s, "Original TGT exchg count[%d]\n",
ha->orig_fw_tgt_xcb_count);
seq_printf(s, "current TGT exchg count[%d]\n",
ha->cur_fw_tgt_xcb_count);
seq_printf(s, "original Initiator Exchange count[%d]\n",
ha->orig_fw_xcb_count);
seq_printf(s, "Current Initiator Exchange count[%d]\n",
ha->cur_fw_xcb_count);
seq_printf(s, "Original IOCB count[%d]\n", ha->orig_fw_iocb_count);
seq_printf(s, "Current IOCB count[%d]\n", ha->cur_fw_iocb_count);
seq_printf(s, "MAX VP count[%d]\n", ha->max_npiv_vports);
seq_printf(s, "MAX FCF count[%d]\n", ha->fw_max_fcf_count);
return 0;
}
static int
qla_dfs_fw_resource_cnt_open(struct inode *inode, struct file *file)
{
struct scsi_qla_host *vha = inode->i_private;
return single_open(file, qla_dfs_fw_resource_cnt_show, vha);
}
static const struct file_operations dfs_fw_resource_cnt_ops = {
.open = qla_dfs_fw_resource_cnt_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int
qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
{
struct scsi_qla_host *vha = s->private;
seq_puts(s, "Target Counters\n");
seq_printf(s, "qla_core_sbt_cmd = %lld\n",
vha->tgt_counters.qla_core_sbt_cmd);
seq_printf(s, "qla_core_ret_sta_ctio = %lld\n",
vha->tgt_counters.qla_core_ret_sta_ctio);
seq_printf(s, "qla_core_ret_ctio = %lld\n",
vha->tgt_counters.qla_core_ret_ctio);
seq_printf(s, "core_qla_que_buf = %lld\n",
vha->tgt_counters.core_qla_que_buf);
seq_printf(s, "core_qla_snd_status = %lld\n",
vha->tgt_counters.core_qla_snd_status);
seq_printf(s, "core_qla_free_cmd = %lld\n",
vha->tgt_counters.core_qla_free_cmd);
seq_printf(s, "num alloc iocb failed = %lld\n",
vha->tgt_counters.num_alloc_iocb_failed);
seq_printf(s, "num term exchange sent = %lld\n",
vha->tgt_counters.num_term_xchg_sent);
seq_printf(s, "num Q full sent = %lld\n",
vha->tgt_counters.num_q_full_sent);
return 0;
}
static int
qla_dfs_tgt_counters_open(struct inode *inode, struct file *file)
{
struct scsi_qla_host *vha = inode->i_private;
return single_open(file, qla_dfs_tgt_counters_show, vha);
}
static const struct file_operations dfs_tgt_counters_ops = {
.open = qla_dfs_tgt_counters_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int
qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
{
@ -146,6 +225,22 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
atomic_inc(&qla2x00_dfs_root_count);
create_nodes:
ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count",
S_IRUSR, ha->dfs_dir, vha, &dfs_fw_resource_cnt_ops);
if (!ha->dfs_fw_resource_cnt) {
ql_log(ql_log_warn, vha, 0x00fd,
"Unable to create debugFS fw_resource_count node.\n");
goto out;
}
ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR,
ha->dfs_dir, vha, &dfs_tgt_counters_ops);
if (!ha->dfs_tgt_counters) {
ql_log(ql_log_warn, vha, 0xd301,
"Unable to create debugFS tgt_counters node.\n");
goto out;
}
ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
&dfs_fce_ops);
if (!ha->dfs_fce) {
@ -161,6 +256,17 @@ int
qla2x00_dfs_remove(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
if (ha->dfs_fw_resource_cnt) {
debugfs_remove(ha->dfs_fw_resource_cnt);
ha->dfs_fw_resource_cnt = NULL;
}
if (ha->dfs_tgt_counters) {
debugfs_remove(ha->dfs_tgt_counters);
ha->dfs_tgt_counters = NULL;
}
if (ha->dfs_fce) {
debugfs_remove(ha->dfs_fce);
ha->dfs_fce = NULL;


@ -44,6 +44,8 @@ extern int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t);
extern void qla2x00_update_fcports(scsi_qla_host_t *);
extern int qla2x00_abort_isp(scsi_qla_host_t *);
@ -117,6 +119,8 @@ extern int ql2xdontresethba;
extern uint64_t ql2xmaxlun;
extern int ql2xmdcapmask;
extern int ql2xmdenable;
extern int ql2xexlogins;
extern int ql2xexchoffld;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@ -135,6 +139,10 @@ extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *,
fc_port_t *, uint16_t *);
extern int qla2x00_set_exlogins_buffer(struct scsi_qla_host *);
extern void qla2x00_free_exlogin_buffer(struct qla_hw_data *);
extern int qla2x00_set_exchoffld_buffer(struct scsi_qla_host *);
extern void qla2x00_free_exchoffld_buffer(struct qla_hw_data *);
extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
@ -323,8 +331,7 @@ extern int
qla2x00_get_id_list(scsi_qla_host_t *, void *, dma_addr_t, uint16_t *);
extern int
qla2x00_get_resource_cnts(scsi_qla_host_t *, uint16_t *, uint16_t *,
uint16_t *, uint16_t *, uint16_t *, uint16_t *);
qla2x00_get_resource_cnts(scsi_qla_host_t *);
extern int
qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map);
@ -766,4 +773,11 @@ extern int qla8044_abort_isp(scsi_qla_host_t *);
extern int qla8044_check_fw_alive(struct scsi_qla_host *);
extern void qlt_host_reset_handler(struct qla_hw_data *ha);
extern int qla_get_exlogin_status(scsi_qla_host_t *, uint16_t *,
uint16_t *);
extern int qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr);
extern int qla_get_exchoffld_status(scsi_qla_host_t *, uint16_t *, uint16_t *);
extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *, dma_addr_t);
extern void qlt_handle_abts_recv(struct scsi_qla_host *, response_t *);
#endif /* _QLA_GBL_H */


@ -1766,10 +1766,10 @@ qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
(ql2xmultique_tag || ql2xmaxqueues > 1)))
req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
else {
if (ha->fw_xcb_count <= ha->fw_iocb_count)
req->num_outstanding_cmds = ha->fw_xcb_count;
if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
req->num_outstanding_cmds = ha->cur_fw_xcb_count;
else
req->num_outstanding_cmds = ha->fw_iocb_count;
req->num_outstanding_cmds = ha->cur_fw_iocb_count;
}
req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
@ -1843,9 +1843,23 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_init, vha, 0x00ca,
"Starting firmware.\n");
if (ql2xexlogins)
ha->flags.exlogins_enabled = 1;
if (ql2xexchoffld)
ha->flags.exchoffld_enabled = 1;
rval = qla2x00_execute_fw(vha, srisc_address);
/* Retrieve firmware information. */
if (rval == QLA_SUCCESS) {
rval = qla2x00_set_exlogins_buffer(vha);
if (rval != QLA_SUCCESS)
goto failed;
rval = qla2x00_set_exchoffld_buffer(vha);
if (rval != QLA_SUCCESS)
goto failed;
enable_82xx_npiv:
fw_major_version = ha->fw_major_version;
if (IS_P3P_TYPE(ha))
@ -1864,9 +1878,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
ha->max_npiv_vports =
MIN_MULTI_ID_FABRIC - 1;
}
qla2x00_get_resource_cnts(vha, NULL,
&ha->fw_xcb_count, NULL, &ha->fw_iocb_count,
&ha->max_npiv_vports, NULL);
qla2x00_get_resource_cnts(vha);
/*
* Allocate the array of outstanding commands
@ -2248,7 +2260,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
if (IS_FWI2_CAPABLE(ha)) {
mid_init_cb->options = cpu_to_le16(BIT_1);
mid_init_cb->init_cb.execution_throttle =
cpu_to_le16(ha->fw_xcb_count);
cpu_to_le16(ha->cur_fw_xcb_count);
/* D-Port Status */
if (IS_DPORT_CAPABLE(ha))
mid_init_cb->init_cb.firmware_options_1 |=
@ -3053,6 +3065,26 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
atomic_set(&vha->loop_state, LOOP_READY);
ql_dbg(ql_dbg_disc, vha, 0x2069,
"LOOP READY.\n");
/*
* Process any ATIO queue entries that came in
* while we weren't online.
*/
if (qla_tgt_mode_enabled(vha)) {
if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
spin_lock_irqsave(&ha->tgt.atio_lock,
flags);
qlt_24xx_process_atio_queue(vha, 0);
spin_unlock_irqrestore(
&ha->tgt.atio_lock, flags);
} else {
spin_lock_irqsave(&ha->hardware_lock,
flags);
qlt_24xx_process_atio_queue(vha, 1);
spin_unlock_irqrestore(
&ha->hardware_lock, flags);
}
}
}
}
@ -4907,7 +4939,6 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct rsp_que *rsp = ha->rsp_q_map[0];
unsigned long flags;
/* If firmware needs to be loaded */
if (qla2x00_isp_firmware(vha)) {
@ -4929,17 +4960,6 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
/* Issue a marker after FW becomes ready. */
qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
vha->flags.online = 1;
/*
* Process any ATIO queue entries that came in
* while we weren't online.
*/
spin_lock_irqsave(&ha->hardware_lock, flags);
if (qla_tgt_mode_enabled(vha))
qlt_24xx_process_atio_queue(vha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
}


@ -258,6 +258,8 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo)
if ((IS_QLAFX00(sp->fcport->vha->hw)) &&
(sp->type == SRB_FXIOCB_DCMD))
init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
if (sp->type == SRB_ELS_DCMD)
init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
}
static inline int


@ -1868,6 +1868,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
}
queuing_error:
vha->tgt_counters.num_alloc_iocb_failed++;
return pkt;
}
@ -2009,6 +2010,190 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
}
}
static void
qla2x00_els_dcmd_sp_free(void *ptr, void *data)
{
struct scsi_qla_host *vha = (scsi_qla_host_t *)ptr;
struct qla_hw_data *ha = vha->hw;
srb_t *sp = (srb_t *)data;
struct srb_iocb *elsio = &sp->u.iocb_cmd;
kfree(sp->fcport);
if (elsio->u.els_logo.els_logo_pyld)
dma_free_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
elsio->u.els_logo.els_logo_pyld,
elsio->u.els_logo.els_logo_pyld_dma);
del_timer(&elsio->timer);
qla2x00_rel_sp(vha, sp);
}
static void
qla2x00_els_dcmd_iocb_timeout(void *data)
{
srb_t *sp = (srb_t *)data;
struct srb_iocb *lio = &sp->u.iocb_cmd;
fc_port_t *fcport = sp->fcport;
struct scsi_qla_host *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
unsigned long flags = 0;
ql_dbg(ql_dbg_io, vha, 0x3069,
"%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
/* Abort the exchange */
spin_lock_irqsave(&ha->hardware_lock, flags);
if (ha->isp_ops->abort_command(sp)) {
ql_dbg(ql_dbg_io, vha, 0x3070,
"mbx abort_command failed.\n");
} else {
ql_dbg(ql_dbg_io, vha, 0x3071,
"mbx abort_command success.\n");
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
complete(&lio->u.els_logo.comp);
}
static void
qla2x00_els_dcmd_sp_done(void *data, void *ptr, int res)
{
srb_t *sp = (srb_t *)ptr;
fc_port_t *fcport = sp->fcport;
struct srb_iocb *lio = &sp->u.iocb_cmd;
struct scsi_qla_host *vha = fcport->vha;
ql_dbg(ql_dbg_io, vha, 0x3072,
"%s hdl=%x, portid=%02x%02x%02x done\n",
sp->name, sp->handle, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa);
complete(&lio->u.els_logo.comp);
}
int
qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
port_id_t remote_did)
{
srb_t *sp;
fc_port_t *fcport = NULL;
struct srb_iocb *elsio = NULL;
struct qla_hw_data *ha = vha->hw;
struct els_logo_payload logo_pyld;
int rval = QLA_SUCCESS;
fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (!fcport) {
ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
return -ENOMEM;
}
/* Alloc SRB structure */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp) {
kfree(fcport);
ql_log(ql_log_info, vha, 0x70e6,
"SRB allocation failed\n");
return -ENOMEM;
}
elsio = &sp->u.iocb_cmd;
fcport->loop_id = 0xFFFF;
fcport->d_id.b.domain = remote_did.b.domain;
fcport->d_id.b.area = remote_did.b.area;
fcport->d_id.b.al_pa = remote_did.b.al_pa;
ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
sp->type = SRB_ELS_DCMD;
sp->name = "ELS_DCMD";
sp->fcport = fcport;
qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
sp->done = qla2x00_els_dcmd_sp_done;
sp->free = qla2x00_els_dcmd_sp_free;
elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
GFP_KERNEL);
if (!elsio->u.els_logo.els_logo_pyld) {
sp->free(vha, sp);
return QLA_FUNCTION_FAILED;
}
memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
elsio->u.els_logo.els_cmd = els_opcode;
logo_pyld.opcode = els_opcode;
logo_pyld.s_id[0] = vha->d_id.b.al_pa;
logo_pyld.s_id[1] = vha->d_id.b.area;
logo_pyld.s_id[2] = vha->d_id.b.domain;
host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
sizeof(struct els_logo_payload));
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
sp->free(vha, sp);
return QLA_FUNCTION_FAILED;
}
ql_dbg(ql_dbg_io, vha, 0x3074,
"%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa);
wait_for_completion(&elsio->u.els_logo.comp);
sp->free(vha, sp);
return rval;
}
static void
qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
scsi_qla_host_t *vha = sp->fcport->vha;
struct srb_iocb *elsio = &sp->u.iocb_cmd;
els_iocb->entry_type = ELS_IOCB_TYPE;
els_iocb->entry_count = 1;
els_iocb->sys_define = 0;
els_iocb->entry_status = 0;
els_iocb->handle = sp->handle;
els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
els_iocb->tx_dsd_count = 1;
els_iocb->vp_index = vha->vp_idx;
els_iocb->sof_type = EST_SOFI3;
els_iocb->rx_dsd_count = 0;
els_iocb->opcode = elsio->u.els_logo.els_cmd;
els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
els_iocb->port_id[1] = sp->fcport->d_id.b.area;
els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
els_iocb->control_flags = 0;
els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
els_iocb->tx_address[0] =
cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
els_iocb->tx_address[1] =
cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
els_iocb->rx_byte_count = 0;
els_iocb->rx_address[0] = 0;
els_iocb->rx_address[1] = 0;
els_iocb->rx_len = 0;
sp->fcport->vha->qla_stats.control_requests++;
}
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
@ -2623,6 +2808,9 @@ qla2x00_start_sp(srb_t *sp)
qlafx00_abort_iocb(sp, pkt) :
qla24xx_abort_iocb(sp, pkt);
break;
case SRB_ELS_DCMD:
qla24xx_els_logo_iocb(sp, pkt);
break;
default:
break;
}


@ -18,6 +18,10 @@ static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
sts_entry_t *);
static void qla_irq_affinity_notify(struct irq_affinity_notify *,
const cpumask_t *);
static void qla_irq_affinity_release(struct kref *);
/**
* qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@ -1418,6 +1422,12 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
case SRB_CT_CMD:
type = "ct pass-through";
break;
case SRB_ELS_DCMD:
type = "Driver ELS logo";
ql_dbg(ql_dbg_user, vha, 0x5047,
"Completing %s: (%p) type=%d.\n", type, sp, sp->type);
sp->done(vha, sp, 0);
return;
default:
ql_dbg(ql_dbg_user, vha, 0x503e,
"Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
@ -2542,6 +2552,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
if (!vha->flags.online)
return;
if (rsp->msix->cpuid != smp_processor_id()) {
/* If the kernel does not notify qla of an IRQ's CPU change,
* then set it here.
*/
rsp->msix->cpuid = smp_processor_id();
ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid;
}
while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
@ -2587,8 +2605,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
break;
case ABTS_RECV_24XX:
/* ensure that the ATIO queue is empty */
qlt_24xx_process_atio_queue(vha);
if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
/* ensure that the ATIO queue is empty */
qlt_handle_abts_recv(vha, (response_t *)pkt);
break;
} else {
/* drop through */
qlt_24xx_process_atio_queue(vha, 1);
}
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case NOTIFY_ACK_TYPE:
@ -2755,13 +2779,22 @@ qla24xx_intr_handler(int irq, void *dev_id)
case INTR_RSP_QUE_UPDATE_83XX:
qla24xx_process_response_queue(vha, rsp);
break;
case INTR_ATIO_QUE_UPDATE:
qlt_24xx_process_atio_queue(vha);
case INTR_ATIO_QUE_UPDATE:{
unsigned long flags2;
spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
qlt_24xx_process_atio_queue(vha, 1);
spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
break;
case INTR_ATIO_RSP_QUE_UPDATE:
qlt_24xx_process_atio_queue(vha);
}
case INTR_ATIO_RSP_QUE_UPDATE: {
unsigned long flags2;
spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
qlt_24xx_process_atio_queue(vha, 1);
spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
qla24xx_process_response_queue(vha, rsp);
break;
}
default:
ql_dbg(ql_dbg_async, vha, 0x504f,
"Unrecognized interrupt type (%d).\n", stat * 0xff);
@ -2920,13 +2953,22 @@ qla24xx_msix_default(int irq, void *dev_id)
case INTR_RSP_QUE_UPDATE_83XX:
qla24xx_process_response_queue(vha, rsp);
break;
case INTR_ATIO_QUE_UPDATE:
qlt_24xx_process_atio_queue(vha);
case INTR_ATIO_QUE_UPDATE:{
unsigned long flags2;
spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
qlt_24xx_process_atio_queue(vha, 1);
spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
break;
case INTR_ATIO_RSP_QUE_UPDATE:
qlt_24xx_process_atio_queue(vha);
}
case INTR_ATIO_RSP_QUE_UPDATE: {
unsigned long flags2;
spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
qlt_24xx_process_atio_queue(vha, 1);
spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
qla24xx_process_response_queue(vha, rsp);
break;
}
default:
ql_dbg(ql_dbg_async, vha, 0x5051,
"Unrecognized interrupt type (%d).\n", stat & 0xff);
@ -2973,8 +3015,11 @@ qla24xx_disable_msix(struct qla_hw_data *ha)
for (i = 0; i < ha->msix_count; i++) {
qentry = &ha->msix_entries[i];
if (qentry->have_irq)
if (qentry->have_irq) {
/* un-register irq cpu affinity notification */
irq_set_affinity_notifier(qentry->vector, NULL);
free_irq(qentry->vector, qentry->rsp);
}
}
pci_disable_msix(ha->pdev);
kfree(ha->msix_entries);
@ -3037,6 +3082,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
qentry->entry = entries[i].entry;
qentry->have_irq = 0;
qentry->rsp = NULL;
qentry->irq_notify.notify = qla_irq_affinity_notify;
qentry->irq_notify.release = qla_irq_affinity_release;
qentry->cpuid = -1;
}
/* Enable MSI-X vectors for the base queue */
@ -3055,6 +3103,18 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
qentry->have_irq = 1;
qentry->rsp = rsp;
rsp->msix = qentry;
/* Register for CPU affinity notification. */
irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
/* Schedule work (i.e. trigger a notification) to read the cpu
* mask for this specific irq.
* kref_get is required because
* irq_affinity_notify() will do
* kref_put().
*/
kref_get(&qentry->irq_notify.kref);
schedule_work(&qentry->irq_notify.work);
}
/*
@ -3234,3 +3294,47 @@ int qla25xx_request_irq(struct rsp_que *rsp)
msix->rsp = rsp;
return ret;
}
/* irq_set_affinity/irqbalance will trigger notification of cpu mask update */
static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
const cpumask_t *mask)
{
struct qla_msix_entry *e =
container_of(notify, struct qla_msix_entry, irq_notify);
struct qla_hw_data *ha;
struct scsi_qla_host *base_vha;
/* users are advised to set the mask to a single cpu */
e->cpuid = cpumask_first(mask);
ha = e->rsp->hw;
base_vha = pci_get_drvdata(ha->pdev);
ql_dbg(ql_dbg_init, base_vha, 0xffff,
"%s: host %ld : vector %d cpu %d \n", __func__,
base_vha->host_no, e->vector, e->cpuid);
if (e->have_irq) {
if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
(e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) {
ha->tgt.rspq_vector_cpuid = e->cpuid;
ql_dbg(ql_dbg_init, base_vha, 0xffff,
"%s: host%ld: rspq vector %d cpu %d runtime change\n",
__func__, base_vha->host_no, e->vector, e->cpuid);
}
}
}
static void qla_irq_affinity_release(struct kref *ref)
{
struct irq_affinity_notify *notify =
container_of(ref, struct irq_affinity_notify, kref);
struct qla_msix_entry *e =
container_of(notify, struct qla_msix_entry, irq_notify);
struct scsi_qla_host *base_vha = pci_get_drvdata(e->rsp->hw->pdev);
ql_dbg(ql_dbg_init, base_vha, 0xffff,
"%s: host%ld: vector %d cpu %d \n", __func__,
base_vha->host_no, e->vector, e->cpuid);
}


@ -489,6 +489,13 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
EXTENDED_BB_CREDITS);
} else
mcp->mb[4] = 0;
if (ha->flags.exlogins_enabled)
mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
if (ha->flags.exchoffld_enabled)
mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
mcp->in_mb |= MBX_1;
} else {
@ -520,6 +527,226 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
return rval;
}
/*
* qla_get_exlogin_status
* Get extended login status
* uses the memory offload control/status Mailbox
*
* Input:
* ha: adapter state pointer.
* buf_sz: returned buffer size per extended login.
* ex_logins_cnt: returned maximum extended login count.
*
* Returns:
* qla2x00 local function status
*
* Context:
* Kernel context.
*/
#define FETCH_XLOGINS_STAT 0x8
int
qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
uint16_t *ex_logins_cnt)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
"Entered %s\n", __func__);
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
mcp->mb[1] = FETCH_XLOGINS_STAT;
mcp->out_mb = MBX_1|MBX_0;
mcp->in_mb = MBX_10|MBX_4|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
} else {
*buf_sz = mcp->mb[4];
*ex_logins_cnt = mcp->mb[10];
ql_log(ql_log_info, vha, 0x1190,
"buffer size 0x%x, exchange login count=%d\n",
mcp->mb[4], mcp->mb[10]);
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
"Done %s.\n", __func__);
}
return rval;
}
/*
* qla_set_exlogin_mem_cfg
* set extended login memory configuration
* Mbx needs to be issued before the init_cb is set
*
* Input:
* ha: adapter state pointer.
* phys_addr: physical address of the extended-login buffer
* TARGET_QUEUE_LOCK must be released
* ADAPTER_STATE_LOCK must be released
*
* Returns:
* qla2x00 local function status code.
*
* Context:
* Kernel context.
*/
#define CONFIG_XLOGINS_MEM 0x3
int
qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
int configured_count;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
"Entered %s.\n", __func__);
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
mcp->mb[1] = CONFIG_XLOGINS_MEM;
mcp->mb[2] = MSW(phys_addr);
mcp->mb[3] = LSW(phys_addr);
mcp->mb[6] = MSW(MSD(phys_addr));
mcp->mb[7] = LSW(MSD(phys_addr));
mcp->mb[8] = MSW(ha->exlogin_size);
mcp->mb[9] = LSW(ha->exlogin_size);
mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_11|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
/*EMPTY*/
ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
} else {
configured_count = mcp->mb[11];
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
"Done %s.\n", __func__);
}
return rval;
}
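The mailbox programming above scatters a 64-bit DMA address across four
16-bit registers. A standalone sketch of the word extraction (the macro
definitions mirror the driver's qla_def.h helpers and are reproduced here
as an assumption):

#include <stdint.h>
#include <stdio.h>

#define LSW(x)	((uint16_t)(x))
#define MSW(x)	((uint16_t)((uint32_t)(x) >> 16))
#define MSD(x)	((uint32_t)(((uint64_t)(x)) >> 32))

int main(void)
{
	uint64_t phys_addr = 0x0000001234abcd00ULL;	/* example address */
	uint16_t mb2 = MSW(phys_addr);		/* high word, low dword  */
	uint16_t mb3 = LSW(phys_addr);		/* low word, low dword   */
	uint16_t mb6 = MSW(MSD(phys_addr));	/* high word, high dword */
	uint16_t mb7 = LSW(MSD(phys_addr));	/* low word, high dword  */

	printf("mb[2]=%#x mb[3]=%#x mb[6]=%#x mb[7]=%#x\n",
	       mb2, mb3, mb6, mb7);
	return 0;
}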
/*
* qla_get_exchoffld_status
* Get exchange offload status
* uses the memory offload control/status Mailbox
*
* Input:
* ha: adapter state pointer.
* buf_sz: returned buffer size per offloaded exchange.
* ex_logins_cnt: returned maximum exchange offload count.
*
* Returns:
* qla2x00 local function status
*
* Context:
* Kernel context.
*/
#define FETCH_XCHOFFLD_STAT 0x2
int
qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
uint16_t *ex_logins_cnt)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
"Entered %s\n", __func__);
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
mcp->mb[1] = FETCH_XCHOFFLD_STAT;
mcp->out_mb = MBX_1|MBX_0;
mcp->in_mb = MBX_10|MBX_4|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
} else {
*buf_sz = mcp->mb[4];
*ex_logins_cnt = mcp->mb[10];
ql_log(ql_log_info, vha, 0x118e,
"buffer size 0x%x, exchange offload count=%d\n",
mcp->mb[4], mcp->mb[10]);
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
"Done %s.\n", __func__);
}
return rval;
}
/*
* qla_set_exchoffld_mem_cfg
* Set exchange offload memory configuration
* Mbx needs to be issued before the init_cb is set
*
* Input:
* ha: adapter state pointer.
* phys_addr: physical address of the exchange offload buffer
* TARGET_QUEUE_LOCK must be released
* ADAPTER_STATE_LOCK must be released
*
* Returns:
* qla2x00 local function status code.
*
* Context:
* Kernel context.
*/
#define CONFIG_XCHOFFLD_MEM 0x3
int
qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
"Entered %s.\n", __func__);
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
mcp->mb[2] = MSW(phys_addr);
mcp->mb[3] = LSW(phys_addr);
mcp->mb[6] = MSW(MSD(phys_addr));
mcp->mb[7] = LSW(MSD(phys_addr));
mcp->mb[8] = MSW(ha->exchoffld_size);
mcp->mb[9] = LSW(ha->exchoffld_size);
mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_11|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
/*EMPTY*/
ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
} else {
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
"Done %s.\n", __func__);
}
return rval;
}
/*
* qla2x00_get_fw_version
* Get firmware version.
@ -594,6 +821,16 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
"%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
__func__, mcp->mb[17], mcp->mb[16]);
if (ha->fw_attributes_h & 0x4)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
"%s: Firmware supports Extended Login 0x%x\n",
__func__, ha->fw_attributes_h);
if (ha->fw_attributes_h & 0x8)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
"%s: Firmware supports Exchange Offload 0x%x\n",
__func__, ha->fw_attributes_h);
}
if (IS_QLA27XX(ha)) {
@ -2383,10 +2620,9 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
* Kernel context.
*/
int
qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt,
uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports, uint16_t *max_fcfs)
qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
@ -2414,19 +2650,16 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
mcp->mb[11], mcp->mb[12]);
if (cur_xchg_cnt)
*cur_xchg_cnt = mcp->mb[3];
if (orig_xchg_cnt)
*orig_xchg_cnt = mcp->mb[6];
if (cur_iocb_cnt)
*cur_iocb_cnt = mcp->mb[7];
if (orig_iocb_cnt)
*orig_iocb_cnt = mcp->mb[10];
if (vha->hw->flags.npiv_supported && max_npiv_vports)
*max_npiv_vports = mcp->mb[11];
if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) ||
IS_QLA27XX(vha->hw)) && max_fcfs)
*max_fcfs = mcp->mb[12];
ha->orig_fw_tgt_xcb_count = mcp->mb[1];
ha->cur_fw_tgt_xcb_count = mcp->mb[2];
ha->cur_fw_xcb_count = mcp->mb[3];
ha->orig_fw_xcb_count = mcp->mb[6];
ha->cur_fw_iocb_count = mcp->mb[7];
ha->orig_fw_iocb_count = mcp->mb[10];
if (ha->flags.npiv_supported)
ha->max_npiv_vports = mcp->mb[11];
if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
ha->fw_max_fcf_count = mcp->mb[12];
}
return (rval);


@ -221,6 +221,18 @@ MODULE_PARM_DESC(ql2xmdenable,
"0 - MiniDump disabled. "
"1 (Default) - MiniDump enabled.");
int ql2xexlogins = 0;
module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xexlogins,
"Number of extended logins. "
"0 (Default) - Disabled.");
int ql2xexchoffld = 0;
module_param(ql2xexchoffld, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xexchoffld,
"Number of exchanges to offload. "
"0 (Default) - Disabled.");
/*
* SCSI host template entry points
*/
@ -2324,6 +2336,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->tgt.enable_class_2 = ql2xenableclass2;
INIT_LIST_HEAD(&ha->tgt.q_full_list);
spin_lock_init(&ha->tgt.q_full_lock);
spin_lock_init(&ha->tgt.sess_lock);
spin_lock_init(&ha->tgt.atio_lock);
/* Clear our data area */
ha->bars = bars;
@ -2468,7 +2483,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_83XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
rsp_length = RESPONSE_ENTRY_CNT_83XX;
ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
@ -2498,8 +2513,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->portnum = PCI_FUNC(ha->pdev->devfn);
ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
req_length = REQUEST_ENTRY_CNT_83XX;
rsp_length = RESPONSE_ENTRY_CNT_83XX;
ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
@ -3128,6 +3143,14 @@ qla2x00_remove_one(struct pci_dev *pdev)
base_vha->flags.online = 0;
/* free DMA memory */
if (ha->exlogin_buf)
qla2x00_free_exlogin_buffer(ha);
/* free DMA memory */
if (ha->exchoffld_buf)
qla2x00_free_exchoffld_buffer(ha);
qla2x00_destroy_deferred_work(ha);
qlt_remove_target(ha, base_vha);
@ -3587,6 +3610,140 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
return -ENOMEM;
}
int
qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
{
int rval;
uint16_t size, max_cnt, temp;
struct qla_hw_data *ha = vha->hw;
/* Return if we don't need to allocate any extended logins */
if (!ql2xexlogins)
return QLA_SUCCESS;
ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins);
max_cnt = 0;
rval = qla_get_exlogin_status(vha, &size, &max_cnt);
if (rval != QLA_SUCCESS) {
ql_log_pci(ql_log_fatal, ha->pdev, 0xd029,
"Failed to get exlogin status.\n");
return rval;
}
temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins;
ha->exlogin_size = (size * temp);
ql_log(ql_log_info, vha, 0xd024,
"EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n",
max_cnt, size, temp);
ql_log(ql_log_info, vha, 0xd025, "EXLOGIN: requested size=0x%x\n",
ha->exlogin_size);
/* Get consistent memory for extended logins */
ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev,
ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL);
if (!ha->exlogin_buf) {
ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a,
"Failed to allocate memory for exlogin_buf_dma.\n");
return -ENOMEM;
}
/* Now configure the dma buffer */
rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma);
if (rval) {
ql_log(ql_log_fatal, vha, 0x00cf,
"Setup extended login buffer ****FAILED****.\n");
qla2x00_free_exlogin_buffer(ha);
}
return rval;
}
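A worked sizing example for the function above (all numbers assumed): if
the firmware reports size = 256 bytes per login and max_cnt = 4096 while
ql2xexlogins = 8000, temp clamps to 4096 and ha->exlogin_size becomes
4096 * 256 = 1 MiB of coherent DMA memory.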
/*
* qla2x00_free_exlogin_buffer
*
* Input:
* ha = adapter block pointer
*/
void
qla2x00_free_exlogin_buffer(struct qla_hw_data *ha)
{
if (ha->exlogin_buf) {
dma_free_coherent(&ha->pdev->dev, ha->exlogin_size,
ha->exlogin_buf, ha->exlogin_buf_dma);
ha->exlogin_buf = NULL;
ha->exlogin_size = 0;
}
}
int
qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
{
int rval;
uint16_t size, max_cnt, temp;
struct qla_hw_data *ha = vha->hw;
/* Return if we don't need to allocate any exchange offload buffers */
if (!ql2xexchoffld)
return QLA_SUCCESS;
ql_log(ql_log_info, vha, 0xd014,
"Exchange offload count: %d.\n", ql2xexchoffld);
max_cnt = 0;
rval = qla_get_exchoffld_status(vha, &size, &max_cnt);
if (rval != QLA_SUCCESS) {
ql_log_pci(ql_log_fatal, ha->pdev, 0xd012,
"Failed to get exlogin status.\n");
return rval;
}
temp = (ql2xexchoffld > max_cnt) ? max_cnt : ql2xexchoffld;
ha->exchoffld_size = (size * temp);
ql_log(ql_log_info, vha, 0xd016,
"Exchange offload: max_count=%d, buffers=0x%x, total=%d.\n",
max_cnt, size, temp);
ql_log(ql_log_info, vha, 0xd017,
"Exchange Buffers requested size = 0x%x\n", ha->exchoffld_size);
/* Get consistent memory for exchange offload */
ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev,
ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
if (!ha->exchoffld_buf) {
ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
"Failed to allocate memory for exchoffld_buf_dma.\n");
return -ENOMEM;
}
/* Now configure the dma buffer */
rval = qla_set_exchoffld_mem_cfg(vha, ha->exchoffld_buf_dma);
if (rval) {
ql_log(ql_log_fatal, vha, 0xd02e,
"Setup exchange offload buffer ****FAILED****.\n");
qla2x00_free_exchoffld_buffer(ha);
}
return rval;
}
/*
* qla2x00_free_exchoffld_buffer
*
* Input:
* ha = adapter block pointer
*/
void
qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha)
{
if (ha->exchoffld_buf) {
dma_free_coherent(&ha->pdev->dev, ha->exchoffld_size,
ha->exchoffld_buf, ha->exchoffld_buf_dma);
ha->exchoffld_buf = NULL;
ha->exchoffld_size = 0;
}
}
/*
* qla2x00_free_fw_dump
* Frees fw dump stuff.
@ -3766,6 +3923,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->list);
INIT_LIST_HEAD(&vha->qla_cmd_list);
INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
INIT_LIST_HEAD(&vha->logo_list);
INIT_LIST_HEAD(&vha->plogi_ack_list);
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);

File diff suppressed because it is too large.


@ -787,7 +787,7 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
#define QLA_TGT_STATE_NEED_DATA 1 /* target needs data to continue */
#define QLA_TGT_STATE_DATA_IN 2 /* Data arrived + target processing */
#define QLA_TGT_STATE_PROCESSED 3 /* target done processing */
#define QLA_TGT_STATE_ABORTED 4 /* Command aborted */
/* Special handles */
#define QLA_TGT_NULL_HANDLE 0
@ -835,6 +835,7 @@ struct qla_tgt {
* HW lock.
*/
int irq_cmd_count;
int atio_irq_cmd_count;
int datasegs_per_cmd, datasegs_per_cont, sg_tablesize;
@ -883,6 +884,7 @@ struct qla_tgt {
struct qla_tgt_sess_op {
struct scsi_qla_host *vha;
uint32_t chip_reset;
struct atio_from_isp atio;
struct work_struct work;
struct list_head cmd_list;
@ -896,6 +898,19 @@ enum qla_sess_deletion {
QLA_SESS_DELETION_IN_PROGRESS = 2,
};
typedef enum {
QLT_PLOGI_LINK_SAME_WWN,
QLT_PLOGI_LINK_CONFLICT,
QLT_PLOGI_LINK_MAX
} qlt_plogi_link_t;
typedef struct {
struct list_head list;
struct imm_ntfy_from_isp iocb;
port_id_t id;
int ref_count;
} qlt_plogi_ack_t;
/*
* Equivalent to IT Nexus (Initiator-Target)
*/
@ -907,8 +922,8 @@ struct qla_tgt_sess {
unsigned int deleted:2;
unsigned int local:1;
unsigned int logout_on_delete:1;
unsigned int plogi_ack_needed:1;
unsigned int keep_nport_handle:1;
unsigned int send_els_logo:1;
unsigned char logout_completed;
@ -925,9 +940,7 @@ struct qla_tgt_sess {
uint8_t port_name[WWN_SIZE];
struct work_struct free_work;
union {
struct imm_ntfy_from_isp tm_iocb;
};
qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
};
struct qla_tgt_cmd {
@ -949,6 +962,7 @@ struct qla_tgt_cmd {
unsigned int term_exchg:1;
unsigned int cmd_sent_to_fw:1;
unsigned int cmd_in_wq:1;
unsigned int aborted:1;
struct scatterlist *sg; /* cmd data buffer SG vector */
int sg_cnt; /* SG segments count */
@ -1120,6 +1134,14 @@ static inline uint32_t sid_to_key(const uint8_t *s_id)
return key;
}
static inline void sid_to_portid(const uint8_t *s_id, port_id_t *p)
{
memset(p, 0, sizeof(*p));
p->b.domain = s_id[0];
p->b.area = s_id[1];
p->b.al_pa = s_id[2];
}
/*
* Exported symbols from qla_target.c LLD logic used by qla2xxx code..
*/
@ -1135,7 +1157,7 @@ extern void qlt_enable_vha(struct scsi_qla_host *);
extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *);
extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *, uint8_t);
extern void qlt_24xx_config_rings(struct scsi_qla_host *);
extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *,
struct nvram_24xx *);


@ -284,6 +284,7 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work)
WARN_ON(cmd->cmd_flags & BIT_16);
cmd->vha->tgt_counters.qla_core_ret_sta_ctio++;
cmd->cmd_flags |= BIT_16;
transport_generic_free_cmd(&cmd->se_cmd, 0);
}
@ -295,9 +296,10 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work)
*/
static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
{
cmd->vha->tgt_counters.core_qla_free_cmd++;
cmd->cmd_in_wq = 1;
INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
queue_work(tcm_qla2xxx_free_wq, &cmd->work);
queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
}
/*
@ -342,9 +344,9 @@ static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
BUG_ON(!sess);
vha = sess->vha;
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
target_sess_cmd_list_set_waiting(se_sess);
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
return 1;
}
@ -358,9 +360,9 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess)
BUG_ON(!sess);
vha = sess->vha;
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
qlt_unreg_sess(sess);
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
@ -454,6 +456,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
return -EINVAL;
}
cmd->vha->tgt_counters.qla_core_sbt_cmd++;
return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
cmd->unpacked_lun, data_length, fcp_task_attr,
data_dir, flags);
@ -469,6 +472,7 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
*/
cmd->cmd_in_wq = 0;
cmd->cmd_flags |= BIT_11;
cmd->vha->tgt_counters.qla_core_ret_ctio++;
if (!cmd->write_data_transferred) {
/*
* Check if se_cmd has already been aborted via LUN_RESET, and
@ -500,7 +504,7 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
cmd->cmd_flags |= BIT_10;
cmd->cmd_in_wq = 1;
INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
queue_work(tcm_qla2xxx_free_wq, &cmd->work);
queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
}
static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
@ -643,7 +647,7 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
/*
* Expected to be called with struct qla_hw_data->hardware_lock held
* Expected to be called with struct qla_hw_data->tgt.sess_lock held
*/
static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
{
@ -697,13 +701,13 @@ static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
if (!sess)
return;
assert_spin_locked(&sess->vha->hw->hardware_lock);
assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session);
}
static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
{
assert_spin_locked(&sess->vha->hw->hardware_lock);
assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
target_sess_cmd_list_set_waiting(sess->se_sess);
}
@ -1077,7 +1081,7 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
}
/*
* Expected to be called with struct qla_hw_data->hardware_lock held
* Expected to be called with struct qla_hw_data->tgt.sess_lock held
*/
static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
scsi_qla_host_t *vha,
@ -1116,7 +1120,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
}
/*
* Expected to be called with struct qla_hw_data->hardware_lock held
* Expected to be called with struct qla_hw_data->tgt.sess_lock held
*/
static void tcm_qla2xxx_set_sess_by_s_id(
struct tcm_qla2xxx_lport *lport,
@ -1182,7 +1186,7 @@ static void tcm_qla2xxx_set_sess_by_s_id(
}
/*
* Expected to be called with struct qla_hw_data->hardware_lock held
* Expected to be called with struct qla_hw_data->tgt.sess_lock held
*/
static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
scsi_qla_host_t *vha,
@ -1221,7 +1225,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
}
/*
* Expected to be called with struct qla_hw_data->hardware_lock held
* Expected to be called with struct qla_hw_data->tgt.sess_lock held
*/
static void tcm_qla2xxx_set_sess_by_loop_id(
struct tcm_qla2xxx_lport *lport,
@ -1285,7 +1289,7 @@ static void tcm_qla2xxx_set_sess_by_loop_id(
}
/*
* Should always be called with qla_hw_data->hardware_lock held.
* Should always be called with qla_hw_data->tgt.sess_lock held.
*/
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess)
@ -1353,7 +1357,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
struct qla_tgt_sess *sess = qla_tgt_sess;
unsigned char port_name[36];
unsigned long flags;
int num_tags = (ha->fw_xcb_count) ? ha->fw_xcb_count :
int num_tags = (ha->cur_fw_xcb_count) ? ha->cur_fw_xcb_count :
TCM_QLA2XXX_DEFAULT_TAGS;
lport = vha->vha_tgt.target_lport_ptr;
@ -1401,12 +1405,12 @@ static int tcm_qla2xxx_check_initiator_node_acl(
* And now setup the new se_nacl and session pointers into our HW lport
* mappings for fabric S_ID and LOOP_ID.
*/
spin_lock_irqsave(&ha->hardware_lock, flags);
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
qla_tgt_sess, s_id);
tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
qla_tgt_sess, loop_id);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
/*
* Finally register the new FC Nexus with TCM
*/

View file

@ -1333,7 +1333,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
/*
* Check if a delayed TASK_ABORTED status needs to
* be sent now if the ISCSI_FLAG_CMD_FINAL has been
* received with the unsolicitied data out.
* received with the unsolicited data out.
*/
if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
iscsit_stop_dataout_timer(cmd);
@ -3435,7 +3435,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
if ((tpg->tpg_attrib.generate_node_acls == 0) &&
(tpg->tpg_attrib.demo_mode_discovery == 0) &&
(!core_tpg_get_initiator_node_acl(&tpg->tpg_se_tpg,
(!target_tpg_has_node_acl(&tpg->tpg_se_tpg,
cmd->conn->sess->sess_ops->InitiatorName))) {
continue;
}
@ -4459,9 +4459,6 @@ int iscsit_close_connection(
return 0;
}
spin_unlock_bh(&sess->conn_lock);
return 0;
}
int iscsit_close_session(struct iscsi_session *sess)

View file

@ -725,11 +725,8 @@ static ssize_t lio_target_nacl_cmdsn_depth_store(struct config_item *item,
if (iscsit_get_tpg(tpg) < 0)
return -EINVAL;
/*
* iscsit_tpg_set_initiator_node_queue_depth() assumes force=1
*/
ret = iscsit_tpg_set_initiator_node_queue_depth(tpg,
config_item_name(acl_ci), cmdsn_depth, 1);
ret = core_tpg_set_initiator_node_queue_depth(se_nacl, cmdsn_depth);
pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for"
"InitiatorName: %s\n", config_item_name(wwn_ci),
@ -1593,28 +1590,30 @@ static int lio_tpg_check_prot_fabric_only(
}
/*
* Called with spin_lock_bh(struct se_portal_group->session_lock) held..
*
* Also, this function calls iscsit_inc_session_usage_count() on the
* This function calls iscsit_inc_session_usage_count() on the
* struct iscsi_session in question.
*/
static int lio_tpg_shutdown_session(struct se_session *se_sess)
{
struct iscsi_session *sess = se_sess->fabric_sess_ptr;
struct se_portal_group *se_tpg = &sess->tpg->tpg_se_tpg;
spin_lock_bh(&se_tpg->session_lock);
spin_lock(&sess->conn_lock);
if (atomic_read(&sess->session_fall_back_to_erl0) ||
atomic_read(&sess->session_logout) ||
(sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess->conn_lock);
spin_unlock_bh(&se_tpg->session_lock);
return 0;
}
atomic_set(&sess->session_reinstatement, 1);
spin_unlock(&sess->conn_lock);
iscsit_stop_time2retain_timer(sess);
iscsit_stop_session(sess, 1, 1);
spin_unlock_bh(&se_tpg->session_lock);
iscsit_stop_session(sess, 1, 1);
return 1;
}

View file

@ -160,8 +160,7 @@ static int iscsit_handle_r2t_snack(
" protocol error.\n", cmd->init_task_tag, begrun,
(begrun + runlength), cmd->acked_data_sn);
return iscsit_reject_cmd(cmd,
ISCSI_REASON_PROTOCOL_ERROR, buf);
return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
}
if (runlength) {
@ -628,8 +627,8 @@ int iscsit_dataout_datapduinorder_no_fbit(
if (cmd->pdu_list[i].seq_no == pdu->seq_no) {
if (!first_pdu)
first_pdu = &cmd->pdu_list[i];
xfer_len += cmd->pdu_list[i].length;
pdu_count++;
xfer_len += cmd->pdu_list[i].length;
pdu_count++;
} else if (pdu_count)
break;
}

View file

@ -1668,7 +1668,7 @@ void iscsi_set_session_parameters(
param->value);
} else if (!strcmp(param->name, INITIALR2T)) {
ops->InitialR2T = !strcmp(param->value, YES);
pr_debug("InitialR2T: %s\n",
pr_debug("InitialR2T: %s\n",
param->value);
} else if (!strcmp(param->name, IMMEDIATEDATA)) {
ops->ImmediateData = !strcmp(param->value, YES);

View file

@ -82,7 +82,7 @@ int iscsit_tmr_task_warm_reset(
pr_err("TMR Opcode TARGET_WARM_RESET authorization"
" failed for Initiator Node: %s\n",
sess->se_sess->se_node_acl->initiatorname);
return -1;
return -1;
}
/*
* Do the real work in transport_generic_do_tmr().

View file

@ -590,16 +590,6 @@ int iscsit_tpg_del_network_portal(
return iscsit_tpg_release_np(tpg_np, tpg, np);
}
int iscsit_tpg_set_initiator_node_queue_depth(
struct iscsi_portal_group *tpg,
unsigned char *initiatorname,
u32 queue_depth,
int force)
{
return core_tpg_set_initiator_node_queue_depth(&tpg->tpg_se_tpg,
initiatorname, queue_depth, force);
}
int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
{
unsigned char buf1[256], buf2[256], *none = NULL;

View file

@ -26,8 +26,6 @@ extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_gr
int);
extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *,
struct iscsi_tpg_np *);
extern int iscsit_tpg_set_initiator_node_queue_depth(struct iscsi_portal_group *,
unsigned char *, u32, int);
extern int iscsit_ta_authentication(struct iscsi_portal_group *, u32);
extern int iscsit_ta_login_timeout(struct iscsi_portal_group *, u32);
extern int iscsit_ta_netif_timeout(struct iscsi_portal_group *, u32);

View file

@ -1036,12 +1036,26 @@ static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
return -EINVAL;
}
static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
char *page)
{
struct se_portal_group *se_tpg = to_tpg(item);
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
tl_hba->sh->host_no, tl_tpg->tl_tpgt);
}
CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);
static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
&tcm_loop_tpg_attr_nexus,
&tcm_loop_tpg_attr_transport_status,
&tcm_loop_tpg_attr_address,
NULL,
};

View file

@ -39,8 +39,6 @@
#include "sbp_target.h"
static const struct target_core_fabric_ops sbp_ops;
/* FireWire address region for management and command block address handlers */
static const struct fw_address_region sbp_register_region = {
.start = CSR_REGISTER_BASE + 0x10000,

View file

@ -278,7 +278,7 @@ EXPORT_SYMBOL(target_depend_item);
void target_undepend_item(struct config_item *item)
{
return configfs_undepend_item(&target_core_fabrics, item);
return configfs_undepend_item(item);
}
EXPORT_SYMBOL(target_undepend_item);
@ -499,6 +499,7 @@ DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_lba_count);
DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_block_desc_count);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name) \
@ -548,7 +549,8 @@ static ssize_t _name##_store(struct config_item *item, const char *page,\
size_t count) \
{ \
printk_once(KERN_WARNING \
"ignoring deprecated ##_name## attribute\n"); \
"ignoring deprecated %s attribute\n", \
__stringify(_name)); \
return count; \
}
@ -866,6 +868,39 @@ static ssize_t emulate_rest_reord_store(struct config_item *item,
return count;
}
static ssize_t unmap_zeroes_data_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
bool flag;
int ret;
ret = strtobool(page, &flag);
if (ret < 0)
return ret;
if (da->da_dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device"
" unmap_zeroes_data while export_count is %d\n",
da->da_dev, da->da_dev->export_count);
return -EINVAL;
}
/*
* We expect this value to be non-zero when generic Block Layer
* Discard support is detected in iblock_configure_device().
*/
if (flag && !da->max_unmap_block_desc_count) {
pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set"
" because max_unmap_block_desc_count is zero\n",
da->da_dev);
return -ENOSYS;
}
da->unmap_zeroes_data = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
da->da_dev, flag);
return 0;
}
/*
* Note, this can only be called on unexported SE Device Object.
*/
@ -998,6 +1033,7 @@ CONFIGFS_ATTR(, max_unmap_lba_count);
CONFIGFS_ATTR(, max_unmap_block_desc_count);
CONFIGFS_ATTR(, unmap_granularity);
CONFIGFS_ATTR(, unmap_granularity_alignment);
CONFIGFS_ATTR(, unmap_zeroes_data);
CONFIGFS_ATTR(, max_write_same_len);
/*
@ -1034,6 +1070,7 @@ struct configfs_attribute *sbc_attrib_attrs[] = {
&attr_max_unmap_block_desc_count,
&attr_unmap_granularity,
&attr_unmap_granularity_alignment,
&attr_unmap_zeroes_data,
&attr_max_write_same_len,
NULL,
};
@ -1980,14 +2017,14 @@ static ssize_t target_dev_lba_map_store(struct config_item *item,
struct se_device *dev = to_device(item);
struct t10_alua_lba_map *lba_map = NULL;
struct list_head lba_list;
char *map_entries, *ptr;
char *map_entries, *orig, *ptr;
char state;
int pg_num = -1, pg;
int ret = 0, num = 0, pg_id, alua_state;
unsigned long start_lba = -1, end_lba = -1;
unsigned long segment_size = -1, segment_mult = -1;
map_entries = kstrdup(page, GFP_KERNEL);
orig = map_entries = kstrdup(page, GFP_KERNEL);
if (!map_entries)
return -ENOMEM;
@ -2085,7 +2122,7 @@ static ssize_t target_dev_lba_map_store(struct config_item *item,
} else
core_alua_set_lba_map(dev, &lba_list,
segment_size, segment_mult);
kfree(map_entries);
kfree(orig);
return count;
}
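
The leak fixed above in miniature: parsing helpers such as strsep() advance the cursor pointer, so the pointer returned by kstrdup() must be saved separately and be the one passed to kfree(). A sketch under that assumption:

static int example_parse(const char *page)
{
	char *orig, *ptr, *tok;

	orig = ptr = kstrdup(page, GFP_KERNEL);
	if (!orig)
		return -ENOMEM;

	while ((tok = strsep(&ptr, "\n")) != NULL)
		;	/* parse tok here */

	kfree(orig);	/* not ptr, which strsep() has advanced */
	return 0;
}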

View file

@ -813,6 +813,8 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
dev->dev_attrib.unmap_granularity_alignment =
DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
dev->dev_attrib.unmap_zeroes_data =
DA_UNMAP_ZEROES_DATA_DEFAULT;
dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
xcopy_lun = &dev->xcopy_lun;

View file

@ -138,6 +138,8 @@ static int iblock_configure_device(struct se_device *dev)
q->limits.discard_granularity >> 9;
dev->dev_attrib.unmap_granularity_alignment =
q->limits.discard_alignment;
dev->dev_attrib.unmap_zeroes_data =
q->limits.discard_zeroes_data;
pr_debug("IBLOCK: BLOCK Discard support available,"
" disabled by default\n");

View file

@ -1457,8 +1457,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
{
struct se_lun_acl *lun_acl;
struct se_node_acl *nacl;
struct se_portal_group *tpg;
/*
* For nacl->dynamic_node_acl=1
*/
@ -1467,17 +1466,13 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
if (!lun_acl)
return 0;
nacl = lun_acl->se_lun_nacl;
tpg = nacl->se_tpg;
return target_depend_item(&lun_acl->se_lun_group.cg_item);
}
static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
{
struct se_lun_acl *lun_acl;
struct se_node_acl *nacl;
struct se_portal_group *tpg;
/*
* For nacl->dynamic_node_acl=1
*/
@ -1487,8 +1482,6 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
kref_put(&se_deve->pr_kref, target_pr_kref_release);
return;
}
nacl = lun_acl->se_lun_nacl;
tpg = nacl->se_tpg;
target_undepend_item(&lun_acl->se_lun_group.cg_item);
kref_put(&se_deve->pr_kref, target_pr_kref_release);

View file

@ -141,9 +141,17 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
* Set Thin Provisioning Enable bit following sbc3r22 in section
* READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
*/
if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) {
buf[14] |= 0x80;
/*
* LBPRZ signifies that zeroes will be read back from an LBA after
* an UNMAP or WRITE SAME w/ unmap bit (sbc3r36 5.16.2)
*/
if (dev->dev_attrib.unmap_zeroes_data)
buf[14] |= 0x40;
}
rbuf = transport_kmap_data_sg(cmd);
if (rbuf) {
memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
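
For reference, how an initiator-side consumer would decode the byte set above (a sketch; sbc3r36 defines bit 7 of READ CAPACITY (16) byte 14 as LBPME and bit 6 as LBPRZ):

static void example_decode_rc16_byte14(const unsigned char *buf)
{
	bool lbpme = buf[14] & 0x80;	/* thin provisioning enabled */
	bool lbprz = buf[14] & 0x40;	/* reads after UNMAP return zeroes */

	pr_debug("LBPME=%d LBPRZ=%d\n", lbpme, lbprz);
}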

View file

@ -635,6 +635,18 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
if (dev->dev_attrib.emulate_tpws != 0)
buf[5] |= 0x40 | 0x20;
/*
* The unmap_zeroes_data flag means that the underlying device supports
* REQ_DISCARD and has the discard_zeroes_data bit set. This satisfies
* the SBC requirements for LBPRZ, meaning that a subsequent read
* will return zeroes after an UNMAP or WRITE SAME (16) to an LBA.
* See sbc4r36 6.6.4.
*/
if (((dev->dev_attrib.emulate_tpu != 0) ||
(dev->dev_attrib.emulate_tpws != 0)) &&
(dev->dev_attrib.unmap_zeroes_data != 0))
buf[5] |= 0x04;
return 0;
}

View file

@ -201,7 +201,7 @@ static void core_tmr_drain_tmr_list(
/*
* If this function was called with a valid pr_res_key
* parameter (eg: for PROUT PREEMPT_AND_ABORT service action
* skip non regisration key matching TMRs.
* skip non registration key matching TMRs.
*/
if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
continue;

View file

@ -75,9 +75,21 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
unsigned char *initiatorname)
{
struct se_node_acl *acl;
/*
* Obtain se_node_acl->acl_kref using fabric driver provided
* initiatorname[] during node acl endpoint lookup driven by
* new se_session login.
*
* The reference is held until se_session shutdown -> release
* occurs via fabric driver invoked transport_deregister_session()
* or transport_free_session() code.
*/
mutex_lock(&tpg->acl_node_mutex);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
if (acl) {
if (!kref_get_unless_zero(&acl->acl_kref))
acl = NULL;
}
mutex_unlock(&tpg->acl_node_mutex);
return acl;
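
The kref_get_unless_zero() lookup above in isolation: a reference is handed out only if the refcount has not already dropped to zero, so a concurrent final put can never be "resurrected". A generic sketch of the pattern (the object type and list are illustrative):

struct obj {
	struct kref kref;
	struct list_head list;
};

static struct obj *example_lookup_get(struct list_head *head,
				      struct mutex *lock)
{
	struct obj *o, *found = NULL;

	mutex_lock(lock);
	list_for_each_entry(o, head, list) {
		if (kref_get_unless_zero(&o->kref)) {
			found = o;
			break;
		}
	}
	mutex_unlock(lock);
	return found;	/* caller must kref_put() when done */
}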
@ -157,28 +169,25 @@ void core_tpg_add_node_to_devs(
mutex_unlock(&tpg->tpg_lun_mutex);
}
/* core_set_queue_depth_for_node():
*
*
*/
static int core_set_queue_depth_for_node(
struct se_portal_group *tpg,
struct se_node_acl *acl)
static void
target_set_nacl_queue_depth(struct se_portal_group *tpg,
struct se_node_acl *acl, u32 queue_depth)
{
acl->queue_depth = queue_depth;
if (!acl->queue_depth) {
pr_err("Queue depth for %s Initiator Node: %s is 0,"
pr_warn("Queue depth for %s Initiator Node: %s is 0,"
"defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
acl->initiatorname);
acl->queue_depth = 1;
}
return 0;
}
static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
const unsigned char *initiatorname)
{
struct se_node_acl *acl;
u32 queue_depth;
acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
GFP_KERNEL);
@ -193,24 +202,20 @@ static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
spin_lock_init(&acl->nacl_sess_lock);
mutex_init(&acl->lun_entry_mutex);
atomic_set(&acl->acl_pr_ref_count, 0);
if (tpg->se_tpg_tfo->tpg_get_default_depth)
acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
else
acl->queue_depth = 1;
queue_depth = 1;
target_set_nacl_queue_depth(tpg, acl, queue_depth);
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
acl->se_tpg = tpg;
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
tpg->se_tpg_tfo->set_default_node_attributes(acl);
if (core_set_queue_depth_for_node(tpg, acl) < 0)
goto out_free_acl;
return acl;
out_free_acl:
kfree(acl);
return NULL;
}
static void target_add_node_acl(struct se_node_acl *acl)
@ -219,7 +224,6 @@ static void target_add_node_acl(struct se_node_acl *acl)
mutex_lock(&tpg->acl_node_mutex);
list_add_tail(&acl->acl_list, &tpg->acl_node_list);
tpg->num_node_acls++;
mutex_unlock(&tpg->acl_node_mutex);
pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
@ -232,6 +236,25 @@ static void target_add_node_acl(struct se_node_acl *acl)
acl->initiatorname);
}
bool target_tpg_has_node_acl(struct se_portal_group *tpg,
const char *initiatorname)
{
struct se_node_acl *acl;
bool found = false;
mutex_lock(&tpg->acl_node_mutex);
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
if (!strcmp(acl->initiatorname, initiatorname)) {
found = true;
break;
}
}
mutex_unlock(&tpg->acl_node_mutex);
return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);
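
A sketch of a discovery-time caller of the new helper (the iscsi-target sendtargets change earlier in this merge is the real user; the wrapper below is hypothetical):

static bool example_tpg_visible(struct se_portal_group *se_tpg,
				const char *initiator_name)
{
	/* Only report the TPG if an explicit NodeACL exists for it. */
	return target_tpg_has_node_acl(se_tpg, initiator_name);
}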
struct se_node_acl *core_tpg_check_initiator_node_acl(
struct se_portal_group *tpg,
unsigned char *initiatorname)
@ -248,6 +271,15 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
acl = target_alloc_node_acl(tpg, initiatorname);
if (!acl)
return NULL;
/*
* When allocating a dynamically generated node_acl, go ahead
* and take the extra kref now before returning to the fabric
* driver caller.
*
* Note this reference will be released at session shutdown
* time within transport_free_session() code.
*/
kref_get(&acl->acl_kref);
acl->dynamic_node_acl = 1;
/*
@ -318,7 +350,6 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
acl->dynamic_node_acl = 0;
}
list_del(&acl->acl_list);
tpg->num_node_acls--;
mutex_unlock(&tpg->acl_node_mutex);
spin_lock_irqsave(&acl->nacl_sess_lock, flags);
@ -329,7 +360,8 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
if (sess->sess_tearing_down != 0)
continue;
target_get_session(sess);
if (!target_get_session(sess))
continue;
list_move(&sess->sess_acl_list, &sess_list);
}
spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
@ -366,108 +398,52 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
*
*/
int core_tpg_set_initiator_node_queue_depth(
struct se_portal_group *tpg,
unsigned char *initiatorname,
u32 queue_depth,
int force)
struct se_node_acl *acl,
u32 queue_depth)
{
struct se_session *sess, *init_sess = NULL;
struct se_node_acl *acl;
LIST_HEAD(sess_list);
struct se_portal_group *tpg = acl->se_tpg;
struct se_session *sess, *sess_tmp;
unsigned long flags;
int dynamic_acl = 0;
mutex_lock(&tpg->acl_node_mutex);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
if (!acl) {
pr_err("Access Control List entry for %s Initiator"
" Node %s does not exists for TPG %hu, ignoring"
" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
mutex_unlock(&tpg->acl_node_mutex);
return -ENODEV;
}
if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0;
dynamic_acl = 1;
}
mutex_unlock(&tpg->acl_node_mutex);
spin_lock_irqsave(&tpg->session_lock, flags);
list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
if (sess->se_node_acl != acl)
continue;
if (!force) {
pr_err("Unable to change queue depth for %s"
" Initiator Node: %s while session is"
" operational. To forcefully change the queue"
" depth and force session reinstatement"
" use the \"force=1\" parameter.\n",
tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
spin_unlock_irqrestore(&tpg->session_lock, flags);
mutex_lock(&tpg->acl_node_mutex);
if (dynamic_acl)
acl->dynamic_node_acl = 1;
mutex_unlock(&tpg->acl_node_mutex);
return -EEXIST;
}
/*
* Determine if the session needs to be closed by our context.
*/
if (!tpg->se_tpg_tfo->shutdown_session(sess))
continue;
init_sess = sess;
break;
}
int rc;
/*
* User has requested to change the queue depth for an Initiator Node.
* Change the value in the Node's struct se_node_acl, and call
* core_set_queue_depth_for_node() to add the requested queue depth.
*
* Finally call tpg->se_tpg_tfo->close_session() to force session
* reinstatement to occur if there is an active session for the
* $FABRIC_MOD Initiator Node in question.
* target_set_nacl_queue_depth() to set the new queue depth.
*/
acl->queue_depth = queue_depth;
target_set_nacl_queue_depth(tpg, acl, queue_depth);
spin_lock_irqsave(&acl->nacl_sess_lock, flags);
list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
sess_acl_list) {
if (sess->sess_tearing_down != 0)
continue;
if (!target_get_session(sess))
continue;
spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
if (core_set_queue_depth_for_node(tpg, acl) < 0) {
spin_unlock_irqrestore(&tpg->session_lock, flags);
/*
* Force session reinstatement if
* core_set_queue_depth_for_node() failed, because we assume
* the $FABRIC_MOD has already the set session reinstatement
* bit from tpg->se_tpg_tfo->shutdown_session() called above.
* Finally call tpg->se_tpg_tfo->close_session() to force session
* reinstatement to occur if there is an active session for the
* $FABRIC_MOD Initiator Node in question.
*/
if (init_sess)
tpg->se_tpg_tfo->close_session(init_sess);
mutex_lock(&tpg->acl_node_mutex);
if (dynamic_acl)
acl->dynamic_node_acl = 1;
mutex_unlock(&tpg->acl_node_mutex);
return -EINVAL;
rc = tpg->se_tpg_tfo->shutdown_session(sess);
target_put_session(sess);
if (!rc) {
spin_lock_irqsave(&acl->nacl_sess_lock, flags);
continue;
}
target_put_session(sess);
spin_lock_irqsave(&acl->nacl_sess_lock, flags);
}
spin_unlock_irqrestore(&tpg->session_lock, flags);
/*
* If the $FABRIC_MOD session for the Initiator Node ACL exists,
* forcefully shutdown the $FABRIC_MOD session/nexus.
*/
if (init_sess)
tpg->se_tpg_tfo->close_session(init_sess);
spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
pr_debug("Successfully changed queue depth to: %d for Initiator"
" Node: %s on %s Target Portal Group: %u\n", queue_depth,
initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
acl->initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg));
mutex_lock(&tpg->acl_node_mutex);
if (dynamic_acl)
acl->dynamic_node_acl = 1;
mutex_unlock(&tpg->acl_node_mutex);
return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
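
With the initiatorname and force parameters gone, a fabric configfs store reduces to parse-and-call (compare the lio_target change earlier in this merge); a sketch with hypothetical naming:

static ssize_t example_depth_store(struct se_node_acl *se_nacl,
				   const char *page, size_t count)
{
	u32 depth;
	int ret;

	ret = kstrtou32(page, 0, &depth);
	if (ret < 0)
		return ret;

	ret = core_tpg_set_initiator_node_queue_depth(se_nacl, depth);
	return ret ? ret : count;
}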
@ -595,7 +571,6 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
*/
list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
list_del(&nacl->acl_list);
se_tpg->num_node_acls--;
core_tpg_wait_for_nacl_pr_ref(nacl);
core_free_device_list_for_node(nacl, se_tpg);

View file

@ -341,7 +341,6 @@ void __transport_register_session(
&buf[0], PR_REG_ISID_LEN);
se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
}
kref_get(&se_nacl->acl_kref);
spin_lock_irq(&se_nacl->nacl_sess_lock);
/*
@ -384,9 +383,9 @@ static void target_release_session(struct kref *kref)
se_tpg->se_tpg_tfo->close_session(se_sess);
}
void target_get_session(struct se_session *se_sess)
int target_get_session(struct se_session *se_sess)
{
kref_get(&se_sess->sess_kref);
return kref_get_unless_zero(&se_sess->sess_kref);
}
EXPORT_SYMBOL(target_get_session);
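
Callers must now check the return value, since a session already in teardown can no longer be pinned. A minimal sketch:

static bool example_pin_session(struct se_session *sess)
{
	if (!target_get_session(sess))
		return false;	/* sess_kref already hit zero */

	/* ... safe to reference sess here ... */

	target_put_session(sess);
	return true;
}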
@ -432,6 +431,7 @@ void target_put_nacl(struct se_node_acl *nacl)
{
kref_put(&nacl->acl_kref, target_complete_nacl);
}
EXPORT_SYMBOL(target_put_nacl);
void transport_deregister_session_configfs(struct se_session *se_sess)
{
@ -464,6 +464,15 @@ EXPORT_SYMBOL(transport_deregister_session_configfs);
void transport_free_session(struct se_session *se_sess)
{
struct se_node_acl *se_nacl = se_sess->se_node_acl;
/*
* Drop the se_node_acl->nacl_kref obtained from within
* core_tpg_get_initiator_node_acl().
*/
if (se_nacl) {
se_sess->se_node_acl = NULL;
target_put_nacl(se_nacl);
}
if (se_sess->sess_cmd_map) {
percpu_ida_destroy(&se_sess->sess_tag_pool);
kvfree(se_sess->sess_cmd_map);
@ -478,7 +487,7 @@ void transport_deregister_session(struct se_session *se_sess)
const struct target_core_fabric_ops *se_tfo;
struct se_node_acl *se_nacl;
unsigned long flags;
bool comp_nacl = true, drop_nacl = false;
bool drop_nacl = false;
if (!se_tpg) {
transport_free_session(se_sess);
@ -502,7 +511,6 @@ void transport_deregister_session(struct se_session *se_sess)
if (se_nacl && se_nacl->dynamic_node_acl) {
if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
list_del(&se_nacl->acl_list);
se_tpg->num_node_acls--;
drop_nacl = true;
}
}
@ -511,18 +519,16 @@ void transport_deregister_session(struct se_session *se_sess)
if (drop_nacl) {
core_tpg_wait_for_nacl_pr_ref(se_nacl);
core_free_device_list_for_node(se_nacl, se_tpg);
se_sess->se_node_acl = NULL;
kfree(se_nacl);
comp_nacl = false;
}
pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
se_tpg->se_tpg_tfo->get_fabric_name());
/*
* If last kref is dropping now for an explicit NodeACL, awake sleeping
* ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
* removal context.
* removal context from within transport_free_session() code.
*/
if (se_nacl && comp_nacl)
target_put_nacl(se_nacl);
transport_free_session(se_sess);
}
@ -715,7 +721,10 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
queue_work(target_completion_wq, &cmd->work);
if (cmd->cpuid == -1)
queue_work(target_completion_wq, &cmd->work);
else
queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);
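
The cpuid field (added to struct se_cmd later in this merge) lets a fabric opt in to CPU-affine completion; -1 keeps the old any-CPU behavior. A sketch of how a driver might populate it (illustrative; qla2xxx derives it from MSI-X vector affinity):

static void example_set_completion_cpu(struct se_cmd *se_cmd, bool affine)
{
	/* Valid only where preemption is disabled, e.g. irq context. */
	se_cmd->cpuid = affine ? smp_processor_id() : -1;
}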
@ -1309,7 +1318,7 @@ EXPORT_SYMBOL(target_setup_cmd_from_cdb);
/*
* Used by fabric module frontends to queue tasks directly.
* Many only be used from process context only
* May only be used from process context.
*/
int transport_handle_cdb_direct(
struct se_cmd *cmd)
@ -1582,7 +1591,7 @@ static void target_complete_tmr_failure(struct work_struct *work)
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
unsigned char *sense, u64 unpacked_lun,
void *fabric_tmr_ptr, unsigned char tm_type,
gfp_t gfp, unsigned int tag, int flags)
gfp_t gfp, u64 tag, int flags)
{
struct se_portal_group *se_tpg;
int ret;

View file

@ -152,6 +152,7 @@ static struct genl_family tcmu_genl_family = {
.maxattr = TCMU_ATTR_MAX,
.mcgrps = tcmu_mcgrps,
.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
.netnsok = true,
};
static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
@ -194,7 +195,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{
unsigned long offset = (unsigned long) vaddr & ~PAGE_MASK;
unsigned long offset = offset_in_page(vaddr);
size = round_up(size+offset, PAGE_SIZE);
vaddr -= offset;
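
offset_in_page() is simply the low PAGE_MASK bits of an address, so the conversion above is cosmetic; the equivalence as a sketch:

static void example_offset_equivalence(void *vaddr)
{
	WARN_ON(offset_in_page(vaddr) !=
		((unsigned long)vaddr & ~PAGE_MASK));
}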
@ -840,7 +841,7 @@ static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int mino
genlmsg_end(skb, msg_header);
ret = genlmsg_multicast(&tcmu_genl_family, skb, 0,
ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
TCMU_MCGRP_CONFIG, GFP_KERNEL);
/* We don't care if no one is listening */
@ -917,8 +918,10 @@ static int tcmu_configure_device(struct se_device *dev)
if (ret)
goto err_register;
/* User can set hw_block_size before enabling the device */
if (dev->dev_attrib.hw_block_size == 0)
dev->dev_attrib.hw_block_size = 512;
/* Other attributes can be configured in userspace */
dev->dev_attrib.hw_block_size = 512;
dev->dev_attrib.hw_max_sectors = 128;
dev->dev_attrib.hw_queue_depth = 128;

View file

@ -166,7 +166,6 @@ void ft_aborted_task(struct se_cmd *);
*/
void ft_recv_req(struct ft_sess *, struct fc_frame *);
struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
void ft_recv_write_data(struct ft_cmd *, struct fc_frame *);
void ft_dump_cmd(struct ft_cmd *, const char *caller);

View file

@ -171,9 +171,31 @@ static ssize_t ft_nacl_node_name_store(struct config_item *item,
CONFIGFS_ATTR(ft_nacl_, node_name);
CONFIGFS_ATTR(ft_nacl_, port_name);
static ssize_t ft_nacl_tag_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%s", acl_to_nacl(item)->acl_tag);
}
static ssize_t ft_nacl_tag_store(struct config_item *item,
const char *page, size_t count)
{
struct se_node_acl *se_nacl = acl_to_nacl(item);
int ret;
ret = core_tpg_set_initiator_node_tag(se_nacl->se_tpg, se_nacl, page);
if (ret < 0)
return ret;
return count;
}
CONFIGFS_ATTR(ft_nacl_, tag);
static struct configfs_attribute *ft_nacl_base_attrs[] = {
&ft_nacl_attr_port_name,
&ft_nacl_attr_node_name,
&ft_nacl_attr_tag,
NULL,
};
@ -198,31 +220,6 @@ static int ft_init_nodeacl(struct se_node_acl *nacl, const char *name)
return 0;
}
struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
{
struct ft_node_acl *found = NULL;
struct ft_node_acl *acl;
struct se_portal_group *se_tpg = &tpg->se_tpg;
struct se_node_acl *se_acl;
mutex_lock(&se_tpg->acl_node_mutex);
list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
pr_debug("acl %p port_name %llx\n",
acl, (unsigned long long)acl->node_auth.port_name);
if (acl->node_auth.port_name == rdata->ids.port_name ||
acl->node_auth.node_name == rdata->ids.node_name) {
pr_debug("acl %p port_name %llx matched\n", acl,
(unsigned long long)rdata->ids.port_name);
found = acl;
/* XXX need to hold onto ACL */
break;
}
}
mutex_unlock(&se_tpg->acl_node_mutex);
return found;
}
/*
* local_port port_group (tpg) ops.
*/

View file

@ -154,9 +154,9 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
BUG_ON(!page);
from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
page_addr = from;
from += mem_off & ~PAGE_MASK;
from += offset_in_page(mem_off);
tlen = min(tlen, (size_t)(PAGE_SIZE -
(mem_off & ~PAGE_MASK)));
offset_in_page(mem_off)));
memcpy(to, from, tlen);
kunmap_atomic(page_addr);
to += tlen;
@ -314,9 +314,9 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
to = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
page_addr = to;
to += mem_off & ~PAGE_MASK;
to += offset_in_page(mem_off);
tlen = min(tlen, (size_t)(PAGE_SIZE -
(mem_off & ~PAGE_MASK)));
offset_in_page(mem_off)));
memcpy(to, from, tlen);
kunmap_atomic(page_addr);

View file

@ -191,10 +191,15 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
* Caller holds ft_lport_lock.
*/
static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
struct ft_node_acl *acl)
struct fc_rport_priv *rdata)
{
struct se_portal_group *se_tpg = &tport->tpg->se_tpg;
struct se_node_acl *se_acl;
struct ft_sess *sess;
struct hlist_head *head;
unsigned char initiatorname[TRANSPORT_IQN_LEN];
ft_format_wwn(&initiatorname[0], TRANSPORT_IQN_LEN, rdata->ids.port_name);
head = &tport->hash[ft_sess_hash(port_id)];
hlist_for_each_entry_rcu(sess, head, hash)
@ -212,7 +217,14 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
kfree(sess);
return NULL;
}
sess->se_sess->se_node_acl = &acl->se_node_acl;
se_acl = core_tpg_get_initiator_node_acl(se_tpg, &initiatorname[0]);
if (!se_acl) {
transport_free_session(sess->se_sess);
kfree(sess);
return NULL;
}
sess->se_sess->se_node_acl = se_acl;
sess->tport = tport;
sess->port_id = port_id;
kref_init(&sess->kref); /* ref for table entry */
@ -221,7 +233,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
pr_debug("port_id %x sess %p\n", port_id, sess);
transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl,
transport_register_session(&tport->tpg->se_tpg, se_acl,
sess->se_sess, sess);
return sess;
}
@ -260,6 +272,14 @@ static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
return NULL;
}
static void ft_close_sess(struct ft_sess *sess)
{
transport_deregister_session_configfs(sess->se_sess);
target_sess_cmd_list_set_waiting(sess->se_sess);
target_wait_for_sess_cmds(sess->se_sess);
ft_sess_put(sess);
}
/*
* Delete all sessions from tport.
* Caller holds ft_lport_lock.
@ -273,8 +293,7 @@ static void ft_sess_delete_all(struct ft_tport *tport)
head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
hlist_for_each_entry_rcu(sess, head, hash) {
ft_sess_unhash(sess);
transport_deregister_session_configfs(sess->se_sess);
ft_sess_put(sess); /* release from table */
ft_close_sess(sess); /* release from table */
}
}
}
@ -313,8 +332,7 @@ void ft_sess_close(struct se_session *se_sess)
pr_debug("port_id %x\n", port_id);
ft_sess_unhash(sess);
mutex_unlock(&ft_lport_lock);
transport_deregister_session_configfs(se_sess);
ft_sess_put(sess);
ft_close_sess(sess);
/* XXX Send LOGO or PRLO */
synchronize_rcu(); /* let transport deregister happen */
}
@ -343,17 +361,12 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
{
struct ft_tport *tport;
struct ft_sess *sess;
struct ft_node_acl *acl;
u32 fcp_parm;
tport = ft_tport_get(rdata->local_port);
if (!tport)
goto not_target; /* not a target for this local port */
acl = ft_acl_get(tport->tpg, rdata);
if (!acl)
goto not_target; /* no target for this remote */
if (!rspp)
goto fill;
@ -375,7 +388,7 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
spp->spp_flags |= FC_SPP_EST_IMG_PAIR;
if (!(fcp_parm & FCP_SPPF_INIT_FCN))
return FC_SPP_RESP_CONF;
sess = ft_sess_create(tport, rdata->ids.port_id, acl);
sess = ft_sess_create(tport, rdata->ids.port_id, rdata);
if (!sess)
return FC_SPP_RESP_RES;
if (!sess->params)
@ -460,8 +473,7 @@ static void ft_prlo(struct fc_rport_priv *rdata)
return;
}
mutex_unlock(&ft_lport_lock);
transport_deregister_session_configfs(sess->se_sess);
ft_sess_put(sess); /* release from table */
ft_close_sess(sess); /* release from table */
rdata->prli_count--;
/* XXX TBD - clearing actions. unit attn, see 4.10 */
}

View file

@ -205,6 +205,9 @@ config USB_F_HID
config USB_F_PRINTER
tristate
config USB_F_TCM
tristate
choice
tristate "USB Gadget Drivers"
default USB_ETH
@ -457,6 +460,20 @@ config USB_CONFIGFS_F_PRINTER
For more information, see Documentation/usb/gadget_printer.txt
which includes sample code for accessing the device file.
config USB_CONFIGFS_F_TCM
bool "USB Gadget Target Fabric"
depends on TARGET_CORE
depends on USB_CONFIGFS
select USB_LIBCOMPOSITE
select USB_F_TCM
help
This fabric is a USB gadget component. Two USB protocols are
supported: BBB or BOT (Bulk Only Transport) and UAS
(USB Attached SCSI). BOT is advertised on alternative
interface 0 (primary) and UAS is on alternative interface 1.
Both protocols can work on USB2.0 and USB3.0.
UAS utilizes the USB 3.0 feature called streams support.
source "drivers/usb/gadget/legacy/Kconfig"
endchoice

View file

@ -44,3 +44,5 @@ usb_f_hid-y := f_hid.o
obj-$(CONFIG_USB_F_HID) += usb_f_hid.o
usb_f_printer-y := f_printer.o
obj-$(CONFIG_USB_F_PRINTER) += usb_f_printer.o
usb_f_tcm-y := f_tcm.o
obj-$(CONFIG_USB_F_TCM) += usb_f_tcm.o

File diff suppressed because it is too large

View file

@ -16,8 +16,7 @@
#define UASP_SS_EP_COMP_NUM_STREAMS (1 << UASP_SS_EP_COMP_LOG_STREAMS)
enum {
USB_G_STR_CONFIG = USB_GADGET_FIRST_AVAIL_IDX,
USB_G_STR_INT_UAS,
USB_G_STR_INT_UAS = 0,
USB_G_STR_INT_BBB,
};
@ -40,6 +39,8 @@ struct usbg_tpg {
u32 gadget_connect;
struct tcm_usbg_nexus *tpg_nexus;
atomic_t tpg_port_count;
struct usb_function_instance *fi;
};
struct usbg_tport {
@ -128,6 +129,4 @@ struct f_uas {
struct usb_request *bot_req_out;
};
extern struct usbg_tpg *the_only_tpg_I_currently_have;
#endif
#endif /* __TARGET_USB_GADGET_H__ */

View file

@ -0,0 +1,50 @@
/*
* u_tcm.h
*
* Utility definitions for the tcm function
*
* Copyright (c) 2015 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@xxxxxxxxxxx>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef U_TCM_H
#define U_TCM_H
#include <linux/usb/composite.h>
/**
* @dependent: optional dependent module. Meant for legacy gadget.
* If non-null, its refcount will be increased when a tpg is created and
* decreased when the tpg is dropped.
* @dep_lock: lock for dependent module operations.
* @ready: true if the dependent module information is set.
* @can_attach: true if a function can be bound to the gadget
* @has_dep: true if there is a dependent module
*
*/
struct f_tcm_opts {
struct usb_function_instance func_inst;
struct module *dependent;
struct mutex dep_lock;
bool ready;
bool can_attach;
bool has_dep;
/*
* Callbacks to be removed when legacy tcm gadget disappears.
*
* If you use the new function registration interface
* programmatically, you MUST set these callbacks to
* something sensible (e.g. probe/remove the composite).
*/
int (*tcm_register_callback)(struct usb_function_instance *);
void (*tcm_unregister_callback)(struct usb_function_instance *);
};
#endif /* U_TCM_H */
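
A sketch of how the legacy gadget might wire the transitional callbacks declared above (the helper names and bodies are hypothetical; the real hookup lives in the converted tcm_usb_gadget):

static int example_tcm_register_cb(struct usb_function_instance *f)
{
	/* e.g. usb_composite_probe() of the legacy composite driver */
	return 0;
}

static void example_tcm_unregister_cb(struct usb_function_instance *f)
{
	/* e.g. usb_composite_unregister() of the same driver */
}

static void example_wire_callbacks(struct f_tcm_opts *opts)
{
	mutex_lock(&opts->dep_lock);
	opts->tcm_register_callback = example_tcm_register_cb;
	opts->tcm_unregister_callback = example_tcm_unregister_cb;
	mutex_unlock(&opts->dep_lock);
}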

View file

@ -250,6 +250,7 @@ config USB_GADGET_TARGET
tristate "USB Gadget Target Fabric Module"
depends on TARGET_CORE
select USB_LIBCOMPOSITE
select USB_F_TCM
help
This fabric is a USB gadget. Two USB protocols are supported:
BBB or BOT (Bulk Only Transport) and UAS (USB Attached SCSI). BOT is

File diff suppressed because it is too large

View file

@ -1070,11 +1070,55 @@ static int configfs_depend_prep(struct dentry *origin,
return ret;
}
static int configfs_do_depend_item(struct dentry *subsys_dentry,
struct config_item *target)
{
struct configfs_dirent *p;
int ret;
spin_lock(&configfs_dirent_lock);
/* Scan the tree, return 0 if found */
ret = configfs_depend_prep(subsys_dentry, target);
if (ret)
goto out_unlock_dirent_lock;
/*
* We are sure that the item is not about to be removed by rmdir(), and
* not in the middle of attachment by mkdir().
*/
p = target->ci_dentry->d_fsdata;
p->s_dependent_count += 1;
out_unlock_dirent_lock:
spin_unlock(&configfs_dirent_lock);
return ret;
}
static inline struct configfs_dirent *
configfs_find_subsys_dentry(struct configfs_dirent *root_sd,
struct config_item *subsys_item)
{
struct configfs_dirent *p;
struct configfs_dirent *ret = NULL;
list_for_each_entry(p, &root_sd->s_children, s_sibling) {
if (p->s_type & CONFIGFS_DIR &&
p->s_element == subsys_item) {
ret = p;
break;
}
}
return ret;
}
int configfs_depend_item(struct configfs_subsystem *subsys,
struct config_item *target)
{
int ret;
struct configfs_dirent *p, *root_sd, *subsys_sd = NULL;
struct configfs_dirent *subsys_sd;
struct config_item *s_item = &subsys->su_group.cg_item;
struct dentry *root;
@ -1093,39 +1137,15 @@ int configfs_depend_item(struct configfs_subsystem *subsys,
*/
mutex_lock(&d_inode(root)->i_mutex);
root_sd = root->d_fsdata;
list_for_each_entry(p, &root_sd->s_children, s_sibling) {
if (p->s_type & CONFIGFS_DIR) {
if (p->s_element == s_item) {
subsys_sd = p;
break;
}
}
}
subsys_sd = configfs_find_subsys_dentry(root->d_fsdata, s_item);
if (!subsys_sd) {
ret = -ENOENT;
goto out_unlock_fs;
}
/* Ok, now we can trust subsys/s_item */
ret = configfs_do_depend_item(subsys_sd->s_dentry, target);
spin_lock(&configfs_dirent_lock);
/* Scan the tree, return 0 if found */
ret = configfs_depend_prep(subsys_sd->s_dentry, target);
if (ret)
goto out_unlock_dirent_lock;
/*
* We are sure that the item is not about to be removed by rmdir(), and
* not in the middle of attachment by mkdir().
*/
p = target->ci_dentry->d_fsdata;
p->s_dependent_count += 1;
out_unlock_dirent_lock:
spin_unlock(&configfs_dirent_lock);
out_unlock_fs:
mutex_unlock(&d_inode(root)->i_mutex);
@ -1144,8 +1164,7 @@ EXPORT_SYMBOL(configfs_depend_item);
* configfs_depend_item() because we know that that the client driver is
* pinned, thus the subsystem is pinned, and therefore configfs is pinned.
*/
void configfs_undepend_item(struct configfs_subsystem *subsys,
struct config_item *target)
void configfs_undepend_item(struct config_item *target)
{
struct configfs_dirent *sd;
@ -1168,6 +1187,79 @@ void configfs_undepend_item(struct configfs_subsystem *subsys,
}
EXPORT_SYMBOL(configfs_undepend_item);
/*
* caller_subsys is the caller's subsystem, not the target's. This is used to
* determine whether we should lock root and check subsys or not. When we are
* in the same subsystem as our target there is no need to do locking, as
* we know that subsys is valid and is not unregistered during this function,
* because we are called from a callback of one of its children and VFS holds
* a lock on some inode. Otherwise we have to lock our root to ensure that
* the target's subsystem is not unregistered during this function.
*/
int configfs_depend_item_unlocked(struct configfs_subsystem *caller_subsys,
struct config_item *target)
{
struct configfs_subsystem *target_subsys;
struct config_group *root, *parent;
struct configfs_dirent *subsys_sd;
int ret = -ENOENT;
/* Disallow this function for configfs root */
if (configfs_is_root(target))
return -EINVAL;
parent = target->ci_group;
/*
* This may happen when someone is trying to depend on the root
* directory of some subsystem
*/
if (configfs_is_root(&parent->cg_item)) {
target_subsys = to_configfs_subsystem(to_config_group(target));
root = parent;
} else {
target_subsys = parent->cg_subsys;
/* Find a configfs root as we may need it for locking */
for (root = parent; !configfs_is_root(&root->cg_item);
root = root->cg_item.ci_group)
;
}
if (target_subsys != caller_subsys) {
/*
* We are in another configfs subsystem, so we have to do
* additional locking to prevent the other subsystem from being
* unregistered
*/
mutex_lock(&d_inode(root->cg_item.ci_dentry)->i_mutex);
/*
* As we are trying to depend on an item from another subsystem,
* we have to check that the subsystem is still registered
*/
subsys_sd = configfs_find_subsys_dentry(
root->cg_item.ci_dentry->d_fsdata,
&target_subsys->su_group.cg_item);
if (!subsys_sd)
goto out_root_unlock;
} else {
subsys_sd = target_subsys->su_group.cg_item.ci_dentry->d_fsdata;
}
/* Now we can execute core of depend item */
ret = configfs_do_depend_item(subsys_sd->s_dentry, target);
if (target_subsys != caller_subsys)
out_root_unlock:
/*
* We were called from a subsystem other than our target's, so we
* took some locks; now it's time to release them
*/
mutex_unlock(&d_inode(root->cg_item.ci_dentry)->i_mutex);
return ret;
}
EXPORT_SYMBOL(configfs_depend_item_unlocked);
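
Usage sketch for the new unlocked pair from inside a configfs callback (the target core's target_depend_item()/target_undepend_item() conversion in this merge is the real caller; my_subsys is illustrative):

static int example_pin(struct configfs_subsystem *my_subsys,
		       struct config_item *item)
{
	/* Safe underneath configfs callbacks, unlike configfs_depend_item() */
	return configfs_depend_item_unlocked(my_subsys, item);
}

static void example_unpin(struct config_item *item)
{
	configfs_undepend_item_unlocked(item);
}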
static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int ret = 0;

View file

@ -757,7 +757,7 @@ int o2nm_depend_item(struct config_item *item)
void o2nm_undepend_item(struct config_item *item)
{
configfs_undepend_item(&o2nm_cluster_group.cs_subsys, item);
configfs_undepend_item(item);
}
int o2nm_depend_this_node(void)

View file

@ -259,7 +259,24 @@ void configfs_unregister_default_group(struct config_group *group);
/* These functions can sleep and can alloc with GFP_KERNEL */
/* WARNING: These cannot be called underneath configfs callbacks!! */
int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target);
void configfs_undepend_item(struct configfs_subsystem *subsys, struct config_item *target);
int configfs_depend_item(struct configfs_subsystem *subsys,
struct config_item *target);
void configfs_undepend_item(struct config_item *target);
/*
* These functions can sleep and can alloc with GFP_KERNEL
* NOTE: These should be called only underneath configfs callbacks.
* NOTE: First parameter is a caller's subsystem, not target's.
* WARNING: These cannot be called on a newly created item
* (in make_group()/make_item() callback)
*/
int configfs_depend_item_unlocked(struct configfs_subsystem *caller_subsys,
struct config_item *target);
static inline void configfs_undepend_item_unlocked(struct config_item *target)
{
configfs_undepend_item(target);
}
#endif /* _CONFIGFS_H_ */

View file

@ -63,6 +63,8 @@
#define DA_UNMAP_GRANULARITY_DEFAULT 0
/* Default unmap_granularity_alignment */
#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
/* Default unmap_zeroes_data */
#define DA_UNMAP_ZEROES_DATA_DEFAULT 0
/* Default max_write_same_len, disabled by default */
#define DA_MAX_WRITE_SAME_LEN 0
/* Use a model alias based on the configfs backend device name */
@ -526,6 +528,7 @@ struct se_cmd {
unsigned int t_prot_nents;
sense_reason_t pi_err;
sector_t bad_sector;
int cpuid;
};
struct se_ua {
@ -674,6 +677,7 @@ struct se_dev_attrib {
int force_pr_aptpl;
int is_nonrot;
int emulate_rest_reord;
int unmap_zeroes_data;
u32 hw_block_size;
u32 block_size;
u32 hw_max_sectors;
@ -864,8 +868,6 @@ struct se_portal_group {
* Negative values can be used by fabric drivers for internal use TPGs.
*/
int proto_id;
/* Number of ACLed Initiator Nodes for this TPG */
u32 num_node_acls;
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
atomic_t tpg_pr_ref_count;
/* Spinlock for adding/removing ACLed Nodes */

View file

@ -117,7 +117,7 @@ void __transport_register_session(struct se_portal_group *,
struct se_node_acl *, struct se_session *, void *);
void transport_register_session(struct se_portal_group *,
struct se_node_acl *, struct se_session *, void *);
void target_get_session(struct se_session *);
int target_get_session(struct se_session *);
void target_put_session(struct se_session *);
ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *);
void transport_free_session(struct se_session *);
@ -140,7 +140,7 @@ int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
unsigned char *sense, u64 unpacked_lun,
void *fabric_tmr_ptr, unsigned char tm_type,
gfp_t, unsigned int, int);
gfp_t, u64, int);
int transport_handle_cdb_direct(struct se_cmd *);
sense_reason_t transport_generic_new_cmd(struct se_cmd *);
@ -169,10 +169,11 @@ void core_allocate_nexus_loss_ua(struct se_node_acl *acl);
struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
unsigned char *);
bool target_tpg_has_node_acl(struct se_portal_group *tpg,
const char *);
struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
unsigned char *);
int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
unsigned char *, u32, int);
int core_tpg_set_initiator_node_queue_depth(struct se_node_acl *, u32);
int core_tpg_set_initiator_node_tag(struct se_portal_group *,
struct se_node_acl *, const char *);
int core_tpg_register(struct se_wwn *, struct se_portal_group *, int);