linux-stable/drivers/scsi/qedi/qedi_iscsi.c

// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI Offload Driver
* Copyright (c) 2016 Cavium Inc.
*/
#include <linux/blkdev.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <scsi/scsi_tcq.h>
#include "qedi.h"
#include "qedi_iscsi.h"
#include "qedi_gbl.h"
int qedi_recover_all_conns(struct qedi_ctx *qedi)
{
struct qedi_conn *qedi_conn;
int i;
for (i = 0; i < qedi->max_active_conns; i++) {
qedi_conn = qedi_get_conn_from_id(qedi, i);
if (!qedi_conn)
continue;
qedi_start_conn_recovery(qedi, qedi_conn);
}
return SUCCESS;
}
static int qedi_eh_host_reset(struct scsi_cmnd *cmd)
{
struct Scsi_Host *shost = cmd->device->host;
struct qedi_ctx *qedi;
qedi = iscsi_host_priv(shost);
return qedi_recover_all_conns(qedi);
}
struct scsi_host_template qedi_host_template = {
.module = THIS_MODULE,
.name = "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver",
.proc_name = QEDI_MODULE_NAME,
.queuecommand = iscsi_queuecommand,
.eh_timed_out = iscsi_eh_cmd_timed_out,
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler = iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
.eh_host_reset_handler = qedi_eh_host_reset,
.target_alloc = iscsi_target_alloc,
.change_queue_depth = scsi_change_queue_depth,
.can_queue = QEDI_MAX_ISCSI_TASK,
.this_id = -1,
.sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD,
.max_sectors = 0xffff,
.dma_boundary = QEDI_HW_DMA_BOUNDARY,
.cmd_per_lun = 128,
.shost_groups = qedi_shost_groups,
};
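/*
 * Free the DMA-coherent login/text PDU buffers and their BD tables.
 * Each pointer is cleared after freeing, so this is safe to call on a
 * partially allocated connection.
 */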
static void qedi_conn_free_login_resources(struct qedi_ctx *qedi,
struct qedi_conn *qedi_conn)
{
if (qedi_conn->gen_pdu.resp_bd_tbl) {
dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
qedi_conn->gen_pdu.resp_bd_tbl,
qedi_conn->gen_pdu.resp_bd_dma);
qedi_conn->gen_pdu.resp_bd_tbl = NULL;
}
if (qedi_conn->gen_pdu.req_bd_tbl) {
dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
qedi_conn->gen_pdu.req_bd_tbl,
qedi_conn->gen_pdu.req_bd_dma);
qedi_conn->gen_pdu.req_bd_tbl = NULL;
}
if (qedi_conn->gen_pdu.resp_buf) {
dma_free_coherent(&qedi->pdev->dev,
ISCSI_DEF_MAX_RECV_SEG_LEN,
qedi_conn->gen_pdu.resp_buf,
qedi_conn->gen_pdu.resp_dma_addr);
qedi_conn->gen_pdu.resp_buf = NULL;
}
if (qedi_conn->gen_pdu.req_buf) {
dma_free_coherent(&qedi->pdev->dev,
ISCSI_DEF_MAX_RECV_SEG_LEN,
qedi_conn->gen_pdu.req_buf,
qedi_conn->gen_pdu.req_dma_addr);
qedi_conn->gen_pdu.req_buf = NULL;
}
}
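/*
 * Allocate the per-connection slow-path resources: one request and one
 * response buffer for login/text/nop PDUs, plus a one-page BD table for
 * each.  On failure the labels below unwind the earlier allocations in
 * reverse order by falling through.
 */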
static int qedi_conn_alloc_login_resources(struct qedi_ctx *qedi,
struct qedi_conn *qedi_conn)
{
qedi_conn->gen_pdu.req_buf =
dma_alloc_coherent(&qedi->pdev->dev,
ISCSI_DEF_MAX_RECV_SEG_LEN,
&qedi_conn->gen_pdu.req_dma_addr,
GFP_KERNEL);
if (!qedi_conn->gen_pdu.req_buf)
goto login_req_buf_failure;
qedi_conn->gen_pdu.req_buf_size = 0;
qedi_conn->gen_pdu.req_wr_ptr = qedi_conn->gen_pdu.req_buf;
qedi_conn->gen_pdu.resp_buf =
dma_alloc_coherent(&qedi->pdev->dev,
ISCSI_DEF_MAX_RECV_SEG_LEN,
&qedi_conn->gen_pdu.resp_dma_addr,
GFP_KERNEL);
if (!qedi_conn->gen_pdu.resp_buf)
goto login_resp_buf_failure;
qedi_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf;
qedi_conn->gen_pdu.req_bd_tbl =
dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
&qedi_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
if (!qedi_conn->gen_pdu.req_bd_tbl)
goto login_req_bd_tbl_failure;
qedi_conn->gen_pdu.resp_bd_tbl =
dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
&qedi_conn->gen_pdu.resp_bd_dma,
GFP_KERNEL);
if (!qedi_conn->gen_pdu.resp_bd_tbl)
goto login_resp_bd_tbl_failure;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SESS,
"Allocation successful, cid=0x%x\n",
qedi_conn->iscsi_conn_id);
return 0;
login_resp_bd_tbl_failure:
dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
qedi_conn->gen_pdu.req_bd_tbl,
qedi_conn->gen_pdu.req_bd_dma);
qedi_conn->gen_pdu.req_bd_tbl = NULL;
login_req_bd_tbl_failure:
dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
qedi_conn->gen_pdu.resp_buf,
qedi_conn->gen_pdu.resp_dma_addr);
qedi_conn->gen_pdu.resp_buf = NULL;
login_resp_buf_failure:
dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
qedi_conn->gen_pdu.req_buf,
qedi_conn->gen_pdu.req_dma_addr);
qedi_conn->gen_pdu.req_buf = NULL;
login_req_buf_failure:
iscsi_conn_printk(KERN_ERR, qedi_conn->cls_conn->dd_data,
"login resource alloc failed!!\n");
return -ENOMEM;
}
static void qedi_destroy_cmd_pool(struct qedi_ctx *qedi,
struct iscsi_session *session)
{
int i;
for (i = 0; i < session->cmds_max; i++) {
struct iscsi_task *task = session->cmds[i];
struct qedi_cmd *cmd = task->dd_data;
if (cmd->io_tbl.sge_tbl)
dma_free_coherent(&qedi->pdev->dev,
QEDI_ISCSI_MAX_BDS_PER_CMD *
sizeof(struct scsi_sge),
cmd->io_tbl.sge_tbl,
cmd->io_tbl.sge_tbl_dma);
if (cmd->sense_buffer)
dma_free_coherent(&qedi->pdev->dev,
SCSI_SENSE_BUFFERSIZE,
cmd->sense_buffer,
cmd->sense_buffer_dma);
}
}
static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session,
struct qedi_cmd *cmd)
{
struct qedi_io_bdt *io = &cmd->io_tbl;
struct scsi_sge *sge;
io->sge_tbl = dma_alloc_coherent(&qedi->pdev->dev,
QEDI_ISCSI_MAX_BDS_PER_CMD *
sizeof(*sge),
&io->sge_tbl_dma, GFP_KERNEL);
if (!io->sge_tbl) {
iscsi_session_printk(KERN_ERR, session,
"Could not allocate BD table.\n");
return -ENOMEM;
}
io->sge_valid = 0;
return 0;
}
static int qedi_setup_cmd_pool(struct qedi_ctx *qedi,
struct iscsi_session *session)
{
int i;
for (i = 0; i < session->cmds_max; i++) {
struct iscsi_task *task = session->cmds[i];
struct qedi_cmd *cmd = task->dd_data;
task->hdr = &cmd->hdr;
task->hdr_max = sizeof(struct iscsi_hdr);
if (qedi_alloc_sget(qedi, session, cmd))
goto free_sgets;
cmd->sense_buffer = dma_alloc_coherent(&qedi->pdev->dev,
SCSI_SENSE_BUFFERSIZE,
&cmd->sense_buffer_dma,
GFP_KERNEL);
if (!cmd->sense_buffer)
goto free_sgets;
}
return 0;
free_sgets:
qedi_destroy_cmd_pool(qedi, session);
return -ENOMEM;
}
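/*
 * Create an iSCSI session on top of an already-connected endpoint.
 * cmds_max is clamped to the SQ limits [QEDI_SQ_WQES_MIN, max_sqes]
 * before it is handed to libiscsi.
 */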
static struct iscsi_cls_session *
qedi_session_create(struct iscsi_endpoint *ep, u16 cmds_max,
u16 qdepth, uint32_t initial_cmdsn)
{
struct Scsi_Host *shost;
struct iscsi_cls_session *cls_session;
struct qedi_ctx *qedi;
struct qedi_endpoint *qedi_ep;
if (!ep)
return NULL;
qedi_ep = ep->dd_data;
shost = qedi_ep->qedi->shost;
qedi = iscsi_host_priv(shost);
if (cmds_max > qedi->max_sqes)
cmds_max = qedi->max_sqes;
else if (cmds_max < QEDI_SQ_WQES_MIN)
cmds_max = QEDI_SQ_WQES_MIN;
cls_session = iscsi_session_setup(&qedi_iscsi_transport, shost,
cmds_max, 0, sizeof(struct qedi_cmd),
initial_cmdsn, ISCSI_MAX_TARGET);
if (!cls_session) {
QEDI_ERR(&qedi->dbg_ctx,
"Failed to setup session for ep=%p\n", qedi_ep);
return NULL;
}
if (qedi_setup_cmd_pool(qedi, cls_session->dd_data)) {
QEDI_ERR(&qedi->dbg_ctx,
"Failed to setup cmd pool for ep=%p\n", qedi_ep);
goto session_teardown;
}
return cls_session;
session_teardown:
iscsi_session_teardown(cls_session);
return NULL;
}
static void qedi_session_destroy(struct iscsi_cls_session *cls_session)
{
struct iscsi_session *session = cls_session->dd_data;
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
struct qedi_ctx *qedi = iscsi_host_priv(shost);
qedi_destroy_cmd_pool(qedi, session);
iscsi_session_teardown(cls_session);
}
static struct iscsi_cls_conn *
qedi_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
{
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
struct qedi_ctx *qedi = iscsi_host_priv(shost);
struct iscsi_cls_conn *cls_conn;
struct qedi_conn *qedi_conn;
struct iscsi_conn *conn;
cls_conn = iscsi_conn_setup(cls_session, sizeof(*qedi_conn),
cid);
if (!cls_conn) {
QEDI_ERR(&qedi->dbg_ctx,
"conn_new: iscsi conn setup failed, cid=0x%x, cls_sess=%p!\n",
cid, cls_session);
return NULL;
}
conn = cls_conn->dd_data;
qedi_conn = conn->dd_data;
qedi_conn->cls_conn = cls_conn;
qedi_conn->qedi = qedi;
qedi_conn->ep = NULL;
qedi_conn->active_cmd_count = 0;
INIT_LIST_HEAD(&qedi_conn->active_cmd_list);
spin_lock_init(&qedi_conn->list_lock);
if (qedi_conn_alloc_login_resources(qedi, qedi_conn)) {
iscsi_conn_printk(KERN_ALERT, conn,
"conn_new: login resc alloc failed, cid=0x%x, cls_sess=%p!!\n",
cid, cls_session);
goto free_conn;
}
return cls_conn;
free_conn:
iscsi_conn_teardown(cls_conn);
return NULL;
}
void qedi_mark_device_missing(struct iscsi_cls_session *cls_session)
{
struct iscsi_session *session = cls_session->dd_data;
struct qedi_conn *qedi_conn = session->leadconn->dd_data;
spin_lock_bh(&session->frwd_lock);
set_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags);
spin_unlock_bh(&session->frwd_lock);
}
void qedi_mark_device_available(struct iscsi_cls_session *cls_session)
{
struct iscsi_session *session = cls_session->dd_data;
struct qedi_conn *qedi_conn = session->leadconn->dd_data;
spin_lock_bh(&session->frwd_lock);
clear_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags);
spin_unlock_bh(&session->frwd_lock);
}
static int qedi_bind_conn_to_iscsi_cid(struct qedi_ctx *qedi,
struct qedi_conn *qedi_conn)
{
u32 iscsi_cid = qedi_conn->iscsi_conn_id;
if (qedi->cid_que.conn_cid_tbl[iscsi_cid]) {
iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data,
"conn bind - entry #%d not free\n",
iscsi_cid);
return -EBUSY;
}
qedi->cid_que.conn_cid_tbl[iscsi_cid] = qedi_conn;
return 0;
}
struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid)
{
if (!qedi->cid_que.conn_cid_tbl) {
QEDI_ERR(&qedi->dbg_ctx, "missing conn<->cid table\n");
return NULL;
} else if (iscsi_cid >= qedi->max_active_conns) {
QEDI_ERR(&qedi->dbg_ctx, "wrong cid #%d\n", iscsi_cid);
return NULL;
}
return qedi->cid_que.conn_cid_tbl[iscsi_cid];
}
static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn,
u64 transport_fd, int is_leading)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct qedi_conn *qedi_conn = conn->dd_data;
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
struct qedi_ctx *qedi = iscsi_host_priv(shost);
struct qedi_endpoint *qedi_ep;
struct iscsi_endpoint *ep;
int rc = 0;
ep = iscsi_lookup_endpoint(transport_fd);
if (!ep)
return -EINVAL;
qedi_ep = ep->dd_data;
if ((qedi_ep->state == EP_STATE_TCP_FIN_RCVD) ||
(qedi_ep->state == EP_STATE_TCP_RST_RCVD)) {
rc = -EINVAL;
goto put_ep;
}
if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
rc = -EINVAL;
goto put_ep;
}
qedi_ep->conn = qedi_conn;
qedi_conn->ep = qedi_ep;
qedi_conn->iscsi_ep = ep;
qedi_conn->iscsi_conn_id = qedi_ep->iscsi_cid;
qedi_conn->fw_cid = qedi_ep->fw_cid;
qedi_conn->cmd_cleanup_req = 0;
qedi_conn->cmd_cleanup_cmpl = 0;
if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) {
rc = -EINVAL;
goto put_ep;
}
spin_lock_init(&qedi_conn->tmf_work_lock);
INIT_LIST_HEAD(&qedi_conn->tmf_work_list);
init_waitqueue_head(&qedi_conn->wait_queue);
put_ep:
iscsi_put_endpoint(ep);
return rc;
}
static int qedi_iscsi_update_conn(struct qedi_ctx *qedi,
struct qedi_conn *qedi_conn)
{
struct qed_iscsi_params_update *conn_info;
struct iscsi_cls_conn *cls_conn = qedi_conn->cls_conn;
struct iscsi_conn *conn = cls_conn->dd_data;
struct qedi_endpoint *qedi_ep;
int rval;
qedi_ep = qedi_conn->ep;
conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL);
if (!conn_info) {
QEDI_ERR(&qedi->dbg_ctx, "memory alloc failed\n");
return -ENOMEM;
}
conn_info->update_flag = 0;
if (conn->hdrdgst_en)
SET_FIELD(conn_info->update_flag,
ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, true);
if (conn->datadgst_en)
SET_FIELD(conn_info->update_flag,
ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, true);
if (conn->session->initial_r2t_en)
SET_FIELD(conn_info->update_flag,
ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T,
true);
if (conn->session->imm_data_en)
SET_FIELD(conn_info->update_flag,
ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA,
true);
conn_info->max_seq_size = conn->session->max_burst;
conn_info->max_recv_pdu_length = conn->max_recv_dlength;
conn_info->max_send_pdu_length = conn->max_xmit_dlength;
conn_info->first_seq_length = conn->session->first_burst;
conn_info->exp_stat_sn = conn->exp_statsn;
rval = qedi_ops->update_conn(qedi->cdev, qedi_ep->handle,
conn_info);
if (rval) {
rval = -ENXIO;
QEDI_ERR(&qedi->dbg_ctx, "Could not update connection\n");
}
kfree(conn_info);
return rval;
}
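/*
 * Derive the TCP MSS from the path MTU by subtracting the TCP and
 * IPv4/IPv6 header lengths, falling back to DEF_MSS if the result is
 * zero.  The tcp_ts_en and vlan_en arguments are currently unused.
 */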
static u16 qedi_calc_mss(u16 pmtu, u8 is_ipv6, u8 tcp_ts_en, u8 vlan_en)
{
u16 mss = 0;
u16 hdrs = TCP_HDR_LEN;
if (is_ipv6)
hdrs += IPV6_HDR_LEN;
else
hdrs += IPV4_HDR_LEN;
mss = pmtu - hdrs;
if (!mss)
mss = DEF_MSS;
return mss;
}
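/*
 * Build the qed_iscsi_params_offload block (MAC and IP addresses in
 * host order, TCP tuning defaults, SQ PBL address), register the SQ
 * doorbell with the doorbell recovery mechanism, then issue the offload
 * ramrod.  If the ramrod fails, the doorbell registration is rolled
 * back.
 */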
static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
{
struct qed_iscsi_params_offload *conn_info;
struct qedi_ctx *qedi = qedi_ep->qedi;
int rval;
int i;
conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL);
if (!conn_info) {
QEDI_ERR(&qedi->dbg_ctx,
"Failed to allocate memory ep=%p\n", qedi_ep);
return -ENOMEM;
}
ether_addr_copy(conn_info->src.mac, qedi_ep->src_mac);
ether_addr_copy(conn_info->dst.mac, qedi_ep->dst_mac);
conn_info->src.ip[0] = ntohl(qedi_ep->src_addr[0]);
conn_info->dst.ip[0] = ntohl(qedi_ep->dst_addr[0]);
if (qedi_ep->ip_type == TCP_IPV4) {
conn_info->ip_version = 0;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"After ntohl: src_addr=%pI4, dst_addr=%pI4\n",
qedi_ep->src_addr, qedi_ep->dst_addr);
} else {
for (i = 1; i < 4; i++) {
conn_info->src.ip[i] = ntohl(qedi_ep->src_addr[i]);
conn_info->dst.ip[i] = ntohl(qedi_ep->dst_addr[i]);
}
conn_info->ip_version = 1;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"After ntohl: src_addr=%pI6, dst_addr=%pI6\n",
qedi_ep->src_addr, qedi_ep->dst_addr);
}
conn_info->src.port = qedi_ep->src_port;
conn_info->dst.port = qedi_ep->dst_port;
conn_info->layer_code = ISCSI_SLOW_PATH_LAYER_CODE;
conn_info->sq_pbl_addr = qedi_ep->sq_pbl_dma;
conn_info->vlan_id = qedi_ep->vlan_id;
SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_TS_EN, 1);
SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_EN, 1);
SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_CNT_EN, 1);
SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_KA_EN, 1);
conn_info->default_cq = (qedi_ep->fw_cid % qedi->num_queues);
conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT;
conn_info->dup_ack_theshold = 3;
conn_info->rcv_wnd = 65535;
conn_info->ss_thresh = 65535;
conn_info->srtt = 300;
conn_info->rtt_var = 150;
conn_info->flow_label = 0;
conn_info->ka_timeout = DEF_KA_TIMEOUT;
conn_info->ka_interval = DEF_KA_INTERVAL;
conn_info->max_rt_time = DEF_MAX_RT_TIME;
conn_info->ttl = DEF_TTL;
conn_info->tos_or_tc = DEF_TOS;
conn_info->remote_port = qedi_ep->dst_port;
conn_info->local_port = qedi_ep->src_port;
conn_info->mss = qedi_calc_mss(qedi_ep->pmtu,
(qedi_ep->ip_type == TCP_IPV6),
1, (qedi_ep->vlan_id != 0));
conn_info->cwnd = DEF_MAX_CWND * conn_info->mss;
conn_info->rcv_wnd_scale = 4;
conn_info->da_timeout_value = 200;
conn_info->ack_frequency = 2;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"Default cq index [%d], mss [%d]\n",
conn_info->default_cq, conn_info->mss);
/* Prepare the doorbell parameters */
qedi_ep->db_data.agg_flags = 0;
qedi_ep->db_data.params = 0;
SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_DEST, DB_DEST_XCM);
SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_AGG_CMD,
DB_AGG_CMD_MAX);
SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_AGG_VAL_SEL,
DQ_XCM_ISCSI_SQ_PROD_CMD);
SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_BYPASS_EN, 1);
/* Register doorbell with doorbell recovery mechanism */
rval = qedi_ops->common->db_recovery_add(qedi->cdev,
qedi_ep->p_doorbell,
&qedi_ep->db_data,
DB_REC_WIDTH_32B,
DB_REC_KERNEL);
if (rval) {
kfree(conn_info);
return rval;
}
rval = qedi_ops->offload_conn(qedi->cdev, qedi_ep->handle, conn_info);
if (rval) {
/* delete doorbell from doorbell recovery mechanism */
rval = qedi_ops->common->db_recovery_del(qedi->cdev,
qedi_ep->p_doorbell,
&qedi_ep->db_data);
QEDI_ERR(&qedi->dbg_ctx, "offload_conn returned %d, ep=%p\n",
rval, qedi_ep);
}
kfree(conn_info);
return rval;
}
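/*
 * start_conn entry point: push the negotiated parameters to the
 * firmware via the update ramrod, reset the TMF bookkeeping, then let
 * libiscsi start the connection.
 */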
static int qedi_conn_start(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct qedi_conn *qedi_conn = conn->dd_data;
struct qedi_ctx *qedi;
int rval;
qedi = qedi_conn->qedi;
rval = qedi_iscsi_update_conn(qedi, qedi_conn);
if (rval) {
iscsi_conn_printk(KERN_ALERT, conn,
"conn_start: FW offload conn failed.\n");
rval = -EINVAL;
goto start_err;
}
/*
 * Reset the TMF bookkeeping under tmf_work_lock before (re)starting the
 * connection: fw_cleanup_works counts queued TMF cleanup works, and
 * ep_disconnect_starting blocks new ones once teardown begins.
 */
spin_lock(&qedi_conn->tmf_work_lock);
qedi_conn->fw_cleanup_works = 0;
qedi_conn->ep_disconnect_starting = false;
spin_unlock(&qedi_conn->tmf_work_lock);
qedi_conn->abrt_conn = 0;
rval = iscsi_conn_start(cls_conn);
if (rval) {
iscsi_conn_printk(KERN_ALERT, conn,
"iscsi_conn_start: FW offload conn failed!!\n");
}
start_err:
return rval;
}
static void qedi_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct qedi_conn *qedi_conn = conn->dd_data;
struct Scsi_Host *shost;
struct qedi_ctx *qedi;
shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
qedi = iscsi_host_priv(shost);
qedi_conn_free_login_resources(qedi, qedi_conn);
iscsi_conn_teardown(cls_conn);
}
static int qedi_ep_get_param(struct iscsi_endpoint *ep,
enum iscsi_param param, char *buf)
{
struct qedi_endpoint *qedi_ep = ep->dd_data;
int len;
if (!qedi_ep)
return -ENOTCONN;
switch (param) {
case ISCSI_PARAM_CONN_PORT:
len = sprintf(buf, "%hu\n", qedi_ep->dst_port);
break;
case ISCSI_PARAM_CONN_ADDRESS:
if (qedi_ep->ip_type == TCP_IPV4)
len = sprintf(buf, "%pI4\n", qedi_ep->dst_addr);
else
len = sprintf(buf, "%pI6\n", qedi_ep->dst_addr);
break;
default:
return -ENOTCONN;
}
return len;
}
static int qedi_host_get_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf)
{
struct qedi_ctx *qedi;
int len;
qedi = iscsi_host_priv(shost);
switch (param) {
case ISCSI_HOST_PARAM_HWADDRESS:
len = sysfs_format_mac(buf, qedi->mac, 6);
break;
case ISCSI_HOST_PARAM_NETDEV_NAME:
len = sprintf(buf, "host%d\n", shost->host_no);
break;
case ISCSI_HOST_PARAM_IPADDRESS:
if (qedi->ip_type == TCP_IPV4)
len = sprintf(buf, "%pI4\n", qedi->src_ip);
else
len = sprintf(buf, "%pI6\n", qedi->src_ip);
break;
default:
return iscsi_host_get_param(shost, param, buf);
}
return len;
}
static void qedi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
struct iscsi_stats *stats)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct qed_iscsi_stats iscsi_stats;
struct Scsi_Host *shost;
struct qedi_ctx *qedi;
shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
qedi = iscsi_host_priv(shost);
qedi_ops->get_stats(qedi->cdev, &iscsi_stats);
conn->txdata_octets = iscsi_stats.iscsi_tx_bytes_cnt;
conn->rxdata_octets = iscsi_stats.iscsi_rx_bytes_cnt;
conn->dataout_pdus_cnt = (uint32_t)iscsi_stats.iscsi_tx_data_pdu_cnt;
conn->datain_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_data_pdu_cnt;
conn->r2t_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_r2t_pdu_cnt;
stats->txdata_octets = conn->txdata_octets;
stats->rxdata_octets = conn->rxdata_octets;
stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
stats->dataout_pdus = conn->dataout_pdus_cnt;
stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
stats->datain_pdus = conn->datain_pdus_cnt;
stats->r2t_pdus = conn->r2t_pdus_cnt;
stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
stats->digest_err = 0;
stats->timeout_err = 0;
strcpy(stats->custom[0].desc, "eh_abort_cnt");
stats->custom[0].value = conn->eh_abort_cnt;
stats->custom_length = 1;
}
static void qedi_iscsi_prep_generic_pdu_bd(struct qedi_conn *qedi_conn)
{
struct scsi_sge *bd_tbl;
bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
bd_tbl->sge_addr.hi =
(u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.req_dma_addr;
bd_tbl->sge_len = qedi_conn->gen_pdu.req_wr_ptr -
qedi_conn->gen_pdu.req_buf;
bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
bd_tbl->sge_addr.hi =
(u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.resp_dma_addr;
bd_tbl->sge_len = ISCSI_DEF_MAX_RECV_SEG_LEN;
}
static int qedi_iscsi_send_generic_request(struct iscsi_task *task)
{
struct qedi_cmd *cmd = task->dd_data;
struct qedi_conn *qedi_conn = cmd->conn;
char *buf;
int data_len;
int rc = 0;
qedi_iscsi_prep_generic_pdu_bd(qedi_conn);
switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
case ISCSI_OP_LOGIN:
qedi_send_iscsi_login(qedi_conn, task);
break;
case ISCSI_OP_NOOP_OUT:
data_len = qedi_conn->gen_pdu.req_buf_size;
buf = qedi_conn->gen_pdu.req_buf;
if (data_len)
rc = qedi_send_iscsi_nopout(qedi_conn, task,
buf, data_len, 1);
else
rc = qedi_send_iscsi_nopout(qedi_conn, task,
NULL, 0, 1);
break;
case ISCSI_OP_LOGOUT:
rc = qedi_send_iscsi_logout(qedi_conn, task);
break;
case ISCSI_OP_SCSI_TMFUNC:
rc = qedi_send_iscsi_tmf(qedi_conn, task);
break;
case ISCSI_OP_TEXT:
rc = qedi_send_iscsi_text(qedi_conn, task);
break;
default:
iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data,
"unsupported op 0x%x\n", task->hdr->opcode);
}
return rc;
}
static int qedi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
{
struct qedi_conn *qedi_conn = conn->dd_data;
struct qedi_cmd *cmd = task->dd_data;
memset(qedi_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
qedi_conn->gen_pdu.req_buf_size = task->data_count;
if (task->data_count) {
memcpy(qedi_conn->gen_pdu.req_buf, task->data,
task->data_count);
qedi_conn->gen_pdu.req_wr_ptr =
qedi_conn->gen_pdu.req_buf + task->data_count;
}
cmd->conn = conn->dd_data;
return qedi_iscsi_send_generic_request(task);
}
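/*
 * xmit_task entry point.  SCSI commands go down the fast I/O path via
 * qedi_iscsi_send_ioreq(); management PDUs (login, nop-out, logout,
 * TMF, text) are copied into the preallocated slow-path buffer by
 * qedi_mtask_xmit().
 */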
static int qedi_task_xmit(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
struct qedi_conn *qedi_conn = conn->dd_data;
struct qedi_cmd *cmd = task->dd_data;
struct scsi_cmnd *sc = task->sc;
/* Clear now so in cleanup_task we know it didn't make it */
cmd->scsi_cmd = NULL;
cmd->task_id = U16_MAX;
if (test_bit(QEDI_IN_SHUTDOWN, &qedi_conn->qedi->flags))
return -ENODEV;
if (test_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags))
return -EACCES;
cmd->state = 0;
cmd->task = NULL;
cmd->use_slowpath = false;
cmd->conn = qedi_conn;
cmd->task = task;
cmd->io_cmd_in_list = false;
INIT_LIST_HEAD(&cmd->io_cmd);
if (!sc)
return qedi_mtask_xmit(conn, task);
cmd->scsi_cmd = sc;
return qedi_iscsi_send_ioreq(task);
}
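/*
 * ep_connect entry point: allocate an endpoint and its SQ, acquire a
 * firmware connection (the returned handle doubles as the iSCSI CID),
 * and send an ISCSI_KEVENT_PATH_REQ uevent so userspace (iscsiuio)
 * resolves the path.  The TCP offload itself is kicked off later from
 * qedi_set_path().
 */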
static struct iscsi_endpoint *
qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
int non_blocking)
{
struct qedi_ctx *qedi;
struct iscsi_endpoint *ep;
struct qedi_endpoint *qedi_ep;
struct sockaddr_in *addr;
struct sockaddr_in6 *addr6;
struct iscsi_path path_req;
u32 msg_type = ISCSI_KEVENT_IF_DOWN;
u32 iscsi_cid = QEDI_CID_RESERVED;
u16 len = 0;
char *buf = NULL;
int ret, tmp;
if (!shost) {
ret = -ENXIO;
QEDI_ERR(NULL, "shost is NULL\n");
return ERR_PTR(ret);
}
if (qedi_do_not_recover) {
ret = -ENOMEM;
return ERR_PTR(ret);
}
qedi = iscsi_host_priv(shost);
if (test_bit(QEDI_IN_OFFLINE, &qedi->flags) ||
test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
ret = -ENOMEM;
return ERR_PTR(ret);
}
if (atomic_read(&qedi->link_state) != QEDI_LINK_UP) {
QEDI_WARN(&qedi->dbg_ctx, "qedi link down\n");
return ERR_PTR(-ENXIO);
}
ep = iscsi_create_endpoint(sizeof(struct qedi_endpoint));
if (!ep) {
QEDI_ERR(&qedi->dbg_ctx, "endpoint create fail\n");
ret = -ENOMEM;
return ERR_PTR(ret);
}
qedi_ep = ep->dd_data;
memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
qedi_ep->state = EP_STATE_IDLE;
qedi_ep->iscsi_cid = (u32)-1;
qedi_ep->qedi = qedi;
if (dst_addr->sa_family == AF_INET) {
addr = (struct sockaddr_in *)dst_addr;
memcpy(qedi_ep->dst_addr, &addr->sin_addr.s_addr,
sizeof(struct in_addr));
qedi_ep->dst_port = ntohs(addr->sin_port);
qedi_ep->ip_type = TCP_IPV4;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"dst_addr=%pI4, dst_port=%u\n",
qedi_ep->dst_addr, qedi_ep->dst_port);
} else if (dst_addr->sa_family == AF_INET6) {
addr6 = (struct sockaddr_in6 *)dst_addr;
memcpy(qedi_ep->dst_addr, &addr6->sin6_addr,
sizeof(struct in6_addr));
qedi_ep->dst_port = ntohs(addr6->sin6_port);
qedi_ep->ip_type = TCP_IPV6;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"dst_addr=%pI6, dst_port=%u\n",
qedi_ep->dst_addr, qedi_ep->dst_port);
} else {
QEDI_ERR(&qedi->dbg_ctx, "Invalid endpoint\n");
}
ret = qedi_alloc_sq(qedi, qedi_ep);
if (ret)
goto ep_conn_exit;
ret = qedi_ops->acquire_conn(qedi->cdev, &qedi_ep->handle,
&qedi_ep->fw_cid, &qedi_ep->p_doorbell);
if (ret) {
QEDI_ERR(&qedi->dbg_ctx, "Could not acquire connection\n");
ret = -ENXIO;
goto ep_free_sq;
}
iscsi_cid = qedi_ep->handle;
qedi_ep->iscsi_cid = iscsi_cid;
init_waitqueue_head(&qedi_ep->ofld_wait);
init_waitqueue_head(&qedi_ep->tcp_ofld_wait);
qedi_ep->state = EP_STATE_OFLDCONN_START;
qedi->ep_tbl[iscsi_cid] = qedi_ep;
buf = (char *)&path_req;
len = sizeof(path_req);
memset(&path_req, 0, len);
msg_type = ISCSI_KEVENT_PATH_REQ;
path_req.handle = (u64)qedi_ep->iscsi_cid;
path_req.pmtu = qedi->ll2_mtu;
qedi_ep->pmtu = qedi->ll2_mtu;
if (qedi_ep->ip_type == TCP_IPV4) {
memcpy(&path_req.dst.v4_addr, &qedi_ep->dst_addr,
sizeof(struct in_addr));
path_req.ip_addr_len = 4;
} else {
memcpy(&path_req.dst.v6_addr, &qedi_ep->dst_addr,
sizeof(struct in6_addr));
path_req.ip_addr_len = 16;
}
ret = iscsi_offload_mesg(shost, &qedi_iscsi_transport, msg_type, buf,
len);
if (ret) {
QEDI_ERR(&qedi->dbg_ctx,
"iscsi_offload_mesg() failed for cid=0x%x ret=%d\n",
iscsi_cid, ret);
goto ep_rel_conn;
}
atomic_inc(&qedi->num_offloads);
return ep;
ep_rel_conn:
qedi->ep_tbl[iscsi_cid] = NULL;
tmp = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
if (tmp)
QEDI_WARN(&qedi->dbg_ctx, "release_conn returned %d\n",
tmp);
ep_free_sq:
qedi_free_sq(qedi, qedi_ep);
ep_conn_exit:
iscsi_destroy_endpoint(ep);
return ERR_PTR(ret);
}
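/*
 * ep_poll entry point: wait up to timeout_ms for the offload to reach a
 * terminal state.  Returns 1 on success, 0 on timeout, and a negative
 * value if the offload failed, was never started, or the wait was
 * interrupted.
 */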
static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
struct qedi_endpoint *qedi_ep;
int ret = 0;
if (qedi_do_not_recover)
return 1;
qedi_ep = ep->dd_data;
if (qedi_ep->state == EP_STATE_IDLE ||
qedi_ep->state == EP_STATE_OFLDCONN_NONE ||
qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
return -1;
if (qedi_ep->state == EP_STATE_OFLDCONN_COMPL)
ret = 1;
ret = wait_event_interruptible_timeout(qedi_ep->ofld_wait,
QEDI_OFLD_WAIT_STATE(qedi_ep),
msecs_to_jiffies(timeout_ms));
if (qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
ret = -1;
if (ret > 0)
return 1;
else if (!ret)
return 0;
else
return ret;
}
static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn)
{
struct qedi_cmd *cmd, *cmd_tmp;
spin_lock(&qedi_conn->list_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
io_cmd) {
list_del_init(&cmd->io_cmd);
qedi_conn->active_cmd_count--;
}
spin_unlock(&qedi_conn->list_lock);
}
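/*
 * ep_disconnect entry point.  Drains any queued TMF cleanup works,
 * decides between an abortive (RST) and a graceful (FIN) teardown based
 * on the outstanding command count, deletes the doorbell, destroys and
 * releases the firmware connection, and finally frees the endpoint.
 */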
static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
{
struct qedi_endpoint *qedi_ep;
struct qedi_conn *qedi_conn = NULL;
struct qedi_ctx *qedi;
int ret = 0;
int wait_delay;
int abrt_conn = 0;
wait_delay = 60 * HZ + DEF_MAX_RT_TIME;
qedi_ep = ep->dd_data;
qedi = qedi_ep->qedi;
/*
 * If the offload was never scheduled (e.g. ARP was not resolved and the
 * endpoint is still in EP_STATE_OFLDCONN_START), skip straight to the
 * endpoint teardown.
 */
if (qedi_ep->state == EP_STATE_OFLDCONN_START)
goto ep_exit_recover;
/*
 * offload_work is only initialized once qedi_set_path() has run; for an
 * unreachable target the state is EP_STATE_OFLDCONN_NONE and flushing
 * the uninitialized work would trigger a WARN.
 */
if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
flush_work(&qedi_ep->offload_work);
if (qedi_ep->conn) {
qedi_conn = qedi_ep->conn;
abrt_conn = qedi_conn->abrt_conn;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"cid=0x%x qedi_ep=%p waiting for %d tmfs\n",
qedi_ep->iscsi_cid, qedi_ep,
qedi_conn->fw_cleanup_works);
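/*
 * Block new TMF cleanup works from being queued, then wait for the
 * already-queued ones to drain: once ep_disconnect() returns, libiscsi
 * calls conn_stop and takes ownership of the commands.
 */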
spin_lock(&qedi_conn->tmf_work_lock);
qedi_conn->ep_disconnect_starting = true;
while (qedi_conn->fw_cleanup_works > 0) {
spin_unlock(&qedi_conn->tmf_work_lock);
msleep(1000);
spin_lock(&qedi_conn->tmf_work_lock);
}
spin_unlock(&qedi_conn->tmf_work_lock);
if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
if (qedi_do_not_recover) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"Do not recover cid=0x%x\n",
qedi_ep->iscsi_cid);
goto ep_exit_recover;
}
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"Reset recovery cid=0x%x, qedi_ep=%p, state=0x%x\n",
qedi_ep->iscsi_cid, qedi_ep, qedi_ep->state);
qedi_cleanup_active_cmd_list(qedi_conn);
goto ep_release_conn;
}
}
if (qedi_do_not_recover)
goto ep_exit_recover;
switch (qedi_ep->state) {
case EP_STATE_OFLDCONN_START:
case EP_STATE_OFLDCONN_NONE:
goto ep_release_conn;
case EP_STATE_OFLDCONN_FAILED:
break;
case EP_STATE_OFLDCONN_COMPL:
if (unlikely(!qedi_conn))
break;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"Active cmd count=%d, abrt_conn=%d, ep state=0x%x, cid=0x%x, qedi_conn=%p\n",
qedi_conn->active_cmd_count, abrt_conn,
qedi_ep->state,
qedi_ep->iscsi_cid,
qedi_ep->conn
);
if (!qedi_conn->active_cmd_count)
abrt_conn = 0;
else
abrt_conn = 1;
if (abrt_conn)
qedi_clearsq(qedi, qedi_conn, NULL);
break;
default:
break;
}
if (!abrt_conn)
wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer;
qedi_ep->state = EP_STATE_DISCONN_START;
if (test_bit(QEDI_IN_SHUTDOWN, &qedi->flags) ||
test_bit(QEDI_IN_RECOVERY, &qedi->flags))
goto ep_release_conn;
/* Delete doorbell from doorbell recovery mechanism */
ret = qedi_ops->common->db_recovery_del(qedi->cdev,
qedi_ep->p_doorbell,
&qedi_ep->db_data);
ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
if (ret) {
QEDI_WARN(&qedi->dbg_ctx,
"destroy_conn failed returned %d\n", ret);
} else {
ret = wait_event_interruptible_timeout(
qedi_ep->tcp_ofld_wait,
(qedi_ep->state !=
EP_STATE_DISCONN_START),
wait_delay);
if ((ret <= 0) || (qedi_ep->state == EP_STATE_DISCONN_START)) {
QEDI_WARN(&qedi->dbg_ctx,
"Destroy conn timedout or interrupted, ret=%d, delay=%d, cid=0x%x\n",
ret, wait_delay, qedi_ep->iscsi_cid);
}
}
ep_release_conn:
ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
if (ret)
QEDI_WARN(&qedi->dbg_ctx,
"release_conn returned %d, cid=0x%x\n",
ret, qedi_ep->iscsi_cid);
ep_exit_recover:
qedi_ep->state = EP_STATE_IDLE;
qedi->ep_tbl[qedi_ep->iscsi_cid] = NULL;
qedi->cid_que.conn_cid_tbl[qedi_ep->iscsi_cid] = NULL;
qedi_free_id(&qedi->lcl_port_tbl, qedi_ep->src_port);
qedi_free_sq(qedi, qedi_ep);
if (qedi_conn)
qedi_conn->ep = NULL;
qedi_ep->conn = NULL;
qedi_ep->qedi = NULL;
atomic_dec(&qedi->num_offloads);
iscsi_destroy_endpoint(ep);
}
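/*
 * Transmit one slow-path frame handed over from the userspace uio ring:
 * copy it into an skb, tag the VLAN if needed, and push it out through
 * the qed light-L2 (ll2) interface.
 */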
static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
{
struct qed_dev *cdev = qedi->cdev;
struct qedi_uio_dev *udev;
struct qedi_uio_ctrl *uctrl;
struct sk_buff *skb;
u32 len;
int rc = 0;
udev = qedi->udev;
if (!udev) {
QEDI_ERR(&qedi->dbg_ctx, "udev is NULL.\n");
return -EINVAL;
}
uctrl = (struct qedi_uio_ctrl *)udev->uctrl;
if (!uctrl) {
QEDI_ERR(&qedi->dbg_ctx, "uctlr is NULL.\n");
return -EINVAL;
}
len = uctrl->host_tx_pkt_len;
if (!len) {
QEDI_ERR(&qedi->dbg_ctx, "Invalid len %u\n", len);
return -EINVAL;
}
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb) {
QEDI_ERR(&qedi->dbg_ctx, "alloc_skb failed\n");
return -EINVAL;
}
skb_put(skb, len);
memcpy(skb->data, udev->tx_pkt, len);
skb->ip_summed = CHECKSUM_NONE;
if (vlanid)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
rc = qedi_ops->ll2->start_xmit(cdev, skb, 0);
if (rc) {
QEDI_ERR(&qedi->dbg_ctx, "ll2 start_xmit returned %d\n",
rc);
kfree_skb(skb);
}
uctrl->host_tx_pkt_len = 0;
uctrl->hw_tx_cons++;
return rc;
}
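/*
 * Worker queued from qedi_set_path(): issue the offload ramrod and wait
 * up to 5 seconds for the firmware to report EP_STATE_OFLDCONN_COMPL,
 * marking the endpoint failed otherwise.
 */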
static void qedi_offload_work(struct work_struct *work)
{
struct qedi_endpoint *qedi_ep =
container_of(work, struct qedi_endpoint, offload_work);
struct qedi_ctx *qedi;
int wait_delay = 5 * HZ;
int ret;
qedi = qedi_ep->qedi;
ret = qedi_iscsi_offload_conn(qedi_ep);
if (ret) {
QEDI_ERR(&qedi->dbg_ctx,
"offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
qedi_ep->iscsi_cid, qedi_ep, ret);
qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
return;
}
ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
(qedi_ep->state ==
EP_STATE_OFLDCONN_COMPL),
wait_delay);
if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
QEDI_ERR(&qedi->dbg_ctx,
"Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
qedi_ep->iscsi_cid, qedi_ep);
}
}
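/*
 * set_path entry point, called from userspace with the resolved path
 * (destination MAC, VLAN, MTU, source IP).  Validates the data,
 * allocates a local TCP port, stores the addresses, and queues
 * qedi_offload_work() to perform the actual connection offload.
 */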
static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
{
struct qedi_ctx *qedi;
struct qedi_endpoint *qedi_ep;
int ret = 0;
u32 iscsi_cid;
u16 port_id = 0;
if (!shost) {
ret = -ENXIO;
QEDI_ERR(NULL, "shost is NULL\n");
return ret;
}
if (strcmp(shost->hostt->proc_name, "qedi")) {
ret = -ENXIO;
QEDI_ERR(NULL, "shost %s is invalid\n",
shost->hostt->proc_name);
return ret;
}
qedi = iscsi_host_priv(shost);
if (path_data->handle == QEDI_PATH_HANDLE) {
ret = qedi_data_avail(qedi, path_data->vlan_id);
goto set_path_exit;
}
iscsi_cid = (u32)path_data->handle;
if (iscsi_cid >= qedi->max_active_conns) {
ret = -EINVAL;
goto set_path_exit;
}
qedi_ep = qedi->ep_tbl[iscsi_cid];
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep);
if (!qedi_ep) {
ret = -EINVAL;
goto set_path_exit;
}
if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
qedi_ep->state = EP_STATE_OFLDCONN_NONE;
ret = -EIO;
goto set_path_exit;
}
ether_addr_copy(&qedi_ep->src_mac[0], &qedi->mac[0]);
ether_addr_copy(&qedi_ep->dst_mac[0], &path_data->mac_addr[0]);
qedi_ep->vlan_id = path_data->vlan_id;
if (path_data->pmtu < DEF_PATH_MTU) {
qedi_ep->pmtu = qedi->ll2_mtu;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"MTU cannot be %u, using default MTU %u\n",
path_data->pmtu, qedi_ep->pmtu);
}
if (path_data->pmtu != qedi->ll2_mtu) {
if (path_data->pmtu > JUMBO_MTU) {
ret = -EINVAL;
QEDI_ERR(NULL, "Invalid MTU %u\n", path_data->pmtu);
goto set_path_exit;
}
qedi_reset_host_mtu(qedi, path_data->pmtu);
qedi_ep->pmtu = qedi->ll2_mtu;
}
port_id = qedi_ep->src_port;
if (port_id >= QEDI_LOCAL_PORT_MIN &&
port_id < QEDI_LOCAL_PORT_MAX) {
if (qedi_alloc_id(&qedi->lcl_port_tbl, port_id))
port_id = 0;
} else {
port_id = 0;
}
if (!port_id) {
port_id = qedi_alloc_new_id(&qedi->lcl_port_tbl);
if (port_id == QEDI_LOCAL_PORT_INVALID) {
QEDI_ERR(&qedi->dbg_ctx,
"Failed to allocate port id for iscsi_cid=0x%x\n",
iscsi_cid);
ret = -ENOMEM;
goto set_path_exit;
}
}
qedi_ep->src_port = port_id;
if (qedi_ep->ip_type == TCP_IPV4) {
memcpy(&qedi_ep->src_addr[0], &path_data->src.v4_addr,
sizeof(struct in_addr));
memcpy(&qedi->src_ip[0], &path_data->src.v4_addr,
sizeof(struct in_addr));
qedi->ip_type = TCP_IPV4;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"src addr:port=%pI4:%u, dst addr:port=%pI4:%u\n",
qedi_ep->src_addr, qedi_ep->src_port,
qedi_ep->dst_addr, qedi_ep->dst_port);
} else {
memcpy(&qedi_ep->src_addr[0], &path_data->src.v6_addr,
sizeof(struct in6_addr));
memcpy(&qedi->src_ip[0], &path_data->src.v6_addr,
sizeof(struct in6_addr));
qedi->ip_type = TCP_IPV6;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"src addr:port=%pI6:%u, dst addr:port=%pI6:%u\n",
qedi_ep->src_addr, qedi_ep->src_port,
qedi_ep->dst_addr, qedi_ep->dst_port);
}
INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
queue_work(qedi->offload_thread, &qedi_ep->offload_work);
ret = 0;
set_path_exit:
return ret;
}
static umode_t qedi_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_HOST_PARAM:
switch (param) {
case ISCSI_HOST_PARAM_NETDEV_NAME:
case ISCSI_HOST_PARAM_HWADDRESS:
case ISCSI_HOST_PARAM_IPADDRESS:
return 0444;
default:
return 0;
}
case ISCSI_PARAM:
switch (param) {
case ISCSI_PARAM_MAX_RECV_DLENGTH:
case ISCSI_PARAM_MAX_XMIT_DLENGTH:
case ISCSI_PARAM_HDRDGST_EN:
case ISCSI_PARAM_DATADGST_EN:
case ISCSI_PARAM_CONN_ADDRESS:
case ISCSI_PARAM_CONN_PORT:
case ISCSI_PARAM_EXP_STATSN:
case ISCSI_PARAM_PERSISTENT_ADDRESS:
case ISCSI_PARAM_PERSISTENT_PORT:
case ISCSI_PARAM_PING_TMO:
case ISCSI_PARAM_RECV_TMO:
case ISCSI_PARAM_INITIAL_R2T_EN:
case ISCSI_PARAM_MAX_R2T:
case ISCSI_PARAM_IMM_DATA_EN:
case ISCSI_PARAM_FIRST_BURST:
case ISCSI_PARAM_MAX_BURST:
case ISCSI_PARAM_PDU_INORDER_EN:
case ISCSI_PARAM_DATASEQ_INORDER_EN:
case ISCSI_PARAM_ERL:
case ISCSI_PARAM_TARGET_NAME:
case ISCSI_PARAM_TPGT:
case ISCSI_PARAM_USERNAME:
case ISCSI_PARAM_PASSWORD:
case ISCSI_PARAM_USERNAME_IN:
case ISCSI_PARAM_PASSWORD_IN:
case ISCSI_PARAM_FAST_ABORT:
case ISCSI_PARAM_ABORT_TMO:
case ISCSI_PARAM_LU_RESET_TMO:
case ISCSI_PARAM_TGT_RESET_TMO:
case ISCSI_PARAM_IFACE_NAME:
case ISCSI_PARAM_INITIATOR_NAME:
case ISCSI_PARAM_BOOT_ROOT:
case ISCSI_PARAM_BOOT_NIC:
case ISCSI_PARAM_BOOT_TARGET:
return 0444;
default:
return 0;
}
}
return 0;
}
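/*
 * cleanup_task entry point: release the SG mapping and return the
 * firmware task index for commands that made it past qedi_task_xmit().
 */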
static void qedi_cleanup_task(struct iscsi_task *task)
{
struct qedi_cmd *cmd;
if (task->state == ISCSI_TASK_PENDING) {
QEDI_INFO(NULL, QEDI_LOG_IO, "Returning ref_cnt=%d\n",
refcount_read(&task->refcount));
return;
}
if (task->sc)
qedi_iscsi_unmap_sg_list(task->dd_data);
cmd = task->dd_data;
if (cmd->task_id != U16_MAX)
qedi_clear_task_idx(iscsi_host_priv(task->conn->session->host),
cmd->task_id);
cmd->task_id = U16_MAX;
cmd->scsi_cmd = NULL;
}
struct iscsi_transport qedi_iscsi_transport = {
.owner = THIS_MODULE,
.name = QEDI_MODULE_NAME,
.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_MULTI_R2T | CAP_DATADGST |
CAP_DATA_PATH_OFFLOAD | CAP_TEXT_NEGO,
.create_session = qedi_session_create,
.destroy_session = qedi_session_destroy,
.create_conn = qedi_conn_create,
.bind_conn = qedi_conn_bind,
.unbind_conn = iscsi_conn_unbind,
.start_conn = qedi_conn_start,
.stop_conn = iscsi_conn_stop,
.destroy_conn = qedi_conn_destroy,
.set_param = iscsi_set_param,
.get_ep_param = qedi_ep_get_param,
.get_conn_param = iscsi_conn_get_param,
.get_session_param = iscsi_session_get_param,
.get_host_param = qedi_host_get_param,
.send_pdu = iscsi_conn_send_pdu,
.get_stats = qedi_conn_get_stats,
.xmit_task = qedi_task_xmit,
.cleanup_task = qedi_cleanup_task,
.session_recovery_timedout = iscsi_session_recovery_timedout,
.ep_connect = qedi_ep_connect,
.ep_poll = qedi_ep_poll,
.ep_disconnect = qedi_ep_disconnect,
.set_path = qedi_set_path,
.attr_is_visible = qedi_attr_is_visible,
};
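/*
 * Mark the connection aborted and signal a connection failure to
 * libiscsi so that it drives session recovery.
 */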
void qedi_start_conn_recovery(struct qedi_ctx *qedi,
struct qedi_conn *qedi_conn)
{
struct iscsi_cls_session *cls_sess;
struct iscsi_cls_conn *cls_conn;
struct iscsi_conn *conn;
cls_conn = qedi_conn->cls_conn;
conn = cls_conn->dd_data;
cls_sess = iscsi_conn_to_session(cls_conn);
if (iscsi_is_session_online(cls_sess)) {
qedi_conn->abrt_conn = 1;
QEDI_ERR(&qedi->dbg_ctx,
"Failing connection, state=0x%x, cid=0x%x\n",
conn->session->state, qedi_conn->iscsi_conn_id);
iscsi_conn_failure(qedi_conn->cls_conn->dd_data,
ISCSI_ERR_CONN_FAILED);
}
}
static const struct {
enum iscsi_error_types error_code;
char *err_string;
} qedi_iscsi_error[] = {
{ ISCSI_STATUS_NONE,
"tcp_error none"
},
{ ISCSI_CONN_ERROR_TASK_CID_MISMATCH,
"task cid mismatch"
},
{ ISCSI_CONN_ERROR_TASK_NOT_VALID,
"invalid task"
},
{ ISCSI_CONN_ERROR_RQ_RING_IS_FULL,
"rq ring full"
},
{ ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL,
"cmdq ring full"
},
{ ISCSI_CONN_ERROR_HQE_CACHING_FAILED,
"sge caching failed"
},
{ ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR,
"hdr digest error"
},
{ ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR,
"local cmpl error"
},
{ ISCSI_CONN_ERROR_DATA_OVERRUN,
"invalid task"
},
{ ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR,
"out of sge error"
},
{ ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR,
"tcp ip fragment error"
},
{ ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN,
"AHS len protocol error"
},
{ ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE,
"itt out of range error"
},
{ ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE,
"data seg more than pdu size"
},
{ ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE,
"invalid opcode"
},
{ ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE,
"invalid opcode before update"
},
{ ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL,
"unexpected opcode"
},
{ ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA,
"r2t carries no data"
},
{ ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN,
"data sn error"
},
{ ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT,
"data TTT error"
},
{ ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT,
"r2t TTT error"
},
{ ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET,
"buffer offset error"
},
{ ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO,
"buffer offset ooo"
},
{ ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN,
"data seg len 0"
},
{ ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0,
"data xer len error"
},
{ ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1,
"data xer len1 error"
},
{ ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2,
"data xer len2 error"
},
{ ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN,
"protocol lun error"
},
{ ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO,
"f bit zero error"
},
{ ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN,
"exp stat sn error"
},
{ ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO,
"dsl not zero error"
},
{ ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL,
"invalid dsl"
},
{ ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG,
"data seg len too big"
},
{ ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT,
"outstanding r2t count error"
},
{ ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
"sense datalen error"
},
};
static char *qedi_get_iscsi_error(enum iscsi_error_types err_code)
{
int i;
char *msg = NULL;
for (i = 0; i < ARRAY_SIZE(qedi_iscsi_error); i++) {
if (qedi_iscsi_error[i].error_code == err_code) {
msg = qedi_iscsi_error[i].err_string;
break;
}
}
return msg;
}
void qedi_process_iscsi_error(struct qedi_endpoint *ep,
struct iscsi_eqe_data *data)
{
struct qedi_conn *qedi_conn;
struct qedi_ctx *qedi;
char warn_notice[] = "iscsi_warning";
char error_notice[] = "iscsi_error";
char unknown_msg[] = "Unknown error";
char *message;
int need_recovery = 0;
u32 err_mask = 0;
char *msg;
if (!ep)
return;
qedi_conn = ep->conn;
if (!qedi_conn)
return;
qedi = ep->qedi;
QEDI_ERR(&qedi->dbg_ctx, "async event iscsi error:0x%x\n",
data->error_code);
if (err_mask) {
need_recovery = 0;
message = warn_notice;
} else {
need_recovery = 1;
message = error_notice;
}
msg = qedi_get_iscsi_error(data->error_code);
if (!msg) {
need_recovery = 0;
msg = unknown_msg;
}
iscsi_conn_printk(KERN_ALERT,
qedi_conn->cls_conn->dd_data,
"qedi: %s - %s\n", message, msg);
if (need_recovery)
qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
}
void qedi_process_tcp_error(struct qedi_endpoint *ep,
struct iscsi_eqe_data *data)
{
struct qedi_conn *qedi_conn;
if (!ep)
return;
qedi_conn = ep->conn;
if (!qedi_conn)
return;
QEDI_ERR(&ep->qedi->dbg_ctx, "async event TCP error:0x%x\n",
data->error_code);
qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
}