diff --git a/drivers/scsi/elx/libefc_sli/sli4.c b/drivers/scsi/elx/libefc_sli/sli4.c
index 6c6c04e1b74d..907d67aeac23 100644
--- a/drivers/scsi/elx/libefc_sli/sli4.c
+++ b/drivers/scsi/elx/libefc_sli/sli4.c
@@ -4145,7 +4145,7 @@ static int
 sli_get_read_config(struct sli4 *sli4)
 {
     struct sli4_rsp_read_config *conf = sli4->bmbx.virt;
-    u32 i, total, total_size;
+    u32 i, total;
     u32 *base;
 
     if (sli_cmd_read_config(sli4, sli4->bmbx.virt)) {
@@ -4203,8 +4203,7 @@ sli_get_read_config(struct sli4 *sli4)
 
     for (i = 0; i < SLI4_RSRC_MAX; i++) {
         total = sli4->ext[i].number * sli4->ext[i].size;
-        total_size = BITS_TO_LONGS(total) * sizeof(long);
-        sli4->ext[i].use_map = kzalloc(total_size, GFP_KERNEL);
+        sli4->ext[i].use_map = bitmap_zalloc(total, GFP_KERNEL);
         if (!sli4->ext[i].use_map) {
             efc_log_err(sli4, "bitmap memory allocation failed %d\n", i);
@@ -4743,7 +4742,7 @@ sli_reset(struct sli4 *sli4)
     sli4->ext[0].base = NULL;
 
     for (i = 0; i < SLI4_RSRC_MAX; i++) {
-        kfree(sli4->ext[i].use_map);
+        bitmap_free(sli4->ext[i].use_map);
         sli4->ext[i].use_map = NULL;
         sli4->ext[i].base = NULL;
     }
@@ -4784,7 +4783,7 @@ sli_teardown(struct sli4 *sli4)
 
     for (i = 0; i < SLI4_RSRC_MAX; i++) {
         sli4->ext[i].base = NULL;
-        kfree(sli4->ext[i].use_map);
+        bitmap_free(sli4->ext[i].use_map);
         sli4->ext[i].use_map = NULL;
     }
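
Note: the sli4 hunks above swap an open-coded bitmap allocation (BITS_TO_LONGS() arithmetic plus kzalloc()/kfree()) for the dedicated bitmap API. A minimal sketch of the pattern, outside any driver context:

    #include <linux/bitmap.h>
    #include <linux/slab.h>

    /* Before: size the bitmap by hand and allocate it as raw memory. */
    unsigned long *map_old(unsigned int nbits)
    {
        return kzalloc(BITS_TO_LONGS(nbits) * sizeof(long), GFP_KERNEL);
    }

    /* After: bitmap_zalloc() hides the sizing arithmetic; it must be
     * paired with bitmap_free() rather than kfree(), which is why the
     * sli_reset()/sli_teardown() hunks change as well.
     */
    unsigned long *map_new(unsigned int nbits)
    {
        return bitmap_zalloc(nbits, GFP_KERNEL);
    }
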
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index cda0135c43db..8049b00b6766 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -388,6 +388,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
     shost->shost_state = SHOST_CREATED;
     INIT_LIST_HEAD(&shost->__devices);
     INIT_LIST_HEAD(&shost->__targets);
+    INIT_LIST_HEAD(&shost->eh_abort_list);
     INIT_LIST_HEAD(&shost->eh_cmd_q);
     INIT_LIST_HEAD(&shost->starved_list);
     init_waitqueue_head(&shost->host_wait);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 30f9545d2285..032efb294ee5 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2762,7 +2762,12 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
         if (fcport->loop_id != FC_NO_LOOP_ID)
             fcport->logout_on_delete = 1;
 
-        qlt_schedule_sess_for_deletion(fcport);
+        if (!EDIF_NEGOTIATION_PENDING(fcport)) {
+            ql_dbg(ql_dbg_disc, fcport->vha, 0x911e,
+                "%s %d schedule session deletion\n", __func__,
+                __LINE__);
+            qlt_schedule_sess_for_deletion(fcport);
+        }
     } else {
         qla2x00_port_logout(fcport->vha, fcport);
     }
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 8924eeb9367d..9ebf4a234d9a 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -639,9 +639,9 @@ struct qla_els_pt_arg {
     u8 els_opcode;
     u8 vp_idx;
     __le16 nport_handle;
-    u16 control_flags;
+    u16 control_flags, ox_id;
     __le32 rx_xchg_address;
-    port_id_t did;
+    port_id_t did, sid;
 
     u32 tx_len, tx_byte_count, rx_len, rx_byte_count;
     dma_addr_t tx_addr, rx_addr;
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
index ad746c62f0d4..2e37b189cb75 100644
--- a/drivers/scsi/qla2xxx/qla_edif.c
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -218,7 +218,7 @@ fc_port_t *fcport)
             "%s edif not enabled\n", __func__);
         goto done;
     }
-    if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+    if (DBELL_INACTIVE(vha)) {
         ql_dbg(ql_dbg_edif, vha, 0x09102,
             "%s doorbell not enabled\n", __func__);
         goto done;
@@ -290,63 +290,6 @@ qla_edif_app_check(scsi_qla_host_t *vha, struct app_id appid)
     return false;
 }
 
-static void qla_edif_reset_auth_wait(struct fc_port *fcport, int state,
-    int waitonly)
-{
-    int cnt, max_cnt = 200;
-    bool traced = false;
-
-    fcport->keep_nport_handle = 1;
-
-    if (!waitonly) {
-        qla2x00_set_fcport_disc_state(fcport, state);
-        qlt_schedule_sess_for_deletion(fcport);
-    } else {
-        qla2x00_set_fcport_disc_state(fcport, state);
-    }
-
-    ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
-        "%s: waiting for session, max_cnt=%u\n",
-        __func__, max_cnt);
-
-    cnt = 0;
-
-    if (waitonly) {
-        /* Marker wait min 10 msecs. */
-        msleep(50);
-        cnt += 50;
-    }
-    while (1) {
-        if (!traced) {
-            ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
-                "%s: session sleep.\n",
-                __func__);
-            traced = true;
-        }
-        msleep(20);
-        cnt++;
-        if (waitonly && (fcport->disc_state == state ||
-            fcport->disc_state == DSC_LOGIN_COMPLETE))
-            break;
-        if (fcport->disc_state == DSC_LOGIN_AUTH_PEND)
-            break;
-        if (cnt > max_cnt)
-            break;
-    }
-
-    if (!waitonly) {
-        ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
-            "%s: waited for session - %8phC, loopid=%x portid=%06x fcport=%p state=%u, cnt=%u\n",
-            __func__, fcport->port_name, fcport->loop_id,
-            fcport->d_id.b24, fcport, fcport->disc_state, cnt);
-    } else {
-        ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
-            "%s: waited ONLY for session - %8phC, loopid=%x portid=%06x fcport=%p state=%u, cnt=%u\n",
-            __func__, fcport->port_name, fcport->loop_id,
-            fcport->d_id.b24, fcport, fcport->disc_state, cnt);
-    }
-}
-
 static void
 qla_edif_free_sa_ctl(fc_port_t *fcport, struct edif_sa_ctl *sa_ctl,
     int index)
@@ -529,7 +472,8 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
     struct app_start_reply appreply;
     struct fc_port *fcport, *tf;
 
-    ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app start\n", __func__);
+    ql_log(ql_log_info, vha, 0x1313,
+        "EDIF application registration with driver, FC device connections will be re-established.\n");
 
     sg_copy_to_buffer(bsg_job->request_payload.sg_list,
         bsg_job->request_payload.sg_cnt, &appstart,
@@ -538,9 +482,9 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
     ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app_vid=%x app_start_flags %x\n",
         __func__, appstart.app_info.app_vid, appstart.app_start_flags);
 
-    if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+    if (DBELL_INACTIVE(vha)) {
         /* mark doorbell as active since an app is now present */
-        vha->e_dbell.db_flags = EDB_ACTIVE;
+        vha->e_dbell.db_flags |= EDB_ACTIVE;
     } else {
         ql_dbg(ql_dbg_edif, vha, 0x911e, "%s doorbell already active\n",
             __func__);
@@ -554,37 +498,36 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
         qla2xxx_wake_dpc(vha);
     } else {
         list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+            ql_dbg(ql_dbg_edif, vha, 0x2058,
+                "FCSP - nn %8phN pn %8phN portid=%06x.\n",
+                fcport->node_name, fcport->port_name,
+                fcport->d_id.b24);
             ql_dbg(ql_dbg_edif, vha, 0xf084,
-                "%s: sess %p %8phC lid %#04x s_id %06x logout %d\n",
-                __func__, fcport, fcport->port_name,
-                fcport->loop_id, fcport->d_id.b24,
-                fcport->logout_on_delete);
-
-            ql_dbg(ql_dbg_edif, vha, 0xf084,
-                "keep %d els_logo %d disc state %d auth state %d stop state %d\n",
-                fcport->keep_nport_handle,
-                fcport->send_els_logo, fcport->disc_state,
-                fcport->edif.auth_state, fcport->edif.app_stop);
+                "%s: se_sess %p / sess %p from port %8phC "
+                "loop_id %#04x s_id %06x logout %d "
+                "keep %d els_logo %d disc state %d auth state %d "
+                "stop state %d\n",
+                __func__, fcport->se_sess, fcport,
+                fcport->port_name, fcport->loop_id,
+                fcport->d_id.b24, fcport->logout_on_delete,
+                fcport->keep_nport_handle, fcport->send_els_logo,
+                fcport->disc_state, fcport->edif.auth_state,
+                fcport->edif.app_stop);
 
             if (atomic_read(&vha->loop_state) == LOOP_DOWN)
                 break;
-            if (!(fcport->flags & FCF_FCSP_DEVICE))
-                continue;
 
             fcport->edif.app_started = 1;
-            if (fcport->edif.app_stop ||
-                (fcport->disc_state != DSC_LOGIN_COMPLETE &&
-                 fcport->disc_state != DSC_LOGIN_PEND &&
-                 fcport->disc_state != DSC_DELETED)) {
-                /* no activity */
-                fcport->edif.app_stop = 0;
+            fcport->login_retry = vha->hw->login_retry_count;
 
-                ql_dbg(ql_dbg_edif, vha, 0x911e,
-                    "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
-                    __func__, fcport->port_name);
-                fcport->edif.app_sess_online = 1;
-                qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 0);
-            }
+            /* no activity */
+            fcport->edif.app_stop = 0;
+
+            ql_dbg(ql_dbg_edif, vha, 0x911e,
+                "%s wwpn %8phC calling qlt_schedule_sess_for_deletion\n",
+                __func__, fcport->port_name);
+            fcport->edif.app_sess_online = 0;
+            qlt_schedule_sess_for_deletion(fcport);
 
             qla_edif_sa_ctl_init(vha, fcport);
         }
     }
@@ -601,14 +544,14 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
     appreply.edif_enode_active = vha->pur_cinfo.enode_flags;
     appreply.edif_edb_active = vha->e_dbell.db_flags;
 
-    bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
-        sizeof(struct app_start_reply);
+    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
 
     SET_DID_STATUS(bsg_reply->result, DID_OK);
 
-    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
-        bsg_job->reply_payload.sg_cnt, &appreply,
-        sizeof(struct app_start_reply));
+    bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+        bsg_job->reply_payload.sg_cnt,
+        &appreply,
+        sizeof(struct app_start_reply));
 
     ql_dbg(ql_dbg_edif, vha, 0x911d,
         "%s app start completed with 0x%x\n",
@@ -800,15 +743,15 @@ qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
         ql_dbg(ql_dbg_edif, vha, 0x911e,
             "%s AUTH complete - RESUME with prli for wwpn %8phC\n",
             __func__, fcport->port_name);
-        qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 1);
         qla24xx_post_prli_work(vha, fcport);
     }
 
errstate_exit:
     bsg_job->reply_len = sizeof(struct fc_bsg_reply);
-    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
-        bsg_job->reply_payload.sg_cnt, &appplogireply,
-        sizeof(struct app_plogi_reply));
+    bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+        bsg_job->reply_payload.sg_cnt,
+        &appplogireply,
+        sizeof(struct app_plogi_reply));
 
     return rval;
 }
@@ -873,7 +816,7 @@ qla_edif_app_authfail(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
 
         if (qla_ini_mode_enabled(fcport->vha)) {
             fcport->send_els_logo = 1;
-            qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 0);
+            qlt_schedule_sess_for_deletion(fcport);
         }
     }
 
@@ -891,7 +834,7 @@ static int
 qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
 {
     int32_t rval = 0;
-    int32_t num_cnt;
+    int32_t pcnt = 0;
     struct fc_bsg_reply *bsg_reply = bsg_job->reply;
     struct app_pinfo_req app_req;
     struct app_pinfo_reply *app_reply;
@@ -903,16 +846,14 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
         bsg_job->request_payload.sg_cnt, &app_req,
         sizeof(struct app_pinfo_req));
 
-    num_cnt = app_req.num_ports;    /* num of ports alloc'd by app */
-
     app_reply = kzalloc((sizeof(struct app_pinfo_reply) +
-        sizeof(struct app_pinfo) * num_cnt), GFP_KERNEL);
+        sizeof(struct app_pinfo) * app_req.num_ports), GFP_KERNEL);
+
     if (!app_reply) {
         SET_DID_STATUS(bsg_reply->result, DID_ERROR);
         rval = -1;
     } else {
         struct fc_port *fcport = NULL, *tf;
-        uint32_t pcnt = 0;
 
         list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
             if (!(fcport->flags & FCF_FCSP_DEVICE))
                 continue;
@@ -981,9 +922,11 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
         SET_DID_STATUS(bsg_reply->result, DID_OK);
     }
 
-    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
-        bsg_job->reply_payload.sg_cnt, app_reply,
-        sizeof(struct app_pinfo_reply) + sizeof(struct app_pinfo) * num_cnt);
+    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+    bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+        bsg_job->reply_payload.sg_cnt,
+        app_reply,
+        sizeof(struct app_pinfo_reply) + sizeof(struct app_pinfo) * pcnt);
 
     kfree(app_reply);
 
@@ -1000,10 +943,11 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
 {
     int32_t rval = 0;
     struct fc_bsg_reply *bsg_reply = bsg_job->reply;
-    uint32_t ret_size, size;
+    uint32_t size;
 
     struct app_sinfo_req app_req;
     struct app_stats_reply *app_reply;
+    uint32_t pcnt = 0;
 
     sg_copy_to_buffer(bsg_job->request_payload.sg_list,
         bsg_job->request_payload.sg_cnt, &app_req,
@@ -1019,18 +963,12 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
     size = sizeof(struct app_stats_reply) +
         (sizeof(struct app_sinfo) * app_req.num_ports);
 
-    if (size > bsg_job->reply_payload.payload_len)
-        ret_size = bsg_job->reply_payload.payload_len;
-    else
-        ret_size = size;
-
     app_reply = kzalloc(size, GFP_KERNEL);
     if (!app_reply) {
         SET_DID_STATUS(bsg_reply->result, DID_ERROR);
         rval = -1;
     } else {
         struct fc_port *fcport = NULL, *tf;
-        uint32_t pcnt = 0;
 
         list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
             if (fcport->edif.enable) {
@@ -1054,9 +992,11 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
         SET_DID_STATUS(bsg_reply->result, DID_OK);
     }
 
+    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
     bsg_reply->reply_payload_rcv_len =
         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
-            bsg_job->reply_payload.sg_cnt, app_reply, ret_size);
+            bsg_job->reply_payload.sg_cnt, app_reply,
+            sizeof(struct app_stats_reply) + (sizeof(struct app_sinfo) * pcnt));
 
     kfree(app_reply);
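
Note: the reply hunks above all settle on the same BSG epilogue — reply_len covers only the fc_bsg_reply header, and the number of payload bytes actually copied is reported through reply_payload_rcv_len. Sketched in isolation (reply_buf/reply_size stand in for the per-command reply):

    /* Common shape of the reply path used above; sg_copy_from_buffer()
     * returns how many bytes fit into the caller-supplied sg list, which
     * is exactly what reply_payload_rcv_len is meant to carry.
     */
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    bsg_reply->reply_payload_rcv_len =
        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                            bsg_job->reply_payload.sg_cnt,
                            reply_buf, reply_size);
    SET_DID_STATUS(bsg_reply->result, DID_OK);
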
@@ -1130,8 +1070,7 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job)
             __func__,
             bsg_request->rqst_data.h_vendor.vendor_cmd[1]);
         rval = EXT_STATUS_INVALID_PARAM;
-        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
-        SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+        done = false;
         break;
     }
 
@@ -1330,7 +1269,7 @@ qla24xx_sadb_update(struct bsg_job *bsg_job)
         goto done;
     }
 
-    if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+    if (DBELL_INACTIVE(vha)) {
         ql_log(ql_log_warn, vha, 0x70a1, "App not started\n");
         rval = -EIO;
         SET_DID_STATUS(bsg_reply->result, DID_ERROR);
@@ -1651,6 +1590,40 @@ qla_enode_stop(scsi_qla_host_t *vha)
     spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
 }
 
+static void qla_enode_clear(scsi_qla_host_t *vha, port_id_t portid)
+{
+    unsigned long flags;
+    struct enode *e, *tmp;
+    struct purexevent *purex;
+    LIST_HEAD(enode_list);
+
+    if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
+        ql_dbg(ql_dbg_edif, vha, 0x09102,
+            "%s enode not active\n", __func__);
+        return;
+    }
+    spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
+    list_for_each_entry_safe(e, tmp, &vha->pur_cinfo.head, list) {
+        purex = &e->u.purexinfo;
+        if (purex->pur_info.pur_sid.b24 == portid.b24) {
+            ql_dbg(ql_dbg_edif, vha, 0x911d,
+                "%s free ELS sid=%06x. xchg %x, nb=%xh\n",
+                __func__, portid.b24,
+                purex->pur_info.pur_rx_xchg_address,
+                purex->pur_info.pur_bytes_rcvd);
+
+            list_del_init(&e->list);
+            list_add_tail(&e->list, &enode_list);
+        }
+    }
+    spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
+
+    list_for_each_entry_safe(e, tmp, &enode_list, list) {
+        list_del_init(&e->list);
+        qla_enode_free(vha, e);
+    }
+}
+
 /*
  * allocate enode struct and populate buffer
  * returns: enode pointer with buffers
  */
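
Note: qla_enode_clear() above (and qla_edb_clear() further down) both use the standard "collect under the lock, free after dropping it" idiom, so qla_enode_free() never runs inside the pur_lock critical section. A generic sketch of the idiom, with hypothetical node/list/lock names:

    LIST_HEAD(reap_list);
    struct my_node *n, *tmp;
    unsigned long flags;

    spin_lock_irqsave(&head_lock, flags);
    list_for_each_entry_safe(n, tmp, &head, list) {
        if (node_matches(n)) {                   /* selection predicate */
            list_del_init(&n->list);             /* unlink from shared list */
            list_add_tail(&n->list, &reap_list); /* park on a private list */
        }
    }
    spin_unlock_irqrestore(&head_lock, flags);

    /* reap_list is private now, so freeing needs no lock */
    list_for_each_entry_safe(n, tmp, &reap_list, list) {
        list_del_init(&n->list);
        kfree(n);
    }
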
@@ -1695,41 +1668,25 @@ static struct enode *
 qla_enode_find(scsi_qla_host_t *vha, uint32_t ntype, uint32_t p1, uint32_t p2)
 {
     struct enode *node_rtn = NULL;
-    struct enode *list_node = NULL;
+    struct enode *list_node, *q;
     unsigned long flags;
-    struct list_head *pos, *q;
     uint32_t sid;
-    uint32_t rw_flag;
     struct purexevent *purex;
 
     /* secure the list from moving under us */
     spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
 
-    list_for_each_safe(pos, q, &vha->pur_cinfo.head) {
-        list_node = list_entry(pos, struct enode, list);
+    list_for_each_entry_safe(list_node, q, &vha->pur_cinfo.head, list) {
 
         /* node type determines what p1 and p2 are */
         purex = &list_node->u.purexinfo;
         sid = p1;
-        rw_flag = p2;
 
         if (purex->pur_info.pur_sid.b24 == sid) {
-            if (purex->pur_info.pur_pend == 1 &&
-                rw_flag == PUR_GET) {
-                /*
-                 * if the receive is in progress
-                 * and its a read/get then can't
-                 * transfer yet
-                 */
-                ql_dbg(ql_dbg_edif, vha, 0x9106,
-                    "%s purex xfer in progress for sid=%x\n",
-                    __func__, sid);
-            } else {
-                /* found it and its complete */
-                node_rtn = list_node;
-                list_del(pos);
-                break;
-            }
+            /* found it and its complete */
+            node_rtn = list_node;
+            list_del(&list_node->list);
+            break;
         }
     }
 
@@ -1802,7 +1759,8 @@ qla_els_reject_iocb(scsi_qla_host_t *vha, struct qla_qpair *qp,
     qla_els_pt_iocb(vha, els_iocb, a);
 
     ql_dbg(ql_dbg_edif, vha, 0x0183,
-        "Sending ELS reject...\n");
+        "Sending ELS reject ox_id %04x s:%06x -> d:%06x\n",
+        a->ox_id, a->sid.b24, a->did.b24);
     ql_dump_buffer(ql_dbg_edif + ql_dbg_verbose, vha, 0x0185,
         vha->hw->elsrej.c, sizeof(*vha->hw->elsrej.c));
     /* flush iocb to mem before notifying hw doorbell */
@@ -1814,7 +1772,7 @@ qla_els_reject_iocb(scsi_qla_host_t *vha, struct qla_qpair *qp,
 void
 qla_edb_init(scsi_qla_host_t *vha)
 {
-    if (vha->e_dbell.db_flags == EDB_ACTIVE) {
+    if (DBELL_ACTIVE(vha)) {
         /* list already init'd - error */
         ql_dbg(ql_dbg_edif, vha, 0x09102,
             "edif db already initialized, cannot reinit\n");
@@ -1850,6 +1808,57 @@ qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node)
     node->ntype = N_UNDEF;
 }
 
+static void qla_edb_clear(scsi_qla_host_t *vha, port_id_t portid)
+{
+    unsigned long flags;
+    struct edb_node *e, *tmp;
+    port_id_t sid;
+    LIST_HEAD(edb_list);
+
+    if (DBELL_INACTIVE(vha)) {
+        /* doorbell list not enabled */
+        ql_dbg(ql_dbg_edif, vha, 0x09102,
+            "%s doorbell not enabled\n", __func__);
+        return;
+    }
+
+    /* grab lock so list doesn't move */
+    spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+    list_for_each_entry_safe(e, tmp, &vha->e_dbell.head, list) {
+        switch (e->ntype) {
+        case VND_CMD_AUTH_STATE_NEEDED:
+        case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
+            sid = e->u.plogi_did;
+            break;
+        case VND_CMD_AUTH_STATE_ELS_RCVD:
+            sid = e->u.els_sid;
+            break;
+        case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
+            /* app wants to see this */
+            continue;
+        default:
+            ql_log(ql_log_warn, vha, 0x09102,
+                "%s unknown node type: %x\n", __func__, e->ntype);
+            sid.b24 = 0;
+            break;
+        }
+        if (sid.b24 == portid.b24) {
+            ql_dbg(ql_dbg_edif, vha, 0x910f,
+                "%s free doorbell event : node type = %x %p\n",
+                __func__, e->ntype, e);
+            list_del_init(&e->list);
+            list_add_tail(&e->list, &edb_list);
+        }
+    }
+    spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+    list_for_each_entry_safe(e, tmp, &edb_list, list) {
+        qla_edb_node_free(vha, e);
+        list_del_init(&e->list);
+        kfree(e);
+    }
+}
+
 /* function called when app is stopping */
 void
@@ -1858,7 +1867,7 @@ qla_edb_stop(scsi_qla_host_t *vha)
 {
     unsigned long flags;
     struct edb_node *node, *q;
 
-    if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+    if (DBELL_INACTIVE(vha)) {
         /* doorbell list not enabled */
         ql_dbg(ql_dbg_edif, vha, 0x09102,
             "%s doorbell not enabled\n", __func__);
@@ -1909,7 +1918,7 @@ qla_edb_node_add(scsi_qla_host_t *vha, struct edb_node *ptr)
 {
     unsigned long flags;
 
-    if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+    if (DBELL_INACTIVE(vha)) {
         /* doorbell list not enabled */
         ql_dbg(ql_dbg_edif, vha, 0x09102,
             "%s doorbell not enabled\n", __func__);
@@ -1940,7 +1949,7 @@ qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype,
         return;
     }
 
-    if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+    if (DBELL_INACTIVE(vha)) {
         if (fcport)
             fcport->edif.auth_state = dbtype;
         /* doorbell list not enabled */
@@ -2035,7 +2044,7 @@ qla_edif_timer(scsi_qla_host_t *vha)
     struct qla_hw_data *ha = vha->hw;
 
     if (!vha->vp_idx && N2N_TOPO(ha) && ha->flags.n2n_fw_acc_sec) {
-        if (vha->e_dbell.db_flags != EDB_ACTIVE &&
+        if (DBELL_INACTIVE(vha) &&
             ha->edif_post_stop_cnt_down) {
             ha->edif_post_stop_cnt_down--;
 
@@ -2073,7 +2082,7 @@ edif_doorbell_show(struct device *dev, struct device_attribute *attr,
     sz = 256;
 
     /* stop new threads from waiting if we're not init'd */
-    if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+    if (DBELL_INACTIVE(vha)) {
         ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122,
             "%s error - edif db not enabled\n", __func__);
         return 0;
@@ -2346,6 +2355,7 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
     a.tx_addr = vha->hw->elsrej.cdma;
     a.vp_idx = vha->vp_idx;
     a.control_flags = EPD_ELS_RJT;
+    a.ox_id = le16_to_cpu(p->ox_id);
 
     sid = p->s_id[0] | (p->s_id[1] << 8) | (p->s_id[2] << 16);
 
@@ -2357,7 +2367,7 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
         return;
     }
 
-    if (totlen > MAX_PAYLOAD) {
+    if (totlen > ELS_MAX_PAYLOAD) {
         ql_dbg(ql_dbg_edif, vha, 0x0910d,
             "%s WARNING: verbose ELS frame received (totlen=%x)\n",
             __func__, totlen);
@@ -2387,7 +2397,6 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
 
     purex = &ptr->u.purexinfo;
     purex->pur_info.pur_sid = a.did;
-    purex->pur_info.pur_pend = 0;
     purex->pur_info.pur_bytes_rcvd = totlen;
     purex->pur_info.pur_rx_xchg_address = le32_to_cpu(p->rx_xchg_addr);
     purex->pur_info.pur_nphdl = le16_to_cpu(p->nport_handle);
@@ -2396,6 +2405,8 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
     purex->pur_info.pur_did.b.al_pa = p->d_id[0];
     purex->pur_info.vp_idx = p->vp_idx;
 
+    a.sid = purex->pur_info.pur_did;
+
     rc = __qla_copy_purex_to_buffer(vha, pkt, rsp, purex->msgp,
         purex->msgp_len);
     if (rc) {
@@ -2419,7 +2430,7 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
 
     fcport = qla2x00_find_fcport_by_pid(host, &purex->pur_info.pur_sid);
 
-    if (host->e_dbell.db_flags != EDB_ACTIVE ||
+    if (DBELL_INACTIVE(vha) ||
         (fcport && EDIF_SESSION_DOWN(fcport))) {
         ql_dbg(ql_dbg_edif, host, 0x0910c, "%s e_dbell.db_flags =%x %06x\n",
             __func__, host->e_dbell.db_flags,
@@ -2436,7 +2447,7 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
     ql_dbg(ql_dbg_edif, host, 0x0910c,
         "%s COMPLETE purex->pur_info.pur_bytes_rcvd =%xh s:%06x -> d:%06x xchg=%xh\n",
         __func__, purex->pur_info.pur_bytes_rcvd, purex->pur_info.pur_sid.b24,
-        purex->pur_info.pur_did.b24, p->rx_xchg_addr);
+        purex->pur_info.pur_did.b24, purex->pur_info.pur_rx_xchg_address);
 
     qla_edb_eventcreate(host, VND_CMD_AUTH_STATE_ELS_RCVD, sid, 0, NULL);
 }
@@ -3139,18 +3150,14 @@ static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
 /* release any sadb entries -- only done at teardown */
 void qla_edif_sadb_release(struct qla_hw_data *ha)
 {
-    struct list_head *pos;
-    struct list_head *tmp;
-    struct edif_sa_index_entry *entry;
+    struct edif_sa_index_entry *entry, *tmp;
 
-    list_for_each_safe(pos, tmp, &ha->sadb_rx_index_list) {
-        entry = list_entry(pos, struct edif_sa_index_entry, next);
+    list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) {
         list_del(&entry->next);
         kfree(entry);
     }
 
-    list_for_each_safe(pos, tmp, &ha->sadb_tx_index_list) {
-        entry = list_entry(pos, struct edif_sa_index_entry, next);
+    list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) {
         list_del(&entry->next);
         kfree(entry);
     }
@@ -3449,7 +3456,7 @@ done:
 
 void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess)
 {
-    if (sess->edif.app_sess_online && vha->e_dbell.db_flags & EDB_ACTIVE) {
+    if (sess->edif.app_sess_online && DBELL_ACTIVE(vha)) {
         ql_dbg(ql_dbg_disc, vha, 0xf09c,
             "%s: sess %8phN send port_offline event\n",
             __func__, sess->port_name);
@@ -3459,3 +3466,12 @@ void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess)
         qla2x00_post_aen_work(vha, FCH_EVT_PORT_OFFLINE,
             sess->d_id.b24);
     }
 }
+
+void qla_edif_clear_appdata(struct scsi_qla_host *vha, struct fc_port *fcport)
+{
+    if (!(fcport->flags & FCF_FCSP_DEVICE))
+        return;
+
+    qla_edb_clear(vha, fcport->d_id);
+    qla_enode_clear(vha, fcport->d_id);
+}
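
Note: qla_edif_sadb_release() above (and qla24xx_free_purex_list() later in qla_os.c) drop the raw list_for_each_safe()/list_entry() pairing for list_for_each_entry_safe(), which folds the container_of() step into the iterator. The shape of the conversion, on a generic entry type:

    /* Before: walk raw list_heads, then recover the containing object. */
    struct list_head *pos, *tmp;
    list_for_each_safe(pos, tmp, &head) {
        struct item *it = list_entry(pos, struct item, link);
        list_del(pos);
        kfree(it);
    }

    /* After: the _entry iterator hands back the object directly. */
    struct item *it, *next;
    list_for_each_entry_safe(it, next, &head, link) {
        list_del(&it->link);
        kfree(it);
    }
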
diff --git a/drivers/scsi/qla2xxx/qla_edif.h b/drivers/scsi/qla2xxx/qla_edif.h
index 9e8f28d0caa1..a965ca8e47ce 100644
--- a/drivers/scsi/qla2xxx/qla_edif.h
+++ b/drivers/scsi/qla2xxx/qla_edif.h
@@ -41,9 +41,12 @@ struct pur_core {
 };
 
 enum db_flags_t {
-    EDB_ACTIVE = 0x1,
+    EDB_ACTIVE = BIT_0,
 };
 
+#define DBELL_ACTIVE(_v) (_v->e_dbell.db_flags & EDB_ACTIVE)
+#define DBELL_INACTIVE(_v) (!(_v->e_dbell.db_flags & EDB_ACTIVE))
+
 struct edif_dbell {
     enum db_flags_t db_flags;
     spinlock_t db_lock;
@@ -93,7 +96,6 @@ struct sa_update_28xx {
 };
 
 #define NUM_ENTRIES 256
-#define MAX_PAYLOAD 1024
 #define PUR_GET 1
 
 struct dinfo {
@@ -102,7 +104,6 @@ struct dinfo {
 };
 
 struct pur_ninfo {
-    unsigned int pur_pend:1;
     port_id_t pur_sid;
     port_id_t pur_did;
     uint8_t vp_idx;
@@ -128,9 +129,15 @@ struct enode {
     } u;
 };
 
+#define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES))
+
 #define EDIF_SESSION_DOWN(_s) \
     (qla_ini_mode_enabled(_s->vha) && (_s->disc_state == DSC_DELETE_PEND || \
      _s->disc_state == DSC_DELETED || \
      !_s->edif.app_sess_online))
 
+#define EDIF_NEGOTIATION_PENDING(_fcport) \
+    (DBELL_ACTIVE(_fcport->vha) && \
+     (_fcport->disc_state == DSC_LOGIN_AUTH_PEND))
+
 #endif  /* __QLA_EDIF_H */
diff --git a/drivers/scsi/qla2xxx/qla_edif_bsg.h b/drivers/scsi/qla2xxx/qla_edif_bsg.h
index 58b718d35d19..53026d82ebff 100644
--- a/drivers/scsi/qla2xxx/qla_edif_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_edif_bsg.h
@@ -8,7 +8,7 @@
 #define __QLA_EDIF_BSG_H
 
 /* BSG Vendor specific commands */
-#define ELS_MAX_PAYLOAD 1024
+#define ELS_MAX_PAYLOAD 2112
 #ifndef WWN_SIZE
 #define WWN_SIZE 8
 #endif
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 3c4fa8bac88d..8d8503a28479 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -142,6 +142,8 @@ void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
 void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport);
 int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsgjob);
 void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess);
+void qla_edif_clear_appdata(struct scsi_qla_host *vha,
+    struct fc_port *fcport);
 const char *sc_to_str(uint16_t cmd);
 
 /*
@@ -171,7 +173,6 @@ extern int ql2xasynctmfenable;
 extern int ql2xgffidenable;
 extern int ql2xenabledif;
 extern int ql2xenablehba_err_chk;
-extern int ql2xtargetreset;
 extern int ql2xdontresethba;
 extern uint64_t ql2xmaxlun;
 extern int ql2xmdcapmask;
@@ -818,7 +819,6 @@ extern void qlafx00_abort_iocb(srb_t *, struct abort_iocb_entry_fx00 *);
 extern void qlafx00_fxdisc_iocb(srb_t *, struct fxdisc_entry_fx00 *);
 extern void qlafx00_timer_routine(scsi_qla_host_t *);
 extern int qlafx00_rescan_isp(scsi_qla_host_t *);
-extern int qlafx00_loop_reset(scsi_qla_host_t *vha);
 
 /* qla82xx related functions */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 908a72ebf7c2..070b636802d0 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -330,12 +330,9 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
         lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
     } else {
         if (vha->hw->flags.edif_enabled &&
-            vha->e_dbell.db_flags & EDB_ACTIVE) {
+            DBELL_ACTIVE(vha)) {
             lio->u.logio.flags |=
                 (SRB_LOGIN_FCSP | SRB_LOGIN_SKIP_PRLI);
-            ql_dbg(ql_dbg_disc, vha, 0x2072,
-                "Async-login: w/ FCSP %8phC hdl=%x, loopid=%x portid=%06x\n",
-                fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24);
         } else {
             lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
         }
@@ -344,12 +341,14 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
     if (NVME_TARGET(vha->hw, fcport))
         lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
 
-    ql_dbg(ql_dbg_disc, vha, 0x2072,
-        "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n",
-        fcport->port_name, sp->handle, fcport->loop_id,
-        fcport->d_id.b24, fcport->login_retry);
-
     rval = qla2x00_start_sp(sp);
+
+    ql_dbg(ql_dbg_disc, vha, 0x2072,
+        "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
+        fcport->port_name, sp->handle, fcport->loop_id,
+        fcport->d_id.b24, fcport->login_retry,
+        lio->u.logio.flags & SRB_LOGIN_FCSP ? "FCSP" : "");
+
     if (rval != QLA_SUCCESS) {
         fcport->flags |= FCF_LOGIN_NEEDED;
         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
@@ -862,7 +861,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
             break;
         case DSC_LS_PLOGI_COMP:
             if (vha->hw->flags.edif_enabled &&
-                vha->e_dbell.db_flags & EDB_ACTIVE) {
+                DBELL_ACTIVE(vha)) {
                 /* check to see if App support secure or not */
                 qla24xx_post_gpdb_work(vha, fcport, 0);
                 break;
@@ -987,8 +986,6 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
         sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
         sp->u.iocb_cmd.u.mbx.in_mb[2]);
 
-    if (res == QLA_FUNCTION_TIMEOUT)
-        return;
 
     sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
     memset(&ea, 0, sizeof(ea));
@@ -1026,8 +1023,8 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
     spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 
     list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
-        list_del_init(&fcport->gnl_entry);
         spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+        list_del_init(&fcport->gnl_entry);
         fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
         spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
         ea.fcport = fcport;
@@ -1454,7 +1451,7 @@ static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport,
             qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
                 fcport->d_id.b24);
 
-            if (vha->e_dbell.db_flags == EDB_ACTIVE) {
+            if (DBELL_ACTIVE(vha)) {
                 ql_dbg(ql_dbg_disc, vha, 0x20ef,
                     "%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n",
                     __func__, __LINE__, fcport->port_name);
@@ -1786,16 +1783,72 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
     fc_port_t *fcport;
     unsigned long flags;
 
-    fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
-    if (fcport) {
-        if (fcport->flags & FCF_FCP2_DEVICE) {
-            ql_dbg(ql_dbg_disc, vha, 0x2115,
-                "Delaying session delete for FCP2 portid=%06x %8phC ",
-                fcport->d_id.b24, fcport->port_name);
-            return;
+    switch (ea->id.b.rsvd_1) {
+    case RSCN_PORT_ADDR:
+        fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
+        if (fcport) {
+            if (fcport->flags & FCF_FCP2_DEVICE) {
+                ql_dbg(ql_dbg_disc, vha, 0x2115,
+                    "Delaying session delete for FCP2 portid=%06x %8phC ",
+                    fcport->d_id.b24, fcport->port_name);
+                return;
+            }
+
+            if (vha->hw->flags.edif_enabled && DBELL_ACTIVE(vha)) {
+                /*
+                 * On ipsec start by remote port, Target port
+                 * may use RSCN to trigger initiator to
+                 * relogin. If driver is already in the
+                 * process of a relogin, then ignore the RSCN
+                 * and allow the current relogin to continue.
+                 * This reduces thrashing of the connection.
+                 */
+                if (atomic_read(&fcport->state) == FCS_ONLINE) {
+                    /*
+                     * If state = online, then set scan_needed=1 to do relogin.
+                     * Otherwise we're already in the middle of a relogin
+                     */
+                    fcport->scan_needed = 1;
+                    fcport->rscn_gen++;
+                }
+            } else {
+                fcport->scan_needed = 1;
+                fcport->rscn_gen++;
+            }
         }
-        fcport->scan_needed = 1;
-        fcport->rscn_gen++;
+        break;
+    case RSCN_AREA_ADDR:
+        list_for_each_entry(fcport, &vha->vp_fcports, list) {
+            if (fcport->flags & FCF_FCP2_DEVICE)
+                continue;
+
+            if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) {
+                fcport->scan_needed = 1;
+                fcport->rscn_gen++;
+            }
+        }
+        break;
+    case RSCN_DOM_ADDR:
+        list_for_each_entry(fcport, &vha->vp_fcports, list) {
+            if (fcport->flags & FCF_FCP2_DEVICE)
+                continue;
+
+            if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) {
+                fcport->scan_needed = 1;
+                fcport->rscn_gen++;
+            }
+        }
+        break;
+    case RSCN_FAB_ADDR:
+    default:
+        list_for_each_entry(fcport, &vha->vp_fcports, list) {
+            if (fcport->flags & FCF_FCP2_DEVICE)
+                continue;
+
+            fcport->scan_needed = 1;
+            fcport->rscn_gen++;
+        }
+        break;
     }
 
     spin_lock_irqsave(&vha->work_lock, flags);
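
Note: the new switch dispatches on the RSCN address format carried in the event. A Fibre Channel N_Port ID is 24 bits laid out as domain[23:16], area[15:8], al_pa[7:0], so an area-scoped RSCN compares the top 16 bits and a domain-scoped one the top 8. The comparisons, pulled out into a sketch (RSCN_* scope values as used by qla2xxx above):

    /* 24-bit port id: [23:16] domain | [15:8] area | [7:0] al_pa */
    static bool rscn_matches(u32 rscn_id, u32 port_id, int fmt)
    {
        switch (fmt) {
        case RSCN_PORT_ADDR:                /* exact port */
            return rscn_id == port_id;
        case RSCN_AREA_ADDR:                /* domain + area */
            return (rscn_id & 0xffff00) == (port_id & 0xffff00);
        case RSCN_DOM_ADDR:                 /* domain only */
            return (rscn_id & 0xff0000) == (port_id & 0xff0000);
        case RSCN_FAB_ADDR:                 /* fabric-wide */
        default:
            return true;
        }
    }
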
@@ -4187,7 +4240,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
          * fw shal not send PRLI after PLOGI Acc
          */
         if (ha->flags.edif_enabled &&
-            vha->e_dbell.db_flags & EDB_ACTIVE) {
+            DBELL_ACTIVE(vha)) {
             ha->fw_options[3] |= BIT_15;
             ha->flags.n2n_fw_acc_sec = 1;
         } else {
@@ -4433,6 +4486,10 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
             (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
     }
 
+    /* ELS pass-through payload is limited by frame size. */
+    if (ha->flags.edif_enabled)
+        mid_init_cb->init_cb.frame_payload_size = cpu_to_le16(ELS_MAX_PAYLOAD);
+
     rval = qla2x00_init_firmware(vha, ha->init_cb_size);
 next_check:
     if (rval) {
@@ -5339,8 +5396,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
              * use link up to wake up app to get ready for
              * authentication.
              */
-            if (ha->flags.edif_enabled &&
-                !(vha->e_dbell.db_flags & EDB_ACTIVE))
+            if (ha->flags.edif_enabled && DBELL_INACTIVE(vha))
                 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP,
                     ha->link_data_rate);
 
@@ -5833,6 +5889,10 @@ void qla_register_fcport_fn(struct work_struct *work)
 
     qla2x00_update_fcport(fcport->vha, fcport);
 
+    ql_dbg(ql_dbg_disc, fcport->vha, 0x911e,
+        "%s rscn gen %d/%d next DS %d\n", __func__,
+        rscn_gen, fcport->rscn_gen, fcport->next_disc_state);
+
     if (rscn_gen != fcport->rscn_gen) {
         /* RSCN(s) came in while registration */
         switch (fcport->next_disc_state) {
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 9d4ad1d2b00a..ed604f2185bf 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -3034,8 +3034,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
     elsio->u.els_plogi.els_cmd = els_opcode;
     elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
 
-    if (els_opcode == ELS_DCMD_PLOGI && vha->hw->flags.edif_enabled &&
-        vha->e_dbell.db_flags & EDB_ACTIVE) {
+    if (els_opcode == ELS_DCMD_PLOGI && DBELL_ACTIVE(vha)) {
         struct fc_els_flogi *p = ptr;
 
         p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index b26f2699adb2..aaf6504570fd 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2233,6 +2233,10 @@ qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
             }
         }
     } else if (comp_status == CS_PORT_LOGGED_OUT) {
+        ql_dbg(ql_dbg_disc, vha, 0x911e,
+            "%s %d schedule session deletion\n",
+            __func__, __LINE__);
+
         els->u.els_plogi.len = 0;
         res = DID_IMM_RETRY << 16;
         qlt_schedule_sess_for_deletion(sp->fcport);
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 6e920da64863..350b0c4346fb 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -738,29 +738,6 @@ qlafx00_lun_reset(fc_port_t *fcport, uint64_t l, int tag)
     return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
 }
 
-int
-qlafx00_loop_reset(scsi_qla_host_t *vha)
-{
-    int ret;
-    struct fc_port *fcport;
-    struct qla_hw_data *ha = vha->hw;
-
-    if (ql2xtargetreset) {
-        list_for_each_entry(fcport, &vha->vp_fcports, list) {
-            if (fcport->port_type != FCT_TARGET)
-                continue;
-
-            ret = ha->isp_ops->target_reset(fcport, 0, 0);
-            if (ret != QLA_SUCCESS) {
-                ql_dbg(ql_dbg_taskm, vha, 0x803d,
-                    "Bus Reset failed: Reset=%d "
-                    "d_id=%x.\n", ret, fcport->d_id.b24);
-            }
-        }
-    }
-    return QLA_SUCCESS;
-}
-
 int
 qlafx00_iospace_config(struct qla_hw_data *ha)
 {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index c3ff50ffe205..abcd30917263 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -202,12 +202,6 @@ MODULE_PARM_DESC(ql2xdbwr,
     " 0 -- Regular doorbell.\n"
     " 1 -- CAMRAM doorbell (faster).\n");
 
-int ql2xtargetreset = 1;
-module_param(ql2xtargetreset, int, S_IRUGO);
-MODULE_PARM_DESC(ql2xtargetreset,
-    "Enable target reset."
-    "Default is 1 - use hw defaults.");
-
 int ql2xgffidenable;
 module_param(ql2xgffidenable, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xgffidenable,
@@ -1695,27 +1689,10 @@ int
 qla2x00_loop_reset(scsi_qla_host_t *vha)
 {
     int ret;
-    struct fc_port *fcport;
     struct qla_hw_data *ha = vha->hw;
 
-    if (IS_QLAFX00(ha)) {
-        return qlafx00_loop_reset(vha);
-    }
-
-    if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
-        list_for_each_entry(fcport, &vha->vp_fcports, list) {
-            if (fcport->port_type != FCT_TARGET)
-                continue;
-
-            ret = ha->isp_ops->target_reset(fcport, 0, 0);
-            if (ret != QLA_SUCCESS) {
-                ql_dbg(ql_dbg_taskm, vha, 0x802c,
-                    "Bus Reset failed: Reset=%d "
-                    "d_id=%x.\n", ret, fcport->d_id.b24);
-            }
-        }
-    }
-
+    if (IS_QLAFX00(ha))
+        return QLA_SUCCESS;
 
     if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
         atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -3908,13 +3885,13 @@ qla2x00_remove_one(struct pci_dev *pdev)
 static inline void
 qla24xx_free_purex_list(struct purex_list *list)
 {
-    struct list_head *item, *next;
+    struct purex_item *item, *next;
     ulong flags;
 
     spin_lock_irqsave(&list->lock, flags);
-    list_for_each_safe(item, next, &list->head) {
-        list_del(item);
-        kfree(list_entry(item, struct purex_item, list));
+    list_for_each_entry_safe(item, next, &list->head, list) {
+        list_del(&item->list);
+        kfree(item);
     }
     spin_unlock_irqrestore(&list->lock, flags);
 }
@@ -4375,7 +4352,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
 
     /* allocate the purex dma pool */
     ha->purex_dma_pool = dma_pool_create(name, &ha->pdev->dev,
-        MAX_PAYLOAD, 8, 0);
+        ELS_MAX_PAYLOAD, 8, 0);
 
     if (!ha->purex_dma_pool) {
         ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 7d8242c120fc..8993d438e0b7 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1003,6 +1003,7 @@ void qlt_free_session_done(struct work_struct *work)
                 "%s bypassing release_all_sadb\n",
                 __func__);
         }
+        qla_edif_clear_appdata(vha, sess);
         qla_edif_sess_down(vha, sess);
     }
     qla2x00_mark_device_lost(vha, sess, 0);
@@ -4812,7 +4813,7 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
     }
 
     if (vha->hw->flags.edif_enabled) {
-        if (!(vha->e_dbell.db_flags & EDB_ACTIVE)) {
+        if (DBELL_INACTIVE(vha)) {
             ql_dbg(ql_dbg_disc, vha, 0xffff,
                 "%s %d Term INOT due to app not started lid=%d, NportID %06X ",
                 __func__, __LINE__, loop_id, port_id.b24);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 4b117165bf8b..27e440f8a702 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION "10.02.07.100-k"
+#define QLA2XXX_VERSION "10.02.07.200-k"
 
 #define QLA_DRIVER_MAJOR_VER 10
 #define QLA_DRIVER_MINOR_VER 2
 #define QLA_DRIVER_PATCH_VER 7
-#define QLA_DRIVER_BETA_VER 100
+#define QLA_DRIVER_BETA_VER 200
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 06e7266018c7..1d0278da9041 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -4259,6 +4259,8 @@ static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
         mk_sense_invalid_opcode(scp);
         return check_condition_result;
     }
+    if (vnum == 0)
+        return 0;    /* not an error */
     a_num = is_bytchk3 ? 1 : vnum;
     /* Treat following check like one for read (i.e. no write) access */
     ret = check_device_access_params(scp, lba, a_num, false);
@@ -4322,6 +4324,8 @@ static int resp_report_zones(struct scsi_cmnd *scp,
     }
     zs_lba = get_unaligned_be64(cmd + 2);
     alloc_len = get_unaligned_be32(cmd + 10);
+    if (alloc_len == 0)
+        return 0;    /* not an error */
     rep_opts = cmd[14] & 0x3f;
     partial = cmd[14] & 0x80;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index a531336f6a0a..2371edbc3af4 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -133,6 +133,23 @@ static bool scsi_eh_should_retry_cmd(struct scsi_cmnd *cmd)
     return true;
 }
 
+static void scsi_eh_complete_abort(struct scsi_cmnd *scmd, struct Scsi_Host *shost)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(shost->host_lock, flags);
+    list_del_init(&scmd->eh_entry);
+    /*
+     * If the abort succeeds, and there is no further
+     * EH action, clear the ->last_reset time.
+     */
+    if (list_empty(&shost->eh_abort_list) &&
+        list_empty(&shost->eh_cmd_q))
+        if (shost->eh_deadline != -1)
+            shost->last_reset = 0;
+    spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
 /**
  * scmd_eh_abort_handler - Handle command aborts
  * @work: command to be aborted.
@@ -150,6 +167,7 @@ scmd_eh_abort_handler(struct work_struct *work)
         container_of(work, struct scsi_cmnd, abort_work.work);
     struct scsi_device *sdev = scmd->device;
     enum scsi_disposition rtn;
+    unsigned long flags;
 
     if (scsi_host_eh_past_deadline(sdev->host)) {
         SCSI_LOG_ERROR_RECOVERY(3,
@@ -173,12 +191,14 @@ scmd_eh_abort_handler(struct work_struct *work)
                 SCSI_LOG_ERROR_RECOVERY(3,
                     scmd_printk(KERN_WARNING, scmd,
                         "retry aborted command\n"));
+                scsi_eh_complete_abort(scmd, sdev->host);
                 scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
                 return;
             } else {
                 SCSI_LOG_ERROR_RECOVERY(3,
                     scmd_printk(KERN_WARNING, scmd,
                         "finish aborted command\n"));
+                scsi_eh_complete_abort(scmd, sdev->host);
                 scsi_finish_command(scmd);
                 return;
             }
@@ -191,6 +211,9 @@ scmd_eh_abort_handler(struct work_struct *work)
         }
     }
 
+    spin_lock_irqsave(sdev->host->host_lock, flags);
+    list_del_init(&scmd->eh_entry);
+    spin_unlock_irqrestore(sdev->host->host_lock, flags);
     scsi_eh_scmd_add(scmd);
 }
 
@@ -221,6 +244,8 @@ scsi_abort_command(struct scsi_cmnd *scmd)
     spin_lock_irqsave(shost->host_lock, flags);
     if (shost->eh_deadline != -1 && !shost->last_reset)
         shost->last_reset = jiffies;
+    BUG_ON(!list_empty(&scmd->eh_entry));
+    list_add_tail(&scmd->eh_entry, &shost->eh_abort_list);
     spin_unlock_irqrestore(shost->host_lock, flags);
 
     scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED;
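
Note: together with the hosts.c and scsi_lib.c hunks, this gives every command a tracked abort lifetime: scsi_abort_command() queues the scmd on shost->eh_abort_list under the host lock, and each exit path of scmd_eh_abort_handler() unlinks it again — via scsi_eh_complete_abort() (which may also clear ->last_reset once no EH work remains) or via the bare list_del_init() before escalating to scsi_eh_scmd_add(). The BUG_ON() in scsi_abort_command() is valid only because every unlink path uses list_del_init(), which leaves the node self-linked; a tiny illustration of that list property:

    #include <linux/list.h>

    /* list_empty() on the node itself is a cheap "not queued anywhere"
     * test, provided every removal goes through list_del_init().
     */
    static bool scmd_is_queued(const struct scsi_cmnd *scmd)
    {
        return !list_empty(&scmd->eh_entry);
    }
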
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 34412eac4566..400df3354cd6 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -347,6 +347,8 @@ static int scsi_fill_sghdr_rq(struct scsi_device *sdev, struct request *rq,
 {
     struct scsi_request *req = scsi_req(rq);
 
+    if (hdr->cmd_len < 6)
+        return -EMSGSIZE;
     if (copy_from_user(req->cmd, hdr->cmdp, hdr->cmd_len))
         return -EFAULT;
     if (!scsi_cmd_allowed(req->cmd, mode))
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b731c2983515..621d841d819a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1153,6 +1153,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
     cmd->sense_buffer = buf;
     cmd->prot_sdb = prot;
     cmd->flags = flags;
+    INIT_LIST_HEAD(&cmd->eh_entry);
     INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
     cmd->jiffies_at_alloc = jiffies_at_alloc;
     cmd->retries = retries;
@@ -1184,8 +1185,6 @@ static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
     }
 
     cmd->cmd_len = scsi_req(req)->cmd_len;
-    if (cmd->cmd_len == 0)
-        cmd->cmd_len = scsi_command_size(cmd->cmnd);
     cmd->cmnd = scsi_req(req)->cmd;
     cmd->transfersize = blk_rq_bytes(req);
     cmd->allowed = scsi_req(req)->retries;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index d3d362289ecc..55addd78fde4 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1383,6 +1383,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
          * We're treating error on bsg register as non-fatal, so
          * pretend nothing went wrong.
          */
+        error = PTR_ERR(sdev->bsg_dev);
         sdev_printk(KERN_INFO, sdev,
             "Failed to register bsg queue, errno=%d\n",
             error);
@@ -1580,7 +1581,6 @@ static struct device_type scsi_dev_type = {
 
 void scsi_sysfs_device_initialize(struct scsi_device *sdev)
 {
-    int i, j = 0;
     unsigned long flags;
     struct Scsi_Host *shost = sdev->host;
     struct scsi_host_template *hostt = shost->hostt;
@@ -1592,15 +1592,7 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
     scsi_enable_async_suspend(&sdev->sdev_gendev);
     dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%llu",
         sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
-    sdev->gendev_attr_groups[j++] = &scsi_sdev_attr_group;
-    if (hostt->sdev_groups) {
-        for (i = 0; hostt->sdev_groups[i] &&
-                 j < ARRAY_SIZE(sdev->gendev_attr_groups);
-             i++, j++) {
-            sdev->gendev_attr_groups[j] = hostt->sdev_groups[i];
-        }
-    }
-    WARN_ON_ONCE(j >= ARRAY_SIZE(sdev->gendev_attr_groups));
+    sdev->sdev_gendev.groups = hostt->sdev_groups;
     device_initialize(&sdev->sdev_dev);
     sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev);
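
Note: the scsi_sysfs.c hunk stops hand-copying the host template's attribute groups into a fixed-size per-device array and instead points sdev_gendev.groups at the template's NULL-terminated list, letting the driver core register the attributes itself during device_add(). The underlying driver-core pattern, sketched with hypothetical names:

    /* DEVICE_ATTR_RO(foo) defines dev_attr_foo elsewhere (assumed). */
    static struct attribute *my_dev_attrs[] = {
        &dev_attr_foo.attr,
        NULL,                    /* groups lists are NULL-terminated */
    };
    ATTRIBUTE_GROUPS(my_dev);    /* generates my_dev_groups[] */

    /* Set before registration; the core then creates and removes
     * the sysfs files for us.
     */
    dev->groups = my_dev_groups;
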
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 50f8dc960ae2..8e4af111c078 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -693,7 +693,6 @@ static int sr_probe(struct device *dev)
     cd->device = sdev;
     cd->disk = disk;
     cd->driver = &sr_template;
-    cd->disk = disk;
     cd->capacity = 0x1fffff;
     cd->device->changed = 1;    /* force recheck CD type */
     cd->media_present = 1;
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
index 4e1ff209b933..4a0bbcf1757a 100644
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -8,6 +8,18 @@
 
 static struct dentry *ufs_debugfs_root;
 
+struct ufs_debugfs_attr {
+    const char *name;
+    mode_t mode;
+    const struct file_operations *fops;
+};
+
+/* @file corresponds to a debugfs attribute in directory hba->debugfs_root. */
+static inline struct ufs_hba *hba_from_file(const struct file *file)
+{
+    return d_inode(file->f_path.dentry->d_parent)->i_private;
+}
+
 void __init ufs_debugfs_init(void)
 {
     ufs_debugfs_root = debugfs_create_dir("ufshcd", NULL);
@@ -20,7 +32,7 @@ void ufs_debugfs_exit(void)
 
 static int ufs_debugfs_stats_show(struct seq_file *s, void *data)
 {
-    struct ufs_hba *hba = s->private;
+    struct ufs_hba *hba = hba_from_file(s->file);
     struct ufs_event_hist *e = hba->ufs_stats.event;
 
 #define PRT(fmt, typ) \
@@ -126,13 +138,93 @@ static void ufs_debugfs_restart_ee(struct work_struct *work)
     ufs_debugfs_put_user_access(hba);
 }
 
+static int ufs_saved_err_show(struct seq_file *s, void *data)
+{
+    struct ufs_debugfs_attr *attr = s->private;
+    struct ufs_hba *hba = hba_from_file(s->file);
+    const int *p;
+
+    if (strcmp(attr->name, "saved_err") == 0) {
+        p = &hba->saved_err;
+    } else if (strcmp(attr->name, "saved_uic_err") == 0) {
+        p = &hba->saved_uic_err;
+    } else {
+        return -ENOENT;
+    }
+
+    seq_printf(s, "%d\n", *p);
+    return 0;
+}
+
+static ssize_t ufs_saved_err_write(struct file *file, const char __user *buf,
+                   size_t count, loff_t *ppos)
+{
+    struct ufs_debugfs_attr *attr = file->f_inode->i_private;
+    struct ufs_hba *hba = hba_from_file(file);
+    char val_str[16] = { };
+    int val, ret;
+
+    if (count > sizeof(val_str))
+        return -EINVAL;
+    if (copy_from_user(val_str, buf, count))
+        return -EFAULT;
+    ret = kstrtoint(val_str, 0, &val);
+    if (ret < 0)
+        return ret;
+
+    spin_lock_irq(hba->host->host_lock);
+    if (strcmp(attr->name, "saved_err") == 0) {
+        hba->saved_err = val;
+    } else if (strcmp(attr->name, "saved_uic_err") == 0) {
+        hba->saved_uic_err = val;
+    } else {
+        ret = -ENOENT;
+    }
+    if (ret == 0)
+        ufshcd_schedule_eh_work(hba);
+    spin_unlock_irq(hba->host->host_lock);
+
+    return ret < 0 ? ret : count;
+}
+
+static int ufs_saved_err_open(struct inode *inode, struct file *file)
+{
+    return single_open(file, ufs_saved_err_show, inode->i_private);
+}
+
+static const struct file_operations ufs_saved_err_fops = {
+    .owner = THIS_MODULE,
+    .open = ufs_saved_err_open,
+    .read = seq_read,
+    .write = ufs_saved_err_write,
+    .llseek = seq_lseek,
+    .release = single_release,
+};
+
+static const struct ufs_debugfs_attr ufs_attrs[] = {
+    { "stats", 0400, &ufs_debugfs_stats_fops },
+    { "saved_err", 0600, &ufs_saved_err_fops },
+    { "saved_uic_err", 0600, &ufs_saved_err_fops },
+    { }
+};
+
 void ufs_debugfs_hba_init(struct ufs_hba *hba)
 {
+    const struct ufs_debugfs_attr *attr;
+    struct dentry *root;
+
     /* Set default exception event rate limit period to 20ms */
     hba->debugfs_ee_rate_limit_ms = 20;
     INIT_DELAYED_WORK(&hba->debugfs_ee_work, ufs_debugfs_restart_ee);
-    hba->debugfs_root = debugfs_create_dir(dev_name(hba->dev), ufs_debugfs_root);
-    debugfs_create_file("stats", 0400, hba->debugfs_root, hba, &ufs_debugfs_stats_fops);
+
+    root = debugfs_create_dir(dev_name(hba->dev), ufs_debugfs_root);
+    if (IS_ERR_OR_NULL(root))
+        return;
+    hba->debugfs_root = root;
+    d_inode(root)->i_private = hba;
+    for (attr = ufs_attrs; attr->name; attr++)
+        debugfs_create_file(attr->name, attr->mode, root, (void *)attr,
+                    attr->fops);
     debugfs_create_file("exception_event_mask", 0600, hba->debugfs_root,
                 hba, &ee_usr_mask_fops);
     debugfs_create_u32("exception_event_rate_limit_ms", 0600, hba->debugfs_root,
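
Note: the debugfs rework stores two different things in i_private — the directory inode holds the hba, while each file's inode holds its ufs_debugfs_attr entry — and hba_from_file() recovers the hba by climbing to the parent directory. In isolation, the trick looks like this (my_ctx/stats names hypothetical):

    /* setup: the directory inode carries the device context,
     * each file's inode carries its per-file data
     */
    static void my_debugfs_init(struct my_ctx *ctx)
    {
        struct dentry *dir = debugfs_create_dir("mydev", NULL);

        if (IS_ERR_OR_NULL(dir))
            return;
        d_inode(dir)->i_private = ctx;
        debugfs_create_file("stats", 0400, dir, &stats_attr, &stats_fops);
    }

    /* lookup: any file op climbs one level to reach the context */
    static inline struct my_ctx *ctx_from_file(const struct file *file)
    {
        return d_inode(file->f_path.dentry->d_parent)->i_private;
    }
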
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
index 30d0c1aba0c7..cd26bc82462e 100644
--- a/drivers/scsi/ufs/ufs-exynos.c
+++ b/drivers/scsi/ufs/ufs-exynos.c
@@ -12,8 +12,10 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/module.h>
+#include <linux/mfd/syscon.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/regmap.h>
 #include <linux/platform_device.h>
 
 #include "ufshcd.h"
 #include "ufshcd-pltfrm.h"
@@ -48,10 +50,11 @@
 #define HCI_ERR_EN_T_LAYER 0x84
 #define HCI_ERR_EN_DME_LAYER 0x88
 #define HCI_CLKSTOP_CTRL 0xB0
+#define REFCLKOUT_STOP BIT(4)
 #define REFCLK_STOP BIT(2)
 #define UNIPRO_MCLK_STOP BIT(1)
 #define UNIPRO_PCLK_STOP BIT(0)
-#define CLK_STOP_MASK (REFCLK_STOP |\
+#define CLK_STOP_MASK (REFCLKOUT_STOP | REFCLK_STOP |\
                UNIPRO_MCLK_STOP |\
                UNIPRO_PCLK_STOP)
 #define HCI_MISC 0xB4
@@ -74,6 +77,52 @@
                 UIC_TRANSPORT_NO_CONNECTION_RX |\
                 UIC_TRANSPORT_BAD_TC)
 
+/* FSYS UFS Shareability */
+#define UFS_WR_SHARABLE BIT(2)
+#define UFS_RD_SHARABLE BIT(1)
+#define UFS_SHARABLE (UFS_WR_SHARABLE | UFS_RD_SHARABLE)
+#define UFS_SHAREABILITY_OFFSET 0x710
+
+/* Multi-host registers */
+#define MHCTRL 0xC4
+#define MHCTRL_EN_VH_MASK (0xE)
+#define MHCTRL_EN_VH(vh) (vh << 1)
+#define PH2VH_MBOX 0xD8
+
+#define MH_MSG_MASK (0xFF)
+
+#define MH_MSG(id, msg) ((id << 8) | (msg & 0xFF))
+#define MH_MSG_PH_READY 0x1
+#define MH_MSG_VH_READY 0x2
+
+#define ALLOW_INQUIRY BIT(25)
+#define ALLOW_MODE_SELECT BIT(24)
+#define ALLOW_MODE_SENSE BIT(23)
+#define ALLOW_PRE_FETCH GENMASK(22, 21)
+#define ALLOW_READ_CMD_ALL GENMASK(20, 18)    /* read_6/10/16 */
+#define ALLOW_READ_BUFFER BIT(17)
+#define ALLOW_READ_CAPACITY GENMASK(16, 15)
+#define ALLOW_REPORT_LUNS BIT(14)
+#define ALLOW_REQUEST_SENSE BIT(13)
+#define ALLOW_SYNCHRONIZE_CACHE GENMASK(8, 7)
+#define ALLOW_TEST_UNIT_READY BIT(6)
+#define ALLOW_UNMAP BIT(5)
+#define ALLOW_VERIFY BIT(4)
+#define ALLOW_WRITE_CMD_ALL GENMASK(3, 1)    /* write_6/10/16 */
+
+#define ALLOW_TRANS_VH_DEFAULT (ALLOW_INQUIRY | ALLOW_MODE_SELECT | \
+                ALLOW_MODE_SENSE | ALLOW_PRE_FETCH | \
+                ALLOW_READ_CMD_ALL | ALLOW_READ_BUFFER | \
+                ALLOW_READ_CAPACITY | ALLOW_REPORT_LUNS | \
+                ALLOW_REQUEST_SENSE | ALLOW_SYNCHRONIZE_CACHE | \
+                ALLOW_TEST_UNIT_READY | ALLOW_UNMAP | \
+                ALLOW_VERIFY | ALLOW_WRITE_CMD_ALL)
+
+#define HCI_MH_ALLOWABLE_TRAN_OF_VH 0x30C
+#define HCI_MH_IID_IN_TASK_TAG 0X308
+
+#define PH_READY_TIMEOUT_MS (5 * MSEC_PER_SEC)
+
 enum {
     UNIPRO_L1_5 = 0,/* PHY Adapter */
     UNIPRO_L2,      /* Data Link */
@@ -149,6 +198,117 @@ static int exynos7_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
     return 0;
 }
 
+static int exynosauto_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
+{
+    struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
+
+    /* IO Coherency setting */
+    if (ufs->sysreg) {
+        return regmap_update_bits(ufs->sysreg,
+                      ufs->shareability_reg_offset,
+                      UFS_SHARABLE, UFS_SHARABLE);
+    }
+
+    attr->tx_dif_p_nsec = 3200000;
+
+    return 0;
+}
+
+static int exynosauto_ufs_post_hce_enable(struct exynos_ufs *ufs)
+{
+    struct ufs_hba *hba = ufs->hba;
+
+    /* Enable Virtual Host #1 */
+    ufshcd_rmwl(hba, MHCTRL_EN_VH_MASK, MHCTRL_EN_VH(1), MHCTRL);
+    /* Default VH Transfer permissions */
+    hci_writel(ufs, ALLOW_TRANS_VH_DEFAULT, HCI_MH_ALLOWABLE_TRAN_OF_VH);
+    /* IID information is replaced in TASKTAG[7:5] instead of IID in UCD */
+    hci_writel(ufs, 0x1, HCI_MH_IID_IN_TASK_TAG);
+
+    return 0;
+}
+
+static int exynosauto_ufs_pre_link(struct exynos_ufs *ufs)
+{
+    struct ufs_hba *hba = ufs->hba;
+    int i;
+    u32 tx_line_reset_period, rx_line_reset_period;
+
+    rx_line_reset_period = (RX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC;
+    tx_line_reset_period = (TX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC;
+
+    ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);
+    for_each_ufs_rx_lane(ufs, i) {
+        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD, i),
+                   DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD_EN, i), 0x0);
+
+        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE2, i),
+                   (rx_line_reset_period >> 16) & 0xFF);
+        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE1, i),
+                   (rx_line_reset_period >> 8) & 0xFF);
+        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE0, i),
+                   (rx_line_reset_period) & 0xFF);
+
+        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x2f, i), 0x79);
+        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x84, i), 0x1);
+        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x25, i), 0xf6);
+    }
+
+    for_each_ufs_tx_lane(ufs, i) {
+        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD, i),
+                   DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+        /* Not to affect VND_TX_LINERESET_PVALUE to VND_TX_CLK_PRD */
+        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD_EN, i),
+                   0x02);
+
+        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE2, i),
+                   (tx_line_reset_period >> 16) & 0xFF);
+        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE1, i),
+                   (tx_line_reset_period >> 8) & 0xFF);
+        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE0, i),
+                   (tx_line_reset_period) & 0xFF);
+
+        /* TX PWM Gear Capability / PWM_G1_ONLY */
+        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x04, i), 0x1);
+    }
+
+    ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);
+
+    ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0);
+
+    ufshcd_dme_set(hba, UIC_ARG_MIB(0xa011), 0x8000);
+
+    return 0;
+}
+
+static int exynosauto_ufs_pre_pwr_change(struct exynos_ufs *ufs,
+                     struct ufs_pa_layer_attr *pwr)
+{
+    struct ufs_hba *hba = ufs->hba;
+
+    /* PACP_PWR_req and delivered to the remote DME */
+    ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 12000);
+    ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 32000);
+    ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000);
+
+    return 0;
+}
+
+static int exynosauto_ufs_post_pwr_change(struct exynos_ufs *ufs,
+                      struct ufs_pa_layer_attr *pwr)
+{
+    struct ufs_hba *hba = ufs->hba;
+    u32 enabled_vh;
+
+    enabled_vh = ufshcd_readl(hba, MHCTRL) & MHCTRL_EN_VH_MASK;
+
+    /* Send physical host ready message to virtual hosts */
+    ufshcd_writel(hba, MH_MSG(enabled_vh, MH_MSG_PH_READY), PH2VH_MBOX);
+
+    return 0;
+}
+
 static int exynos7_ufs_pre_link(struct exynos_ufs *ufs)
 {
     struct ufs_hba *hba = ufs->hba;
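
Note: exynosauto_ufs_pre_link() above converts the line-reset times into mclk cycle counts before programming them, one byte at a time, into the VND_*_LINERESET_* DME attributes. Since the divisor is NSEC_PER_MSEC (10^6), the formula time * mclk_rate / 10^6 effectively treats RX_LINE_RESET_TIME/TX_LINE_RESET_TIME (1000 and 3200, defined in ufs-exynos.h below) as microseconds. A worked example, assuming a 166 MHz mclk:

    /* cycles = time_us * rate_hz / 10^6   (NSEC_PER_MSEC == 10^6) */
    u64 mclk_rate = 166000000;                     /* assumed rate */
    u32 rx = (1000 * mclk_rate) / NSEC_PER_MSEC;   /* = 166000 cycles */
    u32 tx = (3200 * mclk_rate) / NSEC_PER_MSEC;   /* = 531200 cycles */

    /* each count is then split across three byte-wide attributes */
    u8 v2 = (rx >> 16) & 0xFF, v1 = (rx >> 8) & 0xFF, v0 = rx & 0xFF;
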
UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000); + + return 0; +} + +static int exynosauto_ufs_post_pwr_change(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr) +{ + struct ufs_hba *hba = ufs->hba; + u32 enabled_vh; + + enabled_vh = ufshcd_readl(hba, MHCTRL) & MHCTRL_EN_VH_MASK; + + /* Send physical host ready message to virtual hosts */ + ufshcd_writel(hba, MH_MSG(enabled_vh, MH_MSG_PH_READY), PH2VH_MBOX); + + return 0; +} + static int exynos7_ufs_pre_link(struct exynos_ufs *ufs) { struct ufs_hba *hba = ufs->hba; @@ -793,6 +953,27 @@ static void exynos_ufs_config_intr(struct exynos_ufs *ufs, u32 errs, u8 index) } } +static int exynos_ufs_setup_clocks(struct ufs_hba *hba, bool on, + enum ufs_notify_change_status status) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + + if (!ufs) + return 0; + + if (on && status == PRE_CHANGE) { + if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) + exynos_ufs_disable_auto_ctrl_hcc(ufs); + exynos_ufs_ungate_clks(ufs); + } else if (!on && status == POST_CHANGE) { + exynos_ufs_gate_clks(ufs); + if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) + exynos_ufs_enable_auto_ctrl_hcc(ufs); + } + + return 0; +} + static int exynos_ufs_pre_link(struct ufs_hba *hba) { struct exynos_ufs *ufs = ufshcd_get_variant(hba); @@ -808,8 +989,12 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba) /* m-phy */ exynos_ufs_phy_init(ufs); - exynos_ufs_config_phy_time_attr(ufs); - exynos_ufs_config_phy_cap_attr(ufs); + if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) { + exynos_ufs_config_phy_time_attr(ufs); + exynos_ufs_config_phy_cap_attr(ufs); + } + + exynos_ufs_setup_clocks(hba, true, PRE_CHANGE); if (ufs->drv_data->pre_link) ufs->drv_data->pre_link(ufs); @@ -893,17 +1078,10 @@ static int exynos_ufs_post_link(struct ufs_hba *hba) static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs) { struct device_node *np = dev->of_node; - struct exynos_ufs_drv_data *drv_data = &exynos_ufs_drvs; struct exynos_ufs_uic_attr *attr; int ret = 0; - while (drv_data->compatible) { - if (of_device_is_compatible(np, drv_data->compatible)) { - ufs->drv_data = drv_data; - break; - } - drv_data++; - } + ufs->drv_data = device_get_match_data(dev); if (ufs->drv_data && ufs->drv_data->uic_attr) { attr = ufs->drv_data->uic_attr; @@ -913,6 +1091,17 @@ static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs) goto out; } + ufs->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg"); + if (IS_ERR(ufs->sysreg)) + ufs->sysreg = NULL; + else { + if (of_property_read_u32_index(np, "samsung,sysreg", 1, + &ufs->shareability_reg_offset)) { + dev_warn(dev, "can't get an offset from sysreg. 
Set to default value\n"); + ufs->shareability_reg_offset = UFS_SHAREABILITY_OFFSET; + } + } + ufs->pclk_avail_min = PCLK_AVAIL_MIN; ufs->pclk_avail_max = PCLK_AVAIL_MAX; @@ -927,6 +1116,18 @@ out: return ret; } +static inline void exynos_ufs_priv_init(struct ufs_hba *hba, + struct exynos_ufs *ufs) +{ + ufs->hba = hba; + ufs->opts = ufs->drv_data->opts; + ufs->rx_sel_idx = PA_MAXDATALANES; + if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX) + ufs->rx_sel_idx = 0; + hba->priv = (void *)ufs; + hba->quirks = ufs->drv_data->quirks; +} + static int exynos_ufs_init(struct ufs_hba *hba) { struct device *dev = hba->dev; @@ -976,13 +1177,8 @@ static int exynos_ufs_init(struct ufs_hba *hba) if (ret) goto phy_off; - ufs->hba = hba; - ufs->opts = ufs->drv_data->opts; - ufs->rx_sel_idx = PA_MAXDATALANES; - if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX) - ufs->rx_sel_idx = 0; - hba->priv = (void *)ufs; - hba->quirks = ufs->drv_data->quirks; + exynos_ufs_priv_init(hba, ufs); + if (ufs->drv_data->drv_init) { ret = ufs->drv_data->drv_init(dev, ufs); if (ret) { @@ -1110,6 +1306,12 @@ static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba, switch (status) { case PRE_CHANGE: + if (ufs->drv_data->pre_hce_enable) { + ret = ufs->drv_data->pre_hce_enable(ufs); + if (ret) + return ret; + } + ret = exynos_ufs_host_reset(hba); if (ret) return ret; @@ -1119,6 +1321,10 @@ static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba, exynos_ufs_calc_pwm_clk_div(ufs); if (!(ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)) exynos_ufs_enable_auto_ctrl_hcc(ufs); + + if (ufs->drv_data->post_hce_enable) + ret = ufs->drv_data->post_hce_enable(ufs); + break; } @@ -1202,12 +1408,77 @@ static int exynos_ufs_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) return 0; } +static int exynosauto_ufs_vh_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + if (status == POST_CHANGE) { + ufshcd_set_link_active(hba); + ufshcd_set_ufs_dev_active(hba); + } + + return 0; +} + +static int exynosauto_ufs_vh_wait_ph_ready(struct ufs_hba *hba) +{ + u32 mbox; + ktime_t start, stop; + + start = ktime_get(); + stop = ktime_add(start, ms_to_ktime(PH_READY_TIMEOUT_MS)); + + do { + mbox = ufshcd_readl(hba, PH2VH_MBOX); + /* TODO: Mailbox message protocols between the PH and VHs are + * not implemented yet. 
@@ -1202,12 +1408,77 @@ static int exynos_ufs_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	return 0;
 }
 
+static int exynosauto_ufs_vh_link_startup_notify(struct ufs_hba *hba,
+						 enum ufs_notify_change_status status)
+{
+	if (status == POST_CHANGE) {
+		ufshcd_set_link_active(hba);
+		ufshcd_set_ufs_dev_active(hba);
+	}
+
+	return 0;
+}
+
+static int exynosauto_ufs_vh_wait_ph_ready(struct ufs_hba *hba)
+{
+	u32 mbox;
+	ktime_t start, stop;
+
+	start = ktime_get();
+	stop = ktime_add(start, ms_to_ktime(PH_READY_TIMEOUT_MS));
+
+	do {
+		mbox = ufshcd_readl(hba, PH2VH_MBOX);
+		/* TODO: Mailbox message protocols between the PH and VHs are
+		 * not implemented yet. This will be supported later
+		 */
+		if ((mbox & MH_MSG_MASK) == MH_MSG_PH_READY)
+			return 0;
+
+		usleep_range(40, 50);
+	} while (ktime_before(ktime_get(), stop));
+
+	return -ETIME;
+}
+
+static int exynosauto_ufs_vh_init(struct ufs_hba *hba)
+{
+	struct device *dev = hba->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct exynos_ufs *ufs;
+	int ret;
+
+	ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
+	if (!ufs)
+		return -ENOMEM;
+
+	/* exynos-specific hci */
+	ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci");
+	if (IS_ERR(ufs->reg_hci)) {
+		dev_err(dev, "cannot ioremap for hci vendor register\n");
+		return PTR_ERR(ufs->reg_hci);
+	}
+
+	ret = exynosauto_ufs_vh_wait_ph_ready(hba);
+	if (ret)
+		return ret;
+
+	ufs->drv_data = device_get_match_data(dev);
+	if (!ufs->drv_data)
+		return -ENODEV;
+
+	exynos_ufs_priv_init(hba, ufs);
+
+	return 0;
+}
+
 static struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
 	.name				= "exynos_ufs",
 	.init				= exynos_ufs_init,
 	.hce_enable_notify		= exynos_ufs_hce_enable_notify,
 	.link_startup_notify		= exynos_ufs_link_startup_notify,
 	.pwr_change_notify		= exynos_ufs_pwr_change_notify,
+	.setup_clocks			= exynos_ufs_setup_clocks,
 	.setup_xfer_req			= exynos_ufs_specify_nexus_t_xfer_req,
 	.setup_task_mgmt		= exynos_ufs_specify_nexus_t_tm_req,
 	.hibern8_notify			= exynos_ufs_hibern8_notify,
@@ -1215,12 +1486,24 @@ static struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
 	.resume				= exynos_ufs_resume,
 };
 
+static struct ufs_hba_variant_ops ufs_hba_exynosauto_vh_ops = {
+	.name				= "exynosauto_ufs_vh",
+	.init				= exynosauto_ufs_vh_init,
+	.link_startup_notify		= exynosauto_ufs_vh_link_startup_notify,
+};
+
 static int exynos_ufs_probe(struct platform_device *pdev)
 {
 	int err;
 	struct device *dev = &pdev->dev;
+	const struct ufs_hba_variant_ops *vops = &ufs_hba_exynos_ops;
+	const struct exynos_ufs_drv_data *drv_data =
+		device_get_match_data(dev);
 
-	err = ufshcd_pltfrm_init(pdev, &ufs_hba_exynos_ops);
+	if (drv_data && drv_data->vops)
+		vops = drv_data->vops;
+
+	err = ufshcd_pltfrm_init(pdev, vops);
 	if (err)
 		dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
 
@@ -1261,8 +1544,35 @@ static struct exynos_ufs_uic_attr exynos7_uic_attr = {
 	.pa_dbg_option_suite		= 0x30103,
 };
 
+static struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
+	.uic_attr		= &exynos7_uic_attr,
+	.quirks			= UFSHCD_QUIRK_PRDT_BYTE_GRAN |
+				  UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
+				  UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
+				  UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING,
+	.opts			= EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
+				  EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
+				  EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
+	.drv_init		= exynosauto_ufs_drv_init,
+	.post_hce_enable	= exynosauto_ufs_post_hce_enable,
+	.pre_link		= exynosauto_ufs_pre_link,
+	.pre_pwr_change		= exynosauto_ufs_pre_pwr_change,
+	.post_pwr_change	= exynosauto_ufs_post_pwr_change,
+};
+
+static struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = {
+	.vops			= &ufs_hba_exynosauto_vh_ops,
+	.quirks			= UFSHCD_QUIRK_PRDT_BYTE_GRAN |
+				  UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
+				  UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
+				  UFSHCI_QUIRK_BROKEN_HCE |
+				  UFSHCD_QUIRK_BROKEN_UIC_CMD |
+				  UFSHCD_QUIRK_SKIP_PH_CONFIGURATION |
+				  UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING,
+	.opts			= EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
+};
+
 static struct exynos_ufs_drv_data exynos_ufs_drvs = {
-	.compatible		= "samsung,exynos7-ufs",
 	.uic_attr		= &exynos7_uic_attr,
 	.quirks			= UFSHCD_QUIRK_PRDT_BYTE_GRAN |
 				  UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
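exynosauto_ufs_vh_wait_ph_ready() above polls the PH2VH mailbox until the physical host reports ready or a deadline expires, sleeping briefly between reads. The same bounded-poll pattern in plain C, with clock_gettime() standing in for ktime and a faked register read:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define MSG_MASK	0xff
#define MSG_READY	0x1
#define TIMEOUT_MS	500

/* stand-in for the mailbox register; a real driver would use readl() */
static uint32_t read_mbox(void) { static int n; return ++n > 3 ? MSG_READY : 0; }

static int64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

static int wait_ready(void)
{
	int64_t deadline = now_ms() + TIMEOUT_MS;

	do {
		if ((read_mbox() & MSG_MASK) == MSG_READY)
			return 0;
		/* ~50us pause between polls, like usleep_range(40, 50) */
		nanosleep(&(struct timespec){ .tv_nsec = 50 * 1000 }, NULL);
	} while (now_ms() < deadline);

	return -1;	/* timed out, analogous to -ETIME */
}

int main(void) { printf("wait_ready() = %d\n", wait_ready()); return 0; }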
@@ -1287,6 +1597,10 @@ static struct exynos_ufs_drv_data exynos_ufs_drvs = {
 
 static const struct of_device_id exynos_ufs_of_match[] = {
 	{ .compatible = "samsung,exynos7-ufs",
 	  .data	      = &exynos_ufs_drvs },
+	{ .compatible = "samsung,exynosautov9-ufs",
+	  .data	      = &exynosauto_ufs_drvs },
+	{ .compatible = "samsung,exynosautov9-ufs-vh",
+	  .data	      = &exynosauto_ufs_vh_drvs },
 	{},
 };
 
diff --git a/drivers/scsi/ufs/ufs-exynos.h b/drivers/scsi/ufs/ufs-exynos.h
index dadf4fd10dd8..1c33e5466082 100644
--- a/drivers/scsi/ufs/ufs-exynos.h
+++ b/drivers/scsi/ufs/ufs-exynos.h
@@ -56,6 +56,22 @@
 #define TX_GRAN_NVAL_10_08	0x0296
 #define TX_GRAN_NVAL_H(v)	(((v) >> 8) & 0x3)
 
+#define VND_TX_CLK_PRD			0xAA
+#define VND_TX_CLK_PRD_EN		0xA9
+#define VND_TX_LINERESET_PVALUE0	0xAD
+#define VND_TX_LINERESET_PVALUE1	0xAC
+#define VND_TX_LINERESET_PVALUE2	0xAB
+
+#define TX_LINE_RESET_TIME		3200
+
+#define VND_RX_CLK_PRD			0x12
+#define VND_RX_CLK_PRD_EN		0x11
+#define VND_RX_LINERESET_VALUE0		0x1D
+#define VND_RX_LINERESET_VALUE1		0x1C
+#define VND_RX_LINERESET_VALUE2		0x1B
+
+#define RX_LINE_RESET_TIME		1000
+
 #define RX_FILLER_ENABLE	0x0316
 #define RX_FILLER_EN		(1 << 1)
 #define RX_LINERESET_VAL	0x0317
@@ -99,7 +115,7 @@ struct exynos_ufs;
 #define PA_HIBERN8TIME_VAL	0x20
 
 #define PCLK_AVAIL_MIN	70000000
-#define PCLK_AVAIL_MAX	133000000
+#define PCLK_AVAIL_MAX	167000000
 
 struct exynos_ufs_uic_attr {
 	/* TX Attributes */
@@ -142,7 +158,7 @@ struct exynos_ufs_uic_attr {
 };
 
 struct exynos_ufs_drv_data {
-	char *compatible;
+	const struct ufs_hba_variant_ops *vops;
 	struct exynos_ufs_uic_attr *uic_attr;
 	unsigned int quirks;
 	unsigned int opts;
@@ -154,6 +170,8 @@ struct exynos_ufs_drv_data {
 			      struct ufs_pa_layer_attr *pwr);
 	int (*post_pwr_change)(struct exynos_ufs *ufs,
			       struct ufs_pa_layer_attr *pwr);
+	int (*pre_hce_enable)(struct exynos_ufs *ufs);
+	int (*post_hce_enable)(struct exynos_ufs *ufs);
 };
 
 struct ufs_phy_time_cfg {
@@ -191,7 +209,9 @@ struct exynos_ufs {
 	struct ufs_pa_layer_attr dev_req_params;
 	struct ufs_phy_time_cfg t_cfg;
 	ktime_t entry_hibern8_t;
-	struct exynos_ufs_drv_data *drv_data;
+	const struct exynos_ufs_drv_data *drv_data;
+	struct regmap *sysreg;
+	u32 shareability_reg_offset;
 
 	u32 opts;
 #define EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL		BIT(0)
@@ -199,6 +219,7 @@ struct exynos_ufs {
 #define EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL	BIT(2)
 #define EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX	BIT(3)
 #define EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER	BIT(4)
+#define EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR	BIT(5)
 };
 
 #define for_each_ufs_rx_lane(ufs, i) \
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 5c6a58a666d2..afd38142b1c0 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -132,6 +132,14 @@ enum {
 	UFSHCD_CAN_QUEUE	= 32,
 };
 
+static const char *const ufshcd_state_name[] = {
+	[UFSHCD_STATE_RESET]			= "reset",
+	[UFSHCD_STATE_OPERATIONAL]		= "operational",
+	[UFSHCD_STATE_ERROR]			= "error",
+	[UFSHCD_STATE_EH_SCHEDULED_FATAL]	= "eh_fatal",
+	[UFSHCD_STATE_EH_SCHEDULED_NON_FATAL]	= "eh_non_fatal",
+};
+
 /* UFSHCD error handling flags */
 enum {
 	UFSHCD_EH_IN_PROGRESS = (1 << 0),
@@ -236,7 +244,6 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
 static irqreturn_t ufshcd_intr(int irq, void *__hba);
 static int ufshcd_change_power_mode(struct ufs_hba *hba,
 			     struct ufs_pa_layer_attr *pwr_mode);
-static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
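The ufshcd.c hunk above introduces ufshcd_state_name[], a designated-initializer table that lets later log messages print the HBA state symbolically; keyed initializers stay correct even if the enum values are renumbered or reordered. A self-contained illustration (the enum values here are invented):

#include <stdio.h>

enum hba_state { STATE_RESET, STATE_OPERATIONAL, STATE_ERROR, NR_STATES };

/* designated initializers key each string to its enum value */
static const char *const state_name[NR_STATES] = {
	[STATE_RESET]		= "reset",
	[STATE_OPERATIONAL]	= "operational",
	[STATE_ERROR]		= "error",
};

int main(void)
{
	enum hba_state s = STATE_OPERATIONAL;

	printf("HBA state %s\n", state_name[s]);
	return 0;
}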
@@ -711,7 +718,7 @@ static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
  * This function is used to get the OCS field from UTRD
  * Returns the OCS field in the UTRD
  */
-static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
+static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
 {
 	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
 }
@@ -2323,6 +2330,9 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 	int ret;
 	unsigned long flags;
 
+	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
+		return 0;
+
 	ufshcd_hold(hba, false);
 	mutex_lock(&hba->uic_cmd_mutex);
 	ufshcd_add_delay_before_dme_cmd(hba);
@@ -2367,17 +2377,24 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 				    sizeof(struct ufshcd_sg_entry)));
 		else
 			lrbp->utr_descriptor_ptr->prd_table_length =
-				cpu_to_le16((u16) (sg_segments));
+				cpu_to_le16(sg_segments);
 
-		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
+		prd_table = lrbp->ucd_prdt_ptr;
 
 		scsi_for_each_sg(cmd, sg, sg_segments, i) {
-			prd_table[i].size =
-				cpu_to_le32(((u32) sg_dma_len(sg))-1);
-			prd_table[i].base_addr =
-				cpu_to_le32(lower_32_bits(sg->dma_address));
-			prd_table[i].upper_addr =
-				cpu_to_le32(upper_32_bits(sg->dma_address));
+			const unsigned int len = sg_dma_len(sg);
+
+			/*
+			 * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
+			 * based value that indicates the length, in bytes, of
+			 * the data block. A maximum of length of 256KB may
+			 * exist for any entry. Bits 1:0 of this field shall be
+			 * 11b to indicate Dword granularity. A value of '3'
+			 * indicates 4 bytes, '7' indicates 8 bytes, etc."
+			 */
+			WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
+			prd_table[i].size = cpu_to_le32(len - 1);
+			prd_table[i].addr = cpu_to_le64(sg->dma_address);
 			prd_table[i].reserved = 0;
 		}
 	} else {
@@ -2661,7 +2678,7 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
 	lrb->ucd_req_dma_addr = cmd_desc_element_addr;
 	lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
 	lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
-	lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
+	lrb->ucd_prdt_ptr = cmd_descp[i].prd_table;
 	lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
 }
 
@@ -5084,7 +5101,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 {
 	int result = 0;
 	int scsi_status;
-	int ocs;
+	enum utp_ocs ocs;
 
 	/* overall command status of utrd */
 	ocs = ufshcd_get_tr_ocs(lrbp);
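With ufshcd_sg_entry reduced to a single little-endian 64-bit address, ufshcd_map_sg() above stores each DMA address in one field and encodes the segment length as the 0-based Data Byte Count the quoted UFSHCI text describes. A hedged sketch of just the packing rule, using host-endian stand-ins for the le32/le64 types:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct sg_entry {		/* mirrors the reworked ufshcd_sg_entry layout */
	uint64_t addr;		/* DW-0 and DW-1: full 64-bit physical address */
	uint32_t reserved;	/* DW-2 */
	uint32_t size;		/* DW-3: 0-based Data Byte Count */
};

static void fill_entry(struct sg_entry *e, uint64_t dma_addr, uint32_t len)
{
	assert(len > 0 && len <= 256 * 1024);	/* spec: max 256KB per entry */
	assert((len & 0x3) == 0);		/* Dword granularity */
	e->addr = dma_addr;			/* kernel: cpu_to_le64() */
	e->size = len - 1;			/* '3' means 4 bytes, '7' means 8, ... */
	e->reserved = 0;
}

int main(void)
{
	struct sg_entry e;

	fill_entry(&e, 0x100000000ULL, 4096);
	printf("addr=%#llx dbc=%#x\n", (unsigned long long)e.addr, e.size);
	return 0;
}

One field instead of a lower/upper pair also removes the two 32-bit split operations from the hot mapping loop.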
@@ -5243,11 +5260,9 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
  * __ufshcd_transfer_req_compl - handle SCSI and query command completion
  * @hba: per adapter instance
  * @completed_reqs: bitmask that indicates which requests to complete
- * @retry_requests: whether to ask the SCSI core to retry completed requests
  */
 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
-					unsigned long completed_reqs,
-					bool retry_requests)
+					unsigned long completed_reqs)
 {
 	struct ufshcd_lrb *lrbp;
 	struct scsi_cmnd *cmd;
@@ -5263,8 +5278,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 			if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
 				ufshcd_update_monitor(hba, lrbp);
 			ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
-			result = retry_requests ? DID_BUS_BUSY << 16 :
-				ufshcd_transfer_rsp_status(hba, lrbp);
+			result = ufshcd_transfer_rsp_status(hba, lrbp);
 			scsi_dma_unmap(cmd);
 			cmd->result = result;
 			/* Mark completed command as NULL in LRB */
@@ -5290,14 +5304,12 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 /**
  * ufshcd_transfer_req_compl - handle SCSI and query command completion
  * @hba: per adapter instance
- * @retry_requests: whether or not to ask to retry requests
  *
 * Returns
 * IRQ_HANDLED	- If interrupt is valid
 * IRQ_NONE	- If invalid interrupt
 */
-static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba,
-					     bool retry_requests)
+static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
 {
 	unsigned long completed_reqs, flags;
 	u32 tr_doorbell;
@@ -5326,8 +5338,7 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba,
 	spin_unlock_irqrestore(&hba->outstanding_lock, flags);
 
 	if (completed_reqs) {
-		__ufshcd_transfer_req_compl(hba, completed_reqs,
-					    retry_requests);
+		__ufshcd_transfer_req_compl(hba, completed_reqs);
 		return IRQ_HANDLED;
 	} else {
 		return IRQ_NONE;
@@ -5826,13 +5837,7 @@ out:
 /* Complete requests that have door-bell cleared */
 static void ufshcd_complete_requests(struct ufs_hba *hba)
 {
-	ufshcd_transfer_req_compl(hba, /*retry_requests=*/false);
-	ufshcd_tmc_handler(hba);
-}
-
-static void ufshcd_retry_aborted_requests(struct ufs_hba *hba)
-{
-	ufshcd_transfer_req_compl(hba, /*retry_requests=*/true);
+	ufshcd_transfer_req_compl(hba);
 	ufshcd_tmc_handler(hba);
 }
 
@@ -5914,9 +5919,10 @@ static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
 	       (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
 }
 
-/* host lock must be held before calling this func */
-static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
+void ufshcd_schedule_eh_work(struct ufs_hba *hba)
 {
+	lockdep_assert_held(hba->host->host_lock);
+
 	/* handle fatal errors only when link is not in error state */
 	if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
 		if (hba->force_reset || ufshcd_is_link_broken(hba) ||
@@ -6076,6 +6082,13 @@ static void ufshcd_err_handler(struct work_struct *work)
 
 	hba = container_of(work, struct ufs_hba, eh_work);
 
+	dev_info(hba->dev,
+		 "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
+		 __func__, ufshcd_state_name[hba->ufshcd_state],
+		 hba->is_powered, hba->shutting_down, hba->saved_err,
+		 hba->saved_uic_err, hba->force_reset,
+		 ufshcd_is_link_broken(hba) ? "; link is broken" : "");
+
 	down(&hba->host_sem);
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (ufshcd_err_handling_should_stop(hba)) {
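ufshcd_schedule_eh_work() above turns the old "host lock must be held" comment into lockdep_assert_held(), so a violated locking contract now fails loudly under lockdep instead of passing silently. Outside the kernel the same contract can be approximated by tracking the lock owner; a rough pthread-based analogy (not the kernel mechanism):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t host_lock_owner;	/* valid only while host_lock_held */
static int host_lock_held;

static void host_lock_acquire(void)
{
	pthread_mutex_lock(&host_lock);
	host_lock_owner = pthread_self();
	host_lock_held = 1;
}

static void host_lock_release(void)
{
	host_lock_held = 0;
	pthread_mutex_unlock(&host_lock);
}

/* analogous to lockdep_assert_held(): document and enforce the precondition */
static void assert_host_lock_held(void)
{
	assert(host_lock_held && pthread_equal(host_lock_owner, pthread_self()));
}

static void schedule_eh_work(void)
{
	assert_host_lock_held();	/* caller must hold host_lock */
	puts("error handler scheduled");
}

int main(void)
{
	host_lock_acquire();
	schedule_eh_work();
	host_lock_release();
	return 0;
}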
@@ -6170,6 +6183,8 @@ again:
 			err_xfer = true;
 			goto lock_skip_pending_xfer_clear;
 		}
+		dev_err(hba->dev, "Aborted tag %d / CDB %#02x\n", tag,
+			hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1);
 	}
 
 	/* Clear pending task management requests */
@@ -6181,7 +6196,8 @@ again:
 	}
 
lock_skip_pending_xfer_clear:
-	ufshcd_retry_aborted_requests(hba);
+	/* Complete the requests that are cleared by s/w */
+	ufshcd_complete_requests(hba);
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	hba->silence_err_logs = false;
@@ -6249,6 +6265,9 @@ skip_err_handling:
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	ufshcd_err_handling_unprepare(hba);
 	up(&hba->host_sem);
+
+	dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
+		 ufshcd_state_name[hba->ufshcd_state]);
 }
 
 /**
@@ -6473,7 +6492,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
 		retval |= ufshcd_tmc_handler(hba);
 
 	if (intr_status & UTP_TRANSFER_REQ_COMPL)
-		retval |= ufshcd_transfer_req_compl(hba, /*retry_requests=*/false);
+		retval |= ufshcd_transfer_req_compl(hba);
 
 	return retval;
 }
@@ -6545,6 +6564,10 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
 
 	err = ufshcd_wait_for_register(hba,
 			REG_UTP_TASK_REQ_DOOR_BELL, mask, 0, 1000, 1000);
+
+	dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
+		tag, err ? "failed" : "succeeded");
+
 out:
 	return err;
 }
@@ -6637,7 +6660,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
 		u8 tm_function, u8 *tm_response)
 {
 	struct utp_task_req_desc treq = { { 0 }, };
-	int ocs_value, err;
+	enum utp_ocs ocs_value;
+	int err;
 
 	/* Configure task request descriptor */
 	treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
@@ -6815,7 +6839,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
 	int err;
 	enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
 	struct utp_task_req_desc treq = { { 0 }, };
-	int ocs_value;
+	enum utp_ocs ocs_value;
 	u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
 
 	switch (msgcode) {
@@ -6893,7 +6917,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 			err = ufshcd_clear_cmd(hba, pos);
 			if (err)
 				break;
-			__ufshcd_transfer_req_compl(hba, 1U << pos, false);
+			__ufshcd_transfer_req_compl(hba, 1U << pos);
 		}
 	}
 
@@ -7055,7 +7079,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 		dev_err(hba->dev,
 			"%s: cmd was completed, but without a notifying intr, tag = %d",
 			__func__, tag);
-		__ufshcd_transfer_req_compl(hba, 1UL << tag, /*retry_requests=*/false);
+		__ufshcd_transfer_req_compl(hba, 1UL << tag);
 		goto release;
 	}
 
@@ -7121,7 +7145,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
 	ufshpb_reset_host(hba);
 	ufshcd_hba_stop(hba);
 	hba->silence_err_logs = true;
-	ufshcd_retry_aborted_requests(hba);
+	ufshcd_complete_requests(hba);
 	hba->silence_err_logs = false;
 
 	/* scale up clocks to max frequency before full reinitialization */
@@ -8002,6 +8026,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
 	if (ret)
 		goto out;
 
+	if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
+		goto out;
+
 	/* Debug counters initialization */
 	ufshcd_clear_dbg_ufs_stats(hba);
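The ufshcd_core_init() hunk that follows adds a compile-time check that struct utp_transfer_cmd_desc contains no compiler-inserted padding, since the controller consumes the raw memory layout. The same idea in standalone C11 (_Static_assert, with a toy descriptor and invented sizes):

#include <stdint.h>
#include <stdio.h>

struct prd_entry {
	uint64_t addr;
	uint32_t reserved;
	uint32_t size;
};

struct cmd_desc {
	uint8_t command_upiu[512];
	uint8_t response_upiu[512];
	struct prd_entry prd_table[128];
};

/* fails to compile if the compiler inserted any padding between members */
_Static_assert(sizeof(struct cmd_desc) ==
	       2 * 512 + 128 * sizeof(struct prd_entry),
	       "struct cmd_desc must be packed tightly");

int main(void)
{
	printf("sizeof(struct cmd_desc) = %zu\n", sizeof(struct cmd_desc));
	return 0;
}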
@@ -9772,6 +9799,11 @@ static int __init ufshcd_core_init(void)
 {
 	int ret;
 
+	/* Verify that there are no gaps in struct utp_transfer_cmd_desc. */
+	static_assert(sizeof(struct utp_transfer_cmd_desc) ==
+		      2 * ALIGNED_UPIU_SIZE +
+		      SG_ALL * sizeof(struct ufshcd_sg_entry));
+
 	ufs_debugfs_init();
 
 	ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index dd765863b05f..54750d72c8fb 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -589,6 +589,18 @@ enum ufshcd_quirks {
 	 * This quirk allows only sg entries aligned with page size.
 	 */
 	UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE		= 1 << 14,
+
+	/*
+	 * This quirk needs to be enabled if the host controller does not
+	 * support UIC command
+	 */
+	UFSHCD_QUIRK_BROKEN_UIC_CMD			= 1 << 15,
+
+	/*
+	 * This quirk needs to be enabled if the host controller cannot
+	 * support physical host configuration.
+	 */
+	UFSHCD_QUIRK_SKIP_PH_CONFIGURATION		= 1 << 16,
 };
 
 enum ufshcd_caps {
@@ -1023,6 +1035,7 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
 void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
 void ufshcd_hba_stop(struct ufs_hba *hba);
+void ufshcd_schedule_eh_work(struct ufs_hba *hba);
 
 static inline void check_upiu_size(void)
 {
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index de95be5d11d4..6a295c88d850 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -389,7 +389,7 @@
 };
 
 /* Overall command status values */
-enum {
+enum utp_ocs {
 	OCS_SUCCESS			= 0x0,
 	OCS_INVALID_CMD_TABLE_ATTR	= 0x1,
 	OCS_INVALID_PRDT_ATTR		= 0x2,
@@ -402,6 +402,9 @@
 	OCS_INVALID_CRYPTO_CONFIG	= 0x9,
 	OCS_GENERAL_CRYPTO_ERROR	= 0xA,
 	OCS_INVALID_COMMAND_STATUS	= 0x0F,
+};
+
+enum {
 	MASK_OCS			= 0x0F,
 };
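Splitting MASK_OCS out of the now-named enum utp_ocs above lets helpers return a properly typed OCS value while the mask stays an untyped constant. A small illustration of the extract-and-switch pattern:

#include <stdint.h>
#include <stdio.h>

enum utp_ocs {			/* subset of the values in ufshci.h */
	OCS_SUCCESS = 0x0,
	OCS_ABORTED = 0x6,
	OCS_INVALID_COMMAND_STATUS = 0x0F,
};

enum { MASK_OCS = 0x0F };	/* the mask is not itself an OCS value */

static enum utp_ocs get_ocs(uint32_t utrd_dword_2)
{
	return (enum utp_ocs)(utrd_dword_2 & MASK_OCS);
}

int main(void)
{
	uint32_t dword_2 = 0xabc0 | OCS_ABORTED;	/* upper bits unrelated */

	switch (get_ocs(dword_2)) {
	case OCS_SUCCESS: puts("success"); break;
	case OCS_ABORTED: puts("aborted"); break;
	default:	  puts("other");   break;
	}
	return 0;
}

Beyond readability, the enum return type lets the compiler warn about non-exhaustive switch statements over the status values.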
@@ -412,20 +415,18 @@
 
 /**
  * struct ufshcd_sg_entry - UFSHCI PRD Entry
- * @base_addr: Lower 32bit physical address DW-0
- * @upper_addr: Upper 32bit physical address DW-1
+ * @addr: Physical address; DW-0 and DW-1.
  * @reserved: Reserved for future use DW-2
  * @size: size of physical segment DW-3
 */
 struct ufshcd_sg_entry {
-	__le32 base_addr;
-	__le32 upper_addr;
+	__le64 addr;
 	__le32 reserved;
 	__le32 size;
 };
 
 /**
- * struct utp_transfer_cmd_desc - UFS Command Descriptor structure
+ * struct utp_transfer_cmd_desc - UTP Command Descriptor (UCD)
  * @command_upiu: Command UPIU Frame address
  * @response_upiu: Response UPIU Frame address
  * @prd_table: Physical Region Descriptor
@@ -451,7 +452,7 @@ struct request_desc_header {
 };
 
 /**
- * struct utp_transfer_req_desc - UTRD structure
+ * struct utp_transfer_req_desc - UTP Transfer Request Descriptor (UTRD)
  * @header: UTRD header DW-0 to DW-3
  * @command_desc_base_addr_lo: UCD base address low DW-4
  * @command_desc_base_addr_hi: UCD base address high DW-5
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index f4eca441b433..2e31e1413826 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -394,8 +394,6 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 	if (!ufshpb_is_supported_chunk(hpb, transfer_len))
 		return 0;
 
-	WARN_ON_ONCE(transfer_len > HPB_MULTI_CHUNK_HIGH);
-
 	if (hpb->is_hcm) {
 		/*
 		 * in host control mode, reads are the main source for
@@ -1572,7 +1570,7 @@ static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
 	if (ufshpb_is_legacy(hba))
 		hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
 	else
-		hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH;
+		hpb->pre_req_max_tr_len = hpb_dev_info->max_hpb_single_cmd;
 
 	hpb->lu_pinned_start = hpb_lu_info->pinned_start;
 	hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
@@ -2582,7 +2580,7 @@ void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
 {
 	struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
 	int version, ret;
-	u32 max_hpb_single_cmd = HPB_MULTI_CHUNK_LOW;
+	int max_single_cmd;
 
 	hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
 
@@ -2598,18 +2596,22 @@ void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
 	if (version == HPB_SUPPORT_LEGACY_VERSION)
 		hpb_dev_info->is_legacy = true;
 
-	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
-		QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_hpb_single_cmd);
-	if (ret)
-		dev_err(hba->dev, "%s: idn: read max size of single hpb cmd query request failed",
-			__func__);
-
-	hpb_dev_info->max_hpb_single_cmd = max_hpb_single_cmd;
-
 	/*
 	 * Get the number of user logical unit to check whether all
 	 * scsi_device finish initialization
 	 */
 	hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
+
+	if (hpb_dev_info->is_legacy)
+		return;
+
+	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+		QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_single_cmd);
+
+	if (ret)
+		hpb_dev_info->max_hpb_single_cmd = HPB_LEGACY_CHUNK_HIGH;
+	else
+		hpb_dev_info->max_hpb_single_cmd = min(max_single_cmd + 1, HPB_MULTI_CHUNK_HIGH);
 }
 
 void ufshpb_init(struct ufs_hba *hba)
diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h
index f15d8fdbce2e..b475dbd78988 100644
--- a/drivers/scsi/ufs/ufshpb.h
+++ b/drivers/scsi/ufs/ufshpb.h
@@ -31,7 +31,6 @@
 
 /* hpb support chunk size */
 #define HPB_LEGACY_CHUNK_HIGH		1
-#define HPB_MULTI_CHUNK_LOW		7
 #define HPB_MULTI_CHUNK_HIGH		255
 
 /* hpb vender defined opcode */
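In the ufshpb.c hunk above, max_hpb_single_cmd is now derived from the device's MAX_HPB_SINGLE_CMD attribute only on non-legacy devices: the +1 presumably compensates for the attribute being 0-based, the result is clamped to HPB_MULTI_CHUNK_HIGH, and a failed query falls back to the legacy chunk size. The arithmetic in isolation:

#include <stdio.h>

#define HPB_LEGACY_CHUNK_HIGH	1
#define HPB_MULTI_CHUNK_HIGH	255

static int min_int(int a, int b) { return a < b ? a : b; }

/* query_failed / attr model the outcome of the device attribute read */
static int max_hpb_single_cmd(int query_failed, int attr)
{
	if (query_failed)
		return HPB_LEGACY_CHUNK_HIGH;
	return min_int(attr + 1, HPB_MULTI_CHUNK_HIGH);	/* attr is 0-based */
}

int main(void)
{
	printf("attr=127 -> %d\n", max_hpb_single_cmd(0, 127));	/* 128 */
	printf("attr=400 -> %d\n", max_hpb_single_cmd(0, 400));	/* clamped to 255 */
	printf("failed   -> %d\n", max_hpb_single_cmd(1, 0));	/* legacy: 1 */
	return 0;
}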
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index e7fcbc09f9db..bac111456fa1 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -50,15 +50,6 @@ EXPORT_SYMBOL(core_tmr_alloc_req);
 
 void core_tmr_release_req(struct se_tmr_req *tmr)
 {
-	struct se_device *dev = tmr->tmr_dev;
-	unsigned long flags;
-
-	if (dev) {
-		spin_lock_irqsave(&dev->se_tmr_lock, flags);
-		list_del_init(&tmr->tmr_list);
-		spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
-	}
-
 	kfree(tmr);
 }
 
@@ -156,13 +147,6 @@ void core_tmr_abort_task(
 		se_cmd->state_active = false;
 		spin_unlock_irqrestore(&dev->queues[i].lock, flags);
 
-		/*
-		 * Ensure that this ABORT request is visible to the LU
-		 * RESET code.
-		 */
-		if (!tmr->tmr_dev)
-			WARN_ON_ONCE(transport_lookup_tmr_lun(tmr->task_cmd) < 0);
-
 		if (dev->transport->tmr_notify)
 			dev->transport->tmr_notify(dev, TMR_ABORT_TASK,
 						   &aborted_list);
@@ -234,6 +218,7 @@ static void core_tmr_drain_tmr_list(
 		}
 
 		list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
+		tmr_p->tmr_dev = NULL;
 	}
 	spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
 
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 4a2e749eb182..7838dc20f713 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -676,6 +676,21 @@ static void target_remove_from_state_list(struct se_cmd *cmd)
 	spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
 }
 
+static void target_remove_from_tmr_list(struct se_cmd *cmd)
+{
+	struct se_device *dev = NULL;
+	unsigned long flags;
+
+	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+		dev = cmd->se_tmr_req->tmr_dev;
+
+	if (dev) {
+		spin_lock_irqsave(&dev->se_tmr_lock, flags);
+		if (cmd->se_tmr_req->tmr_dev)
+			list_del_init(&cmd->se_tmr_req->tmr_list);
+		spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+	}
+}
 /*
  * This function is called by the target core after the target core has
  * finished processing a SCSI command or SCSI TMF. Both the regular command
@@ -687,13 +702,6 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
 {
 	unsigned long flags;
 
-	target_remove_from_state_list(cmd);
-
-	/*
-	 * Clear struct se_cmd->se_lun before the handoff to FE.
-	 */
-	cmd->se_lun = NULL;
-
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	/*
 	 * Determine if frontend context caller is requesting the stopping of
@@ -728,8 +736,16 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
 	if (!lun)
 		return;
 
+	target_remove_from_state_list(cmd);
+	target_remove_from_tmr_list(cmd);
+
 	if (cmpxchg(&cmd->lun_ref_active, true, false))
 		percpu_ref_put(&lun->lun_ref);
+
+	/*
+	 * Clear struct se_cmd->se_lun before the handoff to FE.
+	 */
+	cmd->se_lun = NULL;
 }
 
 static void target_complete_failure_work(struct work_struct *work)
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index cd9eaa83432b..477a800a9543 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -73,7 +73,7 @@ enum scsi_cmnd_submitter {
 struct scsi_cmnd {
 	struct scsi_request req;
 	struct scsi_device *device;
-	struct list_head eh_entry; /* entry for the host eh_cmd_q */
+	struct list_head eh_entry; /* entry for the host eh_abort_list/eh_cmd_q */
 	struct delayed_work abort_work;
 
 	struct rcu_head rcu;
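transport_lun_remove_cmd() above relies on cmpxchg() of lun_ref_active so the percpu reference is dropped exactly once even if two teardown paths race. The equivalent guard with C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool lun_ref_active = true;
static atomic_int lun_refcount = 1;	/* stand-in for the percpu_ref */

static void lun_remove_cmd(void)
{
	bool expected = true;

	/* only the caller that flips true -> false drops the reference */
	if (atomic_compare_exchange_strong(&lun_ref_active, &expected, false))
		atomic_fetch_sub(&lun_refcount, 1);
}

int main(void)
{
	lun_remove_cmd();
	lun_remove_cmd();			/* second call is a no-op */
	printf("refcount = %d\n", atomic_load(&lun_refcount));	/* 0, not -1 */
	return 0;
}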
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 83a7890f1479..d1c6fc83b1e3 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -226,12 +226,6 @@ struct scsi_device {
 
 	struct device sdev_gendev, sdev_dev;
 
-	/*
-	 * The array size 6 provides space for one attribute group for the
-	 * SCSI core, four attribute groups defined by SCSI LLDs and one
-	 * terminating NULL pointer.
-	 */
-	const struct attribute_group *gendev_attr_groups[6];
 	struct execute_work	ew; /* used to get process context on put */
 	struct work_struct	requeue_work;
 
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index ae715959f886..ebe059badba0 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -551,6 +551,7 @@ struct Scsi_Host {
 	struct mutex		scan_mutex;/* serialize scanning activity */
 
+	struct list_head	eh_abort_list;
 	struct list_head	eh_cmd_q;
 	struct task_struct    * ehandler;	/* Error recovery thread. */
 	struct completion     * eh_action;	/* Wait for specific actions on the