Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (102 commits)
  [SCSI] scsi_dh: fix kconfig related build errors
  [SCSI] sym53c8xx: Fix bogus sym_que_entry re-implementation of container_of
  [SCSI] scsi_cmnd.h: remove double inclusion of linux/blkdev.h
  [SCSI] make struct scsi_{host,target}_type static
  [SCSI] fix locking in host use of blk_plug_device()
  [SCSI] zfcp: Cleanup external header file
  [SCSI] zfcp: Cleanup code in zfcp_erp.c
  [SCSI] zfcp: zfcp_fsf cleanup.
  [SCSI] zfcp: consolidate sysfs things into one file.
  [SCSI] zfcp: Cleanup of code in zfcp_aux.c
  [SCSI] zfcp: Cleanup of code in zfcp_scsi.c
  [SCSI] zfcp: Move status accessors from zfcp to SCSI include file.
  [SCSI] zfcp: Small QDIO cleanups
  [SCSI] zfcp: Adapter reopen for large number of unsolicited status
  [SCSI] zfcp: Fix error checking for ELS ADISC requests
  [SCSI] zfcp: wait until adapter is finished with ERP during auto-port
  [SCSI] ibmvfc: IBM Power Virtual Fibre Channel Adapter Client Driver
  [SCSI] sg: Add target reset support
  [SCSI] lib: Add support for the T10 (SCSI) Data Integrity Field CRC
  [SCSI] sd: Move scsi_disk() accessor function to sd.h
  ...
commit 89a93f2f48
Author: Linus Torvalds
Date:   2008-07-15 18:58:04 -07:00

106 changed files with 14864 additions and 14654 deletions

diff --git a/Documentation/scsi/aacraid.txt b/Documentation/scsi/aacraid.txt
@@ -56,19 +56,33 @@ Supported Cards/Chipsets
 9005:0285:9005:02d1	Adaptec 5405 (Voodoo40)
 9005:0285:15d9:02d2	SMC AOC-USAS-S8i-LP
 9005:0285:15d9:02d3	SMC AOC-USAS-S8iR-LP
-9005:0285:9005:02d4	Adaptec 2045 (Voodoo04 Lite)
-9005:0285:9005:02d5	Adaptec 2405 (Voodoo40 Lite)
-9005:0285:9005:02d6	Adaptec 2445 (Voodoo44 Lite)
-9005:0285:9005:02d7	Adaptec 2805 (Voodoo80 Lite)
+9005:0285:9005:02d4	Adaptec ASR-2045 (Voodoo04 Lite)
+9005:0285:9005:02d5	Adaptec ASR-2405 (Voodoo40 Lite)
+9005:0285:9005:02d6	Adaptec ASR-2445 (Voodoo44 Lite)
+9005:0285:9005:02d7	Adaptec ASR-2805 (Voodoo80 Lite)
+9005:0285:9005:02d8	Adaptec 5405G (Voodoo40 PM)
+9005:0285:9005:02d9	Adaptec 5445G (Voodoo44 PM)
+9005:0285:9005:02da	Adaptec 5805G (Voodoo80 PM)
+9005:0285:9005:02db	Adaptec 5085G (Voodoo08 PM)
+9005:0285:9005:02dc	Adaptec 51245G (Voodoo124 PM)
+9005:0285:9005:02dd	Adaptec 51645G (Voodoo164 PM)
+9005:0285:9005:02de	Adaptec 52445G (Voodoo244 PM)
+9005:0285:9005:02df	Adaptec ASR-2045G (Voodoo04 Lite PM)
+9005:0285:9005:02e0	Adaptec ASR-2405G (Voodoo40 Lite PM)
+9005:0285:9005:02e1	Adaptec ASR-2445G (Voodoo44 Lite PM)
+9005:0285:9005:02e2	Adaptec ASR-2805G (Voodoo80 Lite PM)
 1011:0046:9005:0364	Adaptec 5400S (Mustang)
+1011:0046:9005:0365	Adaptec 5400S (Mustang)
 9005:0287:9005:0800	Adaptec Themisto (Jupiter)
 9005:0200:9005:0200	Adaptec Themisto (Jupiter)
 9005:0286:9005:0800	Adaptec Callisto (Jupiter)
 1011:0046:9005:1364	Dell PERC 2/QC (Quad Channel, Mustang)
+1011:0046:9005:1365	Dell PERC 2/QC (Quad Channel, Mustang)
 1028:0001:1028:0001	Dell PERC 2/Si (Iguana)
 1028:0003:1028:0003	Dell PERC 3/Si (SlimFast)
 1028:0002:1028:0002	Dell PERC 3/Di (Opal)
-1028:0004:1028:0004	Dell PERC 3/DiF (Iguana)
+1028:0004:1028:0004	Dell PERC 3/SiF (Iguana)
+1028:0004:1028:00d0	Dell PERC 3/DiF (Iguana)
 1028:0002:1028:00d1	Dell PERC 3/DiV (Viper)
 1028:0002:1028:00d9	Dell PERC 3/DiL (Lexus)
 1028:000a:1028:0106	Dell PERC 3/DiJ (Jaguar)

diff --git a/block/bsg.c b/block/bsg.c

@@ -740,8 +740,13 @@ static int bsg_put_device(struct bsg_device *bd)
 	mutex_lock(&bsg_mutex);
 	do_free = atomic_dec_and_test(&bd->ref_count);
-	if (!do_free)
+	if (!do_free) {
+		mutex_unlock(&bsg_mutex);
 		goto out;
+	}
+
+	hlist_del(&bd->dev_list);
+	mutex_unlock(&bsg_mutex);
 
 	dprintk("%s: tearing down\n", bd->name);
@@ -757,10 +762,8 @@ static int bsg_put_device(struct bsg_device *bd)
 	 */
 	ret = bsg_complete_all_commands(bd);
 
-	hlist_del(&bd->dev_list);
 	kfree(bd);
 out:
-	mutex_unlock(&bsg_mutex);
 	kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
 	if (do_free)
 		blk_put_queue(q);
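Read together, the two hunks fix the lock ordering in bsg_put_device(): the device is now unhashed while bsg_mutex is still held, and the mutex is dropped before teardown work that can block. A condensed sketch of the resulting function (hypothetical name; error handling and the kref/queue puts elided):

	static int bsg_put_device_sketch(struct bsg_device *bd)
	{
		int do_free;

		mutex_lock(&bsg_mutex);
		do_free = atomic_dec_and_test(&bd->ref_count);
		if (!do_free) {
			mutex_unlock(&bsg_mutex);
			return 0;
		}

		/* unhash under the mutex so no new lookup finds a dying device */
		hlist_del(&bd->dev_list);
		/* drop the mutex before teardown, which may block */
		mutex_unlock(&bsg_mutex);

		/* ... bsg_complete_all_commands(bd); kfree(bd); ... */
		return 0;
	}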

diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c

@@ -71,6 +71,10 @@
 #include "iscsi_iser.h"
 
+static struct scsi_host_template iscsi_iser_sht;
+static struct iscsi_transport iscsi_iser_transport;
+static struct scsi_transport_template *iscsi_iser_scsi_transport;
+
 static unsigned int iscsi_max_lun = 512;
 module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
@@ -91,7 +95,6 @@ iscsi_iser_recv(struct iscsi_conn *conn,
 		struct iscsi_hdr *hdr, char *rx_data, int rx_data_len)
 {
 	int rc = 0;
-	uint32_t ret_itt;
 	int datalen;
 	int ahslen;
@@ -107,12 +110,7 @@ iscsi_iser_recv(struct iscsi_conn *conn,
 	/* read AHS */
 	ahslen = hdr->hlength * 4;
 
-	/* verify itt (itt encoding: age+cid+itt) */
-	rc = iscsi_verify_itt(conn, hdr, &ret_itt);
-	if (!rc)
-		rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
+	rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
 	if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
 		goto error;
@@ -123,25 +121,33 @@ error:
 /**
- * iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * iscsi_iser_task_init - Initialize task
+ * @task: iscsi task
  *
- **/
+ * Initialize the task for the scsi command or mgmt command.
+ */
 static int
-iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+iscsi_iser_task_init(struct iscsi_task *task)
 {
-	struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
-	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+	struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
+	struct iscsi_iser_task *iser_task = task->dd_data;
 
-	iser_ctask->command_sent = 0;
-	iser_ctask->iser_conn = iser_conn;
-	iser_ctask_rdma_init(iser_ctask);
+	/* mgmt task */
+	if (!task->sc) {
+		iser_task->desc.data = task->data;
+		return 0;
+	}
+
+	iser_task->command_sent = 0;
+	iser_task->iser_conn = iser_conn;
+	iser_task_rdma_init(iser_task);
 	return 0;
 }
 
 /**
- * iscsi_mtask_xmit - xmit management(immediate) task
+ * iscsi_iser_mtask_xmit - xmit management(immediate) task
  * @conn: iscsi connection
- * @mtask: task management task
+ * @task: task management task
  *
  * Notes:
  *	The function can return -EAGAIN in which case caller must
@@ -150,20 +156,19 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
  *
  **/
 static int
-iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
-		      struct iscsi_mgmt_task *mtask)
+iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
 {
 	int error = 0;
 
-	debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
+	debug_scsi("task deq [cid %d itt 0x%x]\n", conn->id, task->itt);
 
-	error = iser_send_control(conn, mtask);
+	error = iser_send_control(conn, task);
 
-	/* since iser xmits control with zero copy, mtasks can not be recycled
+	/* since iser xmits control with zero copy, tasks can not be recycled
 	 * right after sending them.
 	 * The recycling scheme is based on whether a response is expected
-	 * - if yes, the mtask is recycled at iscsi_complete_pdu
-	 * - if no, the mtask is recycled at iser_snd_completion
+	 * - if yes, the task is recycled at iscsi_complete_pdu
+	 * - if no, the task is recycled at iser_snd_completion
 	 */
 	if (error && error != -ENOBUFS)
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
@@ -172,99 +177,88 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
 }
 
 static int
-iscsi_iser_ctask_xmit_unsol_data(struct iscsi_conn *conn,
-				 struct iscsi_cmd_task *ctask)
+iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
+				struct iscsi_task *task)
 {
 	struct iscsi_data hdr;
 	int error = 0;
 
 	/* Send data-out PDUs while there's still unsolicited data to send */
-	while (ctask->unsol_count > 0) {
-		iscsi_prep_unsolicit_data_pdu(ctask, &hdr);
+	while (task->unsol_count > 0) {
+		iscsi_prep_unsolicit_data_pdu(task, &hdr);
 		debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
-			   hdr.itt, ctask->data_count);
+			   hdr.itt, task->data_count);
 
 		/* the buffer description has been passed with the command */
 		/* Send the command */
-		error = iser_send_data_out(conn, ctask, &hdr);
+		error = iser_send_data_out(conn, task, &hdr);
 		if (error) {
-			ctask->unsol_datasn--;
-			goto iscsi_iser_ctask_xmit_unsol_data_exit;
+			task->unsol_datasn--;
+			goto iscsi_iser_task_xmit_unsol_data_exit;
 		}
-		ctask->unsol_count -= ctask->data_count;
+		task->unsol_count -= task->data_count;
 		debug_scsi("Need to send %d more as data-out PDUs\n",
-			   ctask->unsol_count);
+			   task->unsol_count);
 	}
 
-iscsi_iser_ctask_xmit_unsol_data_exit:
+iscsi_iser_task_xmit_unsol_data_exit:
 	return error;
 }
 
 static int
-iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
-		      struct iscsi_cmd_task *ctask)
+iscsi_iser_task_xmit(struct iscsi_task *task)
 {
-	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+	struct iscsi_conn *conn = task->conn;
+	struct iscsi_iser_task *iser_task = task->dd_data;
 	int error = 0;
 
-	if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
-		BUG_ON(scsi_bufflen(ctask->sc) == 0);
+	if (!task->sc)
+		return iscsi_iser_mtask_xmit(conn, task);
+
+	if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
+		BUG_ON(scsi_bufflen(task->sc) == 0);
 
 		debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
-			   ctask->itt, scsi_bufflen(ctask->sc),
-			   ctask->imm_count, ctask->unsol_count);
+			   task->itt, scsi_bufflen(task->sc),
+			   task->imm_count, task->unsol_count);
 	}
 
-	debug_scsi("ctask deq [cid %d itt 0x%x]\n",
-		   conn->id, ctask->itt);
+	debug_scsi("task deq [cid %d itt 0x%x]\n",
+		   conn->id, task->itt);
 
 	/* Send the cmd PDU */
-	if (!iser_ctask->command_sent) {
-		error = iser_send_command(conn, ctask);
+	if (!iser_task->command_sent) {
+		error = iser_send_command(conn, task);
 		if (error)
-			goto iscsi_iser_ctask_xmit_exit;
-		iser_ctask->command_sent = 1;
+			goto iscsi_iser_task_xmit_exit;
+		iser_task->command_sent = 1;
 	}
 
 	/* Send unsolicited data-out PDU(s) if necessary */
-	if (ctask->unsol_count)
-		error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
+	if (task->unsol_count)
+		error = iscsi_iser_task_xmit_unsol_data(conn, task);
 
-iscsi_iser_ctask_xmit_exit:
+iscsi_iser_task_xmit_exit:
 	if (error && error != -ENOBUFS)
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
 	return error;
 }
 
 static void
-iscsi_iser_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
 {
-	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+	struct iscsi_iser_task *iser_task = task->dd_data;
 
-	if (iser_ctask->status == ISER_TASK_STATUS_STARTED) {
-		iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
-		iser_ctask_rdma_finalize(iser_ctask);
+	/* mgmt tasks do not need special cleanup */
+	if (!task->sc)
+		return;
+
+	if (iser_task->status == ISER_TASK_STATUS_STARTED) {
+		iser_task->status = ISER_TASK_STATUS_COMPLETED;
+		iser_task_rdma_finalize(iser_task);
 	}
 }
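The pattern above recurs throughout this rework: management and SCSI tasks now share struct iscsi_task, and drivers tell them apart by task->sc, which is NULL for management PDUs. A hypothetical helper stating that convention (not a real helper in this tree):

	/* NULL scsi_cmnd marks a management (immediate) PDU under the
	 * unified-task scheme. */
	static inline bool iser_task_is_mgmt(struct iscsi_task *task)
	{
		return task->sc == NULL;
	}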
-static struct iser_conn *
-iscsi_iser_ib_conn_lookup(__u64 ep_handle)
-{
-	struct iser_conn *ib_conn;
-	struct iser_conn *uib_conn = (struct iser_conn *)(unsigned long)ep_handle;
-
-	mutex_lock(&ig.connlist_mutex);
-	list_for_each_entry(ib_conn, &ig.connlist, conn_list) {
-		if (ib_conn == uib_conn) {
-			mutex_unlock(&ig.connlist_mutex);
-			return ib_conn;
-		}
-	}
-	mutex_unlock(&ig.connlist_mutex);
-	iser_err("no conn exists for eph %llx\n",(unsigned long long)ep_handle);
-	return NULL;
-}
-
 static struct iscsi_cls_conn *
 iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
 {
@@ -272,7 +266,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
 	struct iscsi_cls_conn *cls_conn;
 	struct iscsi_iser_conn *iser_conn;
 
-	cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+	cls_conn = iscsi_conn_setup(cls_session, sizeof(*iser_conn), conn_idx);
 	if (!cls_conn)
 		return NULL;
 	conn = cls_conn->dd_data;
@@ -283,21 +277,11 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
 	 */
 	conn->max_recv_dlength = 128;
 
-	iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
-	if (!iser_conn)
-		goto conn_alloc_fail;
-
-	/* currently this is the only field which need to be initiated */
-	rwlock_init(&iser_conn->lock);
-
+	iser_conn = conn->dd_data;
 	conn->dd_data = iser_conn;
 	iser_conn->iscsi_conn = conn;
 
 	return cls_conn;
-
-conn_alloc_fail:
-	iscsi_conn_teardown(cls_conn);
-	return NULL;
 }
 
 static void
@@ -305,11 +289,18 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
+	struct iser_conn *ib_conn = iser_conn->ib_conn;
 
 	iscsi_conn_teardown(cls_conn);
-	if (iser_conn->ib_conn)
-		iser_conn->ib_conn->iser_conn = NULL;
-	kfree(iser_conn);
+	/*
+	 * Userspace will normally call the stop callback and
+	 * already have freed the ib_conn, but if it goofed up then
+	 * we free it here.
+	 */
+	if (ib_conn) {
+		ib_conn->iser_conn = NULL;
+		iser_conn_put(ib_conn);
+	}
 }
 
 static int
@@ -320,6 +311,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	struct iscsi_iser_conn *iser_conn;
 	struct iser_conn *ib_conn;
+	struct iscsi_endpoint *ep;
 	int error;
 
 	error = iscsi_conn_bind(cls_session, cls_conn, is_leading);
@@ -328,12 +320,14 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 
 	/* the transport ep handle comes from user space so it must be
 	 * verified against the global ib connections list */
-	ib_conn = iscsi_iser_ib_conn_lookup(transport_eph);
-	if (!ib_conn) {
+	ep = iscsi_lookup_endpoint(transport_eph);
+	if (!ep) {
 		iser_err("can't bind eph %llx\n",
 			 (unsigned long long)transport_eph);
 		return -EINVAL;
 	}
+	ib_conn = ep->dd_data;
 
 	/* binds the iSER connection retrieved from the previously
 	 * connected ep_handle to the iSCSI layer connection. exchanges
 	 * connection pointers */
@@ -341,12 +335,32 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 	iser_conn = conn->dd_data;
 	ib_conn->iser_conn = iser_conn;
 	iser_conn->ib_conn  = ib_conn;
-	conn->recv_lock = &iser_conn->lock;
+	iser_conn_get(ib_conn);
 
 	return 0;
 }
 
+static void
+iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct iscsi_iser_conn *iser_conn = conn->dd_data;
+	struct iser_conn *ib_conn = iser_conn->ib_conn;
+
+	/*
+	 * Userspace may have goofed up and not bound the connection or
+	 * might have only partially setup the connection.
+	 */
+	if (ib_conn) {
+		iscsi_conn_stop(cls_conn, flag);
+		/*
+		 * There is no unbind event so the stop callback
+		 * must release the ref from the bind.
+		 */
+		iser_conn_put(ib_conn);
+	}
+	iser_conn->ib_conn = NULL;
+}
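iscsi_iser_conn_stop() releases the reference that iser_conn_get() took at bind time. The get/put bodies live elsewhere in the driver and are not part of this diff; given the atomic_t refcount field added to struct iser_conn (see the header hunks below), a plausible shape is:

	/* sketch only; the release function name is an assumption */
	void iser_conn_get(struct iser_conn *ib_conn)
	{
		atomic_inc(&ib_conn->refcount);
	}

	void iser_conn_put(struct iser_conn *ib_conn)
	{
		if (atomic_dec_and_test(&ib_conn->refcount))
			iser_conn_release(ib_conn);	/* final free */
	}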
 static int
 iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
 {
@@ -360,55 +374,75 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
 	return iscsi_conn_start(cls_conn);
 }
 
-static struct iscsi_transport iscsi_iser_transport;
+static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
+{
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+
+	iscsi_host_remove(shost);
+	iscsi_host_free(shost);
+}
 
 static struct iscsi_cls_session *
-iscsi_iser_session_create(struct iscsi_transport *iscsit,
-			  struct scsi_transport_template *scsit,
-			  uint16_t cmds_max, uint16_t qdepth,
-			  uint32_t initial_cmdsn, uint32_t *hostno)
+iscsi_iser_session_create(struct iscsi_endpoint *ep,
+			  uint16_t cmds_max, uint16_t qdepth,
+			  uint32_t initial_cmdsn, uint32_t *hostno)
 {
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_session *session;
+	struct Scsi_Host *shost;
 	int i;
-	uint32_t hn;
-	struct iscsi_cmd_task *ctask;
-	struct iscsi_mgmt_task *mtask;
-	struct iscsi_iser_cmd_task *iser_ctask;
-	struct iser_desc *desc;
+	struct iscsi_task *task;
+	struct iscsi_iser_task *iser_task;
+	struct iser_conn *ib_conn;
+
+	shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN);
+	if (!shost)
+		return NULL;
+	shost->transportt = iscsi_iser_scsi_transport;
+	shost->max_lun = iscsi_max_lun;
+	shost->max_id = 0;
+	shost->max_channel = 0;
+	shost->max_cmd_len = 16;
+
+	/*
+	 * older userspace tools (before 2.0-870) did not pass us
+	 * the leading conn's ep so this will be NULL;
+	 */
+	if (ep)
+		ib_conn = ep->dd_data;
+
+	if (iscsi_host_add(shost,
+			   ep ? ib_conn->device->ib_device->dma_device : NULL))
+		goto free_host;
+	*hostno = shost->host_no;
 
 	/*
 	 * we do not support setting can_queue cmd_per_lun from userspace yet
 	 * because we preallocate so many resources
 	 */
-	cls_session = iscsi_session_setup(iscsit, scsit,
+	cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
 					  ISCSI_DEF_XMIT_CMDS_MAX,
-					  ISCSI_MAX_CMD_PER_LUN,
-					  sizeof(struct iscsi_iser_cmd_task),
-					  sizeof(struct iser_desc),
-					  initial_cmdsn, &hn);
+					  sizeof(struct iscsi_iser_task),
+					  initial_cmdsn, 0);
 	if (!cls_session)
-		return NULL;
-
-	*hostno = hn;
-	session = class_to_transport_session(cls_session);
+		goto remove_host;
+	session = cls_session->dd_data;
 
+	shost->can_queue = session->scsi_cmds_max;
 	/* libiscsi setup itts, data and pool so just set desc fields */
 	for (i = 0; i < session->cmds_max; i++) {
-		ctask = session->cmds[i];
-		iser_ctask = ctask->dd_data;
-		ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
-		ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
+		task = session->cmds[i];
+		iser_task = task->dd_data;
+		task->hdr = (struct iscsi_cmd *)&iser_task->desc.iscsi_header;
+		task->hdr_max = sizeof(iser_task->desc.iscsi_header);
 	}
-
-	for (i = 0; i < session->mgmtpool_max; i++) {
-		mtask = session->mgmt_cmds[i];
-		desc = mtask->dd_data;
-		mtask->hdr = &desc->iscsi_header;
-		desc->data = mtask->data;
-	}
-
 	return cls_session;
+
+remove_host:
+	iscsi_host_remove(shost);
+free_host:
+	iscsi_host_free(shost);
+	return NULL;
 }
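The allocation order matters here: the Scsi_Host must exist before iscsi_session_setup() hangs the session off it, so the error unwind walks the chain backwards, and .destroy_session (iscsi_iser_session_destroy above) repeats the tail of that unwind for the success path. Schematically, under the new API (annotated sequence, not literal driver code):

	/* create */
	shost = iscsi_host_alloc(...);          /* 1 */
	iscsi_host_add(shost, dma_dev);         /* 2 */
	cls_session = iscsi_session_setup(...); /* 3: goto remove_host on failure */

	/* destroy, and the error unwind from the failing step down */
	iscsi_session_teardown(cls_session);    /* 3' */
	iscsi_host_remove(shost);               /* 2' */
	iscsi_host_free(shost);                 /* 1' */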
 static int
@@ -481,34 +515,37 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
 	stats->custom[3].value = conn->fmr_unalign_cnt;
 }
 
-static int
-iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking,
-		      __u64 *ep_handle)
+static struct iscsi_endpoint *
+iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
 {
 	int err;
 	struct iser_conn *ib_conn;
+	struct iscsi_endpoint *ep;
 
-	err = iser_conn_init(&ib_conn);
-	if (err)
-		goto out;
+	ep = iscsi_create_endpoint(sizeof(*ib_conn));
+	if (!ep)
+		return ERR_PTR(-ENOMEM);
 
-	err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, non_blocking);
-	if (!err)
-		*ep_handle = (__u64)(unsigned long)ib_conn;
-
-out:
-	return err;
+	ib_conn = ep->dd_data;
+	ib_conn->ep = ep;
+	iser_conn_init(ib_conn);
+
+	err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr,
+			   non_blocking);
+	if (err) {
+		iscsi_destroy_endpoint(ep);
+		return ERR_PTR(err);
+	}
+	return ep;
 }
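With endpoints allocated by the transport class, errors travel back as ERR_PTR() values rather than through a __u64 handle out-parameter. A hypothetical caller, to show the expected contract:

	static struct iser_conn *connect_sketch(struct sockaddr *dst)
	{
		struct iscsi_endpoint *ep;

		ep = iscsi_iser_ep_connect(dst, 1 /* non_blocking */);
		if (IS_ERR(ep))
			return NULL;	/* PTR_ERR(ep) holds the errno */
		return ep->dd_data;	/* driver data follows the endpoint */
	}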
 static int
-iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
+iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
 {
-	struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
+	struct iser_conn *ib_conn;
 	int rc;
 
-	if (!ib_conn)
-		return -EINVAL;
+	ib_conn = ep->dd_data;
 
 	rc = wait_event_interruptible_timeout(ib_conn->wait,
 			     ib_conn->state == ISER_CONN_UP,
 			     msecs_to_jiffies(timeout_ms));
@@ -530,13 +567,21 @@ iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
 }
 
 static void
-iscsi_iser_ep_disconnect(__u64 ep_handle)
+iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
 {
 	struct iser_conn *ib_conn;
 
-	ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
-	if (!ib_conn)
-		return;
+	ib_conn = ep->dd_data;
+	if (ib_conn->iser_conn)
+		/*
+		 * Must suspend xmit path if the ep is bound to the
+		 * iscsi_conn, so we know we are not accessing the ib_conn
+		 * when we free it.
+		 *
+		 * This may not be bound if the ep poll failed.
+		 */
+		iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
 
 	iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
 	iser_conn_terminate(ib_conn);
@@ -547,7 +592,6 @@ static struct scsi_host_template iscsi_iser_sht = {
 	.name                   = "iSCSI Initiator over iSER, v." DRV_VER,
 	.queuecommand           = iscsi_queuecommand,
 	.change_queue_depth	= iscsi_change_queue_depth,
-	.can_queue		= ISCSI_DEF_XMIT_CMDS_MAX - 1,
 	.sg_tablesize           = ISCSI_ISER_SG_TABLESIZE,
 	.max_sectors		= 1024,
 	.cmd_per_lun            = ISCSI_MAX_CMD_PER_LUN,
@@ -581,17 +625,14 @@ static struct iscsi_transport iscsi_iser_transport = {
 				  ISCSI_USERNAME | ISCSI_PASSWORD |
 				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
 				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
-				  ISCSI_PING_TMO | ISCSI_RECV_TMO,
+				  ISCSI_PING_TMO | ISCSI_RECV_TMO |
+				  ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
 	.host_param_mask	= ISCSI_HOST_HWADDRESS |
 				  ISCSI_HOST_NETDEV_NAME |
 				  ISCSI_HOST_INITIATOR_NAME,
-	.host_template          = &iscsi_iser_sht,
-	.conndata_size          = sizeof(struct iscsi_conn),
-	.max_lun                = ISCSI_ISER_MAX_LUN,
-	.max_cmd_len            = ISCSI_ISER_MAX_CMD_LEN,
 	/* session management */
 	.create_session         = iscsi_iser_session_create,
-	.destroy_session        = iscsi_session_teardown,
+	.destroy_session        = iscsi_iser_session_destroy,
 	/* connection management */
 	.create_conn            = iscsi_iser_conn_create,
 	.bind_conn              = iscsi_iser_conn_bind,
@@ -600,17 +641,16 @@ static struct iscsi_transport iscsi_iser_transport = {
 	.get_conn_param		= iscsi_conn_get_param,
 	.get_session_param	= iscsi_session_get_param,
 	.start_conn             = iscsi_iser_conn_start,
-	.stop_conn              = iscsi_conn_stop,
+	.stop_conn              = iscsi_iser_conn_stop,
 	/* iscsi host params */
 	.get_host_param		= iscsi_host_get_param,
 	.set_host_param		= iscsi_host_set_param,
 	/* IO */
 	.send_pdu		= iscsi_conn_send_pdu,
 	.get_stats		= iscsi_iser_conn_get_stats,
-	.init_cmd_task		= iscsi_iser_cmd_init,
-	.xmit_cmd_task		= iscsi_iser_ctask_xmit,
-	.xmit_mgmt_task		= iscsi_iser_mtask_xmit,
-	.cleanup_cmd_task	= iscsi_iser_cleanup_ctask,
+	.init_task		= iscsi_iser_task_init,
+	.xmit_task		= iscsi_iser_task_xmit,
+	.cleanup_task		= iscsi_iser_cleanup_task,
 	/* recovery */
 	.session_recovery_timedout = iscsi_session_recovery_timedout,
@@ -630,8 +670,6 @@ static int __init iser_init(void)
 		return -EINVAL;
 	}
 
-	iscsi_iser_transport.max_lun = iscsi_max_lun;
-
 	memset(&ig, 0, sizeof(struct iser_global));
 
 	ig.desc_cache = kmem_cache_create("iser_descriptors",
@@ -647,7 +685,9 @@ static int __init iser_init(void)
 	mutex_init(&ig.connlist_mutex);
 	INIT_LIST_HEAD(&ig.connlist);
 
-	if (!iscsi_register_transport(&iscsi_iser_transport)) {
+	iscsi_iser_scsi_transport = iscsi_register_transport(
+							&iscsi_iser_transport);
+	if (!iscsi_iser_scsi_transport) {
 		iser_err("iscsi_register_transport failed\n");
 		err = -EINVAL;
 		goto register_transport_failure;
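The registration change at the bottom completes the inversion of control: iscsi_register_transport() now returns the scsi_transport_template that the driver must later plant into each host it allocates (shost->transportt in iscsi_iser_session_create() above), replacing the old declarative .host_template/.conndata_size fields. Minimal shape of the new init path, with unrelated setup elided:

	static int __init iser_init_sketch(void)
	{
		iscsi_iser_scsi_transport =
			iscsi_register_transport(&iscsi_iser_transport);
		if (!iscsi_iser_scsi_transport)
			return -EINVAL;
		return 0;
	}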

diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h

@@ -94,7 +94,6 @@
 					/* support upto 512KB in one RDMA */
 #define ISCSI_ISER_SG_TABLESIZE         (0x80000 >> SHIFT_4K)
 #define ISCSI_ISER_MAX_LUN		256
-#define ISCSI_ISER_MAX_CMD_LEN		16
 
 /* QP settings */
 /* Maximal bounds on received asynchronous PDUs */
@@ -172,7 +171,8 @@ struct iser_data_buf {
 /* fwd declarations */
 struct iser_device;
 struct iscsi_iser_conn;
-struct iscsi_iser_cmd_task;
+struct iscsi_iser_task;
+struct iscsi_endpoint;
 
 struct iser_mem_reg {
 	u32  lkey;
@@ -196,7 +196,7 @@ struct iser_regd_buf {
 #define MAX_REGD_BUF_VECTOR_LEN	2
 
 struct iser_dto {
-	struct iscsi_iser_cmd_task *ctask;
+	struct iscsi_iser_task *task;
 	struct iser_conn *ib_conn;
 	int notify_enable;
@@ -240,7 +240,9 @@ struct iser_device {
 
 struct iser_conn {
 	struct iscsi_iser_conn       *iser_conn; /* iser conn for upcalls  */
+	struct iscsi_endpoint	     *ep;
 	enum iser_ib_conn_state	     state;	 /* rdma connection state */
+	atomic_t		     refcount;
 	spinlock_t		     lock;	 /* used for state changes */
 	struct iser_device           *device;    /* device context         */
 	struct rdma_cm_id            *cma_id;    /* CMA ID                 */
@@ -259,11 +261,9 @@ struct iser_conn {
 
 struct iscsi_iser_conn {
 	struct iscsi_conn            *iscsi_conn;/* ptr to iscsi conn */
 	struct iser_conn             *ib_conn;   /* iSER IB conn      */
-	rwlock_t		     lock;
 };
 
-struct iscsi_iser_cmd_task {
+struct iscsi_iser_task {
 	struct iser_desc             desc;
 	struct iscsi_iser_conn	     *iser_conn;
 	enum iser_task_status	     status;
@@ -296,22 +296,26 @@ extern int iser_debug_level;
 
 /* allocate connection resources needed for rdma functionality */
 int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);
 
 int iser_send_control(struct iscsi_conn *conn,
-		      struct iscsi_mgmt_task *mtask);
+		      struct iscsi_task *task);
 
 int iser_send_command(struct iscsi_conn *conn,
-		      struct iscsi_cmd_task *ctask);
+		      struct iscsi_task *task);
 
 int iser_send_data_out(struct iscsi_conn *conn,
-		       struct iscsi_cmd_task *ctask,
+		       struct iscsi_task *task,
 		       struct iscsi_data *hdr);
 
 void iscsi_iser_recv(struct iscsi_conn *conn,
 		     struct iscsi_hdr *hdr,
		     char *rx_data,
 		     int rx_data_len);
 
-int  iser_conn_init(struct iser_conn **ib_conn);
+void iser_conn_init(struct iser_conn *ib_conn);
+void iser_conn_get(struct iser_conn *ib_conn);
+void iser_conn_put(struct iser_conn *ib_conn);
 
 void iser_conn_terminate(struct iser_conn *ib_conn);
@@ -320,9 +324,9 @@ void iser_rcv_completion(struct iser_desc *desc,
 
 void iser_snd_completion(struct iser_desc *desc);
 
-void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *ctask);
+void iser_task_rdma_init(struct iscsi_iser_task *task);
 
-void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *ctask);
+void iser_task_rdma_finalize(struct iscsi_iser_task *task);
 
 void iser_dto_buffs_release(struct iser_dto *dto);
@@ -332,10 +336,10 @@ void iser_reg_single(struct iser_device *device,
 		     struct iser_regd_buf *regd_buf,
 		     enum dma_data_direction direction);
 
-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
+void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
 				     enum iser_data_dir cmd_dir);
 
-int  iser_reg_rdma_mem(struct iscsi_iser_cmd_task *ctask,
+int  iser_reg_rdma_mem(struct iscsi_iser_task *task,
 		       enum iser_data_dir cmd_dir);
 
 int  iser_connect(struct iser_conn   *ib_conn,
@@ -355,10 +359,10 @@ int  iser_post_send(struct iser_desc *tx_desc);
 int iser_conn_state_comp(struct iser_conn *ib_conn,
 			 enum iser_ib_conn_state comp);
 
-int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 			   struct iser_data_buf *data,
 			   enum iser_data_dir iser_dir,
 			   enum dma_data_direction dma_dir);
 
-void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask);
+void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
 
 #endif

diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c

@@ -64,46 +64,46 @@ static void iser_dto_add_regd_buff(struct iser_dto *dto,
 
 /* Register user buffer memory and initialize passive rdma
  *  dto descriptor. Total data size is stored in
- *  iser_ctask->data[ISER_DIR_IN].data_len
+ *  iser_task->data[ISER_DIR_IN].data_len
  */
-static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
+static int iser_prepare_read_cmd(struct iscsi_task *task,
 				 unsigned int edtl)
 
 {
-	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+	struct iscsi_iser_task *iser_task = task->dd_data;
 	struct iser_regd_buf *regd_buf;
 	int err;
-	struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
-	struct iser_data_buf *buf_in = &iser_ctask->data[ISER_DIR_IN];
+	struct iser_hdr *hdr = &iser_task->desc.iser_header;
+	struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
 
-	err = iser_dma_map_task_data(iser_ctask,
+	err = iser_dma_map_task_data(iser_task,
 				     buf_in,
 				     ISER_DIR_IN,
 				     DMA_FROM_DEVICE);
 	if (err)
 		return err;
 
-	if (edtl > iser_ctask->data[ISER_DIR_IN].data_len) {
+	if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
 		iser_err("Total data length: %ld, less than EDTL: "
 			 "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
-			 iser_ctask->data[ISER_DIR_IN].data_len, edtl,
-			 ctask->itt, iser_ctask->iser_conn);
+			 iser_task->data[ISER_DIR_IN].data_len, edtl,
+			 task->itt, iser_task->iser_conn);
 		return -EINVAL;
 	}
 
-	err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_IN);
+	err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
 	if (err) {
 		iser_err("Failed to set up Data-IN RDMA\n");
 		return err;
 	}
-	regd_buf = &iser_ctask->rdma_regd[ISER_DIR_IN];
+	regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];
 
 	hdr->flags    |= ISER_RSV;
 	hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
 	hdr->read_va   = cpu_to_be64(regd_buf->reg.va);
 
 	iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
-		 ctask->itt, regd_buf->reg.rkey,
+		 task->itt, regd_buf->reg.rkey,
 		 (unsigned long long)regd_buf->reg.va);
 
 	return 0;
@@ -111,43 +111,43 @@ static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
 
 /* Register user buffer memory and initialize passive rdma
  *  dto descriptor. Total data size is stored in
- *  ctask->data[ISER_DIR_OUT].data_len
+ *  task->data[ISER_DIR_OUT].data_len
  */
 static int
-iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
+iser_prepare_write_cmd(struct iscsi_task *task,
 		       unsigned int imm_sz,
 		       unsigned int unsol_sz,
 		       unsigned int edtl)
 {
-	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+	struct iscsi_iser_task *iser_task = task->dd_data;
 	struct iser_regd_buf *regd_buf;
 	int err;
-	struct iser_dto *send_dto = &iser_ctask->desc.dto;
-	struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
-	struct iser_data_buf *buf_out = &iser_ctask->data[ISER_DIR_OUT];
+	struct iser_dto *send_dto = &iser_task->desc.dto;
+	struct iser_hdr *hdr = &iser_task->desc.iser_header;
+	struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
 
-	err = iser_dma_map_task_data(iser_ctask,
+	err = iser_dma_map_task_data(iser_task,
 				     buf_out,
 				     ISER_DIR_OUT,
 				     DMA_TO_DEVICE);
 	if (err)
 		return err;
 
-	if (edtl > iser_ctask->data[ISER_DIR_OUT].data_len) {
+	if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
 		iser_err("Total data length: %ld, less than EDTL: %d, "
 			 "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
-			 iser_ctask->data[ISER_DIR_OUT].data_len,
-			 edtl, ctask->itt, ctask->conn);
+			 iser_task->data[ISER_DIR_OUT].data_len,
+			 edtl, task->itt, task->conn);
 		return -EINVAL;
 	}
 
-	err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_OUT);
+	err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
 	if (err != 0) {
 		iser_err("Failed to register write cmd RDMA mem\n");
 		return err;
 	}
 
-	regd_buf = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+	regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
 
 	if (unsol_sz < edtl) {
 		hdr->flags     |= ISER_WSV;
@@ -156,13 +156,13 @@ iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
 
 		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
 			 "VA:%#llX + unsol:%d\n",
-			 ctask->itt, regd_buf->reg.rkey,
+			 task->itt, regd_buf->reg.rkey,
 			 (unsigned long long)regd_buf->reg.va, unsol_sz);
 	}
 
 	if (imm_sz > 0) {
 		iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
-			 ctask->itt, imm_sz);
+			 task->itt, imm_sz);
 		iser_dto_add_regd_buff(send_dto,
 				       regd_buf,
 				       0,
@@ -314,38 +314,38 @@ iser_check_xmit(struct iscsi_conn *conn, void *task)
 /**
  * iser_send_command - send command PDU
  */
 int iser_send_command(struct iscsi_conn *conn,
-		      struct iscsi_cmd_task *ctask)
+		      struct iscsi_task *task)
 {
 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
-	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+	struct iscsi_iser_task *iser_task = task->dd_data;
 	struct iser_dto *send_dto = NULL;
 	unsigned long edtl;
 	int err = 0;
 	struct iser_data_buf *data_buf;
-	struct iscsi_cmd *hdr = ctask->hdr;
-	struct scsi_cmnd *sc  = ctask->sc;
+	struct iscsi_cmd *hdr = task->hdr;
+	struct scsi_cmnd *sc  = task->sc;
 
 	if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
 		iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
 		return -EPERM;
 	}
-	if (iser_check_xmit(conn, ctask))
+	if (iser_check_xmit(conn, task))
 		return -ENOBUFS;
 
 	edtl = ntohl(hdr->data_length);
 
 	/* build the tx desc regd header and add it to the tx desc dto */
-	iser_ctask->desc.type = ISCSI_TX_SCSI_COMMAND;
-	send_dto = &iser_ctask->desc.dto;
-	send_dto->ctask = iser_ctask;
-	iser_create_send_desc(iser_conn, &iser_ctask->desc);
+	iser_task->desc.type = ISCSI_TX_SCSI_COMMAND;
+	send_dto = &iser_task->desc.dto;
+	send_dto->task = iser_task;
+	iser_create_send_desc(iser_conn, &iser_task->desc);
 
 	if (hdr->flags & ISCSI_FLAG_CMD_READ)
-		data_buf = &iser_ctask->data[ISER_DIR_IN];
+		data_buf = &iser_task->data[ISER_DIR_IN];
 	else
-		data_buf = &iser_ctask->data[ISER_DIR_OUT];
+		data_buf = &iser_task->data[ISER_DIR_OUT];
 
 	if (scsi_sg_count(sc)) { /* using a scatter list */
 		data_buf->buf  = scsi_sglist(sc);
@@ -355,15 +355,15 @@ int iser_send_command(struct iscsi_conn *conn,
 	data_buf->data_len = scsi_bufflen(sc);
 
 	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
-		err = iser_prepare_read_cmd(ctask, edtl);
+		err = iser_prepare_read_cmd(task, edtl);
 		if (err)
 			goto send_command_error;
 	}
 	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
-		err = iser_prepare_write_cmd(ctask,
-					     ctask->imm_count,
-					     ctask->imm_count +
-					     ctask->unsol_count,
+		err = iser_prepare_write_cmd(task,
+					     task->imm_count,
+					     task->imm_count +
+					     task->unsol_count,
 					     edtl);
 		if (err)
 			goto send_command_error;
@@ -378,27 +378,27 @@ int iser_send_command(struct iscsi_conn *conn,
 		goto send_command_error;
 	}
 
-	iser_ctask->status = ISER_TASK_STATUS_STARTED;
+	iser_task->status = ISER_TASK_STATUS_STARTED;
 
-	err = iser_post_send(&iser_ctask->desc);
+	err = iser_post_send(&iser_task->desc);
 	if (!err)
 		return 0;
 
send_command_error:
 	iser_dto_buffs_release(send_dto);
-	iser_err("conn %p failed ctask->itt %d err %d\n",conn, ctask->itt, err);
+	iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
 	return err;
 }
 
 /**
  * iser_send_data_out - send data out PDU
  */
 int iser_send_data_out(struct iscsi_conn *conn,
-		       struct iscsi_cmd_task *ctask,
+		       struct iscsi_task *task,
 		       struct iscsi_data *hdr)
 {
 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
-	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+	struct iscsi_iser_task *iser_task = task->dd_data;
 	struct iser_desc *tx_desc = NULL;
 	struct iser_dto *send_dto = NULL;
 	unsigned long buf_offset;
@@ -411,7 +411,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
 		return -EPERM;
 	}
 
-	if (iser_check_xmit(conn, ctask))
+	if (iser_check_xmit(conn, task))
 		return -ENOBUFS;
 
 	itt = (__force uint32_t)hdr->itt;
@@ -432,7 +432,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
 
 	/* build the tx desc regd header and add it to the tx desc dto */
 	send_dto = &tx_desc->dto;
-	send_dto->ctask = iser_ctask;
+	send_dto->task = iser_task;
 	iser_create_send_desc(iser_conn, tx_desc);
 
 	iser_reg_single(iser_conn->ib_conn->device,
@@ -440,15 +440,15 @@ int iser_send_data_out(struct iscsi_conn *conn,
 
 	/* all data was registered for RDMA, we can use the lkey */
 	iser_dto_add_regd_buff(send_dto,
-			       &iser_ctask->rdma_regd[ISER_DIR_OUT],
+			       &iser_task->rdma_regd[ISER_DIR_OUT],
 			       buf_offset,
 			       data_seg_len);
 
-	if (buf_offset + data_seg_len > iser_ctask->data[ISER_DIR_OUT].data_len) {
+	if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
 		iser_err("Offset:%ld & DSL:%ld in Data-Out "
 			 "inconsistent with total len:%ld, itt:%d\n",
 			 buf_offset, data_seg_len,
-			 iser_ctask->data[ISER_DIR_OUT].data_len, itt);
+			 iser_task->data[ISER_DIR_OUT].data_len, itt);
 		err = -EINVAL;
 		goto send_data_out_error;
 	}
@@ -468,10 +468,11 @@ send_data_out_error:
 }
 
 int iser_send_control(struct iscsi_conn *conn,
-		      struct iscsi_mgmt_task *mtask)
+		      struct iscsi_task *task)
 {
 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
-	struct iser_desc *mdesc = mtask->dd_data;
+	struct iscsi_iser_task *iser_task = task->dd_data;
+	struct iser_desc *mdesc = &iser_task->desc;
 	struct iser_dto *send_dto = NULL;
 	unsigned long data_seg_len;
 	int err = 0;
@@ -483,27 +484,27 @@ int iser_send_control(struct iscsi_conn *conn,
 		return -EPERM;
 	}
 
-	if (iser_check_xmit(conn,mtask))
+	if (iser_check_xmit(conn, task))
 		return -ENOBUFS;
 
 	/* build the tx desc regd header and add it to the tx desc dto */
 	mdesc->type = ISCSI_TX_CONTROL;
 	send_dto = &mdesc->dto;
-	send_dto->ctask = NULL;
+	send_dto->task = NULL;
 	iser_create_send_desc(iser_conn, mdesc);
 
 	device = iser_conn->ib_conn->device;
 
 	iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
 
-	data_seg_len = ntoh24(mtask->hdr->dlength);
+	data_seg_len = ntoh24(task->hdr->dlength);
 
 	if (data_seg_len > 0) {
 		regd_buf = &mdesc->data_regd_buf;
 		memset(regd_buf, 0, sizeof(struct iser_regd_buf));
 		regd_buf->device = device;
-		regd_buf->virt_addr = mtask->data;
-		regd_buf->data_size = mtask->data_count;
+		regd_buf->virt_addr = task->data;
+		regd_buf->data_size = task->data_count;
 		iser_reg_single(device, regd_buf,
 				DMA_TO_DEVICE);
 		iser_dto_add_regd_buff(send_dto, regd_buf,
@@ -533,15 +534,13 @@ send_control_error:
 void iser_rcv_completion(struct iser_desc *rx_desc,
 			 unsigned long dto_xfer_len)
 {
 	struct iser_dto *dto = &rx_desc->dto;
 	struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
-	struct iscsi_session *session = conn->iscsi_conn->session;
-	struct iscsi_cmd_task *ctask;
-	struct iscsi_iser_cmd_task *iser_ctask;
+	struct iscsi_task *task;
+	struct iscsi_iser_task *iser_task;
 	struct iscsi_hdr *hdr;
 	char *rx_data = NULL;
 	int rx_data_len = 0;
-	unsigned int itt;
 	unsigned char opcode;
 
 	hdr = &rx_desc->iscsi_header;
@@ -557,19 +556,24 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
 	opcode = hdr->opcode & ISCSI_OPCODE_MASK;
 
 	if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
-		itt = get_itt(hdr->itt); /* mask out cid and age bits */
-		if (!(itt < session->cmds_max))
+		spin_lock(&conn->iscsi_conn->session->lock);
+		task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
+		if (task)
+			__iscsi_get_task(task);
+		spin_unlock(&conn->iscsi_conn->session->lock);
+
+		if (!task)
 			iser_err("itt can't be matched to task!!! "
-				 "conn %p opcode %d cmds_max %d itt %d\n",
-				 conn->iscsi_conn,opcode,session->cmds_max,itt);
-		/* use the mapping given with the cmds array indexed by itt */
-		ctask = (struct iscsi_cmd_task *)session->cmds[itt];
-		iser_ctask = ctask->dd_data;
-		iser_dbg("itt %d ctask %p\n",itt,ctask);
-		iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
-		iser_ctask_rdma_finalize(iser_ctask);
+				 "conn %p opcode %d itt %d\n",
+				 conn->iscsi_conn, opcode, hdr->itt);
+		else {
+			iser_task = task->dd_data;
+			iser_dbg("itt %d task %p\n",hdr->itt, task);
+			iser_task->status = ISER_TASK_STATUS_COMPLETED;
+			iser_task_rdma_finalize(iser_task);
+			iscsi_put_task(task);
+		}
 	}
 	iser_dto_buffs_release(dto);
 
 	iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);
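The completion path now resolves the itt through libiscsi instead of indexing session->cmds[] directly, and it pins the task before dropping the session lock, so a competing completion cannot free it while iSER finalizes its RDMA state. The pattern, isolated as a hypothetical helper:

	static struct iscsi_task *get_task_sketch(struct iscsi_conn *conn,
						  itt_t itt)
	{
		struct iscsi_task *task;

		spin_lock(&conn->session->lock);
		task = iscsi_itt_to_ctask(conn, itt);
		if (task)
			__iscsi_get_task(task);	/* ref survives the unlock */
		spin_unlock(&conn->session->lock);
		return task;			/* caller drops with iscsi_put_task() */
	}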
@@ -590,7 +594,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
 	struct iser_conn       *ib_conn = dto->ib_conn;
 	struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
 	struct iscsi_conn      *conn = iser_conn->iscsi_conn;
-	struct iscsi_mgmt_task *mtask;
+	struct iscsi_task *task;
 	int resume_tx = 0;
 
 	iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
@@ -613,36 +617,31 @@ void iser_snd_completion(struct iser_desc *tx_desc)
 
 	if (tx_desc->type == ISCSI_TX_CONTROL) {
 		/* this arithmetic is legal by libiscsi dd_data allocation */
-		mtask = (void *) ((long)(void *)tx_desc -
-				  sizeof(struct iscsi_mgmt_task));
-		if (mtask->hdr->itt == RESERVED_ITT) {
-			struct iscsi_session *session = conn->session;
-
-			spin_lock(&conn->session->lock);
-			iscsi_free_mgmt_task(conn, mtask);
-			spin_unlock(&session->lock);
-		}
+		task = (void *) ((long)(void *)tx_desc -
+				 sizeof(struct iscsi_task));
+		if (task->hdr->itt == RESERVED_ITT)
+			iscsi_put_task(task);
 	}
 }
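The "legal arithmetic" comment relies on how libiscsi lays out per-task driver data: dd_data is carved out of the same allocation, directly behind struct iscsi_task, so stepping back sizeof(struct iscsi_task) from the iser_desc recovers the owning task. As a sketch (hypothetical helper name):

	/*
	 * Layout assumed by the subtraction above (simplified):
	 *
	 *   +--------------------+---------------------+
	 *   | struct iscsi_task  | dd_data (iser_desc) |
	 *   +--------------------+---------------------+
	 */
	static inline struct iscsi_task *desc_to_task_sketch(struct iser_desc *desc)
	{
		return (struct iscsi_task *)((char *)desc -
					     sizeof(struct iscsi_task));
	}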
-void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask)
+void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
 {
-	iser_ctask->status = ISER_TASK_STATUS_INIT;
+	iser_task->status = ISER_TASK_STATUS_INIT;
 
-	iser_ctask->dir[ISER_DIR_IN] = 0;
-	iser_ctask->dir[ISER_DIR_OUT] = 0;
+	iser_task->dir[ISER_DIR_IN] = 0;
+	iser_task->dir[ISER_DIR_OUT] = 0;
 
-	iser_ctask->data[ISER_DIR_IN].data_len  = 0;
-	iser_ctask->data[ISER_DIR_OUT].data_len = 0;
+	iser_task->data[ISER_DIR_IN].data_len  = 0;
+	iser_task->data[ISER_DIR_OUT].data_len = 0;
 
-	memset(&iser_ctask->rdma_regd[ISER_DIR_IN], 0,
+	memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
 	       sizeof(struct iser_regd_buf));
-	memset(&iser_ctask->rdma_regd[ISER_DIR_OUT], 0,
+	memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
 	       sizeof(struct iser_regd_buf));
 }
 
-void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
+void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
 {
 	int deferred;
 	int is_rdma_aligned = 1;
@@ -651,17 +650,17 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
 	/* if we were reading, copy back to unaligned sglist,
 	 * anyway dma_unmap and free the copy
 	 */
-	if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) {
+	if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
 		is_rdma_aligned = 0;
-		iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN);
+		iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
 	}
-	if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
+	if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
 		is_rdma_aligned = 0;
-		iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT);
+		iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
 	}
 
-	if (iser_ctask->dir[ISER_DIR_IN]) {
-		regd = &iser_ctask->rdma_regd[ISER_DIR_IN];
+	if (iser_task->dir[ISER_DIR_IN]) {
+		regd = &iser_task->rdma_regd[ISER_DIR_IN];
 		deferred = iser_regd_buff_release(regd);
 		if (deferred) {
 			iser_err("%d references remain for BUF-IN rdma reg\n",
@@ -669,8 +668,8 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
 		}
 	}
 
-	if (iser_ctask->dir[ISER_DIR_OUT]) {
-		regd = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+	if (iser_task->dir[ISER_DIR_OUT]) {
+		regd = &iser_task->rdma_regd[ISER_DIR_OUT];
 		deferred = iser_regd_buff_release(regd);
 		if (deferred) {
 			iser_err("%d references remain for BUF-OUT rdma reg\n",
@@ -680,7 +679,7 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
 
 	/* if the data was unaligned, it was already unmapped and then copied */
 	if (is_rdma_aligned)
-		iser_dma_unmap_task_data(iser_ctask);
+		iser_dma_unmap_task_data(iser_task);
 }
 
 void iser_dto_buffs_release(struct iser_dto *dto)

diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c

@@ -99,13 +99,13 @@ void iser_reg_single(struct iser_device *device,
 /**
  * iser_start_rdma_unaligned_sg
  */
-static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 					enum iser_data_dir cmd_dir)
 {
 	int dma_nents;
 	struct ib_device *dev;
 	char *mem = NULL;
-	struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
+	struct iser_data_buf *data = &iser_task->data[cmd_dir];
 	unsigned long  cmd_data_len = data->data_len;
 
 	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
@@ -138,37 +138,37 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 		}
 	}
 
-	sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
-	iser_ctask->data_copy[cmd_dir].buf  =
-		&iser_ctask->data_copy[cmd_dir].sg_single;
-	iser_ctask->data_copy[cmd_dir].size = 1;
+	sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
+	iser_task->data_copy[cmd_dir].buf  =
+		&iser_task->data_copy[cmd_dir].sg_single;
+	iser_task->data_copy[cmd_dir].size = 1;
 
-	iser_ctask->data_copy[cmd_dir].copy_buf  = mem;
+	iser_task->data_copy[cmd_dir].copy_buf  = mem;
 
-	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+	dev = iser_task->iser_conn->ib_conn->device->ib_device;
 	dma_nents = ib_dma_map_sg(dev,
-				  &iser_ctask->data_copy[cmd_dir].sg_single,
+				  &iser_task->data_copy[cmd_dir].sg_single,
 				  1,
 				  (cmd_dir == ISER_DIR_OUT) ?
 				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	BUG_ON(dma_nents == 0);
 
-	iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
+	iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
 	return 0;
 }
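For reference, the function above implements a classic bounce buffer: when the scatterlist cannot be expressed as one aligned FMR region, the payload is linearized into a single contiguous allocation, wrapped in a one-entry scatterlist, and that copy is DMA-mapped in place of the caller's list. Condensed sketch (hypothetical name; allocation-size policy and copy-out elided):

	static int bounce_map_sketch(struct iscsi_iser_task *iser_task,
				     enum iser_data_dir dir,
				     struct ib_device *dev)
	{
		struct iser_data_buf *copy = &iser_task->data_copy[dir];
		unsigned long len = iser_task->data[dir].data_len;
		char *mem = kmalloc(len, GFP_NOIO);	/* large bufs: __get_free_pages() */

		if (!mem)
			return -ENOMEM;
		sg_init_one(&copy->sg_single, mem, len);
		copy->buf = &copy->sg_single;
		copy->size = 1;
		copy->copy_buf = mem;
		copy->dma_nents = ib_dma_map_sg(dev, &copy->sg_single, 1,
						dir == ISER_DIR_OUT ?
						DMA_TO_DEVICE : DMA_FROM_DEVICE);
		return copy->dma_nents ? 0 : -EIO;
	}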
 /**
  * iser_finalize_rdma_unaligned_sg
  */
-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 				     enum iser_data_dir cmd_dir)
 {
 	struct ib_device *dev;
 	struct iser_data_buf *mem_copy;
 	unsigned long cmd_data_len;
 
-	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
-	mem_copy = &iser_ctask->data_copy[cmd_dir];
+	dev = iser_task->iser_conn->ib_conn->device->ib_device;
+	mem_copy = &iser_task->data_copy[cmd_dir];
 
 	ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
 			(cmd_dir == ISER_DIR_OUT) ?
@@ -184,8 +184,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 		/* copy back read RDMA to unaligned sg */
 		mem = mem_copy->copy_buf;
 
-		sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
-		sg_size = iser_ctask->data[ISER_DIR_IN].size;
+		sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
+		sg_size = iser_task->data[ISER_DIR_IN].size;
 
 		p = mem;
 		for_each_sg(sgl, sg, sg_size, i) {
@@ -198,7 +198,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 		}
 	}
 
-	cmd_data_len = iser_ctask->data[cmd_dir].data_len;
+	cmd_data_len = iser_task->data[cmd_dir].data_len;
 
 	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
 		free_pages((unsigned long)mem_copy->copy_buf,
@@ -376,15 +376,15 @@ static void iser_page_vec_build(struct iser_data_buf *data,
 	}
 }
 
-int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 			   struct iser_data_buf *data,
 			   enum iser_data_dir iser_dir,
 			   enum dma_data_direction dma_dir)
 {
 	struct ib_device *dev;
 
-	iser_ctask->dir[iser_dir] = 1;
-	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+	iser_task->dir[iser_dir] = 1;
+	dev = iser_task->iser_conn->ib_conn->device->ib_device;
 
 	data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
 	if (data->dma_nents == 0) {
@@ -394,20 +394,20 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
 	return 0;
 }
 
-void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
+void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
 {
 	struct ib_device *dev;
 	struct iser_data_buf *data;
 
-	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+	dev = iser_task->iser_conn->ib_conn->device->ib_device;
 
-	if (iser_ctask->dir[ISER_DIR_IN]) {
-		data = &iser_ctask->data[ISER_DIR_IN];
+	if (iser_task->dir[ISER_DIR_IN]) {
+		data = &iser_task->data[ISER_DIR_IN];
 		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
 	}
 
-	if (iser_ctask->dir[ISER_DIR_OUT]) {
-		data = &iser_ctask->data[ISER_DIR_OUT];
+	if (iser_task->dir[ISER_DIR_OUT]) {
+		data = &iser_task->data[ISER_DIR_OUT];
 		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
 	}
 }
@@ -418,21 +418,21 @@ void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
  *
  * returns 0 on success, errno code on failure
  */
-int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
+int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
 		      enum iser_data_dir cmd_dir)
 {
-	struct iscsi_conn *iscsi_conn = iser_ctask->iser_conn->iscsi_conn;
-	struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
+	struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+	struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
 	struct iser_device *device = ib_conn->device;
 	struct ib_device *ibdev = device->ib_device;
-	struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
+	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
 	struct iser_regd_buf *regd_buf;
 	int aligned_len;
 	int err;
 	int i;
 	struct scatterlist *sg;
 
-	regd_buf = &iser_ctask->rdma_regd[cmd_dir];
+	regd_buf = &iser_task->rdma_regd[cmd_dir];
 
 	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
 	if (aligned_len != mem->dma_nents) {
@@ -442,13 +442,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
 		iser_data_buf_dump(mem, ibdev);
 
 		/* unmap the command data before accessing it */
-		iser_dma_unmap_task_data(iser_ctask);
+		iser_dma_unmap_task_data(iser_task);
 
 		/* allocate copy buf, if we are writing, copy the */
 		/* unaligned scatterlist, dma map the copy */
-		if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
+		if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
 			return -ENOMEM;
-		mem = &iser_ctask->data_copy[cmd_dir];
+		mem = &iser_task->data_copy[cmd_dir];
 	}
 
 	/* if there a single dma entry, FMR is not needed */
@@ -472,8 +472,9 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
 	err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
 	if (err) {
 		iser_data_buf_dump(mem, ibdev);
-		iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
+		iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
ntoh24(iser_ctask->desc.iscsi_header.dlength)); mem->dma_nents,
ntoh24(iser_task->desc.iscsi_header.dlength));
iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n", iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
ib_conn->page_vec->data_size, ib_conn->page_vec->length, ib_conn->page_vec->data_size, ib_conn->page_vec->length,
ib_conn->page_vec->offset); ib_conn->page_vec->offset);
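The unaligned path above bounces RDMA data through one contiguous copy buffer and walks the original scatterlist with for_each_sg(). A minimal sketch of that copy-back idiom, with hypothetical names and outside the scope of this patch:

    #include <linux/scatterlist.h>
    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Illustrative only: copy 'nents' entries worth of data from a flat
     * buffer back into a scatterlist, one page mapping at a time. */
    static void copy_buf_to_sg(void *buf, struct scatterlist *sgl, int nents)
    {
        struct scatterlist *sg;
        char *p = buf;
        int i;

        for_each_sg(sgl, sg, nents, i) {
            char *va = kmap_atomic(sg_page(sg), KM_USER0);

            memcpy(va + sg->offset, p, sg->length);
            kunmap_atomic(va, KM_USER0);
            p += sg->length;
        }
    }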

diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c

@@ -323,7 +323,18 @@ static void iser_conn_release(struct iser_conn *ib_conn)
iser_device_try_release(device);
if (ib_conn->iser_conn)
ib_conn->iser_conn->ib_conn = NULL;
- kfree(ib_conn);
+ iscsi_destroy_endpoint(ib_conn->ep);
+ }
+
+ void iser_conn_get(struct iser_conn *ib_conn)
+ {
+ atomic_inc(&ib_conn->refcount);
+ }
+
+ void iser_conn_put(struct iser_conn *ib_conn)
+ {
+ if (atomic_dec_and_test(&ib_conn->refcount))
+ iser_conn_release(ib_conn);
}
/**
@@ -347,7 +358,7 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
wait_event_interruptible(ib_conn->wait,
ib_conn->state == ISER_CONN_DOWN);
- iser_conn_release(ib_conn);
+ iser_conn_put(ib_conn);
}
static void iser_connect_error(struct rdma_cm_id *cma_id)
@@ -481,24 +492,15 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
return ret;
}
- int iser_conn_init(struct iser_conn **ibconn)
+ void iser_conn_init(struct iser_conn *ib_conn)
{
- struct iser_conn *ib_conn;
-
- ib_conn = kzalloc(sizeof *ib_conn, GFP_KERNEL);
- if (!ib_conn) {
- iser_err("can't alloc memory for struct iser_conn\n");
- return -ENOMEM;
- }
ib_conn->state = ISER_CONN_INIT;
init_waitqueue_head(&ib_conn->wait);
atomic_set(&ib_conn->post_recv_buf_count, 0);
atomic_set(&ib_conn->post_send_buf_count, 0);
+ atomic_set(&ib_conn->refcount, 1);
INIT_LIST_HEAD(&ib_conn->conn_list);
spin_lock_init(&ib_conn->lock);
-
- *ibconn = ib_conn;
- return 0;
}
/**
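The iser_conn_get()/iser_conn_put() pair added above is the usual atomic_t reference pattern: the owner starts the count at 1 (as iser_conn_init() now does) and the final put releases the object. A generic sketch of the idiom, with a hypothetical type:

    #include <asm/atomic.h>
    #include <linux/slab.h>

    struct obj {
        atomic_t refcount;      /* set to 1 by the creator */
    };

    static void obj_get(struct obj *o)
    {
        atomic_inc(&o->refcount);
    }

    static void obj_put(struct obj *o)
    {
        /* atomic_dec_and_test() returns true only for the last reference */
        if (atomic_dec_and_test(&o->refcount))
            kfree(o);
    }

Every extra user brackets its access with obj_get()/obj_put(); teardown paths such as iser_conn_terminate() then drop their reference instead of freeing directly.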

diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig

@@ -252,27 +252,14 @@ config DM_ZERO
config DM_MULTIPATH
tristate "Multipath target"
depends on BLK_DEV_DM
+ # nasty syntax but means make DM_MULTIPATH independent
+ # of SCSI_DH if the latter isn't defined but if
+ # it is, DM_MULTIPATH must depend on it. We get a build
+ # error if SCSI_DH=m and DM_MULTIPATH=y
+ depends on SCSI_DH || !SCSI_DH
---help---
Allow volume managers to support multipath hardware.
- config DM_MULTIPATH_EMC
- tristate "EMC CX/AX multipath support"
- depends on DM_MULTIPATH && BLK_DEV_DM
- ---help---
- Multipath support for EMC CX/AX series hardware.
-
- config DM_MULTIPATH_RDAC
- tristate "LSI/Engenio RDAC multipath support (EXPERIMENTAL)"
- depends on DM_MULTIPATH && BLK_DEV_DM && SCSI && EXPERIMENTAL
- ---help---
- Multipath support for LSI/Engenio RDAC.
-
- config DM_MULTIPATH_HP
- tristate "HP MSA multipath support (EXPERIMENTAL)"
- depends on DM_MULTIPATH && BLK_DEV_DM && SCSI && EXPERIMENTAL
- ---help---
- Multipath support for HP MSA (Active/Passive) series hardware.
config DM_DELAY
tristate "I/O delaying target (EXPERIMENTAL)"
depends on BLK_DEV_DM && EXPERIMENTAL

diff --git a/drivers/md/Makefile b/drivers/md/Makefile

@@ -4,11 +4,9 @@
dm-mod-objs := dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
dm-ioctl.o dm-io.o dm-kcopyd.o
- dm-multipath-objs := dm-hw-handler.o dm-path-selector.o dm-mpath.o
+ dm-multipath-objs := dm-path-selector.o dm-mpath.o
dm-snapshot-objs := dm-snap.o dm-exception-store.o
dm-mirror-objs := dm-raid1.o
- dm-rdac-objs := dm-mpath-rdac.o
- dm-hp-sw-objs := dm-mpath-hp-sw.o
md-mod-objs := md.o bitmap.o
raid456-objs := raid5.o raid6algos.o raid6recov.o raid6tables.o \
raid6int1.o raid6int2.o raid6int4.o \
@@ -35,9 +33,6 @@ obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
obj-$(CONFIG_DM_DELAY) += dm-delay.o
obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
- obj-$(CONFIG_DM_MULTIPATH_EMC) += dm-emc.o
- obj-$(CONFIG_DM_MULTIPATH_HP) += dm-hp-sw.o
- obj-$(CONFIG_DM_MULTIPATH_RDAC) += dm-rdac.o
obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o
obj-$(CONFIG_DM_ZERO) += dm-zero.o

diff --git a/drivers/md/dm-emc.c b/drivers/md/dm-emc.c
deleted file mode 100644

@@ -1,345 +0,0 @@
/*
* Copyright (C) 2004 SUSE LINUX Products GmbH. All rights reserved.
* Copyright (C) 2004 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*
* Multipath support for EMC CLARiiON AX/CX-series hardware.
*/
#include "dm.h"
#include "dm-hw-handler.h"
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#define DM_MSG_PREFIX "multipath emc"
struct emc_handler {
spinlock_t lock;
/* Whether we should send the short trespass command (FC-series)
* or the long version (default for AX/CX CLARiiON arrays). */
unsigned short_trespass;
/* Whether or not to honor SCSI reservations when initiating a
* switch-over. Default: Don't. */
unsigned hr;
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
};
#define TRESPASS_PAGE 0x22
#define EMC_FAILOVER_TIMEOUT (60 * HZ)
/* Code borrowed from dm-lsi-rdac by Mike Christie */
static inline void free_bio(struct bio *bio)
{
__free_page(bio->bi_io_vec[0].bv_page);
bio_put(bio);
}
static void emc_endio(struct bio *bio, int error)
{
struct dm_path *path = bio->bi_private;
/* We also need to look at the sense keys here whether or not to
* switch to the next PG etc.
*
* For now simple logic: either it works or it doesn't.
*/
if (error)
dm_pg_init_complete(path, MP_FAIL_PATH);
else
dm_pg_init_complete(path, 0);
/* request is freed in block layer */
free_bio(bio);
}
static struct bio *get_failover_bio(struct dm_path *path, unsigned data_size)
{
struct bio *bio;
struct page *page;
bio = bio_alloc(GFP_ATOMIC, 1);
if (!bio) {
DMERR("get_failover_bio: bio_alloc() failed.");
return NULL;
}
bio->bi_rw |= (1 << BIO_RW);
bio->bi_bdev = path->dev->bdev;
bio->bi_sector = 0;
bio->bi_private = path;
bio->bi_end_io = emc_endio;
page = alloc_page(GFP_ATOMIC);
if (!page) {
DMERR("get_failover_bio: alloc_page() failed.");
bio_put(bio);
return NULL;
}
if (bio_add_page(bio, page, data_size, 0) != data_size) {
DMERR("get_failover_bio: bio_add_page() failed.");
__free_page(page);
bio_put(bio);
return NULL;
}
return bio;
}
static struct request *get_failover_req(struct emc_handler *h,
struct bio *bio, struct dm_path *path)
{
struct request *rq;
struct block_device *bdev = bio->bi_bdev;
struct request_queue *q = bdev_get_queue(bdev);
/* FIXME: Figure out why it fails with GFP_ATOMIC. */
rq = blk_get_request(q, WRITE, __GFP_WAIT);
if (!rq) {
DMERR("get_failover_req: blk_get_request failed");
return NULL;
}
blk_rq_append_bio(q, rq, bio);
rq->sense = h->sense;
memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
rq->sense_len = 0;
rq->timeout = EMC_FAILOVER_TIMEOUT;
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
return rq;
}
static struct request *emc_trespass_get(struct emc_handler *h,
struct dm_path *path)
{
struct bio *bio;
struct request *rq;
unsigned char *page22;
unsigned char long_trespass_pg[] = {
0, 0, 0, 0,
TRESPASS_PAGE, /* Page code */
0x09, /* Page length - 2 */
h->hr ? 0x01 : 0x81, /* Trespass code + Honor reservation bit */
0xff, 0xff, /* Trespass target */
0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */
};
unsigned char short_trespass_pg[] = {
0, 0, 0, 0,
TRESPASS_PAGE, /* Page code */
0x02, /* Page length - 2 */
h->hr ? 0x01 : 0x81, /* Trespass code + Honor reservation bit */
0xff, /* Trespass target */
};
unsigned data_size = h->short_trespass ? sizeof(short_trespass_pg) :
sizeof(long_trespass_pg);
/* get bio backing */
if (data_size > PAGE_SIZE)
/* this should never happen */
return NULL;
bio = get_failover_bio(path, data_size);
if (!bio) {
DMERR("emc_trespass_get: no bio");
return NULL;
}
page22 = (unsigned char *)bio_data(bio);
memset(page22, 0, data_size);
memcpy(page22, h->short_trespass ?
short_trespass_pg : long_trespass_pg, data_size);
/* get request for block layer packet command */
rq = get_failover_req(h, bio, path);
if (!rq) {
DMERR("emc_trespass_get: no rq");
free_bio(bio);
return NULL;
}
/* Prepare the command. */
rq->cmd[0] = MODE_SELECT;
rq->cmd[1] = 0x10;
rq->cmd[4] = data_size;
rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
return rq;
}
static void emc_pg_init(struct hw_handler *hwh, unsigned bypassed,
struct dm_path *path)
{
struct request *rq;
struct request_queue *q = bdev_get_queue(path->dev->bdev);
/*
* We can either blindly init the pg (then look at the sense),
* or we can send some commands to get the state here (then
* possibly send the fo cmnd), or we can also have the
* initial state passed into us and then get an update here.
*/
if (!q) {
DMINFO("emc_pg_init: no queue");
goto fail_path;
}
/* FIXME: The request should be pre-allocated. */
rq = emc_trespass_get(hwh->context, path);
if (!rq) {
DMERR("emc_pg_init: no rq");
goto fail_path;
}
DMINFO("emc_pg_init: sending switch-over command");
elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1);
return;
fail_path:
dm_pg_init_complete(path, MP_FAIL_PATH);
}
static struct emc_handler *alloc_emc_handler(void)
{
struct emc_handler *h = kzalloc(sizeof(*h), GFP_KERNEL);
if (h)
spin_lock_init(&h->lock);
return h;
}
static int emc_create(struct hw_handler *hwh, unsigned argc, char **argv)
{
struct emc_handler *h;
unsigned hr, short_trespass;
if (argc == 0) {
/* No arguments: use defaults */
hr = 0;
short_trespass = 0;
} else if (argc != 2) {
DMWARN("incorrect number of arguments");
return -EINVAL;
} else {
if ((sscanf(argv[0], "%u", &short_trespass) != 1)
|| (short_trespass > 1)) {
DMWARN("invalid trespass mode selected");
return -EINVAL;
}
if ((sscanf(argv[1], "%u", &hr) != 1)
|| (hr > 1)) {
DMWARN("invalid honor reservation flag selected");
return -EINVAL;
}
}
h = alloc_emc_handler();
if (!h)
return -ENOMEM;
hwh->context = h;
if ((h->short_trespass = short_trespass))
DMWARN("short trespass command will be send");
else
DMWARN("long trespass command will be send");
if ((h->hr = hr))
DMWARN("honor reservation bit will be set");
else
DMWARN("honor reservation bit will not be set (default)");
return 0;
}
static void emc_destroy(struct hw_handler *hwh)
{
struct emc_handler *h = (struct emc_handler *) hwh->context;
kfree(h);
hwh->context = NULL;
}
static unsigned emc_error(struct hw_handler *hwh, struct bio *bio)
{
/* FIXME: Patch from axboe still missing */
#if 0
int sense;
if (bio->bi_error & BIO_SENSE) {
sense = bio->bi_error & 0xffffff; /* sense key / asc / ascq */
if (sense == 0x020403) {
/* LUN Not Ready - Manual Intervention Required
* indicates this is a passive path.
*
* FIXME: However, if this is seen and EVPD C0
* indicates that this is due to a NDU in
* progress, we should set FAIL_PATH too.
* This indicates we might have to do a SCSI
* inquiry in the end_io path. Ugh. */
return MP_BYPASS_PG | MP_RETRY_IO;
} else if (sense == 0x052501) {
/* An array based copy is in progress. Do not
* fail the path, do not bypass to another PG,
* do not retry. Fail the IO immediately.
* (Actually this is the same conclusion as in
* the default handler, but lets make sure.) */
return 0;
} else if (sense == 0x062900) {
/* Unit Attention Code. This is the first IO
* to the new path, so just retry. */
return MP_RETRY_IO;
}
}
#endif
/* Try default handler */
return dm_scsi_err_handler(hwh, bio);
}
static struct hw_handler_type emc_hwh = {
.name = "emc",
.module = THIS_MODULE,
.create = emc_create,
.destroy = emc_destroy,
.pg_init = emc_pg_init,
.error = emc_error,
};
static int __init dm_emc_init(void)
{
int r = dm_register_hw_handler(&emc_hwh);
if (r < 0)
DMERR("register failed %d", r);
DMINFO("version 0.0.3 loaded");
return r;
}
static void __exit dm_emc_exit(void)
{
int r = dm_unregister_hw_handler(&emc_hwh);
if (r < 0)
DMERR("unregister failed %d", r);
}
module_init(dm_emc_init);
module_exit(dm_emc_exit);
MODULE_DESCRIPTION(DM_NAME " EMC CX/AX/FC-family multipath");
MODULE_AUTHOR("Lars Marowsky-Bree <lmb@suse.de>");
MODULE_LICENSE("GPL");

diff --git a/drivers/md/dm-hw-handler.c b/drivers/md/dm-hw-handler.c
deleted file mode 100644

@@ -1,213 +0,0 @@
/*
* Copyright (C) 2004 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*
* Multipath hardware handler registration.
*/
#include "dm.h"
#include "dm-hw-handler.h"
#include <linux/slab.h>
struct hwh_internal {
struct hw_handler_type hwht;
struct list_head list;
long use;
};
#define hwht_to_hwhi(__hwht) container_of((__hwht), struct hwh_internal, hwht)
static LIST_HEAD(_hw_handlers);
static DECLARE_RWSEM(_hwh_lock);
static struct hwh_internal *__find_hw_handler_type(const char *name)
{
struct hwh_internal *hwhi;
list_for_each_entry(hwhi, &_hw_handlers, list) {
if (!strcmp(name, hwhi->hwht.name))
return hwhi;
}
return NULL;
}
static struct hwh_internal *get_hw_handler(const char *name)
{
struct hwh_internal *hwhi;
down_read(&_hwh_lock);
hwhi = __find_hw_handler_type(name);
if (hwhi) {
if ((hwhi->use == 0) && !try_module_get(hwhi->hwht.module))
hwhi = NULL;
else
hwhi->use++;
}
up_read(&_hwh_lock);
return hwhi;
}
struct hw_handler_type *dm_get_hw_handler(const char *name)
{
struct hwh_internal *hwhi;
if (!name)
return NULL;
hwhi = get_hw_handler(name);
if (!hwhi) {
request_module("dm-%s", name);
hwhi = get_hw_handler(name);
}
return hwhi ? &hwhi->hwht : NULL;
}
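dm_get_hw_handler() above shows the classic lookup, autoload, retry idiom for pluggable kernel registries: if the name is unknown, request_module() is asked to load a module whose alias is derived from the name, then the lookup is repeated. Distilled into a sketch that reuses the registry helpers defined above (the wrapper name is hypothetical):

    #include <linux/kmod.h>

    /* Illustrative: resolve a handler, loading its module on demand. */
    static struct hwh_internal *get_hw_handler_autoload(const char *name)
    {
        struct hwh_internal *hwhi = get_hw_handler(name);

        if (!hwhi) {
            /* may pull in dm-emc.ko, dm-hp-sw.ko, ...; then retry */
            request_module("dm-%s", name);
            hwhi = get_hw_handler(name);
        }
        return hwhi;
    }

The same shape reappears later in this series, where dm-mpath calls request_module("scsi_dh_%s", ...) before asking the SCSI layer whether the handler exists.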
void dm_put_hw_handler(struct hw_handler_type *hwht)
{
struct hwh_internal *hwhi;
if (!hwht)
return;
down_read(&_hwh_lock);
hwhi = __find_hw_handler_type(hwht->name);
if (!hwhi)
goto out;
if (--hwhi->use == 0)
module_put(hwhi->hwht.module);
BUG_ON(hwhi->use < 0);
out:
up_read(&_hwh_lock);
}
static struct hwh_internal *_alloc_hw_handler(struct hw_handler_type *hwht)
{
struct hwh_internal *hwhi = kzalloc(sizeof(*hwhi), GFP_KERNEL);
if (hwhi)
hwhi->hwht = *hwht;
return hwhi;
}
int dm_register_hw_handler(struct hw_handler_type *hwht)
{
int r = 0;
struct hwh_internal *hwhi = _alloc_hw_handler(hwht);
if (!hwhi)
return -ENOMEM;
down_write(&_hwh_lock);
if (__find_hw_handler_type(hwht->name)) {
kfree(hwhi);
r = -EEXIST;
} else
list_add(&hwhi->list, &_hw_handlers);
up_write(&_hwh_lock);
return r;
}
int dm_unregister_hw_handler(struct hw_handler_type *hwht)
{
struct hwh_internal *hwhi;
down_write(&_hwh_lock);
hwhi = __find_hw_handler_type(hwht->name);
if (!hwhi) {
up_write(&_hwh_lock);
return -EINVAL;
}
if (hwhi->use) {
up_write(&_hwh_lock);
return -ETXTBSY;
}
list_del(&hwhi->list);
up_write(&_hwh_lock);
kfree(hwhi);
return 0;
}
unsigned dm_scsi_err_handler(struct hw_handler *hwh, struct bio *bio)
{
#if 0
int sense_key, asc, ascq;
if (bio->bi_error & BIO_SENSE) {
/* FIXME: This is just an initial guess. */
/* key / asc / ascq */
sense_key = (bio->bi_error >> 16) & 0xff;
asc = (bio->bi_error >> 8) & 0xff;
ascq = bio->bi_error & 0xff;
switch (sense_key) {
/* This block as a whole comes from the device.
* So no point retrying on another path. */
case 0x03: /* Medium error */
case 0x05: /* Illegal request */
case 0x07: /* Data protect */
case 0x08: /* Blank check */
case 0x0a: /* copy aborted */
case 0x0c: /* obsolete - no clue ;-) */
case 0x0d: /* volume overflow */
case 0x0e: /* data miscompare */
case 0x0f: /* reserved - no idea either. */
return MP_ERROR_IO;
/* For these errors it's unclear whether they
* come from the device or the controller.
* So just lets try a different path, and if
* it eventually succeeds, user-space will clear
* the paths again... */
case 0x02: /* Not ready */
case 0x04: /* Hardware error */
case 0x09: /* vendor specific */
case 0x0b: /* Aborted command */
return MP_FAIL_PATH;
case 0x06: /* Unit attention - might want to decode */
if (asc == 0x04 && ascq == 0x01)
/* "Unit in the process of
* becoming ready" */
return 0;
return MP_FAIL_PATH;
/* FIXME: For Unit Not Ready we may want
* to have a generic pg activation
* feature (START_UNIT). */
/* Should these two ever end up in the
* error path? I don't think so. */
case 0x00: /* No sense */
case 0x01: /* Recovered error */
return 0;
}
}
#endif
/* We got no idea how to decode the other kinds of errors ->
* assume generic error condition. */
return MP_FAIL_PATH;
}
EXPORT_SYMBOL_GPL(dm_register_hw_handler);
EXPORT_SYMBOL_GPL(dm_unregister_hw_handler);
EXPORT_SYMBOL_GPL(dm_scsi_err_handler);

diff --git a/drivers/md/dm-hw-handler.h b/drivers/md/dm-hw-handler.h
deleted file mode 100644

@@ -1,63 +0,0 @@
/*
* Copyright (C) 2004 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*
* Multipath hardware handler registration.
*/
#ifndef DM_HW_HANDLER_H
#define DM_HW_HANDLER_H
#include <linux/device-mapper.h>
#include "dm-mpath.h"
struct hw_handler_type;
struct hw_handler {
struct hw_handler_type *type;
struct mapped_device *md;
void *context;
};
/*
* Constructs a hardware handler object, takes custom arguments
*/
/* Information about a hardware handler type */
struct hw_handler_type {
char *name;
struct module *module;
int (*create) (struct hw_handler *handler, unsigned int argc,
char **argv);
void (*destroy) (struct hw_handler *hwh);
void (*pg_init) (struct hw_handler *hwh, unsigned bypassed,
struct dm_path *path);
unsigned (*error) (struct hw_handler *hwh, struct bio *bio);
int (*status) (struct hw_handler *hwh, status_type_t type,
char *result, unsigned int maxlen);
};
/* Register a hardware handler */
int dm_register_hw_handler(struct hw_handler_type *type);
/* Unregister a hardware handler */
int dm_unregister_hw_handler(struct hw_handler_type *type);
/* Returns a registered hardware handler type */
struct hw_handler_type *dm_get_hw_handler(const char *name);
/* Releases a hardware handler */
void dm_put_hw_handler(struct hw_handler_type *hwht);
/* Default err function */
unsigned dm_scsi_err_handler(struct hw_handler *hwh, struct bio *bio);
/* Error flags for err and dm_pg_init_complete */
#define MP_FAIL_PATH 1
#define MP_BYPASS_PG 2
#define MP_ERROR_IO 4 /* Don't retry this I/O */
#define MP_RETRY 8
#endif
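For reference, a minimal handler built against this now-removed interface would have looked like the sketch below; "demo" and the no-op callbacks are hypothetical, and only .create, .destroy and the registration calls were strictly required:

    #include <linux/module.h>
    #include "dm-hw-handler.h"

    static int demo_create(struct hw_handler *hwh, unsigned argc, char **argv)
    {
        hwh->context = NULL;    /* per-device state would go here */
        return 0;
    }

    static void demo_destroy(struct hw_handler *hwh)
    {
    }

    static void demo_pg_init(struct hw_handler *hwh, unsigned bypassed,
                             struct dm_path *path)
    {
        /* every pg_init must eventually report completion */
        dm_pg_init_complete(path, 0);
    }

    static struct hw_handler_type demo_hwh = {
        .name    = "demo",
        .module  = THIS_MODULE,
        .create  = demo_create,
        .destroy = demo_destroy,
        .pg_init = demo_pg_init,
    };

    /* paired in module init/exit with dm_register_hw_handler(&demo_hwh)
     * and dm_unregister_hw_handler(&demo_hwh) */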

diff --git a/drivers/md/dm-mpath-hp-sw.c b/drivers/md/dm-mpath-hp-sw.c
deleted file mode 100644

@@ -1,247 +0,0 @@
/*
* Copyright (C) 2005 Mike Christie, All rights reserved.
* Copyright (C) 2007 Red Hat, Inc. All rights reserved.
* Authors: Mike Christie
* Dave Wysochanski
*
* This file is released under the GPL.
*
* This module implements the specific path activation code for
* HP StorageWorks and FSC FibreCat Asymmetric (Active/Passive)
* storage arrays.
* These storage arrays have controller-based failover, not
* LUN-based failover. However, LUN-based failover is the design
* of dm-multipath. Thus, this module is written for LUN-based failover.
*/
#include <linux/blkdev.h>
#include <linux/list.h>
#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "dm.h"
#include "dm-hw-handler.h"
#define DM_MSG_PREFIX "multipath hp-sw"
#define DM_HP_HWH_NAME "hp-sw"
#define DM_HP_HWH_VER "1.0.0"
struct hp_sw_context {
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
};
/*
* hp_sw_error_is_retryable - Is an HP-specific check condition retryable?
* @req: path activation request
*
* Examine error codes of request and determine whether the error is retryable.
* Some error codes are already retried by scsi-ml (see
* scsi_decide_disposition), but some HP specific codes are not.
* The intent of this routine is to supply the logic for the HP specific
* check conditions.
*
* Returns:
* 1 - command completed with retryable error
* 0 - command completed with non-retryable error
*
* Possible optimizations
* 1. More hardware-specific error codes
*/
static int hp_sw_error_is_retryable(struct request *req)
{
/*
* NOT_READY is known to be retryable
* For now we just dump out the sense data and call it retryable
*/
if (status_byte(req->errors) == CHECK_CONDITION)
__scsi_print_sense(DM_HP_HWH_NAME, req->sense, req->sense_len);
/*
* At this point we don't have complete information about all the error
* codes from this hardware, so we are just conservative and retry
* when in doubt.
*/
return 1;
}
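hp_sw_error_is_retryable() only peeks at the status byte packed into req->errors. A slightly fuller sketch of decoding that word, assuming the era's request fields and a hypothetical caller:

    #include <linux/blkdev.h>
    #include <linux/kernel.h>
    #include <scsi/scsi.h>
    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_eh.h>

    /* Illustrative: split a block-layer SCSI result into its bytes and
     * normalize any sense data carried with the request. */
    static void decode_scsi_result(struct request *req)
    {
        struct scsi_sense_hdr sshdr;

        if (host_byte(req->errors) != DID_OK)
            return;     /* transport-level failure, no sense to read */

        if (status_byte(req->errors) == CHECK_CONDITION &&
            scsi_normalize_sense(req->sense, SCSI_SENSE_BUFFERSIZE, &sshdr))
            printk(KERN_DEBUG "sense %02x/%02x/%02x\n",
                   sshdr.sense_key, sshdr.asc, sshdr.ascq);
    }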
/*
* hp_sw_end_io - Completion handler for HP path activation.
* @req: path activation request
* @error: scsi-ml error
*
* Check sense data, free request structure, and notify dm that
* pg initialization has completed.
*
* Context: scsi-ml softirq
*
*/
static void hp_sw_end_io(struct request *req, int error)
{
struct dm_path *path = req->end_io_data;
unsigned err_flags = 0;
if (!error) {
DMDEBUG("%s path activation command - success",
path->dev->name);
goto out;
}
if (hp_sw_error_is_retryable(req)) {
DMDEBUG("%s path activation command - retry",
path->dev->name);
err_flags = MP_RETRY;
goto out;
}
DMWARN("%s path activation fail - error=0x%x",
path->dev->name, error);
err_flags = MP_FAIL_PATH;
out:
req->end_io_data = NULL;
__blk_put_request(req->q, req);
dm_pg_init_complete(path, err_flags);
}
/*
* hp_sw_get_request - Allocate an HP specific path activation request
* @path: path on which request will be sent (needed for request queue)
*
* The START command is used for path activation request.
* These arrays are controller-based failover, not LUN based.
* One START command issued to a single path will fail over all
* LUNs for the same controller.
*
* Possible optimizations
* 1. Make timeout configurable
* 2. Preallocate request
*/
static struct request *hp_sw_get_request(struct dm_path *path)
{
struct request *req;
struct block_device *bdev = path->dev->bdev;
struct request_queue *q = bdev_get_queue(bdev);
struct hp_sw_context *h = path->hwhcontext;
req = blk_get_request(q, WRITE, GFP_NOIO);
if (!req)
goto out;
req->timeout = 60 * HZ;
req->errors = 0;
req->cmd_type = REQ_TYPE_BLOCK_PC;
req->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
req->end_io_data = path;
req->sense = h->sense;
memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
req->cmd[0] = START_STOP;
req->cmd[4] = 1;
req->cmd_len = COMMAND_SIZE(req->cmd[0]);
out:
return req;
}
/*
* hp_sw_pg_init - HP path activation implementation.
* @hwh: hardware handler specific data
* @bypassed: unused; is the path group bypassed? (see dm-mpath.c)
* @path: path to send initialization command
*
* Send an HP-specific path activation command on 'path'.
* Do not try to optimize in any way, just send the activation command.
* More than one path activation command may be sent to the same controller.
* This seems to work fine for basic failover support.
*
* Possible optimizations
* 1. Detect an in-progress activation request and avoid submitting another one
* 2. Model the controller and only send a single activation request at a time
* 3. Determine the state of a path before sending an activation request
*
* Context: kmpathd (see process_queued_ios() in dm-mpath.c)
*/
static void hp_sw_pg_init(struct hw_handler *hwh, unsigned bypassed,
struct dm_path *path)
{
struct request *req;
struct hp_sw_context *h;
path->hwhcontext = hwh->context;
h = hwh->context;
req = hp_sw_get_request(path);
if (!req) {
DMERR("%s path activation command - allocation fail",
path->dev->name);
goto retry;
}
DMDEBUG("%s path activation command - sent", path->dev->name);
blk_execute_rq_nowait(req->q, NULL, req, 1, hp_sw_end_io);
return;
retry:
dm_pg_init_complete(path, MP_RETRY);
}
static int hp_sw_create(struct hw_handler *hwh, unsigned argc, char **argv)
{
struct hp_sw_context *h;
h = kmalloc(sizeof(*h), GFP_KERNEL);
if (!h)
return -ENOMEM;
hwh->context = h;
return 0;
}
static void hp_sw_destroy(struct hw_handler *hwh)
{
struct hp_sw_context *h = hwh->context;
kfree(h);
}
static struct hw_handler_type hp_sw_hwh = {
.name = DM_HP_HWH_NAME,
.module = THIS_MODULE,
.create = hp_sw_create,
.destroy = hp_sw_destroy,
.pg_init = hp_sw_pg_init,
};
static int __init hp_sw_init(void)
{
int r;
r = dm_register_hw_handler(&hp_sw_hwh);
if (r < 0)
DMERR("register failed %d", r);
else
DMINFO("version " DM_HP_HWH_VER " loaded");
return r;
}
static void __exit hp_sw_exit(void)
{
int r;
r = dm_unregister_hw_handler(&hp_sw_hwh);
if (r < 0)
DMERR("unregister failed %d", r);
}
module_init(hp_sw_init);
module_exit(hp_sw_exit);
MODULE_DESCRIPTION("DM Multipath HP StorageWorks / FSC FibreCat (A/P) support");
MODULE_AUTHOR("Mike Christie, Dave Wysochanski <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DM_HP_HWH_VER);

diff --git a/drivers/md/dm-mpath-rdac.c b/drivers/md/dm-mpath-rdac.c
deleted file mode 100644

@@ -1,700 +0,0 @@
/*
* Engenio/LSI RDAC DM HW handler
*
* Copyright (C) 2005 Mike Christie. All rights reserved.
* Copyright (C) Chandra Seetharaman, IBM Corp. 2007
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#define DM_MSG_PREFIX "multipath rdac"
#include "dm.h"
#include "dm-hw-handler.h"
#define RDAC_DM_HWH_NAME "rdac"
#define RDAC_DM_HWH_VER "0.4"
/*
* LSI mode page stuff
*
* These struct definitions and the forming of the
* mode page were taken from the LSI RDAC 2.4 GPL'd
* driver, and then converted to Linux conventions.
*/
#define RDAC_QUIESCENCE_TIME 20;
/*
* Page Codes
*/
#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c
/*
* Controller modes definitions
*/
#define RDAC_MODE_TRANSFER_ALL_LUNS 0x01
#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS 0x02
/*
* RDAC Options field
*/
#define RDAC_FORCED_QUIESENCE 0x02
#define RDAC_FAILOVER_TIMEOUT (60 * HZ)
struct rdac_mode_6_hdr {
u8 data_len;
u8 medium_type;
u8 device_params;
u8 block_desc_len;
};
struct rdac_mode_10_hdr {
u16 data_len;
u8 medium_type;
u8 device_params;
u16 reserved;
u16 block_desc_len;
};
struct rdac_mode_common {
u8 controller_serial[16];
u8 alt_controller_serial[16];
u8 rdac_mode[2];
u8 alt_rdac_mode[2];
u8 quiescence_timeout;
u8 rdac_options;
};
struct rdac_pg_legacy {
struct rdac_mode_6_hdr hdr;
u8 page_code;
u8 page_len;
struct rdac_mode_common common;
#define MODE6_MAX_LUN 32
u8 lun_table[MODE6_MAX_LUN];
u8 reserved2[32];
u8 reserved3;
u8 reserved4;
};
struct rdac_pg_expanded {
struct rdac_mode_10_hdr hdr;
u8 page_code;
u8 subpage_code;
u8 page_len[2];
struct rdac_mode_common common;
u8 lun_table[256];
u8 reserved3;
u8 reserved4;
};
struct c9_inquiry {
u8 peripheral_info;
u8 page_code; /* 0xC9 */
u8 reserved1;
u8 page_len;
u8 page_id[4]; /* "vace" */
u8 avte_cvp;
u8 path_prio;
u8 reserved2[38];
};
#define SUBSYS_ID_LEN 16
#define SLOT_ID_LEN 2
struct c4_inquiry {
u8 peripheral_info;
u8 page_code; /* 0xC4 */
u8 reserved1;
u8 page_len;
u8 page_id[4]; /* "subs" */
u8 subsys_id[SUBSYS_ID_LEN];
u8 revision[4];
u8 slot_id[SLOT_ID_LEN];
u8 reserved[2];
};
struct rdac_controller {
u8 subsys_id[SUBSYS_ID_LEN];
u8 slot_id[SLOT_ID_LEN];
int use_10_ms;
struct kref kref;
struct list_head node; /* list of all controllers */
spinlock_t lock;
int submitted;
struct list_head cmd_list; /* list of commands to be submitted */
union {
struct rdac_pg_legacy legacy;
struct rdac_pg_expanded expanded;
} mode_select;
};
struct c8_inquiry {
u8 peripheral_info;
u8 page_code; /* 0xC8 */
u8 reserved1;
u8 page_len;
u8 page_id[4]; /* "edid" */
u8 reserved2[3];
u8 vol_uniq_id_len;
u8 vol_uniq_id[16];
u8 vol_user_label_len;
u8 vol_user_label[60];
u8 array_uniq_id_len;
u8 array_unique_id[16];
u8 array_user_label_len;
u8 array_user_label[60];
u8 lun[8];
};
struct c2_inquiry {
u8 peripheral_info;
u8 page_code; /* 0xC2 */
u8 reserved1;
u8 page_len;
u8 page_id[4]; /* "swr4" */
u8 sw_version[3];
u8 sw_date[3];
u8 features_enabled;
u8 max_lun_supported;
u8 partitions[239]; /* Total allocation length should be 0xFF */
};
struct rdac_handler {
struct list_head entry; /* list waiting to submit MODE SELECT */
unsigned timeout;
struct rdac_controller *ctlr;
#define UNINITIALIZED_LUN (1 << 8)
unsigned lun;
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
struct dm_path *path;
struct work_struct work;
#define SEND_C2_INQUIRY 1
#define SEND_C4_INQUIRY 2
#define SEND_C8_INQUIRY 3
#define SEND_C9_INQUIRY 4
#define SEND_MODE_SELECT 5
int cmd_to_send;
union {
struct c2_inquiry c2;
struct c4_inquiry c4;
struct c8_inquiry c8;
struct c9_inquiry c9;
} inq;
};
static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
static struct workqueue_struct *rdac_wkqd;
static inline int had_failures(struct request *req, int error)
{
return (error || host_byte(req->errors) != DID_OK ||
msg_byte(req->errors) != COMMAND_COMPLETE);
}
static void rdac_resubmit_all(struct rdac_handler *h)
{
struct rdac_controller *ctlr = h->ctlr;
struct rdac_handler *tmp, *h1;
spin_lock(&ctlr->lock);
list_for_each_entry_safe(h1, tmp, &ctlr->cmd_list, entry) {
h1->cmd_to_send = SEND_C9_INQUIRY;
queue_work(rdac_wkqd, &h1->work);
list_del(&h1->entry);
}
ctlr->submitted = 0;
spin_unlock(&ctlr->lock);
}
static void mode_select_endio(struct request *req, int error)
{
struct rdac_handler *h = req->end_io_data;
struct scsi_sense_hdr sense_hdr;
int sense = 0, fail = 0;
if (had_failures(req, error)) {
fail = 1;
goto failed;
}
if (status_byte(req->errors) == CHECK_CONDITION) {
scsi_normalize_sense(req->sense, SCSI_SENSE_BUFFERSIZE,
&sense_hdr);
sense = (sense_hdr.sense_key << 16) | (sense_hdr.asc << 8) |
sense_hdr.ascq;
/* If it is retryable failure, submit the c9 inquiry again */
if (sense == 0x59136 || sense == 0x68b02 || sense == 0xb8b02 ||
sense == 0x62900) {
/* 0x59136 - Command lock contention
* 0x[6b]8b02 - Quiesense in progress or achieved
* 0x62900 - Power On, Reset, or Bus Device Reset
*/
h->cmd_to_send = SEND_C9_INQUIRY;
queue_work(rdac_wkqd, &h->work);
goto done;
}
if (sense)
DMINFO("MODE_SELECT failed on %s with sense 0x%x",
h->path->dev->name, sense);
}
failed:
if (fail || sense)
dm_pg_init_complete(h->path, MP_FAIL_PATH);
else
dm_pg_init_complete(h->path, 0);
done:
rdac_resubmit_all(h);
__blk_put_request(req->q, req);
}
static struct request *get_rdac_req(struct rdac_handler *h,
void *buffer, unsigned buflen, int rw)
{
struct request *rq;
struct request_queue *q = bdev_get_queue(h->path->dev->bdev);
rq = blk_get_request(q, rw, GFP_KERNEL);
if (!rq) {
DMINFO("get_rdac_req: blk_get_request failed");
return NULL;
}
if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) {
blk_put_request(rq);
DMINFO("get_rdac_req: blk_rq_map_kern failed");
return NULL;
}
rq->sense = h->sense;
memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
rq->sense_len = 0;
rq->end_io_data = h;
rq->timeout = h->timeout;
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
return rq;
}
static struct request *rdac_failover_get(struct rdac_handler *h)
{
struct request *rq;
struct rdac_mode_common *common;
unsigned data_size;
if (h->ctlr->use_10_ms) {
struct rdac_pg_expanded *rdac_pg;
data_size = sizeof(struct rdac_pg_expanded);
rdac_pg = &h->ctlr->mode_select.expanded;
memset(rdac_pg, 0, data_size);
common = &rdac_pg->common;
rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
rdac_pg->subpage_code = 0x1;
rdac_pg->page_len[0] = 0x01;
rdac_pg->page_len[1] = 0x28;
rdac_pg->lun_table[h->lun] = 0x81;
} else {
struct rdac_pg_legacy *rdac_pg;
data_size = sizeof(struct rdac_pg_legacy);
rdac_pg = &h->ctlr->mode_select.legacy;
memset(rdac_pg, 0, data_size);
common = &rdac_pg->common;
rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
rdac_pg->page_len = 0x68;
rdac_pg->lun_table[h->lun] = 0x81;
}
common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
common->rdac_options = RDAC_FORCED_QUIESENCE;
/* get request for block layer packet command */
rq = get_rdac_req(h, &h->ctlr->mode_select, data_size, WRITE);
if (!rq) {
DMERR("rdac_failover_get: no rq");
return NULL;
}
/* Prepare the command. */
if (h->ctlr->use_10_ms) {
rq->cmd[0] = MODE_SELECT_10;
rq->cmd[7] = data_size >> 8;
rq->cmd[8] = data_size & 0xff;
} else {
rq->cmd[0] = MODE_SELECT;
rq->cmd[4] = data_size;
}
rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
return rq;
}
/* Acquires h->ctlr->lock */
static void submit_mode_select(struct rdac_handler *h)
{
struct request *rq;
struct request_queue *q = bdev_get_queue(h->path->dev->bdev);
spin_lock(&h->ctlr->lock);
if (h->ctlr->submitted) {
list_add(&h->entry, &h->ctlr->cmd_list);
goto drop_lock;
}
if (!q) {
DMINFO("submit_mode_select: no queue");
goto fail_path;
}
rq = rdac_failover_get(h);
if (!rq) {
DMERR("submit_mode_select: no rq");
goto fail_path;
}
DMINFO("queueing MODE_SELECT command on %s", h->path->dev->name);
blk_execute_rq_nowait(q, NULL, rq, 1, mode_select_endio);
h->ctlr->submitted = 1;
goto drop_lock;
fail_path:
dm_pg_init_complete(h->path, MP_FAIL_PATH);
drop_lock:
spin_unlock(&h->ctlr->lock);
}
static void release_ctlr(struct kref *kref)
{
struct rdac_controller *ctlr;
ctlr = container_of(kref, struct rdac_controller, kref);
spin_lock(&list_lock);
list_del(&ctlr->node);
spin_unlock(&list_lock);
kfree(ctlr);
}
static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
{
struct rdac_controller *ctlr, *tmp;
spin_lock(&list_lock);
list_for_each_entry(tmp, &ctlr_list, node) {
if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
(memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
kref_get(&tmp->kref);
spin_unlock(&list_lock);
return tmp;
}
}
ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
if (!ctlr)
goto done;
/* initialize fields of controller */
memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
kref_init(&ctlr->kref);
spin_lock_init(&ctlr->lock);
ctlr->submitted = 0;
ctlr->use_10_ms = -1;
INIT_LIST_HEAD(&ctlr->cmd_list);
list_add(&ctlr->node, &ctlr_list);
done:
spin_unlock(&list_lock);
return ctlr;
}
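get_controller() implements a kref-counted lookup-or-create cache under list_lock. A short usage sketch of the contract it sets up, reusing the definitions above:

    /* Illustrative: every successful get_controller() must be balanced by
     * a kref_put() naming release_ctlr(), which unlinks the controller
     * from ctlr_list and frees it on the final drop. */
    static void example_use(u8 *subsys_id, u8 *slot_id)
    {
        struct rdac_controller *ctlr = get_controller(subsys_id, slot_id);

        if (!ctlr)
            return;
        /* ... use ctlr ... */
        kref_put(&ctlr->kref, release_ctlr);
    }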
static void c4_endio(struct request *req, int error)
{
struct rdac_handler *h = req->end_io_data;
struct c4_inquiry *sp;
if (had_failures(req, error)) {
dm_pg_init_complete(h->path, MP_FAIL_PATH);
goto done;
}
sp = &h->inq.c4;
h->ctlr = get_controller(sp->subsys_id, sp->slot_id);
if (h->ctlr) {
h->cmd_to_send = SEND_C9_INQUIRY;
queue_work(rdac_wkqd, &h->work);
} else
dm_pg_init_complete(h->path, MP_FAIL_PATH);
done:
__blk_put_request(req->q, req);
}
static void c2_endio(struct request *req, int error)
{
struct rdac_handler *h = req->end_io_data;
struct c2_inquiry *sp;
if (had_failures(req, error)) {
dm_pg_init_complete(h->path, MP_FAIL_PATH);
goto done;
}
sp = &h->inq.c2;
/* If more than MODE6_MAX_LUN luns are supported, use mode select 10 */
if (sp->max_lun_supported >= MODE6_MAX_LUN)
h->ctlr->use_10_ms = 1;
else
h->ctlr->use_10_ms = 0;
h->cmd_to_send = SEND_MODE_SELECT;
queue_work(rdac_wkqd, &h->work);
done:
__blk_put_request(req->q, req);
}
static void c9_endio(struct request *req, int error)
{
struct rdac_handler *h = req->end_io_data;
struct c9_inquiry *sp;
if (had_failures(req, error)) {
dm_pg_init_complete(h->path, MP_FAIL_PATH);
goto done;
}
/* We need to look at the sense keys here to take clear action.
* For now simple logic: If the host is in AVT mode or if controller
* owns the lun, return dm_pg_init_complete(), otherwise submit
* MODE SELECT.
*/
sp = &h->inq.c9;
/* If in AVT mode, return success */
if ((sp->avte_cvp >> 7) == 0x1) {
dm_pg_init_complete(h->path, 0);
goto done;
}
/* If the controller on this path owns the LUN, return success */
if (sp->avte_cvp & 0x1) {
dm_pg_init_complete(h->path, 0);
goto done;
}
if (h->ctlr) {
if (h->ctlr->use_10_ms == -1)
h->cmd_to_send = SEND_C2_INQUIRY;
else
h->cmd_to_send = SEND_MODE_SELECT;
} else
h->cmd_to_send = SEND_C4_INQUIRY;
queue_work(rdac_wkqd, &h->work);
done:
__blk_put_request(req->q, req);
}
static void c8_endio(struct request *req, int error)
{
struct rdac_handler *h = req->end_io_data;
struct c8_inquiry *sp;
if (had_failures(req, error)) {
dm_pg_init_complete(h->path, MP_FAIL_PATH);
goto done;
}
/* We need to look at the sense keys here to take clear action.
* For now simple logic: Get the lun from the inquiry page.
*/
sp = &h->inq.c8;
h->lun = sp->lun[7]; /* currently it uses only one byte */
h->cmd_to_send = SEND_C9_INQUIRY;
queue_work(rdac_wkqd, &h->work);
done:
__blk_put_request(req->q, req);
}
static void submit_inquiry(struct rdac_handler *h, int page_code,
unsigned int len, rq_end_io_fn endio)
{
struct request *rq;
struct request_queue *q = bdev_get_queue(h->path->dev->bdev);
if (!q)
goto fail_path;
rq = get_rdac_req(h, &h->inq, len, READ);
if (!rq)
goto fail_path;
/* Prepare the command. */
rq->cmd[0] = INQUIRY;
rq->cmd[1] = 1;
rq->cmd[2] = page_code;
rq->cmd[4] = len;
rq->cmd_len = COMMAND_SIZE(INQUIRY);
blk_execute_rq_nowait(q, NULL, rq, 1, endio);
return;
fail_path:
dm_pg_init_complete(h->path, MP_FAIL_PATH);
}
static void service_wkq(struct work_struct *work)
{
struct rdac_handler *h = container_of(work, struct rdac_handler, work);
switch (h->cmd_to_send) {
case SEND_C2_INQUIRY:
submit_inquiry(h, 0xC2, sizeof(struct c2_inquiry), c2_endio);
break;
case SEND_C4_INQUIRY:
submit_inquiry(h, 0xC4, sizeof(struct c4_inquiry), c4_endio);
break;
case SEND_C8_INQUIRY:
submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
break;
case SEND_C9_INQUIRY:
submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
break;
case SEND_MODE_SELECT:
submit_mode_select(h);
break;
default:
BUG();
}
}
/*
* only support subpage2c until we confirm that this is just a matter of
* of updating firmware or not, and RDAC (basic AVT works already) for now
* but we can add these in in when we get time and testers
*/
static int rdac_create(struct hw_handler *hwh, unsigned argc, char **argv)
{
struct rdac_handler *h;
unsigned timeout;
if (argc == 0) {
/* No arguments: use defaults */
timeout = RDAC_FAILOVER_TIMEOUT;
} else if (argc != 1) {
DMWARN("incorrect number of arguments");
return -EINVAL;
} else {
if (sscanf(argv[1], "%u", &timeout) != 1) {
DMWARN("invalid timeout value");
return -EINVAL;
}
}
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
return -ENOMEM;
hwh->context = h;
h->timeout = timeout;
h->lun = UNINITIALIZED_LUN;
INIT_WORK(&h->work, service_wkq);
DMWARN("using RDAC command with timeout %u", h->timeout);
return 0;
}
static void rdac_destroy(struct hw_handler *hwh)
{
struct rdac_handler *h = hwh->context;
if (h->ctlr)
kref_put(&h->ctlr->kref, release_ctlr);
kfree(h);
hwh->context = NULL;
}
static unsigned rdac_error(struct hw_handler *hwh, struct bio *bio)
{
/* Try default handler */
return dm_scsi_err_handler(hwh, bio);
}
static void rdac_pg_init(struct hw_handler *hwh, unsigned bypassed,
struct dm_path *path)
{
struct rdac_handler *h = hwh->context;
h->path = path;
switch (h->lun) {
case UNINITIALIZED_LUN:
submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
break;
default:
submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
}
}
static struct hw_handler_type rdac_handler = {
.name = RDAC_DM_HWH_NAME,
.module = THIS_MODULE,
.create = rdac_create,
.destroy = rdac_destroy,
.pg_init = rdac_pg_init,
.error = rdac_error,
};
static int __init rdac_init(void)
{
int r;
rdac_wkqd = create_singlethread_workqueue("rdac_wkqd");
if (!rdac_wkqd) {
DMERR("Failed to create workqueue rdac_wkqd.");
return -ENOMEM;
}
r = dm_register_hw_handler(&rdac_handler);
if (r < 0) {
DMERR("%s: register failed %d", RDAC_DM_HWH_NAME, r);
destroy_workqueue(rdac_wkqd);
return r;
}
DMINFO("%s: version %s loaded", RDAC_DM_HWH_NAME, RDAC_DM_HWH_VER);
return 0;
}
static void __exit rdac_exit(void)
{
int r = dm_unregister_hw_handler(&rdac_handler);
destroy_workqueue(rdac_wkqd);
if (r < 0)
DMERR("%s: unregister failed %d", RDAC_DM_HWH_NAME, r);
}
module_init(rdac_init);
module_exit(rdac_exit);
MODULE_DESCRIPTION("DM Multipath LSI/Engenio RDAC support");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
MODULE_LICENSE("GPL");
MODULE_VERSION(RDAC_DM_HWH_VER);

diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c

@@ -7,7 +7,6 @@
#include "dm.h"
#include "dm-path-selector.h"
- #include "dm-hw-handler.h"
#include "dm-bio-list.h"
#include "dm-bio-record.h"
#include "dm-uevent.h"
@@ -20,6 +19,7 @@
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
+ #include <scsi/scsi_dh.h>
#include <asm/atomic.h>
#define DM_MSG_PREFIX "multipath"
@@ -61,7 +61,8 @@ struct multipath {
spinlock_t lock;
- struct hw_handler hw_handler;
+ const char *hw_handler_name;
+ struct work_struct activate_path;
unsigned nr_priority_groups;
struct list_head priority_groups;
unsigned pg_init_required; /* pg_init needs calling? */
@@ -106,9 +107,10 @@ typedef int (*action_fn) (struct pgpath *pgpath);
static struct kmem_cache *_mpio_cache;
- static struct workqueue_struct *kmultipathd;
+ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void process_queued_ios(struct work_struct *work);
static void trigger_event(struct work_struct *work);
+ static void activate_path(struct work_struct *work);
/*-----------------------------------------------
@@ -178,6 +180,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
m->queue_io = 1;
INIT_WORK(&m->process_queued_ios, process_queued_ios);
INIT_WORK(&m->trigger_event, trigger_event);
+ INIT_WORK(&m->activate_path, activate_path);
m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
if (!m->mpio_pool) {
kfree(m);
@@ -193,18 +196,13 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
static void free_multipath(struct multipath *m)
{
struct priority_group *pg, *tmp;
- struct hw_handler *hwh = &m->hw_handler;
list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
list_del(&pg->list);
free_priority_group(pg, m->ti);
}
- if (hwh->type) {
- hwh->type->destroy(hwh);
- dm_put_hw_handler(hwh->type);
- }
+ kfree(m->hw_handler_name);
mempool_destroy(m->mpio_pool);
kfree(m);
}
@@ -216,12 +214,10 @@ static void free_multipath(struct multipath *m)
static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
- struct hw_handler *hwh = &m->hw_handler;
m->current_pg = pgpath->pg;
/* Must we initialise the PG first, and queue I/O till it's ready? */
- if (hwh->type && hwh->type->pg_init) {
+ if (m->hw_handler_name) {
m->pg_init_required = 1;
m->queue_io = 1;
} else {
@@ -409,7 +405,6 @@ static void process_queued_ios(struct work_struct *work)
{
struct multipath *m =
container_of(work, struct multipath, process_queued_ios);
- struct hw_handler *hwh = &m->hw_handler;
struct pgpath *pgpath = NULL;
unsigned init_required = 0, must_queue = 1;
unsigned long flags;
@@ -439,7 +434,7 @@ out:
spin_unlock_irqrestore(&m->lock, flags);
if (init_required)
- hwh->type->pg_init(hwh, pgpath->pg->bypassed, &pgpath->path);
+ queue_work(kmpath_handlerd, &m->activate_path);
if (!must_queue)
dispatch_queued_ios(m);
@@ -652,8 +647,6 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
static int parse_hw_handler(struct arg_set *as, struct multipath *m)
{
- int r;
- struct hw_handler_type *hwht;
unsigned hw_argc;
struct dm_target *ti = m->ti;
@@ -661,30 +654,20 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
{0, 1024, "invalid number of hardware handler args"},
};
- r = read_param(_params, shift(as), &hw_argc, &ti->error);
- if (r)
+ if (read_param(_params, shift(as), &hw_argc, &ti->error))
return -EINVAL;
if (!hw_argc)
return 0;
- hwht = dm_get_hw_handler(shift(as));
- if (!hwht) {
+ m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL);
+ request_module("scsi_dh_%s", m->hw_handler_name);
+ if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
ti->error = "unknown hardware handler type";
+ kfree(m->hw_handler_name);
+ m->hw_handler_name = NULL;
return -EINVAL;
}
- m->hw_handler.md = dm_table_get_md(ti->table);
- dm_put(m->hw_handler.md);
-
- r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv);
- if (r) {
- dm_put_hw_handler(hwht);
- ti->error = "hardware handler constructor failed";
- return r;
- }
-
- m->hw_handler.type = hwht;
consume(as, hw_argc - 1);
return 0;
@@ -808,6 +791,7 @@ static void multipath_dtr(struct dm_target *ti)
{
struct multipath *m = (struct multipath *) ti->private;
+ flush_workqueue(kmpath_handlerd);
flush_workqueue(kmultipathd);
free_multipath(m);
}
@@ -1025,52 +1009,85 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
return limit_reached;
}
- /*
- * pg_init must call this when it has completed its initialisation
- */
- void dm_pg_init_complete(struct dm_path *path, unsigned err_flags)
+ static void pg_init_done(struct dm_path *path, int errors)
{
struct pgpath *pgpath = path_to_pgpath(path);
struct priority_group *pg = pgpath->pg;
struct multipath *m = pg->m;
unsigned long flags;
- /*
- * If requested, retry pg_init until maximum number of retries exceeded.
- * If retry not requested and PG already bypassed, always fail the path.
- */
- if (err_flags & MP_RETRY) {
- if (pg_init_limit_reached(m, pgpath))
- err_flags |= MP_FAIL_PATH;
- } else if (err_flags && pg->bypassed)
- err_flags |= MP_FAIL_PATH;
-
- if (err_flags & MP_FAIL_PATH)
- fail_path(pgpath);
-
- if (err_flags & MP_BYPASS_PG)
- bypass_pg(m, pg, 1);
+ /* device or driver problems */
+ switch (errors) {
+ case SCSI_DH_OK:
+ break;
+ case SCSI_DH_NOSYS:
+ if (!m->hw_handler_name) {
+ errors = 0;
+ break;
+ }
+ DMERR("Cannot failover device because scsi_dh_%s was not "
+ "loaded.", m->hw_handler_name);
+ /*
+ * Fail path for now, so we do not ping pong
+ */
+ fail_path(pgpath);
+ break;
+ case SCSI_DH_DEV_TEMP_BUSY:
+ /*
+ * Probably doing something like FW upgrade on the
+ * controller so try the other pg.
+ */
+ bypass_pg(m, pg, 1);
+ break;
+ /* TODO: For SCSI_DH_RETRY we should wait a couple seconds */
+ case SCSI_DH_RETRY:
+ case SCSI_DH_IMM_RETRY:
+ case SCSI_DH_RES_TEMP_UNAVAIL:
+ if (pg_init_limit_reached(m, pgpath))
+ fail_path(pgpath);
+ errors = 0;
+ break;
+ default:
+ /*
+ * We probably do not want to fail the path for a device
+ * error, but this is what the old dm did. In future
+ * patches we can do more advanced handling.
+ */
+ fail_path(pgpath);
+ }
spin_lock_irqsave(&m->lock, flags);
- if (err_flags & ~MP_RETRY) {
+ if (errors) {
+ DMERR("Could not failover device. Error %d.", errors);
m->current_pgpath = NULL;
m->current_pg = NULL;
- } else if (!m->pg_init_required)
+ } else if (!m->pg_init_required) {
m->queue_io = 0;
+ pg->bypassed = 0;
+ }
m->pg_init_in_progress = 0;
queue_work(kmultipathd, &m->process_queued_ios);
spin_unlock_irqrestore(&m->lock, flags);
}
+ static void activate_path(struct work_struct *work)
+ {
+ int ret;
+ struct multipath *m =
+ container_of(work, struct multipath, activate_path);
+ struct dm_path *path = &m->current_pgpath->path;
+
+ ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
+ pg_init_done(path, ret);
+ }
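As the new activate_path() shows, scsi_dh_activate() here takes only the device's request queue and returns a SCSI_DH_* code that pg_init_done() classifies. A distilled consumer sketch under the same assumptions ('bdev' is a hypothetical block device):

    #include <linux/blkdev.h>
    #include <linux/errno.h>
    #include <scsi/scsi_dh.h>

    /* Illustrative: kick the attached device handler and map its result
     * onto the three outcomes pg_init_done() distinguishes. */
    static int try_activate(struct block_device *bdev)
    {
        int ret = scsi_dh_activate(bdev_get_queue(bdev));

        switch (ret) {
        case SCSI_DH_OK:
            return 0;       /* path group ready for I/O */
        case SCSI_DH_RETRY:
        case SCSI_DH_IMM_RETRY:
        case SCSI_DH_RES_TEMP_UNAVAIL:
            return -EAGAIN; /* transient; worth another attempt */
        default:
            return -EIO;    /* fail or bypass the path */
        }
    }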
/*
 * end_io handling
 */
static int do_end_io(struct multipath *m, struct bio *bio,
int error, struct dm_mpath_io *mpio)
{
- struct hw_handler *hwh = &m->hw_handler;
- unsigned err_flags = MP_FAIL_PATH; /* Default behavior */
unsigned long flags;
if (!error)
@@ -1097,19 +1114,8 @@ static int do_end_io(struct multipath *m, struct bio *bio,
}
spin_unlock_irqrestore(&m->lock, flags);
- if (hwh->type && hwh->type->error)
- err_flags = hwh->type->error(hwh, bio);
-
- if (mpio->pgpath) {
- if (err_flags & MP_FAIL_PATH)
- fail_path(mpio->pgpath);
-
- if (err_flags & MP_BYPASS_PG)
- bypass_pg(m, mpio->pgpath->pg, 1);
- }
-
- if (err_flags & MP_ERROR_IO)
- return -EIO;
+ if (mpio->pgpath)
+ fail_path(mpio->pgpath);
requeue:
dm_bio_restore(&mpio->details, bio);
@@ -1194,7 +1200,6 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
int sz = 0;
unsigned long flags;
struct multipath *m = (struct multipath *) ti->private;
- struct hw_handler *hwh = &m->hw_handler;
struct priority_group *pg;
struct pgpath *p;
unsigned pg_num;
@@ -1214,12 +1219,10 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
DMEMIT("pg_init_retries %u ", m->pg_init_retries);
}
- if (hwh->type && hwh->type->status)
- sz += hwh->type->status(hwh, type, result + sz, maxlen - sz);
- else if (!hwh->type || type == STATUSTYPE_INFO)
+ if (!m->hw_handler_name || type == STATUSTYPE_INFO)
DMEMIT("0 ");
else
- DMEMIT("1 %s ", hwh->type->name);
+ DMEMIT("1 %s ", m->hw_handler_name);
DMEMIT("%u ", m->nr_priority_groups);
@@ -1422,6 +1425,21 @@ static int __init dm_multipath_init(void)
return -ENOMEM;
}
+ /*
+ * A separate workqueue is used to handle the device handlers
+ * to avoid overloading existing workqueue. Overloading the
+ * old workqueue would also create a bottleneck in the
+ * path of the storage hardware device activation.
+ */
+ kmpath_handlerd = create_singlethread_workqueue("kmpath_handlerd");
+ if (!kmpath_handlerd) {
+ DMERR("failed to create workqueue kmpath_handlerd");
+ destroy_workqueue(kmultipathd);
+ dm_unregister_target(&multipath_target);
+ kmem_cache_destroy(_mpio_cache);
+ return -ENOMEM;
+ }
+
DMINFO("version %u.%u.%u loaded",
multipath_target.version[0], multipath_target.version[1],
multipath_target.version[2]);
@@ -1433,6 +1451,7 @@ static void __exit dm_multipath_exit(void)
{
int r;
+ destroy_workqueue(kmpath_handlerd);
destroy_workqueue(kmultipathd);
r = dm_unregister_target(&multipath_target);
@@ -1441,8 +1460,6 @@ static void __exit dm_multipath_exit(void)
kmem_cache_destroy(_mpio_cache);
}
- EXPORT_SYMBOL_GPL(dm_pg_init_complete);
-
module_init(dm_multipath_init);
module_exit(dm_multipath_exit);
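The comment added above spells out the design choice: slow, possibly blocking handler activation gets its own single-threaded queue so shared queues never back up behind it. The pattern, as a self-contained sketch with hypothetical names:

    #include <linux/init.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *slow_wq;
    static struct work_struct slow_work;    /* INIT_WORK()ed elsewhere */

    /* Illustrative: dedicate a queue to slow operations. */
    static int __init slow_path_init(void)
    {
        slow_wq = create_singlethread_workqueue("slow_wq");
        if (!slow_wq)
            return -ENOMEM;
        queue_work(slow_wq, &slow_work);
        return 0;
    }

    static void __exit slow_path_exit(void)
    {
        /* flush before destroy so no work item outlives the queue */
        flush_workqueue(slow_wq);
        destroy_workqueue(slow_wq);
    }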

diff --git a/drivers/md/dm-mpath.h b/drivers/md/dm-mpath.h

@@ -16,7 +16,6 @@ struct dm_path {
unsigned is_active; /* Read-only */
void *pscontext; /* For path-selector use */
- void *hwhcontext; /* For hw-handler use */
};
/* Callback for hwh_pg_init_fn to use when complete */

diff --git a/drivers/message/fusion/lsi/mpi.h b/drivers/message/fusion/lsi/mpi.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2007 LSI Corporation.
+ * Copyright (c) 2000-2008 LSI Corporation.
 *
 *
 * Name: mpi.h


@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2007 LSI Corporation.
+ *  Copyright (c) 2000-2008 LSI Corporation.
 *
 *
 *           Name:  mpi_cnfg.h


@@ -5,7 +5,7 @@
 *      For use with LSI PCI chip/adapter(s)
 *      running LSI Fusion MPT (Message Passing Technology) firmware.
 *
- *  Copyright (c) 1999-2007 LSI Corporation
+ *  Copyright (c) 1999-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */
@@ -103,7 +103,7 @@ static int mfcounter = 0;
 *  Public data...
 */

-struct proc_dir_entry *mpt_proc_root_dir;
+static struct proc_dir_entry *mpt_proc_root_dir;

 #define WHOINIT_UNKNOWN		0xAA
@@ -253,6 +253,55 @@ mpt_get_cb_idx(MPT_DRIVER_CLASS dclass)
 	return 0;
 }

+/**
+ *	mpt_fault_reset_work - work performed on workq after ioc fault
+ *	@work: input argument, used to derive ioc
+ *
+**/
+static void
+mpt_fault_reset_work(struct work_struct *work)
+{
+	MPT_ADAPTER	*ioc =
+	    container_of(work, MPT_ADAPTER, fault_reset_work.work);
+	u32		 ioc_raw_state;
+	int		 rc;
+	unsigned long	 flags;
+
+	if (ioc->diagPending || !ioc->active)
+		goto out;
+
+	ioc_raw_state = mpt_GetIocState(ioc, 0);
+	if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
+		printk(MYIOC_s_WARN_FMT "IOC is in FAULT state (%04xh)!!!\n",
+		    ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
+		printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
+		    ioc->name, __FUNCTION__);
+		rc = mpt_HardResetHandler(ioc, CAN_SLEEP);
+		printk(MYIOC_s_WARN_FMT "%s: HardReset: %s\n", ioc->name,
+		    __FUNCTION__, (rc == 0) ? "success" : "failed");
+		ioc_raw_state = mpt_GetIocState(ioc, 0);
+		if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT)
+			printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
+			    "reset (%04xh)\n", ioc->name, ioc_raw_state &
+			    MPI_DOORBELL_DATA_MASK);
+	}
+
+ out:
+	/*
+	 * Take turns polling alternate controller
+	 */
+	if (ioc->alt_ioc)
+		ioc = ioc->alt_ioc;
+
+	/* rearm the timer */
+	spin_lock_irqsave(&ioc->fault_reset_work_lock, flags);
+	if (ioc->reset_work_q)
+		queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
+			msecs_to_jiffies(MPT_POLLING_INTERVAL));
+	spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags);
+}
+
 /*
 *  Process turbo (context) reply...
 */
@@ -1616,6 +1665,22 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 	/* Find lookup slot. */
 	INIT_LIST_HEAD(&ioc->list);

+	/* Initialize workqueue */
+	INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work);
+	spin_lock_init(&ioc->fault_reset_work_lock);
+
+	snprintf(ioc->reset_work_q_name, KOBJ_NAME_LEN, "mpt_poll_%d", ioc->id);
+	ioc->reset_work_q =
+		create_singlethread_workqueue(ioc->reset_work_q_name);
+	if (!ioc->reset_work_q) {
+		printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
+		    ioc->name);
+		pci_release_selected_regions(pdev, ioc->bars);
+		kfree(ioc);
+		return -ENOMEM;
+	}
+
 	dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n",
 	    ioc->name, &ioc->facts, &ioc->pfacts[0]));
@@ -1727,6 +1792,10 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 		iounmap(ioc->memmap);
 		if (r != -5)
 			pci_release_selected_regions(pdev, ioc->bars);
+
+		destroy_workqueue(ioc->reset_work_q);
+		ioc->reset_work_q = NULL;
+
 		kfree(ioc);
 		pci_set_drvdata(pdev, NULL);
 		return r;
@@ -1759,6 +1828,10 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 #endif

+	if (!ioc->alt_ioc)
+		queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
+			msecs_to_jiffies(MPT_POLLING_INTERVAL));
+
 	return 0;
 }
@@ -1774,6 +1847,19 @@ mpt_detach(struct pci_dev *pdev)
 	MPT_ADAPTER 	*ioc = pci_get_drvdata(pdev);
 	char pname[32];
 	u8 cb_idx;
+	unsigned long flags;
+	struct workqueue_struct *wq;
+
+	/*
+	 * Stop polling ioc for fault condition
+	 */
+	spin_lock_irqsave(&ioc->fault_reset_work_lock, flags);
+	wq = ioc->reset_work_q;
+	ioc->reset_work_q = NULL;
+	spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags);
+	cancel_delayed_work(&ioc->fault_reset_work);
+	destroy_workqueue(wq);

 	sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name);
 	remove_proc_entry(pname, NULL);
@@ -7456,7 +7542,6 @@ EXPORT_SYMBOL(mpt_resume);
 EXPORT_SYMBOL(mpt_suspend);
 #endif
 EXPORT_SYMBOL(ioc_list);
-EXPORT_SYMBOL(mpt_proc_root_dir);
 EXPORT_SYMBOL(mpt_register);
 EXPORT_SYMBOL(mpt_deregister);
 EXPORT_SYMBOL(mpt_event_register);
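The fault polling added to mptbase.c above follows a common pattern: a delayed work item re-queues itself as long as the queue pointer is non-NULL, and the detach path NULLs that pointer under the same lock before cancelling and destroying the queue, so no re-arm can slip through. A minimal self-contained sketch of the pattern; names outside the kernel workqueue/spinlock API are invented, and the 1000 ms period simply mirrors MPT_POLLING_INTERVAL:

#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static struct workqueue_struct *poll_wq;
static struct delayed_work poll_work;
static DEFINE_SPINLOCK(poll_lock);

static void poll_fn(struct work_struct *work)
{
	unsigned long flags;

	/* ... inspect hardware state, recover if faulted ... */

	/* Re-arm only while the queue still exists; pairs with poll_stop(). */
	spin_lock_irqsave(&poll_lock, flags);
	if (poll_wq)
		queue_delayed_work(poll_wq, &poll_work,
				   msecs_to_jiffies(1000));
	spin_unlock_irqrestore(&poll_lock, flags);
}

static int poll_start(void)
{
	poll_wq = create_singlethread_workqueue("example_poll");
	if (!poll_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&poll_work, poll_fn);
	queue_delayed_work(poll_wq, &poll_work, msecs_to_jiffies(1000));
	return 0;
}

static void poll_stop(void)
{
	struct workqueue_struct *wq;
	unsigned long flags;

	spin_lock_irqsave(&poll_lock, flags);
	wq = poll_wq;
	poll_wq = NULL;		/* forbid any further re-arming */
	spin_unlock_irqrestore(&poll_lock, flags);

	cancel_delayed_work(&poll_work);
	destroy_workqueue(wq);	/* also flushes a poll_fn still running */
}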


@@ -5,7 +5,7 @@
 *          LSIFC9xx/LSI409xx Fibre Channel
 *          running LSI Fusion MPT (Message Passing Technology) firmware.
 *
- *  Copyright (c) 1999-2007 LSI Corporation
+ *  Copyright (c) 1999-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */
@@ -73,11 +73,11 @@
 #endif

 #ifndef COPYRIGHT
-#define COPYRIGHT	"Copyright (c) 1999-2007 " MODULEAUTHOR
+#define COPYRIGHT	"Copyright (c) 1999-2008 " MODULEAUTHOR
 #endif

-#define MPT_LINUX_VERSION_COMMON	"3.04.06"
-#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.06"
+#define MPT_LINUX_VERSION_COMMON	"3.04.07"
+#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.07"
 #define WHAT_MAGIC_STRING		"@" "(" "#" ")"

 #define show_mptmod_ver(s,ver)  \
@@ -176,6 +176,8 @@
 /* debug print string length used for events and iocstatus */
 # define EVENT_DESCR_STR_SZ		100

+#define MPT_POLLING_INTERVAL		1000	/* in milliseconds */
+
 #ifdef __KERNEL__	/* { */
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -709,6 +711,12 @@ typedef struct _MPT_ADAPTER
 	struct workqueue_struct *fc_rescan_work_q;
 	struct scsi_cmnd	**ScsiLookup;
 	spinlock_t		  scsi_lookup_lock;
+
+	char			 reset_work_q_name[KOBJ_NAME_LEN];
+	struct workqueue_struct *reset_work_q;
+	struct delayed_work	 fault_reset_work;
+	spinlock_t		 fault_reset_work_lock;
+
 } MPT_ADAPTER;

 /*
@@ -919,7 +927,6 @@ extern int mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhys
 *  Public data decl's...
 */
 extern struct list_head	  ioc_list;
-extern struct proc_dir_entry	*mpt_proc_root_dir;

 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 #endif		/* } __KERNEL__ */


@@ -4,7 +4,7 @@
 *      For use with LSI PCI chip/adapters
 *      running LSI Fusion MPT (Message Passing Technology) firmware.
 *
- *  Copyright (c) 1999-2007 LSI Corporation
+ *  Copyright (c) 1999-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */
@@ -66,7 +66,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tcq.h>

-#define COPYRIGHT	"Copyright (c) 1999-2007 LSI Corporation"
+#define COPYRIGHT	"Copyright (c) 1999-2008 LSI Corporation"
 #define MODULEAUTHOR	"LSI Corporation"
 #include "mptbase.h"
 #include "mptctl.h"


@@ -5,7 +5,7 @@
 *          LSIFC9xx/LSI409xx Fibre Channel
 *          running LSI Fusion MPT (Message Passing Technology) firmware.
 *
- *  Copyright (c) 1999-2007 LSI Corporation
+ *  Copyright (c) 1999-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */


@@ -3,7 +3,7 @@
 *      For use with LSI PCI chip/adapter(s)
 *      running LSI Fusion MPT (Message Passing Technology) firmware.
 *
- *  Copyright (c) 1999-2007 LSI Corporation
+ *  Copyright (c) 1999-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */


@@ -3,7 +3,7 @@
 *      For use with LSI PCI chip/adapter(s)
 *      running LSI Fusion MPT (Message Passing Technology) firmware.
 *
- *  Copyright (c) 1999-2007 LSI Corporation
+ *  Copyright (c) 1999-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */


@@ -4,7 +4,7 @@
 *      For use with LSI Fibre Channel PCI chip/adapters
 *      running LSI Fusion MPT (Message Passing Technology) firmware.
 *
- *  Copyright (c) 2000-2007 LSI Corporation
+ *  Copyright (c) 2000-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */


@@ -4,7 +4,7 @@
 *      For use with LSI Fibre Channel PCI chip/adapters
 *      running LSI Fusion MPT (Message Passing Technology) firmware.
 *
- *  Copyright (c) 2000-2007 LSI Corporation
+ *  Copyright (c) 2000-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */


@@ -3,7 +3,7 @@
 *      For use with LSI PCI chip/adapter(s)
 *      running LSI Fusion MPT (Message Passing Technology) firmware.
 *
- *  Copyright (c) 1999-2007 LSI Corporation
+ *  Copyright (c) 1999-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 */
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/


@@ -5,7 +5,7 @@
 *          LSIFC9xx/LSI409xx Fibre Channel
 *          running LSI MPT (Message Passing Technology) firmware.
 *
- *  Copyright (c) 1999-2007 LSI Corporation
+ *  Copyright (c) 1999-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */


@@ -3,7 +3,7 @@
 *      For use with LSI PCI chip/adapter(s)
 *      running LSI Fusion MPT (Message Passing Technology) firmware.
 *
- *  Copyright (c) 1999-2007 LSI Corporation
+ *  Copyright (c) 1999-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */


@@ -5,7 +5,7 @@
 *          LSIFC9xx/LSI409xx Fibre Channel
 *          running LSI Fusion MPT (Message Passing Technology) firmware.
 *
- *  Copyright (c) 1999-2007 LSI Corporation
+ *  Copyright (c) 1999-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */


@@ -3,7 +3,7 @@
 *      For use with LSI PCI chip/adapter(s)
 *      running LSI Fusion MPT (Message Passing Technology) firmware.
 *
- *  Copyright (c) 1999-2007 LSI Corporation
+ *  Copyright (c) 1999-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */
@@ -447,6 +447,7 @@ static int mptspi_target_alloc(struct scsi_target *starget)
 	spi_max_offset(starget) = ioc->spi_data.maxSyncOffset;

 	spi_offset(starget) = 0;
+	spi_period(starget) = 0xFF;
 	mptspi_write_width(starget, 0);

 	return 0;


@@ -3,7 +3,6 @@
 #

 zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \
-	     zfcp_fsf.o zfcp_dbf.o zfcp_sysfs_adapter.o zfcp_sysfs_port.o \
-	     zfcp_sysfs_unit.o zfcp_sysfs_driver.o
+	     zfcp_fsf.o zfcp_dbf.o zfcp_sysfs.o zfcp_fc.o zfcp_cfdc.o

 obj-$(CONFIG_ZFCP) += zfcp.o

(File diff suppressed because it is too large.)


@@ -1,64 +1,13 @@
 /*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
+ * zfcp device driver
 *
- * (C) Copyright IBM Corp. 2002, 2006
+ * Registration and callback for the s390 common I/O layer.
 *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright IBM Corporation 2002, 2008
 */

 #include "zfcp_ext.h"

-#define ZFCP_LOG_AREA                   ZFCP_LOG_AREA_CONFIG
-
-static int zfcp_ccw_probe(struct ccw_device *);
-static void zfcp_ccw_remove(struct ccw_device *);
-static int zfcp_ccw_set_online(struct ccw_device *);
-static int zfcp_ccw_set_offline(struct ccw_device *);
-static int zfcp_ccw_notify(struct ccw_device *, int);
-static void zfcp_ccw_shutdown(struct ccw_device *);
-
-static struct ccw_device_id zfcp_ccw_device_id[] = {
-	{CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE,
-			    ZFCP_CONTROL_UNIT_MODEL,
-			    ZFCP_DEVICE_TYPE,
-			    ZFCP_DEVICE_MODEL)},
-	{CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE,
-			    ZFCP_CONTROL_UNIT_MODEL,
-			    ZFCP_DEVICE_TYPE,
-			    ZFCP_DEVICE_MODEL_PRIV)},
-	{},
-};
-
-static struct ccw_driver zfcp_ccw_driver = {
-	.owner       = THIS_MODULE,
-	.name        = ZFCP_NAME,
-	.ids         = zfcp_ccw_device_id,
-	.probe       = zfcp_ccw_probe,
-	.remove      = zfcp_ccw_remove,
-	.set_online  = zfcp_ccw_set_online,
-	.set_offline = zfcp_ccw_set_offline,
-	.notify      = zfcp_ccw_notify,
-	.shutdown    = zfcp_ccw_shutdown,
-	.driver      = {
-		.groups = zfcp_driver_attr_groups,
-	},
-};
-
-MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
-
 /**
 * zfcp_ccw_probe - probe function of zfcp driver
 * @ccw_device: pointer to belonging ccw device
@@ -69,19 +18,16 @@ MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
 * In addition the nameserver port will be added to the ports of the adapter
 * and its sysfs representation will be created too.
 */
-static int
-zfcp_ccw_probe(struct ccw_device *ccw_device)
+static int zfcp_ccw_probe(struct ccw_device *ccw_device)
 {
-	struct zfcp_adapter *adapter;
 	int retval = 0;

 	down(&zfcp_data.config_sema);
-	adapter = zfcp_adapter_enqueue(ccw_device);
-	if (!adapter)
+	if (zfcp_adapter_enqueue(ccw_device)) {
+		dev_err(&ccw_device->dev,
+			"Setup of data structures failed.\n");
 		retval = -EINVAL;
-	else
-		ZFCP_LOG_DEBUG("Probed adapter %s\n",
-			       zfcp_get_busid_by_adapter(adapter));
+	}
 	up(&zfcp_data.config_sema);
 	return retval;
 }
@@ -95,8 +41,7 @@ zfcp_ccw_probe(struct ccw_device *ccw_device)
 * ports that belong to this adapter. And in addition all resources of this
 * adapter will be freed too.
 */
-static void
-zfcp_ccw_remove(struct ccw_device *ccw_device)
+static void zfcp_ccw_remove(struct ccw_device *ccw_device)
 {
 	struct zfcp_adapter *adapter;
 	struct zfcp_port *port, *p;
@@ -106,8 +51,6 @@ zfcp_ccw_remove(struct ccw_device *ccw_device)

 	down(&zfcp_data.config_sema);
 	adapter = dev_get_drvdata(&ccw_device->dev);
-	ZFCP_LOG_DEBUG("Removing adapter %s\n",
-		       zfcp_get_busid_by_adapter(adapter));
 	write_lock_irq(&zfcp_data.config_lock);
 	list_for_each_entry_safe(port, p, &adapter->port_list_head, list) {
 		list_for_each_entry_safe(unit, u, &port->unit_list_head, list) {
@@ -145,8 +88,7 @@ zfcp_ccw_remove(struct ccw_device *ccw_device)
 * registered with the SCSI stack, that the QDIO queues will be set up
 * and that the adapter will be opened (asynchronously).
 */
-static int
-zfcp_ccw_set_online(struct ccw_device *ccw_device)
+static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
 {
 	struct zfcp_adapter *adapter;
 	int retval;
@@ -155,12 +97,8 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device)
 	adapter = dev_get_drvdata(&ccw_device->dev);

 	retval = zfcp_erp_thread_setup(adapter);
-	if (retval) {
-		ZFCP_LOG_INFO("error: start of error recovery thread for "
-			      "adapter %s failed\n",
-			      zfcp_get_busid_by_adapter(adapter));
+	if (retval)
 		goto out;
-	}

 	retval = zfcp_adapter_scsi_register(adapter);
 	if (retval)
@@ -191,8 +129,7 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device)
 * This function gets called by the common i/o layer and sets an adapter
 * into state offline.
 */
-static int
-zfcp_ccw_set_offline(struct ccw_device *ccw_device)
+static int zfcp_ccw_set_offline(struct ccw_device *ccw_device)
 {
 	struct zfcp_adapter *adapter;
} }
/** /**
* zfcp_ccw_notify * zfcp_ccw_notify - ccw notify function
* @ccw_device: pointer to belonging ccw device * @ccw_device: pointer to belonging ccw device
* @event: indicates if adapter was detached or attached * @event: indicates if adapter was detached or attached
* *
* This function gets called by the common i/o layer if an adapter has gone * This function gets called by the common i/o layer if an adapter has gone
* or reappeared. * or reappeared.
*/ */
static int static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
{ {
struct zfcp_adapter *adapter; struct zfcp_adapter *adapter;
@@ -222,18 +158,15 @@ zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
 	adapter = dev_get_drvdata(&ccw_device->dev);
 	switch (event) {
 	case CIO_GONE:
-		ZFCP_LOG_NORMAL("adapter %s: device gone\n",
-				zfcp_get_busid_by_adapter(adapter));
+		dev_warn(&adapter->ccw_device->dev, "device gone\n");
 		zfcp_erp_adapter_shutdown(adapter, 0, 87, NULL);
 		break;
 	case CIO_NO_PATH:
-		ZFCP_LOG_NORMAL("adapter %s: no path\n",
-				zfcp_get_busid_by_adapter(adapter));
+		dev_warn(&adapter->ccw_device->dev, "no path\n");
 		zfcp_erp_adapter_shutdown(adapter, 0, 88, NULL);
 		break;
 	case CIO_OPER:
-		ZFCP_LOG_NORMAL("adapter %s: operational again\n",
-				zfcp_get_busid_by_adapter(adapter));
+		dev_info(&adapter->ccw_device->dev, "operational again\n");
 		zfcp_erp_modify_adapter_status(adapter, 11, NULL,
 					       ZFCP_STATUS_COMMON_RUNNING,
 					       ZFCP_SET);
@@ -247,24 +180,10 @@ zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
 }

 /**
- * zfcp_ccw_register - ccw register function
- *
- * Registers the driver at the common i/o layer. This function will be called
- * at module load time/system start.
+ * zfcp_ccw_shutdown - handle shutdown from cio
+ * @cdev: device for adapter to shutdown.
 */
-int __init
-zfcp_ccw_register(void)
-{
-	return ccw_driver_register(&zfcp_ccw_driver);
-}
-
-/**
- * zfcp_ccw_shutdown - gets called on reboot/shutdown
- *
- * Makes sure that QDIO queues are down when the system gets stopped.
- */
-static void
-zfcp_ccw_shutdown(struct ccw_device *cdev)
+static void zfcp_ccw_shutdown(struct ccw_device *cdev)
 {
 	struct zfcp_adapter *adapter;
@@ -275,4 +194,33 @@ zfcp_ccw_shutdown(struct ccw_device *cdev)
 	up(&zfcp_data.config_sema);
 }

-#undef ZFCP_LOG_AREA
+static struct ccw_device_id zfcp_ccw_device_id[] = {
+	{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
+	{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x4) }, /* priv. */
+	{},
+};
+
+MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
+
+static struct ccw_driver zfcp_ccw_driver = {
+	.owner       = THIS_MODULE,
+	.name        = "zfcp",
+	.ids         = zfcp_ccw_device_id,
+	.probe       = zfcp_ccw_probe,
+	.remove      = zfcp_ccw_remove,
+	.set_online  = zfcp_ccw_set_online,
+	.set_offline = zfcp_ccw_set_offline,
+	.notify      = zfcp_ccw_notify,
+	.shutdown    = zfcp_ccw_shutdown,
+};
+
+/**
+ * zfcp_ccw_register - ccw register function
+ *
+ * Registers the driver at the common i/o layer. This function will be called
+ * at module load time/system start.
+ */
+int __init zfcp_ccw_register(void)
+{
+	return ccw_driver_register(&zfcp_ccw_driver);
+}


@@ -0,0 +1,259 @@ (new file)
/*
* zfcp device driver
*
* Userspace interface for accessing the
* Access Control Lists / Control File Data Channel
*
* Copyright IBM Corporation 2008
*/
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <asm/ccwdev.h>
#include "zfcp_def.h"
#include "zfcp_ext.h"
#include "zfcp_fsf.h"
#define ZFCP_CFDC_CMND_DOWNLOAD_NORMAL 0x00010001
#define ZFCP_CFDC_CMND_DOWNLOAD_FORCE 0x00010101
#define ZFCP_CFDC_CMND_FULL_ACCESS 0x00000201
#define ZFCP_CFDC_CMND_RESTRICTED_ACCESS 0x00000401
#define ZFCP_CFDC_CMND_UPLOAD 0x00010002
#define ZFCP_CFDC_DOWNLOAD 0x00000001
#define ZFCP_CFDC_UPLOAD 0x00000002
#define ZFCP_CFDC_WITH_CONTROL_FILE 0x00010000
#define ZFCP_CFDC_IOC_MAGIC 0xDD
#define ZFCP_CFDC_IOC \
_IOWR(ZFCP_CFDC_IOC_MAGIC, 0, struct zfcp_cfdc_data)
/**
* struct zfcp_cfdc_data - data for ioctl cfdc interface
* @signature: request signature
* @devno: FCP adapter device number
* @command: command code
* @fsf_status: returns status of FSF command to userspace
* @fsf_status_qual: returned to userspace
* @payloads: access conflicts list
* @control_file: access control table
*/
struct zfcp_cfdc_data {
u32 signature;
u32 devno;
u32 command;
u32 fsf_status;
u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
u8 payloads[256];
u8 control_file[0];
};
static int zfcp_cfdc_copy_from_user(struct scatterlist *sg,
void __user *user_buffer)
{
unsigned int length;
unsigned int size = ZFCP_CFDC_MAX_SIZE;
while (size) {
length = min((unsigned int)size, sg->length);
if (copy_from_user(sg_virt(sg++), user_buffer, length))
return -EFAULT;
user_buffer += length;
size -= length;
}
return 0;
}
static int zfcp_cfdc_copy_to_user(void __user *user_buffer,
struct scatterlist *sg)
{
unsigned int length;
unsigned int size = ZFCP_CFDC_MAX_SIZE;
while (size) {
length = min((unsigned int) size, sg->length);
if (copy_to_user(user_buffer, sg_virt(sg++), length))
return -EFAULT;
user_buffer += length;
size -= length;
}
return 0;
}
static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno)
{
struct zfcp_adapter *adapter = NULL, *cur_adapter;
struct ccw_dev_id dev_id;
read_lock_irq(&zfcp_data.config_lock);
list_for_each_entry(cur_adapter, &zfcp_data.adapter_list_head, list) {
ccw_device_get_id(cur_adapter->ccw_device, &dev_id);
if (dev_id.devno == devno) {
adapter = cur_adapter;
zfcp_adapter_get(adapter);
break;
}
}
read_unlock_irq(&zfcp_data.config_lock);
return adapter;
}
static int zfcp_cfdc_set_fsf(struct zfcp_fsf_cfdc *fsf_cfdc, int command)
{
switch (command) {
case ZFCP_CFDC_CMND_DOWNLOAD_NORMAL:
fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
fsf_cfdc->option = FSF_CFDC_OPTION_NORMAL_MODE;
break;
case ZFCP_CFDC_CMND_DOWNLOAD_FORCE:
fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
fsf_cfdc->option = FSF_CFDC_OPTION_FORCE;
break;
case ZFCP_CFDC_CMND_FULL_ACCESS:
fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
fsf_cfdc->option = FSF_CFDC_OPTION_FULL_ACCESS;
break;
case ZFCP_CFDC_CMND_RESTRICTED_ACCESS:
fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
fsf_cfdc->option = FSF_CFDC_OPTION_RESTRICTED_ACCESS;
break;
case ZFCP_CFDC_CMND_UPLOAD:
fsf_cfdc->command = FSF_QTCB_UPLOAD_CONTROL_FILE;
fsf_cfdc->option = 0;
break;
default:
return -EINVAL;
}
return 0;
}
static int zfcp_cfdc_sg_setup(int command, struct scatterlist *sg,
u8 __user *control_file)
{
int retval;
retval = zfcp_sg_setup_table(sg, ZFCP_CFDC_PAGES);
if (retval)
return retval;
sg[ZFCP_CFDC_PAGES - 1].length = ZFCP_CFDC_MAX_SIZE % PAGE_SIZE;
if (command & ZFCP_CFDC_WITH_CONTROL_FILE &&
command & ZFCP_CFDC_DOWNLOAD) {
retval = zfcp_cfdc_copy_from_user(sg, control_file);
if (retval) {
zfcp_sg_free_table(sg, ZFCP_CFDC_PAGES);
return -EFAULT;
}
}
return 0;
}
static void zfcp_cfdc_req_to_sense(struct zfcp_cfdc_data *data,
struct zfcp_fsf_req *req)
{
data->fsf_status = req->qtcb->header.fsf_status;
memcpy(&data->fsf_status_qual, &req->qtcb->header.fsf_status_qual,
sizeof(union fsf_status_qual));
memcpy(&data->payloads, &req->qtcb->bottom.support.els,
sizeof(req->qtcb->bottom.support.els));
}
static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
unsigned long buffer)
{
struct zfcp_cfdc_data *data;
struct zfcp_cfdc_data __user *data_user;
struct zfcp_adapter *adapter;
struct zfcp_fsf_req *req;
struct zfcp_fsf_cfdc *fsf_cfdc;
int retval;
if (command != ZFCP_CFDC_IOC)
return -ENOTTY;
data_user = (void __user *) buffer;
if (!data_user)
return -EINVAL;
fsf_cfdc = kmalloc(sizeof(struct zfcp_fsf_cfdc), GFP_KERNEL);
if (!fsf_cfdc)
return -ENOMEM;
data = kmalloc(sizeof(struct zfcp_cfdc_data), GFP_KERNEL);
if (!data) {
retval = -ENOMEM;
goto no_mem_sense;
}
retval = copy_from_user(data, data_user, sizeof(*data));
if (retval) {
retval = -EFAULT;
goto free_buffer;
}
if (data->signature != 0xCFDCACDF) {
retval = -EINVAL;
goto free_buffer;
}
retval = zfcp_cfdc_set_fsf(fsf_cfdc, data->command);
adapter = zfcp_cfdc_get_adapter(data->devno);
if (!adapter) {
retval = -ENXIO;
goto free_buffer;
}
retval = zfcp_cfdc_sg_setup(data->command, fsf_cfdc->sg,
data_user->control_file);
if (retval)
goto adapter_put;
req = zfcp_fsf_control_file(adapter, fsf_cfdc);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto free_sg;
}
if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
retval = -ENXIO;
goto free_fsf;
}
zfcp_cfdc_req_to_sense(data, req);
retval = copy_to_user(data_user, data, sizeof(*data_user));
if (retval) {
retval = -EFAULT;
goto free_fsf;
}
if (data->command & ZFCP_CFDC_UPLOAD)
retval = zfcp_cfdc_copy_to_user(&data_user->control_file,
fsf_cfdc->sg);
free_fsf:
zfcp_fsf_req_free(req);
free_sg:
zfcp_sg_free_table(fsf_cfdc->sg, ZFCP_CFDC_PAGES);
adapter_put:
zfcp_adapter_put(adapter);
free_buffer:
kfree(data);
no_mem_sense:
kfree(fsf_cfdc);
return retval;
}
static const struct file_operations zfcp_cfdc_fops = {
.unlocked_ioctl = zfcp_cfdc_dev_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = zfcp_cfdc_dev_ioctl
#endif
};
struct miscdevice zfcp_cfdc_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "zfcp_cfdc",
.fops = &zfcp_cfdc_fops,
};
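For the misc device registered above, userspace would reach the ioctl roughly as follows. This is a sketch under stated assumptions, not part of the patch: the userspace structure mirrors struct zfcp_cfdc_data above (including the flexible control_file member, so the _IOWR size encoding matches), the node appears as /dev/zfcp_cfdc per the miscdevice name, the 127 kB control-file bound from the old zfcp_def.h still applies, and the devno value is a placeholder.

#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct zfcp_cfdc_data {
	uint32_t signature;
	uint32_t devno;
	uint32_t command;
	uint32_t fsf_status;
	uint8_t  fsf_status_qual[16];	/* assumed FSF_STATUS_QUALIFIER_SIZE */
	uint8_t  payloads[256];
	uint8_t  control_file[];	/* flexible member, as in the driver */
};

#define ZFCP_CFDC_IOC		_IOWR(0xDD, 0, struct zfcp_cfdc_data)
#define ZFCP_CFDC_CMND_UPLOAD	0x00010002

int main(void)
{
	size_t cf_size = 127 * 1024;	/* assumed control-file upper bound */
	struct zfcp_cfdc_data *data = calloc(1, sizeof(*data) + cf_size);
	int fd, err = 1;

	if (!data)
		return 1;
	fd = open("/dev/zfcp_cfdc", O_RDWR);
	if (fd >= 0) {
		data->signature = 0xCFDCACDF;	/* checked by the driver */
		data->devno = 0x1234;		/* placeholder adapter devno */
		data->command = ZFCP_CFDC_CMND_UPLOAD;
		err = ioctl(fd, ZFCP_CFDC_IOC, data) < 0;
		close(fd);
	}
	free(data);
	return err;
}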


@@ -1,22 +1,9 @@
 /*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
+ * zfcp device driver
 *
- * (C) Copyright IBM Corp. 2002, 2006
+ * Debug traces for zfcp.
 *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright IBM Corporation 2002, 2008
 */

 #include <linux/ctype.h>
@@ -29,8 +16,6 @@ module_param(dbfsize, uint, 0400);
 MODULE_PARM_DESC(dbfsize,
		 "number of pages for each debug feature area (default 4)");

-#define ZFCP_LOG_AREA			ZFCP_LOG_AREA_OTHER
-
 static void zfcp_dbf_hexdump(debug_info_t *dbf, void *to, int to_len,
			     int level, char *from, int from_len)
 {
@@ -186,8 +171,8 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
			fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
	response->fsf_req_status = fsf_req->status;
	response->sbal_first = fsf_req->sbal_first;
-	response->sbal_curr = fsf_req->sbal_curr;
	response->sbal_last = fsf_req->sbal_last;
+	response->sbal_response = fsf_req->sbal_response;
	response->pool = fsf_req->pool != NULL;
	response->erp_action = (unsigned long)fsf_req->erp_action;
@@ -268,7 +253,7 @@ void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
	strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE);
	strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE);

-	rec->u.status.failed = adapter->status_read_failed;
+	rec->u.status.failed = atomic_read(&adapter->stat_miss);
	if (status_buffer != NULL) {
		rec->u.status.status_type = status_buffer->status_type;
		rec->u.status.status_subtype = status_buffer->status_subtype;
@@ -355,8 +340,8 @@ static void zfcp_hba_dbf_view_response(char **p,
		      FSF_STATUS_QUALIFIER_SIZE, 0, FSF_STATUS_QUALIFIER_SIZE);
	zfcp_dbf_out(p, "fsf_req_status", "0x%08x", r->fsf_req_status);
	zfcp_dbf_out(p, "sbal_first", "0x%02x", r->sbal_first);
-	zfcp_dbf_out(p, "sbal_curr", "0x%02x", r->sbal_curr);
	zfcp_dbf_out(p, "sbal_last", "0x%02x", r->sbal_last);
+	zfcp_dbf_out(p, "sbal_response", "0x%02x", r->sbal_response);
	zfcp_dbf_out(p, "pool", "0x%02x", r->pool);

	switch (r->fsf_command) {
@@ -515,13 +500,13 @@ static const char *zfcp_rec_dbf_ids[] = {
	[52]	= "port boxed close unit",
	[53]	= "port boxed fcp",
	[54]	= "unit boxed fcp",
-	[55]	= "port access denied ct",
-	[56]	= "port access denied els",
-	[57]	= "port access denied open port",
-	[58]	= "port access denied close physical",
-	[59]	= "unit access denied open unit",
+	[55]	= "port access denied",
+	[56]	= "",
+	[57]	= "",
+	[58]	= "",
+	[59]	= "unit access denied",
	[60]	= "shared unit access denied open unit",
-	[61]	= "unit access denied fcp",
+	[61]	= "",
	[62]	= "request timeout",
	[63]	= "adisc link test reject or timeout",
	[64]	= "adisc link test d_id changed",
@@ -546,8 +531,8 @@ static const char *zfcp_rec_dbf_ids[] = {
	[80]	= "exclusive read-only unit access unsupported",
	[81]	= "shared read-write unit access unsupported",
	[82]	= "incoming rscn",
-	[83]	= "incoming plogi",
-	[84]	= "incoming logo",
+	[83]	= "incoming wwpn",
+	[84]	= "",
	[85]	= "online",
	[86]	= "offline",
	[87]	= "ccw device gone",
@@ -586,8 +571,8 @@ static const char *zfcp_rec_dbf_ids[] = {
	[120]	= "unknown fsf command",
	[121]	= "no recommendation for status qualifier",
	[122]	= "status read physical port closed in error",
-	[123]	= "fc service class not supported ct",
-	[124]	= "fc service class not supported els",
+	[123]	= "fc service class not supported",
+	[124]	= "",
	[125]	= "need newer zfcp",
	[126]	= "need newer microcode",
	[127]	= "arbitrated loop not supported",
@@ -595,7 +580,7 @@ static const char *zfcp_rec_dbf_ids[] = {
	[129]	= "qtcb size mismatch",
	[130]	= "unknown fsf status ecd",
	[131]	= "fcp request too big",
-	[132]	= "fc service class not supported fcp",
+	[132]	= "",
	[133]	= "data direction not valid fcp",
	[134]	= "command length not valid fcp",
	[135]	= "status read act update",
@@ -603,13 +588,18 @@ static const char *zfcp_rec_dbf_ids[] = {
	[137]	= "hbaapi port open",
	[138]	= "hbaapi unit open",
	[139]	= "hbaapi unit shutdown",
-	[140]	= "qdio error",
+	[140]	= "qdio error outbound",
	[141]	= "scsi host reset",
	[142]	= "dismissing fsf request for recovery action",
	[143]	= "recovery action timed out",
	[144]	= "recovery action gone",
	[145]	= "recovery action being processed",
	[146]	= "recovery action ready for next step",
+	[147]	= "qdio error inbound",
+	[148]	= "nameserver needed for port scan",
+	[149]	= "port scan",
+	[150]	= "ptp attach",
+	[151]	= "port validation failed",
 };

 static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view,
@@ -670,24 +660,20 @@ static struct debug_view zfcp_rec_dbf_view = {
 * zfcp_rec_dbf_event_thread - trace event related to recovery thread operation
 * @id2: identifier for event
 * @adapter: adapter
- * @lock: non-zero value indicates that erp_lock has not yet been acquired
+ * This function assumes that the caller is holding erp_lock.
 */
-void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter, int lock)
+void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter)
 {
	struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
	unsigned long flags = 0;
	struct list_head *entry;
	unsigned ready = 0, running = 0, total;

-	if (lock)
-		read_lock_irqsave(&adapter->erp_lock, flags);
	list_for_each(entry, &adapter->erp_ready_head)
		ready++;
	list_for_each(entry, &adapter->erp_running_head)
		running++;
	total = adapter->erp_total_count;
-	if (lock)
-		read_unlock_irqrestore(&adapter->erp_lock, flags);

	spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
	memset(r, 0, sizeof(*r));
@@ -696,10 +682,25 @@ void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter)
	r->u.thread.total = total;
	r->u.thread.ready = ready;
	r->u.thread.running = running;
-	debug_event(adapter->rec_dbf, 5, r, sizeof(*r));
+	debug_event(adapter->rec_dbf, 6, r, sizeof(*r));
	spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
 }
+
+/**
+ * zfcp_rec_dbf_event_thread_lock - trace event related to recovery thread operation
+ * @id2: identifier for event
+ * @adapter: adapter
+ * This function assumes that the caller does not hold erp_lock.
+ */
+void zfcp_rec_dbf_event_thread_lock(u8 id2, struct zfcp_adapter *adapter)
+{
+	unsigned long flags;
+
+	read_lock_irqsave(&adapter->erp_lock, flags);
+	zfcp_rec_dbf_event_thread(id2, adapter);
+	read_unlock_irqrestore(&adapter->erp_lock, flags);
+}
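zfcp_rec_dbf_event_thread and its _lock variant illustrate a common kernel convention: the plain function documents a locking precondition, and a *_lock() wrapper exists for callers that do not yet hold the lock. Reduced to a generic sketch, with all names invented for illustration:

#include <linux/list.h>
#include <linux/spinlock.h>

struct counted_list {
	rwlock_t lock;
	struct list_head head;
};

/* Caller must hold ->lock (reader side is enough). */
static unsigned count_entries(struct counted_list *cl)
{
	struct list_head *entry;
	unsigned n = 0;

	list_for_each(entry, &cl->head)
		n++;
	return n;
}

/* Takes ->lock itself, then delegates to the unlocked helper. */
static unsigned count_entries_lock(struct counted_list *cl)
{
	unsigned long flags;
	unsigned n;

	read_lock_irqsave(&cl->lock, flags);
	n = count_entries(cl);
	read_unlock_irqrestore(&cl->lock, flags);
	return n;
}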

 static void zfcp_rec_dbf_event_target(u8 id2, void *ref,
				       struct zfcp_adapter *adapter,
				       atomic_t *status, atomic_t *erp_count,
@@ -823,7 +824,7 @@ void zfcp_rec_dbf_event_action(u8 id2, struct zfcp_erp_action *erp_action)
	r->u.action.status = erp_action->status;
	r->u.action.step = erp_action->step;
	r->u.action.fsf_req = (unsigned long)erp_action->fsf_req;
-	debug_event(adapter->rec_dbf, 4, r, sizeof(*r));
+	debug_event(adapter->rec_dbf, 5, r, sizeof(*r));
	spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
 }
@@ -960,7 +961,7 @@ void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req)
	zfcp_san_dbf_event_els("iels", 1, fsf_req, buf->d_id,
			       fc_host_port_id(adapter->scsi_host),
-			       *(u8 *)buf->payload, (void *)buf->payload,
+			       buf->payload.data[0], (void *)buf->payload.data,
			       length);
 }
@@ -1064,8 +1065,7 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level,
	if (fsf_req != NULL) {
		fcp_rsp = (struct fcp_rsp_iu *)
		    &(fsf_req->qtcb->bottom.io.fcp_rsp);
-		fcp_rsp_info =
-		    zfcp_get_fcp_rsp_info_ptr(fcp_rsp);
+		fcp_rsp_info = (unsigned char *) &fcp_rsp[1];
		fcp_sns_info =
		    zfcp_get_fcp_sns_info_ptr(fcp_rsp);
@@ -1279,5 +1279,3 @@ void zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter)
	adapter->hba_dbf = NULL;
	adapter->rec_dbf = NULL;
 }
-
-#undef ZFCP_LOG_AREA


@@ -38,7 +38,7 @@ struct zfcp_rec_dbf_record_thread {
	u32 total;
	u32 ready;
	u32 running;
-} __attribute__ ((packed));
+};

 struct zfcp_rec_dbf_record_target {
	u64 ref;
@@ -47,7 +47,7 @@ struct zfcp_rec_dbf_record_target {
	u64 wwpn;
	u64 fcp_lun;
	u32 erp_count;
-} __attribute__ ((packed));
+};

 struct zfcp_rec_dbf_record_trigger {
	u8 want;
@@ -59,14 +59,14 @@ struct zfcp_rec_dbf_record_trigger {
	u64 action;
	u64 wwpn;
	u64 fcp_lun;
-} __attribute__ ((packed));
+};

 struct zfcp_rec_dbf_record_action {
	u32 status;
	u32 step;
	u64 action;
	u64 fsf_req;
-} __attribute__ ((packed));
+};

 struct zfcp_rec_dbf_record {
	u8 id;
@@ -77,7 +77,7 @@ struct zfcp_rec_dbf_record {
		struct zfcp_rec_dbf_record_target target;
		struct zfcp_rec_dbf_record_trigger trigger;
	} u;
-} __attribute__ ((packed));
+};

 enum {
	ZFCP_REC_DBF_ID_ACTION,
@@ -97,8 +97,8 @@ struct zfcp_hba_dbf_record_response {
	u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
	u32 fsf_req_status;
	u8 sbal_first;
-	u8 sbal_curr;
	u8 sbal_last;
+	u8 sbal_response;
	u8 pool;
	u64 erp_action;
	union {


@@ -1,22 +1,9 @@
 /*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
+ * zfcp device driver
 *
- * (C) Copyright IBM Corp. 2002, 2006
+ * Global definitions for the zfcp device driver.
 *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright IBM Corporation 2002, 2008
 */

 #ifndef ZFCP_DEF_H
@@ -26,7 +13,6 @@
 #include <linux/init.h>
 #include <linux/moduleparam.h>
-#include <linux/miscdevice.h>
 #include <linux/major.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
@@ -53,9 +39,6 @@

 /********************* GENERAL DEFINES *********************************/

-/* zfcp version number, it consists of major, minor, and patch-level number */
-#define ZFCP_VERSION		"4.8.0"
-
 /**
 * zfcp_sg_to_address - determine kernel address from struct scatterlist
 * @list: struct scatterlist
@@ -93,11 +76,6 @@ zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
 #define ZFCP_DEVICE_MODEL       0x03
 #define ZFCP_DEVICE_MODEL_PRIV	0x04

-/* allow as many chained SBALs as are supported by hardware */
-#define ZFCP_MAX_SBALS_PER_REQ		FSF_MAX_SBALS_PER_REQ
-#define ZFCP_MAX_SBALS_PER_CT_REQ	FSF_MAX_SBALS_PER_REQ
-#define ZFCP_MAX_SBALS_PER_ELS_REQ	FSF_MAX_SBALS_PER_ELS_REQ
-
 /* DMQ bug workaround: don't use last SBALE */
 #define ZFCP_MAX_SBALES_PER_SBAL	(QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
@@ -106,42 +84,17 @@ zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)

 /* max. number of (data buffer) SBALEs in largest SBAL chain */
 #define ZFCP_MAX_SBALES_PER_REQ		\
-	(ZFCP_MAX_SBALS_PER_REQ * ZFCP_MAX_SBALES_PER_SBAL - 2)
+	(FSF_MAX_SBALS_PER_REQ * ZFCP_MAX_SBALES_PER_SBAL - 2)
	/* request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */

 #define ZFCP_MAX_SECTORS (ZFCP_MAX_SBALES_PER_REQ * 8)
	/* max. number of (data buffer) SBALEs in largest SBAL chain
	   multiplied with number of sectors per 4k block */

-/* FIXME(tune): free space should be one max. SBAL chain plus what? */
-#define ZFCP_QDIO_PCI_INTERVAL		(QDIO_MAX_BUFFERS_PER_Q \
-					 - (ZFCP_MAX_SBALS_PER_REQ + 4))
-
-#define ZFCP_SBAL_TIMEOUT               (5*HZ)
-
-#define ZFCP_TYPE2_RECOVERY_TIME        8	/* seconds */
-
-/* queue polling (values in microseconds) */
-#define ZFCP_MAX_INPUT_THRESHOLD 	5000	/* FIXME: tune */
-#define ZFCP_MAX_OUTPUT_THRESHOLD 	1000	/* FIXME: tune */
-#define ZFCP_MIN_INPUT_THRESHOLD 	1	/* ignored by QDIO layer */
-#define ZFCP_MIN_OUTPUT_THRESHOLD 	1	/* ignored by QDIO layer */
-
-#define QDIO_SCSI_QFMT			1	/* 1 for FSF */
-#define QBUFF_PER_PAGE			(PAGE_SIZE / sizeof(struct qdio_buffer))
-
 /********************* FSF SPECIFIC DEFINES *********************************/

-#define ZFCP_ULP_INFO_VERSION                   26
-#define ZFCP_QTCB_VERSION	FSF_QTCB_CURRENT_VERSION
 /* ATTENTION: value must not be used by hardware */
 #define FSF_QTCB_UNSOLICITED_STATUS		0x6305
-#define ZFCP_STATUS_READ_FAILED_THRESHOLD	3
-#define ZFCP_STATUS_READS_RECOM			FSF_STATUS_READS_RECOM
-
-/* Do 1st retry in 1 second, then double the timeout for each following retry */
-#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP	1
-#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES	7

 /* timeout value for "default timer" for fsf requests */
 #define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
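To make the SBALE sizing above concrete, a worked example follows; the two input values are assumptions about the FSF/QDIO headers of this vintage, not taken from this diff:

/*
 * Assumed inputs:
 *   QDIO_MAX_ELEMENTS_PER_BUFFER = 16  ->  ZFCP_MAX_SBALES_PER_SBAL = 15
 *   FSF_MAX_SBALS_PER_REQ        = 36
 *
 * Then:
 *   ZFCP_MAX_SBALES_PER_REQ = 36 * 15 - 2 = 538
 *       (the "- 2" pays for the request ID and QTCB occupying
 *        SBALEs 0 and 1 of the first SBAL in the chain)
 *   ZFCP_MAX_SECTORS        = 538 * 8     = 4304
 *       (eight 512-byte sectors per 4k page-sized SBALE)
 */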
@@ -153,17 +106,9 @@ typedef unsigned long long fcp_lun_t;
 /* data length field may be at variable position in FCP-2 FCP_CMND IU */
 typedef unsigned int       fcp_dl_t;

-#define ZFCP_FC_SERVICE_CLASS_DEFAULT	FSF_CLASS_3
-
 /* timeout for name-server lookup (in seconds) */
 #define ZFCP_NS_GID_PN_TIMEOUT 10

-/* largest SCSI command we can process */
-/* FCP-2 (FCP_CMND IU) allows up to (255-3+16) */
-#define ZFCP_MAX_SCSI_CMND_LENGTH	255
-/* maximum number of commands in LUN queue (tagged queueing) */
-#define ZFCP_CMND_PER_LUN               32
-
 /* task attribute values in FCP-2 FCP_CMND IU */
 #define SIMPLE_Q	0
 #define HEAD_OF_Q	1
@@ -224,9 +169,9 @@ struct fcp_rsp_iu {
 #define RSP_CODE_TASKMAN_FAILED		 5

 /* see fc-fs */
-#define LS_RSCN  0x61040000
-#define LS_LOGO  0x05000000
-#define LS_PLOGI 0x03000000
+#define LS_RSCN  0x61
+#define LS_LOGO  0x05
+#define LS_PLOGI 0x03

 struct fcp_rscn_head {
	u8  command;
@@ -266,7 +211,6 @@ struct fcp_logo {
 * FC-FS stuff
 */
 #define R_A_TOV				10 /* seconds */
-#define ZFCP_ELS_TIMEOUT		(2 * R_A_TOV)

 #define ZFCP_LS_RLS			0x0f
 #define ZFCP_LS_ADISC			0x52
@@ -311,7 +255,10 @@ struct zfcp_rc_entry {
 #define ZFCP_CT_DIRECTORY_SERVICE	0xFC
 #define ZFCP_CT_NAME_SERVER		0x02
 #define ZFCP_CT_SYNCHRONOUS		0x00
+#define ZFCP_CT_SCSI_FCP		0x08
+#define ZFCP_CT_UNABLE_TO_PERFORM_CMD	0x09
 #define ZFCP_CT_GID_PN			0x0121
+#define ZFCP_CT_GPN_FT			0x0172
 #define ZFCP_CT_MAX_SIZE		0x1020
 #define ZFCP_CT_ACCEPT			0x8002
 #define ZFCP_CT_REJECT			0x8001
@@ -321,107 +268,6 @@ struct zfcp_rc_entry {
 */
 #define ZFCP_CT_TIMEOUT			(3 * R_A_TOV)

-/******************** LOGGING MACROS AND DEFINES *****************************/
-
-/*
- * Logging may be applied on certain kinds of driver operations
- * independently. Additionally, different log-levels are supported for
- * each of these areas.
- */
-
-#define ZFCP_NAME               "zfcp"
-
-/* independent log areas */
-#define ZFCP_LOG_AREA_OTHER	0
-#define ZFCP_LOG_AREA_SCSI	1
-#define ZFCP_LOG_AREA_FSF	2
-#define ZFCP_LOG_AREA_CONFIG	3
-#define ZFCP_LOG_AREA_CIO	4
-#define ZFCP_LOG_AREA_QDIO	5
-#define ZFCP_LOG_AREA_ERP	6
-#define ZFCP_LOG_AREA_FC	7
-
-/* log level values*/
-#define ZFCP_LOG_LEVEL_NORMAL	0
-#define ZFCP_LOG_LEVEL_INFO	1
-#define ZFCP_LOG_LEVEL_DEBUG	2
-#define ZFCP_LOG_LEVEL_TRACE	3
-
-/*
- * this allows removal of logging code by the preprocessor
- * (the most detailed log level still to be compiled in is specified,
- * higher log levels are removed)
- */
-#define ZFCP_LOG_LEVEL_LIMIT	ZFCP_LOG_LEVEL_TRACE
-
-/* get "loglevel" nibble assignment */
-#define ZFCP_GET_LOG_VALUE(zfcp_lognibble) \
-	       ((atomic_read(&zfcp_data.loglevel) >> (zfcp_lognibble<<2)) & 0xF)
-
-/* set "loglevel" nibble */
-#define ZFCP_SET_LOG_NIBBLE(value, zfcp_lognibble) \
-	       (value << (zfcp_lognibble << 2))
-
-/* all log-level defaults are combined to generate initial log-level */
-#define ZFCP_LOG_LEVEL_DEFAULTS \
-	(ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_OTHER) | \
-	 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_SCSI) | \
-	 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_FSF) | \
-	 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_CONFIG) | \
-	 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_CIO) | \
-	 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_QDIO) | \
-	 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_ERP) | \
-	 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_FC))
-
-/* check whether we have the right level for logging */
-#define ZFCP_LOG_CHECK(level) \
-	((ZFCP_GET_LOG_VALUE(ZFCP_LOG_AREA)) >= level)
-
-/* logging routine for zfcp */
-#define _ZFCP_LOG(fmt, args...) \
-	printk(KERN_ERR ZFCP_NAME": %s(%d): " fmt, __func__, \
-	       __LINE__ , ##args)
-
-#define ZFCP_LOG(level, fmt, args...) \
-do { \
-	if (ZFCP_LOG_CHECK(level)) \
-		_ZFCP_LOG(fmt, ##args); \
-} while (0)
-
-#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_NORMAL
-# define ZFCP_LOG_NORMAL(fmt, args...)	do { } while (0)
-#else
-# define ZFCP_LOG_NORMAL(fmt, args...) \
-do { \
-	if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_NORMAL)) \
-		printk(KERN_ERR ZFCP_NAME": " fmt, ##args); \
-} while (0)
-#endif
-
-#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_INFO
-# define ZFCP_LOG_INFO(fmt, args...)	do { } while (0)
-#else
-# define ZFCP_LOG_INFO(fmt, args...) \
-do { \
-	if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_INFO)) \
-		printk(KERN_ERR ZFCP_NAME": " fmt, ##args); \
-} while (0)
-#endif
-
-#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_DEBUG
-# define ZFCP_LOG_DEBUG(fmt, args...)	do { } while (0)
-#else
-# define ZFCP_LOG_DEBUG(fmt, args...) \
-	ZFCP_LOG(ZFCP_LOG_LEVEL_DEBUG, fmt , ##args)
-#endif
-
-#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_TRACE
-# define ZFCP_LOG_TRACE(fmt, args...)	do { } while (0)
-#else
-# define ZFCP_LOG_TRACE(fmt, args...) \
-	ZFCP_LOG(ZFCP_LOG_LEVEL_TRACE, fmt , ##args)
-#endif
-
 /*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/

 /*
@@ -441,6 +287,7 @@ do { \
 #define ZFCP_STATUS_COMMON_ERP_INUSE		0x01000000
 #define ZFCP_STATUS_COMMON_ACCESS_DENIED	0x00800000
 #define ZFCP_STATUS_COMMON_ACCESS_BOXED		0x00400000
+#define ZFCP_STATUS_COMMON_NOESC		0x00200000

 /* adapter status */
 #define ZFCP_STATUS_ADAPTER_QDIOUP		0x00000002
@@ -496,77 +343,6 @@ do { \
 #define ZFCP_STATUS_FSFREQ_RETRY                0x00000800
 #define ZFCP_STATUS_FSFREQ_DISMISSED            0x00001000

-/*********************** ERROR RECOVERY PROCEDURE DEFINES ********************/
-
-#define ZFCP_MAX_ERPS                   3
-
-#define ZFCP_ERP_FSFREQ_TIMEOUT		(30 * HZ)
-#define ZFCP_ERP_MEMWAIT_TIMEOUT	HZ
-
-#define ZFCP_STATUS_ERP_TIMEDOUT	0x10000000
-#define ZFCP_STATUS_ERP_CLOSE_ONLY	0x01000000
-#define ZFCP_STATUS_ERP_DISMISSING	0x00100000
-#define ZFCP_STATUS_ERP_DISMISSED	0x00200000
-#define ZFCP_STATUS_ERP_LOWMEM		0x00400000
-
-#define ZFCP_ERP_STEP_UNINITIALIZED	0x00000000
-#define ZFCP_ERP_STEP_FSF_XCONFIG	0x00000001
-#define ZFCP_ERP_STEP_PHYS_PORT_CLOSING	0x00000010
-#define ZFCP_ERP_STEP_PORT_CLOSING	0x00000100
-#define ZFCP_ERP_STEP_NAMESERVER_OPEN	0x00000200
-#define ZFCP_ERP_STEP_NAMESERVER_LOOKUP	0x00000400
-#define ZFCP_ERP_STEP_PORT_OPENING	0x00000800
-#define ZFCP_ERP_STEP_UNIT_CLOSING	0x00001000
-#define ZFCP_ERP_STEP_UNIT_OPENING	0x00002000
-
-/* Ordered by escalation level (necessary for proper erp-code operation) */
-#define ZFCP_ERP_ACTION_REOPEN_ADAPTER		0x4
-#define ZFCP_ERP_ACTION_REOPEN_PORT_FORCED	0x3
-#define ZFCP_ERP_ACTION_REOPEN_PORT		0x2
-#define ZFCP_ERP_ACTION_REOPEN_UNIT		0x1
-
-#define ZFCP_ERP_ACTION_RUNNING 0x1
-#define ZFCP_ERP_ACTION_READY   0x2
-
-#define ZFCP_ERP_SUCCEEDED	0x0
-#define ZFCP_ERP_FAILED		0x1
-#define ZFCP_ERP_CONTINUES	0x2
-#define ZFCP_ERP_EXIT		0x3
-#define ZFCP_ERP_DISMISSED	0x4
-#define ZFCP_ERP_NOMEM		0x5
-
-/******************** CFDC SPECIFIC STUFF *****************************/
-
-/* Firewall data channel sense data record */
-struct zfcp_cfdc_sense_data {
-	u32 signature;           /* Request signature */
-	u32 devno;               /* FCP adapter device number */
-	u32 command;             /* Command code */
-	u32 fsf_status;          /* FSF request status and status qualifier */
-	u8  fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
-	u8  payloads[256];       /* Access conflicts list */
-	u8  control_file[0];     /* Access control table */
-};
-
-#define ZFCP_CFDC_SIGNATURE			0xCFDCACDF
-
-#define ZFCP_CFDC_CMND_DOWNLOAD_NORMAL		0x00010001
-#define ZFCP_CFDC_CMND_DOWNLOAD_FORCE		0x00010101
-#define ZFCP_CFDC_CMND_FULL_ACCESS		0x00000201
-#define ZFCP_CFDC_CMND_RESTRICTED_ACCESS	0x00000401
-#define ZFCP_CFDC_CMND_UPLOAD			0x00010002
-
-#define ZFCP_CFDC_DOWNLOAD			0x00000001
-#define ZFCP_CFDC_UPLOAD			0x00000002
-#define ZFCP_CFDC_WITH_CONTROL_FILE		0x00010000
-
-#define ZFCP_CFDC_DEV_NAME			"zfcp_cfdc"
-#define ZFCP_CFDC_DEV_MAJOR			MISC_MAJOR
-#define ZFCP_CFDC_DEV_MINOR			MISC_DYNAMIC_MINOR
-
-#define ZFCP_CFDC_MAX_CONTROL_FILE_SIZE		127 * 1024
-
 /************************* STRUCTURE DEFINITIONS *****************************/

 struct zfcp_fsf_req;
@@ -623,7 +399,6 @@ typedef void (*zfcp_send_ct_handler_t)(unsigned long);
 * @resp_count: number of elements in response scatter-gather list
 * @handler: handler function (called for response to the request)
 * @handler_data: data passed to handler function
- * @pool: pointer to memory pool for ct request structure
 * @timeout: FSF timeout for this request
 * @completion: completion for synchronization purposes
 * @status: used to pass error status to calling function
@@ -636,7 +411,6 @@ struct zfcp_send_ct {
	unsigned int resp_count;
	zfcp_send_ct_handler_t handler;
	unsigned long handler_data;
-	mempool_t *pool;
	int timeout;
	struct completion *completion;
	int status;
@@ -685,13 +459,13 @@ struct zfcp_send_els {
 };

 struct zfcp_qdio_queue {
-	struct qdio_buffer *buffer[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */
-	u8		   free_index;	      /* index of next free bfr
+	struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */
+	u8		   first;	      /* index of next free bfr
					         in queue (free_count>0) */
-	atomic_t           free_count;        /* number of free buffers
+	atomic_t           count;             /* number of free buffers
					         in queue */
-	rwlock_t	   queue_lock;	      /* lock for operations on queue */
-	int                distance_from_int; /* SBALs used since PCI indication
+	spinlock_t	   lock;	      /* lock for operations on queue */
+	int		   pci_batch;	      /* SBALs since PCI indication
					         was last set */
 };
@@ -708,6 +482,24 @@ struct zfcp_erp_action {
 	struct timer_list timer;
 };

+struct fsf_latency_record {
+	u32 min;
+	u32 max;
+	u64 sum;
+};
+
+struct latency_cont {
+	struct fsf_latency_record channel;
+	struct fsf_latency_record fabric;
+	u64 counter;
+};
+
+struct zfcp_latencies {
+	struct latency_cont read;
+	struct latency_cont write;
+	struct latency_cont cmd;
+	spinlock_t lock;
+};
 struct zfcp_adapter {
 	struct list_head	list;		/* list of adapters */

@@ -723,24 +515,25 @@ struct zfcp_adapter {
 	u32			adapter_features;  /* FCP channel features */
 	u32			connection_features; /* host connection features */
 	u32			hardware_version;  /* of FCP channel */
+	u16			timer_ticks;       /* time int for a tick */
 	struct Scsi_Host	*scsi_host;	   /* Pointer to mid-layer */
 	struct list_head	port_list_head;	   /* remote port list */
 	struct list_head	port_remove_lh;	   /* head of ports to be
 						      removed */
 	u32			ports;		   /* number of remote ports */
-	atomic_t		reqs_active;	   /* # active FSF reqs */
 	unsigned long		req_no;		   /* unique FSF req number */
 	struct list_head	*req_list;	   /* list of pending reqs */
 	spinlock_t		req_list_lock;	   /* request list lock */
-	struct zfcp_qdio_queue	request_queue;	   /* request queue */
+	struct zfcp_qdio_queue	req_q;		   /* request queue */
 	u32			fsf_req_seq_no;	   /* FSF cmnd seq number */
 	wait_queue_head_t	request_wq;	   /* can be used to wait for
 						      more available SBALs */
-	struct zfcp_qdio_queue	response_queue;	   /* response queue */
+	struct zfcp_qdio_queue	resp_q;		   /* response queue */
 	rwlock_t		abort_lock;	   /* Protects against SCSI
 						      stack abort/command
 						      completion races */
-	u16			status_read_failed; /* # failed status reads */
+	atomic_t		stat_miss;	   /* # missing status reads */
+	struct work_struct	stat_work;
 	atomic_t		status;		   /* status of this adapter */
 	struct list_head	erp_ready_head;	   /* error recovery for this
 						      adapter/devices */
@@ -774,13 +567,9 @@ struct zfcp_adapter {
 	struct fc_host_statistics *fc_stats;
 	struct fsf_qtcb_bottom_port *stats_reset_data;
 	unsigned long		stats_reset;
+	struct work_struct	scan_work;
 };

-/*
- * the struct device sysfs_device must be at the beginning of this structure.
- * pointer to struct device is used to free port structure in release function
- * of the device. don't change!
- */
 struct zfcp_port {
 	struct device		sysfs_device;	/* sysfs device */
 	struct fc_rport		*rport;		/* rport of fc transport class */

@@ -804,10 +593,6 @@ struct zfcp_port {
 	u32			supported_classes;
 };

-/* the struct device sysfs_device must be at the beginning of this structure.
- * pointer to struct device is used to free unit structure in release function
- * of the device. don't change!
- */
 struct zfcp_unit {
 	struct device		sysfs_device;	/* sysfs device */
 	struct list_head	list;		/* list of logical units */

@@ -822,6 +607,7 @@ struct zfcp_unit {
 	struct scsi_device	*device;	/* scsi device struct pointer */
 	struct zfcp_erp_action	erp_action;	/* pending error recovery */
 	atomic_t		erp_counter;
+	struct zfcp_latencies	latencies;
 };
 /* FSF request */
@@ -831,19 +617,19 @@ struct zfcp_fsf_req {
 	struct zfcp_adapter	*adapter;	/* adapter request belongs to */
 	u8			sbal_number;	/* nr of SBALs free for use */
 	u8			sbal_first;	/* first SBAL for this request */
-	u8			sbal_last;	/* last possible SBAL for
+	u8			sbal_last;	/* last SBAL for this request */
+	u8			sbal_limit;	/* last possible SBAL for
 						   this request */
-	u8			sbal_curr;	/* current SBAL during creation
-						   of request */
 	u8			sbale_curr;	/* current SBALE during creation
 						   of request */
+	u8			sbal_response;	/* SBAL used in interrupt */
 	wait_queue_head_t	completion_wq;	/* can be used by a routine
 						   to wait for completion */
 	volatile u32		status;		/* status of this request */
 	u32			fsf_command;	/* FSF Command copy */
 	struct fsf_qtcb		*qtcb;		/* address of associated QTCB */
 	u32			seq_no;		/* Sequence number of request */
-	unsigned long		data;		/* private data of request */
+	void			*data;		/* private data of request */
 	struct timer_list	timer;		/* used for erp or scsi er */
 	struct zfcp_erp_action	*erp_action;	/* used if this request is
 						   issued on behalf of erp */

@@ -851,10 +637,9 @@ struct zfcp_fsf_req {
 						   from emergency pool */
 	unsigned long long	issued;		/* request sent time (STCK) */
 	struct zfcp_unit	*unit;
+	void			(*handler)(struct zfcp_fsf_req *);
 };

-typedef void zfcp_fsf_req_handler_t(struct zfcp_fsf_req*);
-
 /* driver data */
 struct zfcp_data {
 	struct scsi_host_template scsi_host_template;
@@ -873,29 +658,11 @@ struct zfcp_data {
 	char			init_busid[BUS_ID_SIZE];
 	wwn_t			init_wwpn;
 	fcp_lun_t		init_fcp_lun;
-	char			*driver_version;
 	struct kmem_cache	*fsf_req_qtcb_cache;
 	struct kmem_cache	*sr_buffer_cache;
 	struct kmem_cache	*gid_pn_cache;
 };

-/**
- * struct zfcp_sg_list - struct describing a scatter-gather list
- * @sg: pointer to array of (struct scatterlist)
- * @count: number of elements in scatter-gather list
- */
-struct zfcp_sg_list {
-	struct scatterlist *sg;
-	unsigned int count;
-};
-
-/* number of elements for various memory pools */
-#define ZFCP_POOL_FSF_REQ_ERP_NR	1
-#define ZFCP_POOL_FSF_REQ_SCSI_NR	1
-#define ZFCP_POOL_FSF_REQ_ABORT_NR	1
-#define ZFCP_POOL_STATUS_READ_NR	ZFCP_STATUS_READS_RECOM
-#define ZFCP_POOL_DATA_GID_PN_NR	1
-
 /* struct used by memory pools for fsf_requests */
 struct zfcp_fsf_req_qtcb {
 	struct zfcp_fsf_req fsf_req;

@@ -905,7 +672,6 @@ struct zfcp_fsf_req_qtcb {
 /********************** ZFCP SPECIFIC DEFINES ********************************/

 #define ZFCP_REQ_AUTO_CLEANUP	0x00000002
-#define ZFCP_WAIT_FOR_SBAL	0x00000004
 #define ZFCP_REQ_NO_QTCB	0x00000008

 #define ZFCP_SET	0x00000100

@@ -916,12 +682,6 @@ struct zfcp_fsf_req_qtcb {
 	((atomic_read(target) & mask) == mask)
 #endif
-extern void _zfcp_hex_dump(char *, int);
-#define ZFCP_HEX_DUMP(level, addr, count) \
-		if (ZFCP_LOG_CHECK(level)) { \
-			_zfcp_hex_dump(addr, count); \
-		}
-
 #define zfcp_get_busid_by_adapter(adapter) (adapter->ccw_device->dev.bus_id)
 #define zfcp_get_busid_by_port(port) (zfcp_get_busid_by_adapter(port->adapter))
 #define zfcp_get_busid_by_unit(unit) (zfcp_get_busid_by_port(unit->port))

@@ -934,15 +694,6 @@ static inline int zfcp_reqlist_hash(unsigned long req_id)
 	return req_id % REQUEST_LIST_SIZE;
 }

-static inline void zfcp_reqlist_add(struct zfcp_adapter *adapter,
-				    struct zfcp_fsf_req *fsf_req)
-{
-	unsigned int idx;
-
-	idx = zfcp_reqlist_hash(fsf_req->req_id);
-	list_add_tail(&fsf_req->list, &adapter->req_list[idx]);
-}
-
 static inline void zfcp_reqlist_remove(struct zfcp_adapter *adapter,
 				       struct zfcp_fsf_req *fsf_req)
 {

File diff suppressed because it is too large

drivers/s390/scsi/zfcp_ext.h

@@ -1,22 +1,9 @@
 /*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
+ * zfcp device driver
  *
- * (C) Copyright IBM Corp. 2002, 2006
+ * External function declarations.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright IBM Corporation 2002, 2008
  */

 #ifndef ZFCP_EXT_H

@@ -24,172 +11,51 @@
#include "zfcp_def.h" #include "zfcp_def.h"
extern struct zfcp_data zfcp_data; /* zfcp_aux.c */
extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *,
/******************************** SYSFS *************************************/ fcp_lun_t);
extern struct attribute_group *zfcp_driver_attr_groups[]; extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *,
extern int zfcp_sysfs_adapter_create_files(struct device *); wwn_t);
extern void zfcp_sysfs_adapter_remove_files(struct device *); extern int zfcp_adapter_enqueue(struct ccw_device *);
extern int zfcp_sysfs_port_create_files(struct device *, u32); extern void zfcp_adapter_dequeue(struct zfcp_adapter *);
extern void zfcp_sysfs_port_remove_files(struct device *, u32); extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, wwn_t, u32,
extern int zfcp_sysfs_unit_create_files(struct device *); u32);
extern void zfcp_sysfs_unit_remove_files(struct device *); extern void zfcp_port_dequeue(struct zfcp_port *);
extern void zfcp_sysfs_port_release(struct device *);
extern void zfcp_sysfs_unit_release(struct device *);
/**************************** CONFIGURATION *********************************/
extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, fcp_lun_t);
extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, wwn_t);
extern struct zfcp_port *zfcp_get_port_by_did(struct zfcp_adapter *, u32);
struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
extern int zfcp_adapter_debug_register(struct zfcp_adapter *);
extern void zfcp_adapter_dequeue(struct zfcp_adapter *);
extern void zfcp_adapter_debug_unregister(struct zfcp_adapter *);
extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, wwn_t,
u32, u32);
extern void zfcp_port_dequeue(struct zfcp_port *);
extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, fcp_lun_t); extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, fcp_lun_t);
extern void zfcp_unit_dequeue(struct zfcp_unit *); extern void zfcp_unit_dequeue(struct zfcp_unit *);
extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
extern void zfcp_sg_free_table(struct scatterlist *, int);
extern int zfcp_sg_setup_table(struct scatterlist *, int);
/******************************* S/390 IO ************************************/ /* zfcp_ccw.c */
extern int zfcp_ccw_register(void); extern int zfcp_ccw_register(void);
extern void zfcp_qdio_zero_sbals(struct qdio_buffer **, int, int); /* zfcp_cfdc.c */
extern int zfcp_qdio_allocate(struct zfcp_adapter *); extern struct miscdevice zfcp_cfdc_misc;
extern int zfcp_qdio_allocate_queues(struct zfcp_adapter *);
extern void zfcp_qdio_free_queues(struct zfcp_adapter *);
extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *,
struct zfcp_fsf_req *);
extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req /* zfcp_dbf.c */
(struct zfcp_fsf_req *, int, int); extern int zfcp_adapter_debug_register(struct zfcp_adapter *);
extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_curr extern void zfcp_adapter_debug_unregister(struct zfcp_adapter *);
(struct zfcp_fsf_req *); extern void zfcp_rec_dbf_event_thread(u8, struct zfcp_adapter *);
extern int zfcp_qdio_sbals_from_sg extern void zfcp_rec_dbf_event_thread_lock(u8, struct zfcp_adapter *);
(struct zfcp_fsf_req *, unsigned long, struct scatterlist *, int, int); extern void zfcp_rec_dbf_event_adapter(u8, void *, struct zfcp_adapter *);
extern int zfcp_qdio_sbals_from_scsicmnd extern void zfcp_rec_dbf_event_port(u8, void *, struct zfcp_port *);
(struct zfcp_fsf_req *, unsigned long, struct scsi_cmnd *); extern void zfcp_rec_dbf_event_unit(u8, void *, struct zfcp_unit *);
extern void zfcp_rec_dbf_event_trigger(u8, void *, u8, u8, void *,
struct zfcp_adapter *,
/******************************** FSF ****************************************/
extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
extern int zfcp_fsf_close_unit(struct zfcp_erp_action *);
extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *,
struct fsf_qtcb_bottom_config *);
extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *,
struct fsf_qtcb_bottom_port *);
extern int zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_req **,
u32, u32, struct zfcp_sg_list *);
extern void zfcp_fsf_start_timer(struct zfcp_fsf_req *, unsigned long);
extern void zfcp_erp_start_timer(struct zfcp_fsf_req *);
extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
extern int zfcp_fsf_status_read(struct zfcp_adapter *, int);
extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *,
unsigned long *, struct zfcp_fsf_req **);
extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
struct zfcp_erp_action *);
extern int zfcp_fsf_send_els(struct zfcp_send_els *);
extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *,
struct zfcp_unit *,
struct scsi_cmnd *, int, int);
extern int zfcp_fsf_req_complete(struct zfcp_fsf_req *);
extern void zfcp_fsf_incoming_els(struct zfcp_fsf_req *);
extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_command_task_management(
struct zfcp_adapter *, struct zfcp_unit *, u8, int);
extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(
unsigned long, struct zfcp_adapter *, struct zfcp_unit *, int);
/******************************* FC/FCP **************************************/
extern int zfcp_nameserver_enqueue(struct zfcp_adapter *);
extern int zfcp_ns_gid_pn_request(struct zfcp_erp_action *);
extern int zfcp_check_ct_response(struct ct_hdr *);
extern int zfcp_handle_els_rjt(u32, struct zfcp_ls_rjt_par *);
extern void zfcp_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
/******************************* SCSI ****************************************/
extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t);
extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *);
extern void set_host_byte(int *, char);
extern void set_driver_byte(int *, char);
extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *);
extern int zfcp_scsi_command_async(struct zfcp_adapter *,struct zfcp_unit *,
struct scsi_cmnd *, int);
extern int zfcp_scsi_command_sync(struct zfcp_unit *, struct scsi_cmnd *, int);
extern struct fc_function_template zfcp_transport_functions;
/******************************** ERP ****************************************/
extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u8, void *,
u32, int);
extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, u8, void *);
extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, u8, void *);
extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, u8, void *);
extern void zfcp_erp_modify_port_status(struct zfcp_port *, u8, void *, u32,
int);
extern int zfcp_erp_port_reopen(struct zfcp_port *, int, u8, void *);
extern int zfcp_erp_port_shutdown(struct zfcp_port *, int, u8, void *);
extern int zfcp_erp_port_forced_reopen(struct zfcp_port *, int, u8, void *);
extern void zfcp_erp_port_failed(struct zfcp_port *, u8, void *);
extern int zfcp_erp_port_reopen_all(struct zfcp_adapter *, int, u8, void *);
extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, u8, void *, u32,
int);
extern int zfcp_erp_unit_reopen(struct zfcp_unit *, int, u8, void *);
extern int zfcp_erp_unit_shutdown(struct zfcp_unit *, int, u8, void *);
extern void zfcp_erp_unit_failed(struct zfcp_unit *, u8, void *);
extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
extern int zfcp_erp_thread_kill(struct zfcp_adapter *);
extern int zfcp_erp_wait(struct zfcp_adapter *);
extern void zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long);
extern int zfcp_test_link(struct zfcp_port *);
extern void zfcp_erp_port_boxed(struct zfcp_port *, u8 id, void *ref);
extern void zfcp_erp_unit_boxed(struct zfcp_unit *, u8 id, void *ref);
extern void zfcp_erp_port_access_denied(struct zfcp_port *, u8 id, void *ref);
extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, u8 id, void *ref);
extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, u8, void *);
extern void zfcp_erp_port_access_changed(struct zfcp_port *, u8, void *);
extern void zfcp_erp_unit_access_changed(struct zfcp_unit *, u8, void *);
/******************************** AUX ****************************************/
extern void zfcp_rec_dbf_event_thread(u8 id, struct zfcp_adapter *adapter,
int lock);
extern void zfcp_rec_dbf_event_adapter(u8 id, void *ref, struct zfcp_adapter *);
extern void zfcp_rec_dbf_event_port(u8 id, void *ref, struct zfcp_port *port);
extern void zfcp_rec_dbf_event_unit(u8 id, void *ref, struct zfcp_unit *unit);
extern void zfcp_rec_dbf_event_trigger(u8 id, void *ref, u8 want, u8 need,
void *action, struct zfcp_adapter *,
struct zfcp_port *, struct zfcp_unit *); struct zfcp_port *, struct zfcp_unit *);
extern void zfcp_rec_dbf_event_action(u8 id, struct zfcp_erp_action *); extern void zfcp_rec_dbf_event_action(u8, struct zfcp_erp_action *);
extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *); extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *);
extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *, extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
struct fsf_status_read_buffer *); struct fsf_status_read_buffer *);
extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *,
unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int,
int, int); int, int);
extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *); extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *); extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *); extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *); extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *); extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *);
extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *, extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *,
struct scsi_cmnd *, struct scsi_cmnd *,
struct zfcp_fsf_req *); struct zfcp_fsf_req *);
@@ -198,6 +64,101 @@ extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
 				      unsigned long);
 extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
 					 struct scsi_cmnd *);
-extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
+
+/* zfcp_erp.c */
+extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u8, void *,
+					   u32, int);
+extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, u8, void *);
+extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, u8, void *);
+extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, u8, void *);
+extern void zfcp_erp_modify_port_status(struct zfcp_port *, u8, void *, u32,
+					int);
+extern int zfcp_erp_port_reopen(struct zfcp_port *, int, u8, void *);
+extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, u8, void *);
+extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, u8, void *);
+extern void zfcp_erp_port_failed(struct zfcp_port *, u8, void *);
+extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, u8, void *, u32,
+					int);
+extern void zfcp_erp_unit_reopen(struct zfcp_unit *, int, u8, void *);
+extern void zfcp_erp_unit_shutdown(struct zfcp_unit *, int, u8, void *);
+extern void zfcp_erp_unit_failed(struct zfcp_unit *, u8, void *);
+extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
+extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
+extern void zfcp_erp_wait(struct zfcp_adapter *);
+extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
+extern void zfcp_erp_port_boxed(struct zfcp_port *, u8, void *);
+extern void zfcp_erp_unit_boxed(struct zfcp_unit *, u8, void *);
+extern void zfcp_erp_port_access_denied(struct zfcp_port *, u8, void *);
+extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, u8, void *);
+extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, u8, void *);
+extern void zfcp_erp_timeout_handler(unsigned long);
+
+/* zfcp_fc.c */
+extern int zfcp_scan_ports(struct zfcp_adapter *);
+extern void _zfcp_scan_ports_later(struct work_struct *);
+extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
+extern int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *);
+extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
+extern void zfcp_test_link(struct zfcp_port *);
+
+/* zfcp_fsf.c */
+extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
+extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
+extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
+extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
+extern int zfcp_fsf_close_unit(struct zfcp_erp_action *);
+extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
+extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *,
+					      struct fsf_qtcb_bottom_config *);
+extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
+extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *,
+					    struct fsf_qtcb_bottom_port *);
+extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *,
+						  struct zfcp_fsf_cfdc *);
+extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
+extern int zfcp_fsf_status_read(struct zfcp_adapter *);
+extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
+extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
+			    struct zfcp_erp_action *);
+extern int zfcp_fsf_send_els(struct zfcp_send_els *);
+extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *,
+					  struct zfcp_unit *,
+					  struct scsi_cmnd *, int, int);
+extern void zfcp_fsf_req_complete(struct zfcp_fsf_req *);
+extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
+extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *,
+						  struct zfcp_unit *, u8, int);
+extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long,
+						       struct zfcp_adapter *,
+						       struct zfcp_unit *, int);
+
+/* zfcp_qdio.c */
+extern int zfcp_qdio_allocate(struct zfcp_adapter *);
+extern void zfcp_qdio_free(struct zfcp_adapter *);
+extern int zfcp_qdio_send(struct zfcp_fsf_req *);
+extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req(
+						struct zfcp_fsf_req *);
+extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_curr(
+						struct zfcp_fsf_req *);
+extern int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *, unsigned long,
+				   struct scatterlist *, int);
+extern int zfcp_qdio_open(struct zfcp_adapter *);
+extern void zfcp_qdio_close(struct zfcp_adapter *);
+
+/* zfcp_scsi.c */
+extern struct zfcp_data zfcp_data;
+extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
+extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
+extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t);
+extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
+extern struct fc_function_template zfcp_transport_functions;
+
+/* zfcp_sysfs.c */
+extern struct attribute_group zfcp_sysfs_unit_attrs;
+extern struct attribute_group zfcp_sysfs_adapter_attrs;
+extern struct attribute_group zfcp_sysfs_ns_port_attrs;
+extern struct attribute_group zfcp_sysfs_port_attrs;
+extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
+extern struct device_attribute *zfcp_sysfs_shost_attrs[];

 #endif /* ZFCP_EXT_H */

drivers/s390/scsi/zfcp_fc.c (new file, 567 lines)

@@ -0,0 +1,567 @@
/*
* zfcp device driver
*
* Fibre Channel related functions for the zfcp device driver.
*
* Copyright IBM Corporation 2008
*/
#include "zfcp_ext.h"
struct ct_iu_gpn_ft_req {
struct ct_hdr header;
u8 flags;
u8 domain_id_scope;
u8 area_id_scope;
u8 fc4_type;
} __attribute__ ((packed));
struct gpn_ft_resp_acc {
u8 control;
u8 port_id[3];
u8 reserved[4];
u64 wwpn;
} __attribute__ ((packed));
#define ZFCP_GPN_FT_ENTRIES ((PAGE_SIZE - sizeof(struct ct_hdr)) \
/ sizeof(struct gpn_ft_resp_acc))
#define ZFCP_GPN_FT_BUFFERS 4
#define ZFCP_GPN_FT_MAX_ENTRIES (ZFCP_GPN_FT_BUFFERS * (ZFCP_GPN_FT_ENTRIES + 1))
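A quick sanity check on these sizes, assuming 4 KiB pages and the 16-byte ct_hdr and gpn_ft_resp_acc layouts used here: ZFCP_GPN_FT_ENTRIES = (4096 - 16) / 16 = 255 accept entries per response buffer, so ZFCP_GPN_FT_MAX_ENTRIES = 4 * (255 + 1) = 1024 entries across the four buffers.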
struct ct_iu_gpn_ft_resp {
struct ct_hdr header;
struct gpn_ft_resp_acc accept[ZFCP_GPN_FT_ENTRIES];
} __attribute__ ((packed));
struct zfcp_gpn_ft {
struct zfcp_send_ct ct;
struct scatterlist sg_req;
struct scatterlist sg_resp[ZFCP_GPN_FT_BUFFERS];
};
static struct zfcp_port *zfcp_get_port_by_did(struct zfcp_adapter *adapter,
u32 d_id)
{
struct zfcp_port *port;
list_for_each_entry(port, &adapter->port_list_head, list)
if ((port->d_id == d_id) &&
!atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status))
return port;
return NULL;
}
static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
struct fcp_rscn_element *elem)
{
unsigned long flags;
struct zfcp_port *port;
read_lock_irqsave(&zfcp_data.config_lock, flags);
list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
continue;
/* FIXME: ZFCP_STATUS_PORT_DID_DID check is racy */
if (!atomic_test_mask(ZFCP_STATUS_PORT_DID_DID, &port->status))
/* Try to connect to unused ports anyway. */
zfcp_erp_port_reopen(port,
ZFCP_STATUS_COMMON_ERP_FAILED,
82, fsf_req);
else if ((port->d_id & range) == (elem->nport_did & range))
/* Check connection status for connected ports */
zfcp_test_link(port);
}
read_unlock_irqrestore(&zfcp_data.config_lock, flags);
}
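Concretely, an RSCN that names an area should match every known port whose D_ID shares the domain and area bytes. Assuming the usual zfcp range masks (e.g. 0xFFFF00 for ZFCP_AREA_ADDRESS; the exact values live in zfcp_def.h), a port with d_id 0x654321 and an RSCN element for 0x654300 satisfy (0x654321 & 0xFFFF00) == (0x654300 & 0xFFFF00), so zfcp_test_link() is called for that port.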
static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
{
struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
struct fcp_rscn_head *fcp_rscn_head;
struct fcp_rscn_element *fcp_rscn_element;
u16 i;
u16 no_entries;
u32 range_mask;
fcp_rscn_head = (struct fcp_rscn_head *) status_buffer->payload.data;
fcp_rscn_element = (struct fcp_rscn_element *) fcp_rscn_head;
/* see FC-FS */
no_entries = fcp_rscn_head->payload_len /
sizeof(struct fcp_rscn_element);
for (i = 1; i < no_entries; i++) {
/* skip head and start with 1st element */
fcp_rscn_element++;
switch (fcp_rscn_element->addr_format) {
case ZFCP_PORT_ADDRESS:
range_mask = ZFCP_PORTS_RANGE_PORT;
break;
case ZFCP_AREA_ADDRESS:
range_mask = ZFCP_PORTS_RANGE_AREA;
break;
case ZFCP_DOMAIN_ADDRESS:
range_mask = ZFCP_PORTS_RANGE_DOMAIN;
break;
case ZFCP_FABRIC_ADDRESS:
range_mask = ZFCP_PORTS_RANGE_FABRIC;
break;
default:
continue;
}
_zfcp_fc_incoming_rscn(fsf_req, range_mask, fcp_rscn_element);
}
schedule_work(&fsf_req->adapter->scan_work);
}
static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, wwn_t wwpn)
{
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_port *port;
unsigned long flags;
read_lock_irqsave(&zfcp_data.config_lock, flags);
list_for_each_entry(port, &adapter->port_list_head, list)
if (port->wwpn == wwpn)
break;
read_unlock_irqrestore(&zfcp_data.config_lock, flags);
if (port && (port->wwpn == wwpn))
zfcp_erp_port_forced_reopen(port, 0, 83, req);
}
static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
{
struct fsf_status_read_buffer *status_buffer =
(struct fsf_status_read_buffer *)req->data;
struct fsf_plogi *els_plogi =
(struct fsf_plogi *) status_buffer->payload.data;
zfcp_fc_incoming_wwpn(req, els_plogi->serv_param.wwpn);
}
static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
{
struct fsf_status_read_buffer *status_buffer =
(struct fsf_status_read_buffer *)req->data;
struct fcp_logo *els_logo =
(struct fcp_logo *) status_buffer->payload.data;
zfcp_fc_incoming_wwpn(req, els_logo->nport_wwpn);
}
/**
* zfcp_fc_incoming_els - handle incoming ELS
 * @fsf_req: request which contains incoming ELS
*/
void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
{
struct fsf_status_read_buffer *status_buffer =
(struct fsf_status_read_buffer *) fsf_req->data;
unsigned int els_type = status_buffer->payload.data[0];
zfcp_san_dbf_event_incoming_els(fsf_req);
if (els_type == LS_PLOGI)
zfcp_fc_incoming_plogi(fsf_req);
else if (els_type == LS_LOGO)
zfcp_fc_incoming_logo(fsf_req);
else if (els_type == LS_RSCN)
zfcp_fc_incoming_rscn(fsf_req);
}
static void zfcp_ns_gid_pn_handler(unsigned long data)
{
struct zfcp_gid_pn_data *gid_pn = (struct zfcp_gid_pn_data *) data;
struct zfcp_send_ct *ct = &gid_pn->ct;
struct ct_iu_gid_pn_req *ct_iu_req = sg_virt(ct->req);
struct ct_iu_gid_pn_resp *ct_iu_resp = sg_virt(ct->resp);
struct zfcp_port *port = gid_pn->port;
if (ct->status)
goto out;
if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT) {
atomic_set_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status);
goto out;
}
/* paranoia */
if (ct_iu_req->wwpn != port->wwpn)
goto out;
/* looks like a valid d_id */
port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
out:
mempool_free(gid_pn, port->adapter->pool.data_gid_pn);
}
/**
* zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request
* @erp_action: pointer to zfcp_erp_action where GID_PN request is needed
* return: -ENOMEM on error, 0 otherwise
*/
int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action)
{
int ret;
struct zfcp_gid_pn_data *gid_pn;
struct zfcp_adapter *adapter = erp_action->adapter;
gid_pn = mempool_alloc(adapter->pool.data_gid_pn, GFP_ATOMIC);
if (!gid_pn)
return -ENOMEM;
memset(gid_pn, 0, sizeof(*gid_pn));
/* setup parameters for send generic command */
gid_pn->port = erp_action->port;
gid_pn->ct.port = adapter->nameserver_port;
gid_pn->ct.handler = zfcp_ns_gid_pn_handler;
gid_pn->ct.handler_data = (unsigned long) gid_pn;
gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
gid_pn->ct.req = &gid_pn->req;
gid_pn->ct.resp = &gid_pn->resp;
gid_pn->ct.req_count = 1;
gid_pn->ct.resp_count = 1;
sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req,
sizeof(struct ct_iu_gid_pn_req));
sg_init_one(&gid_pn->resp, &gid_pn->ct_iu_resp,
sizeof(struct ct_iu_gid_pn_resp));
/* setup nameserver request */
gid_pn->ct_iu_req.header.revision = ZFCP_CT_REVISION;
gid_pn->ct_iu_req.header.gs_type = ZFCP_CT_DIRECTORY_SERVICE;
gid_pn->ct_iu_req.header.gs_subtype = ZFCP_CT_NAME_SERVER;
gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS;
gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN;
gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_MAX_SIZE;
gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn;
ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp,
erp_action);
if (ret)
mempool_free(gid_pn, adapter->pool.data_gid_pn);
return ret;
}
/**
 * zfcp_fc_plogi_evaluate - evaluate PLOGI payload
 * @port: zfcp_port structure
 * @plogi: plogi payload
 *
 * Evaluate PLOGI payload and copy important fields into zfcp_port structure
 */
void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fsf_plogi *plogi)
{
port->maxframe_size = plogi->serv_param.common_serv_param[7] |
((plogi->serv_param.common_serv_param[6] & 0x0F) << 8);
if (plogi->serv_param.class1_serv_param[0] & 0x80)
port->supported_classes |= FC_COS_CLASS1;
if (plogi->serv_param.class2_serv_param[0] & 0x80)
port->supported_classes |= FC_COS_CLASS2;
if (plogi->serv_param.class3_serv_param[0] & 0x80)
port->supported_classes |= FC_COS_CLASS3;
if (plogi->serv_param.class4_serv_param[0] & 0x80)
port->supported_classes |= FC_COS_CLASS4;
}
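As a worked example of the maxframe_size decode above: the receive data field size occupies the low nibble of common_serv_param[6] plus all of common_serv_param[7], so bytes 0x08 and 0x40 yield (0x08 << 8) | 0x40 = 0x840 = 2112, the classic Fibre Channel maximum payload size.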
struct zfcp_els_adisc {
struct zfcp_send_els els;
struct scatterlist req;
struct scatterlist resp;
struct zfcp_ls_adisc ls_adisc;
struct zfcp_ls_adisc_acc ls_adisc_acc;
};
static void zfcp_fc_adisc_handler(unsigned long data)
{
struct zfcp_els_adisc *adisc = (struct zfcp_els_adisc *) data;
struct zfcp_port *port = adisc->els.port;
struct zfcp_ls_adisc_acc *ls_adisc = &adisc->ls_adisc_acc;
if (adisc->els.status) {
/* request rejected or timed out */
zfcp_erp_port_forced_reopen(port, 0, 63, NULL);
goto out;
}
if (!port->wwnn)
port->wwnn = ls_adisc->wwnn;
if (port->wwpn != ls_adisc->wwpn)
zfcp_erp_port_reopen(port, 0, 64, NULL);
out:
zfcp_port_put(port);
kfree(adisc);
}
static int zfcp_fc_adisc(struct zfcp_port *port)
{
struct zfcp_els_adisc *adisc;
struct zfcp_adapter *adapter = port->adapter;
adisc = kzalloc(sizeof(struct zfcp_els_adisc), GFP_ATOMIC);
if (!adisc)
return -ENOMEM;
adisc->els.req = &adisc->req;
adisc->els.resp = &adisc->resp;
sg_init_one(adisc->els.req, &adisc->ls_adisc,
sizeof(struct zfcp_ls_adisc));
sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc,
sizeof(struct zfcp_ls_adisc_acc));
adisc->els.req_count = 1;
adisc->els.resp_count = 1;
adisc->els.adapter = adapter;
adisc->els.port = port;
adisc->els.d_id = port->d_id;
adisc->els.handler = zfcp_fc_adisc_handler;
adisc->els.handler_data = (unsigned long) adisc;
adisc->els.ls_code = adisc->ls_adisc.code = ZFCP_LS_ADISC;
/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
without FC-AL-2 capability, so we don't set it */
adisc->ls_adisc.wwpn = fc_host_port_name(adapter->scsi_host);
adisc->ls_adisc.wwnn = fc_host_node_name(adapter->scsi_host);
adisc->ls_adisc.nport_id = fc_host_port_id(adapter->scsi_host);
return zfcp_fsf_send_els(&adisc->els);
}
/**
* zfcp_test_link - lightweight link test procedure
* @port: port to be tested
*
* Test status of a link to a remote port using the ELS command ADISC.
* If there is a problem with the remote port, error recovery steps
* will be triggered.
*/
void zfcp_test_link(struct zfcp_port *port)
{
int retval;
zfcp_port_get(port);
retval = zfcp_fc_adisc(port);
if (retval == 0 || retval == -EBUSY)
return;
/* send of ADISC was not possible */
zfcp_port_put(port);
zfcp_erp_port_forced_reopen(port, 0, 65, NULL);
}
static int zfcp_scan_get_nameserver(struct zfcp_adapter *adapter)
{
int ret;
if (!adapter->nameserver_port)
return -EINTR;
if (!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
&adapter->nameserver_port->status)) {
ret = zfcp_erp_port_reopen(adapter->nameserver_port, 0, 148,
NULL);
if (ret)
return ret;
zfcp_erp_wait(adapter);
zfcp_port_put(adapter->nameserver_port);
}
return !atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
&adapter->nameserver_port->status);
}
static void zfcp_gpn_ft_handler(unsigned long _done)
{
complete((struct completion *)_done);
}
static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft)
{
struct scatterlist *sg = &gpn_ft->sg_req;
kfree(sg_virt(sg)); /* free request buffer */
zfcp_sg_free_table(gpn_ft->sg_resp, ZFCP_GPN_FT_BUFFERS);
kfree(gpn_ft);
}
static struct zfcp_gpn_ft *zfcp_alloc_sg_env(void)
{
struct zfcp_gpn_ft *gpn_ft;
struct ct_iu_gpn_ft_req *req;
gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL);
if (!gpn_ft)
return NULL;
req = kzalloc(sizeof(struct ct_iu_gpn_ft_req), GFP_KERNEL);
if (!req) {
kfree(gpn_ft);
gpn_ft = NULL;
goto out;
}
sg_init_one(&gpn_ft->sg_req, req, sizeof(*req));
if (zfcp_sg_setup_table(gpn_ft->sg_resp, ZFCP_GPN_FT_BUFFERS)) {
zfcp_free_sg_env(gpn_ft);
gpn_ft = NULL;
}
out:
return gpn_ft;
}
static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
struct zfcp_adapter *adapter)
{
struct zfcp_send_ct *ct = &gpn_ft->ct;
struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
struct completion done;
int ret;
/* prepare CT IU for GPN_FT */
req->header.revision = ZFCP_CT_REVISION;
req->header.gs_type = ZFCP_CT_DIRECTORY_SERVICE;
req->header.gs_subtype = ZFCP_CT_NAME_SERVER;
req->header.options = ZFCP_CT_SYNCHRONOUS;
req->header.cmd_rsp_code = ZFCP_CT_GPN_FT;
req->header.max_res_size = (sizeof(struct gpn_ft_resp_acc) *
(ZFCP_GPN_FT_MAX_ENTRIES - 1)) >> 2;
req->flags = 0;
req->domain_id_scope = 0;
req->area_id_scope = 0;
req->fc4_type = ZFCP_CT_SCSI_FCP;
/* prepare zfcp_send_ct */
ct->port = adapter->nameserver_port;
ct->handler = zfcp_gpn_ft_handler;
ct->handler_data = (unsigned long)&done;
ct->timeout = 10;
ct->req = &gpn_ft->sg_req;
ct->resp = gpn_ft->sg_resp;
ct->req_count = 1;
ct->resp_count = ZFCP_GPN_FT_BUFFERS;
init_completion(&done);
ret = zfcp_fsf_send_ct(ct, NULL, NULL);
if (!ret)
wait_for_completion(&done);
return ret;
}
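On the max_res_size arithmetic above: the CT header expresses the maximum residual size in 4-byte words (hence the >> 2). With the sizes assumed earlier (16-byte accept entries, ZFCP_GPN_FT_MAX_ENTRIES = 1024), that is (16 * 1023) >> 2 = 4092 words, i.e. 16368 bytes of GPN_FT response space.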
static void zfcp_validate_port(struct zfcp_port *port)
{
struct zfcp_adapter *adapter = port->adapter;
atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
if (port == adapter->nameserver_port)
return;
if ((port->supported_classes != 0) || (port->units != 0)) {
zfcp_port_put(port);
return;
}
zfcp_erp_port_shutdown(port, 0, 151, NULL);
zfcp_erp_wait(adapter);
zfcp_port_put(port);
zfcp_port_dequeue(port);
}
static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
{
struct zfcp_send_ct *ct = &gpn_ft->ct;
struct scatterlist *sg = gpn_ft->sg_resp;
struct ct_hdr *hdr = sg_virt(sg);
struct gpn_ft_resp_acc *acc = sg_virt(sg);
struct zfcp_adapter *adapter = ct->port->adapter;
struct zfcp_port *port, *tmp;
u32 d_id;
int ret = 0, x;
if (ct->status)
return -EIO;
if (hdr->cmd_rsp_code != ZFCP_CT_ACCEPT) {
if (hdr->reason_code == ZFCP_CT_UNABLE_TO_PERFORM_CMD)
return -EAGAIN; /* might be a temporary condition */
return -EIO;
}
if (hdr->max_res_size)
return -E2BIG;
down(&zfcp_data.config_sema);
/* first entry is the header */
for (x = 1; x < ZFCP_GPN_FT_MAX_ENTRIES; x++) {
if (x % (ZFCP_GPN_FT_ENTRIES + 1))
acc++;
else
acc = sg_virt(++sg);
d_id = acc->port_id[0] << 16 | acc->port_id[1] << 8 |
acc->port_id[2];
/* skip the adapter's port and known remote ports */
if (acc->wwpn == fc_host_port_name(adapter->scsi_host) ||
zfcp_get_port_by_did(adapter, d_id))
continue;
port = zfcp_port_enqueue(adapter, acc->wwpn,
ZFCP_STATUS_PORT_DID_DID |
ZFCP_STATUS_COMMON_NOESC, d_id);
if (IS_ERR(port))
ret = PTR_ERR(port);
else
zfcp_erp_port_reopen(port, 0, 149, NULL);
if (acc->control & 0x80) /* last entry */
break;
}
zfcp_erp_wait(adapter);
list_for_each_entry_safe(port, tmp, &adapter->port_list_head, list)
zfcp_validate_port(port);
up(&zfcp_data.config_sema);
return ret;
}
/**
* zfcp_scan_ports - scan remote ports and attach new ports
* @adapter: pointer to struct zfcp_adapter
*/
int zfcp_scan_ports(struct zfcp_adapter *adapter)
{
int ret, i;
struct zfcp_gpn_ft *gpn_ft;
zfcp_erp_wait(adapter); /* wait until adapter is finished with ERP */
if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT)
return 0;
ret = zfcp_scan_get_nameserver(adapter);
if (ret)
return ret;
gpn_ft = zfcp_alloc_sg_env();
if (!gpn_ft)
return -ENOMEM;
for (i = 0; i < 3; i++) {
ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter);
if (!ret) {
ret = zfcp_scan_eval_gpn_ft(gpn_ft);
if (ret == -EAGAIN)
ssleep(1);
else
break;
}
}
zfcp_free_sg_env(gpn_ft);
return ret;
}
void _zfcp_scan_ports_later(struct work_struct *work)
{
zfcp_scan_ports(container_of(work, struct zfcp_adapter, scan_work));
}
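_zfcp_scan_ports_later() exists so the scan can run from the adapter's scan_work item, which zfcp_fc_incoming_rscn() above schedules; presumably it is wired up once at adapter setup in zfcp_aux.c, along the lines of:

	INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later);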

File diff suppressed because it is too large

drivers/s390/scsi/zfcp_fsf.h

@@ -1,27 +1,16 @@
 /*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
+ * zfcp device driver
  *
- * (C) Copyright IBM Corp. 2002, 2006
+ * Interface to the FSF support functions.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright IBM Corporation 2002, 2008
  */

 #ifndef FSF_H
 #define FSF_H

+#include <linux/pfn.h>
+
 #define FSF_QTCB_CURRENT_VERSION	0x00000001

 /* FSF commands */
@@ -258,6 +247,16 @@
 #define FSF_UNIT_ACCESS_EXCLUSIVE		0x02000000
 #define FSF_UNIT_ACCESS_OUTBOUND_TRANSFER	0x10000000

+/* FSF interface for CFDC */
+#define ZFCP_CFDC_MAX_SIZE		(127 * 1024)
+#define ZFCP_CFDC_PAGES			PFN_UP(ZFCP_CFDC_MAX_SIZE)
+
+struct zfcp_fsf_cfdc {
+	struct scatterlist sg[ZFCP_CFDC_PAGES];
+	u32 command;
+	u32 option;
+};
+
 struct fsf_queue_designator {
 	u8  cssid;
 	u8  chpid;

@@ -288,29 +287,6 @@ struct fsf_bit_error_payload {
 	u32 current_transmit_b2b_credit;
 } __attribute__ ((packed));
-struct fsf_status_read_buffer {
-	u32 status_type;
-	u32 status_subtype;
-	u32 length;
-	u32 res1;
-	struct fsf_queue_designator queue_designator;
-	u32 d_id;
-	u32 class;
-	u64 fcp_lun;
-	u8  res3[24];
-	u8  payload[FSF_STATUS_READ_PAYLOAD_SIZE];
-} __attribute__ ((packed));
-
-struct fsf_qual_version_error {
-	u32 fsf_version;
-	u32 res1[3];
-} __attribute__ ((packed));
-
-struct fsf_qual_sequence_error {
-	u32 exp_req_seq_no;
-	u32 res1[3];
-} __attribute__ ((packed));
-
 struct fsf_link_down_info {
 	u32 error_code;
 	u32 res1;

@@ -323,11 +299,47 @@ struct fsf_link_down_info {
 	u8 vendor_specific_code;
 } __attribute__ ((packed));
+struct fsf_status_read_buffer {
+	u32 status_type;
+	u32 status_subtype;
+	u32 length;
+	u32 res1;
+	struct fsf_queue_designator queue_designator;
+	u32 d_id;
+	u32 class;
+	u64 fcp_lun;
+	u8  res3[24];
+	union {
+		u8  data[FSF_STATUS_READ_PAYLOAD_SIZE];
+		u32 word[FSF_STATUS_READ_PAYLOAD_SIZE/sizeof(u32)];
+		struct fsf_link_down_info link_down_info;
+		struct fsf_bit_error_payload bit_error;
+	} payload;
+} __attribute__ ((packed));
+
+struct fsf_qual_version_error {
+	u32 fsf_version;
+	u32 res1[3];
+} __attribute__ ((packed));
+
+struct fsf_qual_sequence_error {
+	u32 exp_req_seq_no;
+	u32 res1[3];
+} __attribute__ ((packed));
+
+struct fsf_qual_latency_info {
+	u32 channel_lat;
+	u32 fabric_lat;
+	u8 res1[8];
+} __attribute__ ((packed));
+
 union fsf_prot_status_qual {
+	u32 word[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u32)];
 	u64 doubleword[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u64)];
 	struct fsf_qual_version_error   version_error;
 	struct fsf_qual_sequence_error  sequence_error;
 	struct fsf_link_down_info link_down_info;
+	struct fsf_qual_latency_info latency_info;
 } __attribute__ ((packed));
 struct fsf_qtcb_prefix {

@@ -437,7 +449,9 @@ struct fsf_qtcb_bottom_config {
 	u32 fc_link_speed;
 	u32 adapter_type;
 	u32 peer_d_id;
-	u8 res2[12];
+	u8 res1[2];
+	u16 timer_interval;
+	u8 res2[8];
 	u32 s_id;
 	struct fsf_nport_serv_param nport_serv_param;
 	u8 reserved_nport_serv_param[16];

File diff suppressed because it is too large

File diff suppressed because it is too large

drivers/s390/scsi/zfcp_sysfs.c (new file, 496 lines)

@@ -0,0 +1,496 @@
/*
* zfcp device driver
*
* sysfs attributes.
*
* Copyright IBM Corporation 2008
*/
#include "zfcp_ext.h"
#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \
struct device_attribute dev_attr_##_feat##_##_name = __ATTR(_name, _mode,\
_show, _store)
#define ZFCP_DEFINE_ATTR(_feat_def, _feat, _name, _format, _value) \
static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
struct device_attribute *at,\
char *buf) \
{ \
struct _feat_def *_feat = dev_get_drvdata(dev); \
\
return sprintf(buf, _format, _value); \
} \
static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
zfcp_sysfs_##_feat##_##_name##_show, NULL);
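For reference, the first instantiation below expands (modulo whitespace) to an ordinary show routine plus a device_attribute named dev_attr_adapter_status:

	static ssize_t zfcp_sysfs_adapter_status_show(struct device *dev,
						      struct device_attribute *at,
						      char *buf)
	{
		struct zfcp_adapter *adapter = dev_get_drvdata(dev);

		return sprintf(buf, "0x%08x\n", atomic_read(&adapter->status));
	}
	static struct device_attribute dev_attr_adapter_status =
		__ATTR(status, S_IRUGO, zfcp_sysfs_adapter_status_show, NULL);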
ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, status, "0x%08x\n",
atomic_read(&adapter->status));
ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwnn, "0x%016llx\n",
adapter->peer_wwnn);
ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwpn, "0x%016llx\n",
adapter->peer_wwpn);
ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_d_id, "0x%06x\n",
adapter->peer_d_id);
ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, card_version, "0x%04x\n",
adapter->hydra_version);
ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, lic_version, "0x%08x\n",
adapter->fsf_lic_version);
ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, hardware_version, "0x%08x\n",
adapter->hardware_version);
ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, in_recovery, "%d\n",
(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n",
atomic_read(&port->status));
ZFCP_DEFINE_ATTR(zfcp_port, port, in_recovery, "%d\n",
(atomic_read(&port->status) &
ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n",
(atomic_read(&port->status) &
ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n",
atomic_read(&unit->status));
ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
(atomic_read(&unit->status) &
ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
(atomic_read(&unit->status) &
ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n",
(atomic_read(&unit->status) &
ZFCP_STATUS_UNIT_SHARED) != 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n",
(atomic_read(&unit->status) &
ZFCP_STATUS_UNIT_READONLY) != 0);
#define ZFCP_SYSFS_FAILED(_feat_def, _feat, _adapter, _mod_id, _reopen_id) \
static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct _feat_def *_feat = dev_get_drvdata(dev); \
\
if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \
return sprintf(buf, "1\n"); \
else \
return sprintf(buf, "0\n"); \
} \
static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
struct device_attribute *attr,\
const char *buf, size_t count)\
{ \
struct _feat_def *_feat = dev_get_drvdata(dev); \
unsigned long val; \
int retval = 0; \
\
down(&zfcp_data.config_sema); \
if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_REMOVE) { \
retval = -EBUSY; \
goto out; \
} \
\
if (strict_strtoul(buf, 0, &val) || val != 0) { \
retval = -EINVAL; \
goto out; \
} \
\
zfcp_erp_modify_##_feat##_status(_feat, _mod_id, NULL, \
ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);\
zfcp_erp_##_feat##_reopen(_feat, ZFCP_STATUS_COMMON_ERP_FAILED, \
_reopen_id, NULL); \
zfcp_erp_wait(_adapter); \
out: \
up(&zfcp_data.config_sema); \
return retval ? retval : (ssize_t) count; \
} \
static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \
zfcp_sysfs_##_feat##_failed_show, \
zfcp_sysfs_##_feat##_failed_store);
ZFCP_SYSFS_FAILED(zfcp_adapter, adapter, adapter, 44, 93);
ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, 45, 96);
ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, 46, 97);
static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zfcp_adapter *adapter = dev_get_drvdata(dev);
int ret;
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE)
return -EBUSY;
ret = zfcp_scan_ports(adapter);
return ret ? ret : (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
zfcp_sysfs_port_rescan_store);
static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zfcp_adapter *adapter = dev_get_drvdata(dev);
struct zfcp_port *port;
wwn_t wwpn;
int retval = 0;
down(&zfcp_data.config_sema);
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) {
retval = -EBUSY;
goto out;
}
if (strict_strtoull(buf, 0, &wwpn)) {
retval = -EINVAL;
goto out;
}
write_lock_irq(&zfcp_data.config_lock);
port = zfcp_get_port_by_wwpn(adapter, wwpn);
if (port && (atomic_read(&port->refcount) == 0)) {
zfcp_port_get(port);
atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
list_move(&port->list, &adapter->port_remove_lh);
} else
port = NULL;
write_unlock_irq(&zfcp_data.config_lock);
if (!port) {
retval = -ENXIO;
goto out;
}
zfcp_erp_port_shutdown(port, 0, 92, NULL);
zfcp_erp_wait(adapter);
zfcp_port_put(port);
zfcp_port_dequeue(port);
out:
up(&zfcp_data.config_sema);
return retval ? retval : (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
zfcp_sysfs_port_remove_store);
static struct attribute *zfcp_adapter_attrs[] = {
&dev_attr_adapter_failed.attr,
&dev_attr_adapter_in_recovery.attr,
&dev_attr_adapter_port_remove.attr,
&dev_attr_adapter_port_rescan.attr,
&dev_attr_adapter_peer_wwnn.attr,
&dev_attr_adapter_peer_wwpn.attr,
&dev_attr_adapter_peer_d_id.attr,
&dev_attr_adapter_card_version.attr,
&dev_attr_adapter_lic_version.attr,
&dev_attr_adapter_status.attr,
&dev_attr_adapter_hardware_version.attr,
NULL
};
struct attribute_group zfcp_sysfs_adapter_attrs = {
.attrs = zfcp_adapter_attrs,
};
static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zfcp_port *port = dev_get_drvdata(dev);
struct zfcp_unit *unit;
fcp_lun_t fcp_lun;
int retval = -EINVAL;
down(&zfcp_data.config_sema);
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
retval = -EBUSY;
goto out;
}
if (strict_strtoull(buf, 0, &fcp_lun))
goto out;
unit = zfcp_unit_enqueue(port, fcp_lun);
if (IS_ERR(unit))
goto out;
retval = 0;
zfcp_erp_unit_reopen(unit, 0, 94, NULL);
zfcp_erp_wait(unit->port->adapter);
zfcp_unit_put(unit);
out:
up(&zfcp_data.config_sema);
return retval ? retval : (ssize_t) count;
}
static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zfcp_port *port = dev_get_drvdata(dev);
struct zfcp_unit *unit;
fcp_lun_t fcp_lun;
int retval = 0;
down(&zfcp_data.config_sema);
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
retval = -EBUSY;
goto out;
}
if (strict_strtoull(buf, 0, &fcp_lun)) {
retval = -EINVAL;
goto out;
}
write_lock_irq(&zfcp_data.config_lock);
unit = zfcp_get_unit_by_lun(port, fcp_lun);
if (unit && (atomic_read(&unit->refcount) == 0)) {
zfcp_unit_get(unit);
atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
list_move(&unit->list, &port->unit_remove_lh);
} else
unit = NULL;
write_unlock_irq(&zfcp_data.config_lock);
if (!unit) {
retval = -ENXIO;
goto out;
}
zfcp_erp_unit_shutdown(unit, 0, 95, NULL);
zfcp_erp_wait(unit->port->adapter);
zfcp_unit_put(unit);
zfcp_unit_dequeue(unit);
out:
up(&zfcp_data.config_sema);
return retval ? retval : (ssize_t) count;
}
static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
static struct attribute *zfcp_port_ns_attrs[] = {
&dev_attr_port_failed.attr,
&dev_attr_port_in_recovery.attr,
&dev_attr_port_status.attr,
&dev_attr_port_access_denied.attr,
NULL
};
/**
* zfcp_sysfs_ns_port_attrs - sysfs attributes for nameserver
*/
struct attribute_group zfcp_sysfs_ns_port_attrs = {
.attrs = zfcp_port_ns_attrs,
};
static struct attribute *zfcp_port_no_ns_attrs[] = {
&dev_attr_unit_add.attr,
&dev_attr_unit_remove.attr,
&dev_attr_port_failed.attr,
&dev_attr_port_in_recovery.attr,
&dev_attr_port_status.attr,
&dev_attr_port_access_denied.attr,
NULL
};
/**
* zfcp_sysfs_port_attrs - sysfs attributes for all other ports
*/
struct attribute_group zfcp_sysfs_port_attrs = {
.attrs = zfcp_port_no_ns_attrs,
};
static struct attribute *zfcp_unit_attrs[] = {
&dev_attr_unit_failed.attr,
&dev_attr_unit_in_recovery.attr,
&dev_attr_unit_status.attr,
&dev_attr_unit_access_denied.attr,
&dev_attr_unit_access_shared.attr,
&dev_attr_unit_access_readonly.attr,
NULL
};
struct attribute_group zfcp_sysfs_unit_attrs = {
.attrs = zfcp_unit_attrs,
};
#define ZFCP_DEFINE_LATENCY_ATTR(_name) \
static ssize_t \
zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) { \
struct scsi_device *sdev = to_scsi_device(dev); \
struct zfcp_unit *unit = sdev->hostdata; \
struct zfcp_latencies *lat = &unit->latencies; \
struct zfcp_adapter *adapter = unit->port->adapter; \
unsigned long flags; \
unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \
\
spin_lock_irqsave(&lat->lock, flags); \
fsum = lat->_name.fabric.sum * adapter->timer_ticks; \
fmin = lat->_name.fabric.min * adapter->timer_ticks; \
fmax = lat->_name.fabric.max * adapter->timer_ticks; \
csum = lat->_name.channel.sum * adapter->timer_ticks; \
cmin = lat->_name.channel.min * adapter->timer_ticks; \
cmax = lat->_name.channel.max * adapter->timer_ticks; \
cc = lat->_name.counter; \
spin_unlock_irqrestore(&lat->lock, flags); \
\
do_div(fsum, 1000); \
do_div(fmin, 1000); \
do_div(fmax, 1000); \
do_div(csum, 1000); \
do_div(cmin, 1000); \
do_div(cmax, 1000); \
\
return sprintf(buf, "%llu %llu %llu %llu %llu %llu %llu\n", \
fmin, fmax, fsum, cmin, cmax, csum, cc); \
} \
static ssize_t \
zfcp_sysfs_unit_##_name##_latency_store(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
struct scsi_device *sdev = to_scsi_device(dev); \
struct zfcp_unit *unit = sdev->hostdata; \
struct zfcp_latencies *lat = &unit->latencies; \
unsigned long flags; \
\
spin_lock_irqsave(&lat->lock, flags); \
lat->_name.fabric.sum = 0; \
lat->_name.fabric.min = 0xFFFFFFFF; \
lat->_name.fabric.max = 0; \
lat->_name.channel.sum = 0; \
lat->_name.channel.min = 0xFFFFFFFF; \
lat->_name.channel.max = 0; \
lat->_name.counter = 0; \
spin_unlock_irqrestore(&lat->lock, flags); \
\
return (ssize_t) count; \
} \
static DEVICE_ATTR(_name##_latency, S_IWUSR | S_IRUGO, \
zfcp_sysfs_unit_##_name##_latency_show, \
zfcp_sysfs_unit_##_name##_latency_store);
ZFCP_DEFINE_LATENCY_ATTR(read);
ZFCP_DEFINE_LATENCY_ATTR(write);
ZFCP_DEFINE_LATENCY_ATTR(cmd);
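/*
 * Reader's sketch (paths are placeholders): each latency attribute
 * reports "<fmin> <fmax> <fsum> <cmin> <cmax> <csum> <count>" for the
 * fabric and channel parts, and writing any value resets the counters:
 *
 *	cat /sys/class/scsi_device/<h:c:t:l>/device/read_latency
 *	echo 1 > /sys/class/scsi_device/<h:c:t:l>/device/read_latency
 */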
#define ZFCP_DEFINE_SCSI_ATTR(_name, _format, _value) \
static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \
struct device_attribute *attr,\
char *buf) \
{ \
struct scsi_device *sdev = to_scsi_device(dev); \
struct zfcp_unit *unit = sdev->hostdata; \
\
return sprintf(buf, _format, _value); \
} \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
unit->port->adapter->ccw_device->dev.bus_id);
ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", unit->port->wwpn);
ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n", unit->fcp_lun);
struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
&dev_attr_fcp_lun,
&dev_attr_wwpn,
&dev_attr_hba_id,
&dev_attr_read_latency,
&dev_attr_write_latency,
&dev_attr_cmd_latency,
NULL
};
static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *scsi_host = dev_to_shost(dev);
struct fsf_qtcb_bottom_port *qtcb_port;
struct zfcp_adapter *adapter;
int retval;
adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
return -EOPNOTSUPP;
qtcb_port = kzalloc(sizeof(struct fsf_qtcb_bottom_port), GFP_KERNEL);
if (!qtcb_port)
return -ENOMEM;
retval = zfcp_fsf_exchange_port_data_sync(adapter, qtcb_port);
if (!retval)
retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
qtcb_port->cb_util, qtcb_port->a_util);
kfree(qtcb_port);
return retval;
}
static DEVICE_ATTR(utilization, S_IRUGO, zfcp_sysfs_adapter_util_show, NULL);
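/*
 * Illustrative use (host number is system-specific): the attribute
 * prints the three FSF port utilization counters on one line:
 *
 *	cat /sys/class/scsi_host/host<n>/utilization
 *	-> "<cp_util> <cb_util> <a_util>"
 */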
static int zfcp_sysfs_adapter_ex_config(struct device *dev,
struct fsf_statistics_info *stat_inf)
{
struct Scsi_Host *scsi_host = dev_to_shost(dev);
struct fsf_qtcb_bottom_config *qtcb_config;
struct zfcp_adapter *adapter;
int retval;
adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
return -EOPNOTSUPP;
qtcb_config = kzalloc(sizeof(struct fsf_qtcb_bottom_config),
GFP_KERNEL);
if (!qtcb_config)
return -ENOMEM;
retval = zfcp_fsf_exchange_config_data_sync(adapter, qtcb_config);
if (!retval)
*stat_inf = qtcb_config->stat_info;
kfree(qtcb_config);
return retval;
}
#define ZFCP_SHOST_ATTR(_name, _format, _arg...) \
static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
struct device_attribute *attr,\
char *buf) \
{ \
struct fsf_statistics_info stat_info; \
int retval; \
\
retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info); \
if (retval) \
return retval; \
\
return sprintf(buf, _format, ## _arg); \
} \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);
ZFCP_SHOST_ATTR(requests, "%llu %llu %llu\n",
(unsigned long long) stat_info.input_req,
(unsigned long long) stat_info.output_req,
(unsigned long long) stat_info.control_req);
ZFCP_SHOST_ATTR(megabytes, "%llu %llu\n",
(unsigned long long) stat_info.input_mb,
(unsigned long long) stat_info.output_mb);
ZFCP_SHOST_ATTR(seconds_active, "%llu\n",
(unsigned long long) stat_info.seconds_act);
struct device_attribute *zfcp_sysfs_shost_attrs[] = {
&dev_attr_utilization,
&dev_attr_requests,
&dev_attr_megabytes,
&dev_attr_seconds_active,
NULL
};

View File

@ -1,270 +0,0 @@
/*
* This file is part of the zfcp device driver for
* FCP adapters for IBM System z9 and zSeries.
*
* (C) Copyright IBM Corp. 2002, 2006
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "zfcp_ext.h"
#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
/**
* ZFCP_DEFINE_ADAPTER_ATTR
* @_name: name of show attribute
* @_format: format string
* @_value: value to print
*
* Generates attributes for an adapter.
*/
#define ZFCP_DEFINE_ADAPTER_ATTR(_name, _format, _value) \
static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct zfcp_adapter *adapter; \
\
adapter = dev_get_drvdata(dev); \
return sprintf(buf, _format, _value); \
} \
\
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);
ZFCP_DEFINE_ADAPTER_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn);
ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn);
ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n",
adapter->hardware_version);
ZFCP_DEFINE_ADAPTER_ATTR(in_recovery, "%d\n", atomic_test_mask
(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status));
/**
* zfcp_sysfs_port_add_store - add a port to sysfs tree
* @dev: pointer to belonging device
* @buf: pointer to input buffer
* @count: number of bytes in buffer
*
* Store function of the "port_add" attribute of an adapter.
*/
static ssize_t
zfcp_sysfs_port_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
wwn_t wwpn;
char *endp;
struct zfcp_adapter *adapter;
struct zfcp_port *port;
int retval = -EINVAL;
down(&zfcp_data.config_sema);
adapter = dev_get_drvdata(dev);
if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status)) {
retval = -EBUSY;
goto out;
}
wwpn = simple_strtoull(buf, &endp, 0);
if ((endp + 1) < (buf + count))
goto out;
port = zfcp_port_enqueue(adapter, wwpn, 0, 0);
if (!port)
goto out;
retval = 0;
zfcp_erp_port_reopen(port, 0, 91, NULL);
zfcp_erp_wait(port->adapter);
zfcp_port_put(port);
out:
up(&zfcp_data.config_sema);
return retval ? retval : (ssize_t) count;
}
static DEVICE_ATTR(port_add, S_IWUSR, NULL, zfcp_sysfs_port_add_store);
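/*
 * Illustrative use (bus ID and WWPN are placeholders): writing a WWPN
 * to "port_add" enqueues the remote port and triggers its reopen:
 *
 *	echo 0x500507630300c562 > /sys/bus/ccw/drivers/zfcp/0.0.xxxx/port_add
 */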
/**
* zfcp_sysfs_port_remove_store - remove a port from sysfs tree
* @dev: pointer to belonging device
* @buf: pointer to input buffer
* @count: number of bytes in buffer
*
* Store function of the "port_remove" attribute of an adapter.
*/
static ssize_t
zfcp_sysfs_port_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct zfcp_adapter *adapter;
struct zfcp_port *port;
wwn_t wwpn;
char *endp;
int retval = 0;
down(&zfcp_data.config_sema);
adapter = dev_get_drvdata(dev);
if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status)) {
retval = -EBUSY;
goto out;
}
wwpn = simple_strtoull(buf, &endp, 0);
if ((endp + 1) < (buf + count)) {
retval = -EINVAL;
goto out;
}
write_lock_irq(&zfcp_data.config_lock);
port = zfcp_get_port_by_wwpn(adapter, wwpn);
if (port && (atomic_read(&port->refcount) == 0)) {
zfcp_port_get(port);
atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
list_move(&port->list, &adapter->port_remove_lh);
}
else {
port = NULL;
}
write_unlock_irq(&zfcp_data.config_lock);
if (!port) {
retval = -ENXIO;
goto out;
}
zfcp_erp_port_shutdown(port, 0, 92, NULL);
zfcp_erp_wait(adapter);
zfcp_port_put(port);
zfcp_port_dequeue(port);
out:
up(&zfcp_data.config_sema);
return retval ? retval : (ssize_t) count;
}
static DEVICE_ATTR(port_remove, S_IWUSR, NULL, zfcp_sysfs_port_remove_store);
/**
* zfcp_sysfs_adapter_failed_store - failed state of adapter
* @dev: pointer to belonging device
* @buf: pointer to input buffer
* @count: number of bytes in buffer
*
* Store function of the "failed" attribute of an adapter.
* If a "0" gets written to "failed", error recovery will be
* started for the belonging adapter.
*/
static ssize_t
zfcp_sysfs_adapter_failed_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct zfcp_adapter *adapter;
unsigned int val;
char *endp;
int retval = 0;
down(&zfcp_data.config_sema);
adapter = dev_get_drvdata(dev);
if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status)) {
retval = -EBUSY;
goto out;
}
val = simple_strtoul(buf, &endp, 0);
if (((endp + 1) < (buf + count)) || (val != 0)) {
retval = -EINVAL;
goto out;
}
zfcp_erp_modify_adapter_status(adapter, 44, NULL,
ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 93,
NULL);
zfcp_erp_wait(adapter);
out:
up(&zfcp_data.config_sema);
return retval ? retval : (ssize_t) count;
}
/**
* zfcp_sysfs_adapter_failed_show - failed state of adapter
* @dev: pointer to belonging device
* @buf: pointer to input buffer
*
* Show function of "failed" attribute of adapter. Will be
* "0" if adapter is working, otherwise "1".
*/
static ssize_t
zfcp_sysfs_adapter_failed_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct zfcp_adapter *adapter;
adapter = dev_get_drvdata(dev);
if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status))
return sprintf(buf, "1\n");
else
return sprintf(buf, "0\n");
}
static DEVICE_ATTR(failed, S_IWUSR | S_IRUGO, zfcp_sysfs_adapter_failed_show,
zfcp_sysfs_adapter_failed_store);
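/*
 * Illustrative use (path is a placeholder): "failed" reads 1 while the
 * adapter is in the ERP_FAILED state; writing "0" restarts recovery:
 *
 *	echo 0 > /sys/bus/ccw/drivers/zfcp/0.0.xxxx/failed
 */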
static struct attribute *zfcp_adapter_attrs[] = {
&dev_attr_failed.attr,
&dev_attr_in_recovery.attr,
&dev_attr_port_remove.attr,
&dev_attr_port_add.attr,
&dev_attr_peer_wwnn.attr,
&dev_attr_peer_wwpn.attr,
&dev_attr_peer_d_id.attr,
&dev_attr_card_version.attr,
&dev_attr_lic_version.attr,
&dev_attr_status.attr,
&dev_attr_hardware_version.attr,
NULL
};
static struct attribute_group zfcp_adapter_attr_group = {
.attrs = zfcp_adapter_attrs,
};
/**
* zfcp_sysfs_create_adapter_files - create sysfs adapter files
* @dev: pointer to belonging device
*
* Create all attributes of the sysfs representation of an adapter.
*/
int
zfcp_sysfs_adapter_create_files(struct device *dev)
{
return sysfs_create_group(&dev->kobj, &zfcp_adapter_attr_group);
}
/**
* zfcp_sysfs_remove_adapter_files - remove sysfs adapter files
* @dev: pointer to belonging device
*
* Remove all attributes of the sysfs representation of an adapter.
*/
void
zfcp_sysfs_adapter_remove_files(struct device *dev)
{
sysfs_remove_group(&dev->kobj, &zfcp_adapter_attr_group);
}
#undef ZFCP_LOG_AREA

View File

@ -1,106 +0,0 @@
/*
* This file is part of the zfcp device driver for
* FCP adapters for IBM System z9 and zSeries.
*
* (C) Copyright IBM Corp. 2002, 2006
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "zfcp_ext.h"
#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
/**
* ZFCP_DEFINE_DRIVER_ATTR - define for all loglevels sysfs attributes
* @_name: name of attribute
* @_define: name of ZFCP loglevel define
*
* Generates store function for a sysfs loglevel attribute of zfcp driver.
*/
#define ZFCP_DEFINE_DRIVER_ATTR(_name, _define) \
static ssize_t zfcp_sysfs_loglevel_##_name##_store(struct device_driver *drv, \
const char *buf, \
size_t count) \
{ \
unsigned int loglevel; \
unsigned int new_loglevel; \
char *endp; \
\
new_loglevel = simple_strtoul(buf, &endp, 0); \
if ((endp + 1) < (buf + count)) \
return -EINVAL; \
if (new_loglevel > 3) \
return -EINVAL; \
down(&zfcp_data.config_sema); \
loglevel = atomic_read(&zfcp_data.loglevel); \
loglevel &= ~((unsigned int) 0xf << (ZFCP_LOG_AREA_##_define << 2)); \
loglevel |= new_loglevel << (ZFCP_LOG_AREA_##_define << 2); \
atomic_set(&zfcp_data.loglevel, loglevel); \
up(&zfcp_data.config_sema); \
return count; \
} \
\
static ssize_t zfcp_sysfs_loglevel_##_name##_show(struct device_driver *dev, \
char *buf) \
{ \
return sprintf(buf,"%d\n", (unsigned int) \
ZFCP_GET_LOG_VALUE(ZFCP_LOG_AREA_##_define)); \
} \
\
static DRIVER_ATTR(loglevel_##_name, S_IWUSR | S_IRUGO, \
zfcp_sysfs_loglevel_##_name##_show, \
zfcp_sysfs_loglevel_##_name##_store);
ZFCP_DEFINE_DRIVER_ATTR(other, OTHER);
ZFCP_DEFINE_DRIVER_ATTR(scsi, SCSI);
ZFCP_DEFINE_DRIVER_ATTR(fsf, FSF);
ZFCP_DEFINE_DRIVER_ATTR(config, CONFIG);
ZFCP_DEFINE_DRIVER_ATTR(cio, CIO);
ZFCP_DEFINE_DRIVER_ATTR(qdio, QDIO);
ZFCP_DEFINE_DRIVER_ATTR(erp, ERP);
ZFCP_DEFINE_DRIVER_ATTR(fc, FC);
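/*
 * Layout sketch: zfcp_data.loglevel packs one 4-bit level per log area
 * at bit offset (ZFCP_LOG_AREA_<area> << 2). Assuming for illustration
 * that ZFCP_LOG_AREA_FSF is 2, "echo 3 > .../loglevel_fsf" clears bits
 * 8-11 and sets them to 3, leaving the other areas untouched.
 */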
static ssize_t zfcp_sysfs_version_show(struct device_driver *dev,
char *buf)
{
return sprintf(buf, "%s\n", zfcp_data.driver_version);
}
static DRIVER_ATTR(version, S_IRUGO, zfcp_sysfs_version_show, NULL);
static struct attribute *zfcp_driver_attrs[] = {
&driver_attr_loglevel_other.attr,
&driver_attr_loglevel_scsi.attr,
&driver_attr_loglevel_fsf.attr,
&driver_attr_loglevel_config.attr,
&driver_attr_loglevel_cio.attr,
&driver_attr_loglevel_qdio.attr,
&driver_attr_loglevel_erp.attr,
&driver_attr_loglevel_fc.attr,
&driver_attr_version.attr,
NULL
};
static struct attribute_group zfcp_driver_attr_group = {
.attrs = zfcp_driver_attrs,
};
struct attribute_group *zfcp_driver_attr_groups[] = {
&zfcp_driver_attr_group,
NULL,
};
#undef ZFCP_LOG_AREA

View File

@ -1,295 +0,0 @@
/*
* This file is part of the zfcp device driver for
* FCP adapters for IBM System z9 and zSeries.
*
* (C) Copyright IBM Corp. 2002, 2006
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "zfcp_ext.h"
#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
/**
* zfcp_sysfs_port_release - gets called when a struct device port is released
* @dev: pointer to belonging device
*/
void
zfcp_sysfs_port_release(struct device *dev)
{
kfree(dev);
}
/**
* ZFCP_DEFINE_PORT_ATTR
* @_name: name of show attribute
* @_format: format string
* @_value: value to print
*
* Generates attributes for a port.
*/
#define ZFCP_DEFINE_PORT_ATTR(_name, _format, _value) \
static ssize_t zfcp_sysfs_port_##_name##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct zfcp_port *port; \
\
port = dev_get_drvdata(dev); \
return sprintf(buf, _format, _value); \
} \
\
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_port_##_name##_show, NULL);
ZFCP_DEFINE_PORT_ATTR(status, "0x%08x\n", atomic_read(&port->status));
ZFCP_DEFINE_PORT_ATTR(in_recovery, "%d\n", atomic_test_mask
(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status));
ZFCP_DEFINE_PORT_ATTR(access_denied, "%d\n", atomic_test_mask
(ZFCP_STATUS_COMMON_ACCESS_DENIED, &port->status));
/**
* zfcp_sysfs_unit_add_store - add a unit to sysfs tree
* @dev: pointer to belonging device
* @buf: pointer to input buffer
* @count: number of bytes in buffer
*
* Store function of the "unit_add" attribute of a port.
*/
static ssize_t
zfcp_sysfs_unit_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
fcp_lun_t fcp_lun;
char *endp;
struct zfcp_port *port;
struct zfcp_unit *unit;
int retval = -EINVAL;
down(&zfcp_data.config_sema);
port = dev_get_drvdata(dev);
if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) {
retval = -EBUSY;
goto out;
}
fcp_lun = simple_strtoull(buf, &endp, 0);
if ((endp + 1) < (buf + count))
goto out;
unit = zfcp_unit_enqueue(port, fcp_lun);
if (!unit)
goto out;
retval = 0;
zfcp_erp_unit_reopen(unit, 0, 94, NULL);
zfcp_erp_wait(unit->port->adapter);
zfcp_unit_put(unit);
out:
up(&zfcp_data.config_sema);
return retval ? retval : (ssize_t) count;
}
static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
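/*
 * Illustrative use (LUN and WWPN are placeholders): writing an FCP LUN
 * to "unit_add" under the port's directory enqueues and reopens the unit:
 *
 *	echo 0x4010403200000000 > .../0x500507630300c562/unit_add
 */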
/**
* zfcp_sysfs_unit_remove_store - remove a unit from sysfs tree
* @dev: pointer to belonging device
* @buf: pointer to input buffer
* @count: number of bytes in buffer
*/
static ssize_t
zfcp_sysfs_unit_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct zfcp_port *port;
struct zfcp_unit *unit;
fcp_lun_t fcp_lun;
char *endp;
int retval = 0;
down(&zfcp_data.config_sema);
port = dev_get_drvdata(dev);
if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) {
retval = -EBUSY;
goto out;
}
fcp_lun = simple_strtoull(buf, &endp, 0);
if ((endp + 1) < (buf + count)) {
retval = -EINVAL;
goto out;
}
write_lock_irq(&zfcp_data.config_lock);
unit = zfcp_get_unit_by_lun(port, fcp_lun);
if (unit && (atomic_read(&unit->refcount) == 0)) {
zfcp_unit_get(unit);
atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
list_move(&unit->list, &port->unit_remove_lh);
}
else {
unit = NULL;
}
write_unlock_irq(&zfcp_data.config_lock);
if (!unit) {
retval = -ENXIO;
goto out;
}
zfcp_erp_unit_shutdown(unit, 0, 95, NULL);
zfcp_erp_wait(unit->port->adapter);
zfcp_unit_put(unit);
zfcp_unit_dequeue(unit);
out:
up(&zfcp_data.config_sema);
return retval ? retval : (ssize_t) count;
}
static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
/**
* zfcp_sysfs_port_failed_store - failed state of port
* @dev: pointer to belonging device
* @buf: pointer to input buffer
* @count: number of bytes in buffer
*
* Store function of the "failed" attribute of a port.
* If a "0" gets written to "failed", error recovery will be
* started for the belonging port.
*/
static ssize_t
zfcp_sysfs_port_failed_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct zfcp_port *port;
unsigned int val;
char *endp;
int retval = 0;
down(&zfcp_data.config_sema);
port = dev_get_drvdata(dev);
if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) {
retval = -EBUSY;
goto out;
}
val = simple_strtoul(buf, &endp, 0);
if (((endp + 1) < (buf + count)) || (val != 0)) {
retval = -EINVAL;
goto out;
}
zfcp_erp_modify_port_status(port, 45, NULL,
ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, 96, NULL);
zfcp_erp_wait(port->adapter);
out:
up(&zfcp_data.config_sema);
return retval ? retval : (ssize_t) count;
}
/**
* zfcp_sysfs_port_failed_show - failed state of port
* @dev: pointer to belonging device
* @buf: pointer to input buffer
*
* Show function of "failed" attribute of port. Will be
* "0" if port is working, otherwise "1".
*/
static ssize_t
zfcp_sysfs_port_failed_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct zfcp_port *port;
port = dev_get_drvdata(dev);
if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status))
return sprintf(buf, "1\n");
else
return sprintf(buf, "0\n");
}
static DEVICE_ATTR(failed, S_IWUSR | S_IRUGO, zfcp_sysfs_port_failed_show,
zfcp_sysfs_port_failed_store);
/**
* zfcp_port_common_attrs
* sysfs attributes common to all kinds of FC ports.
*/
static struct attribute *zfcp_port_common_attrs[] = {
&dev_attr_failed.attr,
&dev_attr_in_recovery.attr,
&dev_attr_status.attr,
&dev_attr_access_denied.attr,
NULL
};
static struct attribute_group zfcp_port_common_attr_group = {
.attrs = zfcp_port_common_attrs,
};
/**
* zfcp_port_no_ns_attrs
* sysfs attributes not to be used for nameserver ports.
*/
static struct attribute *zfcp_port_no_ns_attrs[] = {
&dev_attr_unit_add.attr,
&dev_attr_unit_remove.attr,
NULL
};
static struct attribute_group zfcp_port_no_ns_attr_group = {
.attrs = zfcp_port_no_ns_attrs,
};
/**
* zfcp_sysfs_port_create_files - create sysfs port files
* @dev: pointer to belonging device
*
* Create all attributes of the sysfs representation of a port.
*/
int
zfcp_sysfs_port_create_files(struct device *dev, u32 flags)
{
int retval;
retval = sysfs_create_group(&dev->kobj, &zfcp_port_common_attr_group);
if ((flags & ZFCP_STATUS_PORT_WKA) || retval)
return retval;
retval = sysfs_create_group(&dev->kobj, &zfcp_port_no_ns_attr_group);
if (retval)
sysfs_remove_group(&dev->kobj, &zfcp_port_common_attr_group);
return retval;
}
/**
* zfcp_sysfs_port_remove_files - remove sysfs port files
* @dev: pointer to belonging device
*
* Remove all attributes of the sysfs representation of a port.
*/
void
zfcp_sysfs_port_remove_files(struct device *dev, u32 flags)
{
sysfs_remove_group(&dev->kobj, &zfcp_port_common_attr_group);
if (!(flags & ZFCP_STATUS_PORT_WKA))
sysfs_remove_group(&dev->kobj, &zfcp_port_no_ns_attr_group);
}
#undef ZFCP_LOG_AREA

View File

@ -1,167 +0,0 @@
/*
* This file is part of the zfcp device driver for
* FCP adapters for IBM System z9 and zSeries.
*
* (C) Copyright IBM Corp. 2002, 2006
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "zfcp_ext.h"
#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
/**
* zfcp_sysfs_unit_release - gets called when a struct device unit is released
* @dev: pointer to belonging device
*/
void
zfcp_sysfs_unit_release(struct device *dev)
{
kfree(dev);
}
/**
* ZFCP_DEFINE_UNIT_ATTR
* @_name: name of show attribute
* @_format: format string
* @_value: value to print
*
* Generates attribute for a unit.
*/
#define ZFCP_DEFINE_UNIT_ATTR(_name, _format, _value) \
static ssize_t zfcp_sysfs_unit_##_name##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct zfcp_unit *unit; \
\
unit = dev_get_drvdata(dev); \
return sprintf(buf, _format, _value); \
} \
\
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_unit_##_name##_show, NULL);
ZFCP_DEFINE_UNIT_ATTR(status, "0x%08x\n", atomic_read(&unit->status));
ZFCP_DEFINE_UNIT_ATTR(in_recovery, "%d\n", atomic_test_mask
(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status));
ZFCP_DEFINE_UNIT_ATTR(access_denied, "%d\n", atomic_test_mask
(ZFCP_STATUS_COMMON_ACCESS_DENIED, &unit->status));
ZFCP_DEFINE_UNIT_ATTR(access_shared, "%d\n", atomic_test_mask
(ZFCP_STATUS_UNIT_SHARED, &unit->status));
ZFCP_DEFINE_UNIT_ATTR(access_readonly, "%d\n", atomic_test_mask
(ZFCP_STATUS_UNIT_READONLY, &unit->status));
/**
* zfcp_sysfs_unit_failed_store - failed state of unit
* @dev: pointer to belonging device
* @buf: pointer to input buffer
* @count: number of bytes in buffer
*
* Store function of the "failed" attribute of a unit.
* If a "0" gets written to "failed", error recovery will be
* started for the belonging unit.
*/
static ssize_t
zfcp_sysfs_unit_failed_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct zfcp_unit *unit;
unsigned int val;
char *endp;
int retval = 0;
down(&zfcp_data.config_sema);
unit = dev_get_drvdata(dev);
if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status)) {
retval = -EBUSY;
goto out;
}
val = simple_strtoul(buf, &endp, 0);
if (((endp + 1) < (buf + count)) || (val != 0)) {
retval = -EINVAL;
goto out;
}
zfcp_erp_modify_unit_status(unit, 46, NULL,
ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, 97, NULL);
zfcp_erp_wait(unit->port->adapter);
out:
up(&zfcp_data.config_sema);
return retval ? retval : (ssize_t) count;
}
/**
* zfcp_sysfs_unit_failed_show - failed state of unit
* @dev: pointer to belonging device
* @buf: pointer to input buffer
*
* Show function of "failed" attribute of unit. Will be
* "0" if unit is working, otherwise "1".
*/
static ssize_t
zfcp_sysfs_unit_failed_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct zfcp_unit *unit;
unit = dev_get_drvdata(dev);
if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status))
return sprintf(buf, "1\n");
else
return sprintf(buf, "0\n");
}
static DEVICE_ATTR(failed, S_IWUSR | S_IRUGO, zfcp_sysfs_unit_failed_show,
zfcp_sysfs_unit_failed_store);
static struct attribute *zfcp_unit_attrs[] = {
&dev_attr_failed.attr,
&dev_attr_in_recovery.attr,
&dev_attr_status.attr,
&dev_attr_access_denied.attr,
&dev_attr_access_shared.attr,
&dev_attr_access_readonly.attr,
NULL
};
static struct attribute_group zfcp_unit_attr_group = {
.attrs = zfcp_unit_attrs,
};
/**
* zfcp_sysfs_create_unit_files - create sysfs unit files
* @dev: pointer to belonging device
*
* Create all attributes of the sysfs representation of a unit.
*/
int
zfcp_sysfs_unit_create_files(struct device *dev)
{
return sysfs_create_group(&dev->kobj, &zfcp_unit_attr_group);
}
/**
* zfcp_sysfs_remove_unit_files - remove sysfs unit files
* @dev: pointer to belonging device
*
* Remove all attributes of the sysfs representation of a unit.
*/
void
zfcp_sysfs_unit_remove_files(struct device *dev)
{
sysfs_remove_group(&dev->kobj, &zfcp_unit_attr_group);
}
#undef ZFCP_LOG_AREA

View File

@ -888,6 +888,25 @@ config SCSI_IBMVSCSIS
To compile this driver as a module, choose M here: the
module will be called ibmvstgt.
config SCSI_IBMVFC
tristate "IBM Virtual FC support"
depends on PPC_PSERIES && SCSI
select SCSI_FC_ATTRS
help
This is the IBM POWER Virtual FC client driver.
To compile this driver as a module, choose M here: the
module will be called ibmvfc.
config SCSI_IBMVFC_TRACE
bool "enable driver internal trace"
depends on SCSI_IBMVFC
default y
help
If you say Y here, the driver will trace all commands issued
to the adapter. Performance impact is minimal. Trace can be
dumped using /sys/class/scsi_host/hostXX/trace.
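# e.g.: cat /sys/class/scsi_host/host<n>/trace (host number is system-specific)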
config SCSI_INITIO
tristate "Initio 9100U(W) support"
depends on PCI && SCSI
@ -1738,10 +1757,12 @@ config SCSI_SUNESP
select SCSI_SPI_ATTRS
help
This is the driver for the Sun ESP SCSI host adapter. The ESP
chipset is present in most SPARC SBUS-based computers and
supports the Emulex family of ESP SCSI chips (esp100, esp100A,
esp236, fas101, fas236) as well as the Qlogic fas366 SCSI chip.
To compile this driver as a module, choose M here: the
module will be called sun_esp.
config ZFCP
tristate "FCP host bus adapter driver for IBM eServer zSeries"
@ -1771,4 +1792,6 @@ endif # SCSI_LOWLEVEL
source "drivers/scsi/pcmcia/Kconfig" source "drivers/scsi/pcmcia/Kconfig"
source "drivers/scsi/device_handler/Kconfig"
endmenu

View File

@ -34,6 +34,7 @@ obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o
obj-$(CONFIG_SCSI_SAS_ATTRS) += scsi_transport_sas.o
obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/
obj-$(CONFIG_SCSI_SRP_ATTRS) += scsi_transport_srp.o
obj-$(CONFIG_SCSI_DH) += device_handler/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
@ -118,6 +119,7 @@ obj-$(CONFIG_SCSI_IPR) += ipr.o
obj-$(CONFIG_SCSI_SRP) += libsrp.o
obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/
obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/
obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/
obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
obj-$(CONFIG_SCSI_STEX) += stex.o
obj-$(CONFIG_SCSI_MVSAS) += mvsas.o

View File

@ -41,6 +41,7 @@
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <asm/uaccess.h>
#include <scsi/scsi_host.h>
#include "aacraid.h" #include "aacraid.h"
@ -581,6 +582,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
for (i = 0; i < upsg->count; i++) {
u64 addr;
void* p;
if (upsg->sg[i].count >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
rcode = -EINVAL;
goto cleanup;
}
/* Does this really need to be GFP_DMA? */
p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
if(!p) {
@ -625,6 +634,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
for (i = 0; i < usg->count; i++) {
u64 addr;
void* p;
if (usg->sg[i].count >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
rcode = -EINVAL;
goto cleanup;
}
/* Does this really need to be GFP_DMA? */
p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
if(!p) {
@ -667,6 +684,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
for (i = 0; i < upsg->count; i++) {
uintptr_t addr;
void* p;
if (usg->sg[i].count >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
rcode = -EINVAL;
goto cleanup;
}
/* Does this really need to be GFP_DMA? */
p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
if(!p) {
@ -698,6 +723,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
for (i = 0; i < upsg->count; i++) {
dma_addr_t addr;
void* p;
if (upsg->sg[i].count >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
rcode = -EINVAL;
goto cleanup;
}
p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
if (!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",

View File

@ -865,7 +865,7 @@ static ssize_t aac_show_bios_version(struct device *device,
return len;
}
static ssize_t aac_show_serial_number(struct device *device,
struct device_attribute *attr, char *buf)
{
struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;

View File

@ -0,0 +1,32 @@
#
# SCSI Device Handler configuration
#
menuconfig SCSI_DH
tristate "SCSI Device Handlers"
depends on SCSI
default n
help
SCSI Device Handlers provide device specific support for
devices utilized in multipath configurations. Say Y here to
select support for specific hardware.
config SCSI_DH_RDAC
tristate "LSI RDAC Device Handler"
depends on SCSI_DH
help
If you have an LSI RDAC, select y. Otherwise, say N.
config SCSI_DH_HP_SW
tristate "HP/COMPAQ MSA Device Handler"
depends on SCSI_DH
help
If you have an HP/COMPAQ MSA device that requires START_STOP to
be sent to start it and cannot upgrade the firmware, select y.
Otherwise, say N.
config SCSI_DH_EMC
tristate "EMC CLARiiON Device Handler"
depends on SCSI_DH
help
If you have an EMC CLARiiON, select y. Otherwise, say N.

View File

@ -0,0 +1,7 @@
#
# SCSI Device Handler
#
obj-$(CONFIG_SCSI_DH) += scsi_dh.o
obj-$(CONFIG_SCSI_DH_RDAC) += scsi_dh_rdac.o
obj-$(CONFIG_SCSI_DH_HP_SW) += scsi_dh_hp_sw.o
obj-$(CONFIG_SCSI_DH_EMC) += scsi_dh_emc.o

View File

@ -0,0 +1,162 @@
/*
* SCSI device handler infrastructure.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright IBM Corporation, 2007
* Authors:
* Chandra Seetharaman <sekharan@us.ibm.com>
* Mike Anderson <andmike@linux.vnet.ibm.com>
*/
#include <scsi/scsi_dh.h>
#include "../scsi_priv.h"
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(scsi_dh_list);
static struct scsi_device_handler *get_device_handler(const char *name)
{
struct scsi_device_handler *tmp, *found = NULL;
spin_lock(&list_lock);
list_for_each_entry(tmp, &scsi_dh_list, list) {
if (!strcmp(tmp->name, name)) {
found = tmp;
break;
}
}
spin_unlock(&list_lock);
return found;
}
static int scsi_dh_notifier_add(struct device *dev, void *data)
{
struct scsi_device_handler *scsi_dh = data;
scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_ADD_DEVICE, dev);
return 0;
}
/*
* scsi_register_device_handler - register a device handler personality
* module.
* @scsi_dh - device handler to be registered.
*
* Returns 0 on success, -EBUSY if handler already registered.
*/
int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
{
int ret = -EBUSY;
struct scsi_device_handler *tmp;
tmp = get_device_handler(scsi_dh->name);
if (tmp)
goto done;
ret = bus_register_notifier(&scsi_bus_type, &scsi_dh->nb);
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
spin_lock(&list_lock);
list_add(&scsi_dh->list, &scsi_dh_list);
spin_unlock(&list_lock);
done:
return ret;
}
EXPORT_SYMBOL_GPL(scsi_register_device_handler);
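/*
 * Registration sketch (hypothetical handler, not part of this patch):
 * a personality module fills in struct scsi_device_handler -- name,
 * nb.notifier_call, activate, optionally check_sense -- and calls
 * scsi_register_device_handler() from its module init; the CLARiiON
 * and HP handlers below follow exactly this pattern.
 */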
static int scsi_dh_notifier_remove(struct device *dev, void *data)
{
struct scsi_device_handler *scsi_dh = data;
scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_DEL_DEVICE, dev);
return 0;
}
/*
* scsi_unregister_device_handler - unregister a device handler personality
* module.
* @scsi_dh - device handler to be unregistered.
*
* Returns 0 on success, -ENODEV if handler not registered.
*/
int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
{
int ret = -ENODEV;
struct scsi_device_handler *tmp;
tmp = get_device_handler(scsi_dh->name);
if (!tmp)
goto done;
ret = bus_unregister_notifier(&scsi_bus_type, &scsi_dh->nb);
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
scsi_dh_notifier_remove);
spin_lock(&list_lock);
list_del(&scsi_dh->list);
spin_unlock(&list_lock);
done:
return ret;
}
EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
/*
* scsi_dh_activate - activate the path associated with the scsi_device
* corresponding to the given request queue.
* @q - Request queue that is associated with the scsi_device to be
* activated.
*/
int scsi_dh_activate(struct request_queue *q)
{
int err = 0;
unsigned long flags;
struct scsi_device *sdev;
struct scsi_device_handler *scsi_dh = NULL;
spin_lock_irqsave(q->queue_lock, flags);
sdev = q->queuedata;
if (sdev && sdev->scsi_dh_data)
scsi_dh = sdev->scsi_dh_data->scsi_dh;
if (!scsi_dh || !get_device(&sdev->sdev_gendev))
err = SCSI_DH_NOSYS;
spin_unlock_irqrestore(q->queue_lock, flags);
if (err)
return err;
if (scsi_dh->activate)
err = scsi_dh->activate(sdev);
put_device(&sdev->sdev_gendev);
return err;
}
EXPORT_SYMBOL_GPL(scsi_dh_activate);
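/*
 * Caller sketch (hypothetical, not part of this patch): a multipath
 * layer activates the path behind a request queue and reacts to the
 * SCSI_DH_* result; fail_path() here is an assumed helper:
 *
 *	int err = scsi_dh_activate(q);
 *	if (err != SCSI_DH_OK)
 *		fail_path(pgpath);
 */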
/*
* scsi_dh_handler_exist - Return TRUE(1) if a device handler exists for
* the given name. FALSE(0) otherwise.
* @name - name of the device handler.
*/
int scsi_dh_handler_exist(const char *name)
{
return (get_device_handler(name) != NULL);
}
EXPORT_SYMBOL_GPL(scsi_dh_handler_exist);
MODULE_DESCRIPTION("SCSI device handler");
MODULE_AUTHOR("Chandra Seetharaman <sekharan@us.ibm.com>");
MODULE_LICENSE("GPL");

View File

@ -0,0 +1,499 @@
/*
* Target driver for EMC CLARiiON AX/CX-series hardware.
* Based on code from Lars Marowsky-Bree <lmb@suse.de>
* and Ed Goggin <egoggin@emc.com>.
*
* Copyright (C) 2006 Red Hat, Inc. All rights reserved.
* Copyright (C) 2006 Mike Christie
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>
#include <scsi/scsi_device.h>
#define CLARIION_NAME "emc_clariion"
#define CLARIION_TRESPASS_PAGE 0x22
#define CLARIION_BUFFER_SIZE 0x80
#define CLARIION_TIMEOUT (60 * HZ)
#define CLARIION_RETRIES 3
#define CLARIION_UNBOUND_LU -1
static unsigned char long_trespass[] = {
0, 0, 0, 0,
CLARIION_TRESPASS_PAGE, /* Page code */
0x09, /* Page length - 2 */
0x81, /* Trespass code + Honor reservation bit */
0xff, 0xff, /* Trespass target */
0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */
};
static unsigned char long_trespass_hr[] = {
0, 0, 0, 0,
CLARIION_TRESPASS_PAGE, /* Page code */
0x09, /* Page length - 2 */
0x01, /* Trespass code + Honor reservation bit */
0xff, 0xff, /* Trespass target */
0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */
};
static unsigned char short_trespass[] = {
0, 0, 0, 0,
CLARIION_TRESPASS_PAGE, /* Page code */
0x02, /* Page length - 2 */
0x81, /* Trespass code + Honor reservation bit */
0xff, /* Trespass target */
};
static unsigned char short_trespass_hr[] = {
0, 0, 0, 0,
CLARIION_TRESPASS_PAGE, /* Page code */
0x02, /* Page length - 2 */
0x01, /* Trespass code + Honor reservation bit */
0xff, /* Trespass target */
};
struct clariion_dh_data {
/*
* Use short trespass command (FC-series) or the long version
* (default for AX/CX CLARiiON arrays).
*/
unsigned short_trespass;
/*
* Whether or not (default) to honor SCSI reservations when
* initiating a switch-over.
*/
unsigned hr;
/* I/O buffer for both MODE_SELECT and INQUIRY commands. */
char buffer[CLARIION_BUFFER_SIZE];
/*
* SCSI sense buffer for commands -- assumes serial issuance
* and completion sequence of all commands for same multipath.
*/
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
/* which SP (A=0,B=1,UNBOUND=-1) is dflt SP for path's mapped dev */
int default_sp;
/* which SP (A=0,B=1,UNBOUND=-1) is active for path's mapped dev */
int current_sp;
};
static inline struct clariion_dh_data
*get_clariion_data(struct scsi_device *sdev)
{
struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
BUG_ON(scsi_dh_data == NULL);
return ((struct clariion_dh_data *) scsi_dh_data->buf);
}
/*
* Parse MODE_SELECT cmd reply.
*/
static int trespass_endio(struct scsi_device *sdev, int result)
{
int err = SCSI_DH_OK;
struct scsi_sense_hdr sshdr;
struct clariion_dh_data *csdev = get_clariion_data(sdev);
char *sense = csdev->sense;
if (status_byte(result) == CHECK_CONDITION &&
scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) {
sdev_printk(KERN_ERR, sdev, "Found valid sense data 0x%2x, "
"0x%2x, 0x%2x while sending CLARiiON trespass "
"command.\n", sshdr.sense_key, sshdr.asc,
sshdr.ascq);
if ((sshdr.sense_key == 0x05) && (sshdr.asc == 0x04) &&
(sshdr.ascq == 0x00)) {
/*
* Array based copy in progress -- do not send
* mode_select or copy will be aborted mid-stream.
*/
sdev_printk(KERN_INFO, sdev, "Array Based Copy in "
"progress while sending CLARiiON trespass "
"command.\n");
err = SCSI_DH_DEV_TEMP_BUSY;
} else if ((sshdr.sense_key == 0x02) && (sshdr.asc == 0x04) &&
(sshdr.ascq == 0x03)) {
/*
* LUN Not Ready - Manual Intervention Required
* indicates in-progress ucode upgrade (NDU).
*/
sdev_printk(KERN_INFO, sdev, "Detected in-progress "
"ucode upgrade NDU operation while sending "
"CLARiiON trespass command.\n");
err = SCSI_DH_DEV_TEMP_BUSY;
} else
err = SCSI_DH_DEV_FAILED;
} else if (result) {
sdev_printk(KERN_ERR, sdev, "Error 0x%x while sending "
"CLARiiON trespass command.\n", result);
err = SCSI_DH_IO;
}
return err;
}
static int parse_sp_info_reply(struct scsi_device *sdev, int result,
int *default_sp, int *current_sp, int *new_current_sp)
{
int err = SCSI_DH_OK;
struct clariion_dh_data *csdev = get_clariion_data(sdev);
if (result == 0) {
/* check for in-progress ucode upgrade (NDU) */
if (csdev->buffer[48] != 0) {
sdev_printk(KERN_NOTICE, sdev, "Detected in-progress "
"ucode upgrade NDU operation while finding "
"current active SP.");
err = SCSI_DH_DEV_TEMP_BUSY;
} else {
*default_sp = csdev->buffer[5];
if (csdev->buffer[4] == 2)
/* SP for path is current */
*current_sp = csdev->buffer[8];
else {
if (csdev->buffer[4] == 1)
/* SP for this path is NOT current */
if (csdev->buffer[8] == 0)
*current_sp = 1;
else
*current_sp = 0;
else
/* unbound LU or LUNZ */
*current_sp = CLARIION_UNBOUND_LU;
}
*new_current_sp = csdev->buffer[8];
}
} else {
struct scsi_sense_hdr sshdr;
err = SCSI_DH_IO;
if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
&sshdr))
sdev_printk(KERN_ERR, sdev, "Found valid sense data "
"0x%2x, 0x%2x, 0x%2x while finding current "
"active SP.", sshdr.sense_key, sshdr.asc,
sshdr.ascq);
else
sdev_printk(KERN_ERR, sdev, "Error 0x%x finding "
"current active SP.", result);
}
return err;
}
static int sp_info_endio(struct scsi_device *sdev, int result,
int mode_select_sent, int *done)
{
struct clariion_dh_data *csdev = get_clariion_data(sdev);
int err_flags, default_sp, current_sp, new_current_sp;
err_flags = parse_sp_info_reply(sdev, result, &default_sp,
&current_sp, &new_current_sp);
if (err_flags != SCSI_DH_OK)
goto done;
if (mode_select_sent) {
csdev->default_sp = default_sp;
csdev->current_sp = current_sp;
} else {
/*
* Issue the actual mode_select request IFF either
* (1) we do not know the identity of the current SP OR
* (2) what we think we know is actually correct.
*/
if ((current_sp != CLARIION_UNBOUND_LU) &&
(new_current_sp != current_sp)) {
csdev->default_sp = default_sp;
csdev->current_sp = current_sp;
sdev_printk(KERN_INFO, sdev, "Ignoring path group "
"switch-over command for CLARiiON SP%s since "
" mapped device is already initialized.",
current_sp ? "B" : "A");
if (done)
*done = 1; /* as good as doing it */
}
}
done:
return err_flags;
}
/*
* Get block request for REQ_BLOCK_PC command issued to path. Currently
* limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands.
*
* Uses data and sense buffers in hardware handler context structure and
* assumes serial servicing of commands, both issuance and completion.
*/
static struct request *get_req(struct scsi_device *sdev, int cmd)
{
struct clariion_dh_data *csdev = get_clariion_data(sdev);
struct request *rq;
unsigned char *page22;
int len = 0;
rq = blk_get_request(sdev->request_queue,
(cmd == MODE_SELECT) ? WRITE : READ, GFP_ATOMIC);
if (!rq) {
sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
return NULL;
}
memset(&rq->cmd, 0, BLK_MAX_CDB);
rq->cmd[0] = cmd;
rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
switch (cmd) {
case MODE_SELECT:
if (csdev->short_trespass) {
page22 = csdev->hr ? short_trespass_hr : short_trespass;
len = sizeof(short_trespass);
} else {
page22 = csdev->hr ? long_trespass_hr : long_trespass;
len = sizeof(long_trespass);
}
/*
* Can't DMA from kernel BSS -- must copy selected trespass
* command mode page contents to context buffer which is
* allocated by kmalloc.
*/
BUG_ON((len > CLARIION_BUFFER_SIZE));
memcpy(csdev->buffer, page22, len);
rq->cmd_flags |= REQ_RW;
rq->cmd[1] = 0x10;
break;
case INQUIRY:
rq->cmd[1] = 0x1;
rq->cmd[2] = 0xC0;
len = CLARIION_BUFFER_SIZE;
memset(csdev->buffer, 0, CLARIION_BUFFER_SIZE);
break;
default:
BUG_ON(1);
break;
}
rq->cmd[4] = len;
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->cmd_flags |= REQ_FAILFAST;
rq->timeout = CLARIION_TIMEOUT;
rq->retries = CLARIION_RETRIES;
rq->sense = csdev->sense;
memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
rq->sense_len = 0;
if (blk_rq_map_kern(sdev->request_queue, rq, csdev->buffer,
len, GFP_ATOMIC)) {
__blk_put_request(rq->q, rq);
return NULL;
}
return rq;
}
static int send_cmd(struct scsi_device *sdev, int cmd)
{
struct request *rq = get_req(sdev, cmd);
if (!rq)
return SCSI_DH_RES_TEMP_UNAVAIL;
return blk_execute_rq(sdev->request_queue, NULL, rq, 1);
}
static int clariion_activate(struct scsi_device *sdev)
{
int result, done = 0;
result = send_cmd(sdev, INQUIRY);
result = sp_info_endio(sdev, result, 0, &done);
if (result || done)
goto done;
result = send_cmd(sdev, MODE_SELECT);
result = trespass_endio(sdev, result);
if (result)
goto done;
result = send_cmd(sdev, INQUIRY);
result = sp_info_endio(sdev, result, 1, NULL);
done:
return result;
}
static int clariion_check_sense(struct scsi_device *sdev,
struct scsi_sense_hdr *sense_hdr)
{
switch (sense_hdr->sense_key) {
case NOT_READY:
if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x03)
/*
* LUN Not Ready - Manual Intervention Required
* indicates this is a passive path.
*
* FIXME: However, if this is seen and EVPD C0
* indicates that this is due to a NDU in
* progress, we should set FAIL_PATH too.
* This indicates we might have to do a SCSI
* inquiry in the end_io path. Ugh.
*
* Can return FAILED only when we want the error
* recovery process to kick in.
*/
return SUCCESS;
break;
case ILLEGAL_REQUEST:
if (sense_hdr->asc == 0x25 && sense_hdr->ascq == 0x01)
/*
* An array based copy is in progress. Do not
* fail the path, do not bypass to another PG,
* do not retry. Fail the IO immediately.
* (Actually this is the same conclusion as in
* the default handler, but let's make sure.)
*
* Can return FAILED only when we want the error
* recovery process to kick in.
*/
return SUCCESS;
break;
case UNIT_ATTENTION:
if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
/*
* Unit Attention Code. This is the first IO
* to the new path, so just retry.
*/
return NEEDS_RETRY;
break;
}
/* success just means we do not care what scsi-ml does */
return SUCCESS;
}
static const struct {
char *vendor;
char *model;
} clariion_dev_list[] = {
{"DGC", "RAID"},
{"DGC", "DISK"},
{NULL, NULL},
};
static int clariion_bus_notify(struct notifier_block *, unsigned long, void *);
static struct scsi_device_handler clariion_dh = {
.name = CLARIION_NAME,
.module = THIS_MODULE,
.nb.notifier_call = clariion_bus_notify,
.check_sense = clariion_check_sense,
.activate = clariion_activate,
};
/*
* TODO: need some interface so we can set trespass values
*/
static int clariion_bus_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct device *dev = data;
struct scsi_device *sdev = to_scsi_device(dev);
struct scsi_dh_data *scsi_dh_data;
struct clariion_dh_data *h;
int i, found = 0;
unsigned long flags;
if (action == BUS_NOTIFY_ADD_DEVICE) {
for (i = 0; clariion_dev_list[i].vendor; i++) {
if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor,
strlen(clariion_dev_list[i].vendor)) &&
!strncmp(sdev->model, clariion_dev_list[i].model,
strlen(clariion_dev_list[i].model))) {
found = 1;
break;
}
}
if (!found)
goto out;
scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ sizeof(*h) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
CLARIION_NAME);
goto out;
}
scsi_dh_data->scsi_dh = &clariion_dh;
h = (struct clariion_dh_data *) scsi_dh_data->buf;
h->default_sp = CLARIION_UNBOUND_LU;
h->current_sp = CLARIION_UNBOUND_LU;
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
sdev->scsi_dh_data = scsi_dh_data;
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", CLARIION_NAME);
try_module_get(THIS_MODULE);
} else if (action == BUS_NOTIFY_DEL_DEVICE) {
if (sdev->scsi_dh_data == NULL ||
sdev->scsi_dh_data->scsi_dh != &clariion_dh)
goto out;
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
scsi_dh_data = sdev->scsi_dh_data;
sdev->scsi_dh_data = NULL;
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n",
CLARIION_NAME);
kfree(scsi_dh_data);
module_put(THIS_MODULE);
}
out:
return 0;
}
static int __init clariion_init(void)
{
int r;
r = scsi_register_device_handler(&clariion_dh);
if (r != 0)
printk(KERN_ERR "Failed to register scsi device handler.");
return r;
}
static void __exit clariion_exit(void)
{
scsi_unregister_device_handler(&clariion_dh);
}
module_init(clariion_init);
module_exit(clariion_exit);
MODULE_DESCRIPTION("EMC CX/AX/FC-family driver");
MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, Chandra Seetharaman <sekharan@us.ibm.com>");
MODULE_LICENSE("GPL");

View File

@ -0,0 +1,202 @@
/*
* Basic HP/COMPAQ MSA 1000 support. This is only needed if your HW cannot be
* upgraded.
*
* Copyright (C) 2006 Red Hat, Inc. All rights reserved.
* Copyright (C) 2006 Mike Christie
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>
#define HP_SW_NAME "hp_sw"
#define HP_SW_TIMEOUT (60 * HZ)
#define HP_SW_RETRIES 3
struct hp_sw_dh_data {
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
int retries;
};
static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev)
{
struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
BUG_ON(scsi_dh_data == NULL);
return ((struct hp_sw_dh_data *) scsi_dh_data->buf);
}
static int hp_sw_done(struct scsi_device *sdev)
{
struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
struct scsi_sense_hdr sshdr;
int rc;
sdev_printk(KERN_INFO, sdev, "hp_sw_done\n");
rc = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
if (!rc)
goto done;
switch (sshdr.sense_key) {
case NOT_READY:
if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) {
rc = SCSI_DH_RETRY;
h->retries++;
break;
}
/* fall through */
default:
h->retries++;
rc = SCSI_DH_IMM_RETRY;
}
done:
if (rc == SCSI_DH_OK || rc == SCSI_DH_IO)
h->retries = 0;
else if (h->retries > HP_SW_RETRIES) {
h->retries = 0;
rc = SCSI_DH_IO;
}
return rc;
}
static int hp_sw_activate(struct scsi_device *sdev)
{
struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
struct request *req;
int ret = SCSI_DH_RES_TEMP_UNAVAIL;
req = blk_get_request(sdev->request_queue, WRITE, GFP_ATOMIC);
if (!req)
goto done;
sdev_printk(KERN_INFO, sdev, "sending START_STOP.");
req->cmd_type = REQ_TYPE_BLOCK_PC;
req->cmd_flags |= REQ_FAILFAST;
req->cmd_len = COMMAND_SIZE(START_STOP);
memset(req->cmd, 0, MAX_COMMAND_SIZE);
req->cmd[0] = START_STOP;
req->cmd[4] = 1; /* Start spin cycle */
req->timeout = HP_SW_TIMEOUT;
req->sense = h->sense;
memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
req->sense_len = 0;
ret = blk_execute_rq(req->q, NULL, req, 1);
if (!ret) /* SUCCESS */
ret = hp_sw_done(sdev);
else
ret = SCSI_DH_IO;
done:
return ret;
}
static const struct {
char *vendor;
char *model;
} hp_sw_dh_data_list[] = {
{"COMPAQ", "MSA"},
{"HP", "HSV"},
{"DEC", "HSG80"},
{NULL, NULL},
};
static int hp_sw_bus_notify(struct notifier_block *, unsigned long, void *);
static struct scsi_device_handler hp_sw_dh = {
.name = HP_SW_NAME,
.module = THIS_MODULE,
.nb.notifier_call = hp_sw_bus_notify,
.activate = hp_sw_activate,
};
static int hp_sw_bus_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct device *dev = data;
struct scsi_device *sdev = to_scsi_device(dev);
struct scsi_dh_data *scsi_dh_data;
int i, found = 0;
unsigned long flags;
if (action == BUS_NOTIFY_ADD_DEVICE) {
for (i = 0; hp_sw_dh_data_list[i].vendor; i++) {
if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor,
strlen(hp_sw_dh_data_list[i].vendor)) &&
!strncmp(sdev->model, hp_sw_dh_data_list[i].model,
strlen(hp_sw_dh_data_list[i].model))) {
found = 1;
break;
}
}
if (!found)
goto out;
scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ sizeof(struct hp_sw_dh_data) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "Attach Failed %s.\n",
HP_SW_NAME);
goto out;
}
scsi_dh_data->scsi_dh = &hp_sw_dh;
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
sdev->scsi_dh_data = scsi_dh_data;
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
try_module_get(THIS_MODULE);
sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", HP_SW_NAME);
} else if (action == BUS_NOTIFY_DEL_DEVICE) {
if (sdev->scsi_dh_data == NULL ||
sdev->scsi_dh_data->scsi_dh != &hp_sw_dh)
goto out;
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
scsi_dh_data = sdev->scsi_dh_data;
sdev->scsi_dh_data = NULL;
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
module_put(THIS_MODULE);
sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", HP_SW_NAME);
kfree(scsi_dh_data);
}
out:
return 0;
}
static int __init hp_sw_init(void)
{
return scsi_register_device_handler(&hp_sw_dh);
}
static void __exit hp_sw_exit(void)
{
scsi_unregister_device_handler(&hp_sw_dh);
}
module_init(hp_sw_init);
module_exit(hp_sw_exit);
MODULE_DESCRIPTION("HP MSA 1000");
MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu");
MODULE_LICENSE("GPL");

View File

@ -0,0 +1,691 @@
/*
* Engenio/LSI RDAC SCSI Device Handler
*
* Copyright (C) 2005 Mike Christie. All rights reserved.
* Copyright (C) Chandra Seetharaman, IBM Corp. 2007
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>
#define RDAC_NAME "rdac"
/*
* LSI mode page stuff
*
* These struct definitions and the forming of the
* mode page were taken from the LSI RDAC 2.4 GPL'd
* driver, and then converted to Linux conventions.
*/
#define RDAC_QUIESCENCE_TIME 20
/*
* Page Codes
*/
#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c
/*
* Controller modes definitions
*/
#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS 0x02
/*
* RDAC Options field
*/
#define RDAC_FORCED_QUIESENCE 0x02
#define RDAC_TIMEOUT (60 * HZ)
#define RDAC_RETRIES 3
struct rdac_mode_6_hdr {
u8 data_len;
u8 medium_type;
u8 device_params;
u8 block_desc_len;
};
struct rdac_mode_10_hdr {
u16 data_len;
u8 medium_type;
u8 device_params;
u16 reserved;
u16 block_desc_len;
};
struct rdac_mode_common {
u8 controller_serial[16];
u8 alt_controller_serial[16];
u8 rdac_mode[2];
u8 alt_rdac_mode[2];
u8 quiescence_timeout;
u8 rdac_options;
};
struct rdac_pg_legacy {
struct rdac_mode_6_hdr hdr;
u8 page_code;
u8 page_len;
struct rdac_mode_common common;
#define MODE6_MAX_LUN 32
u8 lun_table[MODE6_MAX_LUN];
u8 reserved2[32];
u8 reserved3;
u8 reserved4;
};
struct rdac_pg_expanded {
struct rdac_mode_10_hdr hdr;
u8 page_code;
u8 subpage_code;
u8 page_len[2];
struct rdac_mode_common common;
u8 lun_table[256];
u8 reserved3;
u8 reserved4;
};
struct c9_inquiry {
u8 peripheral_info;
u8 page_code; /* 0xC9 */
u8 reserved1;
u8 page_len;
u8 page_id[4]; /* "vace" */
u8 avte_cvp;
u8 path_prio;
u8 reserved2[38];
};
#define SUBSYS_ID_LEN 16
#define SLOT_ID_LEN 2
struct c4_inquiry {
u8 peripheral_info;
u8 page_code; /* 0xC4 */
u8 reserved1;
u8 page_len;
u8 page_id[4]; /* "subs" */
u8 subsys_id[SUBSYS_ID_LEN];
u8 revision[4];
u8 slot_id[SLOT_ID_LEN];
u8 reserved[2];
};
struct rdac_controller {
u8 subsys_id[SUBSYS_ID_LEN];
u8 slot_id[SLOT_ID_LEN];
int use_ms10;
struct kref kref;
struct list_head node; /* list of all controllers */
union {
struct rdac_pg_legacy legacy;
struct rdac_pg_expanded expanded;
} mode_select;
};
struct c8_inquiry {
u8 peripheral_info;
u8 page_code; /* 0xC8 */
u8 reserved1;
u8 page_len;
u8 page_id[4]; /* "edid" */
u8 reserved2[3];
u8 vol_uniq_id_len;
u8 vol_uniq_id[16];
u8 vol_user_label_len;
u8 vol_user_label[60];
u8 array_uniq_id_len;
u8 array_unique_id[16];
u8 array_user_label_len;
u8 array_user_label[60];
u8 lun[8];
};
struct c2_inquiry {
u8 peripheral_info;
u8 page_code; /* 0xC2 */
u8 reserved1;
u8 page_len;
u8 page_id[4]; /* "swr4" */
u8 sw_version[3];
u8 sw_date[3];
u8 features_enabled;
u8 max_lun_supported;
u8 partitions[239]; /* Total allocation length should be 0xFF */
};
struct rdac_dh_data {
struct rdac_controller *ctlr;
#define UNINITIALIZED_LUN (1 << 8)
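/* get_lun() fills this from a single byte of the C8 page (0-255),
 * so (1 << 8) can never collide with a real LUN and marks "not yet read" */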
unsigned lun;
#define RDAC_STATE_ACTIVE 0
#define RDAC_STATE_PASSIVE 1
unsigned char state;
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
union {
struct c2_inquiry c2;
struct c4_inquiry c4;
struct c8_inquiry c8;
struct c9_inquiry c9;
} inq;
};
static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev)
{
struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
BUG_ON(scsi_dh_data == NULL);
return ((struct rdac_dh_data *) scsi_dh_data->buf);
}
static struct request *get_rdac_req(struct scsi_device *sdev,
void *buffer, unsigned buflen, int rw)
{
struct request *rq;
struct request_queue *q = sdev->request_queue;
struct rdac_dh_data *h = get_rdac_data(sdev);
rq = blk_get_request(q, rw, GFP_KERNEL);
if (!rq) {
sdev_printk(KERN_INFO, sdev,
"get_rdac_req: blk_get_request failed.\n");
return NULL;
}
if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) {
blk_put_request(rq);
sdev_printk(KERN_INFO, sdev,
"get_rdac_req: blk_rq_map_kern failed.\n");
return NULL;
}
memset(&rq->cmd, 0, BLK_MAX_CDB);
rq->sense = h->sense;
memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
rq->sense_len = 0;
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
rq->retries = RDAC_RETRIES;
rq->timeout = RDAC_TIMEOUT;
return rq;
}
static struct request *rdac_failover_get(struct scsi_device *sdev)
{
struct request *rq;
struct rdac_mode_common *common;
unsigned data_size;
struct rdac_dh_data *h = get_rdac_data(sdev);
if (h->ctlr->use_ms10) {
struct rdac_pg_expanded *rdac_pg;
data_size = sizeof(struct rdac_pg_expanded);
rdac_pg = &h->ctlr->mode_select.expanded;
memset(rdac_pg, 0, data_size);
common = &rdac_pg->common;
rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
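/* the extra 0x40 sets the SPF bit of the page code byte, flagging subpage format */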
rdac_pg->subpage_code = 0x1;
rdac_pg->page_len[0] = 0x01;
rdac_pg->page_len[1] = 0x28;
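/* page length 0x0128 = 296, the bytes following this field: common(38) + lun_table(256) + 2 reserved */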
rdac_pg->lun_table[h->lun] = 0x81;
} else {
struct rdac_pg_legacy *rdac_pg;
data_size = sizeof(struct rdac_pg_legacy);
rdac_pg = &h->ctlr->mode_select.legacy;
memset(rdac_pg, 0, data_size);
common = &rdac_pg->common;
rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
rdac_pg->page_len = 0x68;
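/* 0x68 = 104 bytes after the page length byte: common(38) + lun_table(32) + 34 reserved */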
rdac_pg->lun_table[h->lun] = 0x81;
}
common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
common->rdac_options = RDAC_FORCED_QUIESCENCE;
/* get request for block layer packet command */
rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
if (!rq)
return NULL;
/* Prepare the command. */
if (h->ctlr->use_ms10) {
rq->cmd[0] = MODE_SELECT_10;
rq->cmd[7] = data_size >> 8;
rq->cmd[8] = data_size & 0xff;
} else {
rq->cmd[0] = MODE_SELECT;
rq->cmd[4] = data_size;
}
rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
return rq;
}
static void release_controller(struct kref *kref)
{
struct rdac_controller *ctlr;
ctlr = container_of(kref, struct rdac_controller, kref);
spin_lock(&list_lock);
list_del(&ctlr->node);
spin_unlock(&list_lock);
kfree(ctlr);
}
static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
{
struct rdac_controller *ctlr, *tmp;
spin_lock(&list_lock);
list_for_each_entry(tmp, &ctlr_list, node) {
if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
(memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
kref_get(&tmp->kref);
spin_unlock(&list_lock);
return tmp;
}
}
ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
if (!ctlr)
goto done;
/* initialize fields of controller */
memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
kref_init(&ctlr->kref);
ctlr->use_ms10 = -1;
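/* -1 = not yet determined; set_mode_select() later picks mode select 6 or 10 */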
list_add(&ctlr->node, &ctlr_list);
done:
spin_unlock(&list_lock);
return ctlr;
}
static int submit_inquiry(struct scsi_device *sdev, int page_code,
unsigned int len)
{
struct request *rq;
struct request_queue *q = sdev->request_queue;
struct rdac_dh_data *h = get_rdac_data(sdev);
int err = SCSI_DH_RES_TEMP_UNAVAIL;
rq = get_rdac_req(sdev, &h->inq, len, READ);
if (!rq)
goto done;
/* Prepare the command. */
rq->cmd[0] = INQUIRY;
rq->cmd[1] = 1;
rq->cmd[2] = page_code;
rq->cmd[4] = len;
rq->cmd_len = COMMAND_SIZE(INQUIRY);
err = blk_execute_rq(q, NULL, rq, 1);
if (err == -EIO)
err = SCSI_DH_IO;
done:
return err;
}
static int get_lun(struct scsi_device *sdev)
{
int err;
struct c8_inquiry *inqp;
struct rdac_dh_data *h = get_rdac_data(sdev);
err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry));
if (err == SCSI_DH_OK) {
inqp = &h->inq.c8;
h->lun = inqp->lun[7]; /* currently it uses only one byte */
}
return err;
}
#define RDAC_OWNED 0
#define RDAC_UNOWNED 1
#define RDAC_FAILED 2
static int check_ownership(struct scsi_device *sdev)
{
int err;
struct c9_inquiry *inqp;
struct rdac_dh_data *h = get_rdac_data(sdev);
err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry));
if (err == SCSI_DH_OK) {
err = RDAC_UNOWNED;
inqp = &h->inq.c9;
/*
* If in AVT mode or if the path already owns the LUN,
* return RDAC_OWNED;
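* (avte_cvp bit 7 = AVT mode enabled, bit 0 = this path owns the LUN)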
*/
if (((inqp->avte_cvp >> 7) == 0x1) ||
((inqp->avte_cvp & 0x1) != 0))
err = RDAC_OWNED;
} else
err = RDAC_FAILED;
return err;
}
static int initialize_controller(struct scsi_device *sdev)
{
int err;
struct c4_inquiry *inqp;
struct rdac_dh_data *h = get_rdac_data(sdev);
err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry));
if (err == SCSI_DH_OK) {
inqp = &h->inq.c4;
h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id);
if (!h->ctlr)
err = SCSI_DH_RES_TEMP_UNAVAIL;
}
return err;
}
static int set_mode_select(struct scsi_device *sdev)
{
int err;
struct c2_inquiry *inqp;
struct rdac_dh_data *h = get_rdac_data(sdev);
err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry));
if (err == SCSI_DH_OK) {
inqp = &h->inq.c2;
/*
* If more than MODE6_MAX_LUN luns are supported, use
* mode select 10
*/
if (inqp->max_lun_supported >= MODE6_MAX_LUN)
h->ctlr->use_ms10 = 1;
else
h->ctlr->use_ms10 = 0;
}
return err;
}
static int mode_select_handle_sense(struct scsi_device *sdev)
{
struct scsi_sense_hdr sense_hdr;
struct rdac_dh_data *h = get_rdac_data(sdev);
int sense, err = SCSI_DH_IO, ret;
ret = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
if (!ret)
goto done;
err = SCSI_DH_OK;
sense = (sense_hdr.sense_key << 16) | (sense_hdr.asc << 8) |
sense_hdr.ascq;
/* If it is retryable failure, submit the c9 inquiry again */
if (sense == 0x59136 || sense == 0x68b02 || sense == 0xb8b02 ||
sense == 0x62900) {
/* 0x59136 - Command lock contention
* 0x[6b]8b02 - Quiescence in progress or achieved
* 0x62900 - Power On, Reset, or Bus Device Reset
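* e.g. 0x62900 unpacks to (sense_key 0x06 << 16) | (asc 0x29 << 8) | ascq 0x00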
*/
err = SCSI_DH_RETRY;
}
if (sense)
sdev_printk(KERN_INFO, sdev,
"MODE_SELECT failed with sense 0x%x.\n", sense);
done:
return err;
}
static int send_mode_select(struct scsi_device *sdev)
{
struct request *rq;
struct request_queue *q = sdev->request_queue;
struct rdac_dh_data *h = get_rdac_data(sdev);
int err = SCSI_DH_RES_TEMP_UNAVAIL;
rq = rdac_failover_get(sdev);
if (!rq)
goto done;
sdev_printk(KERN_INFO, sdev, "queueing MODE_SELECT command.\n");
err = blk_execute_rq(q, NULL, rq, 1);
if (err != SCSI_DH_OK)
err = mode_select_handle_sense(sdev);
if (err == SCSI_DH_OK)
h->state = RDAC_STATE_ACTIVE;
done:
return err;
}
static int rdac_activate(struct scsi_device *sdev)
{
struct rdac_dh_data *h = get_rdac_data(sdev);
int err = SCSI_DH_OK;
if (h->lun == UNINITIALIZED_LUN) {
err = get_lun(sdev);
if (err != SCSI_DH_OK)
goto done;
}
err = check_ownership(sdev);
switch (err) {
case RDAC_UNOWNED:
break;
case RDAC_OWNED:
err = SCSI_DH_OK;
goto done;
case RDAC_FAILED:
default:
err = SCSI_DH_IO;
goto done;
}
if (!h->ctlr) {
err = initialize_controller(sdev);
if (err != SCSI_DH_OK)
goto done;
}
if (h->ctlr->use_ms10 == -1) {
err = set_mode_select(sdev);
if (err != SCSI_DH_OK)
goto done;
}
err = send_mode_select(sdev);
done:
return err;
}
static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
{
struct rdac_dh_data *h = get_rdac_data(sdev);
int ret = BLKPREP_OK;
if (h->state != RDAC_STATE_ACTIVE) {
ret = BLKPREP_KILL;
req->cmd_flags |= REQ_QUIET;
}
return ret;
}
static int rdac_check_sense(struct scsi_device *sdev,
struct scsi_sense_hdr *sense_hdr)
{
struct rdac_dh_data *h = get_rdac_data(sdev);
switch (sense_hdr->sense_key) {
case NOT_READY:
if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
/* LUN Not Ready - Storage firmware incompatible
* Manual code synchronisation required.
*
* Nothing we can do here. Try to bypass the path.
*/
return SUCCESS;
if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1)
/* LUN Not Ready - Quiescence in progress
*
* Just retry and wait.
*/
return NEEDS_RETRY;
break;
case ILLEGAL_REQUEST:
if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) {
/* Invalid Request - Current Logical Unit Ownership.
* Controller is not the current owner of the LUN,
* Fail the path, so that the other path can be used.
*/
h->state = RDAC_STATE_PASSIVE;
return SUCCESS;
}
break;
case UNIT_ATTENTION:
if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
/*
* Power On, Reset, or Bus Device Reset, just retry.
*/
return NEEDS_RETRY;
break;
}
/* success just means we do not care what scsi-ml does */
return SCSI_RETURN_NOT_HANDLED;
}
static const struct {
char *vendor;
char *model;
} rdac_dev_list[] = {
{"IBM", "1722"},
{"IBM", "1724"},
{"IBM", "1726"},
{"IBM", "1742"},
{"IBM", "1814"},
{"IBM", "1815"},
{"IBM", "1818"},
{"IBM", "3526"},
{"SGI", "TP9400"},
{"SGI", "TP9500"},
{"SGI", "IS"},
{"STK", "OPENstorage D280"},
{"SUN", "CSM200_R"},
{"SUN", "LCSM100_F"},
{NULL, NULL},
};
static int rdac_bus_notify(struct notifier_block *, unsigned long, void *);
static struct scsi_device_handler rdac_dh = {
.name = RDAC_NAME,
.module = THIS_MODULE,
.nb.notifier_call = rdac_bus_notify,
.prep_fn = rdac_prep_fn,
.check_sense = rdac_check_sense,
.activate = rdac_activate,
};
/*
* TODO: need some interface so we can set trespass values
*/
static int rdac_bus_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct device *dev = data;
struct scsi_device *sdev = to_scsi_device(dev);
struct scsi_dh_data *scsi_dh_data;
struct rdac_dh_data *h;
int i, found = 0;
unsigned long flags;
if (action == BUS_NOTIFY_ADD_DEVICE) {
for (i = 0; rdac_dev_list[i].vendor; i++) {
if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
strlen(rdac_dev_list[i].vendor)) &&
!strncmp(sdev->model, rdac_dev_list[i].model,
strlen(rdac_dev_list[i].model))) {
found = 1;
break;
}
}
if (!found)
goto out;
scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ sizeof(*h) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
RDAC_NAME);
goto out;
}
scsi_dh_data->scsi_dh = &rdac_dh;
h = (struct rdac_dh_data *) scsi_dh_data->buf;
h->lun = UNINITIALIZED_LUN;
h->state = RDAC_STATE_ACTIVE;
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
sdev->scsi_dh_data = scsi_dh_data;
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
try_module_get(THIS_MODULE);
sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", RDAC_NAME);
} else if (action == BUS_NOTIFY_DEL_DEVICE) {
if (sdev->scsi_dh_data == NULL ||
sdev->scsi_dh_data->scsi_dh != &rdac_dh)
goto out;
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
scsi_dh_data = sdev->scsi_dh_data;
sdev->scsi_dh_data = NULL;
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
h = (struct rdac_dh_data *) scsi_dh_data->buf;
if (h->ctlr)
kref_put(&h->ctlr->kref, release_controller);
kfree(scsi_dh_data);
module_put(THIS_MODULE);
sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", RDAC_NAME);
}
out:
return 0;
}
static int __init rdac_init(void)
{
int r;
r = scsi_register_device_handler(&rdac_dh);
if (r != 0)
printk(KERN_ERR "Failed to register scsi device handler.");
return r;
}
static void __exit rdac_exit(void)
{
scsi_unregister_device_handler(&rdac_dh);
}
module_init(rdac_init);
module_exit(rdac_exit);
MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
MODULE_LICENSE("GPL");

@@ -219,19 +219,10 @@ static void esp_reset_esp(struct esp *esp)
 /* Now reset the ESP chip */
 scsi_esp_cmd(esp, ESP_CMD_RC);
 scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
-if (esp->rev == FAST)
-esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
 scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
-/* Reload the configuration registers */
-esp_write8(esp->cfact, ESP_CFACT);
-esp->prev_stp = 0;
-esp_write8(esp->prev_stp, ESP_STP);
-esp->prev_soff = 0;
-esp_write8(esp->prev_soff, ESP_SOFF);
-esp_write8(esp->neg_defp, ESP_TIMEO);
 /* This is the only point at which it is reliable to read
  * the ID-code for fast ESP chip variants.
  */
@@ -316,6 +307,17 @@ static void esp_reset_esp(struct esp *esp)
 break;
 }
+/* Reload the configuration registers */
+esp_write8(esp->cfact, ESP_CFACT);
+esp->prev_stp = 0;
+esp_write8(esp->prev_stp, ESP_STP);
+esp->prev_soff = 0;
+esp_write8(esp->prev_soff, ESP_SOFF);
+esp_write8(esp->neg_defp, ESP_TIMEO);
 /* Eat any bitrot in the chip */
 esp_read8(ESP_INTRPT);
 udelay(100);

@@ -290,7 +290,7 @@ static void scsi_host_dev_release(struct device *dev)
 kfree(shost);
 }
-struct device_type scsi_host_type = {
+static struct device_type scsi_host_type = {
 .name = "scsi_host",
 .release = scsi_host_dev_release,
 };

@@ -5,3 +5,4 @@ ibmvscsic-$(CONFIG_PPC_ISERIES) += iseries_vscsi.o
 ibmvscsic-$(CONFIG_PPC_PSERIES) += rpa_vscsi.o
 obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvstgt.o
+obj-$(CONFIG_SCSI_IBMVFC) += ibmvfc.o

File diff suppressed because it is too large

@@ -0,0 +1,682 @@
/*
* ibmvfc.h -- driver for IBM Power Virtual Fibre Channel Adapter
*
* Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
*
* Copyright (C) IBM Corporation, 2008
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#ifndef _IBMVFC_H
#define _IBMVFC_H
#include <linux/list.h>
#include <linux/types.h>
#include "viosrp.h"
#define IBMVFC_NAME "ibmvfc"
#define IBMVFC_DRIVER_VERSION "1.0.0"
#define IBMVFC_DRIVER_DATE "(July 1, 2008)"
#define IBMVFC_DEFAULT_TIMEOUT 15
#define IBMVFC_INIT_TIMEOUT 30
#define IBMVFC_MAX_REQUESTS_DEFAULT 100
#define IBMVFC_DEBUG 0
#define IBMVFC_MAX_TARGETS 1024
#define IBMVFC_MAX_LUN 0xffffffff
#define IBMVFC_MAX_SECTORS 0xffffu
#define IBMVFC_MAX_DISC_THREADS 4
#define IBMVFC_TGT_MEMPOOL_SZ 64
#define IBMVFC_MAX_CMDS_PER_LUN 64
#define IBMVFC_MAX_INIT_RETRIES 3
#define IBMVFC_DEV_LOSS_TMO (5 * 60)
#define IBMVFC_DEFAULT_LOG_LEVEL 2
#define IBMVFC_MAX_CDB_LEN 16
/*
* Ensure we have resources for ERP and initialization:
* 1 for ERP
* 1 for initialization
* 1 for each discovery thread
*/
#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + disc_threads)
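/* e.g. with disc_threads = 4 this reserves 1 + 1 + 4 = 6 internal requests */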
#define IBMVFC_MAD_SUCCESS 0x00
#define IBMVFC_MAD_NOT_SUPPORTED 0xF1
#define IBMVFC_MAD_FAILED 0xF7
#define IBMVFC_MAD_DRIVER_FAILED 0xEE
#define IBMVFC_MAD_CRQ_ERROR 0xEF
enum ibmvfc_crq_valid {
IBMVFC_CRQ_CMD_RSP = 0x80,
IBMVFC_CRQ_INIT_RSP = 0xC0,
IBMVFC_CRQ_XPORT_EVENT = 0xFF,
};
enum ibmvfc_crq_format {
IBMVFC_CRQ_INIT = 0x01,
IBMVFC_CRQ_INIT_COMPLETE = 0x02,
IBMVFC_PARTITION_MIGRATED = 0x06,
};
enum ibmvfc_cmd_status_flags {
IBMVFC_FABRIC_MAPPED = 0x0001,
IBMVFC_VIOS_FAILURE = 0x0002,
IBMVFC_FC_FAILURE = 0x0004,
IBMVFC_FC_SCSI_ERROR = 0x0008,
IBMVFC_HW_EVENT_LOGGED = 0x0010,
IBMVFC_VIOS_LOGGED = 0x0020,
};
enum ibmvfc_fabric_mapped_errors {
IBMVFC_UNABLE_TO_ESTABLISH = 0x0001,
IBMVFC_XPORT_FAULT = 0x0002,
IBMVFC_CMD_TIMEOUT = 0x0003,
IBMVFC_ENETDOWN = 0x0004,
IBMVFC_HW_FAILURE = 0x0005,
IBMVFC_LINK_DOWN_ERR = 0x0006,
IBMVFC_LINK_DEAD_ERR = 0x0007,
IBMVFC_UNABLE_TO_REGISTER = 0x0008,
IBMVFC_XPORT_BUSY = 0x000A,
IBMVFC_XPORT_DEAD = 0x000B,
IBMVFC_CONFIG_ERROR = 0x000C,
IBMVFC_NAME_SERVER_FAIL = 0x000D,
IBMVFC_LINK_HALTED = 0x000E,
IBMVFC_XPORT_GENERAL = 0x8000,
};
enum ibmvfc_vios_errors {
IBMVFC_CRQ_FAILURE = 0x0001,
IBMVFC_SW_FAILURE = 0x0002,
IBMVFC_INVALID_PARAMETER = 0x0003,
IBMVFC_MISSING_PARAMETER = 0x0004,
IBMVFC_HOST_IO_BUS = 0x0005,
IBMVFC_TRANS_CANCELLED = 0x0006,
IBMVFC_TRANS_CANCELLED_IMPLICIT = 0x0007,
IBMVFC_INSUFFICIENT_RESOURCE = 0x0008,
IBMVFC_COMMAND_FAILED = 0x8000,
};
enum ibmvfc_mad_types {
IBMVFC_NPIV_LOGIN = 0x0001,
IBMVFC_DISC_TARGETS = 0x0002,
IBMVFC_PORT_LOGIN = 0x0004,
IBMVFC_PROCESS_LOGIN = 0x0008,
IBMVFC_QUERY_TARGET = 0x0010,
IBMVFC_IMPLICIT_LOGOUT = 0x0040,
IBMVFC_TMF_MAD = 0x0100,
};
struct ibmvfc_mad_common {
u32 version;
u32 reserved;
u32 opcode;
u16 status;
u16 length;
u64 tag;
}__attribute__((packed, aligned (8)));
struct ibmvfc_npiv_login_mad {
struct ibmvfc_mad_common common;
struct srp_direct_buf buffer;
}__attribute__((packed, aligned (8)));
#define IBMVFC_MAX_NAME 256
struct ibmvfc_npiv_login {
u32 ostype;
#define IBMVFC_OS_LINUX 0x02
u32 pad;
u64 max_dma_len;
u32 max_payload;
u32 max_response;
u32 partition_num;
u32 vfc_frame_version;
u16 fcp_version;
u16 flags;
#define IBMVFC_CLIENT_MIGRATED 0x01
#define IBMVFC_FLUSH_ON_HALT 0x02
u32 max_cmds;
u64 capabilities;
#define IBMVFC_CAN_MIGRATE 0x01
u64 node_name;
struct srp_direct_buf async;
u8 partition_name[IBMVFC_MAX_NAME];
u8 device_name[IBMVFC_MAX_NAME];
u8 drc_name[IBMVFC_MAX_NAME];
u64 reserved2[2];
}__attribute__((packed, aligned (8)));
struct ibmvfc_common_svc_parms {
u16 fcph_version;
u16 b2b_credit;
u16 features;
u16 bb_rcv_sz; /* upper nibble is BB_SC_N */
u32 ratov;
u32 edtov;
}__attribute__((packed, aligned (4)));
struct ibmvfc_service_parms {
struct ibmvfc_common_svc_parms common;
u8 port_name[8];
u8 node_name[8];
u32 class1_parms[4];
u32 class2_parms[4];
u32 class3_parms[4];
u32 obsolete[4];
u32 vendor_version[4];
u32 services_avail[2];
u32 ext_len;
u32 reserved[30];
u32 clk_sync_qos[2];
}__attribute__((packed, aligned (4)));
struct ibmvfc_npiv_login_resp {
u32 version;
u16 status;
u16 error;
u32 flags;
#define IBMVFC_NATIVE_FC 0x01
#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
u32 reserved;
u64 capabilities;
u32 max_cmds;
u32 scsi_id_sz;
u64 max_dma_len;
u64 scsi_id;
u64 port_name;
u64 node_name;
u64 link_speed;
u8 partition_name[IBMVFC_MAX_NAME];
u8 device_name[IBMVFC_MAX_NAME];
u8 port_loc_code[IBMVFC_MAX_NAME];
u8 drc_name[IBMVFC_MAX_NAME];
struct ibmvfc_service_parms service_parms;
u64 reserved2;
}__attribute__((packed, aligned (8)));
union ibmvfc_npiv_login_data {
struct ibmvfc_npiv_login login;
struct ibmvfc_npiv_login_resp resp;
}__attribute__((packed, aligned (8)));
struct ibmvfc_discover_targets_buf {
u32 scsi_id[1];
#define IBMVFC_DISC_TGT_SCSI_ID_MASK 0x00ffffff
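/* only the low 24 bits of each returned scsi_id are significant */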
};
struct ibmvfc_discover_targets {
struct ibmvfc_mad_common common;
struct srp_direct_buf buffer;
u32 flags;
u16 status;
u16 error;
u32 bufflen;
u32 num_avail;
u32 num_written;
u64 reserved[2];
}__attribute__((packed, aligned (8)));
enum ibmvfc_fc_reason {
IBMVFC_INVALID_ELS_CMD_CODE = 0x01,
IBMVFC_INVALID_VERSION = 0x02,
IBMVFC_LOGICAL_ERROR = 0x03,
IBMVFC_INVALID_CT_IU_SIZE = 0x04,
IBMVFC_LOGICAL_BUSY = 0x05,
IBMVFC_PROTOCOL_ERROR = 0x07,
IBMVFC_UNABLE_TO_PERFORM_REQ = 0x09,
IBMVFC_CMD_NOT_SUPPORTED = 0x0B,
IBMVFC_SERVER_NOT_AVAIL = 0x0D,
IBMVFC_CMD_IN_PROGRESS = 0x0E,
IBMVFC_VENDOR_SPECIFIC = 0xFF,
};
enum ibmvfc_fc_type {
IBMVFC_FABRIC_REJECT = 0x01,
IBMVFC_PORT_REJECT = 0x02,
IBMVFC_LS_REJECT = 0x03,
IBMVFC_FABRIC_BUSY = 0x04,
IBMVFC_PORT_BUSY = 0x05,
IBMVFC_BASIC_REJECT = 0x06,
};
enum ibmvfc_gs_explain {
IBMVFC_PORT_NAME_NOT_REG = 0x02,
};
struct ibmvfc_port_login {
struct ibmvfc_mad_common common;
u64 scsi_id;
u16 reserved;
u16 fc_service_class;
u32 blksz;
u32 hdr_per_blk;
u16 status;
u16 error; /* also fc_reason */
u16 fc_explain;
u16 fc_type;
u32 reserved2;
struct ibmvfc_service_parms service_parms;
struct ibmvfc_service_parms service_parms_change;
u64 reserved3[2];
}__attribute__((packed, aligned (8)));
struct ibmvfc_prli_svc_parms {
u8 type;
#define IBMVFC_SCSI_FCP_TYPE 0x08
u8 type_ext;
u16 flags;
#define IBMVFC_PRLI_ORIG_PA_VALID 0x8000
#define IBMVFC_PRLI_RESP_PA_VALID 0x4000
#define IBMVFC_PRLI_EST_IMG_PAIR 0x2000
u32 orig_pa;
u32 resp_pa;
u32 service_parms;
#define IBMVFC_PRLI_TASK_RETRY 0x00000200
#define IBMVFC_PRLI_RETRY 0x00000100
#define IBMVFC_PRLI_DATA_OVERLAY 0x00000040
#define IBMVFC_PRLI_INITIATOR_FUNC 0x00000020
#define IBMVFC_PRLI_TARGET_FUNC 0x00000010
#define IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED 0x00000002
#define IBMVFC_PRLI_WR_FCP_XFER_RDY_DISABLED 0x00000001
}__attribute__((packed, aligned (4)));
struct ibmvfc_process_login {
struct ibmvfc_mad_common common;
u64 scsi_id;
struct ibmvfc_prli_svc_parms parms;
u8 reserved[48];
u16 status;
u16 error; /* also fc_reason */
u32 reserved2;
u64 reserved3[2];
}__attribute__((packed, aligned (8)));
struct ibmvfc_query_tgt {
struct ibmvfc_mad_common common;
u64 wwpn;
u64 scsi_id;
u16 status;
u16 error;
u16 fc_explain;
u16 fc_type;
u64 reserved[2];
}__attribute__((packed, aligned (8)));
struct ibmvfc_implicit_logout {
struct ibmvfc_mad_common common;
u64 old_scsi_id;
u64 reserved[2];
}__attribute__((packed, aligned (8)));
struct ibmvfc_tmf {
struct ibmvfc_mad_common common;
u64 scsi_id;
struct scsi_lun lun;
u32 flags;
#define IBMVFC_TMF_ABORT_TASK 0x02
#define IBMVFC_TMF_ABORT_TASK_SET 0x04
#define IBMVFC_TMF_LUN_RESET 0x10
#define IBMVFC_TMF_TGT_RESET 0x20
#define IBMVFC_TMF_LUA_VALID 0x40
u32 cancel_key;
u32 my_cancel_key;
#define IBMVFC_TMF_CANCEL_KEY 0x80000000
u32 pad;
u64 reserved[2];
}__attribute__((packed, aligned (8)));
enum ibmvfc_fcp_rsp_info_codes {
RSP_NO_FAILURE = 0x00,
RSP_TMF_REJECTED = 0x04,
RSP_TMF_FAILED = 0x05,
RSP_TMF_INVALID_LUN = 0x09,
};
struct ibmvfc_fcp_rsp_info {
u16 reserved;
u8 rsp_code;
u8 reserved2[4];
}__attribute__((packed, aligned (2)));
enum ibmvfc_fcp_rsp_flags {
FCP_BIDI_RSP = 0x80,
FCP_BIDI_READ_RESID_UNDER = 0x40,
FCP_BIDI_READ_RESID_OVER = 0x20,
FCP_CONF_REQ = 0x10,
FCP_RESID_UNDER = 0x08,
FCP_RESID_OVER = 0x04,
FCP_SNS_LEN_VALID = 0x02,
FCP_RSP_LEN_VALID = 0x01,
};
union ibmvfc_fcp_rsp_data {
struct ibmvfc_fcp_rsp_info info;
u8 sense[SCSI_SENSE_BUFFERSIZE + sizeof(struct ibmvfc_fcp_rsp_info)];
}__attribute__((packed, aligned (8)));
struct ibmvfc_fcp_rsp {
u64 reserved;
u16 retry_delay_timer;
u8 flags;
u8 scsi_status;
u32 fcp_resid;
u32 fcp_sense_len;
u32 fcp_rsp_len;
union ibmvfc_fcp_rsp_data data;
}__attribute__((packed, aligned (8)));
enum ibmvfc_cmd_flags {
IBMVFC_SCATTERLIST = 0x0001,
IBMVFC_NO_MEM_DESC = 0x0002,
IBMVFC_READ = 0x0004,
IBMVFC_WRITE = 0x0008,
IBMVFC_TMF = 0x0080,
IBMVFC_CLASS_3_ERR = 0x0100,
};
enum ibmvfc_fc_task_attr {
IBMVFC_SIMPLE_TASK = 0x00,
IBMVFC_HEAD_OF_QUEUE = 0x01,
IBMVFC_ORDERED_TASK = 0x02,
IBMVFC_ACA_TASK = 0x04,
};
enum ibmvfc_fc_tmf_flags {
IBMVFC_ABORT_TASK_SET = 0x02,
IBMVFC_LUN_RESET = 0x10,
IBMVFC_TARGET_RESET = 0x20,
};
struct ibmvfc_fcp_cmd_iu {
struct scsi_lun lun;
u8 crn;
u8 pri_task_attr;
u8 tmf_flags;
u8 add_cdb_len;
#define IBMVFC_RDDATA 0x02
#define IBMVFC_WRDATA 0x01
u8 cdb[IBMVFC_MAX_CDB_LEN];
u32 xfer_len;
}__attribute__((packed, aligned (4)));
struct ibmvfc_cmd {
u64 task_tag;
u32 frame_type;
u32 payload_len;
u32 resp_len;
u32 adapter_resid;
u16 status;
u16 error;
u16 flags;
u16 response_flags;
#define IBMVFC_ADAPTER_RESID_VALID 0x01
u32 cancel_key;
u32 exchange_id;
struct srp_direct_buf ext_func;
struct srp_direct_buf ioba;
struct srp_direct_buf resp;
u64 correlation;
u64 tgt_scsi_id;
u64 tag;
u64 reserved3[2];
struct ibmvfc_fcp_cmd_iu iu;
struct ibmvfc_fcp_rsp rsp;
}__attribute__((packed, aligned (8)));
struct ibmvfc_trace_start_entry {
u32 xfer_len;
}__attribute__((packed));
struct ibmvfc_trace_end_entry {
u16 status;
u16 error;
u8 fcp_rsp_flags;
u8 rsp_code;
u8 scsi_status;
u8 reserved;
}__attribute__((packed));
struct ibmvfc_trace_entry {
struct ibmvfc_event *evt;
u32 time;
u32 scsi_id;
u32 lun;
u8 fmt;
u8 op_code;
u8 tmf_flags;
u8 type;
#define IBMVFC_TRC_START 0x00
#define IBMVFC_TRC_END 0xff
union {
struct ibmvfc_trace_start_entry start;
struct ibmvfc_trace_end_entry end;
} u;
}__attribute__((packed, aligned (8)));
enum ibmvfc_crq_formats {
IBMVFC_CMD_FORMAT = 0x01,
IBMVFC_ASYNC_EVENT = 0x02,
IBMVFC_MAD_FORMAT = 0x04,
};
enum ibmvfc_async_event {
IBMVFC_AE_ELS_PLOGI = 0x0001,
IBMVFC_AE_ELS_LOGO = 0x0002,
IBMVFC_AE_ELS_PRLO = 0x0004,
IBMVFC_AE_SCN_NPORT = 0x0008,
IBMVFC_AE_SCN_GROUP = 0x0010,
IBMVFC_AE_SCN_DOMAIN = 0x0020,
IBMVFC_AE_SCN_FABRIC = 0x0040,
IBMVFC_AE_LINK_UP = 0x0080,
IBMVFC_AE_LINK_DOWN = 0x0100,
IBMVFC_AE_LINK_DEAD = 0x0200,
IBMVFC_AE_HALT = 0x0400,
IBMVFC_AE_RESUME = 0x0800,
IBMVFC_AE_ADAPTER_FAILED = 0x1000,
};
struct ibmvfc_crq {
u8 valid;
u8 format;
u8 reserved[6];
u64 ioba;
}__attribute__((packed, aligned (8)));
struct ibmvfc_crq_queue {
struct ibmvfc_crq *msgs;
int size, cur;
dma_addr_t msg_token;
};
struct ibmvfc_async_crq {
u8 valid;
u8 pad[3];
u32 pad2;
u64 event;
u64 scsi_id;
u64 wwpn;
u64 node_name;
u64 reserved;
}__attribute__((packed, aligned (8)));
struct ibmvfc_async_crq_queue {
struct ibmvfc_async_crq *msgs;
int size, cur;
dma_addr_t msg_token;
};
union ibmvfc_iu {
struct ibmvfc_mad_common mad_common;
struct ibmvfc_npiv_login_mad npiv_login;
struct ibmvfc_discover_targets discover_targets;
struct ibmvfc_port_login plogi;
struct ibmvfc_process_login prli;
struct ibmvfc_query_tgt query_tgt;
struct ibmvfc_implicit_logout implicit_logout;
struct ibmvfc_tmf tmf;
struct ibmvfc_cmd cmd;
}__attribute__((packed, aligned (8)));
enum ibmvfc_target_action {
IBMVFC_TGT_ACTION_NONE = 0,
IBMVFC_TGT_ACTION_INIT,
IBMVFC_TGT_ACTION_INIT_WAIT,
IBMVFC_TGT_ACTION_ADD_RPORT,
IBMVFC_TGT_ACTION_DEL_RPORT,
};
struct ibmvfc_target {
struct list_head queue;
struct ibmvfc_host *vhost;
u64 scsi_id;
u64 new_scsi_id;
struct fc_rport *rport;
int target_id;
enum ibmvfc_target_action action;
int need_login;
int init_retries;
struct ibmvfc_service_parms service_parms;
struct ibmvfc_service_parms service_parms_change;
struct fc_rport_identifiers ids;
void (*job_step) (struct ibmvfc_target *);
struct kref kref;
};
/* a unit of work for the hosting partition */
struct ibmvfc_event {
struct list_head queue;
struct ibmvfc_host *vhost;
struct ibmvfc_target *tgt;
struct scsi_cmnd *cmnd;
atomic_t free;
union ibmvfc_iu *xfer_iu;
void (*done) (struct ibmvfc_event *);
struct ibmvfc_crq crq;
union ibmvfc_iu iu;
union ibmvfc_iu *sync_iu;
struct srp_direct_buf *ext_list;
dma_addr_t ext_list_token;
struct completion comp;
struct timer_list timer;
};
/* a pool of event structs for use */
struct ibmvfc_event_pool {
struct ibmvfc_event *events;
u32 size;
union ibmvfc_iu *iu_storage;
dma_addr_t iu_token;
};
enum ibmvfc_host_action {
IBMVFC_HOST_ACTION_NONE = 0,
IBMVFC_HOST_ACTION_INIT,
IBMVFC_HOST_ACTION_INIT_WAIT,
IBMVFC_HOST_ACTION_QUERY,
IBMVFC_HOST_ACTION_QUERY_TGTS,
IBMVFC_HOST_ACTION_TGT_DEL,
IBMVFC_HOST_ACTION_ALLOC_TGTS,
IBMVFC_HOST_ACTION_TGT_INIT,
IBMVFC_HOST_ACTION_TGT_ADD,
};
enum ibmvfc_host_state {
IBMVFC_NO_CRQ = 0,
IBMVFC_INITIALIZING,
IBMVFC_ACTIVE,
IBMVFC_HALTED,
IBMVFC_LINK_DOWN,
IBMVFC_LINK_DEAD,
IBMVFC_HOST_OFFLINE,
};
struct ibmvfc_host {
char name[8];
struct list_head queue;
struct Scsi_Host *host;
enum ibmvfc_host_state state;
enum ibmvfc_host_action action;
#define IBMVFC_NUM_TRACE_INDEX_BITS 8
#define IBMVFC_NUM_TRACE_ENTRIES (1 << IBMVFC_NUM_TRACE_INDEX_BITS)
#define IBMVFC_TRACE_SIZE (sizeof(struct ibmvfc_trace_entry) * IBMVFC_NUM_TRACE_ENTRIES)
struct ibmvfc_trace_entry *trace;
u32 trace_index:IBMVFC_NUM_TRACE_INDEX_BITS;
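/* 8-bit index, so the trace buffer wraps at IBMVFC_NUM_TRACE_ENTRIES (256) */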
int num_targets;
struct list_head targets;
struct list_head sent;
struct list_head free;
struct device *dev;
struct ibmvfc_event_pool pool;
struct dma_pool *sg_pool;
mempool_t *tgt_pool;
struct ibmvfc_crq_queue crq;
struct ibmvfc_async_crq_queue async_crq;
struct ibmvfc_npiv_login login_info;
union ibmvfc_npiv_login_data *login_buf;
dma_addr_t login_buf_dma;
int disc_buf_sz;
int log_level;
struct ibmvfc_discover_targets_buf *disc_buf;
int task_set;
int init_retries;
int discovery_threads;
int client_migrated;
int reinit;
int events_to_log;
#define IBMVFC_AE_LINKUP 0x0001
#define IBMVFC_AE_LINKDOWN 0x0002
#define IBMVFC_AE_RSCN 0x0004
dma_addr_t disc_buf_dma;
unsigned int partition_number;
char partition_name[97];
void (*job_step) (struct ibmvfc_host *);
struct task_struct *work_thread;
wait_queue_head_t init_wait_q;
wait_queue_head_t work_wait_q;
};
#define DBG_CMD(CMD) do { if (ibmvfc_debug) CMD; } while (0)
#define tgt_dbg(t, fmt, ...) \
DBG_CMD(dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__))
#define tgt_err(t, fmt, ...) \
dev_err((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
#define ibmvfc_dbg(vhost, ...) \
DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__))
#define ibmvfc_log(vhost, level, ...) \
do { \
if (level >= (vhost)->log_level) \
dev_err((vhost)->dev, ##__VA_ARGS__); \
} while (0)
#define ENTER DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Entering %s\n", __FUNCTION__))
#define LEAVE DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Leaving %s\n", __FUNCTION__))
#ifdef CONFIG_SCSI_IBMVFC_TRACE
#define ibmvfc_create_trace_file(kobj, attr) sysfs_create_bin_file(kobj, attr)
#define ibmvfc_remove_trace_file(kobj, attr) sysfs_remove_bin_file(kobj, attr)
#else
#define ibmvfc_create_trace_file(kobj, attr) 0
#define ibmvfc_remove_trace_file(kobj, attr) do { } while (0)
#endif
#endif

@@ -64,6 +64,10 @@ MODULE_LICENSE("GPL");
 #define BUG_ON(expr)
 #endif
+static struct scsi_transport_template *iscsi_tcp_scsi_transport;
+static struct scsi_host_template iscsi_sht;
+static struct iscsi_transport iscsi_tcp_transport;
 static unsigned int iscsi_max_lun = 512;
 module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
@@ -494,39 +498,43 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
  * must be called with session lock
  */
 static void
-iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
 {
-struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+struct iscsi_tcp_task *tcp_task = task->dd_data;
 struct iscsi_r2t_info *r2t;
-/* flush ctask's r2t queues */
-while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
-__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+/* nothing to do for mgmt tasks */
+if (!task->sc)
+return;
+/* flush task's r2t queues */
+while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
+__kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
 sizeof(void*));
-debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
+debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
 }
-r2t = tcp_ctask->r2t;
+r2t = tcp_task->r2t;
 if (r2t != NULL) {
-__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+__kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
 sizeof(void*));
-tcp_ctask->r2t = NULL;
+tcp_task->r2t = NULL;
 }
 }
 /**
  * iscsi_data_rsp - SCSI Data-In Response processing
  * @conn: iscsi connection
- * @ctask: scsi command task
+ * @task: scsi command task
  **/
 static int
-iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
 {
 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+struct iscsi_tcp_task *tcp_task = task->dd_data;
 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
 struct iscsi_session *session = conn->session;
-struct scsi_cmnd *sc = ctask->sc;
+struct scsi_cmnd *sc = task->sc;
 int datasn = be32_to_cpu(rhdr->datasn);
 unsigned total_in_length = scsi_in(sc)->length;
@@ -534,18 +542,18 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 if (tcp_conn->in.datalen == 0)
 return 0;
-if (tcp_ctask->exp_datasn != datasn) {
-debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n",
-__FUNCTION__, tcp_ctask->exp_datasn, datasn);
+if (tcp_task->exp_datasn != datasn) {
+debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
+__func__, tcp_task->exp_datasn, datasn);
 return ISCSI_ERR_DATASN;
 }
-tcp_ctask->exp_datasn++;
-tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
-if (tcp_ctask->data_offset + tcp_conn->in.datalen > total_in_length) {
+tcp_task->exp_datasn++;
+tcp_task->data_offset = be32_to_cpu(rhdr->offset);
+if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
 debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
-__FUNCTION__, tcp_ctask->data_offset,
+__func__, tcp_task->data_offset,
 tcp_conn->in.datalen, total_in_length);
 return ISCSI_ERR_DATA_OFFSET;
 }
@@ -574,7 +582,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 /**
  * iscsi_solicit_data_init - initialize first Data-Out
  * @conn: iscsi connection
- * @ctask: scsi command task
+ * @task: scsi command task
  * @r2t: R2T info
  *
  * Notes:
@@ -584,7 +592,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
  * This function is called with connection lock taken.
  **/
 static void
-iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
 struct iscsi_r2t_info *r2t)
 {
 struct iscsi_data *hdr;
@@ -595,8 +603,8 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
 r2t->solicit_datasn++;
 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
-memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
-hdr->itt = ctask->hdr->itt;
+memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+hdr->itt = task->hdr->itt;
 hdr->exp_statsn = r2t->exp_statsn;
 hdr->offset = cpu_to_be32(r2t->data_offset);
 if (r2t->data_length > conn->max_xmit_dlength) {
@@ -616,14 +624,14 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 /**
  * iscsi_r2t_rsp - iSCSI R2T Response processing
  * @conn: iscsi connection
- * @ctask: scsi command task
+ * @task: scsi command task
  **/
 static int
-iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
 {
 struct iscsi_r2t_info *r2t;
 struct iscsi_session *session = conn->session;
-struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+struct iscsi_tcp_task *tcp_task = task->dd_data;
 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
 int r2tsn = be32_to_cpu(rhdr->r2tsn);
@@ -636,23 +644,23 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 return ISCSI_ERR_DATALEN;
 }
-if (tcp_ctask->exp_datasn != r2tsn){
-debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
-__FUNCTION__, tcp_ctask->exp_datasn, r2tsn);
+if (tcp_task->exp_datasn != r2tsn){
+debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
+__func__, tcp_task->exp_datasn, r2tsn);
 return ISCSI_ERR_R2TSN;
 }
 /* fill-in new R2T associated with the task */
 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
-if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
+if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
 iscsi_conn_printk(KERN_INFO, conn,
 "dropping R2T itt %d in recovery.\n",
-ctask->itt);
+task->itt);
 return 0;
 }
-rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
+rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
 BUG_ON(!rc);
 r2t->exp_statsn = rhdr->statsn;
@@ -660,7 +668,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 if (r2t->data_length == 0) {
 iscsi_conn_printk(KERN_ERR, conn,
 "invalid R2T with zero data len\n");
-__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+__kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
 sizeof(void*));
 return ISCSI_ERR_DATALEN;
 }
@@ -671,12 +679,12 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 r2t->data_length, session->max_burst);
 r2t->data_offset = be32_to_cpu(rhdr->data_offset);
-if (r2t->data_offset + r2t->data_length > scsi_out(ctask->sc)->length) {
+if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
 iscsi_conn_printk(KERN_ERR, conn,
 "invalid R2T with data len %u at offset %u "
 "and total length %d\n", r2t->data_length,
-r2t->data_offset, scsi_out(ctask->sc)->length);
-__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+r2t->data_offset, scsi_out(task->sc)->length);
+__kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
 sizeof(void*));
 return ISCSI_ERR_DATALEN;
 }
@@ -684,13 +692,13 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 r2t->ttt = rhdr->ttt; /* no flip */
 r2t->solicit_datasn = 0;
-iscsi_solicit_data_init(conn, ctask, r2t);
-tcp_ctask->exp_datasn = r2tsn + 1;
-__kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
+iscsi_solicit_data_init(conn, task, r2t);
+tcp_task->exp_datasn = r2tsn + 1;
+__kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
 conn->r2t_pdus_cnt++;
-iscsi_requeue_ctask(ctask);
+iscsi_requeue_task(task);
 return 0;
 }
@@ -733,10 +741,8 @@ static int
 iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 {
 int rc = 0, opcode, ahslen;
-struct iscsi_session *session = conn->session;
 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-struct iscsi_cmd_task *ctask;
-uint32_t itt;
+struct iscsi_task *task;
 /* verify PDU length */
 tcp_conn->in.datalen = ntoh24(hdr->dlength);
@@ -754,7 +760,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 opcode = hdr->opcode & ISCSI_OPCODE_MASK;
 /* verify itt (itt encoding: age+cid+itt) */
-rc = iscsi_verify_itt(conn, hdr, &itt);
+rc = iscsi_verify_itt(conn, hdr->itt);
 if (rc)
 return rc;
@@ -763,16 +769,21 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 switch(opcode) {
 case ISCSI_OP_SCSI_DATA_IN:
-ctask = session->cmds[itt];
 spin_lock(&conn->session->lock);
-rc = iscsi_data_rsp(conn, ctask);
-spin_unlock(&conn->session->lock);
-if (rc)
-return rc;
+task = iscsi_itt_to_ctask(conn, hdr->itt);
+if (!task)
+rc = ISCSI_ERR_BAD_ITT;
+else
+rc = iscsi_data_rsp(conn, task);
+if (rc) {
+spin_unlock(&conn->session->lock);
+break;
+}
 if (tcp_conn->in.datalen) {
-struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+struct iscsi_tcp_task *tcp_task = task->dd_data;
 struct hash_desc *rx_hash = NULL;
-struct scsi_data_buffer *sdb = scsi_in(ctask->sc);
+struct scsi_data_buffer *sdb = scsi_in(task->sc);
 /*
  * Setup copy of Data-In into the Scsi_Cmnd
@@ -787,17 +798,21 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
 "datalen=%d)\n", tcp_conn,
-tcp_ctask->data_offset,
+tcp_task->data_offset,
 tcp_conn->in.datalen);
-return iscsi_segment_seek_sg(&tcp_conn->in.segment,
+rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
 sdb->table.sgl,
 sdb->table.nents,
-tcp_ctask->data_offset,
+tcp_task->data_offset,
 tcp_conn->in.datalen,
 iscsi_tcp_process_data_in,
 rx_hash);
+spin_unlock(&conn->session->lock);
+return rc;
 }
-/* fall through */
+rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
+spin_unlock(&conn->session->lock);
+break;
 case ISCSI_OP_SCSI_CMD_RSP:
 if (tcp_conn->in.datalen) {
 iscsi_tcp_data_recv_prep(tcp_conn);
@@ -806,15 +821,17 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
 break;
 case ISCSI_OP_R2T:
-ctask = session->cmds[itt];
-if (ahslen)
+spin_lock(&conn->session->lock);
+task = iscsi_itt_to_ctask(conn, hdr->itt);
+if (!task)
+rc = ISCSI_ERR_BAD_ITT;
+else if (ahslen)
 rc = ISCSI_ERR_AHSLEN;
-else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
-spin_lock(&session->lock);
-rc = iscsi_r2t_rsp(conn, ctask);
-spin_unlock(&session->lock);
-} else
+else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
+rc = iscsi_r2t_rsp(conn, task);
+else
 rc = ISCSI_ERR_PROTO;
+spin_unlock(&conn->session->lock);
 break;
 case ISCSI_OP_LOGIN_RSP:
 case ISCSI_OP_TEXT_RSP:
@@ -1176,7 +1193,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
 {
 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-debug_tcp("%s(%p%s)\n", __FUNCTION__, tcp_conn,
+debug_tcp("%s(%p%s)\n", __func__, tcp_conn,
 conn->hdrdgst_en? ", digest enabled" : "");
 /* Clear the data segment - needs to be filled in by the
@@ -1185,7 +1202,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
 /* If header digest is enabled, compute the CRC and
  * place the digest into the same buffer. We make
- * sure that both iscsi_tcp_ctask and mtask have
+ * sure that both iscsi_tcp_task and mtask have
  * sufficient room.
  */
 if (conn->hdrdgst_en) {
@@ -1217,7 +1234,7 @@ iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
 struct hash_desc *tx_hash = NULL;
 unsigned int hdr_spec_len;
-debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __FUNCTION__,
+debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __func__,
 tcp_conn, offset, len,
 conn->datadgst_en? ", digest enabled" : "");
@@ -1242,7 +1259,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
 struct hash_desc *tx_hash = NULL;
 unsigned int hdr_spec_len;
-debug_tcp("%s(%p, datalen=%d%s)\n", __FUNCTION__, tcp_conn, len,
+debug_tcp("%s(%p, datalen=%d%s)\n", __func__, tcp_conn, len,
 conn->datadgst_en? ", digest enabled" : "");
 /* Make sure the datalen matches what the caller
@@ -1260,7 +1277,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
 /**
  * iscsi_solicit_data_cont - initialize next Data-Out
  * @conn: iscsi connection
- * @ctask: scsi command task
+ * @task: scsi command task
  * @r2t: R2T info
  * @left: bytes left to transfer
  *
@@ -1271,7 +1288,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
  * Called under connection lock.
  **/
 static int
-iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
 struct iscsi_r2t_info *r2t)
 {
 struct iscsi_data *hdr;
@@ -1288,8 +1305,8 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
 r2t->solicit_datasn++;
 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
-memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
-hdr->itt = ctask->hdr->itt;
+memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+hdr->itt = task->hdr->itt;
 hdr->exp_statsn = r2t->exp_statsn;
 new_offset = r2t->data_offset + r2t->sent;
 hdr->offset = cpu_to_be32(new_offset);
@@ -1307,89 +1324,76 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 }
 /**
- * iscsi_tcp_ctask - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * iscsi_tcp_task - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
  * @conn: iscsi connection
- * @ctask: scsi command task
+ * @task: scsi command task
  * @sc: scsi command
  **/
 static int
-iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask)
+iscsi_tcp_task_init(struct iscsi_task *task)
 {
-struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
-struct iscsi_conn *conn = ctask->conn;
-struct scsi_cmnd *sc = ctask->sc;
+struct iscsi_tcp_task *tcp_task = task->dd_data;
+struct iscsi_conn *conn = task->conn;
+struct scsi_cmnd *sc = task->sc;
 int err;
-BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
-tcp_ctask->sent = 0;
-tcp_ctask->exp_datasn = 0;
+if (!sc) {
+/*
+ * mgmt tasks do not have a scatterlist since they come
+ * in from the iscsi interface.
+ */
+debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
+task->itt);
+/* Prepare PDU, optionally w/ immediate data */
+iscsi_tcp_send_hdr_prep(conn, task->hdr, sizeof(*task->hdr));
+/* If we have immediate data, attach a payload */
+if (task->data_count)
+iscsi_tcp_send_linear_data_prepare(conn, task->data,
+task->data_count);
+return 0;
+}
+BUG_ON(__kfifo_len(tcp_task->r2tqueue));
+tcp_task->sent = 0;
+tcp_task->exp_datasn = 0;
 /* Prepare PDU, optionally w/ immediate data */
-debug_scsi("ctask deq [cid %d itt 0x%x imm %d unsol %d]\n",
-conn->id, ctask->itt, ctask->imm_count,
-ctask->unsol_count);
-iscsi_tcp_send_hdr_prep(conn, ctask->hdr, ctask->hdr_len);
+debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
+conn->id, task->itt, task->imm_count,
+task->unsol_count);
+iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
-if (!ctask->imm_count)
+if (!task->imm_count)
 return 0;
 /* If we have immediate data, attach a payload */
 err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl,
 scsi_out(sc)->table.nents,
-0, ctask->imm_count);
+0, task->imm_count);
 if (err)
 return err;
-tcp_ctask->sent += ctask->imm_count;
-ctask->imm_count = 0;
-return 0;
-}
-/**
- * iscsi_tcp_mtask_xmit - xmit management(immediate) task
- * @conn: iscsi connection
- * @mtask: task management task
- *
- * Notes:
- * The function can return -EAGAIN in which case caller must
- * call it again later, or recover. '0' return code means successful
- * xmit.
- **/
-static int
-iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
-{
-int rc;
-/* Flush any pending data first. */
-rc = iscsi_tcp_flush(conn);
-if (rc < 0)
-return rc;
-if (mtask->hdr->itt == RESERVED_ITT) {
-struct iscsi_session *session = conn->session;
-spin_lock_bh(&session->lock);
-iscsi_free_mgmt_task(conn, mtask);
-spin_unlock_bh(&session->lock);
-}
+tcp_task->sent += task->imm_count;
+task->imm_count = 0;
 return 0;
 }
 /*
- * iscsi_tcp_ctask_xmit - xmit normal PDU task
- * @conn: iscsi connection
- * @ctask: iscsi command task
+ * iscsi_tcp_task_xmit - xmit normal PDU task
+ * @task: iscsi command task
  *
  * We're expected to return 0 when everything was transmitted successfully,
  * -EAGAIN if there's still data in the queue, or != 0 for any other kind
  * of error.
  */
 static int
-iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_tcp_task_xmit(struct iscsi_task *task)
 {
-struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
-struct scsi_cmnd *sc = ctask->sc;
-struct scsi_data_buffer *sdb = scsi_out(sc);
+struct iscsi_conn *conn = task->conn;
+struct iscsi_tcp_task *tcp_task = task->dd_data;
+struct scsi_cmnd *sc = task->sc;
+struct scsi_data_buffer *sdb;
 int rc = 0;
 flush:
flush: flush:
@ -1398,31 +1402,39 @@ flush:
if (rc < 0) if (rc < 0)
return rc; return rc;
/* mgmt command */
if (!sc) {
if (task->hdr->itt == RESERVED_ITT)
iscsi_put_task(task);
return 0;
}
/* Are we done already? */ /* Are we done already? */
if (sc->sc_data_direction != DMA_TO_DEVICE) if (sc->sc_data_direction != DMA_TO_DEVICE)
return 0; return 0;
if (ctask->unsol_count != 0) { sdb = scsi_out(sc);
struct iscsi_data *hdr = &tcp_ctask->unsol_dtask.hdr; if (task->unsol_count != 0) {
struct iscsi_data *hdr = &tcp_task->unsol_dtask.hdr;
/* Prepare a header for the unsolicited PDU. /* Prepare a header for the unsolicited PDU.
* The amount of data we want to send will be * The amount of data we want to send will be
* in ctask->data_count. * in task->data_count.
* FIXME: return the data count instead. * FIXME: return the data count instead.
*/ */
iscsi_prep_unsolicit_data_pdu(ctask, hdr); iscsi_prep_unsolicit_data_pdu(task, hdr);
debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n", debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
ctask->itt, tcp_ctask->sent, ctask->data_count); task->itt, tcp_task->sent, task->data_count);
iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr)); iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl, rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
sdb->table.nents, tcp_ctask->sent, sdb->table.nents, tcp_task->sent,
ctask->data_count); task->data_count);
if (rc) if (rc)
goto fail; goto fail;
tcp_ctask->sent += ctask->data_count; tcp_task->sent += task->data_count;
ctask->unsol_count -= ctask->data_count; task->unsol_count -= task->data_count;
goto flush; goto flush;
} else { } else {
struct iscsi_session *session = conn->session; struct iscsi_session *session = conn->session;
@@ -1431,22 +1443,22 @@ flush:
 /* All unsolicited PDUs sent. Check for solicited PDUs.
  */
 spin_lock_bh(&session->lock);
-r2t = tcp_ctask->r2t;
+r2t = tcp_task->r2t;
 if (r2t != NULL) {
 /* Continue with this R2T? */
-if (!iscsi_solicit_data_cont(conn, ctask, r2t)) {
+if (!iscsi_solicit_data_cont(conn, task, r2t)) {
 debug_scsi(" done with r2t %p\n", r2t);
-__kfifo_put(tcp_ctask->r2tpool.queue,
+__kfifo_put(tcp_task->r2tpool.queue,
 (void*)&r2t, sizeof(void*));
-tcp_ctask->r2t = r2t = NULL;
+tcp_task->r2t = r2t = NULL;
 }
 }
 if (r2t == NULL) {
-__kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
+__kfifo_get(tcp_task->r2tqueue, (void*)&tcp_task->r2t,
 sizeof(void*));
-r2t = tcp_ctask->r2t;
+r2t = tcp_task->r2t;
 }
 spin_unlock_bh(&session->lock);
@@ -1457,7 +1469,7 @@ flush:
 }
 debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
-r2t, r2t->solicit_datasn - 1, ctask->itt,
+r2t, r2t->solicit_datasn - 1, task->itt,
 r2t->data_offset + r2t->sent, r2t->data_count);
 iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
@@ -1469,7 +1481,7 @@ flush:
 r2t->data_count);
 if (rc)
 goto fail;
-tcp_ctask->sent += r2t->data_count;
+tcp_task->sent += r2t->data_count;
 r2t->sent += r2t->data_count;
 goto flush;
 }
@@ -1486,7 +1498,7 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
 struct iscsi_cls_conn *cls_conn;
 struct iscsi_tcp_conn *tcp_conn;
-cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
 if (!cls_conn)
 return NULL;
 conn = cls_conn->dd_data;
@ -1496,18 +1508,14 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
*/ */
conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN; conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL); tcp_conn = conn->dd_data;
if (!tcp_conn)
goto tcp_conn_alloc_fail;
conn->dd_data = tcp_conn;
tcp_conn->iscsi_conn = conn; tcp_conn->iscsi_conn = conn;
tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0, tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
CRYPTO_ALG_ASYNC); CRYPTO_ALG_ASYNC);
tcp_conn->tx_hash.flags = 0; tcp_conn->tx_hash.flags = 0;
if (IS_ERR(tcp_conn->tx_hash.tfm)) if (IS_ERR(tcp_conn->tx_hash.tfm))
goto free_tcp_conn; goto free_conn;
tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0, tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
CRYPTO_ALG_ASYNC); CRYPTO_ALG_ASYNC);
@ -1519,14 +1527,12 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
free_tx_tfm: free_tx_tfm:
crypto_free_hash(tcp_conn->tx_hash.tfm); crypto_free_hash(tcp_conn->tx_hash.tfm);
free_tcp_conn: free_conn:
iscsi_conn_printk(KERN_ERR, conn, iscsi_conn_printk(KERN_ERR, conn,
"Could not create connection due to crc32c " "Could not create connection due to crc32c "
"loading error. Make sure the crc32c " "loading error. Make sure the crc32c "
"module is built as a module or into the " "module is built as a module or into the "
"kernel\n"); "kernel\n");
kfree(tcp_conn);
tcp_conn_alloc_fail:
iscsi_conn_teardown(cls_conn); iscsi_conn_teardown(cls_conn);
return NULL; return NULL;
} }
@@ -1547,7 +1553,6 @@ iscsi_tcp_release_conn(struct iscsi_conn *conn)
 	spin_lock_bh(&session->lock);
 	tcp_conn->sock = NULL;
-	conn->recv_lock = NULL;
 	spin_unlock_bh(&session->lock);
 	sockfd_put(sock);
 }
@@ -1559,20 +1564,32 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

 	iscsi_tcp_release_conn(conn);
-	iscsi_conn_teardown(cls_conn);

 	if (tcp_conn->tx_hash.tfm)
 		crypto_free_hash(tcp_conn->tx_hash.tfm);
 	if (tcp_conn->rx_hash.tfm)
 		crypto_free_hash(tcp_conn->rx_hash.tfm);

-	kfree(tcp_conn);
+	iscsi_conn_teardown(cls_conn);
 }

 static void
 iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+	/* userspace may have goofed up and not bound us */
+	if (!tcp_conn->sock)
+		return;
+	/*
+	 * Make sure our recv side is stopped.
+	 * Older tools called conn stop before ep_disconnect
+	 * so IO could still be coming in.
+	 */
+	write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+	write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock);

 	iscsi_conn_stop(cls_conn, flag);
 	iscsi_tcp_release_conn(conn);
@@ -1623,6 +1640,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
 		    struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
 		    int is_leading)
 {
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+	struct iscsi_host *ihost = shost_priv(shost);
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 	struct sock *sk;
@@ -1646,8 +1665,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
 	if (err)
 		goto free_socket;

-	err = iscsi_tcp_get_addr(conn, sock, conn->local_address,
-				 &conn->local_port, kernel_getsockname);
+	err = iscsi_tcp_get_addr(conn, sock, ihost->local_address,
+				 &ihost->local_port, kernel_getsockname);
 	if (err)
 		goto free_socket;
@@ -1664,13 +1683,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
 	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
 	sk->sk_allocation = GFP_ATOMIC;

-	/* FIXME: disable Nagle's algorithm */
-
-	/*
-	 * Intercept TCP callbacks for sendfile like receive
-	 * processing.
-	 */
-	conn->recv_lock = &sk->sk_callback_lock;
 	iscsi_conn_set_callbacks(conn);
 	tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
 	/*
@@ -1684,21 +1696,6 @@ free_socket:
 	return err;
 }
-/* called with host lock */
-static void
-iscsi_tcp_mtask_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
-{
-	debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
-
-	/* Prepare PDU, optionally w/ immediate data */
-	iscsi_tcp_send_hdr_prep(conn, mtask->hdr, sizeof(*mtask->hdr));
-
-	/* If we have immediate data, attach a payload */
-	if (mtask->data_count)
-		iscsi_tcp_send_linear_data_prepare(conn, mtask->data,
-						   mtask->data_count);
-}
-
 static int
 iscsi_r2tpool_alloc(struct iscsi_session *session)
 {
@@ -1709,8 +1706,8 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
 	 * initialize per-task: R2T pool and xmit queue
 	 */
 	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
-		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
-		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+		struct iscsi_task *task = session->cmds[cmd_i];
+		struct iscsi_tcp_task *tcp_task = task->dd_data;

 		/*
 		 * pre-allocated x4 as much r2ts to handle race when
@@ -1719,16 +1716,16 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
 		 */

 		/* R2T pool */
-		if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4, NULL,
+		if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 4, NULL,
 				    sizeof(struct iscsi_r2t_info))) {
 			goto r2t_alloc_fail;
 		}

 		/* R2T xmit queue */
-		tcp_ctask->r2tqueue = kfifo_alloc(
+		tcp_task->r2tqueue = kfifo_alloc(
 		      session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
-		if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
-			iscsi_pool_free(&tcp_ctask->r2tpool);
+		if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
+			iscsi_pool_free(&tcp_task->r2tpool);
 			goto r2t_alloc_fail;
 		}
 	}
@@ -1737,11 +1734,11 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
 r2t_alloc_fail:
 	for (i = 0; i < cmd_i; i++) {
-		struct iscsi_cmd_task *ctask = session->cmds[i];
-		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+		struct iscsi_task *task = session->cmds[i];
+		struct iscsi_tcp_task *tcp_task = task->dd_data;

-		kfifo_free(tcp_ctask->r2tqueue);
-		iscsi_pool_free(&tcp_ctask->r2tpool);
+		kfifo_free(tcp_task->r2tqueue);
+		iscsi_pool_free(&tcp_task->r2tpool);
 	}
 	return -ENOMEM;
 }
@@ -1752,11 +1749,11 @@ iscsi_r2tpool_free(struct iscsi_session *session)
 	int i;

 	for (i = 0; i < session->cmds_max; i++) {
-		struct iscsi_cmd_task *ctask = session->cmds[i];
-		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+		struct iscsi_task *task = session->cmds[i];
+		struct iscsi_tcp_task *tcp_task = task->dd_data;

-		kfifo_free(tcp_ctask->r2tqueue);
-		iscsi_pool_free(&tcp_ctask->r2tpool);
+		kfifo_free(tcp_task->r2tqueue);
+		iscsi_pool_free(&tcp_task->r2tpool);
 	}
 }
@@ -1821,29 +1818,6 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
 	return len;
 }

-static int
-iscsi_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
-			 char *buf)
-{
-	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
-	int len;
-
-	switch (param) {
-	case ISCSI_HOST_PARAM_IPADDRESS:
-		spin_lock_bh(&session->lock);
-		if (!session->leadconn)
-			len = -ENODEV;
-		else
-			len = sprintf(buf, "%s\n",
-				      session->leadconn->local_address);
-		spin_unlock_bh(&session->lock);
-		break;
-	default:
-		return iscsi_host_get_param(shost, param, buf);
-	}
-
-	return len;
-}
-
 static void
 iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
 {
@@ -1869,54 +1843,70 @@ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
 }

 static struct iscsi_cls_session *
-iscsi_tcp_session_create(struct iscsi_transport *iscsit,
-			 struct scsi_transport_template *scsit,
-			 uint16_t cmds_max, uint16_t qdepth,
-			 uint32_t initial_cmdsn, uint32_t *hostno)
+iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
+			 uint16_t qdepth, uint32_t initial_cmdsn,
+			 uint32_t *hostno)
 {
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_session *session;
-	uint32_t hn;
+	struct Scsi_Host *shost;
 	int cmd_i;

-	cls_session = iscsi_session_setup(iscsit, scsit, cmds_max, qdepth,
-					  sizeof(struct iscsi_tcp_cmd_task),
-					  sizeof(struct iscsi_tcp_mgmt_task),
-					  initial_cmdsn, &hn);
-	if (!cls_session)
+	if (ep) {
+		printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
 		return NULL;
-	*hostno = hn;
+	}

-	session = class_to_transport_session(cls_session);
+	shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth);
+	if (!shost)
+		return NULL;
+	shost->transportt = iscsi_tcp_scsi_transport;
+	shost->max_lun = iscsi_max_lun;
+	shost->max_id = 0;
+	shost->max_channel = 0;
+	shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+
+	if (iscsi_host_add(shost, NULL))
+		goto free_host;
+	*hostno = shost->host_no;
+
+	cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
+					  sizeof(struct iscsi_tcp_task),
+					  initial_cmdsn, 0);
+	if (!cls_session)
+		goto remove_host;
+	session = cls_session->dd_data;
+
+	shost->can_queue = session->scsi_cmds_max;
 	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
-		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
-		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+		struct iscsi_task *task = session->cmds[cmd_i];
+		struct iscsi_tcp_task *tcp_task = task->dd_data;

-		ctask->hdr = &tcp_ctask->hdr.cmd_hdr;
-		ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE;
+		task->hdr = &tcp_task->hdr.cmd_hdr;
+		task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE;
 	}

-	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
-		struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
-		struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
-
-		mtask->hdr = (struct iscsi_hdr *) &tcp_mtask->hdr;
-	}
-
-	if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
-		goto r2tpool_alloc_fail;
+	if (iscsi_r2tpool_alloc(session))
+		goto remove_session;

 	return cls_session;

-r2tpool_alloc_fail:
+remove_session:
 	iscsi_session_teardown(cls_session);
+remove_host:
+	iscsi_host_remove(shost);
+free_host:
+	iscsi_host_free(shost);
 	return NULL;
 }

 static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
 {
-	iscsi_r2tpool_free(class_to_transport_session(cls_session));
-	iscsi_session_teardown(cls_session);
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+
+	iscsi_r2tpool_free(cls_session->dd_data);
+
+	iscsi_host_remove(shost);
+	iscsi_host_free(shost);
 }
static int iscsi_tcp_slave_configure(struct scsi_device *sdev) static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
@ -1971,14 +1961,11 @@ static struct iscsi_transport iscsi_tcp_transport = {
ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
ISCSI_LU_RESET_TMO | ISCSI_LU_RESET_TMO |
ISCSI_PING_TMO | ISCSI_RECV_TMO, ISCSI_PING_TMO | ISCSI_RECV_TMO |
ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
.host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
ISCSI_HOST_INITIATOR_NAME | ISCSI_HOST_INITIATOR_NAME |
ISCSI_HOST_NETDEV_NAME, ISCSI_HOST_NETDEV_NAME,
.host_template = &iscsi_sht,
.conndata_size = sizeof(struct iscsi_conn),
.max_conn = 1,
.max_cmd_len = 16,
/* session management */ /* session management */
.create_session = iscsi_tcp_session_create, .create_session = iscsi_tcp_session_create,
.destroy_session = iscsi_tcp_session_destroy, .destroy_session = iscsi_tcp_session_destroy,
@@ -1992,16 +1979,14 @@ static struct iscsi_transport iscsi_tcp_transport = {
 	.start_conn		= iscsi_conn_start,
 	.stop_conn		= iscsi_tcp_conn_stop,
 	/* iscsi host params */
-	.get_host_param		= iscsi_tcp_host_get_param,
+	.get_host_param		= iscsi_host_get_param,
 	.set_host_param		= iscsi_host_set_param,
 	/* IO */
 	.send_pdu		= iscsi_conn_send_pdu,
 	.get_stats		= iscsi_conn_get_stats,
-	.init_cmd_task		= iscsi_tcp_ctask_init,
-	.init_mgmt_task		= iscsi_tcp_mtask_init,
-	.xmit_cmd_task		= iscsi_tcp_ctask_xmit,
-	.xmit_mgmt_task		= iscsi_tcp_mtask_xmit,
-	.cleanup_cmd_task	= iscsi_tcp_cleanup_ctask,
+	.init_task		= iscsi_tcp_task_init,
+	.xmit_task		= iscsi_tcp_task_xmit,
+	.cleanup_task		= iscsi_tcp_cleanup_task,
 	/* recovery */
 	.session_recovery_timedout = iscsi_session_recovery_timedout,
 };
@@ -2014,9 +1999,10 @@ iscsi_tcp_init(void)
 			iscsi_max_lun);
 		return -EINVAL;
 	}
-	iscsi_tcp_transport.max_lun = iscsi_max_lun;

-	if (!iscsi_register_transport(&iscsi_tcp_transport))
+	iscsi_tcp_scsi_transport = iscsi_register_transport(
+							&iscsi_tcp_transport);
+	if (!iscsi_tcp_scsi_transport)
 		return -ENODEV;

 	return 0;


@@ -103,11 +103,6 @@ struct iscsi_data_task {
 	char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
 };

-struct iscsi_tcp_mgmt_task {
-	struct iscsi_hdr hdr;
-	char hdrext[ISCSI_DIGEST_SIZE]; /* Header-Digest */
-};
-
 struct iscsi_r2t_info {
 	__be32 ttt;		/* copied from R2T */
 	__be32 exp_statsn;	/* copied from R2T */
@@ -119,7 +114,7 @@ struct iscsi_r2t_info {
 	struct iscsi_data_task dtask;	/* Data-Out header buf */
 };

-struct iscsi_tcp_cmd_task {
+struct iscsi_tcp_task {
 	struct iscsi_hdr_buff {
 		struct iscsi_cmd cmd_hdr;
 		char hdrextbuf[ISCSI_MAX_AHS_SIZE +

File diff suppressed because it is too large


@@ -33,6 +33,7 @@ struct lpfc_sli2_slim;
 #define LPFC_MAX_SG_SEG_CNT	256	/* sg element count per scsi cmnd */
 #define LPFC_IOCB_LIST_CNT	2250	/* list of IOCBs for fast-path usage. */
 #define LPFC_Q_RAMP_UP_INTERVAL 120	/* lun q_depth ramp up interval */
+#define LPFC_VNAME_LEN		100	/* vport symbolic name length */

 /*
  * Following time intervals are used of adjusting SCSI device
@@ -59,6 +60,9 @@ struct lpfc_sli2_slim;

 #define MAX_HBAEVT	32

+/* lpfc wait event data ready flag */
+#define LPFC_DATA_READY		(1<<0)
+
 enum lpfc_polling_flags {
 	ENABLE_FCP_RING_POLLING = 0x1,
 	DISABLE_FCP_RING_INT    = 0x2
@@ -425,9 +429,6 @@ struct lpfc_hba {

 	uint16_t pci_cfg_value;

-	uint8_t work_found;
-#define LPFC_MAX_WORKER_ITERATION  4
-
 	uint8_t fc_linkspeed;	/* Link speed after last READ_LA */

 	uint32_t fc_eventTag;	/* event tag for link attention */
@@ -489,8 +490,9 @@ struct lpfc_hba {
 	uint32_t work_hs;		/* HS stored in case of ERRAT */
 	uint32_t work_status[2];	/* Extra status from SLIM */

-	wait_queue_head_t *work_wait;
+	wait_queue_head_t work_waitq;
 	struct task_struct *worker_thread;
+	long data_flags;

 	uint32_t hbq_in_use;		/* HBQs in use flag */
 	struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */
@@ -637,6 +639,17 @@ lpfc_is_link_up(struct lpfc_hba *phba)
 		phba->link_state == LPFC_HBA_READY;
 }

+static inline void
+lpfc_worker_wake_up(struct lpfc_hba *phba)
+{
+	/* Set the lpfc data pending flag */
+	set_bit(LPFC_DATA_READY, &phba->data_flags);
+
+	/* Wake up worker thread */
+	wake_up(&phba->work_waitq);
+	return;
+}
+
 #define FC_REG_DUMP_EVENT		0x10	/* Register for Dump events */
 #define FC_REG_TEMPERATURE_EVENT	0x20	/* Register for temperature
 						   event */


@@ -1995,8 +1995,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 		/* Don't allow mailbox commands to be sent when blocked
 		 * or when in the middle of discovery
 		 */
-		if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO ||
-		    vport->fc_flag & FC_NDISC_ACTIVE) {
+		if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
 			sysfs_mbox_idle(phba);
 			spin_unlock_irq(&phba->hbalock);
 			return -EAGAIN;


@@ -142,7 +142,7 @@ int lpfc_config_port_post(struct lpfc_hba *);
 int lpfc_hba_down_prep(struct lpfc_hba *);
 int lpfc_hba_down_post(struct lpfc_hba *);
 void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
-int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int, int);
+int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int);
 void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
 int lpfc_online(struct lpfc_hba *);
 void lpfc_unblock_mgmt_io(struct lpfc_hba *);
@@ -263,6 +263,7 @@ extern int lpfc_sli_mode;
 extern int lpfc_enable_npiv;

 int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
+int lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *, size_t);
 void lpfc_terminate_rport_io(struct fc_rport *);
 void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);


@@ -101,7 +101,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			/* Not enough posted buffers; Try posting more buffers */
 			phba->fc_stat.NoRcvBuf++;
 			if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
-				lpfc_post_buffer(phba, pring, 2, 1);
+				lpfc_post_buffer(phba, pring, 2);
 			return;
 		}
@@ -151,7 +151,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			}
 			list_del(&iocbq->list);
 			lpfc_sli_release_iocbq(phba, iocbq);
-			lpfc_post_buffer(phba, pring, i, 1);
+			lpfc_post_buffer(phba, pring, i);
 		}
 	}
 }
@@ -990,7 +990,7 @@ lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	return;
 }

-static int
+int
 lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
 			      size_t size)
 {
@@ -1679,20 +1679,18 @@ lpfc_fdmi_tmo(unsigned long ptr)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
 	struct lpfc_hba *phba = vport->phba;
+	uint32_t tmo_posted;
 	unsigned long iflag;

 	spin_lock_irqsave(&vport->work_port_lock, iflag);
-	if (!(vport->work_port_events & WORKER_FDMI_TMO)) {
+	tmo_posted = vport->work_port_events & WORKER_FDMI_TMO;
+	if (!tmo_posted)
 		vport->work_port_events |= WORKER_FDMI_TMO;
-		spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+	spin_unlock_irqrestore(&vport->work_port_lock, iflag);

-		spin_lock_irqsave(&phba->hbalock, iflag);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, iflag);
-	}
-	else
-		spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
 }

 void


@@ -1754,29 +1754,34 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_work_evt *evtp;

+	if (!(nlp->nlp_flag & NLP_DELAY_TMO))
+		return;
 	spin_lock_irq(shost->host_lock);
 	nlp->nlp_flag &= ~NLP_DELAY_TMO;
 	spin_unlock_irq(shost->host_lock);
 	del_timer_sync(&nlp->nlp_delayfunc);
 	nlp->nlp_last_elscmd = 0;
 	if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
 		list_del_init(&nlp->els_retry_evt.evt_listp);
 		/* Decrement nlp reference count held for the delayed retry */
 		evtp = &nlp->els_retry_evt;
 		lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
 	}
 	if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
 		spin_lock_irq(shost->host_lock);
 		nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
 		spin_unlock_irq(shost->host_lock);
 		if (vport->num_disc_nodes) {
-			/* Check to see if there are more
-			 * PLOGIs to be sent
-			 */
-			lpfc_more_plogi(vport);
+			if (vport->port_state < LPFC_VPORT_READY) {
+				/* Check if there are more ADISCs to be sent */
+				lpfc_more_adisc(vport);
+				if ((vport->num_disc_nodes == 0) &&
+				    (vport->fc_npr_cnt))
+					lpfc_els_disc_plogi(vport);
+			} else {
+				/* Check if there are more PLOGIs to be sent */
+				lpfc_more_plogi(vport);
+			}
 			if (vport->num_disc_nodes == 0) {
 				spin_lock_irq(shost->host_lock);
 				vport->fc_flag &= ~FC_NDISC_ACTIVE;
@@ -1798,10 +1803,6 @@ lpfc_els_retry_delay(unsigned long ptr)
 	unsigned long flags;
 	struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;

-	ndlp = (struct lpfc_nodelist *) ptr;
-	phba = ndlp->vport->phba;
-	evtp = &ndlp->els_retry_evt;
-
 	spin_lock_irqsave(&phba->hbalock, flags);
 	if (!list_empty(&evtp->evt_listp)) {
 		spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -1812,11 +1813,11 @@ lpfc_els_retry_delay(unsigned long ptr)
 	 * count until the queued work is done
 	 */
 	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
-	evtp->evt = LPFC_EVT_ELS_RETRY;
-	list_add_tail(&evtp->evt_listp, &phba->work_list);
-	if (phba->work_wait)
+	if (evtp->evt_arg1) {
+		evtp->evt = LPFC_EVT_ELS_RETRY;
+		list_add_tail(&evtp->evt_listp, &phba->work_list);
 		lpfc_worker_wake_up(phba);
+	}
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 	return;
 }
@@ -2761,10 +2762,11 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
 	npr = (PRLI *) pcmd;
 	vpd = &phba->vpd;
 	/*
-	 * If our firmware version is 3.20 or later,
-	 * set the following bits for FC-TAPE support.
+	 * If the remote port is a target and our firmware version is 3.20 or
+	 * later, set the following bits for FC-TAPE support.
 	 */
-	if (vpd->rev.feaLevelHigh >= 0x02) {
+	if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
+	    (vpd->rev.feaLevelHigh >= 0x02)) {
 		npr->ConfmComplAllowed = 1;
 		npr->Retry = 1;
 		npr->TaskRetryIdReq = 1;
@@ -3056,27 +3058,16 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
 {
 	struct lpfc_nodelist *ndlp = NULL;

-	/* Look at all nodes effected by pending RSCNs and move
-	 * them to NPR state.
-	 */
+	/* Move all affected nodes by pending RSCNs to NPR state. */
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 		if (!NLP_CHK_NODE_ACT(ndlp) ||
-		    ndlp->nlp_state == NLP_STE_UNUSED_NODE ||
-		    lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0)
+		    (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
+		    !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
 			continue;
-
 		lpfc_disc_state_machine(vport, ndlp, NULL,
 					NLP_EVT_DEVICE_RECOVERY);
-
-		/*
-		 * Make sure NLP_DELAY_TMO is NOT running after a device
-		 * recovery event.
-		 */
-		if (ndlp->nlp_flag & NLP_DELAY_TMO)
-			lpfc_cancel_retry_delay_tmo(vport, ndlp);
+		lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	}
-
 	return 0;
 }
@@ -3781,91 +3772,27 @@ static int
 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 		 struct lpfc_nodelist *fan_ndlp)
 {
-	struct lpfc_dmabuf *pcmd;
-	uint32_t *lp;
-	IOCB_t *icmd;
-	uint32_t cmd, did;
-	FAN *fp;
-	struct lpfc_nodelist *ndlp, *next_ndlp;
 	struct lpfc_hba *phba = vport->phba;
+	uint32_t *lp;
+	FAN *fp;

-	/* FAN received */
-	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-			 "0265 FAN received\n");
-
-	icmd = &cmdiocb->iocb;
-	did = icmd->un.elsreq64.remoteID;
-	pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
-	lp = (uint32_t *)pcmd->virt;
-
-	cmd = *lp++;
-	fp = (FAN *) lp;
-
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
+	lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+	fp = (FAN *) ++lp;
 	/* FAN received; Fan does not have a reply sequence */
-
-	if (phba->pport->port_state == LPFC_LOCAL_CFG_LINK) {
+	if ((vport == phba->pport) &&
+	    (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
 		if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
-			sizeof(struct lpfc_name)) != 0) ||
+			    sizeof(struct lpfc_name))) ||
 		    (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
-			sizeof(struct lpfc_name)) != 0)) {
-			/*
-			 * This node has switched fabrics.  FLOGI is required
-			 * Clean up the old rpi's
-			 */
-			list_for_each_entry_safe(ndlp, next_ndlp,
-						 &vport->fc_nodes, nlp_listp) {
-				if (!NLP_CHK_NODE_ACT(ndlp))
-					continue;
-				if (ndlp->nlp_state != NLP_STE_NPR_NODE)
-					continue;
-				if (ndlp->nlp_type & NLP_FABRIC) {
-					/*
-					 * Clean up old Fabric, Nameserver and
-					 * other NLP_FABRIC logins
-					 */
-					lpfc_drop_node(vport, ndlp);
-				} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
-					/* Fail outstanding I/O now since this
-					 * device is marked for PLOGI
-					 */
-					lpfc_unreg_rpi(vport, ndlp);
-				}
-			}
-
+			    sizeof(struct lpfc_name)))) {
+			/* This port has switched fabrics. FLOGI is required */
 			lpfc_initial_flogi(vport);
-			return 0;
+		} else {
+			/* FAN verified - skip FLOGI */
+			vport->fc_myDID = vport->fc_prevDID;
+			lpfc_issue_fabric_reglogin(vport);
 		}
-		/* Discovery not needed,
-		 * move the nodes to their original state.
-		 */
-		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
-					 nlp_listp) {
-			if (!NLP_CHK_NODE_ACT(ndlp))
-				continue;
-			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
-				continue;
-
-			switch (ndlp->nlp_prev_state) {
-			case NLP_STE_UNMAPPED_NODE:
-				ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
-				lpfc_nlp_set_state(vport, ndlp,
-						   NLP_STE_UNMAPPED_NODE);
-				break;
-
-			case NLP_STE_MAPPED_NODE:
-				ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
-				lpfc_nlp_set_state(vport, ndlp,
-						   NLP_STE_MAPPED_NODE);
-				break;
-
-			default:
-				break;
-			}
-		}
-
-		/* Start discovery - this should just do CLEAR_LA */
-		lpfc_disc_start(vport);
 	}
 	return 0;
 }
@@ -3875,20 +3802,17 @@ lpfc_els_timeout(unsigned long ptr)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
 	struct lpfc_hba *phba = vport->phba;
+	uint32_t tmo_posted;
 	unsigned long iflag;

 	spin_lock_irqsave(&vport->work_port_lock, iflag);
-	if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
+	tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
+	if (!tmo_posted)
 		vport->work_port_events |= WORKER_ELS_TMO;
-		spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+	spin_unlock_irqrestore(&vport->work_port_lock, iflag);

-		spin_lock_irqsave(&phba->hbalock, iflag);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, iflag);
-	}
-	else
-		spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
 	return;
 }
@@ -3933,9 +3857,6 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
 		    els_command == ELS_CMD_FDISC)
 			continue;

-		if (vport != piocb->vport)
-			continue;
-
 		if (piocb->drvrTimeout > 0) {
 			if (piocb->drvrTimeout >= timeout)
 				piocb->drvrTimeout -= timeout;
@@ -4089,7 +4010,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
 	cmd = *payload;
 	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
-		lpfc_post_buffer(phba, pring, 1, 1);
+		lpfc_post_buffer(phba, pring, 1);

 	did = icmd->un.rcvels.remoteID;
 	if (icmd->ulpStatus) {
@@ -4398,7 +4319,7 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			phba->fc_stat.NoRcvBuf++;
 			/* Not enough posted buffers; Try posting more buffers */
 			if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
-				lpfc_post_buffer(phba, pring, 0, 1);
+				lpfc_post_buffer(phba, pring, 0);
 			return;
 		}
@@ -4842,18 +4763,16 @@ lpfc_fabric_block_timeout(unsigned long ptr)
 	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
 	unsigned long iflags;
 	uint32_t tmo_posted;

 	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
 	tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
 	if (!tmo_posted)
 		phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);

-	if (!tmo_posted) {
-		spin_lock_irqsave(&phba->hbalock, iflags);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
-	}
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
 }

 static void


@@ -153,11 +153,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 	 * count until this queued work is done
 	 */
 	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
-	evtp->evt = LPFC_EVT_DEV_LOSS;
-	list_add_tail(&evtp->evt_listp, &phba->work_list);
-	if (phba->work_wait)
-		wake_up(phba->work_wait);
-
+	if (evtp->evt_arg1) {
+		evtp->evt = LPFC_EVT_DEV_LOSS;
+		list_add_tail(&evtp->evt_listp, &phba->work_list);
+		lpfc_worker_wake_up(phba);
+	}
 	spin_unlock_irq(&phba->hbalock);

 	return;
@@ -276,14 +276,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
 }

-void
-lpfc_worker_wake_up(struct lpfc_hba *phba)
-{
-	wake_up(phba->work_wait);
-	return;
-}
-
 static void
 lpfc_work_list_done(struct lpfc_hba *phba)
 {
@@ -429,6 +421,8 @@ lpfc_work_done(struct lpfc_hba *phba)
 	    || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
 		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
+			/* Set the lpfc data pending flag */
+			set_bit(LPFC_DATA_READY, &phba->data_flags);
 		} else {
 			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
 			lpfc_sli_handle_slow_ring_event(phba, pring,
@@ -459,69 +453,29 @@ lpfc_work_done(struct lpfc_hba *phba)
 	lpfc_work_list_done(phba);
 }
-static int
-check_work_wait_done(struct lpfc_hba *phba)
-{
-	struct lpfc_vport *vport;
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
-	int rc = 0;
-
-	spin_lock_irq(&phba->hbalock);
-	list_for_each_entry(vport, &phba->port_list, listentry) {
-		if (vport->work_port_events) {
-			rc = 1;
-			break;
-		}
-	}
-	if (rc || phba->work_ha || (!list_empty(&phba->work_list)) ||
-	    kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) {
-		rc = 1;
-		phba->work_found++;
-	} else
-		phba->work_found = 0;
-	spin_unlock_irq(&phba->hbalock);
-	return rc;
-}
-
 int
 lpfc_do_work(void *p)
 {
 	struct lpfc_hba *phba = p;
 	int rc;
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);

 	set_user_nice(current, -20);
-	phba->work_wait = &work_waitq;
-	phba->work_found = 0;
+	phba->data_flags = 0;

 	while (1) {
-
-		rc = wait_event_interruptible(work_waitq,
-					      check_work_wait_done(phba));
-
+		/* wait and check worker queue activities */
+		rc = wait_event_interruptible(phba->work_waitq,
					(test_and_clear_bit(LPFC_DATA_READY,
							    &phba->data_flags)
					 || kthread_should_stop()));
 		BUG_ON(rc);

 		if (kthread_should_stop())
 			break;

+		/* Attend pending lpfc data processing */
 		lpfc_work_done(phba);
-
-		/* If there is alot of slow ring work, like during link up
-		 * check_work_wait_done() may cause this thread to not give
-		 * up the CPU for very long periods of time. This may cause
-		 * soft lockups or other problems. To avoid these situations
-		 * give up the CPU here after LPFC_MAX_WORKER_ITERATION
-		 * consecutive iterations.
-		 */
-		if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
-			phba->work_found = 0;
-			schedule();
-		}
 	}
-	spin_lock_irq(&phba->hbalock);
-	phba->work_wait = NULL;
-	spin_unlock_irq(&phba->hbalock);
 	return 0;
 }
@@ -551,10 +505,10 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,

 	spin_lock_irqsave(&phba->hbalock, flags);
 	list_add_tail(&evtp->evt_listp, &phba->work_list);
-	if (phba->work_wait)
-		lpfc_worker_wake_up(phba);
 	spin_unlock_irqrestore(&phba->hbalock, flags);

+	lpfc_worker_wake_up(phba);
+
 	return 1;
 }
@@ -963,6 +917,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)

 	if (phba->fc_topology == TOPOLOGY_LOOP) {
 		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
+		if (phba->cfg_enable_npiv)
+			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+				"1309 Link Up Event npiv not supported in loop "
+				"topology\n");
 		/* Get Loop Map information */
 		if (la->il)
 			vport->fc_flag |= FC_LBIT;
@@ -1087,6 +1045,8 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	MAILBOX_t *mb = &pmb->mb;
 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

+	/* Unblock ELS traffic */
+	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
 	/* Check for error */
 	if (mb->mbxStatus) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
@@ -1650,7 +1610,6 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		ndlp->nlp_DID, old_state, state);

 	if (old_state == NLP_STE_NPR_NODE &&
-	    (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
 	    state != NLP_STE_NPR_NODE)
 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	if (old_state == NLP_STE_UNMAPPED_NODE) {
@@ -1687,8 +1646,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

-	if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
-		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
 		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
 	spin_lock_irq(shost->host_lock);
@@ -1701,8 +1659,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 static void
 lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
-	if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
-		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
 		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
 	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
@@ -2121,10 +2078,8 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	ndlp->nlp_last_elscmd = 0;
 	del_timer_sync(&ndlp->nlp_delayfunc);

-	if (!list_empty(&ndlp->els_retry_evt.evt_listp))
-		list_del_init(&ndlp->els_retry_evt.evt_listp);
-	if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
-		list_del_init(&ndlp->dev_loss_evt.evt_listp);
+	list_del_init(&ndlp->els_retry_evt.evt_listp);
+	list_del_init(&ndlp->dev_loss_evt.evt_listp);

 	lpfc_unreg_rpi(vport, ndlp);
@@ -2144,10 +2099,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	LPFC_MBOXQ_t *mbox;
 	int rc;

-	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
-		lpfc_cancel_retry_delay_tmo(vport, ndlp);
-	}
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) {
 		/* For this case we need to cleanup the default rpi
 		 * allocated by the firmware.
@@ -2317,8 +2269,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
 			/* Since this node is marked for discovery,
 			 * delay timeout is not needed.
 			 */
-			if (ndlp->nlp_flag & NLP_DELAY_TMO)
-				lpfc_cancel_retry_delay_tmo(vport, ndlp);
+			lpfc_cancel_retry_delay_tmo(vport, ndlp);
 		} else
 			ndlp = NULL;
 	} else {
@@ -2643,21 +2594,20 @@ lpfc_disc_timeout(unsigned long ptr)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
 	struct lpfc_hba *phba = vport->phba;
+	uint32_t tmo_posted;
 	unsigned long flags = 0;

 	if (unlikely(!phba))
 		return;

-	if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
-		spin_lock_irqsave(&vport->work_port_lock, flags);
+	spin_lock_irqsave(&vport->work_port_lock, flags);
+	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
+	if (!tmo_posted)
 		vport->work_port_events |= WORKER_DISC_TMO;
-		spin_unlock_irqrestore(&vport->work_port_lock, flags);
+	spin_unlock_irqrestore(&vport->work_port_lock, flags);

-		spin_lock_irqsave(&phba->hbalock, flags);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-	}
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
 	return;
 }


@@ -145,8 +145,10 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
 		return -ERESTART;
 	}

-	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp)
+	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
+		mempool_free(pmb, phba->mbox_mem_pool);
 		return -EINVAL;
+	}

 	/* Save information as VPD data */
 	vp->rev.rBit = 1;
@@ -551,18 +553,18 @@ static void
 lpfc_hb_timeout(unsigned long ptr)
 {
 	struct lpfc_hba *phba;
+	uint32_t tmo_posted;
 	unsigned long iflag;

 	phba = (struct lpfc_hba *)ptr;
 	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
-	if (!(phba->pport->work_port_events & WORKER_HB_TMO))
+	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
+	if (!tmo_posted)
 		phba->pport->work_port_events |= WORKER_HB_TMO;
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

-	spin_lock_irqsave(&phba->hbalock, iflag);
-	if (phba->work_wait)
-		wake_up(phba->work_wait);
-	spin_unlock_irqrestore(&phba->hbalock, iflag);
-
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
 	return;
 }
@@ -851,6 +853,8 @@ lpfc_handle_latt(struct lpfc_hba *phba)
 	lpfc_read_la(phba, pmb, mp);
 	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
 	pmb->vport = vport;
+	/* Block ELS IOCBs until we have processed this mbox command */
+	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
 	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
 	if (rc == MBX_NOT_FINISHED) {
 		rc = 4;
@@ -866,6 +870,7 @@ lpfc_handle_latt(struct lpfc_hba *phba)
 	return;

 lpfc_handle_latt_free_mbuf:
+	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 lpfc_handle_latt_free_mp:
 	kfree(mp);
@@ -1194,8 +1199,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 /*   Returns the number of buffers NOT posted.    */
 /**************************************************/
 int
-lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt,
-		 int type)
+lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
 {
 	IOCB_t *icmd;
 	struct lpfc_iocbq *iocb;
@@ -1295,7 +1299,7 @@ lpfc_post_rcv_buf(struct lpfc_hba *phba)
 	struct lpfc_sli *psli = &phba->sli;

 	/* Ring 0, ELS / CT buffers */
-	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1);
+	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
 	/* Ring 2 - FCP no buffers needed */

 	return 0;
@@ -1454,6 +1458,15 @@ lpfc_cleanup(struct lpfc_vport *vport)
 			lpfc_disc_state_machine(vport, ndlp, NULL,
 					     NLP_EVT_DEVICE_RM);

+		/* nlp_type zero is not defined, nlp_flag zero also not defined,
+		 * nlp_state is unused, this happens when
+		 * an initiator has logged
+		 * into us so cleanup this ndlp.
+		 */
+		if ((ndlp->nlp_type == 0) && (ndlp->nlp_flag == 0) &&
+		    (ndlp->nlp_state == 0))
+			lpfc_nlp_put(ndlp);
+
 	}

 	/* At this point, ALL ndlp's should be gone
@@ -2101,6 +2114,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT);
 	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

+	/* Initialize the wait queue head for the kernel thread */
+	init_waitqueue_head(&phba->work_waitq);
+
 	/* Startup the kernel thread for this host adapter. */
 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
 				       "lpfc_worker_%d", phba->brd_no);


@@ -235,10 +235,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 			(iocb->iocb_cmpl) (phba, iocb, iocb);
 		}
 	}
-
-	/* If we are delaying issuing an ELS command, cancel it */
-	if (ndlp->nlp_flag & NLP_DELAY_TMO)
-		lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
+	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
 	return 0;
 }
@@ -249,7 +246,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_dmabuf *pcmd;
-	struct lpfc_work_evt *evtp;
 	uint32_t *lp;
 	IOCB_t *icmd;
 	struct serv_parm *sp;
@@ -425,73 +421,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			 ndlp, mbox);
 		return 1;
 	}
-
-	/* If the remote NPort logs into us, before we can initiate
-	 * discovery to them, cleanup the NPort from discovery accordingly.
-	 */
-	if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
-		spin_lock_irq(shost->host_lock);
-		ndlp->nlp_flag &= ~NLP_DELAY_TMO;
-		spin_unlock_irq(shost->host_lock);
-		del_timer_sync(&ndlp->nlp_delayfunc);
-		ndlp->nlp_last_elscmd = 0;
-
-		if (!list_empty(&ndlp->els_retry_evt.evt_listp)) {
-			list_del_init(&ndlp->els_retry_evt.evt_listp);
-			/* Decrement ndlp reference count held for the
-			 * delayed retry
-			 */
-			evtp = &ndlp->els_retry_evt;
-			lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
-		}
-
-		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
-			spin_lock_irq(shost->host_lock);
-			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-			spin_unlock_irq(shost->host_lock);
-
-			if ((ndlp->nlp_flag & NLP_ADISC_SND) &&
-			    (vport->num_disc_nodes)) {
-				/* Check to see if there are more
-				 * ADISCs to be sent
-				 */
-				lpfc_more_adisc(vport);
-
-				if ((vport->num_disc_nodes == 0) &&
-				    (vport->fc_npr_cnt))
-					lpfc_els_disc_plogi(vport);
-
-				if (vport->num_disc_nodes == 0) {
-					spin_lock_irq(shost->host_lock);
-					vport->fc_flag &= ~FC_NDISC_ACTIVE;
-					spin_unlock_irq(shost->host_lock);
-					lpfc_can_disctmo(vport);
-					lpfc_end_rscn(vport);
-				}
-			}
-		}
-	} else if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
-		   (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
-		   (vport->num_disc_nodes)) {
-		spin_lock_irq(shost->host_lock);
-		ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-		spin_unlock_irq(shost->host_lock);
-		/* Check to see if there are more
-		 * PLOGIs to be sent
-		 */
-		lpfc_more_plogi(vport);
-		if (vport->num_disc_nodes == 0) {
-			spin_lock_irq(shost->host_lock);
-			vport->fc_flag &= ~FC_NDISC_ACTIVE;
-			spin_unlock_irq(shost->host_lock);
-			lpfc_can_disctmo(vport);
-			lpfc_end_rscn(vport);
-		}
-	}
-
 	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
 	return 1;
-
 out:
 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
@@ -574,7 +505,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	else
 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);

-	if (!(ndlp->nlp_type & NLP_FABRIC) ||
+	if ((!(ndlp->nlp_type & NLP_FABRIC) &&
+	     ((ndlp->nlp_type & NLP_FCP_TARGET) ||
+	      !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
 	    (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
 		/* Only try to re-login if this is NOT a Fabric Node */
 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
@@ -751,6 +684,7 @@ static uint32_t
 lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			   void *arg, uint32_t evt)
 {
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_iocbq *cmdiocb = arg;
 	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -776,7 +710,22 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
 			NULL);
 	} else {
-		lpfc_rcv_plogi(vport, ndlp, cmdiocb);
+		if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
+		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
+		    (vport->num_disc_nodes)) {
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+			spin_unlock_irq(shost->host_lock);
+			/* Check if there are more PLOGIs to be sent */
+			lpfc_more_plogi(vport);
+			if (vport->num_disc_nodes == 0) {
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag &= ~FC_NDISC_ACTIVE;
+				spin_unlock_irq(shost->host_lock);
+				lpfc_can_disctmo(vport);
+				lpfc_end_rscn(vport);
+			}
+		}
 	} /* If our portname was less */

 	return ndlp->nlp_state;
@@ -1040,6 +989,7 @@ static uint32_t
 lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			   void *arg, uint32_t evt)
 {
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_iocbq *cmdiocb;
@@ -1048,9 +998,28 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,

 	cmdiocb = (struct lpfc_iocbq *) arg;

-	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb))
-		return ndlp->nlp_state;
+	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
+		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+			spin_unlock_irq(shost->host_lock);
+			if (vport->num_disc_nodes) {
+				lpfc_more_adisc(vport);
+				if ((vport->num_disc_nodes == 0) &&
+				    (vport->fc_npr_cnt))
+					lpfc_els_disc_plogi(vport);
+				if (vport->num_disc_nodes == 0) {
+					spin_lock_irq(shost->host_lock);
+					vport->fc_flag &= ~FC_NDISC_ACTIVE;
+					spin_unlock_irq(shost->host_lock);
+					lpfc_can_disctmo(vport);
+					lpfc_end_rscn(vport);
+				}
+			}
+		}
+		return ndlp->nlp_state;
+	}

 	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
 	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
@@ -1742,24 +1711,21 @@ lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

 	/* Ignore PLOGI if we have an outstanding LOGO */
-	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) {
+	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
 		return ndlp->nlp_state;
-	}
-
 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
+		lpfc_cancel_retry_delay_tmo(vport, ndlp);
 		spin_lock_irq(shost->host_lock);
-		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+		ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
 		spin_unlock_irq(shost->host_lock);
-		return ndlp->nlp_state;
-	}
-
-	/* send PLOGI immediately, move to PLOGI issue state */
-	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
-		ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
-		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
-		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+	} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
+		/* send PLOGI immediately, move to PLOGI issue state */
+		if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+		}
 	}
-
 	return ndlp->nlp_state;
 }
@@ -1810,7 +1776,6 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
-
 	/*
 	 * Do not start discovery if discovery is about to start
 	 * or discovery in progress for this node. Starting discovery
@@ -1973,9 +1938,7 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
 	spin_unlock_irq(shost->host_lock);
-	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
-		lpfc_cancel_retry_delay_tmo(vport, ndlp);
-	}
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	return ndlp->nlp_state;
 }


@@ -50,6 +50,7 @@ void
 lpfc_adjust_queue_depth(struct lpfc_hba *phba)
 {
 	unsigned long flags;
+	uint32_t evt_posted;
 	spin_lock_irqsave(&phba->hbalock, flags);
 	atomic_inc(&phba->num_rsrc_err);
@@ -65,17 +66,13 @@ lpfc_adjust_queue_depth(struct lpfc_hba *phba)
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
-	if ((phba->pport->work_port_events &
-	    WORKER_RAMP_DOWN_QUEUE) == 0) {
+	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
+	if (!evt_posted)
 		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
-	}
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
-	spin_lock_irqsave(&phba->hbalock, flags);
-	if (phba->work_wait)
-		wake_up(phba->work_wait);
-	spin_unlock_irqrestore(&phba->hbalock, flags);
+	if (!evt_posted)
+		lpfc_worker_wake_up(phba);
 	return;
 }
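The hunk above, and the rampup hunk that follows, replace an open-coded wake-up under hbalock with a latched event flag: the event bit is tested and set under the work-port lock, and the worker is woken only when this caller actually posted the bit. A minimal user-space sketch of the same latch-then-wake idea, using pthreads instead of the lpfc worker infrastructure (all names here are illustrative, not from the driver):

    #include <pthread.h>

    /* Hypothetical stand-ins for phba->pport->work_port_events
     * and the lpfc worker thread's wait channel. */
    static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  work_cv   = PTHREAD_COND_INITIALIZER;
    static unsigned int    work_events;

    #define WORKER_RAMP_DOWN_QUEUE 0x1

    static void post_ramp_down_event(void)
    {
            unsigned int evt_posted;

            pthread_mutex_lock(&work_lock);
            evt_posted = work_events & WORKER_RAMP_DOWN_QUEUE;
            if (!evt_posted)
                    work_events |= WORKER_RAMP_DOWN_QUEUE;
            pthread_mutex_unlock(&work_lock);

            /* Wake the worker only when this call posted the event,
             * mirroring "if (!evt_posted) lpfc_worker_wake_up(phba)". */
            if (!evt_posted)
                    pthread_cond_signal(&work_cv);
    }

The design point is that an already-posted event means a wake-up is already pending, so repeated posts need neither the extra lock round-trip nor a redundant signal.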
@@ -89,6 +86,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
 {
 	unsigned long flags;
 	struct lpfc_hba *phba = vport->phba;
+	uint32_t evt_posted;
 	atomic_inc(&phba->num_cmd_success);
 	if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
@@ -103,16 +101,14 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
-	if ((phba->pport->work_port_events &
-	    WORKER_RAMP_UP_QUEUE) == 0) {
+	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
+	if (!evt_posted)
 		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
-	}
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
-	spin_lock_irqsave(&phba->hbalock, flags);
-	if (phba->work_wait)
-		wake_up(phba->work_wait);
-	spin_unlock_irqrestore(&phba->hbalock, flags);
+	if (!evt_posted)
+		lpfc_worker_wake_up(phba);
+	return;
 }
 void
@@ -609,9 +605,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	result = cmd->result;
 	sdev = cmd->device;
 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
-	spin_lock_irqsave(sdev->host->host_lock, flags);
-	lpfc_cmd->pCmd = NULL;	/* This must be done before scsi_done */
-	spin_unlock_irqrestore(sdev->host->host_lock, flags);
 	cmd->scsi_done(cmd);
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
@@ -620,6 +613,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 		 * wake up the thread.
 		 */
 		spin_lock_irqsave(sdev->host->host_lock, flags);
+		lpfc_cmd->pCmd = NULL;
 		if (lpfc_cmd->waitq)
 			wake_up(lpfc_cmd->waitq);
 		spin_unlock_irqrestore(sdev->host->host_lock, flags);
@@ -690,6 +684,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 		 * wake up the thread.
 		 */
		spin_lock_irqsave(sdev->host->host_lock, flags);
+		lpfc_cmd->pCmd = NULL;
 		if (lpfc_cmd->waitq)
 			wake_up(lpfc_cmd->waitq);
 		spin_unlock_irqrestore(sdev->host->host_lock, flags);
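The two hunks above move `lpfc_cmd->pCmd = NULL` into the same host_lock section that tests and wakes `waitq`, so a thread waiting for the command to finish can never observe the wake-up without also observing the cleared ownership pointer. The shape of that pattern, sketched in portable C with a mutex/condvar pair standing in for host_lock and the wait queue (struct and names are illustrative; initialization of the lock and condvar is elided):

    #include <pthread.h>
    #include <stdbool.h>

    struct fake_cmd {
            pthread_mutex_t lock;    /* stands in for sdev->host->host_lock */
            pthread_cond_t  done_cv; /* stands in for lpfc_cmd->waitq */
            void           *pCmd;    /* owning command, NULL once finished */
            bool            waiter;  /* someone is sleeping on done_cv */
    };

    static void complete_cmd(struct fake_cmd *c)
    {
            pthread_mutex_lock(&c->lock);
            c->pCmd = NULL;          /* clear ownership and wake atomically */
            if (c->waiter)
                    pthread_cond_broadcast(&c->done_cv);
            pthread_mutex_unlock(&c->lock);
    }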
@@ -849,14 +844,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
 	struct lpfc_iocbq *iocbq;
 	struct lpfc_iocbq *iocbqrsp;
 	int ret;
+	int status;
 	if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
 		return FAILED;
 	lpfc_cmd->rdata = rdata;
-	ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
+	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
 					   FCP_TARGET_RESET);
-	if (!ret)
+	if (!status)
 		return FAILED;
 	iocbq = &lpfc_cmd->cur_iocbq;
@@ -869,12 +865,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
 			 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
 			 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
-	ret = lpfc_sli_issue_iocb_wait(phba,
+	status = lpfc_sli_issue_iocb_wait(phba,
 				       &phba->sli.ring[phba->sli.fcp_ring],
 				       iocbq, iocbqrsp, lpfc_cmd->timeout);
-	if (ret != IOCB_SUCCESS) {
-		if (ret == IOCB_TIMEDOUT)
+	if (status != IOCB_SUCCESS) {
+		if (status == IOCB_TIMEDOUT) {
 			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
+			ret = TIMEOUT_ERROR;
+		} else
+			ret = FAILED;
 		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
 	} else {
 		ret = SUCCESS;
@@ -1142,121 +1141,96 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 	struct lpfc_iocbq *iocbq, *iocbqrsp;
 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
 	struct lpfc_nodelist *pnode = rdata->pnode;
-	uint32_t cmd_result = 0, cmd_status = 0;
-	int ret = FAILED;
-	int iocb_status = IOCB_SUCCESS;
-	int cnt, loopcnt;
+	unsigned long later;
+	int ret = SUCCESS;
+	int status;
+	int cnt;
 	lpfc_block_error_handler(cmnd);
-	loopcnt = 0;
 	/*
 	 * If target is not in a MAPPED state, delay the reset until
 	 * target is rediscovered or devloss timeout expires.
 	 */
-	while (1) {
+	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+	while (time_after(later, jiffies)) {
 		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
-			goto out;
-		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
-			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
-			loopcnt++;
-			rdata = cmnd->device->hostdata;
-			if (!rdata ||
-			    (loopcnt > ((vport->cfg_devloss_tmo * 2) + 1))){
-				lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
-					"0721 LUN Reset rport "
-					"failure: cnt x%x rdata x%p\n",
-					loopcnt, rdata);
-				goto out;
-			}
-			pnode = rdata->pnode;
-			if (!pnode || !NLP_CHK_NODE_ACT(pnode))
-				goto out;
-		}
+			return FAILED;
 		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
 			break;
+		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
+		rdata = cmnd->device->hostdata;
+		if (!rdata)
+			break;
+		pnode = rdata->pnode;
+	}
+	if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+				 "0721 LUN Reset rport "
+				 "failure: msec x%x rdata x%p\n",
+				 jiffies_to_msecs(jiffies - later), rdata);
+		return FAILED;
 	}
 	lpfc_cmd = lpfc_get_scsi_buf(phba);
 	if (lpfc_cmd == NULL)
-		goto out;
+		return FAILED;
 	lpfc_cmd->timeout = 60;
 	lpfc_cmd->rdata = rdata;
-	ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun,
-					   FCP_TARGET_RESET);
-	if (!ret)
-		goto out_free_scsi_buf;
+	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
+					      cmnd->device->lun,
+					      FCP_TARGET_RESET);
+	if (!status) {
+		lpfc_release_scsi_buf(phba, lpfc_cmd);
+		return FAILED;
+	}
 	iocbq = &lpfc_cmd->cur_iocbq;
 	/* get a buffer for this IOCB command response */
 	iocbqrsp = lpfc_sli_get_iocbq(phba);
-	if (iocbqrsp == NULL)
-		goto out_free_scsi_buf;
+	if (iocbqrsp == NULL) {
+		lpfc_release_scsi_buf(phba, lpfc_cmd);
+		return FAILED;
+	}
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
 			 "0703 Issue target reset to TGT %d LUN %d "
 			 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
 			 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
-	iocb_status = lpfc_sli_issue_iocb_wait(phba,
+	status = lpfc_sli_issue_iocb_wait(phba,
 				       &phba->sli.ring[phba->sli.fcp_ring],
 				       iocbq, iocbqrsp, lpfc_cmd->timeout);
-	if (iocb_status == IOCB_TIMEDOUT)
+	if (status == IOCB_TIMEDOUT) {
 		iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
-	if (iocb_status == IOCB_SUCCESS)
-		ret = SUCCESS;
-	else
-		ret = iocb_status;
-	cmd_result = iocbqrsp->iocb.un.ulpWord[4];
-	cmd_status = iocbqrsp->iocb.ulpStatus;
-	lpfc_sli_release_iocbq(phba, iocbqrsp);
-	/*
-	 * All outstanding txcmplq I/Os should have been aborted by the device.
-	 * Unfortunately, some targets do not abide by this forcing the driver
-	 * to double check.
-	 */
-	cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
-				LPFC_CTX_LUN);
-	if (cnt)
-		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
-				    cmnd->device->id, cmnd->device->lun,
-				    LPFC_CTX_LUN);
-	loopcnt = 0;
-	while(cnt) {
-		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
-		if (++loopcnt
-		    > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
-			break;
-		cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
-					cmnd->device->lun, LPFC_CTX_LUN);
-	}
-	if (cnt) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
-			"0719 device reset I/O flush failure: "
-			"cnt x%x\n", cnt);
-		ret = FAILED;
-	}
- out_free_scsi_buf:
-	if (iocb_status != IOCB_TIMEDOUT) {
+		ret = TIMEOUT_ERROR;
+	} else {
+		if (status != IOCB_SUCCESS)
+			ret = FAILED;
 		lpfc_release_scsi_buf(phba, lpfc_cmd);
 	}
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 		"0713 SCSI layer issued device reset (%d, %d) "
 		"return x%x status x%x result x%x\n",
 		cmnd->device->id, cmnd->device->lun, ret,
-		cmd_status, cmd_result);
- out:
+		iocbqrsp->iocb.ulpStatus,
+		iocbqrsp->iocb.un.ulpWord[4]);
+	lpfc_sli_release_iocbq(phba, iocbqrsp);
+	cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
+				LPFC_CTX_TGT);
+	if (cnt)
+		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
+				    cmnd->device->id, cmnd->device->lun,
+				    LPFC_CTX_TGT);
+	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+	while (time_after(later, jiffies) && cnt) {
+		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
+		cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
+					cmnd->device->lun, LPFC_CTX_TGT);
+	}
+	if (cnt) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			"0719 device reset I/O flush failure: "
+			"cnt x%x\n", cnt);
+		ret = FAILED;
	}
 	return ret;
 }
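Both reset handlers now bound their polling with a jiffies deadline: `later = msecs_to_jiffies(2 * devloss_tmo * 1000) + jiffies`, then `while (time_after(later, jiffies))`. The kernel's time_after() stays correct across counter wraparound because it compares the signed difference rather than the raw values. A user-space sketch of the same deadline idiom, with a caller-supplied check standing in for the outstanding-iocb count (names are illustrative):

    #include <stdbool.h>
    #include <time.h>

    /* Wraparound-safe "a is after b" on millisecond ticks, like time_after(). */
    static bool deadline_after(long a, long b)
    {
            return (long)(b - a) < 0;
    }

    static long now_ms(void)
    {
            struct timespec ts;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
    }

    /* Poll done() every 20 ms until it succeeds or timeout_ms elapses. */
    static bool wait_with_deadline(bool (*done)(void), long timeout_ms)
    {
            long later = now_ms() + timeout_ms;
            struct timespec nap = { 0, 20 * 1000000L };  /* 20 ms, as in the loop */

            while (deadline_after(later, now_ms())) {
                    if (done())
                            return true;
                    nanosleep(&nap, NULL);
            }
            return false;  /* deadline expired: mirrors the flush-failure path */
    }

Compared with the old `loopcnt` counting, a deadline makes the total wait independent of how long each polling iteration actually sleeps.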
@@ -1268,19 +1242,12 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_nodelist *ndlp = NULL;
 	int match;
-	int ret = FAILED, i, err_count = 0;
-	int cnt, loopcnt;
+	int ret = SUCCESS, status, i;
+	int cnt;
 	struct lpfc_scsi_buf * lpfc_cmd;
+	unsigned long later;
 	lpfc_block_error_handler(cmnd);
-	lpfc_cmd = lpfc_get_scsi_buf(phba);
-	if (lpfc_cmd == NULL)
-		goto out;
-	/* The lpfc_cmd storage is reused.  Set all loop invariants. */
-	lpfc_cmd->timeout = 60;
 	/*
 	 * Since the driver manages a single bus device, reset all
 	 * targets known to the driver.  Should any target reset
@@ -1294,7 +1261,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 		if (!NLP_CHK_NODE_ACT(ndlp))
 			continue;
 		if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
-		    i == ndlp->nlp_sid &&
+		    ndlp->nlp_sid == i &&
 		    ndlp->rport) {
 			match = 1;
 			break;
@@ -1303,27 +1270,22 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 		spin_unlock_irq(shost->host_lock);
 		if (!match)
 			continue;
-		ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
-					  cmnd->device->lun,
-					  ndlp->rport->dd_data);
-		if (ret != SUCCESS) {
+		lpfc_cmd = lpfc_get_scsi_buf(phba);
+		if (lpfc_cmd) {
+			lpfc_cmd->timeout = 60;
+			status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
+						     cmnd->device->lun,
+						     ndlp->rport->dd_data);
+			if (status != TIMEOUT_ERROR)
+				lpfc_release_scsi_buf(phba, lpfc_cmd);
+		}
+		if (!lpfc_cmd || status != SUCCESS) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 				"0700 Bus Reset on target %d failed\n",
 				i);
-			err_count++;
-			break;
+			ret = FAILED;
 		}
 	}
-	if (ret != IOCB_TIMEDOUT)
-		lpfc_release_scsi_buf(phba, lpfc_cmd);
-	if (err_count == 0)
-		ret = SUCCESS;
-	else
-		ret = FAILED;
 	/*
 	 * All outstanding txcmplq I/Os should have been aborted by
 	 * the targets.  Unfortunately, some targets do not abide by
@@ -1333,27 +1295,19 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 	if (cnt)
 		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
 				    0, 0, LPFC_CTX_HOST);
-	loopcnt = 0;
-	while(cnt) {
-		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
-		if (++loopcnt
-		    > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
-			break;
+	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+	while (time_after(later, jiffies) && cnt) {
+		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
 		cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
 	}
 	if (cnt) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 			"0715 Bus Reset I/O flush failure: "
 			"cnt x%x left x%x\n", cnt, i);
 		ret = FAILED;
 	}
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
- out:
 	return ret;
 }


@@ -324,9 +324,7 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 		phba->work_ha |= HA_ERATT;
 		phba->work_hs = HS_FFER3;
-		/* hbalock should already be held */
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
+		lpfc_worker_wake_up(phba);
 		return NULL;
 	}
@@ -1309,9 +1307,7 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 	phba->work_ha |= HA_ERATT;
 	phba->work_hs = HS_FFER3;
-	/* hbalock should already be held */
-	if (phba->work_wait)
-		lpfc_worker_wake_up(phba);
+	lpfc_worker_wake_up(phba);
 	return;
 }
@@ -2611,12 +2607,9 @@ lpfc_mbox_timeout(unsigned long ptr)
 	phba->pport->work_port_events |= WORKER_MBOX_TMO;
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
-	if (!tmo_posted) {
-		spin_lock_irqsave(&phba->hbalock, iflag);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, iflag);
-	}
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
 }
 void
@@ -3374,8 +3367,12 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
 	for (i = 0; i < psli->num_rings; i++) {
 		pring = &psli->ring[i];
 		prev_pring_flag = pring->flag;
-		if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
+		/* Only slow rings */
+		if (pring->ringno == LPFC_ELS_RING) {
 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
+			/* Set the lpfc data pending flag */
+			set_bit(LPFC_DATA_READY, &phba->data_flags);
+		}
 		/*
 		 * Error everything on the txq since these iocbs have not been
 		 * given to the FW yet.
@@ -3434,8 +3431,12 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
 	spin_lock_irqsave(&phba->hbalock, flags);
 	for (i = 0; i < psli->num_rings; i++) {
 		pring = &psli->ring[i];
-		if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
+		/* Only slow rings */
+		if (pring->ringno == LPFC_ELS_RING) {
 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
+			/* Set the lpfc data pending flag */
+			set_bit(LPFC_DATA_READY, &phba->data_flags);
+		}
 		/*
 		 * Error everything on the txq since these iocbs have not been
@@ -3762,7 +3763,6 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
 			   lpfc_ctx_cmd ctx_cmd)
 {
 	struct lpfc_scsi_buf *lpfc_cmd;
-	struct scsi_cmnd *cmnd;
 	int rc = 1;
 	if (!(iocbq->iocb_flag &  LPFC_IO_FCP))
@@ -3772,19 +3772,20 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
 		return rc;
 	lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
-	cmnd = lpfc_cmd->pCmd;
-	if (cmnd == NULL)
+	if (lpfc_cmd->pCmd == NULL)
 		return rc;
 	switch (ctx_cmd) {
 	case LPFC_CTX_LUN:
-		if ((cmnd->device->id == tgt_id) &&
-		    (cmnd->device->lun == lun_id))
+		if ((lpfc_cmd->rdata->pnode) &&
+		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
+		    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
 			rc = 0;
 		break;
 	case LPFC_CTX_TGT:
-		if (cmnd->device->id == tgt_id)
+		if ((lpfc_cmd->rdata->pnode) &&
+		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
 			rc = 0;
 		break;
 	case LPFC_CTX_HOST:
@@ -3994,6 +3995,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
 	if (pmboxq->context1)
 		return MBX_NOT_FINISHED;
+	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
 	/* setup wake call as IOCB callback */
 	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 	/* setup context field to pass wait_queue pointer to wake function */
@@ -4159,7 +4161,7 @@ lpfc_intr_handler(int irq, void *dev_id)
 					"pwork:x%x hawork:x%x wait:x%x",
 					phba->work_ha, work_ha_copy,
 					(uint32_t)((unsigned long)
-					phba->work_wait));
+					&phba->work_waitq));
 				control &=
 					~(HC_R0INT_ENA << LPFC_ELS_RING);
@@ -4172,7 +4174,7 @@ lpfc_intr_handler(int irq, void *dev_id)
 					"x%x hawork:x%x wait:x%x",
 					phba->work_ha, work_ha_copy,
 					(uint32_t)((unsigned long)
-					phba->work_wait));
+					&phba->work_waitq));
 			}
 			spin_unlock(&phba->hbalock);
 		}
@@ -4297,9 +4299,8 @@ send_current_mbox:
 		spin_lock(&phba->hbalock);
 		phba->work_ha |= work_ha_copy;
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
 		spin_unlock(&phba->hbalock);
+		lpfc_worker_wake_up(phba);
 	}
 	ha_copy &= ~(phba->work_ha_mask);


@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
-#define LPFC_DRIVER_VERSION "8.2.6"
+#define LPFC_DRIVER_VERSION "8.2.7"
 #define LPFC_DRIVER_NAME		"lpfc"


@@ -216,6 +216,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
 	int vpi;
 	int rc = VPORT_ERROR;
 	int status;
+	int size;
 	if ((phba->sli_rev < 3) ||
 		!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
@@ -278,7 +279,20 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
 	memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8);
 	memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8);
+	size = strnlen(fc_vport->symbolic_name, LPFC_VNAME_LEN);
+	if (size) {
+		vport->vname = kzalloc(size+1, GFP_KERNEL);
+		if (!vport->vname) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+					 "1814 Create VPORT failed. "
+					 "vname allocation failed.\n");
+			rc = VPORT_ERROR;
+			lpfc_free_vpi(phba, vpi);
+			destroy_port(vport);
+			goto error_out;
+		}
+		memcpy(vport->vname, fc_vport->symbolic_name, size+1);
+	}
 	if (fc_vport->node_name != 0)
 		u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
 	if (fc_vport->port_name != 0)
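The new vname handling bounds the copy with strnlen() against LPFC_VNAME_LEN, allocates size+1 zeroed bytes, and copies from the caller's symbolic name so the stored string is NUL-terminated. A stand-alone sketch of that bounded-duplication pattern (user-space calloc in place of kzalloc; the length limit and function name are illustrative). It copies size bytes and relies on the zeroed allocation for termination, whereas the driver copies size+1, which behaves the same whenever the source is NUL-terminated within the limit:

    #include <stdlib.h>
    #include <string.h>

    #define NAME_LEN 100  /* illustrative stand-in for LPFC_VNAME_LEN */

    /* Duplicate at most NAME_LEN bytes of src, always NUL-terminated. */
    static char *dup_symbolic_name(const char *src)
    {
            size_t size = strnlen(src, NAME_LEN);
            char *name;

            if (!size)
                    return NULL;
            name = calloc(1, size + 1);   /* zeroed, like kzalloc() */
            if (!name)
                    return NULL;
            memcpy(name, src, size);      /* the +1 byte stays NUL */
            return name;
    }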


@@ -1765,7 +1765,7 @@ static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
 	default:
 		return 0;
 	}
-	if (mesg.event == mdev->ofdev.dev.power.power_state.event)
+	if (ms->phase == sleeping)
 		return 0;
 	scsi_block_requests(ms->host);
@@ -1780,8 +1780,6 @@ static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
 	disable_irq(ms->meshintr);
 	set_mesh_power(ms, 0);
-	mdev->ofdev.dev.power.power_state = mesg;
 	return 0;
 }
@@ -1790,7 +1788,7 @@ static int mesh_resume(struct macio_dev *mdev)
 	struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
 	unsigned long flags;
-	if (mdev->ofdev.dev.power.power_state.event == PM_EVENT_ON)
+	if (ms->phase != sleeping)
 		return 0;
 	set_mesh_power(ms, 1);
@@ -1801,8 +1799,6 @@ static int mesh_resume(struct macio_dev *mdev)
 	enable_irq(ms->meshintr);
 	scsi_unblock_requests(ms->host);
-	mdev->ofdev.dev.power.power_state.event = PM_EVENT_ON;
 	return 0;
 }


@@ -113,9 +113,6 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
 	.host_param_mask	= ISCSI_HOST_HWADDRESS |
 				  ISCSI_HOST_IPADDRESS |
 				  ISCSI_HOST_INITIATOR_NAME,
-	.sessiondata_size	= sizeof(struct ddb_entry),
-	.host_template		= &qla4xxx_driver_template,
 	.tgt_dscvr		= qla4xxx_tgt_dscvr,
 	.get_conn_param		= qla4xxx_conn_get_param,
 	.get_session_param	= qla4xxx_sess_get_param,
@@ -275,7 +272,7 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
 		return err;
 	}
-	ddb_entry->conn = iscsi_create_conn(ddb_entry->sess, 0);
+	ddb_entry->conn = iscsi_create_conn(ddb_entry->sess, 0, 0);
 	if (!ddb_entry->conn) {
 		iscsi_remove_session(ddb_entry->sess);
 		DEBUG2(printk(KERN_ERR "Could not add connection.\n"));
@@ -292,7 +289,8 @@ struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha)
 	struct ddb_entry *ddb_entry;
 	struct iscsi_cls_session *sess;
-	sess = iscsi_alloc_session(ha->host, &qla4xxx_iscsi_transport);
+	sess = iscsi_alloc_session(ha->host, &qla4xxx_iscsi_transport,
+				   sizeof(struct ddb_entry));
 	if (!sess)
 		return NULL;


@@ -855,9 +855,18 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
 		good_bytes = scsi_bufflen(cmd);
 		if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
+			int old_good_bytes = good_bytes;
 			drv = scsi_cmd_to_driver(cmd);
 			if (drv->done)
 				good_bytes = drv->done(cmd);
+			/*
+			 * USB may not give sense identifying bad sector and
+			 * simply return a residue instead, so subtract off the
+			 * residue if drv->done() error processing indicates no
+			 * change to the completion length.
+			 */
+			if (good_bytes == old_good_bytes)
+				good_bytes -= scsi_get_resid(cmd);
 		}
 		scsi_io_completion(cmd, good_bytes);
 	}


@@ -94,6 +94,7 @@ static const char * scsi_debug_version_date = "20070104";
 #define DEF_VIRTUAL_GB   0
 #define DEF_FAKE_RW	0
 #define DEF_VPD_USE_HOSTNO 1
+#define DEF_SECTOR_SIZE 512
 /* bit mask values for scsi_debug_opts */
 #define SCSI_DEBUG_OPT_NOISE   1
@@ -142,6 +143,7 @@ static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
 static int scsi_debug_fake_rw = DEF_FAKE_RW;
 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
+static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
 static int scsi_debug_cmnd_count = 0;
@@ -157,11 +159,6 @@ static int sdebug_heads;		/* heads per disk */
 static int sdebug_cylinders_per;	/* cylinders per surface */
 static int sdebug_sectors_per;		/* sectors per cylinder */
-/* default sector size is 512 bytes, 2**9 bytes */
-#define POW2_SECT_SIZE 9
-#define SECT_SIZE (1 << POW2_SECT_SIZE)
-#define SECT_SIZE_PER(TGT) SECT_SIZE
 #define SDEBUG_MAX_PARTS 4
 #define SDEBUG_SENSE_LEN 32
@@ -646,6 +643,14 @@ static int inquiry_evpd_b0(unsigned char * arr)
 	return sizeof(vpdb0_data);
 }
+static int inquiry_evpd_b1(unsigned char *arr)
+{
+	memset(arr, 0, 0x3c);
+	arr[0] = 0;
+	arr[1] = 1;
+	return 0x3c;
+}
 #define SDEBUG_LONG_INQ_SZ 96
 #define SDEBUG_MAX_INQ_ARR_SZ 584
@@ -701,6 +706,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
 			arr[n++] = 0x88;	/* SCSI ports */
 			arr[n++] = 0x89;	/* ATA information */
 			arr[n++] = 0xb0;	/* Block limits (SBC) */
+			arr[n++] = 0xb1;	/* Block characteristics (SBC) */
 			arr[3] = n - 4;	  /* number of supported VPD pages */
 		} else if (0x80 == cmd[2]) { /* unit serial number */
 			arr[1] = cmd[2];	/*sanity */
@@ -740,6 +746,9 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
 		} else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
 			arr[1] = cmd[2];        /*sanity */
 			arr[3] = inquiry_evpd_b0(&arr[4]);
+		} else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
+			arr[1] = cmd[2];        /*sanity */
+			arr[3] = inquiry_evpd_b1(&arr[4]);
 		} else {
 			/* Illegal request, invalid field in cdb */
 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
@@ -878,8 +887,8 @@ static int resp_readcap(struct scsi_cmnd * scp,
 		arr[2] = 0xff;
 		arr[3] = 0xff;
 	}
-	arr[6] = (SECT_SIZE_PER(target) >> 8) & 0xff;
-	arr[7] = SECT_SIZE_PER(target) & 0xff;
+	arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
+	arr[7] = scsi_debug_sector_size & 0xff;
 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
 }
@@ -902,10 +911,10 @@ static int resp_readcap16(struct scsi_cmnd * scp,
 		capac = sdebug_capacity - 1;
 	for (k = 0; k < 8; ++k, capac >>= 8)
 		arr[7 - k] = capac & 0xff;
-	arr[8] = (SECT_SIZE_PER(target) >> 24) & 0xff;
-	arr[9] = (SECT_SIZE_PER(target) >> 16) & 0xff;
-	arr[10] = (SECT_SIZE_PER(target) >> 8) & 0xff;
-	arr[11] = SECT_SIZE_PER(target) & 0xff;
+	arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
+	arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
+	arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
+	arr[11] = scsi_debug_sector_size & 0xff;
 	return fill_from_dev_buffer(scp, arr,
 				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
 }
@@ -1019,20 +1028,20 @@ static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
 {       /* Format device page for mode_sense */
 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
 				     0, 0, 0, 0, 0, 0, 0, 0,
 				     0, 0, 0, 0, 0x40, 0, 0, 0};
 	memcpy(p, format_pg, sizeof(format_pg));
 	p[10] = (sdebug_sectors_per >> 8) & 0xff;
 	p[11] = sdebug_sectors_per & 0xff;
-	p[12] = (SECT_SIZE >> 8) & 0xff;
-	p[13] = SECT_SIZE & 0xff;
+	p[12] = (scsi_debug_sector_size >> 8) & 0xff;
+	p[13] = scsi_debug_sector_size & 0xff;
 	if (DEV_REMOVEABLE(target))
 		p[20] |= 0x20; /* should agree with INQUIRY */
 	if (1 == pcontrol)
 		memset(p + 2, 0, sizeof(format_pg) - 2);
 	return sizeof(format_pg);
 }
 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
@@ -1206,8 +1215,8 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
 			ap[2] = (sdebug_capacity >> 8) & 0xff;
 			ap[3] = sdebug_capacity & 0xff;
 		}
-		ap[6] = (SECT_SIZE_PER(target) >> 8) & 0xff;
-		ap[7] = SECT_SIZE_PER(target) & 0xff;
+		ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
+		ap[7] = scsi_debug_sector_size & 0xff;
 		offset += bd_len;
 		ap = arr + offset;
 	} else if (16 == bd_len) {
@@ -1215,10 +1224,10 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
 		for (k = 0; k < 8; ++k, capac >>= 8)
 			ap[7 - k] = capac & 0xff;
-		ap[12] = (SECT_SIZE_PER(target) >> 24) & 0xff;
-		ap[13] = (SECT_SIZE_PER(target) >> 16) & 0xff;
-		ap[14] = (SECT_SIZE_PER(target) >> 8) & 0xff;
-		ap[15] = SECT_SIZE_PER(target) & 0xff;
+		ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
+		ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
+		ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
+		ap[15] = scsi_debug_sector_size & 0xff;
 		offset += bd_len;
 		ap = arr + offset;
 	}
@@ -1519,10 +1528,10 @@ static int do_device_access(struct scsi_cmnd *scmd,
 	if (block + num > sdebug_store_sectors)
 		rest = block + num - sdebug_store_sectors;
-	ret = func(scmd, fake_storep + (block * SECT_SIZE),
-		   (num - rest) * SECT_SIZE);
+	ret = func(scmd, fake_storep + (block * scsi_debug_sector_size),
+		   (num - rest) * scsi_debug_sector_size);
 	if (!ret && rest)
-		ret = func(scmd, fake_storep, rest * SECT_SIZE);
+		ret = func(scmd, fake_storep, rest * scsi_debug_sector_size);
 	return ret;
 }
@@ -1575,10 +1584,10 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
 	write_unlock_irqrestore(&atomic_rw, iflags);
 	if (-1 == ret)
 		return (DID_ERROR << 16);
-	else if ((ret < (num * SECT_SIZE)) &&
+	else if ((ret < (num * scsi_debug_sector_size)) &&
 		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
 		printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
-		       " IO sent=%d bytes\n", num * SECT_SIZE, ret);
+		       " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
 	return 0;
 }
@@ -2085,6 +2094,7 @@ module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
 		   S_IRUGO | S_IWUSR);
+module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
 MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -2106,6 +2116,7 @@ MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
+MODULE_PARM_DESC(sector_size, "hardware sector size in bytes (def=512)");
 static char sdebug_info[256];
@@ -2158,8 +2169,9 @@ static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **sta
 	    scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
 	    scsi_debug_cmnd_count, scsi_debug_delay,
 	    scsi_debug_max_luns, scsi_debug_scsi_level,
-	    SECT_SIZE, sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
-	    num_aborts, num_dev_resets, num_bus_resets, num_host_resets);
+	    scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
+	    sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
+	    num_host_resets);
 	if (pos < offset) {
 		len = 0;
 		begin = pos;
@@ -2434,6 +2446,12 @@ static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
 DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
 	    sdebug_vpd_use_hostno_store);
+static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
+}
+DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);
 /* Note: The following function creates attribute files in the
    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
    files (over those found in the /sys/module/scsi_debug/parameters
@@ -2459,11 +2477,13 @@ static int do_create_driverfs_files(void)
 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
+	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
 	return ret;
 }
 static void do_remove_driverfs_files(void)
 {
+	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
@@ -2499,10 +2519,22 @@ static int __init scsi_debug_init(void)
 	int k;
 	int ret;
+	switch (scsi_debug_sector_size) {
+	case 512:
+	case 1024:
+	case 2048:
+	case 4096:
+		break;
+	default:
+		printk(KERN_ERR "scsi_debug_init: invalid sector_size %u\n",
+		       scsi_debug_sector_size);
+		return -EINVAL;
+	}
 	if (scsi_debug_dev_size_mb < 1)
 		scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
 	sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
-	sdebug_store_sectors = sz / SECT_SIZE;
+	sdebug_store_sectors = sz / scsi_debug_sector_size;
 	sdebug_capacity = get_sdebug_capacity();
 	/* play around with geometry, don't waste too much on track 0 */
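scsi_debug_init() now rejects unsupported sector sizes at module load; only 512, 1024, 2048 and 4096 pass the switch. The same acceptance test can be phrased as a power-of-two range check; a small sketch (the function name is illustrative, not part of the driver):

    #include <stdbool.h>

    /* Accept 512, 1024, 2048 or 4096: a power of two within the range. */
    static bool sector_size_valid(unsigned int sz)
    {
            return sz >= 512 && sz <= 4096 && (sz & (sz - 1)) == 0;
    }

For example, sector_size_valid(4096) is true while sector_size_valid(520) is false, so 520-byte protection-information formats would still be rejected, exactly as the switch rejects them.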


@@ -298,6 +298,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
  */
 static int scsi_check_sense(struct scsi_cmnd *scmd)
 {
+	struct scsi_device *sdev = scmd->device;
 	struct scsi_sense_hdr sshdr;
 	if (! scsi_command_normalize_sense(scmd, &sshdr))
@@ -306,6 +307,16 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
 	if (scsi_sense_is_deferred(&sshdr))
 		return NEEDS_RETRY;
+	if (sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh &&
+			sdev->scsi_dh_data->scsi_dh->check_sense) {
+		int rc;
+		rc = sdev->scsi_dh_data->scsi_dh->check_sense(sdev, &sshdr);
+		if (rc != SCSI_RETURN_NOT_HANDLED)
+			return rc;
+		/* handler does not care. Drop down to default handling */
+	}
 	/*
 	 * Previous logic looked for FILEMARK, EOM or ILI which are
 	 * mainly associated with tapes and returned SUCCESS.


@@ -65,7 +65,7 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
 };
 #undef SP
-static struct kmem_cache *scsi_bidi_sdb_cache;
+static struct kmem_cache *scsi_sdb_cache;
 static void scsi_run_queue(struct request_queue *q);
@@ -784,7 +784,7 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
 		struct scsi_data_buffer *bidi_sdb =
 			cmd->request->next_rq->special;
 		scsi_free_sgtable(bidi_sdb);
-		kmem_cache_free(scsi_bidi_sdb_cache, bidi_sdb);
+		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
 		cmd->request->next_rq->special = NULL;
 	}
 }
@@ -1059,7 +1059,7 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 	if (blk_bidi_rq(cmd->request)) {
 		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
-			scsi_bidi_sdb_cache, GFP_ATOMIC);
+			scsi_sdb_cache, GFP_ATOMIC);
 		if (!bidi_sdb) {
 			error = BLKPREP_DEFER;
 			goto err_exit;
@@ -1169,6 +1169,14 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
 		if (ret != BLKPREP_OK)
 			return ret;
+	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
+		     && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
+		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
+		if (ret != BLKPREP_OK)
+			return ret;
+	}
 	/*
 	 * Filesystem requests must transfer data.
 	 */
@@ -1329,7 +1337,6 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
 				printk("scsi%d unblocking host at zero depth\n",
 					shost->host_no));
 		} else {
-			blk_plug_device(q);
 			return 0;
 		}
 	}
@@ -1693,11 +1700,11 @@ int __init scsi_init_queue(void)
 		return -ENOMEM;
 	}
-	scsi_bidi_sdb_cache = kmem_cache_create("scsi_bidi_sdb",
+	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
 					sizeof(struct scsi_data_buffer),
 					0, 0, NULL);
-	if (!scsi_bidi_sdb_cache) {
-		printk(KERN_ERR "SCSI: can't init scsi bidi sdb cache\n");
+	if (!scsi_sdb_cache) {
+		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
 		goto cleanup_io_context;
 	}
@@ -1710,7 +1717,7 @@ int __init scsi_init_queue(void)
 		if (!sgp->slab) {
 			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
 					sgp->name);
-			goto cleanup_bidi_sdb;
+			goto cleanup_sdb;
 		}
 		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
@@ -1718,13 +1725,13 @@ int __init scsi_init_queue(void)
 		if (!sgp->pool) {
 			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
 					sgp->name);
-			goto cleanup_bidi_sdb;
+			goto cleanup_sdb;
 		}
 	}
 	return 0;
-cleanup_bidi_sdb:
+cleanup_sdb:
 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
 		if (sgp->pool)
@@ -1732,7 +1739,7 @@ cleanup_bidi_sdb:
 		if (sgp->slab)
 			kmem_cache_destroy(sgp->slab);
 	}
-	kmem_cache_destroy(scsi_bidi_sdb_cache);
+	kmem_cache_destroy(scsi_sdb_cache);
 cleanup_io_context:
 	kmem_cache_destroy(scsi_io_context_cache);
@@ -1744,7 +1751,7 @@ void scsi_exit_queue(void)
 	int i;
 	kmem_cache_destroy(scsi_io_context_cache);
-	kmem_cache_destroy(scsi_bidi_sdb_cache);
+	kmem_cache_destroy(scsi_sdb_cache);
 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;


@@ -346,7 +346,7 @@ static void scsi_target_dev_release(struct device *dev)
 	put_device(parent);
 }
-struct device_type scsi_target_type = {
+static struct device_type scsi_target_type = {
 	.name =		"scsi_target",
 	.release =	scsi_target_dev_release,
 };


@@ -439,6 +439,7 @@ struct bus_type scsi_bus_type = {
 	.resume		= scsi_bus_resume,
 	.remove		= scsi_bus_remove,
 };
+EXPORT_SYMBOL_GPL(scsi_bus_type);
 int scsi_sysfs_register(void)
 {


@@ -30,10 +30,11 @@
 #include <scsi/scsi_transport_iscsi.h>
 #include <scsi/iscsi_if.h>
-#define ISCSI_SESSION_ATTRS 19
+#define ISCSI_SESSION_ATTRS 21
 #define ISCSI_CONN_ATTRS 13
 #define ISCSI_HOST_ATTRS 4
-#define ISCSI_TRANSPORT_VERSION "2.0-869"
+#define ISCSI_TRANSPORT_VERSION "2.0-870"
 struct iscsi_internal {
 	int daemon_pid;
@@ -101,16 +102,10 @@ show_transport_##name(struct device *dev, \
 static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
 show_transport_attr(caps, "0x%x");
-show_transport_attr(max_lun, "%d");
-show_transport_attr(max_conn, "%d");
-show_transport_attr(max_cmd_len, "%d");
 static struct attribute *iscsi_transport_attrs[] = {
 	&dev_attr_handle.attr,
 	&dev_attr_caps.attr,
-	&dev_attr_max_lun.attr,
-	&dev_attr_max_conn.attr,
-	&dev_attr_max_cmd_len.attr,
 	NULL,
 };
@@ -118,18 +113,139 @@ static struct attribute_group iscsi_transport_group = {
 	.attrs = iscsi_transport_attrs,
 };
+/*
+ * iSCSI endpoint attrs
+ */
+#define iscsi_dev_to_endpoint(_dev) \
+	container_of(_dev, struct iscsi_endpoint, dev)
+#define ISCSI_ATTR(_prefix,_name,_mode,_show,_store)	\
+struct device_attribute dev_attr_##_prefix##_##_name =	\
+	__ATTR(_name,_mode,_show,_store)
+static void iscsi_endpoint_release(struct device *dev)
+{
+	struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+	kfree(ep);
+}
+static struct class iscsi_endpoint_class = {
+	.name = "iscsi_endpoint",
+	.dev_release = iscsi_endpoint_release,
+};
+static ssize_t
+show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+	return sprintf(buf, "%u\n", ep->id);
+}
+static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
+static struct attribute *iscsi_endpoint_attrs[] = {
+	&dev_attr_ep_handle.attr,
+	NULL,
+};
+static struct attribute_group iscsi_endpoint_group = {
+	.attrs = iscsi_endpoint_attrs,
+};
+#define ISCSI_MAX_EPID -1
+static int iscsi_match_epid(struct device *dev, void *data)
+{
+	struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+	unsigned int *epid = (unsigned int *) data;
+	return *epid == ep->id;
+}
+struct iscsi_endpoint *
+iscsi_create_endpoint(int dd_size)
+{
+	struct device *dev;
+	struct iscsi_endpoint *ep;
+	unsigned int id;
+	int err;
+	for (id = 1; id < ISCSI_MAX_EPID; id++) {
+		dev = class_find_device(&iscsi_endpoint_class, &id,
+					iscsi_match_epid);
+		if (!dev)
+			break;
+	}
+	if (id == ISCSI_MAX_EPID) {
+		printk(KERN_ERR "Too many connections. Max supported %u\n",
+		       ISCSI_MAX_EPID - 1);
+		return NULL;
+	}
+	ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
+	if (!ep)
+		return NULL;
+	ep->id = id;
+	ep->dev.class = &iscsi_endpoint_class;
+	snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id);
+	err = device_register(&ep->dev);
+	if (err)
+		goto free_ep;
+	err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
+	if (err)
+		goto unregister_dev;
+	if (dd_size)
+		ep->dd_data = &ep[1];
+	return ep;
+unregister_dev:
+	device_unregister(&ep->dev);
+	return NULL;
+free_ep:
+	kfree(ep);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(iscsi_create_endpoint);
+void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
+{
+	sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group);
+	device_unregister(&ep->dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
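iscsi_create_endpoint() above sizes a single allocation for the structure plus dd_size bytes of LLD-private data and points dd_data at the memory immediately past the struct (&ep[1]); iscsi_alloc_session() later in this file uses the same trick. A user-space sketch of that trailing-allocation idiom (struct and names are illustrative):

    #include <stdlib.h>

    struct endpoint {
            unsigned int id;
            void *dd_data;  /* points just past the struct when dd_size > 0 */
    };

    static struct endpoint *endpoint_alloc(size_t dd_size)
    {
            /* One zeroed allocation covers the struct and the private area,
             * like kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL). */
            struct endpoint *ep = calloc(1, sizeof(*ep) + dd_size);

            if (!ep)
                    return NULL;
            if (dd_size)
                    ep->dd_data = &ep[1];  /* first byte after the struct */
            return ep;
    }

One allocation instead of two simplifies the error paths and frees everything with a single kfree(); the assumption is that the struct's alignment is sufficient for whatever the private area holds.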
struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
{
struct iscsi_endpoint *ep;
struct device *dev;
dev = class_find_device(&iscsi_endpoint_class, &handle,
iscsi_match_epid);
if (!dev)
return NULL;
ep = iscsi_dev_to_endpoint(dev);
/*
* we can drop this now because the interface will prevent
* removals and lookups from racing.
*/
put_device(dev);
return ep;
}
EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
static int iscsi_setup_host(struct transport_container *tc, struct device *dev, static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
struct device *cdev) struct device *cdev)
{ {
struct Scsi_Host *shost = dev_to_shost(dev); struct Scsi_Host *shost = dev_to_shost(dev);
struct iscsi_host *ihost = shost->shost_data; struct iscsi_cls_host *ihost = shost->shost_data;
memset(ihost, 0, sizeof(*ihost)); memset(ihost, 0, sizeof(*ihost));
INIT_LIST_HEAD(&ihost->sessions);
mutex_init(&ihost->mutex);
atomic_set(&ihost->nr_scans, 0); atomic_set(&ihost->nr_scans, 0);
mutex_init(&ihost->mutex);
snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d", snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d",
shost->host_no); shost->host_no);
@ -144,7 +260,7 @@ static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
struct device *cdev) struct device *cdev)
{ {
struct Scsi_Host *shost = dev_to_shost(dev); struct Scsi_Host *shost = dev_to_shost(dev);
struct iscsi_host *ihost = shost->shost_data; struct iscsi_cls_host *ihost = shost->shost_data;
destroy_workqueue(ihost->scan_workq); destroy_workqueue(ihost->scan_workq);
return 0; return 0;
@ -287,6 +403,24 @@ static int iscsi_is_session_dev(const struct device *dev)
return dev->release == iscsi_session_release; return dev->release == iscsi_session_release;
} }
static int iscsi_iter_session_fn(struct device *dev, void *data)
{
void (* fn) (struct iscsi_cls_session *) = data;
if (!iscsi_is_session_dev(dev))
return 0;
fn(iscsi_dev_to_session(dev));
return 0;
}
void iscsi_host_for_each_session(struct Scsi_Host *shost,
void (*fn)(struct iscsi_cls_session *))
{
device_for_each_child(&shost->shost_gendev, fn,
iscsi_iter_session_fn);
}
EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
/** /**
* iscsi_scan_finished - helper to report when running scans are done * iscsi_scan_finished - helper to report when running scans are done
* @shost: scsi host * @shost: scsi host
@ -297,7 +431,7 @@ static int iscsi_is_session_dev(const struct device *dev)
*/ */
int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time) int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
{ {
struct iscsi_host *ihost = shost->shost_data; struct iscsi_cls_host *ihost = shost->shost_data;
/* /*
* qla4xxx will have kicked off some session unblocks before calling * qla4xxx will have kicked off some session unblocks before calling
* scsi_scan_host, so just wait for them to complete. * scsi_scan_host, so just wait for them to complete.
@ -306,22 +440,61 @@ int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
} }
EXPORT_SYMBOL_GPL(iscsi_scan_finished); EXPORT_SYMBOL_GPL(iscsi_scan_finished);
struct iscsi_scan_data {
unsigned int channel;
unsigned int id;
unsigned int lun;
};
static int iscsi_user_scan_session(struct device *dev, void *data)
{
struct iscsi_scan_data *scan_data = data;
struct iscsi_cls_session *session;
struct Scsi_Host *shost;
struct iscsi_cls_host *ihost;
unsigned long flags;
unsigned int id;
if (!iscsi_is_session_dev(dev))
return 0;
session = iscsi_dev_to_session(dev);
shost = iscsi_session_to_shost(session);
ihost = shost->shost_data;
mutex_lock(&ihost->mutex);
spin_lock_irqsave(&session->lock, flags);
if (session->state != ISCSI_SESSION_LOGGED_IN) {
spin_unlock_irqrestore(&session->lock, flags);
mutex_unlock(&ihost->mutex);
return 0;
}
id = session->target_id;
spin_unlock_irqrestore(&session->lock, flags);
if (id != ISCSI_MAX_TARGET) {
if ((scan_data->channel == SCAN_WILD_CARD ||
scan_data->channel == 0) &&
(scan_data->id == SCAN_WILD_CARD ||
scan_data->id == id))
scsi_scan_target(&session->dev, 0, id,
scan_data->lun, 1);
}
mutex_unlock(&ihost->mutex);
return 0;
}
static int iscsi_user_scan(struct Scsi_Host *shost, uint channel, static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
uint id, uint lun) uint id, uint lun)
{ {
struct iscsi_host *ihost = shost->shost_data; struct iscsi_scan_data scan_data;
struct iscsi_cls_session *session;
mutex_lock(&ihost->mutex); scan_data.channel = channel;
list_for_each_entry(session, &ihost->sessions, host_list) { scan_data.id = id;
if ((channel == SCAN_WILD_CARD || channel == 0) && scan_data.lun = lun;
(id == SCAN_WILD_CARD || id == session->target_id))
scsi_scan_target(&session->dev, 0,
session->target_id, lun, 1);
}
mutex_unlock(&ihost->mutex);
return 0; return device_for_each_child(&shost->shost_gendev, &scan_data,
iscsi_user_scan_session);
} }
static void iscsi_scan_session(struct work_struct *work) static void iscsi_scan_session(struct work_struct *work)
@ -329,19 +502,14 @@ static void iscsi_scan_session(struct work_struct *work)
struct iscsi_cls_session *session = struct iscsi_cls_session *session =
container_of(work, struct iscsi_cls_session, scan_work); container_of(work, struct iscsi_cls_session, scan_work);
struct Scsi_Host *shost = iscsi_session_to_shost(session); struct Scsi_Host *shost = iscsi_session_to_shost(session);
struct iscsi_host *ihost = shost->shost_data; struct iscsi_cls_host *ihost = shost->shost_data;
unsigned long flags; struct iscsi_scan_data scan_data;
spin_lock_irqsave(&session->lock, flags); scan_data.channel = 0;
if (session->state != ISCSI_SESSION_LOGGED_IN) { scan_data.id = SCAN_WILD_CARD;
spin_unlock_irqrestore(&session->lock, flags); scan_data.lun = SCAN_WILD_CARD;
goto done;
}
spin_unlock_irqrestore(&session->lock, flags);
scsi_scan_target(&session->dev, 0, session->target_id, iscsi_user_scan_session(&session->dev, &scan_data);
SCAN_WILD_CARD, 1);
done:
atomic_dec(&ihost->nr_scans); atomic_dec(&ihost->nr_scans);
} }
@ -381,7 +549,7 @@ static void __iscsi_unblock_session(struct work_struct *work)
container_of(work, struct iscsi_cls_session, container_of(work, struct iscsi_cls_session,
unblock_work); unblock_work);
struct Scsi_Host *shost = iscsi_session_to_shost(session); struct Scsi_Host *shost = iscsi_session_to_shost(session);
struct iscsi_host *ihost = shost->shost_data; struct iscsi_cls_host *ihost = shost->shost_data;
unsigned long flags; unsigned long flags;
/* /*
@ -449,15 +617,19 @@ static void __iscsi_unbind_session(struct work_struct *work)
container_of(work, struct iscsi_cls_session, container_of(work, struct iscsi_cls_session,
unbind_work); unbind_work);
struct Scsi_Host *shost = iscsi_session_to_shost(session); struct Scsi_Host *shost = iscsi_session_to_shost(session);
struct iscsi_host *ihost = shost->shost_data; struct iscsi_cls_host *ihost = shost->shost_data;
unsigned long flags;
/* Prevent new scans and make sure scanning is not in progress */ /* Prevent new scans and make sure scanning is not in progress */
mutex_lock(&ihost->mutex); mutex_lock(&ihost->mutex);
if (list_empty(&session->host_list)) { spin_lock_irqsave(&session->lock, flags);
if (session->target_id == ISCSI_MAX_TARGET) {
spin_unlock_irqrestore(&session->lock, flags);
mutex_unlock(&ihost->mutex); mutex_unlock(&ihost->mutex);
return; return;
} }
list_del_init(&session->host_list); session->target_id = ISCSI_MAX_TARGET;
spin_unlock_irqrestore(&session->lock, flags);
mutex_unlock(&ihost->mutex); mutex_unlock(&ihost->mutex);
scsi_remove_target(&session->dev); scsi_remove_target(&session->dev);
@ -467,18 +639,18 @@ static void __iscsi_unbind_session(struct work_struct *work)
static int iscsi_unbind_session(struct iscsi_cls_session *session) static int iscsi_unbind_session(struct iscsi_cls_session *session)
{ {
struct Scsi_Host *shost = iscsi_session_to_shost(session); struct Scsi_Host *shost = iscsi_session_to_shost(session);
struct iscsi_host *ihost = shost->shost_data; struct iscsi_cls_host *ihost = shost->shost_data;
return queue_work(ihost->scan_workq, &session->unbind_work); return queue_work(ihost->scan_workq, &session->unbind_work);
} }
struct iscsi_cls_session * struct iscsi_cls_session *
iscsi_alloc_session(struct Scsi_Host *shost, iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
struct iscsi_transport *transport) int dd_size)
{ {
struct iscsi_cls_session *session; struct iscsi_cls_session *session;
session = kzalloc(sizeof(*session) + transport->sessiondata_size, session = kzalloc(sizeof(*session) + dd_size,
GFP_KERNEL); GFP_KERNEL);
if (!session) if (!session)
return NULL; return NULL;
@ -487,7 +659,6 @@ iscsi_alloc_session(struct Scsi_Host *shost,
session->recovery_tmo = 120; session->recovery_tmo = 120;
session->state = ISCSI_SESSION_FREE; session->state = ISCSI_SESSION_FREE;
INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout); INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
INIT_LIST_HEAD(&session->host_list);
INIT_LIST_HEAD(&session->sess_list); INIT_LIST_HEAD(&session->sess_list);
INIT_WORK(&session->unblock_work, __iscsi_unblock_session); INIT_WORK(&session->unblock_work, __iscsi_unblock_session);
INIT_WORK(&session->block_work, __iscsi_block_session); INIT_WORK(&session->block_work, __iscsi_block_session);
@@ -500,22 +671,57 @@ iscsi_alloc_session(struct Scsi_Host *shost,
 	session->dev.parent = &shost->shost_gendev;
 	session->dev.release = iscsi_session_release;
 	device_initialize(&session->dev);
-	if (transport->sessiondata_size)
+	if (dd_size)
 		session->dd_data = &session[1];
 	return session;
 }
 EXPORT_SYMBOL_GPL(iscsi_alloc_session);

+static int iscsi_get_next_target_id(struct device *dev, void *data)
+{
+	struct iscsi_cls_session *session;
+	unsigned long flags;
+	int err = 0;
+
+	if (!iscsi_is_session_dev(dev))
+		return 0;
+
+	session = iscsi_dev_to_session(dev);
+	spin_lock_irqsave(&session->lock, flags);
+	if (*((unsigned int *) data) == session->target_id)
+		err = -EEXIST;
+	spin_unlock_irqrestore(&session->lock, flags);
+	return err;
+}
+
 int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
 {
 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
-	struct iscsi_host *ihost;
+	struct iscsi_cls_host *ihost;
 	unsigned long flags;
+	unsigned int id = target_id;
 	int err;

 	ihost = shost->shost_data;
 	session->sid = atomic_add_return(1, &iscsi_session_nr);
-	session->target_id = target_id;
+
+	if (id == ISCSI_MAX_TARGET) {
+		for (id = 0; id < ISCSI_MAX_TARGET; id++) {
+			err = device_for_each_child(&shost->shost_gendev, &id,
+						    iscsi_get_next_target_id);
+			if (!err)
+				break;
+		}
+
+		if (id == ISCSI_MAX_TARGET) {
+			iscsi_cls_session_printk(KERN_ERR, session,
+						 "Too many iscsi targets. Max "
+						 "number of targets is %d.\n",
+						 ISCSI_MAX_TARGET - 1);
+			goto release_host;
+		}
+	}
+	session->target_id = id;

 	snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
 		 session->sid);
@@ -531,10 +737,6 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
 	list_add(&session->sess_list, &sesslist);
 	spin_unlock_irqrestore(&sesslock, flags);

-	mutex_lock(&ihost->mutex);
-	list_add(&session->host_list, &ihost->sessions);
-	mutex_unlock(&ihost->mutex);
-
 	iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
 	return 0;
@@ -548,18 +750,18 @@ EXPORT_SYMBOL_GPL(iscsi_add_session);
  * iscsi_create_session - create iscsi class session
  * @shost: scsi host
  * @transport: iscsi transport
+ * @dd_size: private driver data size
  * @target_id: which target
  *
  * This can be called from a LLD or iscsi_transport.
  */
 struct iscsi_cls_session *
-iscsi_create_session(struct Scsi_Host *shost,
-		     struct iscsi_transport *transport,
-		     unsigned int target_id)
+iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+		     int dd_size, unsigned int target_id)
 {
 	struct iscsi_cls_session *session;

-	session = iscsi_alloc_session(shost, transport);
+	session = iscsi_alloc_session(shost, transport, dd_size);
 	if (!session)
 		return NULL;
@@ -595,7 +797,7 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
 void iscsi_remove_session(struct iscsi_cls_session *session)
 {
 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
-	struct iscsi_host *ihost = shost->shost_data;
+	struct iscsi_cls_host *ihost = shost->shost_data;
 	unsigned long flags;
 	int err;
@ -661,6 +863,7 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
/** /**
* iscsi_create_conn - create iscsi class connection * iscsi_create_conn - create iscsi class connection
* @session: iscsi cls session * @session: iscsi cls session
* @dd_size: private driver data size
* @cid: connection id * @cid: connection id
* *
* This can be called from a LLD or iscsi_transport. The connection * This can be called from a LLD or iscsi_transport. The connection
@@ -673,18 +876,17 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
  * non-zero.
  */
 struct iscsi_cls_conn *
-iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
+iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
 {
 	struct iscsi_transport *transport = session->transport;
 	struct iscsi_cls_conn *conn;
 	unsigned long flags;
 	int err;

-	conn = kzalloc(sizeof(*conn) + transport->conndata_size, GFP_KERNEL);
+	conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL);
 	if (!conn)
 		return NULL;
-
-	if (transport->conndata_size)
+	if (dd_size)
 		conn->dd_data = &conn[1];

 	INIT_LIST_HEAD(&conn->conn_list);
@ -1017,21 +1219,20 @@ int iscsi_session_event(struct iscsi_cls_session *session,
EXPORT_SYMBOL_GPL(iscsi_session_event); EXPORT_SYMBOL_GPL(iscsi_session_event);
static int static int
iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev) iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
struct iscsi_uevent *ev, uint32_t initial_cmdsn,
uint16_t cmds_max, uint16_t queue_depth)
{ {
struct iscsi_transport *transport = priv->iscsi_transport; struct iscsi_transport *transport = priv->iscsi_transport;
struct iscsi_cls_session *session; struct iscsi_cls_session *session;
uint32_t hostno; uint32_t host_no;
session = transport->create_session(transport, &priv->t, session = transport->create_session(ep, cmds_max, queue_depth,
ev->u.c_session.cmds_max, initial_cmdsn, &host_no);
ev->u.c_session.queue_depth,
ev->u.c_session.initial_cmdsn,
&hostno);
if (!session) if (!session)
return -ENOMEM; return -ENOMEM;
ev->r.c_session_ret.host_no = hostno; ev->r.c_session_ret.host_no = host_no;
ev->r.c_session_ret.sid = session->sid; ev->r.c_session_ret.sid = session->sid;
return 0; return 0;
} }
@@ -1106,6 +1307,7 @@ static int
 iscsi_if_transport_ep(struct iscsi_transport *transport,
 		      struct iscsi_uevent *ev, int msg_type)
 {
+	struct iscsi_endpoint *ep;
 	struct sockaddr *dst_addr;
 	int rc = 0;
@ -1115,22 +1317,33 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
return -EINVAL; return -EINVAL;
dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev)); dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
rc = transport->ep_connect(dst_addr, ep = transport->ep_connect(dst_addr,
ev->u.ep_connect.non_blocking, ev->u.ep_connect.non_blocking);
&ev->r.ep_connect_ret.handle); if (IS_ERR(ep))
return PTR_ERR(ep);
ev->r.ep_connect_ret.handle = ep->id;
break; break;
case ISCSI_UEVENT_TRANSPORT_EP_POLL: case ISCSI_UEVENT_TRANSPORT_EP_POLL:
if (!transport->ep_poll) if (!transport->ep_poll)
return -EINVAL; return -EINVAL;
ev->r.retcode = transport->ep_poll(ev->u.ep_poll.ep_handle, ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle);
if (!ep)
return -EINVAL;
ev->r.retcode = transport->ep_poll(ep,
ev->u.ep_poll.timeout_ms); ev->u.ep_poll.timeout_ms);
break; break;
case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
if (!transport->ep_disconnect) if (!transport->ep_disconnect)
return -EINVAL; return -EINVAL;
transport->ep_disconnect(ev->u.ep_disconnect.ep_handle); ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle);
if (!ep)
return -EINVAL;
transport->ep_disconnect(ep);
break; break;
} }
return rc; return rc;
@@ -1195,6 +1408,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	struct iscsi_internal *priv;
 	struct iscsi_cls_session *session;
 	struct iscsi_cls_conn *conn;
+	struct iscsi_endpoint *ep = NULL;

 	priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
 	if (!priv)
@@ -1208,7 +1422,22 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	switch (nlh->nlmsg_type) {
 	case ISCSI_UEVENT_CREATE_SESSION:
-		err = iscsi_if_create_session(priv, ev);
+		err = iscsi_if_create_session(priv, ep, ev,
+					      ev->u.c_session.initial_cmdsn,
+					      ev->u.c_session.cmds_max,
+					      ev->u.c_session.queue_depth);
+		break;
+	case ISCSI_UEVENT_CREATE_BOUND_SESSION:
+		ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle);
+		if (!ep) {
+			err = -EINVAL;
+			break;
+		}
+
+		err = iscsi_if_create_session(priv, ep, ev,
+					      ev->u.c_bound_session.initial_cmdsn,
+					      ev->u.c_bound_session.cmds_max,
+					      ev->u.c_bound_session.queue_depth);
 		break;
 	case ISCSI_UEVENT_DESTROY_SESSION:
 		session = iscsi_session_lookup(ev->u.d_session.sid);
@@ -1414,6 +1643,8 @@ iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
 iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
 iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
 iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
+iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
+iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0);

 static ssize_t
 show_priv_session_state(struct device *dev, struct device_attribute *attr,
@ -1580,6 +1811,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
priv->daemon_pid = -1; priv->daemon_pid = -1;
priv->iscsi_transport = tt; priv->iscsi_transport = tt;
priv->t.user_scan = iscsi_user_scan; priv->t.user_scan = iscsi_user_scan;
if (!(tt->caps & CAP_DATA_PATH_OFFLOAD))
priv->t.create_work_queue = 1;
priv->dev.class = &iscsi_transport_class; priv->dev.class = &iscsi_transport_class;
snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name); snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name);
@@ -1595,7 +1828,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
 	priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
 	priv->t.host_attrs.ac.class = &iscsi_host_class.class;
 	priv->t.host_attrs.ac.match = iscsi_host_match;
-	priv->t.host_size = sizeof(struct iscsi_host);
+	priv->t.host_size = sizeof(struct iscsi_cls_host);
 	transport_container_register(&priv->t.host_attrs);

 	SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME);
@@ -1653,6 +1886,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
 	SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
 	SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
 	SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
+	SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME);
+	SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME);
 	SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
 	SETUP_PRIV_SESSION_RD_ATTR(state);
@@ -1668,6 +1903,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
 unregister_dev:
 	device_unregister(&priv->dev);
+	return NULL;
 free_priv:
 	kfree(priv);
 	return NULL;
@@ -1715,10 +1951,14 @@ static __init int iscsi_transport_init(void)
 	if (err)
 		return err;

-	err = transport_class_register(&iscsi_host_class);
+	err = class_register(&iscsi_endpoint_class);
 	if (err)
 		goto unregister_transport_class;

+	err = transport_class_register(&iscsi_host_class);
+	if (err)
+		goto unregister_endpoint_class;
+
 	err = transport_class_register(&iscsi_connection_class);
 	if (err)
 		goto unregister_host_class;
@@ -1727,8 +1967,8 @@ static __init int iscsi_transport_init(void)
 	if (err)
 		goto unregister_conn_class;

-	nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
-				    THIS_MODULE);
+	nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx,
+				    NULL, THIS_MODULE);
 	if (!nls) {
 		err = -ENOBUFS;
 		goto unregister_session_class;
@@ -1748,6 +1988,8 @@ unregister_conn_class:
 	transport_class_unregister(&iscsi_connection_class);
 unregister_host_class:
 	transport_class_unregister(&iscsi_host_class);
+unregister_endpoint_class:
+	class_unregister(&iscsi_endpoint_class);
 unregister_transport_class:
 	class_unregister(&iscsi_transport_class);
 	return err;
@@ -1760,6 +2002,7 @@ static void __exit iscsi_transport_exit(void)
 	transport_class_unregister(&iscsi_connection_class);
 	transport_class_unregister(&iscsi_session_class);
 	transport_class_unregister(&iscsi_host_class);
+	class_unregister(&iscsi_endpoint_class);
 	class_unregister(&iscsi_transport_class);
 }

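The net effect of the scsi_transport_iscsi.c changes above is that private-data sizes now travel with each allocation call instead of living in struct iscsi_transport, and an id of ISCSI_MAX_TARGET asks the class code to pick the next free target id. A minimal sketch of how an LLD might use the reworked API is below; the driver name and the private-data structs are hypothetical, only the iscsi_create_session()/iscsi_create_conn()/iscsi_destroy_session() signatures come from the diff.

    #include <scsi/scsi_transport_iscsi.h>

    /* Hypothetical LLD-side usage; dd_size arguments size the dd_data
     * area the class code appends to each object. */
    struct my_sess_data { int dummy; };
    struct my_conn_data { int dummy; };

    static struct iscsi_cls_session *my_setup(struct Scsi_Host *shost,
                                              struct iscsi_transport *tt)
    {
            struct iscsi_cls_session *session;
            struct iscsi_cls_conn *conn;

            /* ISCSI_MAX_TARGET => let iscsi_add_session() allocate an id */
            session = iscsi_create_session(shost, tt,
                                           sizeof(struct my_sess_data),
                                           ISCSI_MAX_TARGET);
            if (!session)
                    return NULL;

            conn = iscsi_create_conn(session, sizeof(struct my_conn_data), 0);
            if (!conn) {
                    iscsi_destroy_session(session);
                    return NULL;
            }
            return session;
    }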

@@ -58,8 +58,8 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_ioctl.h>
 #include <scsi/scsicam.h>
-#include <scsi/sd.h>

+#include "sd.h"
 #include "scsi_logging.h"

 MODULE_AUTHOR("Eric Youngdale");
@@ -295,11 +295,6 @@ static int sd_major(int major_idx)
 	}
 }

-static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
-{
-	return container_of(disk->private_data, struct scsi_disk, driver);
-}
-
 static struct scsi_disk *__scsi_disk_get(struct gendisk *disk)
 {
 	struct scsi_disk *sdkp = NULL;


@@ -48,6 +48,11 @@ struct scsi_disk {
 };
 #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)

+static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
+{
+	return container_of(disk->private_data, struct scsi_disk, driver);
+}
+
 #define sd_printk(prefix, sdsk, fmt, a...) \
 	(sdsk)->disk ? \
 	sdev_printk(prefix, (sdsk)->device, "[%s] " fmt, \

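Moving the scsi_disk() accessor into sd.h lets other sd-aware code map a gendisk back to its scsi_disk. It works because sd stores the address of the scsi_disk's embedded scsi_driver pointer in disk->private_data, so container_of() can recover the outer structure. A hedged illustration (the function is hypothetical; scsi_disk() and sd_printk() are the ones shown above):

    /* Illustrative only: recover the scsi_disk behind a gendisk. */
    static void my_dump_disk(struct gendisk *disk)
    {
            struct scsi_disk *sdkp = scsi_disk(disk);   /* from sd.h */

            sd_printk(KERN_INFO, sdkp, "looked up via gendisk %s\n",
                      disk->disk_name);
    }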

@@ -1036,6 +1036,9 @@ sg_ioctl(struct inode *inode, struct file *filp,
 	case SG_SCSI_RESET_DEVICE:
 		val = SCSI_TRY_RESET_DEVICE;
 		break;
+	case SG_SCSI_RESET_TARGET:
+		val = SCSI_TRY_RESET_TARGET;
+		break;
 	case SG_SCSI_RESET_BUS:
 		val = SCSI_TRY_RESET_BUS;
 		break;

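With the new case above, userspace can request a target reset through the existing SG_SCSI_RESET ioctl, which takes a pointer to an int selecting the reset level. A small hedged userspace sketch (the device path is illustrative, and SG_SCSI_RESET_TARGET needs a scsi/sg.h new enough to define it):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <scsi/sg.h>

    int main(void)
    {
            int val = SG_SCSI_RESET_TARGET; /* new value handled above */
            int fd = open("/dev/sg0", O_RDWR);  /* illustrative device */

            if (fd < 0 || ioctl(fd, SG_SCSI_RESET, &val) < 0) {
                    perror("SG_SCSI_RESET");
                    return 1;
            }
            return 0;
    }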

@@ -121,9 +121,7 @@ static __inline void sym_que_move(struct sym_quehead *orig,
 	}
 }

-#define sym_que_entry(ptr, type, member) \
-	((type *)((char *)(ptr)-(unsigned int)(&((type *)0)->member)))
+#define sym_que_entry(ptr, type, member) container_of(ptr, type, member)

 #define sym_insque(new, pos)	__sym_que_add(new, pos, (pos)->flink)

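The sym53c8xx change replaces an open-coded offset calculation (which truncated the pointer through unsigned int, bogus on 64-bit) with the kernel's container_of(). A quick illustration of the equivalence with a throwaway struct:

    /* Toy illustration of what sym_que_entry() now expands to. */
    struct item {
            int payload;
            struct sym_quehead link;    /* embedded queue node */
    };

    static struct item *node_to_item(struct sym_quehead *qp)
    {
            /* container_of(ptr, type, member) subtracts
             * offsetof(type, member) from ptr and returns the enclosing
             * structure -- exactly what the old expression open-coded,
             * but with correct pointer arithmetic and type checking. */
            return sym_que_entry(qp, struct item, link);
    }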

@@ -0,0 +1,8 @@
+#ifndef _LINUX_CRC_T10DIF_H
+#define _LINUX_CRC_T10DIF_H
+
+#include <linux/types.h>
+
+__u16 crc_t10dif(unsigned char const *, size_t);
+
+#endif

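crc_t10dif() computes the 16-bit CRC used as the guard tag in the T10 (SCSI) Data Integrity Field. A hedged sketch of how a consumer might checksum one 512-byte sector (the buffer and wrapper are illustrative; the library code behind this prototype must also be built in):

    #include <linux/crc-t10dif.h>

    /* Illustrative: compute the DIF guard tag for a 512-byte sector. */
    static __u16 my_sector_guard(const unsigned char *sector)
    {
            return crc_t10dif(sector, 512);
    }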

@@ -50,6 +50,7 @@ enum iscsi_uevent_e {
 	ISCSI_UEVENT_TGT_DSCVR		= UEVENT_BASE + 15,
 	ISCSI_UEVENT_SET_HOST_PARAM	= UEVENT_BASE + 16,
 	ISCSI_UEVENT_UNBIND_SESSION	= UEVENT_BASE + 17,
+	ISCSI_UEVENT_CREATE_BOUND_SESSION	= UEVENT_BASE + 18,

 	/* up events */
 	ISCSI_KEVENT_RECV_PDU		= KEVENT_BASE + 1,
@@ -78,6 +79,12 @@ struct iscsi_uevent {
 			uint16_t	cmds_max;
 			uint16_t	queue_depth;
 		} c_session;
+		struct msg_create_bound_session {
+			uint64_t	ep_handle;
+			uint32_t	initial_cmdsn;
+			uint16_t	cmds_max;
+			uint16_t	queue_depth;
+		} c_bound_session;
 		struct msg_destroy_session {
 			uint32_t	sid;
 		} d_session;
@@ -250,42 +257,49 @@ enum iscsi_param {
 	ISCSI_PARAM_PING_TMO,
 	ISCSI_PARAM_RECV_TMO,

+	ISCSI_PARAM_IFACE_NAME,
+	ISCSI_PARAM_ISID,
+	ISCSI_PARAM_INITIATOR_NAME,
 	/* must always be last */
 	ISCSI_PARAM_MAX,
 };

-#define ISCSI_MAX_RECV_DLENGTH		(1 << ISCSI_PARAM_MAX_RECV_DLENGTH)
-#define ISCSI_MAX_XMIT_DLENGTH		(1 << ISCSI_PARAM_MAX_XMIT_DLENGTH)
-#define ISCSI_HDRDGST_EN		(1 << ISCSI_PARAM_HDRDGST_EN)
-#define ISCSI_DATADGST_EN		(1 << ISCSI_PARAM_DATADGST_EN)
-#define ISCSI_INITIAL_R2T_EN		(1 << ISCSI_PARAM_INITIAL_R2T_EN)
-#define ISCSI_MAX_R2T			(1 << ISCSI_PARAM_MAX_R2T)
-#define ISCSI_IMM_DATA_EN		(1 << ISCSI_PARAM_IMM_DATA_EN)
-#define ISCSI_FIRST_BURST		(1 << ISCSI_PARAM_FIRST_BURST)
-#define ISCSI_MAX_BURST			(1 << ISCSI_PARAM_MAX_BURST)
-#define ISCSI_PDU_INORDER_EN		(1 << ISCSI_PARAM_PDU_INORDER_EN)
-#define ISCSI_DATASEQ_INORDER_EN	(1 << ISCSI_PARAM_DATASEQ_INORDER_EN)
-#define ISCSI_ERL			(1 << ISCSI_PARAM_ERL)
-#define ISCSI_IFMARKER_EN		(1 << ISCSI_PARAM_IFMARKER_EN)
-#define ISCSI_OFMARKER_EN		(1 << ISCSI_PARAM_OFMARKER_EN)
-#define ISCSI_EXP_STATSN		(1 << ISCSI_PARAM_EXP_STATSN)
-#define ISCSI_TARGET_NAME		(1 << ISCSI_PARAM_TARGET_NAME)
-#define ISCSI_TPGT			(1 << ISCSI_PARAM_TPGT)
-#define ISCSI_PERSISTENT_ADDRESS	(1 << ISCSI_PARAM_PERSISTENT_ADDRESS)
-#define ISCSI_PERSISTENT_PORT		(1 << ISCSI_PARAM_PERSISTENT_PORT)
-#define ISCSI_SESS_RECOVERY_TMO		(1 << ISCSI_PARAM_SESS_RECOVERY_TMO)
-#define ISCSI_CONN_PORT			(1 << ISCSI_PARAM_CONN_PORT)
-#define ISCSI_CONN_ADDRESS		(1 << ISCSI_PARAM_CONN_ADDRESS)
-#define ISCSI_USERNAME			(1 << ISCSI_PARAM_USERNAME)
-#define ISCSI_USERNAME_IN		(1 << ISCSI_PARAM_USERNAME_IN)
-#define ISCSI_PASSWORD			(1 << ISCSI_PARAM_PASSWORD)
-#define ISCSI_PASSWORD_IN		(1 << ISCSI_PARAM_PASSWORD_IN)
-#define ISCSI_FAST_ABORT		(1 << ISCSI_PARAM_FAST_ABORT)
-#define ISCSI_ABORT_TMO			(1 << ISCSI_PARAM_ABORT_TMO)
-#define ISCSI_LU_RESET_TMO		(1 << ISCSI_PARAM_LU_RESET_TMO)
-#define ISCSI_HOST_RESET_TMO		(1 << ISCSI_PARAM_HOST_RESET_TMO)
-#define ISCSI_PING_TMO			(1 << ISCSI_PARAM_PING_TMO)
-#define ISCSI_RECV_TMO			(1 << ISCSI_PARAM_RECV_TMO)
+#define ISCSI_MAX_RECV_DLENGTH		(1ULL << ISCSI_PARAM_MAX_RECV_DLENGTH)
+#define ISCSI_MAX_XMIT_DLENGTH		(1ULL << ISCSI_PARAM_MAX_XMIT_DLENGTH)
+#define ISCSI_HDRDGST_EN		(1ULL << ISCSI_PARAM_HDRDGST_EN)
+#define ISCSI_DATADGST_EN		(1ULL << ISCSI_PARAM_DATADGST_EN)
+#define ISCSI_INITIAL_R2T_EN		(1ULL << ISCSI_PARAM_INITIAL_R2T_EN)
+#define ISCSI_MAX_R2T			(1ULL << ISCSI_PARAM_MAX_R2T)
+#define ISCSI_IMM_DATA_EN		(1ULL << ISCSI_PARAM_IMM_DATA_EN)
+#define ISCSI_FIRST_BURST		(1ULL << ISCSI_PARAM_FIRST_BURST)
+#define ISCSI_MAX_BURST			(1ULL << ISCSI_PARAM_MAX_BURST)
+#define ISCSI_PDU_INORDER_EN		(1ULL << ISCSI_PARAM_PDU_INORDER_EN)
+#define ISCSI_DATASEQ_INORDER_EN	(1ULL << ISCSI_PARAM_DATASEQ_INORDER_EN)
+#define ISCSI_ERL			(1ULL << ISCSI_PARAM_ERL)
+#define ISCSI_IFMARKER_EN		(1ULL << ISCSI_PARAM_IFMARKER_EN)
+#define ISCSI_OFMARKER_EN		(1ULL << ISCSI_PARAM_OFMARKER_EN)
+#define ISCSI_EXP_STATSN		(1ULL << ISCSI_PARAM_EXP_STATSN)
+#define ISCSI_TARGET_NAME		(1ULL << ISCSI_PARAM_TARGET_NAME)
+#define ISCSI_TPGT			(1ULL << ISCSI_PARAM_TPGT)
+#define ISCSI_PERSISTENT_ADDRESS	(1ULL << ISCSI_PARAM_PERSISTENT_ADDRESS)
+#define ISCSI_PERSISTENT_PORT		(1ULL << ISCSI_PARAM_PERSISTENT_PORT)
+#define ISCSI_SESS_RECOVERY_TMO		(1ULL << ISCSI_PARAM_SESS_RECOVERY_TMO)
+#define ISCSI_CONN_PORT			(1ULL << ISCSI_PARAM_CONN_PORT)
+#define ISCSI_CONN_ADDRESS		(1ULL << ISCSI_PARAM_CONN_ADDRESS)
+#define ISCSI_USERNAME			(1ULL << ISCSI_PARAM_USERNAME)
+#define ISCSI_USERNAME_IN		(1ULL << ISCSI_PARAM_USERNAME_IN)
+#define ISCSI_PASSWORD			(1ULL << ISCSI_PARAM_PASSWORD)
+#define ISCSI_PASSWORD_IN		(1ULL << ISCSI_PARAM_PASSWORD_IN)
+#define ISCSI_FAST_ABORT		(1ULL << ISCSI_PARAM_FAST_ABORT)
+#define ISCSI_ABORT_TMO			(1ULL << ISCSI_PARAM_ABORT_TMO)
+#define ISCSI_LU_RESET_TMO		(1ULL << ISCSI_PARAM_LU_RESET_TMO)
+#define ISCSI_HOST_RESET_TMO		(1ULL << ISCSI_PARAM_HOST_RESET_TMO)
+#define ISCSI_PING_TMO			(1ULL << ISCSI_PARAM_PING_TMO)
+#define ISCSI_RECV_TMO			(1ULL << ISCSI_PARAM_RECV_TMO)
+#define ISCSI_IFACE_NAME		(1ULL << ISCSI_PARAM_IFACE_NAME)
+#define ISCSI_ISID			(1ULL << ISCSI_PARAM_ISID)
+#define ISCSI_INITIATOR_NAME		(1ULL << ISCSI_PARAM_INITIATOR_NAME)

 /* iSCSI HBA params */
 enum iscsi_host_param {
@@ -296,20 +310,13 @@ enum iscsi_host_param {
 	ISCSI_HOST_PARAM_MAX,
 };

-#define ISCSI_HOST_HWADDRESS		(1 << ISCSI_HOST_PARAM_HWADDRESS)
-#define ISCSI_HOST_INITIATOR_NAME	(1 << ISCSI_HOST_PARAM_INITIATOR_NAME)
-#define ISCSI_HOST_NETDEV_NAME		(1 << ISCSI_HOST_PARAM_NETDEV_NAME)
-#define ISCSI_HOST_IPADDRESS		(1 << ISCSI_HOST_PARAM_IPADDRESS)
+#define ISCSI_HOST_HWADDRESS		(1ULL << ISCSI_HOST_PARAM_HWADDRESS)
+#define ISCSI_HOST_INITIATOR_NAME	(1ULL << ISCSI_HOST_PARAM_INITIATOR_NAME)
+#define ISCSI_HOST_NETDEV_NAME		(1ULL << ISCSI_HOST_PARAM_NETDEV_NAME)
+#define ISCSI_HOST_IPADDRESS		(1ULL << ISCSI_HOST_PARAM_IPADDRESS)

 #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
 #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
-#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
-
-/**
- * iscsi_hostdata - get LLD hostdata from scsi_host
- * @_hostdata: pointer to scsi host's hostdata
- **/
-#define iscsi_hostdata(_hostdata) ((void*)_hostdata + sizeof(unsigned long))

 /*
  * These flags presents iSCSI Data-Path capabilities.

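The wholesale 1 -> 1ULL switch above matters because these bits feed 64-bit parameter masks: with the three new parameters, enum iscsi_param indexes can reach past bit 31, where a plain int-typed `1 << n` shift is undefined. A hedged illustration (the transport-side variable and helper are hypothetical):

    #include <scsi/iscsi_if.h>

    /* Illustrative: a transport advertising 64-bit parameter masks.
     * With (1 << n) and n >= 32 the shift would overflow a 32-bit int;
     * the 1ULL forms keep every bit position valid. */
    static uint64_t my_param_mask =
            ISCSI_TARGET_NAME | ISCSI_TPGT | ISCSI_INITIATOR_NAME;

    static int my_supports(uint64_t param_bit)
    {
            return (my_param_mask & param_bit) != 0;
    }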

@@ -22,6 +22,7 @@
 #define ISCSI_PROTO_H

 #include <linux/types.h>
+#include <scsi/scsi.h>

 #define ISCSI_DRAFT20_VERSION	0x00
@@ -156,7 +157,7 @@ struct iscsi_ecdb_ahdr {
 	uint8_t ahstype;
 	uint8_t reserved;
 	/* 4-byte aligned extended CDB spillover */
-	uint8_t ecdb[260 - ISCSI_CDB_SIZE];
+	uint8_t ecdb[SCSI_MAX_VARLEN_CDB_SIZE - ISCSI_CDB_SIZE];
 };

 /* SCSI Response Header */

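Replacing the bare 260 with SCSI_MAX_VARLEN_CDB_SIZE (the SCSI-layer constant for the largest variable-length CDB, pulled in by the new <scsi/scsi.h> include) documents where the spillover size comes from: the AHS carries whatever part of an extended CDB does not fit in the 16-byte basic header segment. A hedged compile-time check making the arithmetic explicit (the function is illustrative, assuming SCSI_MAX_VARLEN_CDB_SIZE is 260 and ISCSI_CDB_SIZE is 16):

    #include <linux/kernel.h>
    #include <scsi/iscsi_proto.h>

    /* Illustrative: the spillover area is 260 - 16 = 244 bytes. */
    static void my_ecdb_size_check(void)
    {
            BUILD_BUG_ON(sizeof(((struct iscsi_ecdb_ahdr *)0)->ecdb) != 244);
    }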

@@ -24,6 +24,7 @@
 #define LIBISCSI_H

 #include <linux/types.h>
+#include <linux/wait.h>
 #include <linux/mutex.h>
 #include <linux/timer.h>
 #include <linux/workqueue.h>
@@ -31,6 +32,7 @@
 #include <scsi/iscsi_if.h>

 struct scsi_transport_template;
+struct scsi_host_template;
 struct scsi_device;
 struct Scsi_Host;
 struct scsi_cmnd;
@@ -40,6 +42,7 @@ struct iscsi_cls_session;
 struct iscsi_cls_conn;
 struct iscsi_session;
 struct iscsi_nopin;
+struct device;

 /* #define DEBUG_SCSI */
 #ifdef DEBUG_SCSI
@@ -49,9 +52,7 @@ struct iscsi_nopin;
 #endif

 #define ISCSI_DEF_XMIT_CMDS_MAX	128	/* must be power of 2 */
-#define ISCSI_MGMT_CMDS_MAX	16	/* must be power of 2 */
-
-#define ISCSI_MGMT_ITT_OFFSET	0xa00
+#define ISCSI_MGMT_CMDS_MAX	15

 #define ISCSI_DEF_CMD_PER_LUN		32
 #define ISCSI_MAX_CMD_PER_LUN		128
@@ -69,7 +70,10 @@ enum {
 /* Connection suspend "bit" */
 #define ISCSI_SUSPEND_BIT		1

-#define ISCSI_ITT_MASK			(0xfff)
+#define ISCSI_ITT_MASK			(0x1fff)
+#define ISCSI_TOTAL_CMDS_MAX		4096
+/* this must be a power of two greater than ISCSI_MGMT_CMDS_MAX */
+#define ISCSI_TOTAL_CMDS_MIN		16
 #define ISCSI_AGE_SHIFT			28
 #define ISCSI_AGE_MASK			(0xf << ISCSI_AGE_SHIFT)

@@ -82,18 +86,6 @@ enum {
 	ISCSI_DIGEST_SIZE = sizeof(__u32),
 };

-struct iscsi_mgmt_task {
-	/*
-	 * Becuae LLDs allocate their hdr differently, this is a pointer to
-	 * that storage. It must be setup at session creation time.
-	 */
-	struct iscsi_hdr *hdr;
-	char *data;		/* mgmt payload */
-	unsigned data_count;	/* counts data to be sent */
-	uint32_t itt;		/* this ITT */
-	void *dd_data;		/* driver/transport data */
-	struct list_head running;
-};

 enum {
 	ISCSI_TASK_COMPLETED,
@@ -101,7 +93,7 @@ enum {
 	ISCSI_TASK_RUNNING,
 };

-struct iscsi_cmd_task {
+struct iscsi_task {
 	/*
 	 * Because LLDs allocate their hdr differently, this is a pointer
 	 * and length to that storage. It must be setup at session
@@ -118,6 +110,7 @@ struct iscsi_cmd_task {
 	/* offset in unsolicited stream (bytes); */
 	unsigned unsol_offset;
 	unsigned data_count;	/* remaining Data-Out */
+	char *data;		/* mgmt payload */
 	struct scsi_cmnd *sc;	/* associated SCSI cmd*/
 	struct iscsi_conn *conn;	/* used connection */
@@ -128,9 +121,9 @@ struct iscsi_cmd_task {
 	void *dd_data;		/* driver/transport data */
 };

-static inline void* iscsi_next_hdr(struct iscsi_cmd_task *ctask)
+static inline void* iscsi_next_hdr(struct iscsi_task *task)
 {
-	return (void*)ctask->hdr + ctask->hdr_len;
+	return (void*)task->hdr + task->hdr_len;
 }

 /* Connection's states */
@@ -145,11 +138,6 @@ struct iscsi_conn {
 	struct iscsi_cls_conn *cls_conn;	/* ptr to class connection */
 	void *dd_data;		/* iscsi_transport data */
 	struct iscsi_session *session;	/* parent session */
-	/*
-	 * LLDs should set this lock. It protects the transport recv
-	 * code
-	 */
-	rwlock_t *recv_lock;
 	/*
 	 * conn_stop() flag: stop to recover, stop to terminate
 	 */
@@ -159,7 +147,7 @@ struct iscsi_conn {
 	unsigned long last_ping;
 	int ping_timeout;
 	int recv_timeout;
-	struct iscsi_mgmt_task *ping_mtask;
+	struct iscsi_task *ping_task;

 	/* iSCSI connection-wide sequencing */
 	uint32_t exp_statsn;
@@ -175,9 +163,8 @@ struct iscsi_conn {
 	 * should always fit in this buffer
 	 */
 	char *data;
-	struct iscsi_mgmt_task *login_mtask;	/* mtask used for login/text */
-	struct iscsi_mgmt_task *mtask;		/* xmit mtask in progress */
-	struct iscsi_cmd_task *ctask;		/* xmit ctask in progress */
+	struct iscsi_task *login_task;	/* mtask used for login/text */
+	struct iscsi_task *task;	/* xmit task in progress */

 	/* xmit */
 	struct list_head mgmtqueue;	/* mgmt (control) xmit queue */
@@ -208,9 +195,6 @@ struct iscsi_conn {
 	/* remote portal currently connected to */
 	int portal_port;
 	char portal_address[ISCSI_ADDRESS_BUF_LEN];
-	/* local address */
-	int local_port;
-	char local_address[ISCSI_ADDRESS_BUF_LEN];

 	/* MIB-statistics */
 	uint64_t txdata_octets;
@@ -246,6 +230,7 @@ enum {
 };

 struct iscsi_session {
+	struct iscsi_cls_session *cls_session;
 	/*
 	 * Syncs up the scsi eh thread with the iscsi eh thread when sending
 	 * task management functions. This must be taken before the session
@@ -281,10 +266,8 @@ struct iscsi_session {
 	char *password;
 	char *password_in;
 	char *targetname;
+	char *ifacename;
 	char *initiatorname;
-	/* hw address or netdev iscsi connection is bound to */
-	char *hwaddress;
-	char *netdev;
 	/* control data */
 	struct iscsi_transport *tt;
 	struct Scsi_Host *host;
@@ -298,12 +281,20 @@ struct iscsi_session {
 	int state;		/* session state */
 	int age;		/* counts session re-opens */

+	int scsi_cmds_max;	/* max scsi commands */
 	int cmds_max;		/* size of cmds array */
-	struct iscsi_cmd_task **cmds;	/* Original Cmds arr */
+	struct iscsi_task **cmds;	/* Original Cmds arr */
 	struct iscsi_pool cmdpool;	/* PDU's pool */
-	int mgmtpool_max;	/* size of mgmt array */
-	struct iscsi_mgmt_task **mgmt_cmds;	/* Original mgmt arr */
-	struct iscsi_pool mgmtpool;	/* Mgmt PDU's pool */
+};
+
+struct iscsi_host {
+	char *initiatorname;
+	/* hw address or netdev iscsi connection is bound to */
+	char *hwaddress;
+	char *netdev;
+	/* local address */
+	int local_port;
+	char local_address[ISCSI_ADDRESS_BUF_LEN];
 };

 /*
@@ -316,42 +307,44 @@ extern int iscsi_eh_device_reset(struct scsi_cmnd *sc);
 extern int iscsi_queuecommand(struct scsi_cmnd *sc,
 			      void (*done)(struct scsi_cmnd *));

 /*
  * iSCSI host helpers.
  */
+#define iscsi_host_priv(_shost) \
+	(shost_priv(_shost) + sizeof(struct iscsi_host))
+
 extern int iscsi_host_set_param(struct Scsi_Host *shost,
 				enum iscsi_host_param param, char *buf,
 				int buflen);
 extern int iscsi_host_get_param(struct Scsi_Host *shost,
 				enum iscsi_host_param param, char *buf);
+extern int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev);
+extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+					  int dd_data_size, uint16_t qdepth);
+extern void iscsi_host_remove(struct Scsi_Host *shost);
+extern void iscsi_host_free(struct Scsi_Host *shost);

 /*
  * session management
  */
 extern struct iscsi_cls_session *
-iscsi_session_setup(struct iscsi_transport *, struct scsi_transport_template *,
-		    uint16_t, uint16_t, int, int, uint32_t, uint32_t *);
+iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost,
+		    uint16_t, int, uint32_t, unsigned int);
 extern void iscsi_session_teardown(struct iscsi_cls_session *);
-extern struct iscsi_session *class_to_transport_session(struct iscsi_cls_session *);
 extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
 extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
 			   enum iscsi_param param, char *buf, int buflen);
 extern int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
 				   enum iscsi_param param, char *buf);

-#define session_to_cls(_sess) \
-	hostdata_session(_sess->host->hostdata)
-
 #define iscsi_session_printk(prefix, _sess, fmt, a...)	\
-	iscsi_cls_session_printk(prefix,		\
-		(struct iscsi_cls_session *)session_to_cls(_sess), fmt, ##a)
+	iscsi_cls_session_printk(prefix, _sess->cls_session, fmt, ##a)

 /*
  * connection management
  */
 extern struct iscsi_cls_conn *iscsi_conn_setup(struct iscsi_cls_session *,
-					       uint32_t);
+					       int, uint32_t);
 extern void iscsi_conn_teardown(struct iscsi_cls_conn *);
 extern int iscsi_conn_start(struct iscsi_cls_conn *);
 extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
@@ -360,25 +353,29 @@ extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *,
 extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
 extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
 				enum iscsi_param param, char *buf);
+extern void iscsi_suspend_tx(struct iscsi_conn *conn);

 #define iscsi_conn_printk(prefix, _c, fmt, a...) \
-	iscsi_cls_conn_printk(prefix, _c->cls_conn, fmt, ##a)
+	iscsi_cls_conn_printk(prefix, ((struct iscsi_conn *)_c)->cls_conn, \
+			      fmt, ##a)

 /*
  * pdu and task processing
  */
 extern void iscsi_update_cmdsn(struct iscsi_session *, struct iscsi_nopin *);
-extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *,
+extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *,
 					  struct iscsi_data *hdr);
 extern int iscsi_conn_send_pdu(struct iscsi_cls_conn *, struct iscsi_hdr *,
 			       char *, uint32_t);
 extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
 			      char *, int);
-extern int iscsi_verify_itt(struct iscsi_conn *, struct iscsi_hdr *,
-			    uint32_t *);
-extern void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask);
-extern void iscsi_free_mgmt_task(struct iscsi_conn *conn,
-				 struct iscsi_mgmt_task *mtask);
+extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+				char *, int);
+extern int iscsi_verify_itt(struct iscsi_conn *, itt_t);
+extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
+extern void iscsi_requeue_task(struct iscsi_task *task);
+extern void iscsi_put_task(struct iscsi_task *task);
+extern void __iscsi_get_task(struct iscsi_task *task);

 /*
  * generic helpers

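Taken together, the libiscsi.h changes split per-HBA state (the new struct iscsi_host) from per-session state and hand host allocation to libiscsi. A hedged sketch of the new driver bring-up sequence, using only the helpers declared above (the template, private struct, and queue depth are illustrative):

    /* Illustrative bring-up with the new helpers. iscsi_host_alloc()
     * reserves struct iscsi_host plus dd_data_size bytes of
     * driver-private space behind shost_priv(). */
    struct my_hba { int dummy; };

    static struct Scsi_Host *my_probe(struct device *pdev,
                                      struct scsi_host_template *my_sht)
    {
            struct Scsi_Host *shost;
            struct my_hba *hba;

            shost = iscsi_host_alloc(my_sht, sizeof(struct my_hba), 16);
            if (!shost)
                    return NULL;
            hba = iscsi_host_priv(shost);   /* skips struct iscsi_host */
            (void)hba;

            if (iscsi_host_add(shost, pdev)) {
                    iscsi_host_free(shost);
                    return NULL;
            }
            return shost;
    }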

@@ -9,6 +9,7 @@
 #define _SCSI_SCSI_H

 #include <linux/types.h>
+#include <scsi/scsi_cmnd.h>

 /*
  * The maximum number of SG segments that we will put inside a
@@ -400,6 +401,7 @@ struct scsi_lun {
 #define SOFT_ERROR      0x2005
 #define ADD_TO_MLQUEUE  0x2006
 #define TIMEOUT_ERROR   0x2007
+#define SCSI_RETURN_NOT_HANDLED   0x2008

 /*
  * Midlevel queue return values.
@@ -424,6 +426,22 @@ struct scsi_lun {
 #define driver_byte(result) (((result) >> 24) & 0xff)
 #define suggestion(result)  (driver_byte(result) & SUGGEST_MASK)

+static inline void set_msg_byte(struct scsi_cmnd *cmd, char status)
+{
+	cmd->result |= status << 8;
+}
+
+static inline void set_host_byte(struct scsi_cmnd *cmd, char status)
+{
+	cmd->result |= status << 16;
+}
+
+static inline void set_driver_byte(struct scsi_cmnd *cmd, char status)
+{
+	cmd->result |= status << 24;
+}
+
 #define sense_class(sense) (((sense) >> 4) & 0x7)
 #define sense_error(sense) ((sense) & 0xf)
 #define sense_valid(sense) ((sense) & 0x80);

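Note that the new byte-setter helpers above OR a status into the existing cmd->result rather than replacing the field, so callers should start from a known-clean result. A hedged example of failing a command from a driver's completion path (the function is illustrative; DID_ERROR and set_host_byte() are real):

    /* Illustrative: mark a command as failed by the host. The helpers
     * OR into cmd->result, so clear it first if it may be stale. */
    static void my_fail_cmd(struct scsi_cmnd *cmd)
    {
            cmd->result = 0;
            set_host_byte(cmd, DID_ERROR);  /* result |= DID_ERROR << 16 */
            cmd->scsi_done(cmd);
    }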

@@ -7,7 +7,6 @@
 #include <linux/types.h>
 #include <linux/timer.h>
 #include <linux/scatterlist.h>
-#include <linux/blkdev.h>

 struct Scsi_Host;
 struct scsi_device;


@@ -162,9 +162,29 @@ struct scsi_device {
 	struct execute_work	ew; /* used to get process context on put */

+	struct scsi_dh_data	*scsi_dh_data;
 	enum scsi_device_state sdev_state;
 	unsigned long		sdev_data[0];
 } __attribute__((aligned(sizeof(unsigned long))));

+struct scsi_device_handler {
+	/* Used by the infrastructure */
+	struct list_head list; /* list of scsi_device_handlers */
+	struct notifier_block nb;
+
+	/* Filled by the hardware handler */
+	struct module *module;
+	const char *name;
+	int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *);
+	int (*activate)(struct scsi_device *);
+	int (*prep_fn)(struct scsi_device *, struct request *);
+};
+
+struct scsi_dh_data {
+	struct scsi_device_handler *scsi_dh;
+	char buf[0];
+};
+
 #define	to_scsi_device(d)	\
 	container_of(d, struct scsi_device, sdev_gendev)
 #define	class_to_sdev(d)	\
@@ -231,7 +251,9 @@ extern struct scsi_device *__scsi_add_device(struct Scsi_Host *,
 					     uint, uint, uint, void *hostdata);
 extern int scsi_add_device(struct Scsi_Host *host, uint channel,
 			   uint target, uint lun);
+extern int scsi_register_device_handler(struct scsi_device_handler *scsi_dh);
 extern void scsi_remove_device(struct scsi_device *);
+extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh);
 extern int scsi_device_get(struct scsi_device *);
 extern void scsi_device_put(struct scsi_device *);
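The scsi_device_handler plumbing above is what the new drivers/scsi/device_handler code (see the scsi_dh kconfig fix in the shortlog) registers against. A hedged skeleton of a handler module, using only the ops and registration calls declared in this header (names and stub policies are illustrative):

    #include <linux/module.h>
    #include <scsi/scsi.h>
    #include <scsi/scsi_device.h>

    /* Illustrative hardware-handler skeleton; real callbacks implement
     * device-specific failover policy. */
    static int my_dh_check_sense(struct scsi_device *sdev,
                                 struct scsi_sense_hdr *sshdr)
    {
            return SCSI_RETURN_NOT_HANDLED; /* defer to normal EH */
    }

    static int my_dh_activate(struct scsi_device *sdev)
    {
            return 0;
    }

    static struct scsi_device_handler my_dh = {
            .name        = "my_dh",
            .module      = THIS_MODULE,
            .check_sense = my_dh_check_sense,
            .activate    = my_dh_activate,
    };

    static int __init my_dh_init(void)
    {
            return scsi_register_device_handler(&my_dh);
    }

    static void __exit my_dh_exit(void)
    {
            scsi_unregister_device_handler(&my_dh);
    }

    module_init(my_dh_init);
    module_exit(my_dh_exit);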

Some files were not shown because too many files have changed in this diff.