Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

Linus Torvalds 2006-01-04 16:30:12 -08:00
commit f61ea1b0c8
95 changed files with 17448 additions and 4473 deletions

View File

@ -1,3 +1,38 @@
Release Date : Fri Nov 11 12:27:22 EST 2005 - Seokmann Ju <sju@lsil.com>
Current Version : 2.20.4.7 (scsi module), 2.20.2.6 (cmm module)
Older Version : 2.20.4.6 (scsi module), 2.20.2.6 (cmm module)
1. Sorted out PCI IDs to remove megaraid support overlaps.
Based on the patch from Daniel, sorted out PCI IDs along with a
character node name change from 'megadev' to 'megadev_legacy' to avoid
conflict.
---
Hopefully we'll be getting the build restriction zapped much sooner,
but we should also be thinking about totally removing the hardware
support overlap in the megaraid drivers.
This patch pencils in a date of Feb 06 for this, and performs some
printk abuse in hope that existing legacy users might pick up on what's
going on.
Signed-off-by: Daniel Drake <dsd@gentoo.org>
---
2. Fixed an issue: megaraid always fails in the reset handler.
---
I found that the megaraid driver always fails to reset the
adapter with the following message:
megaraid: resetting the host...
megaraid mbox: reset sequence completed successfully
megaraid: fast sync command timed out
megaraid: reservation reset failed
when the "Cluster mode" of the adapter BIOS is enabled.
So, whenever the reset occurs, the adapter goes
offline and just becomes unavailable.
Jun'ichi Nomura [mailto:jnomura@mtc.biglobe.ne.jp]
---
Release Date : Mon Mar 07 12:27:22 EST 2005 - Seokmann Ju <sju@lsil.com>
Current Version : 2.20.4.6 (scsi module), 2.20.2.6 (cmm module)
Older Version : 2.20.4.5 (scsi module), 2.20.2.5 (cmm module)

View File

@ -150,7 +150,8 @@ scsi devices of which only the first 2 respond:
LLD mid level LLD
===-------------------=========--------------------===------
scsi_host_alloc() -->
scsi_add_host() --------+
scsi_add_host() ---->
scsi_scan_host() -------+
|
slave_alloc()
slave_configure() --> scsi_adjust_queue_depth()
@ -196,7 +197,7 @@ of the issues involved. See the section on reference counting below.
The hotplug concept may be extended to SCSI devices. Currently, when an
HBA is added, the scsi_add_host() function causes a scan for SCSI devices
HBA is added, the scsi_scan_host() function causes a scan for SCSI devices
attached to the HBA's SCSI transport. On newer SCSI transports the HBA
may become aware of a new SCSI device _after_ the scan has completed.
An LLD can use this sequence to make the mid level aware of a SCSI device:
@ -372,7 +373,7 @@ names all start with "scsi_".
Summary:
scsi_activate_tcq - turn on tag command queueing
scsi_add_device - creates new scsi device (lu) instance
scsi_add_host - perform sysfs registration and SCSI bus scan.
scsi_add_host - perform sysfs registration and set up transport class
scsi_adjust_queue_depth - change the queue depth on a SCSI device
scsi_assign_lock - replace default host_lock with given lock
scsi_bios_ptable - return copy of block device's partition table
@ -386,6 +387,7 @@ Summary:
scsi_remove_device - detach and remove a SCSI device
scsi_remove_host - detach and remove all SCSI devices owned by host
scsi_report_bus_reset - report scsi _bus_ reset observed
scsi_scan_host - scan SCSI bus
scsi_track_queue_full - track successive QUEUE_FULL events
scsi_unblock_requests - allow further commands to be queued to given host
scsi_unregister - [calls scsi_host_put()]
@ -425,10 +427,10 @@ void scsi_activate_tcq(struct scsi_device *sdev, int depth)
* Might block: yes
*
* Notes: This call is usually performed internally during a scsi
* bus scan when an HBA is added (i.e. scsi_add_host()). So it
* bus scan when an HBA is added (i.e. scsi_scan_host()). So it
* should only be called if the HBA becomes aware of a new scsi
* device (lu) after scsi_add_host() has completed. If successful
* this call we lead to slave_alloc() and slave_configure() callbacks
* device (lu) after scsi_scan_host() has completed. If successful
* this call can lead to slave_alloc() and slave_configure() callbacks
* into the LLD.
*
* Defined in: drivers/scsi/scsi_scan.c
@ -439,7 +441,7 @@ struct scsi_device * scsi_add_device(struct Scsi_Host *shost,
/**
* scsi_add_host - perform sysfs registration and SCSI bus scan.
* scsi_add_host - perform sysfs registration and set up transport class
* @shost: pointer to scsi host instance
* @dev: pointer to struct device of type scsi class
*
@ -448,7 +450,11 @@ struct scsi_device * scsi_add_device(struct Scsi_Host *shost,
* Might block: no
*
* Notes: Only required in "hotplug initialization model" after a
* successful call to scsi_host_alloc().
* successful call to scsi_host_alloc(). This function does not
* scan the bus; this can be done by calling scsi_scan_host() or
* in some other transport-specific way. The LLD must set up
* the transport template before calling this function and may only
* access the transport class data after this function has been called.
*
* Defined in: drivers/scsi/hosts.c
**/
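
As an illustration of the revised hotplug initialization model, a minimal
probe sketch is given below. The my_sht template, my_transport_template and
struct my_hba are hypothetical LLD-side names, and error handling is trimmed
to the essentials; the point is only the ordering: set up the transport
template, then scsi_add_host(), then scsi_scan_host().

#include <linux/pci.h>
#include <scsi/scsi_host.h>

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct Scsi_Host *shost;
        int error;

        shost = scsi_host_alloc(&my_sht, sizeof(struct my_hba));
        if (!shost)
                return -ENOMEM;

        /* the transport template must be in place before scsi_add_host() */
        shost->transportt = my_transport_template;

        /* sysfs registration and transport class setup only - no bus scan */
        error = scsi_add_host(shost, &pdev->dev);
        if (error) {
                scsi_host_put(shost);
                return error;
        }

        /* the bus scan is now a separate, explicit step */
        scsi_scan_host(shost);
        return 0;
}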
@ -559,7 +565,7 @@ void scsi_deactivate_tcq(struct scsi_device *sdev, int depth)
* area for the LLD's exclusive use.
* Both associated refcounting objects have their refcount set to 1.
* Full registration (in sysfs) and a bus scan are performed later when
* scsi_add_host() is called.
* scsi_add_host() and scsi_scan_host() are called.
*
* Defined in: drivers/scsi/hosts.c .
**/
@ -698,6 +704,19 @@ int scsi_remove_host(struct Scsi_Host *shost)
void scsi_report_bus_reset(struct Scsi_Host * shost, int channel)
/**
* scsi_scan_host - scan SCSI bus
* @shost: a pointer to a scsi host instance
*
* Might block: yes
*
* Notes: Should be called after scsi_add_host()
*
* Defined in: drivers/scsi/scsi_scan.c
**/
void scsi_scan_host(struct Scsi_Host *shost)
/**
* scsi_track_queue_full - track successive QUEUE_FULL events on given
* device to determine if and when there is a need

View File

@ -239,7 +239,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
q->backing_dev_info.state = 0;
q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
blk_queue_max_sectors(q, MAX_SECTORS);
blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
blk_queue_hardsect_size(q, 512);
blk_queue_dma_alignment(q, 511);
blk_queue_congestion_threshold(q);
@ -555,7 +555,12 @@ void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
}
q->max_sectors = q->max_hw_sectors = max_sectors;
if (BLK_DEF_MAX_SECTORS > max_sectors)
q->max_hw_sectors = q->max_sectors = max_sectors;
else {
q->max_sectors = BLK_DEF_MAX_SECTORS;
q->max_hw_sectors = max_sectors;
}
}
EXPORT_SYMBOL(blk_queue_max_sectors);
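
To make the effect of this split concrete, a hypothetical example (not part
of the patch): a driver whose hardware can transfer hw_max sectors per
request, with hw_max larger than BLK_DEF_MAX_SECTORS, now keeps the hardware
limit in max_hw_sectors while max_sectors is capped at the default; the
BLOCK_PC paths changed below (blk_rq_map_user(), blk_rq_map_kern(), sg_io())
are checked against the hardware limit.

        /* hypothetical driver setup, assuming hw_max > BLK_DEF_MAX_SECTORS */
        blk_queue_max_sectors(q, hw_max);

        /*
         * afterwards:
         *   q->max_hw_sectors == hw_max              (hard limit, used for
         *                                             BLOCK_PC / SG_IO requests)
         *   q->max_sectors    == BLK_DEF_MAX_SECTORS (soft cap for normal
         *                                             file-system requests)
         */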
@ -657,8 +662,8 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
{
/* zero is "infinity" */
t->max_sectors = t->max_hw_sectors =
min_not_zero(t->max_sectors,b->max_sectors);
t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
@ -1293,9 +1298,15 @@ static inline int ll_new_hw_segment(request_queue_t *q,
static int ll_back_merge_fn(request_queue_t *q, struct request *req,
struct bio *bio)
{
unsigned short max_sectors;
int len;
if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
if (unlikely(blk_pc_request(req)))
max_sectors = q->max_hw_sectors;
else
max_sectors = q->max_sectors;
if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
req->flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
@ -1325,9 +1336,16 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
static int ll_front_merge_fn(request_queue_t *q, struct request *req,
struct bio *bio)
{
unsigned short max_sectors;
int len;
if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
if (unlikely(blk_pc_request(req)))
max_sectors = q->max_hw_sectors;
else
max_sectors = q->max_sectors;
if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
req->flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
@ -2144,7 +2162,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
struct bio *bio;
int reading;
if (len > (q->max_sectors << 9))
if (len > (q->max_hw_sectors << 9))
return -EINVAL;
if (!len || !ubuf)
return -EINVAL;
@ -2259,7 +2277,7 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
{
struct bio *bio;
if (len > (q->max_sectors << 9))
if (len > (q->max_hw_sectors << 9))
return -EINVAL;
if (!len || !kbuf)
return -EINVAL;
@ -2306,6 +2324,8 @@ void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
generic_unplug_device(q);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
/**
* blk_execute_rq - insert a request into queue for execution
* @q: queue to insert the request in
@ -2444,7 +2464,7 @@ void disk_round_stats(struct gendisk *disk)
/*
* queue lock must be held
*/
static void __blk_put_request(request_queue_t *q, struct request *req)
void __blk_put_request(request_queue_t *q, struct request *req)
{
struct request_list *rl = req->rl;
@ -2473,6 +2493,8 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
}
}
EXPORT_SYMBOL_GPL(__blk_put_request);
void blk_put_request(struct request *req)
{
unsigned long flags;

View File

@ -233,7 +233,7 @@ static int sg_io(struct file *file, request_queue_t *q,
if (verify_command(file, cmd))
return -EPERM;
if (hdr->dxfer_len > (q->max_sectors << 9))
if (hdr->dxfer_len > (q->max_hw_sectors << 9))
return -EIO;
if (hdr->dxfer_len)

View File

@ -638,7 +638,7 @@ int dm_split_args(int *argc, char ***argvp, char *input)
static void check_for_valid_limits(struct io_restrictions *rs)
{
if (!rs->max_sectors)
rs->max_sectors = MAX_SECTORS;
rs->max_sectors = SAFE_MAX_SECTORS;
if (!rs->max_phys_segments)
rs->max_phys_segments = MAX_PHYS_SEGMENTS;
if (!rs->max_hw_segments)

View File

@ -313,13 +313,13 @@ mpt_reply(MPT_ADAPTER *ioc, u32 pa)
u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
if (ioc->bus_type == FC)
mpt_fc_log_info(ioc, log_info);
else if (ioc->bus_type == SCSI)
else if (ioc->bus_type == SPI)
mpt_sp_log_info(ioc, log_info);
else if (ioc->bus_type == SAS)
mpt_sas_log_info(ioc, log_info);
}
if (ioc_stat & MPI_IOCSTATUS_MASK) {
if (ioc->bus_type == SCSI &&
if (ioc->bus_type == SPI &&
cb_idx != mpt_stm_index &&
cb_idx != mpt_lan_index)
mpt_sp_ioc_info(ioc, (u32)ioc_stat, mf);
@ -1376,7 +1376,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
}
else if (pdev->device == MPI_MANUFACTPAGE_DEVID_53C1030) {
ioc->prod_name = "LSI53C1030";
ioc->bus_type = SCSI;
ioc->bus_type = SPI;
/* 1030 Chip Fix. Disable Split transactions
* for PCIX. Set MOST bits to zero if Rev < C0( = 8).
*/
@ -1389,7 +1389,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
}
else if (pdev->device == MPI_MANUFACTPAGE_DEVID_1030_53C1035) {
ioc->prod_name = "LSI53C1035";
ioc->bus_type = SCSI;
ioc->bus_type = SPI;
}
else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1064) {
ioc->prod_name = "LSISAS1064";
@ -3042,7 +3042,7 @@ mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag)
/* Clear the internal flash bad bit - autoincrementing register,
* so must do two writes.
*/
if (ioc->bus_type == SCSI) {
if (ioc->bus_type == SPI) {
/*
* 1030 and 1035 H/W errata, workaround to access
* the ClearFlashBadSignatureBit
@ -3152,7 +3152,7 @@ KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag)
int cnt,cntdn;
dinitprintk((KERN_WARNING MYNAM ": KickStarting %s!\n", ioc->name));
if (ioc->bus_type == SCSI) {
if (ioc->bus_type == SPI) {
/* Always issue a Msg Unit Reset first. This will clear some
* SCSI bus hang conditions.
*/
@ -3580,7 +3580,7 @@ initChainBuffers(MPT_ADAPTER *ioc)
dinitprintk((KERN_INFO MYNAM ": %s Now numSGE=%d num_sge=%d num_chain=%d\n",
ioc->name, numSGE, num_sge, num_chain));
if (ioc->bus_type == SCSI)
if (ioc->bus_type == SPI)
num_chain *= MPT_SCSI_CAN_QUEUE;
else
num_chain *= MPT_FC_CAN_QUEUE;

View File

@ -76,8 +76,8 @@
#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR
#endif
#define MPT_LINUX_VERSION_COMMON "3.03.04"
#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.04"
#define MPT_LINUX_VERSION_COMMON "3.03.05"
#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.05"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \
@ -321,7 +321,7 @@ typedef struct _SYSIF_REGS
* Dynamic Multi-Pathing specific stuff...
*/
/* VirtDevice negoFlags field */
/* VirtTarget negoFlags field */
#define MPT_TARGET_NO_NEGO_WIDE 0x01
#define MPT_TARGET_NO_NEGO_SYNC 0x02
#define MPT_TARGET_NO_NEGO_QAS 0x04
@ -330,8 +330,7 @@ typedef struct _SYSIF_REGS
/*
* VirtDevice - FC LUN device or SCSI target device
*/
typedef struct _VirtDevice {
struct scsi_device *device;
typedef struct _VirtTarget {
u8 tflags;
u8 ioc_id;
u8 target_id;
@ -342,21 +341,18 @@ typedef struct _VirtDevice {
u8 negoFlags; /* bit field, see above */
u8 raidVolume; /* set, if RAID Volume */
u8 type; /* byte 0 of Inquiry data */
u8 cflags; /* controller flags */
u8 rsvd1raid;
u16 fc_phys_lun;
u16 fc_xlat_lun;
u32 num_luns;
u32 luns[8]; /* Max LUNs is 256 */
u8 pad[4];
u8 inq_data[8];
/* IEEE Registered Extended Identifier
obtained via INQUIRY VPD page 0x83 */
/* NOTE: Do not separate uniq_prepad and uniq_data
as they are treated as a single entity in the code */
u8 uniq_prepad[8];
u8 uniq_data[20];
u8 pad2[4];
} VirtTarget;
typedef struct _VirtDevice {
VirtTarget *vtarget;
u8 ioc_id;
u8 bus_id;
u8 target_id;
u8 configured_lun;
u32 lun;
} VirtDevice;
/*
@ -903,7 +899,7 @@ typedef struct _MPT_LOCAL_REPLY {
typedef enum {
FC,
SCSI,
SPI,
SAS
} BUS_TYPE;
@ -912,7 +908,7 @@ typedef struct _MPT_SCSI_HOST {
int port;
u32 pad0;
struct scsi_cmnd **ScsiLookup;
VirtDevice **Targets;
VirtTarget **Targets;
MPT_LOCAL_REPLY *pLocal; /* used for internal commands */
struct timer_list timer;
/* Pool of memory for holding SCpnts before doing

View File

@ -1245,7 +1245,7 @@ mptctl_gettargetinfo (unsigned long arg)
MPT_ADAPTER *ioc;
struct Scsi_Host *sh;
MPT_SCSI_HOST *hd;
VirtDevice *vdev;
VirtTarget *vdev;
char *pmem;
int *pdata;
IOCPage2_t *pIoc2;
@ -1822,7 +1822,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
case MPI_FUNCTION_SCSI_IO_REQUEST:
if (ioc->sh) {
SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf;
VirtDevice *pTarget = NULL;
VirtTarget *pTarget = NULL;
MPT_SCSI_HOST *hd = NULL;
int qtag = MPI_SCSIIO_CONTROL_UNTAGGED;
int scsidir = 0;

View File

@ -84,13 +84,16 @@ static int mptfcTaskCtx = -1;
static int mptfcInternalCtx = -1; /* Used only for internal commands */
static struct scsi_host_template mptfc_driver_template = {
.module = THIS_MODULE,
.proc_name = "mptfc",
.proc_info = mptscsih_proc_info,
.name = "MPT FC Host",
.info = mptscsih_info,
.queuecommand = mptscsih_qcmd,
.target_alloc = mptscsih_target_alloc,
.slave_alloc = mptscsih_slave_alloc,
.slave_configure = mptscsih_slave_configure,
.target_destroy = mptscsih_target_destroy,
.slave_destroy = mptscsih_slave_destroy,
.change_queue_depth = mptscsih_change_queue_depth,
.eh_abort_handler = mptscsih_abort,
@ -167,13 +170,15 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(MYIOC_s_WARN_FMT
"Skipping because it's not operational!\n",
ioc->name);
return -ENODEV;
error = -ENODEV;
goto out_mptfc_probe;
}
if (!ioc->active) {
printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
ioc->name);
return -ENODEV;
error = -ENODEV;
goto out_mptfc_probe;
}
/* Sanity check - ensure at least 1 port is INITIATOR capable
@ -198,7 +203,8 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(MYIOC_s_WARN_FMT
"Unable to register controller with SCSI subsystem\n",
ioc->name);
return -1;
error = -1;
goto out_mptfc_probe;
}
spin_lock_irqsave(&ioc->FreeQlock, flags);
@ -266,7 +272,7 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mem = kmalloc(sz, GFP_ATOMIC);
if (mem == NULL) {
error = -ENOMEM;
goto mptfc_probe_failed;
goto out_mptfc_probe;
}
memset(mem, 0, sz);
@ -284,14 +290,14 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mem = kmalloc(sz, GFP_ATOMIC);
if (mem == NULL) {
error = -ENOMEM;
goto mptfc_probe_failed;
goto out_mptfc_probe;
}
memset(mem, 0, sz);
hd->Targets = (VirtDevice **) mem;
hd->Targets = (VirtTarget **) mem;
dprintk((KERN_INFO
" Targets @ %p, sz=%d\n", hd->Targets, sz));
" vdev @ %p, sz=%d\n", hd->Targets, sz));
/* Clear the TM flags
*/
@ -330,13 +336,13 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if(error) {
dprintk((KERN_ERR MYNAM
"scsi_add_host failed\n"));
goto mptfc_probe_failed;
goto out_mptfc_probe;
}
scsi_scan_host(sh);
return 0;
mptfc_probe_failed:
out_mptfc_probe:
mptscsih_remove(pdev);
return error;

View File

@ -228,31 +228,35 @@ static void mptsas_print_expander_pg1(SasExpanderPage1_t *pg1)
* implement ->target_alloc.
*/
static int
mptsas_slave_alloc(struct scsi_device *device)
mptsas_slave_alloc(struct scsi_device *sdev)
{
struct Scsi_Host *host = device->host;
struct Scsi_Host *host = sdev->host;
MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
struct sas_rphy *rphy;
struct mptsas_portinfo *p;
VirtTarget *vtarget;
VirtDevice *vdev;
uint target = device->id;
struct scsi_target *starget;
int i;
if ((vdev = hd->Targets[target]) != NULL)
goto out;
vdev = kmalloc(sizeof(VirtDevice), GFP_KERNEL);
if (!vdev) {
printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n",
hd->ioc->name, sizeof(VirtDevice));
return -ENOMEM;
}
memset(vdev, 0, sizeof(VirtDevice));
vdev->tflags = MPT_TARGET_FLAGS_Q_YES|MPT_TARGET_FLAGS_VALID_INQUIRY;
vdev->ioc_id = hd->ioc->id;
sdev->hostdata = vdev;
starget = scsi_target(sdev);
vtarget = starget->hostdata;
vdev->vtarget = vtarget;
if (vtarget->num_luns == 0) {
vtarget->tflags = MPT_TARGET_FLAGS_Q_YES|MPT_TARGET_FLAGS_VALID_INQUIRY;
hd->Targets[sdev->id] = vtarget;
}
rphy = dev_to_rphy(device->sdev_target->dev.parent);
rphy = dev_to_rphy(sdev->sdev_target->dev.parent);
list_for_each_entry(p, &hd->ioc->sas_topology, list) {
for (i = 0; i < p->num_phys; i++) {
if (p->phy_info[i].attached.sas_address ==
@ -260,7 +264,7 @@ mptsas_slave_alloc(struct scsi_device *device)
vdev->target_id =
p->phy_info[i].attached.target;
vdev->bus_id = p->phy_info[i].attached.bus;
hd->Targets[device->id] = vdev;
vdev->lun = sdev->lun;
goto out;
}
}
@ -271,19 +275,24 @@ mptsas_slave_alloc(struct scsi_device *device)
return -ENODEV;
out:
vdev->num_luns++;
device->hostdata = vdev;
vtarget->ioc_id = vdev->ioc_id;
vtarget->target_id = vdev->target_id;
vtarget->bus_id = vdev->bus_id;
vtarget->num_luns++;
return 0;
}
static struct scsi_host_template mptsas_driver_template = {
.module = THIS_MODULE,
.proc_name = "mptsas",
.proc_info = mptscsih_proc_info,
.name = "MPT SPI Host",
.info = mptscsih_info,
.queuecommand = mptscsih_qcmd,
.target_alloc = mptscsih_target_alloc,
.slave_alloc = mptsas_slave_alloc,
.slave_configure = mptscsih_slave_configure,
.target_destroy = mptscsih_target_destroy,
.slave_destroy = mptscsih_slave_destroy,
.change_queue_depth = mptscsih_change_queue_depth,
.eh_abort_handler = mptscsih_abort,
@ -986,7 +995,6 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc, int *index)
goto out_free_port_info;
list_add_tail(&port_info->list, &ioc->sas_topology);
for (i = 0; i < port_info->num_phys; i++) {
mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
(MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
@ -1133,13 +1141,15 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(MYIOC_s_WARN_FMT
"Skipping because it's not operational!\n",
ioc->name);
return -ENODEV;
error = -ENODEV;
goto out_mptsas_probe;
}
if (!ioc->active) {
printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
ioc->name);
return -ENODEV;
error = -ENODEV;
goto out_mptsas_probe;
}
/* Sanity check - ensure at least 1 port is INITIATOR capable
@ -1163,7 +1173,8 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(MYIOC_s_WARN_FMT
"Unable to register controller with SCSI subsystem\n",
ioc->name);
return -1;
error = -1;
goto out_mptsas_probe;
}
spin_lock_irqsave(&ioc->FreeQlock, flags);
@ -1237,7 +1248,7 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mem = kmalloc(sz, GFP_ATOMIC);
if (mem == NULL) {
error = -ENOMEM;
goto mptsas_probe_failed;
goto out_mptsas_probe;
}
memset(mem, 0, sz);
@ -1255,14 +1266,14 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mem = kmalloc(sz, GFP_ATOMIC);
if (mem == NULL) {
error = -ENOMEM;
goto mptsas_probe_failed;
goto out_mptsas_probe;
}
memset(mem, 0, sz);
hd->Targets = (VirtDevice **) mem;
hd->Targets = (VirtTarget **) mem;
dprintk((KERN_INFO
" Targets @ %p, sz=%d\n", hd->Targets, sz));
" vtarget @ %p, sz=%d\n", hd->Targets, sz));
/* Clear the TM flags
*/
@ -1308,14 +1319,14 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (error) {
dprintk((KERN_ERR MYNAM
"scsi_add_host failed\n"));
goto mptsas_probe_failed;
goto out_mptsas_probe;
}
mptsas_scan_sas_topology(ioc);
return 0;
mptsas_probe_failed:
out_mptsas_probe:
mptscsih_remove(pdev);
return error;

File diff suppressed because it is too large

View File

@ -91,7 +91,9 @@ extern int mptscsih_resume(struct pci_dev *pdev);
extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func);
extern const char * mptscsih_info(struct Scsi_Host *SChost);
extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *));
extern int mptscsih_target_alloc(struct scsi_target *starget);
extern int mptscsih_slave_alloc(struct scsi_device *device);
extern void mptscsih_target_destroy(struct scsi_target *starget);
extern void mptscsih_slave_destroy(struct scsi_device *device);
extern int mptscsih_slave_configure(struct scsi_device *device);
extern int mptscsih_abort(struct scsi_cmnd * SCpnt);

View File

@ -103,13 +103,16 @@ static int mptspiTaskCtx = -1;
static int mptspiInternalCtx = -1; /* Used only for internal commands */
static struct scsi_host_template mptspi_driver_template = {
.module = THIS_MODULE,
.proc_name = "mptspi",
.proc_info = mptscsih_proc_info,
.name = "MPT SPI Host",
.info = mptscsih_info,
.queuecommand = mptscsih_qcmd,
.target_alloc = mptscsih_target_alloc,
.slave_alloc = mptscsih_slave_alloc,
.slave_configure = mptscsih_slave_configure,
.target_destroy = mptscsih_target_destroy,
.slave_destroy = mptscsih_slave_destroy,
.change_queue_depth = mptscsih_change_queue_depth,
.eh_abort_handler = mptscsih_abort,
@ -177,13 +180,15 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(MYIOC_s_WARN_FMT
"Skipping because it's not operational!\n",
ioc->name);
return -ENODEV;
error = -ENODEV;
goto out_mptspi_probe;
}
if (!ioc->active) {
printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
ioc->name);
return -ENODEV;
error = -ENODEV;
goto out_mptspi_probe;
}
/* Sanity check - ensure at least 1 port is INITIATOR capable
@ -208,7 +213,8 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(MYIOC_s_WARN_FMT
"Unable to register controller with SCSI subsystem\n",
ioc->name);
return -1;
error = -1;
goto out_mptspi_probe;
}
spin_lock_irqsave(&ioc->FreeQlock, flags);
@ -286,7 +292,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mem = kmalloc(sz, GFP_ATOMIC);
if (mem == NULL) {
error = -ENOMEM;
goto mptspi_probe_failed;
goto out_mptspi_probe;
}
memset(mem, 0, sz);
@ -304,14 +310,14 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mem = kmalloc(sz, GFP_ATOMIC);
if (mem == NULL) {
error = -ENOMEM;
goto mptspi_probe_failed;
goto out_mptspi_probe;
}
memset(mem, 0, sz);
hd->Targets = (VirtDevice **) mem;
hd->Targets = (VirtTarget **) mem;
dprintk((KERN_INFO
" Targets @ %p, sz=%d\n", hd->Targets, sz));
" vdev @ %p, sz=%d\n", hd->Targets, sz));
/* Clear the TM flags
*/
@ -385,13 +391,13 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if(error) {
dprintk((KERN_ERR MYNAM
"scsi_add_host failed\n"));
goto mptspi_probe_failed;
goto out_mptspi_probe;
}
scsi_scan_host(sh);
return 0;
mptspi_probe_failed:
out_mptspi_probe:
mptscsih_remove(pdev);
return error;

View File

@ -857,7 +857,7 @@ process_extended_message(struct Scsi_Host *host,
printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
host->host_no, pun, lun,
NCR_700_phase[(dsps & 0xf00) >> 8]);
scsi_print_msg(hostdata->msgin);
spi_print_msg(hostdata->msgin);
printk("\n");
/* just reject it */
hostdata->msgout[0] = A_REJECT_MSG;
@ -887,7 +887,7 @@ process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata
#ifdef NCR_700_DEBUG
printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
NCR_700_phase[(dsps & 0xf00) >> 8]);
scsi_print_msg(hostdata->msgin);
spi_print_msg(hostdata->msgin);
printk("\n");
#endif
@ -939,7 +939,7 @@ process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata
host->host_no, pun, lun,
NCR_700_phase[(dsps & 0xf00) >> 8]);
scsi_print_msg(hostdata->msgin);
spi_print_msg(hostdata->msgin);
printk("\n");
/* just reject it */
hostdata->msgout[0] = A_REJECT_MSG;

View File

@ -238,21 +238,23 @@ struct NCR_700_Host_Parameters {
#ifdef CONFIG_53C700_LE_ON_BE
#define bE (hostdata->force_le_on_be ? 0 : 3)
#define bSWAP (hostdata->force_le_on_be)
/* This is terrible, but there's no raw version of ioread32. That means
* that on a be board we swap twice (once in ioread32 and once again to
* get the value correct) */
#define bS_to_io(x) ((hostdata->force_le_on_be) ? (x) : cpu_to_le32(x))
#define bEBus (!hostdata->force_le_on_be)
#elif defined(__BIG_ENDIAN)
#define bE 3
#define bSWAP 0
#define bS_to_io(x) (x)
#elif defined(__LITTLE_ENDIAN)
#define bE 0
#define bSWAP 0
#define bS_to_io(x) (x)
#else
#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined, did you include byteorder.h?"
#endif
#ifndef bEBus
#ifdef CONFIG_53C700_BE_BUS
#define bEBus 1
#else
#define bEBus 0
#endif
#endif
#define bS_to_cpu(x) (bSWAP ? le32_to_cpu(x) : (x))
#define bS_to_host(x) (bSWAP ? cpu_to_le32(x) : (x))
@ -466,14 +468,15 @@ NCR_700_readl(struct Scsi_Host *host, __u32 reg)
{
const struct NCR_700_Host_Parameters *hostdata
= (struct NCR_700_Host_Parameters *)host->hostdata[0];
__u32 value = ioread32(hostdata->base + reg);
__u32 value = bEBus ? ioread32be(hostdata->base + reg) :
ioread32(hostdata->base + reg);
#if 1
/* sanity check the register */
if((reg & 0x3) != 0)
BUG();
#endif
return bS_to_io(value);
return value;
}
static inline void
@ -497,7 +500,8 @@ NCR_700_writel(__u32 value, struct Scsi_Host *host, __u32 reg)
BUG();
#endif
iowrite32(bS_to_io(value), hostdata->base + reg);
bEBus ? iowrite32be(value, hostdata->base + reg):
iowrite32(value, hostdata->base + reg);
}
#endif

View File

@ -282,6 +282,7 @@
#include "scsi.h"
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_spi.h>
#include "53c7xx.h"
#include <linux/stat.h>
#include <linux/stddef.h>
@ -1724,7 +1725,7 @@ NCR53c7xx_run_tests (struct Scsi_Host *host) {
printk ("scsi%d : status ", host->host_no);
scsi_print_status (status);
printk ("\nscsi%d : message ", host->host_no);
scsi_print_msg (&msg);
spi_print_msg(&msg);
printk ("\n");
} else if (hostdata->test_completed == 3) {
printk("scsi%d : test 2 no connection with target %d\n",
@ -2313,7 +2314,7 @@ NCR53c7x0_dstat_sir_intr (struct Scsi_Host *host, struct
printk ("scsi%d : received message", host->host_no);
if (c)
printk (" from target %d lun %d ", c->device->id, c->device->lun);
scsi_print_msg ((unsigned char *) hostdata->msg_buf);
spi_print_msg((unsigned char *) hostdata->msg_buf);
printk("\n");
}
@ -5540,7 +5541,7 @@ print_dsa (struct Scsi_Host *host, u32 *dsa, const char *prefix) {
i > 0 && !check_address ((unsigned long) ptr, 1);
ptr += len, i -= len) {
printk(" ");
len = scsi_print_msg (ptr);
len = spi_print_msg(ptr);
printk("\n");
if (!len)
break;

View File

@ -336,6 +336,7 @@ config SCSI_ACARD
config SCSI_AHA152X
tristate "Adaptec AHA152X/2825 support"
depends on ISA && SCSI && !64BIT
select SCSI_SPI_ATTRS
---help---
This is a driver for the AHA-1510, AHA-1520, AHA-1522, and AHA-2825
SCSI host adapters. It also works for the AVA-1505, but the IRQ etc.
@ -623,6 +624,7 @@ config SCSI_OMIT_FLASHPOINT
config SCSI_DMX3191D
tristate "DMX3191D SCSI support"
depends on PCI && SCSI
select SCSI_SPI_ATTRS
help
This is support for Domex DMX3191D SCSI Host Adapters.
@ -632,6 +634,7 @@ config SCSI_DMX3191D
config SCSI_DTC3280
tristate "DTC3180/3280 SCSI support"
depends on ISA && SCSI
select SCSI_SPI_ATTRS
help
This is support for DTC 3180/3280 SCSI Host Adapters. Please read
the SCSI-HOWTO, available from
@ -752,6 +755,7 @@ config SCSI_GDTH
config SCSI_GENERIC_NCR5380
tristate "Generic NCR5380/53c400 SCSI PIO support"
depends on ISA && SCSI
select SCSI_SPI_ATTRS
---help---
This is a driver for the old NCR 53c80 series of SCSI controllers
on boards using PIO. Most boards such as the Trantor T130 fit this
@ -771,6 +775,7 @@ config SCSI_GENERIC_NCR5380
config SCSI_GENERIC_NCR5380_MMIO
tristate "Generic NCR5380/53c400 SCSI MMIO support"
depends on ISA && SCSI
select SCSI_SPI_ATTRS
---help---
This is a driver for the old NCR 53c80 series of SCSI controllers
on boards using memory mapped I/O.
@ -1254,6 +1259,7 @@ config SCSI_MCA_53C9X
config SCSI_PAS16
tristate "PAS16 SCSI support"
depends on ISA && SCSI
select SCSI_SPI_ATTRS
---help---
This is support for a SCSI host adapter. It is explained in section
3.10 of the SCSI-HOWTO, available from
@ -1423,6 +1429,7 @@ config SCSI_DC390T
config SCSI_T128
tristate "Trantor T128/T128F/T228 SCSI support"
depends on ISA && SCSI
select SCSI_SPI_ATTRS
---help---
This is support for a SCSI host adapter. It is explained in section
3.11 of the SCSI-HOWTO, available from
@ -1681,6 +1688,7 @@ config OKTAGON_SCSI
config ATARI_SCSI
tristate "Atari native SCSI support"
depends on ATARI && SCSI && BROKEN
select SCSI_SPI_ATTRS
---help---
If you have an Atari with built-in NCR5380 SCSI controller (TT,
Falcon, ...) say Y to get it supported. Of course also, if you have
@ -1722,6 +1730,7 @@ config TT_DMA_EMUL
config MAC_SCSI
bool "Macintosh NCR5380 SCSI"
depends on MAC && SCSI=y
select SCSI_SPI_ATTRS
help
This is the NCR 5380 SCSI controller included on most of the 68030
based Macintoshes. If you have one of these say Y and read the
@ -1743,6 +1752,7 @@ config SCSI_MAC_ESP
config MVME147_SCSI
bool "WD33C93 SCSI driver for MVME147"
depends on MVME147 && SCSI=y
select SCSI_SPI_ATTRS
help
Support for the on-board SCSI controller on the Motorola MVME147
single-board computer.
@ -1750,6 +1760,7 @@ config MVME147_SCSI
config MVME16x_SCSI
bool "NCR53C710 SCSI driver for MVME16x"
depends on MVME16x && SCSI && BROKEN
select SCSI_SPI_ATTRS
help
The Motorola MVME162, 166, 167, 172 and 177 boards use the NCR53C710
SCSI controller chip. Almost everyone using one of these boards
@ -1758,6 +1769,7 @@ config MVME16x_SCSI
config BVME6000_SCSI
bool "NCR53C710 SCSI driver for BVME6000"
depends on BVME6000 && SCSI && BROKEN
select SCSI_SPI_ATTRS
help
The BVME4000 and BVME6000 boards from BVM Ltd use the NCR53C710
SCSI controller chip. Almost everyone using one of these boards
@ -1774,6 +1786,7 @@ config SCSI_NCR53C7xx_FAST
config SUN3_SCSI
tristate "Sun3 NCR5380 SCSI"
depends on SUN3 && SCSI && BROKEN
select SCSI_SPI_ATTRS
help
This option will enable support for the OBIO (onboard io) NCR5380
SCSI controller found in the Sun 3/50 and 3/60, as well as for

View File

@ -87,6 +87,7 @@
* the high level code.
*/
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>
#ifndef NDEBUG
#define NDEBUG 0
@ -2377,7 +2378,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
* 3..length+1 arguments
*
* Start the extended message buffer with the EXTENDED_MESSAGE
* byte, since scsi_print_msg() wants the whole thing.
* byte, since spi_print_msg() wants the whole thing.
*/
extended_msg[0] = EXTENDED_MESSAGE;
/* Accept first byte by clearing ACK */
@ -2424,7 +2425,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
default:
if (!tmp) {
printk("scsi%d: rejecting message ", instance->host_no);
scsi_print_msg(extended_msg);
spi_print_msg(extended_msg);
printk("\n");
} else if (tmp != EXTENDED_MESSAGE)
scmd_printk(KERN_INFO, cmd,
@ -2560,7 +2561,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) {
if (!(msg[0] & 0x80)) {
printk(KERN_ERR "scsi%d : expecting IDENTIFY message, got ", instance->host_no);
scsi_print_msg(msg);
spi_print_msg(msg);
abort = 1;
} else {
/* Accept message by clearing ACK */

View File

@ -259,6 +259,7 @@
#include "scsi.h"
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_spi.h>
#include "aha152x.h"
@ -1845,7 +1846,7 @@ static void msgi_run(struct Scsi_Host *shpnt)
#if defined(AHA152X_DEBUG)
if (HOSTDATA(shpnt)->debug & debug_msgi) {
printk(INFO_LEAD "inbound message %02x ", CMDINFO(CURRENT_SC), MSGI(0));
scsi_print_msg(&MSGI(0));
spi_print_msg(&MSGI(0));
printk("\n");
}
#endif
@ -1933,7 +1934,7 @@ static void msgi_run(struct Scsi_Host *shpnt)
break;
printk(INFO_LEAD, CMDINFO(CURRENT_SC));
scsi_print_msg(&MSGI(0));
spi_print_msg(&MSGI(0));
printk("\n");
ticks = (MSGI(3) * 4 + 49) / 50;
@ -2031,7 +2032,7 @@ static void msgo_init(struct Scsi_Host *shpnt)
int i;
printk(DEBUG_LEAD "messages( ", CMDINFO(CURRENT_SC));
for (i=0; i<MSGOLEN; i+=scsi_print_msg(&MSGO(i)), printk(" "))
for (i=0; i<MSGOLEN; i+=spi_print_msg(&MSGO(i)), printk(" "))
;
printk(")\n");
}

View File

@ -1064,6 +1064,7 @@ ahd_linux_register_host(struct ahd_softc *ahd, struct scsi_host_template *templa
struct Scsi_Host *host;
char *new_name;
u_long s;
int retval;
template->name = ahd->description;
host = scsi_host_alloc(template, sizeof(struct ahd_softc *));
@ -1096,9 +1097,15 @@ ahd_linux_register_host(struct ahd_softc *ahd, struct scsi_host_template *templa
host->transportt = ahd_linux_transport_template;
scsi_add_host(host, &ahd->dev_softc->dev); /* XXX handle failure */
retval = scsi_add_host(host, &ahd->dev_softc->dev);
if (retval) {
printk(KERN_WARNING "aic79xx: scsi_add_host failed\n");
scsi_host_put(host);
return retval;
}
scsi_scan_host(host);
return (0);
return 0;
}
uint64_t

View File

@ -1061,10 +1061,11 @@ uint32_t aic7xxx_verbose;
int
ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *template)
{
char buf[80];
struct Scsi_Host *host;
char buf[80];
struct Scsi_Host *host;
char *new_name;
u_long s;
u_long s;
int retval;
template->name = ahc->description;
host = scsi_host_alloc(template, sizeof(struct ahc_softc *));
@ -1097,9 +1098,16 @@ ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *templa
host->transportt = ahc_linux_transport_template;
scsi_add_host(host, (ahc->dev_softc ? &ahc->dev_softc->dev : NULL)); /* XXX handle failure */
retval = scsi_add_host(host,
(ahc->dev_softc ? &ahc->dev_softc->dev : NULL));
if (retval) {
printk(KERN_WARNING "aic7xxx: scsi_add_host failed\n");
scsi_host_put(host);
return retval;
}
scsi_scan_host(host);
return (0);
return 0;
}
/*

View File

@ -4,6 +4,7 @@
config SCSI_ACORNSCSI_3
tristate "Acorn SCSI card (aka30) support"
depends on ARCH_ACORN && SCSI && BROKEN
select SCSI_SPI_ATTRS
help
This enables support for the Acorn SCSI card (aka30). If you have an
Acorn system with one of these, say Y. If unsure, say N.

View File

@ -152,6 +152,7 @@
#include "../scsi.h"
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_spi.h>
#include "acornscsi.h"
#include "msgqueue.h"
#include "scsi.h"
@ -1370,7 +1371,7 @@ void acornscsi_sendmessage(AS_Host *host)
host->scsi.last_message = msg->msg[0];
#if (DEBUG & DEBUG_MESSAGES)
scsi_print_msg(msg->msg);
spi_print_msg(msg->msg);
#endif
break;
@ -1392,7 +1393,7 @@ void acornscsi_sendmessage(AS_Host *host)
while ((msg = msgqueue_getmsg(&host->scsi.msgs, msgnr++)) != NULL) {
unsigned int i;
#if (DEBUG & DEBUG_MESSAGES)
scsi_print_msg(msg);
spi_print_msg(msg);
#endif
i = 0;
if (acornscsi_write_pio(host, msg->msg, &i, msg->length, 1000000))
@ -1488,7 +1489,7 @@ void acornscsi_message(AS_Host *host)
#if (DEBUG & DEBUG_MESSAGES)
printk("scsi%d.%c: message in: ",
host->host->host_no, acornscsi_target(host));
scsi_print_msg(message);
spi_print_msg(message);
printk("\n");
#endif

View File

@ -74,6 +74,7 @@
* the high level code.
*/
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>
#if (NDEBUG & NDEBUG_LISTS)
#define LIST(x,y) \
@ -2355,7 +2356,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
* 3..length+1 arguments
*
* Start the extended message buffer with the EXTENDED_MESSAGE
* byte, since scsi_print_msg() wants the whole thing.
* byte, since spi_print_msg() wants the whole thing.
*/
extended_msg[0] = EXTENDED_MESSAGE;
/* Accept first byte by clearing ACK */
@ -2408,7 +2409,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
default:
if (!tmp) {
printk(KERN_DEBUG "scsi%d: rejecting message ", HOSTNO);
scsi_print_msg (extended_msg);
spi_print_msg(extended_msg);
printk("\n");
} else if (tmp != EXTENDED_MESSAGE)
printk(KERN_DEBUG "scsi%d: rejecting unknown "
@ -2541,7 +2542,7 @@ static void NCR5380_reselect (struct Scsi_Host *instance)
if (!(msg[0] & 0x80)) {
printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO);
scsi_print_msg(msg);
spi_print_msg(msg);
do_abort(instance);
return;
}

View File

@ -75,7 +75,7 @@ static int vendor_counts[CH_TYPES-4];
module_param_array(vendor_firsts, int, NULL, 0444);
module_param_array(vendor_counts, int, NULL, 0444);
static char *vendor_labels[CH_TYPES-4] = {
static const char * vendor_labels[CH_TYPES-4] = {
"v0", "v1", "v2", "v3"
};
// module_param_string_array(vendor_labels, NULL, 0444);
@ -140,7 +140,7 @@ static struct file_operations changer_fops =
#endif
};
static struct {
static const struct {
unsigned char sense;
unsigned char asc;
unsigned char ascq;

View File

@ -1065,7 +1065,7 @@ struct error_info2 {
const char * fmt;
};
static struct error_info2 additional2[] =
static const struct error_info2 additional2[] =
{
{0x40,0x00,0x7f,"Ram failure (%x)"},
{0x40,0x80,0xff,"Diagnostic failure on component (%x)"},
@ -1077,7 +1077,7 @@ static struct error_info2 additional2[] =
};
/* description of the sense key values */
static const char *snstext[] = {
static const char * const snstext[] = {
"No Sense", /* 0: There is no sense information */
"Recovered Error", /* 1: The last command completed successfully
but used error correction */
@ -1278,114 +1278,6 @@ void scsi_print_req_sense(const char *devclass, struct scsi_request *sreq)
}
EXPORT_SYMBOL(scsi_print_req_sense);
#ifdef CONFIG_SCSI_CONSTANTS
static const char *one_byte_msgs[] = {
/* 0x00 */ "Command Complete", NULL, "Save Pointers",
/* 0x03 */ "Restore Pointers", "Disconnect", "Initiator Error",
/* 0x06 */ "Abort", "Message Reject", "Nop", "Message Parity Error",
/* 0x0a */ "Linked Command Complete", "Linked Command Complete w/flag",
/* 0x0c */ "Bus device reset", "Abort Tag", "Clear Queue",
/* 0x0f */ "Initiate Recovery", "Release Recovery"
};
#define NO_ONE_BYTE_MSGS (sizeof(one_byte_msgs) / sizeof (const char *))
static const char *two_byte_msgs[] = {
/* 0x20 */ "Simple Queue Tag", "Head of Queue Tag", "Ordered Queue Tag"
/* 0x23 */ "Ignore Wide Residue"
};
#define NO_TWO_BYTE_MSGS (sizeof(two_byte_msgs) / sizeof (const char *))
static const char *extended_msgs[] = {
/* 0x00 */ "Modify Data Pointer", "Synchronous Data Transfer Request",
/* 0x02 */ "SCSI-I Extended Identify", "Wide Data Transfer Request"
};
#define NO_EXTENDED_MSGS (sizeof(two_byte_msgs) / sizeof (const char *))
int scsi_print_msg (const unsigned char *msg)
{
int len = 0, i;
if (msg[0] == EXTENDED_MESSAGE) {
len = 3 + msg[1];
if (msg[2] < NO_EXTENDED_MSGS)
printk ("%s ", extended_msgs[msg[2]]);
else
printk ("Extended Message, reserved code (0x%02x) ",
(int) msg[2]);
switch (msg[2]) {
case EXTENDED_MODIFY_DATA_POINTER:
printk("pointer = %d", (int) (msg[3] << 24) |
(msg[4] << 16) | (msg[5] << 8) | msg[6]);
break;
case EXTENDED_SDTR:
printk("period = %d ns, offset = %d",
(int) msg[3] * 4, (int) msg[4]);
break;
case EXTENDED_WDTR:
printk("width = 2^%d bytes", msg[3]);
break;
default:
for (i = 2; i < len; ++i)
printk("%02x ", msg[i]);
}
/* Identify */
} else if (msg[0] & 0x80) {
printk("Identify disconnect %sallowed %s %d ",
(msg[0] & 0x40) ? "" : "not ",
(msg[0] & 0x20) ? "target routine" : "lun",
msg[0] & 0x7);
len = 1;
/* Normal One byte */
} else if (msg[0] < 0x1f) {
if (msg[0] < NO_ONE_BYTE_MSGS)
printk(one_byte_msgs[msg[0]]);
else
printk("reserved (%02x) ", msg[0]);
len = 1;
/* Two byte */
} else if (msg[0] <= 0x2f) {
if ((msg[0] - 0x20) < NO_TWO_BYTE_MSGS)
printk("%s %02x ", two_byte_msgs[msg[0] - 0x20],
msg[1]);
else
printk("reserved two byte (%02x %02x) ",
msg[0], msg[1]);
len = 2;
} else
printk("reserved");
return len;
}
EXPORT_SYMBOL(scsi_print_msg);
#else /* ifndef CONFIG_SCSI_CONSTANTS */
int scsi_print_msg (const unsigned char *msg)
{
int len = 0, i;
if (msg[0] == EXTENDED_MESSAGE) {
len = 3 + msg[1];
for (i = 0; i < len; ++i)
printk("%02x ", msg[i]);
/* Identify */
} else if (msg[0] & 0x80) {
printk("%02x ", msg[0]);
len = 1;
/* Normal One byte */
} else if (msg[0] < 0x1f) {
printk("%02x ", msg[0]);
len = 1;
/* Two byte */
} else if (msg[0] <= 0x2f) {
printk("%02x %02x", msg[0], msg[1]);
len = 2;
} else
printk("%02x ", msg[0]);
return len;
}
EXPORT_SYMBOL(scsi_print_msg);
#endif /* ! CONFIG_SCSI_CONSTANTS */
void scsi_print_command(struct scsi_cmnd *cmd)
{
/* Assume appended output (i.e. not at start of line) */
@ -1397,7 +1289,7 @@ EXPORT_SYMBOL(scsi_print_command);
#ifdef CONFIG_SCSI_CONSTANTS
static const char * hostbyte_table[]={
static const char * const hostbyte_table[]={
"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY"};
@ -1422,12 +1314,12 @@ void scsi_print_hostbyte(int scsiresult)
#ifdef CONFIG_SCSI_CONSTANTS
static const char * driverbyte_table[]={
static const char * const driverbyte_table[]={
"DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR",
"DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"};
#define NUM_DRIVERBYTE_STRS (sizeof(driverbyte_table) / sizeof(const char *))
static const char * driversuggest_table[]={"SUGGEST_OK",
static const char * const driversuggest_table[]={"SUGGEST_OK",
"SUGGEST_RETRY", "SUGGEST_ABORT", "SUGGEST_REMAP", "SUGGEST_DIE",
"SUGGEST_5", "SUGGEST_6", "SUGGEST_7", "SUGGEST_SENSE"};
#define NUM_SUGGEST_STRS (sizeof(driversuggest_table) / sizeof(const char *))

View File

@ -5887,7 +5887,12 @@ static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE);
if (ioa_cfg->needs_hard_reset) {
ioa_cfg->needs_hard_reset = 0;
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
} else
_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
IPR_SHUTDOWN_NONE);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
@ -6264,6 +6269,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
unsigned long ipr_regs_pci;
void __iomem *ipr_regs;
u32 rc = PCIBIOS_SUCCESSFUL;
volatile u32 mask, uproc;
ENTER;
@ -6356,6 +6362,15 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
goto cleanup_nomem;
}
/*
* If HRRQ updated interrupt is not masked, or reset alert is set,
* the card is in an unknown state and needs a hard reset
*/
mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
ioa_cfg->needs_hard_reset = 1;
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);

View File

@ -36,8 +36,8 @@
/*
* Literals
*/
#define IPR_DRIVER_VERSION "2.1.0"
#define IPR_DRIVER_DATE "(October 31, 2005)"
#define IPR_DRIVER_VERSION "2.1.1"
#define IPR_DRIVER_DATE "(November 15, 2005)"
/*
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@ -922,6 +922,7 @@ struct ipr_ioa_cfg {
u8 dump_taken:1;
u8 allow_cmds:1;
u8 allow_ml_add_del:1;
u8 needs_hard_reset:1;
enum ipr_cache_state cache_state;
u16 type; /* CCIN of the card */

View File

@ -49,7 +49,7 @@ MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
"Alex Aizman <itn780@yahoo.com>");
MODULE_DESCRIPTION("iSCSI/TCP data-path");
MODULE_LICENSE("GPL");
MODULE_VERSION("0:4.409");
MODULE_VERSION("0:4.445");
/* #define DEBUG_TCP */
/* #define DEBUG_SCSI */
#define DEBUG_ASSERT
@ -581,10 +581,16 @@ iscsi_hdr_recv(struct iscsi_conn *conn)
crypto_digest_digest(conn->rx_tfm, &sg, 1, (u8 *)&cdgst);
rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) +
conn->in.ahslen);
if (cdgst != rdgst) {
printk(KERN_ERR "iscsi_tcp: itt %x: hdrdgst error "
"recv 0x%x calc 0x%x\n", conn->in.itt, rdgst,
cdgst);
return ISCSI_ERR_HDR_DGST;
}
}
/* save opcode for later */
conn->in.opcode = hdr->opcode;
conn->in.opcode = hdr->opcode & ISCSI_OPCODE_MASK;
/* verify itt (itt encoding: age+cid+itt) */
if (hdr->itt != cpu_to_be32(ISCSI_RESERVED_TAG)) {
@ -610,13 +616,6 @@ iscsi_hdr_recv(struct iscsi_conn *conn)
conn->in.ahslen, conn->in.datalen);
if (conn->in.itt < session->cmds_max) {
if (conn->hdrdgst_en && cdgst != rdgst) {
printk(KERN_ERR "iscsi_tcp: itt %x: hdrdgst error "
"recv 0x%x calc 0x%x\n", conn->in.itt, rdgst,
cdgst);
return ISCSI_ERR_HDR_DGST;
}
ctask = (struct iscsi_cmd_task *)session->cmds[conn->in.itt];
if (!ctask->sc) {
@ -642,9 +641,7 @@ iscsi_hdr_recv(struct iscsi_conn *conn)
switch(conn->in.opcode) {
case ISCSI_OP_SCSI_CMD_RSP:
BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
if (ctask->hdr.flags & ISCSI_FLAG_CMD_WRITE)
rc = iscsi_cmd_rsp(conn, ctask);
else if (!conn->in.datalen)
if (!conn->in.datalen)
rc = iscsi_cmd_rsp(conn, ctask);
else
/*
@ -666,8 +663,7 @@ iscsi_hdr_recv(struct iscsi_conn *conn)
break;
case ISCSI_OP_R2T:
BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
if (ctask->hdr.flags & ISCSI_FLAG_CMD_WRITE &&
ctask->sc->sc_data_direction == DMA_TO_DEVICE)
if (ctask->sc->sc_data_direction == DMA_TO_DEVICE)
rc = iscsi_r2t_rsp(conn, ctask);
else
rc = ISCSI_ERR_PROTO;
@ -906,11 +902,20 @@ partial_sg_digest_update(struct iscsi_conn *conn, struct scatterlist *sg,
crypto_digest_update(conn->data_rx_tfm, &temp, 1);
}
static void
iscsi_recv_digest_update(struct iscsi_conn *conn, char* buf, int len)
{
struct scatterlist tmp;
sg_init_one(&tmp, buf, len);
crypto_digest_update(conn->data_rx_tfm, &tmp, 1);
}
static int iscsi_scsi_data_in(struct iscsi_conn *conn)
{
struct iscsi_cmd_task *ctask = conn->in.ctask;
struct scsi_cmnd *sc = ctask->sc;
struct scatterlist tmp, *sg;
struct scatterlist *sg;
int i, offset, rc = 0;
BUG_ON((void*)ctask != sc->SCp.ptr);
@ -924,10 +929,8 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
sc->request_bufflen, ctask->data_offset);
if (rc == -EAGAIN)
return rc;
if (conn->datadgst_en) {
sg_init_one(&tmp, sc->request_buffer, i);
crypto_digest_update(conn->data_rx_tfm, &tmp, 1);
}
if (conn->datadgst_en)
iscsi_recv_digest_update(conn, sc->request_buffer, i);
rc = 0;
goto done;
}
@ -1021,6 +1024,9 @@ iscsi_data_recv(struct iscsi_conn *conn)
conn->in.hdr = &conn->hdr;
conn->senselen = (conn->data[0] << 8) | conn->data[1];
rc = iscsi_cmd_rsp(conn, conn->in.ctask);
if (!rc && conn->datadgst_en)
iscsi_recv_digest_update(conn, conn->data,
conn->in.datalen);
}
break;
case ISCSI_OP_TEXT_RSP:
@ -1045,6 +1051,11 @@ iscsi_data_recv(struct iscsi_conn *conn)
rc = iscsi_recv_pdu(iscsi_handle(conn), conn->in.hdr,
conn->data, conn->in.datalen);
if (!rc && conn->datadgst_en &&
conn->in.opcode != ISCSI_OP_LOGIN_RSP)
iscsi_recv_digest_update(conn, conn->data,
conn->in.datalen);
if (mtask && conn->login_mtask != mtask) {
spin_lock(&session->lock);
__kfifo_put(session->mgmtpool.queue, (void*)&mtask,
@ -1053,6 +1064,8 @@ iscsi_data_recv(struct iscsi_conn *conn)
}
}
break;
case ISCSI_OP_ASYNC_EVENT:
case ISCSI_OP_REJECT:
default:
BUG_ON(1);
}
@ -1114,8 +1127,7 @@ more:
*/
rc = iscsi_hdr_recv(conn);
if (!rc && conn->in.datalen) {
if (conn->datadgst_en &&
conn->in.opcode == ISCSI_OP_SCSI_DATA_IN) {
if (conn->datadgst_en) {
BUG_ON(!conn->data_rx_tfm);
crypto_digest_init(conn->data_rx_tfm);
}
@ -1127,26 +1139,24 @@ more:
}
if (conn->in_progress == IN_PROGRESS_DDIGEST_RECV) {
uint32_t recv_digest;
debug_tcp("extra data_recv offset %d copy %d\n",
conn->in.offset, conn->in.copy);
if (conn->in.opcode == ISCSI_OP_SCSI_DATA_IN) {
uint32_t recv_digest;
skb_copy_bits(conn->in.skb, conn->in.offset,
&recv_digest, 4);
conn->in.offset += 4;
conn->in.copy -= 4;
if (recv_digest != conn->in.datadgst) {
debug_tcp("iscsi_tcp: data digest error!"
"0x%x != 0x%x\n", recv_digest,
conn->in.datadgst);
iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
return 0;
} else {
debug_tcp("iscsi_tcp: data digest match!"
"0x%x == 0x%x\n", recv_digest,
conn->in.datadgst);
conn->in_progress = IN_PROGRESS_WAIT_HEADER;
}
skb_copy_bits(conn->in.skb, conn->in.offset,
&recv_digest, 4);
conn->in.offset += 4;
conn->in.copy -= 4;
if (recv_digest != conn->in.datadgst) {
debug_tcp("iscsi_tcp: data digest error!"
"0x%x != 0x%x\n", recv_digest,
conn->in.datadgst);
iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
return 0;
} else {
debug_tcp("iscsi_tcp: data digest match!"
"0x%x == 0x%x\n", recv_digest,
conn->in.datadgst);
conn->in_progress = IN_PROGRESS_WAIT_HEADER;
}
}
@ -1167,8 +1177,7 @@ more:
}
conn->in.copy -= conn->in.padding;
conn->in.offset += conn->in.padding;
if (conn->datadgst_en &&
conn->in.opcode == ISCSI_OP_SCSI_DATA_IN) {
if (conn->datadgst_en) {
if (conn->in.padding) {
debug_tcp("padding -> %d\n", conn->in.padding);
memset(pad, 0, conn->in.padding);
@ -1237,8 +1246,9 @@ iscsi_tcp_state_change(struct sock *sk)
conn = (struct iscsi_conn*)sk->sk_user_data;
session = conn->session;
if (sk->sk_state == TCP_CLOSE_WAIT ||
sk->sk_state == TCP_CLOSE) {
if ((sk->sk_state == TCP_CLOSE_WAIT ||
sk->sk_state == TCP_CLOSE) &&
!atomic_read(&sk->sk_rmem_alloc)) {
debug_tcp("iscsi_tcp_state_change: TCP_CLOSE|TCP_CLOSE_WAIT\n");
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
}
@ -2388,6 +2398,15 @@ fault:
return 0;
}
static int
iscsi_change_queue_depth(struct scsi_device *sdev, int depth)
{
if (depth > ISCSI_MAX_CMD_PER_LUN)
depth = ISCSI_MAX_CMD_PER_LUN;
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
return sdev->queue_depth;
}
static int
iscsi_pool_init(struct iscsi_queue *q, int max, void ***items, int item_size)
{
@ -2853,8 +2872,11 @@ iscsi_conn_stop(iscsi_connh_t connh, int flag)
* in hdr_extract() and will be re-negotiated at
* set_param() time.
*/
if (flag == STOP_CONN_RECOVER)
if (flag == STOP_CONN_RECOVER) {
conn->hdr_size = sizeof(struct iscsi_hdr);
conn->hdrdgst_en = 0;
conn->datadgst_en = 0;
}
}
up(&conn->xmitsema);
}
@ -3247,13 +3269,14 @@ iscsi_r2tpool_free(struct iscsi_session *session)
static struct scsi_host_template iscsi_sht = {
.name = "iSCSI Initiator over TCP/IP, v."
ISCSI_VERSION_STR,
.queuecommand = iscsi_queuecommand,
.queuecommand = iscsi_queuecommand,
.change_queue_depth = iscsi_change_queue_depth,
.can_queue = ISCSI_XMIT_CMDS_MAX - 1,
.sg_tablesize = ISCSI_SG_TABLESIZE,
.cmd_per_lun = ISCSI_CMD_PER_LUN,
.eh_abort_handler = iscsi_eh_abort,
.eh_host_reset_handler = iscsi_eh_host_reset,
.use_clustering = DISABLE_CLUSTERING,
.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
.eh_abort_handler = iscsi_eh_abort,
.eh_host_reset_handler = iscsi_eh_host_reset,
.use_clustering = DISABLE_CLUSTERING,
.proc_name = "iscsi_tcp",
.this_id = -1,
};

View File

@ -71,7 +71,8 @@
#define ISCSI_MGMT_CMDS_MAX 32 /* must be power of 2 */
#define ISCSI_MGMT_ITT_OFFSET 0xa00
#define ISCSI_SG_TABLESIZE SG_ALL
#define ISCSI_CMD_PER_LUN 128
#define ISCSI_DEF_CMD_PER_LUN 32
#define ISCSI_MAX_CMD_PER_LUN 128
#define ISCSI_TCP_MAX_CMD_LEN 16
#define ITT_MASK (0xfff)

View File

@ -29,9 +29,10 @@ struct lpfc_sli2_slim;
#define LPFC_LC_HBA_Q_DEPTH 1024 /* max cmds per low cost hba */
#define LPFC_LP101_HBA_Q_DEPTH 128 /* max cmds per low cost hba */
#define LPFC_CMD_PER_LUN 30 /* max outstanding cmds per lun */
#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */
#define LPFC_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
/* Define macros for 64 bit support */
#define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr)))
@ -45,6 +46,11 @@ struct lpfc_sli2_slim;
#define MAX_HBAEVT 32
enum lpfc_polling_flags {
ENABLE_FCP_RING_POLLING = 0x1,
DISABLE_FCP_RING_INT = 0x2
};
/* Provide DMA memory definitions the driver uses per port instance. */
struct lpfc_dmabuf {
struct list_head list;
@ -167,6 +173,7 @@ struct lpfc_hba {
dma_addr_t slim2p_mapping;
uint16_t pci_cfg_value;
struct semaphore hba_can_block;
uint32_t hba_state;
#define LPFC_INIT_START 1 /* Initial state after board reset */
@ -286,6 +293,8 @@ struct lpfc_hba {
uint32_t cfg_fcp_bind_method;
uint32_t cfg_discovery_threads;
uint32_t cfg_max_luns;
uint32_t cfg_poll;
uint32_t cfg_poll_tmo;
uint32_t cfg_sg_seg_cnt;
uint32_t cfg_sg_dma_buf_size;
@ -337,7 +346,9 @@ struct lpfc_hba {
#define VPD_PORT 0x8 /* valid vpd port data */
#define VPD_MASK 0xf /* mask for any vpd data */
struct timer_list fcp_poll_timer;
struct timer_list els_tmofunc;
/*
* stat counters
*/
@ -348,6 +359,7 @@ struct lpfc_hba {
struct lpfc_sysfs_mbox sysfs_mbox;
/* fastpath list. */
spinlock_t scsi_buf_list_lock;
struct list_head lpfc_scsi_buf_list;
uint32_t total_scsi_bufs;
struct list_head lpfc_iocb_list;

View File

@ -278,6 +278,71 @@ lpfc_board_online_store(struct class_device *cdev, const char *buf,
return -EIO;
}
static ssize_t
lpfc_poll_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *host = class_to_shost(cdev);
struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
}
static ssize_t
lpfc_poll_store(struct class_device *cdev, const char *buf,
size_t count)
{
struct Scsi_Host *host = class_to_shost(cdev);
struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
uint32_t creg_val;
uint32_t old_val;
int val=0;
if (!isdigit(buf[0]))
return -EINVAL;
if (sscanf(buf, "%i", &val) != 1)
return -EINVAL;
if ((val & 0x3) != val)
return -EINVAL;
spin_lock_irq(phba->host->host_lock);
old_val = phba->cfg_poll;
if (val & ENABLE_FCP_RING_POLLING) {
if ((val & DISABLE_FCP_RING_INT) &&
!(old_val & DISABLE_FCP_RING_INT)) {
creg_val = readl(phba->HCregaddr);
creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
lpfc_poll_start_timer(phba);
}
} else if (val != 0x0) {
spin_unlock_irq(phba->host->host_lock);
return -EINVAL;
}
if (!(val & DISABLE_FCP_RING_INT) &&
(old_val & DISABLE_FCP_RING_INT))
{
spin_unlock_irq(phba->host->host_lock);
del_timer(&phba->fcp_poll_timer);
spin_lock_irq(phba->host->host_lock);
creg_val = readl(phba->HCregaddr);
creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
}
phba->cfg_poll = val;
spin_unlock_irq(phba->host->host_lock);
return strlen(buf);
}
#define lpfc_param_show(attr) \
static ssize_t \
@ -416,6 +481,15 @@ static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show,
static CLASS_DEVICE_ATTR(board_online, S_IRUGO | S_IWUSR,
lpfc_board_online_show, lpfc_board_online_store);
static int lpfc_poll = 0;
module_param(lpfc_poll, int, 0);
MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
" 0 - none,"
" 1 - poll with interrupts enabled"
" 3 - poll and disable FCP ring interrupts");
static CLASS_DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
lpfc_poll_show, lpfc_poll_store);
/*
# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
@ -523,10 +597,10 @@ LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
# is 0. Default value of cr_count is 1. The cr_count feature is disabled if
# cr_delay is set to 0.
*/
LPFC_ATTR(cr_delay, 0, 0, 63, "A count of milliseconds after which an"
LPFC_ATTR_RW(cr_delay, 0, 0, 63, "A count of milliseconds after which an"
"interrupt response is generated");
LPFC_ATTR(cr_count, 1, 1, 255, "A count of I/O completions after which an"
LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an"
"interrupt response is generated");
/*
@ -553,6 +627,13 @@ LPFC_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands"
LPFC_ATTR_R(max_luns, 256, 1, 32768,
"Maximum number of LUNs per target driver will support");
/*
# lpfc_poll_tmo: Milliseconds the driver will wait between polls of the FCP ring.
# Value range is [1,255], default value is 10.
*/
LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
"Milliseconds driver will wait between polling FCP ring");
struct class_device_attribute *lpfc_host_attrs[] = {
&class_device_attr_info,
&class_device_attr_serialnum,
@ -575,11 +656,15 @@ struct class_device_attribute *lpfc_host_attrs[] = {
&class_device_attr_lpfc_topology,
&class_device_attr_lpfc_scan_down,
&class_device_attr_lpfc_link_speed,
&class_device_attr_lpfc_cr_delay,
&class_device_attr_lpfc_cr_count,
&class_device_attr_lpfc_fdmi_on,
&class_device_attr_lpfc_max_luns,
&class_device_attr_nport_evt_cnt,
&class_device_attr_management_version,
&class_device_attr_board_online,
&class_device_attr_lpfc_poll,
&class_device_attr_lpfc_poll_tmo,
NULL,
};
@ -1292,6 +1377,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
lpfc_discovery_threads_init(phba, lpfc_discovery_threads);
lpfc_max_luns_init(phba, lpfc_max_luns);
lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
phba->cfg_poll = lpfc_poll;
/*
* The total number of segments is the configuration value plus 2
View File
@ -143,6 +143,9 @@ LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
int lpfc_mem_alloc(struct lpfc_hba *);
void lpfc_mem_free(struct lpfc_hba *);
void lpfc_poll_timeout(unsigned long ptr);
void lpfc_poll_start_timer(struct lpfc_hba * phba);
void lpfc_sli_poll_fcp_ring(struct lpfc_hba * hba);
struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
void lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
uint16_t lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
View File
@ -73,6 +73,8 @@ struct lpfc_nodelist {
struct lpfc_hba *nlp_phba;
struct lpfc_work_evt nodev_timeout_evt;
struct lpfc_work_evt els_retry_evt;
unsigned long last_ramp_up_time; /* jiffy of last ramp up */
unsigned long last_q_full_time; /* jiffy of last queue full */
};
/* Defines for nlp_flag (uint32) */
View File
@ -720,6 +720,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
(irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
(irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
}
@ -869,6 +870,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
(irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
(irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
goto out;
}
@ -1054,6 +1056,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
(irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
(irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
}
@ -1205,6 +1208,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
(irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
(irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
goto out;
}
View File
@ -1017,12 +1017,7 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
rport_ids.port_id = ndlp->nlp_DID;
rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
if (ndlp->nlp_type & NLP_FCP_TARGET)
rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
if (ndlp->nlp_type & NLP_FCP_INITIATOR)
rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
scsi_block_requests(phba->host);
ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids);
if (!rport) {
dev_printk(KERN_WARNING, &phba->pcidev->dev,
@ -1039,7 +1034,16 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
}
rdata = rport->dd_data;
rdata->pnode = ndlp;
scsi_unblock_requests(phba->host);
if (ndlp->nlp_type & NLP_FCP_TARGET)
rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
if (ndlp->nlp_type & NLP_FCP_INITIATOR)
rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
fc_remote_port_rolechg(rport, rport_ids.roles);
return;
}
@ -1053,9 +1057,7 @@ lpfc_unregister_remote_port(struct lpfc_hba * phba,
ndlp->rport = NULL;
rdata->pnode = NULL;
scsi_block_requests(phba->host);
fc_remote_port_delete(rport);
scsi_unblock_requests(phba->host);
return;
}
View File
@ -266,9 +266,11 @@ struct lpfc_name {
struct {
#ifdef __BIG_ENDIAN_BITFIELD
uint8_t nameType:4; /* FC Word 0, bit 28:31 */
uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit 8:11 of IEEE ext */
uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit
8:11 of IEEE ext */
#else /* __LITTLE_ENDIAN_BITFIELD */
uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit 8:11 of IEEE ext */
uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit
8:11 of IEEE ext */
uint8_t nameType:4; /* FC Word 0, bit 28:31 */
#endif
@ -278,7 +280,8 @@ struct lpfc_name {
#define NAME_IP_TYPE 0x4 /* IP address */
#define NAME_CCITT_TYPE 0xC
#define NAME_CCITT_GR_TYPE 0xE
uint8_t IEEEextLsb; /* FC Word 0, bit 16:23, IEEE extended Lsb */
uint8_t IEEEextLsb; /* FC Word 0, bit 16:23, IEEE
extended Lsb */
uint8_t IEEE[6]; /* FC IEEE address */
} s;
uint8_t wwn[8];
@ -1024,23 +1027,38 @@ typedef struct {
/* Start FireFly Register definitions */
#define PCI_VENDOR_ID_EMULEX 0x10df
#define PCI_DEVICE_ID_FIREFLY 0x1ae5
#define PCI_DEVICE_ID_SUPERFLY 0xf700
#define PCI_DEVICE_ID_DRAGONFLY 0xf800
#define PCI_DEVICE_ID_RFLY 0xf095
#define PCI_DEVICE_ID_PFLY 0xf098
#define PCI_DEVICE_ID_LP101 0xf0a1
#define PCI_DEVICE_ID_TFLY 0xf0a5
#define PCI_DEVICE_ID_BSMB 0xf0d1
#define PCI_DEVICE_ID_BMID 0xf0d5
#define PCI_DEVICE_ID_ZSMB 0xf0e1
#define PCI_DEVICE_ID_ZMID 0xf0e5
#define PCI_DEVICE_ID_NEPTUNE 0xf0f5
#define PCI_DEVICE_ID_NEPTUNE_SCSP 0xf0f6
#define PCI_DEVICE_ID_NEPTUNE_DCSP 0xf0f7
#define PCI_DEVICE_ID_SUPERFLY 0xf700
#define PCI_DEVICE_ID_DRAGONFLY 0xf800
#define PCI_DEVICE_ID_CENTAUR 0xf900
#define PCI_DEVICE_ID_PEGASUS 0xf980
#define PCI_DEVICE_ID_THOR 0xfa00
#define PCI_DEVICE_ID_VIPER 0xfb00
#define PCI_DEVICE_ID_LP10000S 0xfc00
#define PCI_DEVICE_ID_LP11000S 0xfc10
#define PCI_DEVICE_ID_LPE11000S 0xfc20
#define PCI_DEVICE_ID_HELIOS 0xfd00
#define PCI_DEVICE_ID_BMID 0xf0d5
#define PCI_DEVICE_ID_BSMB 0xf0d1
#define PCI_DEVICE_ID_HELIOS_SCSP 0xfd11
#define PCI_DEVICE_ID_HELIOS_DCSP 0xfd12
#define PCI_DEVICE_ID_ZEPHYR 0xfe00
#define PCI_DEVICE_ID_ZMID 0xf0e5
#define PCI_DEVICE_ID_ZSMB 0xf0e1
#define PCI_DEVICE_ID_LP101 0xf0a1
#define PCI_DEVICE_ID_LP10000S 0xfc00
#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
#define PCI_SUBSYSTEM_ID_LP11000S 0xfc11
#define PCI_SUBSYSTEM_ID_LP11002S 0xfc12
#define PCI_SUBSYSTEM_ID_LPE11000S 0xfc21
#define PCI_SUBSYSTEM_ID_LPE11002S 0xfc22
#define PCI_SUBSYSTEM_ID_LPE11010S 0xfc2A
#define JEDEC_ID_ADDRESS 0x0080001c
#define FIREFLY_JEDEC_ID 0x1ACC
View File
@ -126,34 +126,26 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
return -ERESTART;
}
/* The HBA's current state is provided by the ProgType and rr fields.
* Read and check the value of these fields before continuing to config
* this port.
/*
* The value of rr must be 1 since the driver set the cv field to 1.
* This setting requires the FW to set all revision fields.
*/
if (mb->un.varRdRev.rr == 0 || mb->un.varRdRev.un.b.ProgType != 2) {
/* Old firmware */
if (mb->un.varRdRev.rr == 0) {
vp->rev.rBit = 0;
lpfc_printf_log(phba,
KERN_ERR,
LOG_INIT,
"%d:0440 Adapter failed to init, mbxCmd x%x "
"READ_REV detected outdated firmware"
"Data: x%x\n",
phba->brd_no,
mb->mbxCommand, 0);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0440 Adapter failed to init, READ_REV has "
"missing revision information.\n",
phba->brd_no);
mempool_free(pmb, phba->mbox_mem_pool);
return -ERESTART;
} else {
vp->rev.rBit = 1;
vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
memcpy(vp->rev.sli1FwName,
(char*)mb->un.varRdRev.sli1FwName, 16);
vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
memcpy(vp->rev.sli2FwName,
(char *)mb->un.varRdRev.sli2FwName, 16);
}
/* Save information as VPD data */
vp->rev.rBit = 1;
vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
vp->rev.biuRev = mb->un.varRdRev.biuRev;
vp->rev.smRev = mb->un.varRdRev.smRev;
vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
@ -378,6 +370,10 @@ lpfc_config_port_post(struct lpfc_hba * phba)
if (psli->num_rings > 3)
status |= HC_R3INT_ENA;
if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
(phba->cfg_poll & DISABLE_FCP_RING_INT))
status &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
writel(status, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
spin_unlock_irq(phba->host->host_lock);
@ -571,6 +567,8 @@ lpfc_handle_latt(struct lpfc_hba * phba)
rc = -EIO;
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_cmd(phba);
psli->slistat.link_event++;
lpfc_read_la(phba, pmb, mp);
@ -765,96 +763,139 @@ static void
lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
{
lpfc_vpd_t *vp;
uint32_t id;
uint8_t hdrtype;
char str[16];
uint16_t dev_id = phba->pcidev->device;
uint16_t dev_subid = phba->pcidev->subsystem_device;
uint8_t hdrtype = phba->pcidev->hdr_type;
char *model_str = "";
vp = &phba->vpd;
pci_read_config_dword(phba->pcidev, PCI_VENDOR_ID, &id);
pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
switch ((id >> 16) & 0xffff) {
switch (dev_id) {
case PCI_DEVICE_ID_FIREFLY:
strcpy(str, "LP6000 1");
model_str = "LP6000 1Gb PCI";
break;
case PCI_DEVICE_ID_SUPERFLY:
if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
strcpy(str, "LP7000 1");
model_str = "LP7000 1Gb PCI";
else
strcpy(str, "LP7000E 1");
model_str = "LP7000E 1Gb PCI";
break;
case PCI_DEVICE_ID_DRAGONFLY:
strcpy(str, "LP8000 1");
model_str = "LP8000 1Gb PCI";
break;
case PCI_DEVICE_ID_CENTAUR:
if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
strcpy(str, "LP9002 2");
model_str = "LP9002 2Gb PCI";
else
strcpy(str, "LP9000 1");
model_str = "LP9000 1Gb PCI";
break;
case PCI_DEVICE_ID_RFLY:
strcpy(str, "LP952 2");
model_str = "LP952 2Gb PCI";
break;
case PCI_DEVICE_ID_PEGASUS:
strcpy(str, "LP9802 2");
model_str = "LP9802 2Gb PCI-X";
break;
case PCI_DEVICE_ID_THOR:
if (hdrtype == 0x80)
strcpy(str, "LP10000DC 2");
model_str = "LP10000DC 2Gb 2-port PCI-X";
else
strcpy(str, "LP10000 2");
model_str = "LP10000 2Gb PCI-X";
break;
case PCI_DEVICE_ID_VIPER:
strcpy(str, "LPX1000 10");
model_str = "LPX1000 10Gb PCI-X";
break;
case PCI_DEVICE_ID_PFLY:
strcpy(str, "LP982 2");
model_str = "LP982 2Gb PCI-X";
break;
case PCI_DEVICE_ID_TFLY:
if (hdrtype == 0x80)
strcpy(str, "LP1050DC 2");
model_str = "LP1050DC 2Gb 2-port PCI-X";
else
strcpy(str, "LP1050 2");
model_str = "LP1050 2Gb PCI-X";
break;
case PCI_DEVICE_ID_HELIOS:
if (hdrtype == 0x80)
strcpy(str, "LP11002 4");
model_str = "LP11002 4Gb 2-port PCI-X2";
else
strcpy(str, "LP11000 4");
model_str = "LP11000 4Gb PCI-X2";
break;
case PCI_DEVICE_ID_HELIOS_SCSP:
model_str = "LP11000-SP 4Gb PCI-X2";
break;
case PCI_DEVICE_ID_HELIOS_DCSP:
model_str = "LP11002-SP 4Gb 2-port PCI-X2";
break;
case PCI_DEVICE_ID_NEPTUNE:
if (hdrtype == 0x80)
model_str = "LPe1002 4Gb 2-port";
else
model_str = "LPe1000 4Gb PCIe";
break;
case PCI_DEVICE_ID_NEPTUNE_SCSP:
model_str = "LPe1000-SP 4Gb PCIe";
break;
case PCI_DEVICE_ID_NEPTUNE_DCSP:
model_str = "LPe1002-SP 4Gb 2-port PCIe";
break;
case PCI_DEVICE_ID_BMID:
strcpy(str, "LP1150 4");
model_str = "LP1150 4Gb PCI-X2";
break;
case PCI_DEVICE_ID_BSMB:
strcpy(str, "LP111 4");
model_str = "LP111 4Gb PCI-X2";
break;
case PCI_DEVICE_ID_ZEPHYR:
if (hdrtype == 0x80)
strcpy(str, "LPe11002 4");
model_str = "LPe11002 4Gb 2-port PCIe";
else
strcpy(str, "LPe11000 4");
model_str = "LPe11000 4Gb PCIe";
break;
case PCI_DEVICE_ID_ZEPHYR_SCSP:
model_str = "LPe11000-SP 4Gb PCIe";
break;
case PCI_DEVICE_ID_ZEPHYR_DCSP:
model_str = "LPe11002-SP 4Gb 2-port PCIe";
break;
case PCI_DEVICE_ID_ZMID:
strcpy(str, "LPe1150 4");
model_str = "LPe1150 4Gb PCIe";
break;
case PCI_DEVICE_ID_ZSMB:
strcpy(str, "LPe111 4");
model_str = "LPe111 4Gb PCIe";
break;
case PCI_DEVICE_ID_LP101:
strcpy(str, "LP101 2");
model_str = "LP101 2Gb PCI-X";
break;
case PCI_DEVICE_ID_LP10000S:
strcpy(str, "LP10000-S 2");
model_str = "LP10000-S 2Gb PCI";
break;
case PCI_DEVICE_ID_LP11000S:
case PCI_DEVICE_ID_LPE11000S:
switch (dev_subid) {
case PCI_SUBSYSTEM_ID_LP11000S:
model_str = "LP11002-S 4Gb PCI-X2";
break;
case PCI_SUBSYSTEM_ID_LP11002S:
model_str = "LP11000-S 4Gb 2-port PCI-X2";
break;
case PCI_SUBSYSTEM_ID_LPE11000S:
model_str = "LPe11002-S 4Gb PCIe";
break;
case PCI_SUBSYSTEM_ID_LPE11002S:
model_str = "LPe11002-S 4Gb 2-port PCIe";
break;
case PCI_SUBSYSTEM_ID_LPE11010S:
model_str = "LPe11010-S 4Gb 10-port PCIe";
break;
default:
break;
}
break;
default:
memset(str, 0, 16);
break;
}
if (mdp)
sscanf(str, "%s", mdp);
sscanf(model_str, "%s", mdp);
if (descp)
sprintf(descp, "Emulex LightPulse %s Gigabit PCI Fibre "
"Channel Adapter", str);
sprintf(descp, "Emulex %s Fibre Channel Adapter", model_str);
}
/**************************************************/
@ -1196,6 +1237,7 @@ lpfc_stop_timer(struct lpfc_hba * phba)
}
}
del_timer_sync(&phba->fcp_poll_timer);
del_timer_sync(&phba->fc_estabtmo);
del_timer_sync(&phba->fc_disctmo);
del_timer_sync(&phba->fc_fdmitmo);
@ -1351,7 +1393,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
goto out_put_host;
host->unique_id = phba->brd_no;
init_MUTEX(&phba->hba_can_block);
INIT_LIST_HEAD(&phba->ctrspbuflist);
INIT_LIST_HEAD(&phba->rnidrspbuflist);
INIT_LIST_HEAD(&phba->freebufList);
@ -1375,6 +1417,10 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
psli->mbox_tmo.function = lpfc_mbox_timeout;
psli->mbox_tmo.data = (unsigned long)phba;
init_timer(&phba->fcp_poll_timer);
phba->fcp_poll_timer.function = lpfc_poll_timeout;
phba->fcp_poll_timer.data = (unsigned long)phba;
/*
* Get all the module params for configuring this host and then
* establish the host parameters.
@ -1489,6 +1535,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
host->max_cmd_len = 16;
/* Initialize the list of scsi buffers used by driver for scsi IO. */
spin_lock_init(&phba->scsi_buf_list_lock);
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
host->transportt = lpfc_transport_template;
@ -1520,6 +1567,12 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
if (error)
goto out_free_irq;
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
spin_lock_irq(phba->host->host_lock);
lpfc_poll_start_timer(phba);
spin_unlock_irq(phba->host->host_lock);
}
/*
* set fixed host attributes
* Must done after lpfc_sli_hba_setup()
@ -1679,14 +1732,28 @@ static struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
@ -1697,6 +1764,10 @@ static struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0 }
};
View File
@ -55,55 +55,76 @@ lpfc_check_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
return (1);
}
int
lpfc_check_sparm(struct lpfc_hba * phba,
struct lpfc_nodelist * ndlp, struct serv_parm * sp,
uint32_t class)
{
volatile struct serv_parm *hsp = &phba->fc_sparam;
/* First check for supported version */
uint16_t hsp_value, ssp_value = 0;
/* Next check for class validity */
/*
* The receive data field size and buffer-to-buffer receive data field
* size entries are 16 bits but are represented as two 8-bit fields in
* the driver data structure to account for rsvd bits and other control
* bits. Reconstruct and compare the fields as a 16-bit values before
* correcting the byte values.
*/
if (sp->cls1.classValid) {
if (sp->cls1.rcvDataSizeMsb > hsp->cls1.rcvDataSizeMsb)
sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
if (sp->cls1.rcvDataSizeLsb > hsp->cls1.rcvDataSizeLsb)
hsp_value = (hsp->cls1.rcvDataSizeMsb << 8) |
hsp->cls1.rcvDataSizeLsb;
ssp_value = (sp->cls1.rcvDataSizeMsb << 8) |
sp->cls1.rcvDataSizeLsb;
if (ssp_value > hsp_value) {
sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
}
} else if (class == CLASS1) {
return (0);
return 0;
}
if (sp->cls2.classValid) {
if (sp->cls2.rcvDataSizeMsb > hsp->cls2.rcvDataSizeMsb)
sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
if (sp->cls2.rcvDataSizeLsb > hsp->cls2.rcvDataSizeLsb)
hsp_value = (hsp->cls2.rcvDataSizeMsb << 8) |
hsp->cls2.rcvDataSizeLsb;
ssp_value = (sp->cls2.rcvDataSizeMsb << 8) |
sp->cls2.rcvDataSizeLsb;
if (ssp_value > hsp_value) {
sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
}
} else if (class == CLASS2) {
return (0);
return 0;
}
if (sp->cls3.classValid) {
if (sp->cls3.rcvDataSizeMsb > hsp->cls3.rcvDataSizeMsb)
sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
if (sp->cls3.rcvDataSizeLsb > hsp->cls3.rcvDataSizeLsb)
hsp_value = (hsp->cls3.rcvDataSizeMsb << 8) |
hsp->cls3.rcvDataSizeLsb;
ssp_value = (sp->cls3.rcvDataSizeMsb << 8) |
sp->cls3.rcvDataSizeLsb;
if (ssp_value > hsp_value) {
sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
}
} else if (class == CLASS3) {
return (0);
return 0;
}
if (sp->cmn.bbRcvSizeMsb > hsp->cmn.bbRcvSizeMsb)
sp->cmn.bbRcvSizeMsb = hsp->cmn.bbRcvSizeMsb;
if (sp->cmn.bbRcvSizeLsb > hsp->cmn.bbRcvSizeLsb)
/*
* Preserve the upper four bits of the MSB from the PLOGI response.
* These bits contain the Buffer-to-Buffer State Change Number
* from the target and need to be passed to the FW.
*/
hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
if (ssp_value > hsp_value) {
sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
(hsp->cmn.bbRcvSizeMsb & 0x0F);
}
/* If check is good, copy wwpn wwnn into ndlp */
memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
return (1);
return 1;
}
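The class 1/2/3 checks above repeat one pattern; a hypothetical stand-alone helper (not in the driver) showing the 16-bit reconstruction and clamp applied to each rcvDataSize pair:
/* Hypothetical helper, illustrating the per-class clamp performed above. */
static void clamp_rcv_data_size(uint8_t *sp_msb, uint8_t *sp_lsb,
				uint8_t hsp_msb, uint8_t hsp_lsb)
{
	uint16_t ssp_value = (*sp_msb << 8) | *sp_lsb;	/* remote port's value */
	uint16_t hsp_value = (hsp_msb << 8) | hsp_lsb;	/* our local limit */

	if (ssp_value > hsp_value) {	/* never accept more than we advertise */
		*sp_msb = hsp_msb;
		*sp_lsb = hsp_lsb;
	}
}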
static void *
View File
@ -41,6 +41,20 @@
#define LPFC_ABORT_WAIT 2
static inline void
lpfc_block_requests(struct lpfc_hba * phba)
{
down(&phba->hba_can_block);
scsi_block_requests(phba->host);
}
static inline void
lpfc_unblock_requests(struct lpfc_hba * phba)
{
scsi_unblock_requests(phba->host);
up(&phba->hba_can_block);
}
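/*
 * Note: these helpers bracket the error handlers below.  The hba_can_block
 * semaphore lets only one handler block the host at a time, while
 * scsi_block_requests()/scsi_unblock_requests() keep the midlayer from
 * queuing new commands for the duration of the handler.
 */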
/*
* This routine allocates a scsi buffer, which contains all the necessary
* information needed to initiate a SCSI I/O. The non-DMAable buffer region
@ -137,18 +151,22 @@ lpfc_new_scsi_buf(struct lpfc_hba * phba)
}
struct lpfc_scsi_buf*
lpfc_sli_get_scsi_buf(struct lpfc_hba * phba)
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
struct lpfc_scsi_buf * lpfc_cmd = NULL;
struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
unsigned long iflag = 0;
spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
return lpfc_cmd;
}
static void
lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
unsigned long iflag = 0;
/*
* There are only two special cases to consider. (1) the scsi command
* requested scatter-gather usage or (2) the scsi command allocated
@ -166,8 +184,10 @@ lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
}
}
spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
psb->pCmd = NULL;
list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}
static int
@ -389,7 +409,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
struct lpfc_nodelist *pnode = rdata->pnode;
struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
unsigned long iflag;
int result;
struct scsi_device *sdev, *tmp_sdev;
int depth = 0;
lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
@ -441,11 +463,64 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
*lp, *(lp + 3), cmd->retries, cmd->resid);
}
result = cmd->result;
sdev = cmd->device;
cmd->scsi_done(cmd);
spin_lock_irqsave(phba->host->host_lock, iflag);
if (!result &&
((jiffies - pnode->last_ramp_up_time) >
LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
((jiffies - pnode->last_q_full_time) >
LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
(phba->cfg_lun_queue_depth > sdev->queue_depth)) {
shost_for_each_device(tmp_sdev, sdev->host) {
if (phba->cfg_lun_queue_depth > tmp_sdev->queue_depth) {
if (tmp_sdev->id != sdev->id)
continue;
if (tmp_sdev->ordered_tags)
scsi_adjust_queue_depth(tmp_sdev,
MSG_ORDERED_TAG,
tmp_sdev->queue_depth+1);
else
scsi_adjust_queue_depth(tmp_sdev,
MSG_SIMPLE_TAG,
tmp_sdev->queue_depth+1);
pnode->last_ramp_up_time = jiffies;
}
}
}
/*
* Check for queue full. If the lun is reporting queue full, then
* back off the lun queue depth to prevent target overloads.
*/
if (result == SAM_STAT_TASK_SET_FULL) {
pnode->last_q_full_time = jiffies;
shost_for_each_device(tmp_sdev, sdev->host) {
if (tmp_sdev->id != sdev->id)
continue;
depth = scsi_track_queue_full(tmp_sdev,
tmp_sdev->queue_depth - 1);
}
/*
* The queue depth cannot be lowered any more.
* Modify the returned error code to store
* the final depth value set by
* scsi_track_queue_full.
*/
if (depth == -1)
depth = sdev->host->cmd_per_lun;
if (depth) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
"%d:0711 detected queue full - lun queue depth "
" adjusted to %d.\n", phba->brd_no, depth);
}
}
lpfc_release_scsi_buf(phba, lpfc_cmd);
spin_unlock_irqrestore(phba->host->host_lock, iflag);
}
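Condensed, the completion-path policy above is: after a quiet interval, ramp the LUN queue depth back up by one; on TASK SET FULL, note the time and back the depth off. A simplified sketch follows (hypothetical helper: it acts on a single scsi_device and always uses simple tags, whereas the driver walks every LUN of the target and honors ordered_tags; LPFC_Q_RAMP_UP_INTERVAL is a driver constant not shown in this hunk):
/* Hypothetical condensation of the ramp-up / back-off logic above. */
static void lpfc_adjust_lun_depth(struct lpfc_hba *phba,
				  struct lpfc_nodelist *pnode,
				  struct scsi_device *sdev, int result)
{
	if (result == SAM_STAT_TASK_SET_FULL) {
		/* Target saturated: remember when, shrink the queue. */
		pnode->last_q_full_time = jiffies;
		scsi_track_queue_full(sdev, sdev->queue_depth - 1);
		return;
	}

	if (result == 0 &&
	    time_after(jiffies, pnode->last_ramp_up_time +
			       LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    time_after(jiffies, pnode->last_q_full_time +
			       LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    phba->cfg_lun_queue_depth > sdev->queue_depth) {
		/* Quiet long enough: allow one more outstanding command. */
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
					sdev->queue_depth + 1);
		pnode->last_ramp_up_time = jiffies;
	}
}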
static void
@ -693,6 +768,37 @@ lpfc_info(struct Scsi_Host *host)
return lpfcinfobuf;
}
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
unsigned long poll_tmo_expires =
(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
mod_timer(&phba->fcp_poll_timer,
poll_tmo_expires);
}
void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
lpfc_poll_rearm_timer(phba);
}
void lpfc_poll_timeout(unsigned long ptr)
{
struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
unsigned long iflag;
spin_lock_irqsave(phba->host->host_lock, iflag);
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
lpfc_sli_poll_fcp_ring (phba);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_poll_rearm_timer(phba);
}
spin_unlock_irqrestore(phba->host->host_lock, iflag);
}
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
@ -719,10 +825,11 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
goto out_fail_command;
}
lpfc_cmd = lpfc_sli_get_scsi_buf (phba);
lpfc_cmd = lpfc_get_scsi_buf (phba);
if (lpfc_cmd == NULL) {
printk(KERN_WARNING "%s: No buffer available - list empty, "
"total count %d\n", __FUNCTION__, phba->total_scsi_bufs);
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
"%d:0707 driver's buffer pool is empty, "
"IO busied\n", phba->brd_no);
goto out_host_busy;
}
@ -746,11 +853,17 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
if (err)
goto out_host_busy_free_buf;
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
lpfc_sli_poll_fcp_ring(phba);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_poll_rearm_timer(phba);
}
return 0;
out_host_busy_free_buf:
lpfc_release_scsi_buf(phba, lpfc_cmd);
cmnd->host_scribble = NULL;
out_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
@ -759,11 +872,12 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
return 0;
}
static int
__lpfc_abort_handler(struct scsi_cmnd *cmnd)
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
struct lpfc_hba *phba =
(struct lpfc_hba *)cmnd->device->host->hostdata[0];
struct Scsi_Host *shost = cmnd->device->host;
struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
struct lpfc_iocbq *iocb;
struct lpfc_iocbq *abtsiocb;
@ -772,6 +886,8 @@ __lpfc_abort_handler(struct scsi_cmnd *cmnd)
unsigned int loop_count = 0;
int ret = SUCCESS;
lpfc_block_requests(phba);
spin_lock_irq(shost->host_lock);
lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
BUG_ON(!lpfc_cmd);
@ -821,9 +937,15 @@ __lpfc_abort_handler(struct scsi_cmnd *cmnd)
goto out;
}
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_sli_poll_fcp_ring (phba);
/* Wait for abort to complete */
while (lpfc_cmd->pCmd == cmnd)
{
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_sli_poll_fcp_ring (phba);
spin_unlock_irq(phba->host->host_lock);
schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ);
spin_lock_irq(phba->host->host_lock);
@ -844,26 +966,19 @@ __lpfc_abort_handler(struct scsi_cmnd *cmnd)
out:
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
"%d:0749 SCSI layer issued abort device: ret %#x, "
"ID %d, LUN %d, snum %#lx\n",
"%d:0749 SCSI Layer I/O Abort Request "
"Status x%x ID %d LUN %d snum %#lx\n",
phba->brd_no, ret, cmnd->device->id,
cmnd->device->lun, cmnd->serial_number);
spin_unlock_irq(shost->host_lock);
lpfc_unblock_requests(phba);
return ret;
}
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
int rc;
spin_lock_irq(cmnd->device->host->host_lock);
rc = __lpfc_abort_handler(cmnd);
spin_unlock_irq(cmnd->device->host->host_lock);
return rc;
}
static int
__lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
@ -871,9 +986,12 @@ __lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
struct lpfc_iocbq *iocbq, *iocbqrsp;
struct lpfc_rport_data *rdata = cmnd->device->hostdata;
struct lpfc_nodelist *pnode = rdata->pnode;
uint32_t cmd_result = 0, cmd_status = 0;
int ret = FAILED;
int cnt, loopcnt;
lpfc_block_requests(phba);
spin_lock_irq(shost->host_lock);
/*
* If target is not in a MAPPED state, delay the reset until
* target is rediscovered or nodev timeout expires.
@ -891,7 +1009,7 @@ __lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
break;
}
lpfc_cmd = lpfc_sli_get_scsi_buf (phba);
lpfc_cmd = lpfc_get_scsi_buf (phba);
if (lpfc_cmd == NULL)
goto out;
@ -916,26 +1034,28 @@ __lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
if (ret == IOCB_SUCCESS)
ret = SUCCESS;
lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT)
if (lpfc_cmd->result & IOERR_DRVR_MASK)
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
cmd_result = iocbqrsp->iocb.un.ulpWord[4];
cmd_status = iocbqrsp->iocb.ulpStatus;
lpfc_sli_release_iocbq(phba, iocbqrsp);
lpfc_release_scsi_buf(phba, lpfc_cmd);
/*
* All outstanding txcmplq I/Os should have been aborted by the target.
* All outstanding txcmplq I/Os should have been aborted by the device.
* Unfortunately, some targets do not abide by this forcing the driver
* to double check.
*/
lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
cmnd->device->id, cmnd->device->lun, 0,
LPFC_CTX_LUN);
cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
cmnd->device->id, cmnd->device->lun,
LPFC_CTX_LUN);
if (cnt)
lpfc_sli_abort_iocb(phba,
&phba->sli.ring[phba->sli.fcp_ring],
cmnd->device->id, cmnd->device->lun,
0, LPFC_CTX_LUN);
loopcnt = 0;
while((cnt = lpfc_sli_sum_iocb(phba,
&phba->sli.ring[phba->sli.fcp_ring],
cmnd->device->id, cmnd->device->lun,
LPFC_CTX_LUN))) {
while(cnt) {
spin_unlock_irq(phba->host->host_lock);
schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
spin_lock_irq(phba->host->host_lock);
@ -943,6 +1063,11 @@ __lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
if (++loopcnt
> (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
break;
cnt = lpfc_sli_sum_iocb(phba,
&phba->sli.ring[phba->sli.fcp_ring],
cmnd->device->id, cmnd->device->lun,
LPFC_CTX_LUN);
}
if (cnt) {
@ -952,35 +1077,21 @@ __lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
ret = FAILED;
}
lpfc_sli_release_iocbq(phba, iocbqrsp);
out_free_scsi_buf:
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d:0713 SCSI layer issued LUN reset (%d, %d) "
"Data: x%x x%x x%x\n",
phba->brd_no, lpfc_cmd->pCmd->device->id,
lpfc_cmd->pCmd->device->lun, ret, lpfc_cmd->status,
lpfc_cmd->result);
lpfc_release_scsi_buf(phba, lpfc_cmd);
phba->brd_no, cmnd->device->id, cmnd->device->lun,
ret, cmd_status, cmd_result);
out:
spin_unlock_irq(shost->host_lock);
lpfc_unblock_requests(phba);
return ret;
}
static int
lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
int rc;
spin_lock_irq(cmnd->device->host->host_lock);
rc = __lpfc_reset_lun_handler(cmnd);
spin_unlock_irq(cmnd->device->host->host_lock);
return rc;
}
/*
* Note: midlayer calls this function with the host_lock held
*/
static int
__lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
@ -991,7 +1102,10 @@ __lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
unsigned int midlayer_id = 0;
struct lpfc_scsi_buf * lpfc_cmd;
lpfc_cmd = lpfc_sli_get_scsi_buf (phba);
lpfc_block_requests(phba);
spin_lock_irq(shost->host_lock);
lpfc_cmd = lpfc_get_scsi_buf(phba);
if (lpfc_cmd == NULL)
goto out;
@ -1022,18 +1136,31 @@ __lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data;
ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba);
if (ret != SUCCESS) {
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d:0713 Bus Reset on target %d failed\n",
phba->brd_no, i);
err_count++;
}
}
if (err_count == 0)
ret = SUCCESS;
lpfc_release_scsi_buf(phba, lpfc_cmd);
/*
* All outstanding txcmplq I/Os should have been aborted by
* the targets. Unfortunately, some targets do not abide by
* this forcing the driver to double check.
*/
cmnd->device->id = midlayer_id;
cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
0, 0, LPFC_CTX_HOST);
if (cnt)
lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
0, 0, 0, LPFC_CTX_HOST);
loopcnt = 0;
while((cnt = lpfc_sli_sum_iocb(phba,
&phba->sli.ring[phba->sli.fcp_ring],
0, 0, LPFC_CTX_HOST))) {
while(cnt) {
spin_unlock_irq(phba->host->host_lock);
schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
spin_lock_irq(phba->host->host_lock);
@ -1041,44 +1168,30 @@ __lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
if (++loopcnt
> (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
break;
cnt = lpfc_sli_sum_iocb(phba,
&phba->sli.ring[phba->sli.fcp_ring],
0, 0, LPFC_CTX_HOST);
}
if (cnt) {
/* flush all outstanding commands on the host */
i = lpfc_sli_abort_iocb(phba,
&phba->sli.ring[phba->sli.fcp_ring], 0, 0, 0,
LPFC_CTX_HOST);
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
phba->brd_no, cnt, i);
ret = FAILED;
}
if (cnt == 0)
ret = SUCCESS;
else
ret = FAILED;
lpfc_release_scsi_buf(phba, lpfc_cmd);
lpfc_printf_log(phba,
KERN_ERR,
LOG_FCP,
"%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
phba->brd_no, ret);
out:
spin_unlock_irq(shost->host_lock);
lpfc_unblock_requests(phba);
return ret;
}
static int
lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
int rc;
spin_lock_irq(cmnd->device->host->host_lock);
rc = __lpfc_reset_bus_handler(cmnd);
spin_unlock_irq(cmnd->device->host->host_lock);
return rc;
}
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
@ -1127,10 +1240,10 @@ lpfc_slave_alloc(struct scsi_device *sdev)
break;
}
spin_lock_irqsave(phba->host->host_lock, flags);
spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
phba->total_scsi_bufs++;
list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
spin_unlock_irqrestore(phba->host->host_lock, flags);
spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
}
return 0;
}
@ -1154,6 +1267,12 @@ lpfc_slave_configure(struct scsi_device *sdev)
*/
rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5;
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
lpfc_sli_poll_fcp_ring(phba);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_poll_rearm_timer(phba);
}
return 0;
}
View File
@ -886,6 +886,182 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
return rc;
}
static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba,
struct lpfc_sli_ring * pring)
{
struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
/*
* Ring <ringno> handler: portRspPut <portRspPut> is bigger than
* rsp ring <portRspMax>
*/
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"%d:0312 Ring %d handler: portRspPut %d "
"is bigger then rsp ring %d\n",
phba->brd_no, pring->ringno,
le32_to_cpu(pgp->rspPutInx),
pring->numRiocb);
phba->hba_state = LPFC_HBA_ERROR;
/*
* All error attention handlers are posted to
* worker thread
*/
phba->work_ha |= HA_ERATT;
phba->work_hs = HS_FFER3;
if (phba->work_wait)
wake_up(phba->work_wait);
return;
}
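/*
 * Interrupt-less servicing of the FCP ring: walk the response entries the
 * port has posted, hand solicited and abort completions to their iocb_cmpl
 * routines, advance the response index in SLIM, and resume the command
 * ring if the port reported it has room again.  Called from the poll timer
 * and from the submission/error paths when cfg_poll enables polling.
 */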
void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
{
struct lpfc_sli * psli = &phba->sli;
struct lpfc_sli_ring * pring = &psli->ring[LPFC_FCP_RING];
IOCB_t *irsp = NULL;
IOCB_t *entry = NULL;
struct lpfc_iocbq *cmdiocbq = NULL;
struct lpfc_iocbq rspiocbq;
struct lpfc_pgp *pgp;
uint32_t status;
uint32_t portRspPut, portRspMax;
int type;
uint32_t rsp_cmpl = 0;
void __iomem *to_slim;
uint32_t ha_copy;
pring->stats.iocb_event++;
/* The driver assumes SLI-2 mode */
pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
/*
* The next available response entry should never exceed the maximum
* entries. If it does, treat it as an adapter hardware error.
*/
portRspMax = pring->numRiocb;
portRspPut = le32_to_cpu(pgp->rspPutInx);
if (unlikely(portRspPut >= portRspMax)) {
lpfc_sli_rsp_pointers_error(phba, pring);
return;
}
rmb();
while (pring->rspidx != portRspPut) {
entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
if (++pring->rspidx >= portRspMax)
pring->rspidx = 0;
lpfc_sli_pcimem_bcopy((uint32_t *) entry,
(uint32_t *) &rspiocbq.iocb,
sizeof (IOCB_t));
irsp = &rspiocbq.iocb;
type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
pring->stats.iocb_rsp++;
rsp_cmpl++;
if (unlikely(irsp->ulpStatus)) {
/* Rsp ring <ringno> error: IOCB */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"%d:0326 Rsp Ring %d error: IOCB Data: "
"x%x x%x x%x x%x x%x x%x x%x x%x\n",
phba->brd_no, pring->ringno,
irsp->un.ulpWord[0],
irsp->un.ulpWord[1],
irsp->un.ulpWord[2],
irsp->un.ulpWord[3],
irsp->un.ulpWord[4],
irsp->un.ulpWord[5],
*(((uint32_t *) irsp) + 6),
*(((uint32_t *) irsp) + 7));
}
switch (type) {
case LPFC_ABORT_IOCB:
case LPFC_SOL_IOCB:
/*
* Idle exchange closed via ABTS from port. No iocb
* resources need to be recovered.
*/
if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
printk(KERN_INFO "%s: IOCB cmd 0x%x processed."
" Skipping completion\n", __FUNCTION__,
irsp->ulpCommand);
break;
}
cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
&rspiocbq);
if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
&rspiocbq);
}
break;
default:
if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
char adaptermsg[LPFC_MAX_ADPTMSG];
memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
memcpy(&adaptermsg[0], (uint8_t *) irsp,
MAX_MSG_DATA);
dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
phba->brd_no, adaptermsg);
} else {
/* Unknown IOCB command */
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"%d:0321 Unknown IOCB command "
"Data: x%x, x%x x%x x%x x%x\n",
phba->brd_no, type,
irsp->ulpCommand,
irsp->ulpStatus,
irsp->ulpIoTag,
irsp->ulpContext);
}
break;
}
/*
* The response IOCB has been processed. Update the ring
* pointer in SLIM. If the port response put pointer has not
* been updated, sync the pgp->rspPutInx and fetch the new port
* response put pointer.
*/
to_slim = phba->MBslimaddr +
(SLIMOFF + (pring->ringno * 2) + 1) * 4;
writeb(pring->rspidx, to_slim);
if (pring->rspidx == portRspPut)
portRspPut = le32_to_cpu(pgp->rspPutInx);
}
ha_copy = readl(phba->HAregaddr);
ha_copy >>= (LPFC_FCP_RING * 4);
if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
pring->stats.iocb_rsp_full++;
status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
writel(status, phba->CAregaddr);
readl(phba->CAregaddr);
}
if ((ha_copy & HA_R0CE_RSP) &&
(pring->flag & LPFC_CALL_RING_AVAILABLE)) {
pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
pring->stats.iocb_cmd_empty++;
/* Force update of the local copy of cmdGetInx */
pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
lpfc_sli_resume_iocb(phba, pring);
if ((pring->lpfc_sli_cmd_available))
(pring->lpfc_sli_cmd_available) (phba, pring);
}
return;
}
/*
* This routine presumes LPFC_FCP_RING handling and doesn't bother
* to check it explicitly.
@ -917,24 +1093,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
portRspMax = pring->numRiocb;
portRspPut = le32_to_cpu(pgp->rspPutInx);
if (unlikely(portRspPut >= portRspMax)) {
/*
* Ring <ringno> handler: portRspPut <portRspPut> is bigger then
* rsp ring <portRspMax>
*/
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"%d:0312 Ring %d handler: portRspPut %d "
"is bigger then rsp ring %d\n",
phba->brd_no, pring->ringno, portRspPut,
portRspMax);
phba->hba_state = LPFC_HBA_ERROR;
/* All error attention handlers are posted to worker thread */
phba->work_ha |= HA_ERATT;
phba->work_hs = HS_FFER3;
if (phba->work_wait)
wake_up(phba->work_wait);
lpfc_sli_rsp_pointers_error(phba, pring);
spin_unlock_irqrestore(phba->host->host_lock, iflag);
return 1;
}
@ -947,6 +1106,10 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
* network byte order and pci byte orders are different.
*/
entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
if (++pring->rspidx >= portRspMax)
pring->rspidx = 0;
lpfc_sli_pcimem_bcopy((uint32_t *) entry,
(uint32_t *) &rspiocbq.iocb,
sizeof (IOCB_t));
@ -1020,9 +1183,6 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
* been updated, sync the pgp->rspPutInx and fetch the new port
* response put pointer.
*/
if (++pring->rspidx >= portRspMax)
pring->rspidx = 0;
to_slim = phba->MBslimaddr +
(SLIMOFF + (pring->ringno * 2) + 1) * 4;
writel(pring->rspidx, to_slim);
@ -2615,6 +2775,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
DECLARE_WAIT_QUEUE_HEAD(done_q);
long timeleft, timeout_req = 0;
int retval = IOCB_SUCCESS;
uint32_t creg_val;
/*
* If the caller has provided a response iocbq buffer, then context2
@ -2630,6 +2791,13 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
piocb->context_un.wait_queue = &done_q;
piocb->iocb_flag &= ~LPFC_IO_WAKE;
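/*
 * When the FCP ring interrupt is normally masked for polled operation,
 * re-enable it for the duration of this synchronous request so the
 * completion can wake the waiter; it is masked again after the wait below.
 */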
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
creg_val = readl(phba->HCregaddr);
creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
}
retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
if (retval == IOCB_SUCCESS) {
timeout_req = timeout * HZ;
@ -2663,6 +2831,13 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
retval = IOCB_ERROR;
}
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
creg_val = readl(phba->HCregaddr);
creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
}
if (prspiocbq)
piocb->context2 = NULL;
View File
@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "8.1.0"
#define LPFC_DRIVER_VERSION "8.1.1"
#define LPFC_DRIVER_NAME "lpfc"
View File
@ -2,7 +2,7 @@
*
* Linux MegaRAID device driver
*
* Copyright © 2002 LSI Logic Corporation.
* Copyright (c) 2002 LSI Logic Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -17,7 +17,8 @@
* Copyright (c) 2003 Christoph Hellwig <hch@lst.de>
* - new-style, hotplug-aware pci probing and scsi registration
*
* Version : v2.00.3 (Feb 19, 2003) - Atul Mukker <Atul.Mukker@lsil.com>
* Version : v2.00.4 Mon Nov 14 14:02:43 EST 2005 - Seokmann Ju
* <Seokmann.Ju@lsil.com>
*
* Description: Linux device driver for LSI Logic MegaRAID controller
*
@ -51,10 +52,10 @@
#include "megaraid.h"
#define MEGARAID_MODULE_VERSION "2.00.3"
#define MEGARAID_MODULE_VERSION "2.00.4"
MODULE_AUTHOR ("LSI Logic Corporation");
MODULE_DESCRIPTION ("LSI Logic MegaRAID driver");
MODULE_AUTHOR ("sju@lsil.com");
MODULE_DESCRIPTION ("LSI Logic MegaRAID legacy driver");
MODULE_LICENSE ("GPL");
MODULE_VERSION(MEGARAID_MODULE_VERSION);
@ -4553,7 +4554,7 @@ mega_internal_done(Scsi_Cmnd *scmd)
static struct scsi_host_template megaraid_template = {
.module = THIS_MODULE,
.name = "MegaRAID",
.proc_name = "megaraid",
.proc_name = "megaraid_legacy",
.info = megaraid_info,
.queuecommand = megaraid_queue,
.bios_param = megaraid_biosparam,
@ -5037,22 +5038,12 @@ megaraid_shutdown(struct pci_dev *pdev)
}
static struct pci_device_id megaraid_pci_tbl[] = {
{PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DISCOVERY,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_PERC4_DI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, BOARD_64BIT},
{PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_PERC4_QC_VERDE,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, BOARD_64BIT},
{PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID3,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_AMI_MEGARAID3,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_AMI_MEGARAID3,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
};
MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl);
@ -5095,7 +5086,7 @@ static int __init megaraid_init(void)
* First argument (major) to register_chrdev implies a dynamic
* major number allocation.
*/
major = register_chrdev(0, "megadev", &megadev_fops);
major = register_chrdev(0, "megadev_legacy", &megadev_fops);
if (!major) {
printk(KERN_WARNING
"megaraid: failed to register char device\n");
@ -5109,7 +5100,7 @@ static void __exit megaraid_exit(void)
/*
* Unregister the character device interface to the driver.
*/
unregister_chrdev(major, "megadev");
unregister_chrdev(major, "megadev_legacy");
pci_unregister_driver(&megaraid_pci_driver);
View File
@ -64,7 +64,6 @@ config MEGARAID_MAILBOX
To compile this driver as a module, choose M here: the
module will be called megaraid_mbox
if MEGARAID_NEWGEN=n
config MEGARAID_LEGACY
tristate "LSI Logic Legacy MegaRAID Driver"
depends on PCI && SCSI
@ -75,7 +74,6 @@ config MEGARAID_LEGACY
To compile this driver as a module, choose M here: the
module will be called megaraid
endif
config MEGARAID_SAS
tristate "LSI Logic MegaRAID SAS RAID Module"
View File
@ -10,12 +10,13 @@
* 2 of the License, or (at your option) any later version.
*
* FILE : megaraid_mbox.c
* Version : v2.20.4.6 (Mar 07 2005)
* Version : v2.20.4.7 (Nov 14 2005)
*
* Authors:
* Atul Mukker <Atul.Mukker@lsil.com>
* Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com>
* Manoj Jose <Manoj.Jose@lsil.com>
* Seokmann Ju <Seokmann.Ju@lsil.com>
*
* List of supported controllers
*
@ -136,7 +137,7 @@ static int wait_till_fw_empty(adapter_t *);
MODULE_AUTHOR("LSI Logic Corporation");
MODULE_AUTHOR("sju@lsil.com");
MODULE_DESCRIPTION("LSI Logic MegaRAID Mailbox Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGARAID_VERSION);
@ -278,68 +279,14 @@ static struct pci_device_id pci_id_table_g[] = {
{
PCI_VENDOR_ID_AMI,
PCI_DEVICE_ID_AMI_MEGARAID3,
PCI_VENDOR_ID_DELL,
PCI_SUBSYS_ID_PERC3_QC,
PCI_ANY_ID,
PCI_ANY_ID,
},
{
PCI_VENDOR_ID_AMI,
PCI_VENDOR_ID_LSI_LOGIC,
PCI_DEVICE_ID_AMI_MEGARAID3,
PCI_VENDOR_ID_DELL,
PCI_SUBSYS_ID_PERC3_DC,
},
{
PCI_VENDOR_ID_AMI,
PCI_DEVICE_ID_AMI_MEGARAID3,
PCI_VENDOR_ID_DELL,
PCI_SUBSYS_ID_PERC3_SC,
},
{
PCI_VENDOR_ID_AMI,
PCI_DEVICE_ID_AMI_MEGARAID3,
PCI_VENDOR_ID_AMI,
PCI_SUBSYS_ID_PERC3_SC,
},
{
PCI_VENDOR_ID_AMI,
PCI_DEVICE_ID_AMI_MEGARAID3,
PCI_VENDOR_ID_AMI,
PCI_SUBSYS_ID_PERC3_DC,
},
{
PCI_VENDOR_ID_LSI_LOGIC,
PCI_DEVICE_ID_MEGARAID_SCSI_320_0,
PCI_VENDOR_ID_LSI_LOGIC,
PCI_SUBSYS_ID_MEGARAID_SCSI_320_0,
},
{
PCI_VENDOR_ID_LSI_LOGIC,
PCI_DEVICE_ID_MEGARAID_SCSI_320_1,
PCI_VENDOR_ID_LSI_LOGIC,
PCI_SUBSYS_ID_MEGARAID_SCSI_320_1,
},
{
PCI_VENDOR_ID_LSI_LOGIC,
PCI_DEVICE_ID_MEGARAID_SCSI_320_2,
PCI_VENDOR_ID_LSI_LOGIC,
PCI_SUBSYS_ID_MEGARAID_SCSI_320_2,
},
{
PCI_VENDOR_ID_LSI_LOGIC,
PCI_DEVICE_ID_MEGARAID_I4_133_RAID,
PCI_VENDOR_ID_LSI_LOGIC,
PCI_SUBSYS_ID_MEGARAID_I4_133_RAID,
},
{
PCI_VENDOR_ID_LSI_LOGIC,
PCI_DEVICE_ID_MEGARAID_SATA_150_4,
PCI_VENDOR_ID_LSI_LOGIC,
PCI_SUBSYS_ID_MEGARAID_SATA_150_4,
},
{
PCI_VENDOR_ID_LSI_LOGIC,
PCI_DEVICE_ID_MEGARAID_SATA_150_6,
PCI_VENDOR_ID_LSI_LOGIC,
PCI_SUBSYS_ID_MEGARAID_SATA_150_6,
PCI_ANY_ID,
PCI_ANY_ID,
},
{
PCI_VENDOR_ID_LSI_LOGIC,
@ -347,18 +294,6 @@ static struct pci_device_id pci_id_table_g[] = {
PCI_ANY_ID,
PCI_ANY_ID,
},
{
PCI_VENDOR_ID_LSI_LOGIC,
PCI_DEVICE_ID_INTEL_RAID_SRCS16,
PCI_VENDOR_ID_INTEL,
PCI_SUBSYS_ID_INTEL_RAID_SRCS16,
},
{
PCI_VENDOR_ID_LSI_LOGIC,
PCI_DEVICE_ID_INTEL_RAID_SRCU41L_LAKE_SHETEK,
PCI_VENDOR_ID_INTEL,
PCI_SUBSYS_ID_INTEL_RAID_SRCU41L_LAKE_SHETEK,
},
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, pci_id_table_g);
@ -2985,6 +2920,7 @@ mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[])
for (i = 0; i < 0xFFFFF; i++) {
if (mbox->numstatus != 0xFF) break;
rmb();
}
if (i == 0xFFFFF) {
View File
@ -21,8 +21,8 @@
#include "megaraid_ioctl.h"
#define MEGARAID_VERSION "2.20.4.6"
#define MEGARAID_EXT_VERSION "(Release Date: Mon Mar 07 12:27:22 EST 2005)"
#define MEGARAID_VERSION "2.20.4.7"
#define MEGARAID_EXT_VERSION "(Release Date: Mon Nov 14 12:27:22 EST 2005)"
/*
View File
@ -131,7 +131,739 @@
#define NAME53C "ncr53c"
#define NAME53C8XX "ncr53c8xx"
#include "sym53c8xx_comm.h"
/*==========================================================
**
** Debugging tags
**
**==========================================================
*/
#define DEBUG_ALLOC (0x0001)
#define DEBUG_PHASE (0x0002)
#define DEBUG_QUEUE (0x0008)
#define DEBUG_RESULT (0x0010)
#define DEBUG_POINTER (0x0020)
#define DEBUG_SCRIPT (0x0040)
#define DEBUG_TINY (0x0080)
#define DEBUG_TIMING (0x0100)
#define DEBUG_NEGO (0x0200)
#define DEBUG_TAGS (0x0400)
#define DEBUG_SCATTER (0x0800)
#define DEBUG_IC (0x1000)
/*
** Enable/Disable debug messages.
** Can be changed at runtime too.
*/
#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
static int ncr_debug = SCSI_NCR_DEBUG_FLAGS;
#define DEBUG_FLAGS ncr_debug
#else
#define DEBUG_FLAGS SCSI_NCR_DEBUG_FLAGS
#endif
static inline struct list_head *ncr_list_pop(struct list_head *head)
{
if (!list_empty(head)) {
struct list_head *elem = head->next;
list_del(elem);
return elem;
}
return NULL;
}
/*==========================================================
**
** Simple power of two buddy-like allocator.
**
** This simple code is not intended to be fast, but to
** provide power of 2 aligned memory allocations.
** Since the SCRIPTS processor only supplies 8 bit
** arithmetic, this allocator allows simple and fast
** address calculations from the SCRIPTS code.
** In addition, cache line alignment is guaranteed for
** power of 2 cache line size.
** Enhanced in linux-2.3.44 to provide a memory pool
** per pcidev to support dynamic dma mapping. (I would
** have preferred a real bus abstraction, btw).
**
**==========================================================
*/
#define MEMO_SHIFT 4 /* 16 bytes minimum memory chunk */
#if PAGE_SIZE >= 8192
#define MEMO_PAGE_ORDER 0 /* 1 PAGE maximum */
#else
#define MEMO_PAGE_ORDER 1 /* 2 PAGES maximum */
#endif
#define MEMO_FREE_UNUSED /* Free unused pages immediately */
#define MEMO_WARN 1
#define MEMO_GFP_FLAGS GFP_ATOMIC
#define MEMO_CLUSTER_SHIFT (PAGE_SHIFT+MEMO_PAGE_ORDER)
#define MEMO_CLUSTER_SIZE (1UL << MEMO_CLUSTER_SHIFT)
#define MEMO_CLUSTER_MASK (MEMO_CLUSTER_SIZE-1)
typedef u_long m_addr_t; /* Enough bits to bit-hack addresses */
typedef struct device *m_bush_t; /* Something that addresses DMAable */
typedef struct m_link { /* Link between free memory chunks */
struct m_link *next;
} m_link_s;
typedef struct m_vtob { /* Virtual to Bus address translation */
struct m_vtob *next;
m_addr_t vaddr;
m_addr_t baddr;
} m_vtob_s;
#define VTOB_HASH_SHIFT 5
#define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT)
#define VTOB_HASH_MASK (VTOB_HASH_SIZE-1)
#define VTOB_HASH_CODE(m) \
((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK)
typedef struct m_pool { /* Memory pool of a given kind */
m_bush_t bush;
m_addr_t (*getp)(struct m_pool *);
void (*freep)(struct m_pool *, m_addr_t);
int nump;
m_vtob_s *(vtob[VTOB_HASH_SIZE]);
struct m_pool *next;
struct m_link h[PAGE_SHIFT-MEMO_SHIFT+MEMO_PAGE_ORDER+1];
} m_pool_s;
static void *___m_alloc(m_pool_s *mp, int size)
{
int i = 0;
int s = (1 << MEMO_SHIFT);
int j;
m_addr_t a;
m_link_s *h = mp->h;
if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
return NULL;
while (size > s) {
s <<= 1;
++i;
}
j = i;
while (!h[j].next) {
if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
h[j].next = (m_link_s *)mp->getp(mp);
if (h[j].next)
h[j].next->next = NULL;
break;
}
++j;
s <<= 1;
}
a = (m_addr_t) h[j].next;
if (a) {
h[j].next = h[j].next->next;
while (j > i) {
j -= 1;
s >>= 1;
h[j].next = (m_link_s *) (a+s);
h[j].next->next = NULL;
}
}
#ifdef DEBUG
printk("___m_alloc(%d) = %p\n", size, (void *) a);
#endif
return (void *) a;
}
static void ___m_free(m_pool_s *mp, void *ptr, int size)
{
int i = 0;
int s = (1 << MEMO_SHIFT);
m_link_s *q;
m_addr_t a, b;
m_link_s *h = mp->h;
#ifdef DEBUG
printk("___m_free(%p, %d)\n", ptr, size);
#endif
if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
return;
while (size > s) {
s <<= 1;
++i;
}
a = (m_addr_t) ptr;
while (1) {
#ifdef MEMO_FREE_UNUSED
if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
mp->freep(mp, a);
break;
}
#endif
b = a ^ s;
q = &h[i];
while (q->next && q->next != (m_link_s *) b) {
q = q->next;
}
if (!q->next) {
((m_link_s *) a)->next = h[i].next;
h[i].next = (m_link_s *) a;
break;
}
q->next = q->next->next;
a = a & b;
s <<= 1;
++i;
}
}
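The coalescing loop above uses the usual buddy trick: for a block of size s at offset a (aligned to s), the only block it can merge with sits at a ^ s, and the merged block starts at a & (a ^ s). A tiny stand-alone illustration (not driver code):
#include <stdio.h>

/* Stand-alone illustration of the buddy arithmetic used in ___m_free(). */
int main(void)
{
	unsigned long a = 0x60;			/* block offset, aligned to its size */
	unsigned long s = 0x20;			/* block size (power of two) */
	unsigned long buddy  = a ^ s;		/* candidate block to merge with */
	unsigned long merged = a & buddy;	/* start of the doubled block */

	printf("block 0x%lx (size 0x%lx): buddy 0x%lx, merged block at 0x%lx\n",
	       a, s, buddy, merged);
	return 0;
}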
static DEFINE_SPINLOCK(ncr53c8xx_lock);
static void *__m_calloc2(m_pool_s *mp, int size, char *name, int uflags)
{
void *p;
p = ___m_alloc(mp, size);
if (DEBUG_FLAGS & DEBUG_ALLOC)
printk ("new %-10s[%4d] @%p.\n", name, size, p);
if (p)
memset(p, 0, size);
else if (uflags & MEMO_WARN)
printk (NAME53C8XX ": failed to allocate %s[%d]\n", name, size);
return p;
}
#define __m_calloc(mp, s, n) __m_calloc2(mp, s, n, MEMO_WARN)
static void __m_free(m_pool_s *mp, void *ptr, int size, char *name)
{
if (DEBUG_FLAGS & DEBUG_ALLOC)
printk ("freeing %-10s[%4d] @%p.\n", name, size, ptr);
___m_free(mp, ptr, size);
}
/*
* With pci bus iommu support, we use a default pool of unmapped memory
* for memory we do not need to DMA from/to and one pool per pcidev for
* memory accessed by the PCI chip. `mp0' is the default not DMAable pool.
*/
static m_addr_t ___mp0_getp(m_pool_s *mp)
{
m_addr_t m = __get_free_pages(MEMO_GFP_FLAGS, MEMO_PAGE_ORDER);
if (m)
++mp->nump;
return m;
}
static void ___mp0_freep(m_pool_s *mp, m_addr_t m)
{
free_pages(m, MEMO_PAGE_ORDER);
--mp->nump;
}
static m_pool_s mp0 = {NULL, ___mp0_getp, ___mp0_freep};
/*
* DMAable pools.
*/
/*
* With pci bus iommu support, we maintain one pool per pcidev and a
* hashed reverse table for virtual to bus physical address translations.
*/
static m_addr_t ___dma_getp(m_pool_s *mp)
{
m_addr_t vp;
m_vtob_s *vbp;
vbp = __m_calloc(&mp0, sizeof(*vbp), "VTOB");
if (vbp) {
dma_addr_t daddr;
vp = (m_addr_t) dma_alloc_coherent(mp->bush,
PAGE_SIZE<<MEMO_PAGE_ORDER,
&daddr, GFP_ATOMIC);
if (vp) {
int hc = VTOB_HASH_CODE(vp);
vbp->vaddr = vp;
vbp->baddr = daddr;
vbp->next = mp->vtob[hc];
mp->vtob[hc] = vbp;
++mp->nump;
return vp;
}
}
if (vbp)
__m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
return 0;
}
static void ___dma_freep(m_pool_s *mp, m_addr_t m)
{
m_vtob_s **vbpp, *vbp;
int hc = VTOB_HASH_CODE(m);
vbpp = &mp->vtob[hc];
while (*vbpp && (*vbpp)->vaddr != m)
vbpp = &(*vbpp)->next;
if (*vbpp) {
vbp = *vbpp;
*vbpp = (*vbpp)->next;
dma_free_coherent(mp->bush, PAGE_SIZE<<MEMO_PAGE_ORDER,
(void *)vbp->vaddr, (dma_addr_t)vbp->baddr);
__m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
--mp->nump;
}
}
static inline m_pool_s *___get_dma_pool(m_bush_t bush)
{
m_pool_s *mp;
for (mp = mp0.next; mp && mp->bush != bush; mp = mp->next);
return mp;
}
static m_pool_s *___cre_dma_pool(m_bush_t bush)
{
m_pool_s *mp;
mp = __m_calloc(&mp0, sizeof(*mp), "MPOOL");
if (mp) {
memset(mp, 0, sizeof(*mp));
mp->bush = bush;
mp->getp = ___dma_getp;
mp->freep = ___dma_freep;
mp->next = mp0.next;
mp0.next = mp;
}
return mp;
}
static void ___del_dma_pool(m_pool_s *p)
{
struct m_pool **pp = &mp0.next;
while (*pp && *pp != p)
pp = &(*pp)->next;
if (*pp) {
*pp = (*pp)->next;
__m_free(&mp0, p, sizeof(*p), "MPOOL");
}
}
static void *__m_calloc_dma(m_bush_t bush, int size, char *name)
{
u_long flags;
struct m_pool *mp;
void *m = NULL;
spin_lock_irqsave(&ncr53c8xx_lock, flags);
mp = ___get_dma_pool(bush);
if (!mp)
mp = ___cre_dma_pool(bush);
if (mp)
m = __m_calloc(mp, size, name);
if (mp && !mp->nump)
___del_dma_pool(mp);
spin_unlock_irqrestore(&ncr53c8xx_lock, flags);
return m;
}
static void __m_free_dma(m_bush_t bush, void *m, int size, char *name)
{
u_long flags;
struct m_pool *mp;
spin_lock_irqsave(&ncr53c8xx_lock, flags);
mp = ___get_dma_pool(bush);
if (mp)
__m_free(mp, m, size, name);
if (mp && !mp->nump)
___del_dma_pool(mp);
spin_unlock_irqrestore(&ncr53c8xx_lock, flags);
}
static m_addr_t __vtobus(m_bush_t bush, void *m)
{
u_long flags;
m_pool_s *mp;
int hc = VTOB_HASH_CODE(m);
m_vtob_s *vp = NULL;
m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK;
spin_lock_irqsave(&ncr53c8xx_lock, flags);
mp = ___get_dma_pool(bush);
if (mp) {
vp = mp->vtob[hc];
while (vp && (m_addr_t) vp->vaddr != a)
vp = vp->next;
}
spin_unlock_irqrestore(&ncr53c8xx_lock, flags);
return vp ? vp->baddr + (((m_addr_t) m) - a) : 0;
}
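Once the hash chain has produced the m_vtob entry for an address's cluster, the translation above is plain arithmetic; a sketch of just that step, reusing the file's own types (illustrative, not part of the driver):
/* Illustrative only: the arithmetic __vtobus() performs after the lookup. */
static m_addr_t vtob_translate(m_vtob_s *vp, void *m)
{
	m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK;	/* cluster base */

	return vp->baddr + (((m_addr_t) m) - a);	/* bus base + offset */
}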
#define _m_calloc_dma(np, s, n) __m_calloc_dma(np->dev, s, n)
#define _m_free_dma(np, p, s, n) __m_free_dma(np->dev, p, s, n)
#define m_calloc_dma(s, n) _m_calloc_dma(np, s, n)
#define m_free_dma(p, s, n) _m_free_dma(np, p, s, n)
#define _vtobus(np, p) __vtobus(np->dev, p)
#define vtobus(p) _vtobus(np, p)
/*
* Deal with DMA mapping/unmapping.
*/
/* To keep track of the dma mapping (sg/single) that has been set */
#define __data_mapped SCp.phase
#define __data_mapping SCp.have_data_in
static void __unmap_scsi_data(struct device *dev, struct scsi_cmnd *cmd)
{
switch(cmd->__data_mapped) {
case 2:
dma_unmap_sg(dev, cmd->buffer, cmd->use_sg,
cmd->sc_data_direction);
break;
case 1:
dma_unmap_single(dev, cmd->__data_mapping,
cmd->request_bufflen,
cmd->sc_data_direction);
break;
}
cmd->__data_mapped = 0;
}
static u_long __map_scsi_single_data(struct device *dev, struct scsi_cmnd *cmd)
{
dma_addr_t mapping;
if (cmd->request_bufflen == 0)
return 0;
mapping = dma_map_single(dev, cmd->request_buffer,
cmd->request_bufflen,
cmd->sc_data_direction);
cmd->__data_mapped = 1;
cmd->__data_mapping = mapping;
return mapping;
}
static int __map_scsi_sg_data(struct device *dev, struct scsi_cmnd *cmd)
{
int use_sg;
if (cmd->use_sg == 0)
return 0;
use_sg = dma_map_sg(dev, cmd->buffer, cmd->use_sg,
cmd->sc_data_direction);
cmd->__data_mapped = 2;
cmd->__data_mapping = use_sg;
return use_sg;
}
#define unmap_scsi_data(np, cmd) __unmap_scsi_data(np->dev, cmd)
#define map_scsi_single_data(np, cmd) __map_scsi_single_data(np->dev, cmd)
#define map_scsi_sg_data(np, cmd) __map_scsi_sg_data(np->dev, cmd)
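These helpers are presumably used in matched pairs around each command: one of the map helpers when the command's data is set up, unmap_scsi_data() when it completes or is torn down. A hedged sketch of that pairing (both callers here are hypothetical):
/* Hypothetical callers showing the intended pairing of the helpers above. */
static void demo_setup_data_mapping(struct device *dev, struct scsi_cmnd *cmd)
{
	if (cmd->use_sg)
		__map_scsi_sg_data(dev, cmd);	  /* scatter list: result is the mapped entry count */
	else
		__map_scsi_single_data(dev, cmd); /* flat buffer: result is the bus address, 0 if no data */
}

static void demo_teardown_data_mapping(struct device *dev, struct scsi_cmnd *cmd)
{
	__unmap_scsi_data(dev, cmd);	/* no-op when nothing was mapped */
}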
/*==========================================================
**
** Driver setup.
**
** This structure is initialized from linux config
** options. It can be overridden at boot-up by the boot
** command line.
**
**==========================================================
*/
static struct ncr_driver_setup
driver_setup = SCSI_NCR_DRIVER_SETUP;
#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
static struct ncr_driver_setup
driver_safe_setup __initdata = SCSI_NCR_DRIVER_SAFE_SETUP;
#endif
#define initverbose (driver_setup.verbose)
#define bootverbose (np->verbose)
/*===================================================================
**
** Driver setup from the boot command line
**
**===================================================================
*/
#ifdef MODULE
#define ARG_SEP ' '
#else
#define ARG_SEP ','
#endif
#define OPT_TAGS 1
#define OPT_MASTER_PARITY 2
#define OPT_SCSI_PARITY 3
#define OPT_DISCONNECTION 4
#define OPT_SPECIAL_FEATURES 5
#define OPT_UNUSED_1 6
#define OPT_FORCE_SYNC_NEGO 7
#define OPT_REVERSE_PROBE 8
#define OPT_DEFAULT_SYNC 9
#define OPT_VERBOSE 10
#define OPT_DEBUG 11
#define OPT_BURST_MAX 12
#define OPT_LED_PIN 13
#define OPT_MAX_WIDE 14
#define OPT_SETTLE_DELAY 15
#define OPT_DIFF_SUPPORT 16
#define OPT_IRQM 17
#define OPT_PCI_FIX_UP 18
#define OPT_BUS_CHECK 19
#define OPT_OPTIMIZE 20
#define OPT_RECOVERY 21
#define OPT_SAFE_SETUP 22
#define OPT_USE_NVRAM 23
#define OPT_EXCLUDE 24
#define OPT_HOST_ID 25
#ifdef SCSI_NCR_IARB_SUPPORT
#define OPT_IARB 26
#endif
static char setup_token[] __initdata =
"tags:" "mpar:"
"spar:" "disc:"
"specf:" "ultra:"
"fsn:" "revprob:"
"sync:" "verb:"
"debug:" "burst:"
"led:" "wide:"
"settle:" "diff:"
"irqm:" "pcifix:"
"buschk:" "optim:"
"recovery:"
"safe:" "nvram:"
"excl:" "hostid:"
#ifdef SCSI_NCR_IARB_SUPPORT
"iarb:"
#endif
;	/* DO NOT REMOVE THIS ';' */
#ifdef MODULE
#define ARG_SEP ' '
#else
#define ARG_SEP ','
#endif
static int __init get_setup_token(char *p)
{
char *cur = setup_token;
char *pc;
int i = 0;
while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
++pc;
++i;
if (!strncmp(p, cur, pc - cur))
return i;
cur = pc;
}
return 0;
}
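/*
 * Aside (not part of the patch): a minimal, self-contained sketch of the
 * colon-terminated token matching that get_setup_token() performs above.
 * The token list and the sample option strings are made up for illustration;
 * the driver's real list is setup_token[] and options are split on ARG_SEP.
 */
#include <string.h>

static const char demo_tokens[] = "tags:" "sync:" "verb:";

/* Return the 1-based index of the option that "p" starts with, 0 if none. */
static int demo_get_token(const char *p)
{
	const char *cur = demo_tokens;
	const char *pc;
	int i = 0;

	while ((pc = strchr(cur, ':')) != NULL) {
		++pc;
		++i;
		if (!strncmp(p, cur, pc - cur))
			return i;
		cur = pc;
	}
	return 0;
}

/* demo_get_token("sync:25") == 2, demo_get_token("bogus:1") == 0. */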
static int __init sym53c8xx__setup(char *str)
{
#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
char *cur = str;
char *pc, *pv;
int i, val, c;
int xi = 0;
while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
char *pe;
val = 0;
pv = pc;
c = *++pv;
if (c == 'n')
val = 0;
else if (c == 'y')
val = 1;
else
val = (int) simple_strtoul(pv, &pe, 0);
switch (get_setup_token(cur)) {
case OPT_TAGS:
driver_setup.default_tags = val;
if (pe && *pe == '/') {
i = 0;
while (*pe && *pe != ARG_SEP &&
i < sizeof(driver_setup.tag_ctrl)-1) {
driver_setup.tag_ctrl[i++] = *pe++;
}
driver_setup.tag_ctrl[i] = '\0';
}
break;
case OPT_MASTER_PARITY:
driver_setup.master_parity = val;
break;
case OPT_SCSI_PARITY:
driver_setup.scsi_parity = val;
break;
case OPT_DISCONNECTION:
driver_setup.disconnection = val;
break;
case OPT_SPECIAL_FEATURES:
driver_setup.special_features = val;
break;
case OPT_FORCE_SYNC_NEGO:
driver_setup.force_sync_nego = val;
break;
case OPT_REVERSE_PROBE:
driver_setup.reverse_probe = val;
break;
case OPT_DEFAULT_SYNC:
driver_setup.default_sync = val;
break;
case OPT_VERBOSE:
driver_setup.verbose = val;
break;
case OPT_DEBUG:
driver_setup.debug = val;
break;
case OPT_BURST_MAX:
driver_setup.burst_max = val;
break;
case OPT_LED_PIN:
driver_setup.led_pin = val;
break;
case OPT_MAX_WIDE:
driver_setup.max_wide = val? 1:0;
break;
case OPT_SETTLE_DELAY:
driver_setup.settle_delay = val;
break;
case OPT_DIFF_SUPPORT:
driver_setup.diff_support = val;
break;
case OPT_IRQM:
driver_setup.irqm = val;
break;
case OPT_PCI_FIX_UP:
driver_setup.pci_fix_up = val;
break;
case OPT_BUS_CHECK:
driver_setup.bus_check = val;
break;
case OPT_OPTIMIZE:
driver_setup.optimize = val;
break;
case OPT_RECOVERY:
driver_setup.recovery = val;
break;
case OPT_USE_NVRAM:
driver_setup.use_nvram = val;
break;
case OPT_SAFE_SETUP:
memcpy(&driver_setup, &driver_safe_setup,
sizeof(driver_setup));
break;
case OPT_EXCLUDE:
if (xi < SCSI_NCR_MAX_EXCLUDES)
driver_setup.excludes[xi++] = val;
break;
case OPT_HOST_ID:
driver_setup.host_id = val;
break;
#ifdef SCSI_NCR_IARB_SUPPORT
case OPT_IARB:
driver_setup.iarb = val;
break;
#endif
default:
printk("sym53c8xx_setup: unexpected boot option '%.*s' ignored\n", (int)(pc-cur+1), cur);
break;
}
if ((cur = strchr(cur, ARG_SEP)) != NULL)
++cur;
}
#endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */
return 1;
}
/*===================================================================
**
** Get device queue depth from boot command line.
**
**===================================================================
*/
#define DEF_DEPTH (driver_setup.default_tags)
#define ALL_TARGETS -2
#define NO_TARGET -1
#define ALL_LUNS -2
#define NO_LUN -1
static int device_queue_depth(int unit, int target, int lun)
{
int c, h, t, u, v;
char *p = driver_setup.tag_ctrl;
char *ep;
h = -1;
t = NO_TARGET;
u = NO_LUN;
while ((c = *p++) != 0) {
v = simple_strtoul(p, &ep, 0);
switch(c) {
case '/':
++h;
t = ALL_TARGETS;
u = ALL_LUNS;
break;
case 't':
if (t != target)
t = (target == v) ? v : NO_TARGET;
u = ALL_LUNS;
break;
case 'u':
if (u != lun)
u = (lun == v) ? v : NO_LUN;
break;
case 'q':
if (h == unit &&
(t == ALL_TARGETS || t == target) &&
(u == ALL_LUNS || u == lun))
return v;
break;
case '-':
t = ALL_TARGETS;
u = ALL_LUNS;
break;
default:
break;
}
p = ep;
}
return DEF_DEPTH;
}
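/*
 * Worked example (the option string and its values are hypothetical, not
 * taken from the patch): a boot option of "tags:4/t2q16/q8" sets
 * driver_setup.default_tags to 4 and copies "/t2q16/q8" into
 * driver_setup.tag_ctrl.  device_queue_depth() then walks that string:
 * the first '/' selects host 0, 't2' narrows it to target 2, and 'q16'
 * returns a depth of 16 for that target; the second '/' selects host 1,
 * where 'q8' applies to every target and lun.  Any device not matched
 * falls through to DEF_DEPTH, i.e. the 4 from "tags:4".
 */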
/*==========================================================
@ -2971,21 +3703,10 @@ struct host_data {
static void ncr_print_msg(struct ccb *cp, char *label, u_char *msg)
{
int i;
PRINT_ADDR(cp->cmd, "%s: ", label);
printk ("%x",*msg);
if (*msg == M_EXTENDED) {
for (i = 1; i < 8; i++) {
if (i - 1 > msg[1])
break;
printk ("-%x",msg[i]);
}
} else if ((*msg & 0xf0) == 0x20) {
printk ("-%x",msg[1]);
}
printk(".\n");
spi_print_msg(msg);
printk("\n");
}
/*==========================================================

File diff suppressed because it is too large

View File

@ -1,55 +1,70 @@
config SCSI_QLA2XXX
tristate
default (SCSI && PCI)
depends on SCSI && PCI
tristate "QLogic QLA2XXX Fibre Channel Support"
depends on PCI && SCSI
select SCSI_FC_ATTRS
select FW_LOADER
---help---
This qla2xxx driver supports all QLogic Fibre Channel
PCI and PCIe host adapters.
By default, firmware for the ISP parts will be loaded
via the Firmware Loader interface.
ISP Firmware Filename
---------- -----------------
21xx ql2100_fw.bin
22xx ql2200_fw.bin
2300, 2312 ql2300_fw.bin
2322 ql2322_fw.bin
6312, 6322 ql6312_fw.bin
24xx ql2400_fw.bin
Upon request, the driver caches the firmware image until
the driver is unloaded.
NOTE: The original method of building firmware-loader
modules has been deprecated as the firmware-images will
be removed from the kernel sources.
config SCSI_QLA2XXX_EMBEDDED_FIRMWARE
bool " Use firmware-loader modules (DEPRECATED)"
depends on SCSI_QLA2XXX
config SCSI_QLA21XX
tristate "QLogic ISP2100 host adapter family support"
depends on SCSI_QLA2XXX
select SCSI_FC_ATTRS
select FW_LOADER
tristate " Build QLogic ISP2100 firmware-module"
depends on SCSI_QLA2XXX_EMBEDDED_FIRMWARE
---help---
This driver supports the QLogic 21xx (ISP2100) host adapter family.
config SCSI_QLA22XX
tristate "QLogic ISP2200 host adapter family support"
depends on SCSI_QLA2XXX
select SCSI_FC_ATTRS
select FW_LOADER
tristate " Build QLogic ISP2200 firmware-module"
depends on SCSI_QLA2XXX_EMBEDDED_FIRMWARE
---help---
This driver supports the QLogic 22xx (ISP2200) host adapter family.
config SCSI_QLA2300
tristate "QLogic ISP2300 host adapter family support"
depends on SCSI_QLA2XXX
select SCSI_FC_ATTRS
select FW_LOADER
tristate " Build QLogic ISP2300 firmware-module"
depends on SCSI_QLA2XXX_EMBEDDED_FIRMWARE
---help---
This driver supports the QLogic 2300 (ISP2300 and ISP2312) host
adapter family.
config SCSI_QLA2322
tristate "QLogic ISP2322 host adapter family support"
depends on SCSI_QLA2XXX
select SCSI_FC_ATTRS
select FW_LOADER
tristate " Build QLogic ISP2322 firmware-module"
depends on SCSI_QLA2XXX_EMBEDDED_FIRMWARE
---help---
This driver supports the QLogic 2322 (ISP2322) host adapter family.
config SCSI_QLA6312
tristate "QLogic ISP63xx host adapter family support"
depends on SCSI_QLA2XXX
select SCSI_FC_ATTRS
select FW_LOADER
tristate " Build QLogic ISP63xx firmware-module"
depends on SCSI_QLA2XXX_EMBEDDED_FIRMWARE
---help---
This driver supports the QLogic 63xx (ISP6312 and ISP6322) host
adapter family.
config SCSI_QLA24XX
tristate "QLogic ISP24xx host adapter family support"
depends on SCSI_QLA2XXX
select SCSI_FC_ATTRS
select FW_LOADER
tristate " Build QLogic ISP24xx firmware-module"
depends on SCSI_QLA2XXX_EMBEDDED_FIRMWARE
---help---
This driver supports the QLogic 24xx (ISP2422 and ISP2432) host
adapter family.

View File

@ -3,15 +3,18 @@ EXTRA_CFLAGS += -DUNIQUE_FW_NAME
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
qla_dbg.o qla_sup.o qla_rscn.o qla_attr.o
obj-$(CONFIG_SCSI_QLA2XXX) += qla2xxx.o
qla2100-y := ql2100.o ql2100_fw.o
qla2200-y := ql2200.o ql2200_fw.o
qla2300-y := ql2300.o ql2300_fw.o
qla2322-y := ql2322.o ql2322_fw.o
qla6312-y := ql6312.o ql6312_fw.o
qla2400-y := ql2400.o ql2400_fw.o
obj-$(CONFIG_SCSI_QLA21XX) += qla2xxx.o qla2100.o
obj-$(CONFIG_SCSI_QLA22XX) += qla2xxx.o qla2200.o
obj-$(CONFIG_SCSI_QLA2300) += qla2xxx.o qla2300.o
obj-$(CONFIG_SCSI_QLA2322) += qla2xxx.o qla2322.o
obj-$(CONFIG_SCSI_QLA6312) += qla2xxx.o qla6312.o
obj-$(CONFIG_SCSI_QLA24XX) += qla2xxx.o
obj-$(CONFIG_SCSI_QLA24XX) += qla2xxx.o qla2400.o

View File

@ -0,0 +1,111 @@
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2005 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "qla_def.h"
static char qla_driver_name[] = "qla2400";
extern uint32_t fw2400_version_str[];
extern uint32_t fw2400_addr01;
extern uint32_t fw2400_code01[];
extern uint32_t fw2400_length01;
extern uint32_t fw2400_addr02;
extern uint32_t fw2400_code02[];
extern uint32_t fw2400_length02;
static struct qla_fw_info qla_fw_tbl[] = {
{
.addressing = FW_INFO_ADDR_EXTENDED,
.fwcode = (unsigned short *)&fw2400_code01[0],
.fwlen = (unsigned short *)&fw2400_length01,
.lfwstart = (unsigned long *)&fw2400_addr01,
},
{
.addressing = FW_INFO_ADDR_EXTENDED,
.fwcode = (unsigned short *)&fw2400_code02[0],
.fwlen = (unsigned short *)&fw2400_length02,
.lfwstart = (unsigned long *)&fw2400_addr02,
},
{ FW_INFO_ADDR_NOMORE, },
};
static struct qla_board_info qla_board_tbl[] = {
{
.drv_name = qla_driver_name,
.isp_name = "ISP2422",
.fw_info = qla_fw_tbl,
.fw_fname = "ql2400_fw.bin",
},
{
.drv_name = qla_driver_name,
.isp_name = "ISP2432",
.fw_info = qla_fw_tbl,
.fw_fname = "ql2400_fw.bin",
},
};
static struct pci_device_id qla24xx_pci_tbl[] = {
{
.vendor = PCI_VENDOR_ID_QLOGIC,
.device = PCI_DEVICE_ID_QLOGIC_ISP2422,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = (unsigned long)&qla_board_tbl[0],
},
{
.vendor = PCI_VENDOR_ID_QLOGIC,
.device = PCI_DEVICE_ID_QLOGIC_ISP2432,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = (unsigned long)&qla_board_tbl[1],
},
{0, 0},
};
MODULE_DEVICE_TABLE(pci, qla24xx_pci_tbl);
static int __devinit
qla24xx_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
return qla2x00_probe_one(pdev,
(struct qla_board_info *)id->driver_data);
}
static void __devexit
qla24xx_remove_one(struct pci_dev *pdev)
{
qla2x00_remove_one(pdev);
}
static struct pci_driver qla24xx_pci_driver = {
.name = "qla2400",
.id_table = qla24xx_pci_tbl,
.probe = qla24xx_probe_one,
.remove = __devexit_p(qla24xx_remove_one),
};
static int __init
qla24xx_init(void)
{
return pci_module_init(&qla24xx_pci_driver);
}
static void __exit
qla24xx_exit(void)
{
pci_unregister_driver(&qla24xx_pci_driver);
}
module_init(qla24xx_init);
module_exit(qla24xx_exit);
MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP24xx FC-SCSI Host Bus Adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLA2XXX_VERSION);

File diff suppressed because it is too large

View File

@ -232,7 +232,7 @@ static ssize_t
qla2x00_isp_name_show(struct class_device *cdev, char *buf)
{
scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
return snprintf(buf, PAGE_SIZE, "%s\n", ha->brd_info->isp_name);
return snprintf(buf, PAGE_SIZE, "ISP%04X\n", ha->pdev->device);
}
static ssize_t

View File

@ -22,6 +22,7 @@
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/firmware.h>
#include <asm/semaphore.h>
#include <scsi/scsi.h>
@ -29,6 +30,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#if defined(CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE)
#if defined(CONFIG_SCSI_QLA21XX) || defined(CONFIG_SCSI_QLA21XX_MODULE)
#define IS_QLA2100(ha) ((ha)->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2100)
#else
@ -79,9 +81,23 @@
#define IS_QLA2522(ha) 0
#endif
#else /* !defined(CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE) */
#define IS_QLA2100(ha) ((ha)->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2100)
#define IS_QLA2200(ha) ((ha)->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2200)
#define IS_QLA2300(ha) ((ha)->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2300)
#define IS_QLA2312(ha) ((ha)->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2312)
#define IS_QLA2322(ha) ((ha)->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2322)
#define IS_QLA6312(ha) ((ha)->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP6312)
#define IS_QLA6322(ha) ((ha)->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP6322)
#define IS_QLA2422(ha) ((ha)->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422)
#define IS_QLA2432(ha) ((ha)->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432)
#define IS_QLA2512(ha) ((ha)->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2512)
#define IS_QLA2522(ha) ((ha)->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2522)
#endif
#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
IS_QLA6312(ha) || IS_QLA6322(ha))
#define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha))
#define IS_QLA25XX(ha) (IS_QLA2512(ha) || IS_QLA2522(ha))
@ -2124,6 +2140,12 @@ struct qla_board_info {
struct scsi_host_template *sht;
};
struct fw_blob {
char *name;
uint32_t segs[4];
const struct firmware *fw;
};
/* Return data from MBC_GET_ID_LIST call. */
struct gid_list_info {
uint8_t al_pa;

View File

@ -33,8 +33,8 @@ extern int qla24xx_nvram_config(struct scsi_qla_host *);
extern void qla2x00_update_fw_options(struct scsi_qla_host *);
extern void qla24xx_update_fw_options(scsi_qla_host_t *);
extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *);
extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *);
extern int qla24xx_load_risc_flash(scsi_qla_host_t *, uint32_t *);
extern int qla24xx_load_risc_hotplug(scsi_qla_host_t *, uint32_t *);
extern fc_port_t *qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t);
@ -76,6 +76,8 @@ extern void qla2x00_blink_led(scsi_qla_host_t *);
extern int qla2x00_down_timeout(struct semaphore *, unsigned long);
extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_iocb.c source file.
*/

View File

@ -8,7 +8,6 @@
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/firmware.h>
#include <scsi/scsi_transport_fc.h>
#include "qla_devtbl.h"
@ -3484,17 +3483,16 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
return (rval);
}
#if defined(CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE)
int
qla2x00_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
{
int rval;
uint16_t cnt;
uint16_t *risc_code;
unsigned long risc_address;
unsigned long risc_code_size;
int num;
int i;
uint16_t *req_ring;
int rval, num, i;
uint32_t cnt;
uint16_t *risc_code;
uint32_t risc_addr, risc_size;
uint16_t *req_ring;
struct qla_fw_info *fw_iter;
rval = QLA_SUCCESS;
@ -3504,37 +3502,29 @@ qla2x00_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
*srisc_addr = *ha->brd_info->fw_info->fwstart;
while (fw_iter->addressing != FW_INFO_ADDR_NOMORE) {
risc_code = fw_iter->fwcode;
risc_code_size = *fw_iter->fwlen;
if (fw_iter->addressing == FW_INFO_ADDR_NORMAL) {
risc_address = *fw_iter->fwstart;
} else {
/* Extended address */
risc_address = *fw_iter->lfwstart;
}
risc_size = *fw_iter->fwlen;
if (fw_iter->addressing == FW_INFO_ADDR_NORMAL)
risc_addr = *fw_iter->fwstart;
else
risc_addr = *fw_iter->lfwstart;
num = 0;
rval = 0;
while (risc_code_size > 0 && !rval) {
while (risc_size > 0 && !rval) {
cnt = (uint16_t)(ha->fw_transfer_size >> 1);
if (cnt > risc_code_size)
cnt = risc_code_size;
if (cnt > risc_size)
cnt = risc_size;
DEBUG7(printk("scsi(%ld): Loading risc segment@ "
"addr %p, number of bytes 0x%x, offset 0x%lx.\n",
ha->host_no, risc_code, cnt, risc_address));
ha->host_no, risc_code, cnt, risc_addr));
req_ring = (uint16_t *)ha->request_ring;
for (i = 0; i < cnt; i++)
req_ring[i] = cpu_to_le16(risc_code[i]);
if (fw_iter->addressing == FW_INFO_ADDR_NORMAL) {
rval = qla2x00_load_ram(ha, ha->request_dma,
risc_address, cnt);
} else {
rval = qla2x00_load_ram_ext(ha,
ha->request_dma, risc_address, cnt);
}
rval = qla2x00_load_ram(ha, ha->request_dma, risc_addr,
cnt);
if (rval) {
DEBUG(printk("scsi(%ld): [ERROR] Failed to "
"load segment %d of firmware\n",
@ -3548,16 +3538,76 @@ qla2x00_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
}
risc_code += cnt;
risc_address += cnt;
risc_code_size -= cnt;
risc_addr += cnt;
risc_size -= cnt;
num++;
}
/* Next firmware sequence */
fw_iter++;
}
return rval;
}
return (rval);
int
qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
{
int rval, num, i;
uint32_t cnt;
uint32_t *risc_code;
uint32_t risc_addr, risc_size;
uint32_t *req_ring;
struct qla_fw_info *fw_iter;
rval = QLA_SUCCESS;
/* Load firmware sequences */
fw_iter = ha->brd_info->fw_info;
*srisc_addr = *((uint32_t *)fw_iter->lfwstart);
while (fw_iter->addressing != FW_INFO_ADDR_NOMORE) {
risc_code = (uint32_t *)fw_iter->fwcode;
risc_size = *((uint32_t *)fw_iter->fwlen);
risc_addr = *((uint32_t *)fw_iter->lfwstart);
num = 0;
rval = 0;
while (risc_size > 0 && !rval) {
cnt = (uint32_t)(ha->fw_transfer_size >> 2);
if (cnt > risc_size)
cnt = risc_size;
DEBUG7(printk("scsi(%ld): Loading risc segment@ "
"addr %p, number of bytes 0x%x, offset 0x%lx.\n",
ha->host_no, risc_code, cnt, risc_addr));
req_ring = (uint32_t *)ha->request_ring;
for (i = 0; i < cnt; i++)
req_ring[i] = cpu_to_le32(risc_code[i]);
rval = qla2x00_load_ram(ha, ha->request_dma, risc_addr,
cnt);
if (rval) {
DEBUG(printk("scsi(%ld): [ERROR] Failed to "
"load segment %d of firmware\n",
ha->host_no, num));
qla_printk(KERN_WARNING, ha,
"[ERROR] Failed to load segment %d of "
"firmware\n", num);
qla2x00_dump_regs(ha);
break;
}
risc_code += cnt;
risc_addr += cnt;
risc_size -= cnt;
num++;
}
/* Next firmware sequence */
fw_iter++;
}
return rval;
}
int
@ -3642,8 +3692,108 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
return rval;
}
#else /* !defined(CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE) */
int
qla24xx_load_risc_hotplug(scsi_qla_host_t *ha, uint32_t *srisc_addr)
qla2x00_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
{
int rval;
int i, fragment;
uint16_t *wcode, *fwcode;
uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
struct fw_blob *blob;
/* Load firmware blob. */
blob = qla2x00_request_firmware(ha);
if (!blob) {
qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
return QLA_FUNCTION_FAILED;
}
rval = QLA_SUCCESS;
wcode = (uint16_t *)ha->request_ring;
*srisc_addr = 0;
fwcode = (uint16_t *)blob->fw->data;
fwclen = 0;
/* Validate firmware image by checking version. */
if (blob->fw->size < 8 * sizeof(uint16_t)) {
qla_printk(KERN_WARNING, ha,
"Unable to verify integrity of firmware image (%Zd)!\n",
blob->fw->size);
goto fail_fw_integrity;
}
for (i = 0; i < 4; i++)
wcode[i] = be16_to_cpu(fwcode[i + 4]);
if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
wcode[2] == 0 && wcode[3] == 0)) {
qla_printk(KERN_WARNING, ha,
"Unable to verify integrity of firmware image!\n");
qla_printk(KERN_WARNING, ha,
"Firmware data: %04x %04x %04x %04x!\n", wcode[0],
wcode[1], wcode[2], wcode[3]);
goto fail_fw_integrity;
}
seg = blob->segs;
while (*seg && rval == QLA_SUCCESS) {
risc_addr = *seg;
*srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
risc_size = be16_to_cpu(fwcode[3]);
/* Validate firmware image size. */
fwclen += risc_size * sizeof(uint16_t);
if (blob->fw->size < fwclen) {
qla_printk(KERN_WARNING, ha,
"Unable to verify integrity of firmware image "
"(%Zd)!\n", blob->fw->size);
goto fail_fw_integrity;
}
fragment = 0;
while (risc_size > 0 && rval == QLA_SUCCESS) {
wlen = (uint16_t)(ha->fw_transfer_size >> 1);
if (wlen > risc_size)
wlen = risc_size;
DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
"addr %x, number of words 0x%x.\n", ha->host_no,
risc_addr, wlen));
for (i = 0; i < wlen; i++)
wcode[i] = swab16(fwcode[i]);
rval = qla2x00_load_ram(ha, ha->request_dma, risc_addr,
wlen);
if (rval) {
DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
"segment %d of firmware\n", ha->host_no,
fragment));
qla_printk(KERN_WARNING, ha,
"[ERROR] Failed to load segment %d of "
"firmware\n", fragment);
break;
}
fwcode += wlen;
risc_addr += wlen;
risc_size -= wlen;
fragment++;
}
/* Next segment. */
seg++;
}
return rval;
fail_fw_integrity:
return QLA_FUNCTION_FAILED;
}
int
qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
{
int rval;
int segments, fragment;
@ -3651,14 +3801,13 @@ qla24xx_load_risc_hotplug(scsi_qla_host_t *ha, uint32_t *srisc_addr)
uint32_t risc_addr;
uint32_t risc_size;
uint32_t i;
const struct firmware *fw_entry;
struct fw_blob *blob;
uint32_t *fwcode, fwclen;
if (request_firmware(&fw_entry, ha->brd_info->fw_fname,
&ha->pdev->dev)) {
qla_printk(KERN_ERR, ha,
"Firmware image file not available: '%s'\n",
ha->brd_info->fw_fname);
/* Load firmware blob. */
blob = qla2x00_request_firmware(ha);
if (!blob) {
qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
return QLA_FUNCTION_FAILED;
}
@ -3667,14 +3816,14 @@ qla24xx_load_risc_hotplug(scsi_qla_host_t *ha, uint32_t *srisc_addr)
segments = FA_RISC_CODE_SEGMENTS;
dcode = (uint32_t *)ha->request_ring;
*srisc_addr = 0;
fwcode = (uint32_t *)fw_entry->data;
fwcode = (uint32_t *)blob->fw->data;
fwclen = 0;
/* Validate firmware image by checking version. */
if (fw_entry->size < 8 * sizeof(uint32_t)) {
if (blob->fw->size < 8 * sizeof(uint32_t)) {
qla_printk(KERN_WARNING, ha,
"Unable to verify integrity of flash firmware image "
"(%Zd)!\n", fw_entry->size);
"Unable to verify integrity of firmware image (%Zd)!\n",
blob->fw->size);
goto fail_fw_integrity;
}
for (i = 0; i < 4; i++)
@ -3684,7 +3833,7 @@ qla24xx_load_risc_hotplug(scsi_qla_host_t *ha, uint32_t *srisc_addr)
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
dcode[3] == 0)) {
qla_printk(KERN_WARNING, ha,
"Unable to verify integrity of flash firmware image!\n");
"Unable to verify integrity of firmware image!\n");
qla_printk(KERN_WARNING, ha,
"Firmware data: %08x %08x %08x %08x!\n", dcode[0],
dcode[1], dcode[2], dcode[3]);
@ -3698,10 +3847,11 @@ qla24xx_load_risc_hotplug(scsi_qla_host_t *ha, uint32_t *srisc_addr)
/* Validate firmware image size. */
fwclen += risc_size * sizeof(uint32_t);
if (fw_entry->size < fwclen) {
if (blob->fw->size < fwclen) {
qla_printk(KERN_WARNING, ha,
"Unable to verify integrity of flash firmware "
"image (%Zd)!\n", fw_entry->size);
"Unable to verify integrity of firmware image "
"(%Zd)!\n", blob->fw->size);
goto fail_fw_integrity;
}
@ -3739,13 +3889,9 @@ qla24xx_load_risc_hotplug(scsi_qla_host_t *ha, uint32_t *srisc_addr)
/* Next segment. */
segments--;
}
release_firmware(fw_entry);
return rval;
fail_fw_integrity:
release_firmware(fw_entry);
return QLA_FUNCTION_FAILED;
}
#endif

View File

@ -54,10 +54,12 @@ module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xloginretrycount,
"Specify an alternate value for the NVRAM login retry count.");
int ql2xfwloadbin=1;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
"Load ISP2xxx firmware image via hotplug.");
#if defined(CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE)
int ql2xfwloadflash;
module_param(ql2xfwloadflash, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xfwloadflash,
"Load ISP24xx firmware image from FLASH (onboard memory).");
#endif
static void qla2x00_free_device(scsi_qla_host_t *);
@ -1261,12 +1263,16 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
char pci_info[20];
char fw_str[30];
fc_port_t *fcport;
struct scsi_host_template *sht;
if (pci_enable_device(pdev))
goto probe_out;
host = scsi_host_alloc(brd_info->sht ? brd_info->sht:
&qla2x00_driver_template, sizeof(scsi_qla_host_t));
sht = &qla2x00_driver_template;
if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432)
sht = &qla24xx_driver_template;
host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
if (host == NULL) {
printk(KERN_WARNING
"qla2xxx: Couldn't allocate host from scsi layer!\n");
@ -1291,8 +1297,8 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
goto probe_failed;
qla_printk(KERN_INFO, ha,
"Found an %s, irq %d, iobase 0x%p\n", ha->brd_info->isp_name,
pdev->irq, ha->iobase);
"Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq,
ha->iobase);
spin_lock_init(&ha->hardware_lock);
@ -1368,9 +1374,11 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
ha->isp_ops.reset_adapter = qla24xx_reset_adapter;
ha->isp_ops.nvram_config = qla24xx_nvram_config;
ha->isp_ops.update_fw_options = qla24xx_update_fw_options;
ha->isp_ops.load_risc = qla24xx_load_risc_flash;
if (ql2xfwloadbin)
ha->isp_ops.load_risc = qla24xx_load_risc_hotplug;
ha->isp_ops.load_risc = qla24xx_load_risc;
#if defined(CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE)
if (ql2xfwloadflash)
ha->isp_ops.load_risc = qla24xx_load_risc_flash;
#endif
ha->isp_ops.pci_info_str = qla24xx_pci_info_str;
ha->isp_ops.fw_version_str = qla24xx_fw_version_str;
ha->isp_ops.intr_handler = qla24xx_intr_handler;
@ -1531,11 +1539,12 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
qla_printk(KERN_INFO, ha, "\n"
" QLogic Fibre Channel HBA Driver: %s\n"
" QLogic %s - %s\n"
" %s: %s @ %s hdma%c, host#=%ld, fw=%s\n", qla2x00_version_str,
ha->model_number, ha->model_desc ? ha->model_desc: "",
ha->brd_info->isp_name, ha->isp_ops.pci_info_str(ha, pci_info),
pci_name(pdev), ha->flags.enable_64bit_addressing ? '+': '-',
ha->host_no, ha->isp_ops.fw_version_str(ha, fw_str));
" ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n",
qla2x00_version_str, ha->model_number,
ha->model_desc ? ha->model_desc: "", pdev->device,
ha->isp_ops.pci_info_str(ha, pci_info), pci_name(pdev),
ha->flags.enable_64bit_addressing ? '+': '-', ha->host_no,
ha->isp_ops.fw_version_str(ha, fw_str));
/* Go with fc_rport registration. */
list_for_each_entry(fcport, &ha->fcports, list)
@ -2483,45 +2492,115 @@ qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout)
return -ETIMEDOUT;
}
static struct qla_board_info qla_board_tbl[] = {
{
.drv_name = "qla2400",
.isp_name = "ISP2422",
.fw_fname = "ql2400_fw.bin",
.sht = &qla24xx_driver_template,
},
{
.drv_name = "qla2400",
.isp_name = "ISP2432",
.fw_fname = "ql2400_fw.bin",
.sht = &qla24xx_driver_template,
},
#if defined(CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE)
#define qla2x00_release_firmware() do { } while (0)
#define qla2x00_pci_module_init() (0)
#define qla2x00_pci_module_exit() do { } while (0)
#else /* !defined(CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE) */
/* Firmware interface routines. */
#define FW_BLOBS 6
#define FW_ISP21XX 0
#define FW_ISP22XX 1
#define FW_ISP2300 2
#define FW_ISP2322 3
#define FW_ISP63XX 4
#define FW_ISP24XX 5
static DECLARE_MUTEX(qla_fw_lock);
static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
{ .name = "ql2100_fw.bin", .segs = { 0x1000, 0 }, },
{ .name = "ql2200_fw.bin", .segs = { 0x1000, 0 }, },
{ .name = "ql2300_fw.bin", .segs = { 0x800, 0 }, },
{ .name = "ql2322_fw.bin", .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
{ .name = "ql6312_fw.bin", .segs = { 0x800, 0 }, },
{ .name = "ql2400_fw.bin", },
};
struct fw_blob *
qla2x00_request_firmware(scsi_qla_host_t *ha)
{
struct fw_blob *blob;
blob = NULL;
if (IS_QLA2100(ha)) {
blob = &qla_fw_blobs[FW_ISP21XX];
} else if (IS_QLA2200(ha)) {
blob = &qla_fw_blobs[FW_ISP22XX];
} else if (IS_QLA2300(ha) || IS_QLA2312(ha)) {
blob = &qla_fw_blobs[FW_ISP2300];
} else if (IS_QLA2322(ha)) {
blob = &qla_fw_blobs[FW_ISP2322];
} else if (IS_QLA6312(ha) || IS_QLA6322(ha)) {
blob = &qla_fw_blobs[FW_ISP63XX];
} else if (IS_QLA24XX(ha)) {
blob = &qla_fw_blobs[FW_ISP24XX];
}
down(&qla_fw_lock);
if (blob->fw)
goto out;
if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
DEBUG2(printk("scsi(%ld): Failed to load firmware image "
"(%s).\n", ha->host_no, blob->name));
blob->fw = NULL;
blob = NULL;
goto out;
}
out:
up(&qla_fw_lock);
return blob;
}
static void
qla2x00_release_firmware(void)
{
int idx;
down(&qla_fw_lock);
for (idx = 0; idx < FW_BLOBS; idx++)
if (qla_fw_blobs[idx].fw)
release_firmware(qla_fw_blobs[idx].fw);
up(&qla_fw_lock);
}
static struct qla_board_info qla_board_tbl = {
.drv_name = "qla2xxx",
};
static struct pci_device_id qla2xxx_pci_tbl[] = {
{
.vendor = PCI_VENDOR_ID_QLOGIC,
.device = PCI_DEVICE_ID_QLOGIC_ISP2422,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = (unsigned long)&qla_board_tbl[0],
},
{
.vendor = PCI_VENDOR_ID_QLOGIC,
.device = PCI_DEVICE_ID_QLOGIC_ISP2432,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = (unsigned long)&qla_board_tbl[1],
},
{0, 0},
{ PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
static int __devinit
qla2xxx_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
return qla2x00_probe_one(pdev,
(struct qla_board_info *)id->driver_data);
return qla2x00_probe_one(pdev, &qla_board_tbl);
}
static void __devexit
@ -2532,11 +2611,28 @@ qla2xxx_remove_one(struct pci_dev *pdev)
static struct pci_driver qla2xxx_pci_driver = {
.name = "qla2xxx",
.driver = {
.owner = THIS_MODULE,
},
.id_table = qla2xxx_pci_tbl,
.probe = qla2xxx_probe_one,
.remove = __devexit_p(qla2xxx_remove_one),
};
static inline int
qla2x00_pci_module_init(void)
{
return pci_module_init(&qla2xxx_pci_driver);
}
static inline void
qla2x00_pci_module_exit(void)
{
pci_unregister_driver(&qla2xxx_pci_driver);
}
#endif
/**
* qla2x00_module_init - Module initialization.
**/
@ -2556,6 +2652,9 @@ qla2x00_module_init(void)
/* Derive version string. */
strcpy(qla2x00_version_str, QLA2XXX_VERSION);
#if defined(CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE)
strcat(qla2x00_version_str, "-fw");
#endif
#if DEBUG_QLA2100
strcat(qla2x00_version_str, "-debug");
#endif
@ -2565,7 +2664,7 @@ qla2x00_module_init(void)
return -ENODEV;
printk(KERN_INFO "QLogic Fibre Channel HBA Driver\n");
ret = pci_module_init(&qla2xxx_pci_driver);
ret = qla2x00_pci_module_init();
if (ret) {
kmem_cache_destroy(srb_cachep);
fc_release_transport(qla2xxx_transport_template);
@ -2579,7 +2678,8 @@ qla2x00_module_init(void)
static void __exit
qla2x00_module_exit(void)
{
pci_unregister_driver(&qla2xxx_pci_driver);
qla2x00_pci_module_exit();
qla2x00_release_firmware();
kmem_cache_destroy(srb_cachep);
fc_release_transport(qla2xxx_transport_template);
}

View File

@ -115,7 +115,7 @@ static DECLARE_TRANSPORT_CLASS(raid_class,
raid_remove,
NULL);
static struct {
static const struct {
enum raid_state value;
char *name;
} raid_states[] = {

View File

@ -354,8 +354,9 @@ static int scsi_dev_info_list_add_str(char *dev_list)
* @model, if found, return the matching flags value, else return
* the host or global default settings.
**/
int scsi_get_device_flags(struct scsi_device *sdev, unsigned char *vendor,
unsigned char *model)
int scsi_get_device_flags(struct scsi_device *sdev,
const unsigned char *vendor,
const unsigned char *model)
{
struct scsi_dev_info_list *devinfo;
unsigned int bflags;

View File

@ -1319,23 +1319,6 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
}
}
/**
* scsi_eh_lock_done - done function for eh door lock request
* @scmd: SCSI command block for the door lock request
*
* Notes:
* We completed the asynchronous door lock request, and it has either
* locked the door or failed. We must free the command structures
* associated with this request.
**/
static void scsi_eh_lock_done(struct scsi_cmnd *scmd)
{
struct scsi_request *sreq = scmd->sc_request;
scsi_release_request(sreq);
}
/**
* scsi_eh_lock_door - Prevent medium removal for the specified device
* @sdev: SCSI device to prevent medium removal
@ -1358,29 +1341,17 @@ static void scsi_eh_lock_done(struct scsi_cmnd *scmd)
**/
static void scsi_eh_lock_door(struct scsi_device *sdev)
{
struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
unsigned char cmnd[MAX_COMMAND_SIZE];
if (unlikely(!sreq)) {
printk(KERN_ERR "%s: request allocate failed,"
"prevent media removal cmd not sent\n", __FUNCTION__);
return;
}
cmnd[0] = ALLOW_MEDIUM_REMOVAL;
cmnd[1] = 0;
cmnd[2] = 0;
cmnd[3] = 0;
cmnd[4] = SCSI_REMOVAL_PREVENT;
cmnd[5] = 0;
sreq->sr_cmnd[0] = ALLOW_MEDIUM_REMOVAL;
sreq->sr_cmnd[1] = 0;
sreq->sr_cmnd[2] = 0;
sreq->sr_cmnd[3] = 0;
sreq->sr_cmnd[4] = SCSI_REMOVAL_PREVENT;
sreq->sr_cmnd[5] = 0;
sreq->sr_data_direction = DMA_NONE;
sreq->sr_bufflen = 0;
sreq->sr_buffer = NULL;
sreq->sr_allowed = 5;
sreq->sr_done = scsi_eh_lock_done;
sreq->sr_timeout_per_command = 10 * HZ;
sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);
scsi_insert_special_req(sreq, 1);
scsi_execute_async(sdev, cmnd, DMA_NONE, NULL, 0, 0, 10 * HZ,
5, NULL, NULL, GFP_KERNEL);
}

View File

@ -63,39 +63,6 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
};
#undef SP
/*
* Function: scsi_insert_special_req()
*
* Purpose: Insert pre-formed request into request queue.
*
* Arguments: sreq - request that is ready to be queued.
* at_head - boolean. True if we should insert at head
* of queue, false if we should insert at tail.
*
* Lock status: Assumed that lock is not held upon entry.
*
* Returns: Nothing
*
* Notes: This function is called from character device and from
* ioctl types of functions where the caller knows exactly
* what SCSI command needs to be issued. The idea is that
* we merely inject the command into the queue (at the head
* for now), and then call the queue request function to actually
* process it.
*/
int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
{
/*
* Because users of this function are apt to reuse requests with no
* modification, we have to sanitise the request flags here
*/
sreq->sr_request->flags &= ~REQ_DONTPREP;
blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
at_head, sreq);
return 0;
}
static void scsi_run_queue(struct request_queue *q);
/*
@ -249,8 +216,13 @@ void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
/*
* head injection *required* here otherwise quiesce won't work
*
* Because users of this function are apt to reuse requests with no
* modification, we have to sanitise the request flags here
*/
scsi_insert_special_req(sreq, 1);
sreq->sr_request->flags &= ~REQ_DONTPREP;
blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
1, sreq);
}
EXPORT_SYMBOL(scsi_do_req);
@ -287,6 +259,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
memcpy(req->cmd, cmd, req->cmd_len);
req->sense = sense;
req->sense_len = 0;
req->retries = retries;
req->timeout = timeout;
req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET;
@ -327,6 +300,200 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
}
EXPORT_SYMBOL(scsi_execute_req);
struct scsi_io_context {
void *data;
void (*done)(void *data, char *sense, int result, int resid);
char sense[SCSI_SENSE_BUFFERSIZE];
};
static kmem_cache_t *scsi_io_context_cache;
static void scsi_end_async(struct request *req)
{
struct scsi_io_context *sioc = req->end_io_data;
if (sioc->done)
sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
kmem_cache_free(scsi_io_context_cache, sioc);
__blk_put_request(req->q, req);
}
static int scsi_merge_bio(struct request *rq, struct bio *bio)
{
struct request_queue *q = rq->q;
bio->bi_flags &= ~(1 << BIO_SEG_VALID);
if (rq_data_dir(rq) == WRITE)
bio->bi_rw |= (1 << BIO_RW);
blk_queue_bounce(q, &bio);
if (!rq->bio)
blk_rq_bio_prep(q, rq, bio);
else if (!q->back_merge_fn(q, rq, bio))
return -EINVAL;
else {
rq->biotail->bi_next = bio;
rq->biotail = bio;
rq->hard_nr_sectors += bio_sectors(bio);
rq->nr_sectors = rq->hard_nr_sectors;
}
return 0;
}
static int scsi_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
{
if (bio->bi_size)
return 1;
bio_put(bio);
return 0;
}
/**
* scsi_req_map_sg - map a scatterlist into a request
* @rq: request to fill
* @sg: scatterlist
* @nsegs: number of elements
* @bufflen: len of buffer
* @gfp: memory allocation flags
*
* scsi_req_map_sg maps a scatterlist into a request so that the
* request can be sent to the block layer. We do not trust the scatterlist
* sent to us, as some ULDs use that struct only to organize the pages.
*/
static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
int nsegs, unsigned bufflen, gfp_t gfp)
{
struct request_queue *q = rq->q;
int nr_pages = (bufflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
unsigned int data_len = 0, len, bytes, off;
struct page *page;
struct bio *bio = NULL;
int i, err, nr_vecs = 0;
for (i = 0; i < nsegs; i++) {
page = sgl[i].page;
off = sgl[i].offset;
len = sgl[i].length;
data_len += len;
while (len > 0) {
bytes = min_t(unsigned int, len, PAGE_SIZE - off);
if (!bio) {
nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
nr_pages -= nr_vecs;
bio = bio_alloc(gfp, nr_vecs);
if (!bio) {
err = -ENOMEM;
goto free_bios;
}
bio->bi_end_io = scsi_bi_endio;
}
if (bio_add_pc_page(q, bio, page, bytes, off) !=
bytes) {
bio_put(bio);
err = -EINVAL;
goto free_bios;
}
if (bio->bi_vcnt >= nr_vecs) {
err = scsi_merge_bio(rq, bio);
if (err) {
bio_endio(bio, bio->bi_size, 0);
goto free_bios;
}
bio = NULL;
}
page++;
len -= bytes;
off = 0;
}
}
rq->buffer = rq->data = NULL;
rq->data_len = data_len;
return 0;
free_bios:
while ((bio = rq->bio) != NULL) {
rq->bio = bio->bi_next;
/*
* call endio instead of bio_put in case it was bounced
*/
bio_endio(bio, bio->bi_size, 0);
}
return err;
}
/**
* scsi_execute_async - insert request
* @sdev: scsi device
* @cmd: scsi command
* @data_direction: data direction
* @buffer: data buffer (this can be a kernel buffer or scatterlist)
* @bufflen: len of buffer
* @use_sg: if buffer is a scatterlist this is the number of elements
* @timeout: request timeout in jiffies
* @retries: number of times to retry request
* @flags: or into request flags
**/
int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
int use_sg, int timeout, int retries, void *privdata,
void (*done)(void *, char *, int, int), gfp_t gfp)
{
struct request *req;
struct scsi_io_context *sioc;
int err = 0;
int write = (data_direction == DMA_TO_DEVICE);
sioc = kmem_cache_alloc(scsi_io_context_cache, gfp);
if (!sioc)
return DRIVER_ERROR << 24;
memset(sioc, 0, sizeof(*sioc));
req = blk_get_request(sdev->request_queue, write, gfp);
if (!req)
goto free_sense;
req->flags |= REQ_BLOCK_PC | REQ_QUIET;
if (use_sg)
err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
else if (bufflen)
err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);
if (err)
goto free_req;
req->cmd_len = COMMAND_SIZE(cmd[0]);
memcpy(req->cmd, cmd, req->cmd_len);
req->sense = sioc->sense;
req->sense_len = 0;
req->timeout = timeout;
req->retries = retries;
req->end_io_data = sioc;
sioc->data = privdata;
sioc->done = done;
blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
return 0;
free_req:
blk_put_request(req);
free_sense:
kfree(sioc);
return DRIVER_ERROR << 24;
}
EXPORT_SYMBOL_GPL(scsi_execute_async);
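/*
 * Aside (not part of the patch): a hedged usage sketch of
 * scsi_execute_async().  The names my_probe_ctx, my_probe_done and
 * my_test_unit_ready are illustrative only; the call pattern mirrors the
 * scsi_eh_lock_door() change above and the st driver conversion further
 * below: issue a command without a data buffer and collect the result in
 * the completion callback.  Assumes the usual kernel/SCSI headers
 * (<linux/completion.h>, <scsi/scsi.h>, <scsi/scsi_cmnd.h>,
 * <scsi/scsi_device.h>) are already included.
 */
struct my_probe_ctx {
	struct completion done;
	int result;
};

static void my_probe_done(void *data, char *sense, int result, int resid)
{
	struct my_probe_ctx *ctx = data;

	/* "sense" points at SCSI_SENSE_BUFFERSIZE bytes of sense data and
	 * is only valid for the duration of this callback. */
	ctx->result = result;
	complete(&ctx->done);
}

static int my_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cmd[MAX_COMMAND_SIZE] = { TEST_UNIT_READY, };
	struct my_probe_ctx ctx;

	init_completion(&ctx.done);
	if (scsi_execute_async(sdev, cmd, DMA_NONE, NULL, 0, 0, 10 * HZ,
			       3, &ctx, my_probe_done, GFP_KERNEL))
		return -EBUSY;		/* request setup failed */

	wait_for_completion(&ctx.done);
	return ctx.result;
}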
/*
* Function: scsi_init_cmd_errh()
*
@ -884,7 +1051,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
* system where READ CAPACITY failed, we may have read
* past the end of the disk.
*/
if (cmd->device->use_10_for_rw &&
if ((cmd->device->use_10_for_rw &&
sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
(cmd->cmnd[0] == READ_10 ||
cmd->cmnd[0] == WRITE_10)) {
cmd->device->use_10_for_rw = 0;
@ -1082,10 +1250,16 @@ static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
static void scsi_generic_done(struct scsi_cmnd *cmd)
{
BUG_ON(!blk_pc_request(cmd->request));
scsi_io_completion(cmd, cmd->result == 0 ? cmd->bufflen : 0, 0);
/*
* This will complete the whole command with uptodate=1 so
* as far as the block layer is concerned the command completed
* successfully. Since this is a REQ_BLOCK_PC command the
* caller should check the request's errors value
*/
scsi_io_completion(cmd, cmd->bufflen, 0);
}
void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd, int retries)
void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
{
struct request *req = cmd->request;
@ -1100,7 +1274,7 @@ void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd, int retries)
cmd->sc_data_direction = DMA_FROM_DEVICE;
cmd->transfersize = req->data_len;
cmd->allowed = retries;
cmd->allowed = req->retries;
cmd->timeout_per_command = req->timeout;
}
EXPORT_SYMBOL_GPL(scsi_setup_blk_pc_cmnd);
@ -1240,7 +1414,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
goto kill;
}
} else {
scsi_setup_blk_pc_cmnd(cmd, 3);
scsi_setup_blk_pc_cmnd(cmd);
cmd->done = scsi_generic_done;
}
}
@ -1603,6 +1777,14 @@ int __init scsi_init_queue(void)
{
int i;
scsi_io_context_cache = kmem_cache_create("scsi_io_context",
sizeof(struct scsi_io_context),
0, 0, NULL, NULL);
if (!scsi_io_context_cache) {
printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
return -ENOMEM;
}
for (i = 0; i < SG_MEMPOOL_NR; i++) {
struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
int size = sgp->size * sizeof(struct scatterlist);
@ -1630,6 +1812,8 @@ void scsi_exit_queue(void)
{
int i;
kmem_cache_destroy(scsi_io_context_cache);
for (i = 0; i < SG_MEMPOOL_NR; i++) {
struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
mempool_destroy(sgp->pool);

View File

@ -40,7 +40,6 @@ extern void scsi_exit_hosts(void);
extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd);
extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
extern int scsi_insert_special_req(struct scsi_request *sreq, int);
extern void scsi_init_cmd_from_req(struct scsi_cmnd *cmd,
struct scsi_request *sreq);
extern void __scsi_release_request(struct scsi_request *sreq);
@ -57,7 +56,8 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
/* scsi_devinfo.c */
extern int scsi_get_device_flags(struct scsi_device *sdev,
unsigned char *vendor, unsigned char *model);
const unsigned char *vendor,
const unsigned char *model);
extern int __init scsi_init_devinfo(void);
extern void scsi_exit_devinfo(void);

View File

@ -74,7 +74,7 @@
#define SCSI_SCAN_TARGET_PRESENT 1
#define SCSI_SCAN_LUN_PRESENT 2
static char *scsi_null_device_strs = "nullnullnullnull";
static const char *scsi_null_device_strs = "nullnullnullnull";
#define MAX_SCSI_LUNS 512

View File

@ -21,7 +21,7 @@
#include "scsi_priv.h"
#include "scsi_logging.h"
static struct {
static const struct {
enum scsi_device_state value;
char *name;
} sdev_states[] = {
@ -48,7 +48,7 @@ const char *scsi_device_state_name(enum scsi_device_state state)
return name;
}
static struct {
static const struct {
enum scsi_host_state value;
char *name;
} shost_states[] = {

View File

@ -112,7 +112,7 @@ fc_enum_name_search(port_state, fc_port_state, fc_port_state_names)
/* Convert fc_tgtid_binding_type values to ascii string name */
static struct {
static const struct {
enum fc_tgtid_binding_type value;
char *name;
int matchlen;
@ -150,7 +150,7 @@ get_fc_##title##_names(u32 table_key, char *buf) \
/* Convert FC_COS bit values to ascii string name */
static struct {
static const struct {
u32 value;
char *name;
} fc_cos_names[] = {
@ -164,7 +164,7 @@ fc_bitfield_name_search(cos, fc_cos_names)
/* Convert FC_PORTSPEED bit values to ascii string name */
static struct {
static const struct {
u32 value;
char *name;
} fc_port_speed_names[] = {
@ -190,7 +190,7 @@ show_fc_fc4s (char *buf, u8 *fc4_list)
/* Convert FC_RPORT_ROLE bit values to ascii string name */
static struct {
static const struct {
u32 value;
char *name;
} fc_remote_port_role_names[] = {

View File

@ -18,6 +18,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/config.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/module.h>
@ -378,9 +379,7 @@ static CLASS_DEVICE_ATTR(revalidate, S_IWUSR, NULL, store_spi_revalidate);
/* Translate the period into ns according to the current spec
* for SDTR/PPR messages */
static ssize_t
show_spi_transport_period_helper(struct class_device *cdev, char *buf,
int period)
static int period_to_str(char *buf, int period)
{
int len, picosec;
@ -398,6 +397,14 @@ show_spi_transport_period_helper(struct class_device *cdev, char *buf,
len = sprint_frac(buf, picosec, 1000);
}
return len;
}
static ssize_t
show_spi_transport_period_helper(struct class_device *cdev, char *buf,
int period)
{
int len = period_to_str(buf, period);
buf[len++] = '\n';
buf[len] = '\0';
return len;
@ -1041,12 +1048,133 @@ void spi_display_xfer_agreement(struct scsi_target *starget)
tp->hold_mcs ? " HMCS" : "",
tmp, tp->offset);
} else {
dev_info(&starget->dev, "%sasynchronous.\n",
dev_info(&starget->dev, "%sasynchronous\n",
tp->width ? "wide " : "");
}
}
EXPORT_SYMBOL(spi_display_xfer_agreement);
#ifdef CONFIG_SCSI_CONSTANTS
static const char * const one_byte_msgs[] = {
/* 0x00 */ "Command Complete", NULL, "Save Pointers",
/* 0x03 */ "Restore Pointers", "Disconnect", "Initiator Error",
/* 0x06 */ "Abort", "Message Reject", "Nop", "Message Parity Error",
/* 0x0a */ "Linked Command Complete", "Linked Command Complete w/flag",
/* 0x0c */ "Bus device reset", "Abort Tag", "Clear Queue",
/* 0x0f */ "Initiate Recovery", "Release Recovery"
};
static const char * const two_byte_msgs[] = {
/* 0x20 */ "Simple Queue Tag", "Head of Queue Tag", "Ordered Queue Tag",
/* 0x23 */ "Ignore Wide Residue"
};
static const char * const extended_msgs[] = {
/* 0x00 */ "Modify Data Pointer", "Synchronous Data Transfer Request",
/* 0x02 */ "SCSI-I Extended Identify", "Wide Data Transfer Request",
/* 0x04 */ "Parallel Protocol Request"
};
void print_nego(const unsigned char *msg, int per, int off, int width)
{
if (per) {
char buf[20];
period_to_str(buf, msg[per]);
printk("period = %s ns ", buf);
}
if (off)
printk("offset = %d ", msg[off]);
if (width)
printk("width = %d ", 8 << msg[width]);
}
int spi_print_msg(const unsigned char *msg)
{
int len = 0, i;
if (msg[0] == EXTENDED_MESSAGE) {
len = 3 + msg[1];
if (msg[2] < ARRAY_SIZE(extended_msgs))
printk ("%s ", extended_msgs[msg[2]]);
else
printk ("Extended Message, reserved code (0x%02x) ",
(int) msg[2]);
switch (msg[2]) {
case EXTENDED_MODIFY_DATA_POINTER:
printk("pointer = %d", (int) (msg[3] << 24) |
(msg[4] << 16) | (msg[5] << 8) | msg[6]);
break;
case EXTENDED_SDTR:
print_nego(msg, 3, 4, 0);
break;
case EXTENDED_WDTR:
print_nego(msg, 0, 0, 3);
break;
case EXTENDED_PPR:
print_nego(msg, 3, 5, 6);
break;
default:
for (i = 2; i < len; ++i)
printk("%02x ", msg[i]);
}
/* Identify */
} else if (msg[0] & 0x80) {
printk("Identify disconnect %sallowed %s %d ",
(msg[0] & 0x40) ? "" : "not ",
(msg[0] & 0x20) ? "target routine" : "lun",
msg[0] & 0x7);
len = 1;
/* Normal One byte */
} else if (msg[0] < 0x1f) {
if (msg[0] < ARRAY_SIZE(one_byte_msgs))
printk(one_byte_msgs[msg[0]]);
else
printk("reserved (%02x) ", msg[0]);
len = 1;
/* Two byte */
} else if (msg[0] <= 0x2f) {
if ((msg[0] - 0x20) < ARRAY_SIZE(two_byte_msgs))
printk("%s %02x ", two_byte_msgs[msg[0] - 0x20],
msg[1]);
else
printk("reserved two byte (%02x %02x) ",
msg[0], msg[1]);
len = 2;
} else
printk("reserved");
return len;
}
EXPORT_SYMBOL(spi_print_msg);
#else /* ifndef CONFIG_SCSI_CONSTANTS */
int spi_print_msg(const unsigned char *msg)
{
int len = 0, i;
if (msg[0] == EXTENDED_MESSAGE) {
len = 3 + msg[1];
for (i = 0; i < len; ++i)
printk("%02x ", msg[i]);
/* Identify */
} else if (msg[0] & 0x80) {
printk("%02x ", msg[0]);
len = 1;
/* Normal One byte */
} else if (msg[0] < 0x1f) {
printk("%02x ", msg[0]);
len = 1;
/* Two byte */
} else if (msg[0] <= 0x2f) {
printk("%02x %02x", msg[0], msg[1]);
len = 2;
} else
printk("%02x ", msg[0]);
return len;
}
EXPORT_SYMBOL(spi_print_msg);
#endif /* ! CONFIG_SCSI_CONSTANTS */
#define SETUP_ATTRIBUTE(field) \
i->private_attrs[count] = class_device_attr_##field; \
if (!i->f->set_##field) { \

View File

@ -245,7 +245,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
* SG_IO from block layer already setup, just copy cdb basically
*/
if (blk_pc_request(rq)) {
scsi_setup_blk_pc_cmnd(SCpnt, SD_PASSTHROUGH_RETRIES);
scsi_setup_blk_pc_cmnd(SCpnt);
if (rq->timeout)
timeout = rq->timeout;
@ -1495,9 +1495,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
*/
if (sdkp->media_present) {
sd_read_capacity(sdkp, disk->disk_name, buffer);
if (sdp->removable)
sd_read_write_protect_flag(sdkp, disk->disk_name,
buffer);
sd_read_write_protect_flag(sdkp, disk->disk_name, buffer);
sd_read_cache_type(sdkp, disk->disk_name, buffer);
}

File diff suppressed because it is too large

View File

@ -320,7 +320,7 @@ static int sr_init_command(struct scsi_cmnd * SCpnt)
* these are already setup, just copy cdb basically
*/
if (SCpnt->request->flags & REQ_BLOCK_PC) {
scsi_setup_blk_pc_cmnd(SCpnt, MAX_RETRIES);
scsi_setup_blk_pc_cmnd(SCpnt);
if (SCpnt->timeout_per_command)
timeout = SCpnt->timeout_per_command;
@ -716,7 +716,7 @@ static void get_capabilities(struct scsi_cd *cd)
unsigned int the_result;
int retries, rc, n;
static char *loadmech[] =
static const char *loadmech[] =
{
"caddy",
"tray",

View File

@ -68,8 +68,8 @@ void sr_vendor_init(Scsi_CD *cd)
#ifndef CONFIG_BLK_DEV_SR_VENDOR
cd->vendor = VENDOR_SCSI3;
#else
char *vendor = cd->device->vendor;
char *model = cd->device->model;
const char *vendor = cd->device->vendor;
const char *model = cd->device->model;
/* default */
cd->vendor = VENDOR_SCSI3;

View File

@ -17,7 +17,7 @@
Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
*/
static char *verstr = "20050830";
static const char *verstr = "20050830";
#include <linux/module.h>
@ -50,7 +50,6 @@ static char *verstr = "20050830";
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_request.h>
#include <scsi/sg.h>
@ -134,7 +133,7 @@ static struct st_dev_parm {
#endif
/* Bit reversed order to get same names for same minors with all
mode counts */
static char *st_formats[] = {
static const char *st_formats[] = {
"", "r", "k", "s", "l", "t", "o", "u",
"m", "v", "p", "x", "a", "y", "q", "z"};
@ -188,8 +187,6 @@ static int from_buffer(struct st_buffer *, char __user *, int);
static void move_buffer_data(struct st_buffer *, int);
static void buf_to_sg(struct st_buffer *, unsigned int);
static int st_map_user_pages(struct scatterlist *, const unsigned int,
unsigned long, size_t, int, unsigned long);
static int sgl_map_user_pages(struct scatterlist *, const unsigned int,
unsigned long, size_t, int);
static int sgl_unmap_user_pages(struct scatterlist *, const unsigned int, int);
@ -313,12 +310,13 @@ static inline char *tape_name(struct scsi_tape *tape)
}
static void st_analyze_sense(struct scsi_request *SRpnt, struct st_cmdstatus *s)
static void st_analyze_sense(struct st_request *SRpnt, struct st_cmdstatus *s)
{
const u8 *ucp;
const u8 *sense = SRpnt->sr_sense_buffer;
const u8 *sense = SRpnt->sense;
s->have_sense = scsi_request_normalize_sense(SRpnt, &s->sense_hdr);
s->have_sense = scsi_normalize_sense(SRpnt->sense,
SCSI_SENSE_BUFFERSIZE, &s->sense_hdr);
s->flags = 0;
if (s->have_sense) {
@ -345,9 +343,9 @@ static void st_analyze_sense(struct scsi_request *SRpnt, struct st_cmdstatus *s)
/* Convert the result to success code */
static int st_chk_result(struct scsi_tape *STp, struct scsi_request * SRpnt)
static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
{
int result = SRpnt->sr_result;
int result = SRpnt->result;
u8 scode;
DEB(const char *stp;)
char *name = tape_name(STp);
@ -366,13 +364,12 @@ static int st_chk_result(struct scsi_tape *STp, struct scsi_request * SRpnt)
DEB(
if (debugging) {
printk(ST_DEB_MSG "%s: Error: %x, cmd: %x %x %x %x %x %x Len: %d\n",
printk(ST_DEB_MSG "%s: Error: %x, cmd: %x %x %x %x %x %x\n",
name, result,
SRpnt->sr_cmnd[0], SRpnt->sr_cmnd[1], SRpnt->sr_cmnd[2],
SRpnt->sr_cmnd[3], SRpnt->sr_cmnd[4], SRpnt->sr_cmnd[5],
SRpnt->sr_bufflen);
SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2],
SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]);
if (cmdstatp->have_sense)
scsi_print_req_sense("st", SRpnt);
__scsi_print_sense("st", SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
} ) /* end DEB */
if (!debugging) { /* Abnormal conditions for tape */
if (!cmdstatp->have_sense)
@ -386,20 +383,21 @@ static int st_chk_result(struct scsi_tape *STp, struct scsi_request * SRpnt)
/* scode != UNIT_ATTENTION && */
scode != BLANK_CHECK &&
scode != VOLUME_OVERFLOW &&
SRpnt->sr_cmnd[0] != MODE_SENSE &&
SRpnt->sr_cmnd[0] != TEST_UNIT_READY) {
SRpnt->cmd[0] != MODE_SENSE &&
SRpnt->cmd[0] != TEST_UNIT_READY) {
printk(KERN_WARNING "%s: Error with sense data: ", name);
scsi_print_req_sense("st", SRpnt);
__scsi_print_sense("st", SRpnt->sense,
SCSI_SENSE_BUFFERSIZE);
}
}
if (cmdstatp->fixed_format &&
STp->cln_mode >= EXTENDED_SENSE_START) { /* Only fixed format sense */
if (STp->cln_sense_value)
STp->cleaning_req |= ((SRpnt->sr_sense_buffer[STp->cln_mode] &
STp->cleaning_req |= ((SRpnt->sense[STp->cln_mode] &
STp->cln_sense_mask) == STp->cln_sense_value);
else
STp->cleaning_req |= ((SRpnt->sr_sense_buffer[STp->cln_mode] &
STp->cleaning_req |= ((SRpnt->sense[STp->cln_mode] &
STp->cln_sense_mask) != 0);
}
if (cmdstatp->have_sense &&
@ -411,8 +409,8 @@ static int st_chk_result(struct scsi_tape *STp, struct scsi_request * SRpnt)
if (cmdstatp->have_sense &&
scode == RECOVERED_ERROR
#if ST_RECOVERED_WRITE_FATAL
&& SRpnt->sr_cmnd[0] != WRITE_6
&& SRpnt->sr_cmnd[0] != WRITE_FILEMARKS
&& SRpnt->cmd[0] != WRITE_6
&& SRpnt->cmd[0] != WRITE_FILEMARKS
#endif
) {
STp->recover_count++;
@ -420,9 +418,9 @@ static int st_chk_result(struct scsi_tape *STp, struct scsi_request * SRpnt)
DEB(
if (debugging) {
if (SRpnt->sr_cmnd[0] == READ_6)
if (SRpnt->cmd[0] == READ_6)
stp = "read";
else if (SRpnt->sr_cmnd[0] == WRITE_6)
else if (SRpnt->cmd[0] == WRITE_6)
stp = "write";
else
stp = "ioctl";
@ -438,28 +436,37 @@ static int st_chk_result(struct scsi_tape *STp, struct scsi_request * SRpnt)
/* Wakeup from interrupt */
static void st_sleep_done(struct scsi_cmnd * SCpnt)
static void st_sleep_done(void *data, char *sense, int result, int resid)
{
struct scsi_tape *STp = container_of(SCpnt->request->rq_disk->private_data,
struct scsi_tape, driver);
struct st_request *SRpnt = data;
struct scsi_tape *STp = SRpnt->stp;
(STp->buffer)->cmdstat.midlevel_result = SCpnt->result;
SCpnt->request->rq_status = RQ_SCSI_DONE;
memcpy(SRpnt->sense, sense, SCSI_SENSE_BUFFERSIZE);
(STp->buffer)->cmdstat.midlevel_result = SRpnt->result = result;
DEB( STp->write_pending = 0; )
if (SCpnt->request->waiting)
complete(SCpnt->request->waiting);
if (SRpnt->waiting)
complete(SRpnt->waiting);
}
static struct st_request *st_allocate_request(void)
{
return kzalloc(sizeof(struct st_request), GFP_KERNEL);
}
static void st_release_request(struct st_request *streq)
{
kfree(streq);
}
/* Do the scsi command. Waits until command performed if do_wait is true.
Otherwise write_behind_check() is used to check that the command
has finished. */
static struct scsi_request *
st_do_scsi(struct scsi_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd,
static struct st_request *
st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd,
int bytes, int direction, int timeout, int retries, int do_wait)
{
struct completion *waiting;
unsigned char *bp;
/* if async, make sure there's no command outstanding */
if (!do_wait && ((STp->buffer)->last_SRpnt)) {
@ -473,7 +480,7 @@ st_do_scsi(struct scsi_request * SRpnt, struct scsi_tape * STp, unsigned char *c
}
if (SRpnt == NULL) {
SRpnt = scsi_allocate_request(STp->device, GFP_ATOMIC);
SRpnt = st_allocate_request();
if (SRpnt == NULL) {
DEBC( printk(KERN_ERR "%s: Can't get SCSI request.\n",
tape_name(STp)); );
@ -483,6 +490,7 @@ st_do_scsi(struct scsi_request * SRpnt, struct scsi_tape * STp, unsigned char *c
(STp->buffer)->syscall_result = (-EBUSY);
return NULL;
}
SRpnt->stp = STp;
}
/* If async IO, set last_SRpnt. This ptr tells write_behind_check
@ -492,32 +500,28 @@ st_do_scsi(struct scsi_request * SRpnt, struct scsi_tape * STp, unsigned char *c
waiting = &STp->wait;
init_completion(waiting);
SRpnt->sr_use_sg = STp->buffer->do_dio || (bytes > (STp->buffer)->frp[0].length);
if (SRpnt->sr_use_sg) {
if (!STp->buffer->do_dio)
buf_to_sg(STp->buffer, bytes);
SRpnt->sr_use_sg = (STp->buffer)->sg_segs;
bp = (char *) &((STp->buffer)->sg[0]);
} else
bp = (STp->buffer)->b_data;
SRpnt->sr_data_direction = direction;
SRpnt->sr_cmd_len = 0;
SRpnt->sr_request->waiting = waiting;
SRpnt->sr_request->rq_status = RQ_SCSI_BUSY;
SRpnt->sr_request->rq_disk = STp->disk;
SRpnt->sr_request->end_io = blk_end_sync_rq;
SRpnt->waiting = waiting;
if (!STp->buffer->do_dio)
buf_to_sg(STp->buffer, bytes);
memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
STp->buffer->cmdstat.have_sense = 0;
STp->buffer->syscall_result = 0;
scsi_do_req(SRpnt, (void *) cmd, bp, bytes,
st_sleep_done, timeout, retries);
if (do_wait) {
if (scsi_execute_async(STp->device, cmd, direction,
&((STp->buffer)->sg[0]), bytes, (STp->buffer)->sg_segs,
timeout, retries, SRpnt, st_sleep_done, GFP_KERNEL)) {
/* could not allocate the buffer or request was too large */
(STp->buffer)->syscall_result = (-EBUSY);
(STp->buffer)->last_SRpnt = NULL;
}
else if (do_wait) {
wait_for_completion(waiting);
SRpnt->sr_request->waiting = NULL;
if (SRpnt->sr_request->rq_status != RQ_SCSI_DONE)
SRpnt->sr_result |= (DRIVER_ERROR << 24);
SRpnt->waiting = NULL;
(STp->buffer)->syscall_result = st_chk_result(STp, SRpnt);
}
return SRpnt;
}
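Taken together, these hunks replace the old mid-layer scsi_request path with the driver-private st_request plus scsi_execute_async(). A condensed, illustrative sketch of the resulting synchronous call pattern (the same shape used by the converted callers such as cross_eof() and test_ready() later in this patch; error handling trimmed) looks like this:
static int st_example_test_unit_ready(struct scsi_tape *STp)
{
	unsigned char cmd[MAX_COMMAND_SIZE];
	struct st_request *SRpnt;
	memset(cmd, 0, MAX_COMMAND_SIZE);
	cmd[0] = TEST_UNIT_READY;
	/* Passing NULL lets st_do_scsi() allocate the st_request and set
	 * SRpnt->stp; do_wait = 1 sleeps on STp->wait until st_sleep_done()
	 * has copied the sense data and the result into the request. */
	SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
			   STp->device->timeout, MAX_RETRIES, 1);
	if (!SRpnt)
		return (STp->buffer)->syscall_result;
	st_release_request(SRpnt);
	return (STp->buffer)->syscall_result;
}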
@ -532,7 +536,7 @@ static int write_behind_check(struct scsi_tape * STp)
struct st_buffer *STbuffer;
struct st_partstat *STps;
struct st_cmdstatus *cmdstatp;
struct scsi_request *SRpnt;
struct st_request *SRpnt;
STbuffer = STp->buffer;
if (!STbuffer->writing)
@ -548,12 +552,10 @@ static int write_behind_check(struct scsi_tape * STp)
wait_for_completion(&(STp->wait));
SRpnt = STbuffer->last_SRpnt;
STbuffer->last_SRpnt = NULL;
SRpnt->sr_request->waiting = NULL;
if (SRpnt->sr_request->rq_status != RQ_SCSI_DONE)
SRpnt->sr_result |= (DRIVER_ERROR << 24);
SRpnt->waiting = NULL;
(STp->buffer)->syscall_result = st_chk_result(STp, SRpnt);
scsi_release_request(SRpnt);
st_release_request(SRpnt);
STbuffer->buffer_bytes -= STbuffer->writing;
STps = &(STp->ps[STp->partition]);
@ -593,7 +595,7 @@ static int write_behind_check(struct scsi_tape * STp)
it messes up the block number). */
static int cross_eof(struct scsi_tape * STp, int forward)
{
struct scsi_request *SRpnt;
struct st_request *SRpnt;
unsigned char cmd[MAX_COMMAND_SIZE];
cmd[0] = SPACE;
@ -613,7 +615,7 @@ static int cross_eof(struct scsi_tape * STp, int forward)
if (!SRpnt)
return (STp->buffer)->syscall_result;
scsi_release_request(SRpnt);
st_release_request(SRpnt);
SRpnt = NULL;
if ((STp->buffer)->cmdstat.midlevel_result != 0)
@ -630,7 +632,7 @@ static int flush_write_buffer(struct scsi_tape * STp)
int offset, transfer, blks;
int result;
unsigned char cmd[MAX_COMMAND_SIZE];
struct scsi_request *SRpnt;
struct st_request *SRpnt;
struct st_partstat *STps;
result = write_behind_check(STp);
@ -688,7 +690,7 @@ static int flush_write_buffer(struct scsi_tape * STp)
STp->dirty = 0;
(STp->buffer)->buffer_bytes = 0;
}
scsi_release_request(SRpnt);
st_release_request(SRpnt);
SRpnt = NULL;
}
return result;
@ -785,7 +787,7 @@ static int set_mode_densblk(struct scsi_tape * STp, struct st_modedef * STm)
}
/* Lock or unlock the drive door. Don't use when scsi_request allocated. */
/* Lock or unlock the drive door. Don't use when st_request allocated. */
static int do_door_lock(struct scsi_tape * STp, int do_lock)
{
int retval, cmd;
@ -844,7 +846,7 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
int attentions, waits, max_wait, scode;
int retval = CHKRES_READY, new_session = 0;
unsigned char cmd[MAX_COMMAND_SIZE];
struct scsi_request *SRpnt = NULL;
struct st_request *SRpnt = NULL;
struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
max_wait = do_wait ? ST_BLOCK_SECONDS : 0;
@ -903,7 +905,7 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
}
if (SRpnt != NULL)
scsi_release_request(SRpnt);
st_release_request(SRpnt);
return retval;
}
@ -918,7 +920,7 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
int i, retval, new_session = 0, do_wait;
unsigned char cmd[MAX_COMMAND_SIZE], saved_cleaning;
unsigned short st_flags = filp->f_flags;
struct scsi_request *SRpnt = NULL;
struct st_request *SRpnt = NULL;
struct st_modedef *STm;
struct st_partstat *STps;
char *name = tape_name(STp);
@ -993,7 +995,7 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
goto err_out;
}
if (!SRpnt->sr_result && !STp->buffer->cmdstat.have_sense) {
if (!SRpnt->result && !STp->buffer->cmdstat.have_sense) {
STp->max_block = ((STp->buffer)->b_data[1] << 16) |
((STp->buffer)->b_data[2] << 8) | (STp->buffer)->b_data[3];
STp->min_block = ((STp->buffer)->b_data[4] << 8) |
@ -1045,7 +1047,7 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
}
STp->drv_write_prot = ((STp->buffer)->b_data[2] & 0x80) != 0;
}
scsi_release_request(SRpnt);
st_release_request(SRpnt);
SRpnt = NULL;
STp->inited = 1;
@ -1196,7 +1198,7 @@ static int st_flush(struct file *filp)
{
int result = 0, result2;
unsigned char cmd[MAX_COMMAND_SIZE];
struct scsi_request *SRpnt;
struct st_request *SRpnt;
struct scsi_tape *STp = filp->private_data;
struct st_modedef *STm = &(STp->modes[STp->current_mode]);
struct st_partstat *STps = &(STp->ps[STp->partition]);
@ -1249,7 +1251,7 @@ static int st_flush(struct file *filp)
cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) &&
(!cmdstatp->remainder_valid || cmdstatp->uremainder64 == 0))) {
/* Write successful at EOM */
scsi_release_request(SRpnt);
st_release_request(SRpnt);
SRpnt = NULL;
if (STps->drv_file >= 0)
STps->drv_file++;
@ -1259,7 +1261,7 @@ static int st_flush(struct file *filp)
STps->eof = ST_FM;
}
else { /* Write error */
scsi_release_request(SRpnt);
st_release_request(SRpnt);
SRpnt = NULL;
printk(KERN_ERR "%s: Error on write filemark.\n", name);
if (result == 0)
@ -1400,11 +1402,11 @@ static int setup_buffering(struct scsi_tape *STp, const char __user *buf,
i = STp->try_dio && try_rdio;
else
i = STp->try_dio && try_wdio;
if (i && ((unsigned long)buf & queue_dma_alignment(
STp->device->request_queue)) == 0) {
i = st_map_user_pages(&(STbp->sg[0]), STbp->use_sg,
(unsigned long)buf, count, (is_read ? READ : WRITE),
STp->max_pfn);
i = sgl_map_user_pages(&(STbp->sg[0]), STbp->use_sg,
(unsigned long)buf, count, (is_read ? READ : WRITE));
if (i > 0) {
STbp->do_dio = i;
STbp->buffer_bytes = 0; /* can be used as transfer counter */
@ -1449,14 +1451,15 @@ static int setup_buffering(struct scsi_tape *STp, const char __user *buf,
/* Can be called more than once after each setup_buffer() */
static void release_buffering(struct scsi_tape *STp)
static void release_buffering(struct scsi_tape *STp, int is_read)
{
struct st_buffer *STbp;
STbp = STp->buffer;
if (STbp->do_dio) {
sgl_unmap_user_pages(&(STbp->sg[0]), STbp->do_dio, 0);
sgl_unmap_user_pages(&(STbp->sg[0]), STbp->do_dio, is_read);
STbp->do_dio = 0;
STbp->sg_segs = 0;
}
}
@ -1472,7 +1475,7 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
int async_write;
unsigned char cmd[MAX_COMMAND_SIZE];
const char __user *b_point;
struct scsi_request *SRpnt = NULL;
struct st_request *SRpnt = NULL;
struct scsi_tape *STp = filp->private_data;
struct st_modedef *STm;
struct st_partstat *STps;
@ -1624,7 +1627,7 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
retval = STbp->syscall_result;
goto out;
}
if (async_write) {
if (async_write && !STbp->syscall_result) {
STbp->writing = transfer;
STp->dirty = !(STbp->writing ==
STbp->buffer_bytes);
@ -1698,7 +1701,7 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
} else {
count += do_count;
STps->drv_block = (-1); /* Too cautious? */
retval = (-EIO);
retval = STbp->syscall_result;
}
}
@ -1728,8 +1731,8 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
out:
if (SRpnt != NULL)
scsi_release_request(SRpnt);
release_buffering(STp);
st_release_request(SRpnt);
release_buffering(STp, 0);
up(&STp->lock);
return retval;
@ -1742,11 +1745,11 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
Does release user buffer mapping if it is set.
*/
static long read_tape(struct scsi_tape *STp, long count,
struct scsi_request ** aSRpnt)
struct st_request ** aSRpnt)
{
int transfer, blks, bytes;
unsigned char cmd[MAX_COMMAND_SIZE];
struct scsi_request *SRpnt;
struct st_request *SRpnt;
struct st_modedef *STm;
struct st_partstat *STps;
struct st_buffer *STbp;
@ -1787,7 +1790,7 @@ static long read_tape(struct scsi_tape *STp, long count,
SRpnt = *aSRpnt;
SRpnt = st_do_scsi(SRpnt, STp, cmd, bytes, DMA_FROM_DEVICE,
STp->device->timeout, MAX_RETRIES, 1);
release_buffering(STp);
release_buffering(STp, 1);
*aSRpnt = SRpnt;
if (!SRpnt)
return STbp->syscall_result;
@ -1802,10 +1805,10 @@ static long read_tape(struct scsi_tape *STp, long count,
retval = 1;
DEBC(printk(ST_DEB_MSG "%s: Sense: %2x %2x %2x %2x %2x %2x %2x %2x\n",
name,
SRpnt->sr_sense_buffer[0], SRpnt->sr_sense_buffer[1],
SRpnt->sr_sense_buffer[2], SRpnt->sr_sense_buffer[3],
SRpnt->sr_sense_buffer[4], SRpnt->sr_sense_buffer[5],
SRpnt->sr_sense_buffer[6], SRpnt->sr_sense_buffer[7]));
SRpnt->sense[0], SRpnt->sense[1],
SRpnt->sense[2], SRpnt->sense[3],
SRpnt->sense[4], SRpnt->sense[5],
SRpnt->sense[6], SRpnt->sense[7]));
if (cmdstatp->have_sense) {
if (cmdstatp->sense_hdr.sense_key == BLANK_CHECK)
@ -1835,7 +1838,7 @@ static long read_tape(struct scsi_tape *STp, long count,
}
STbp->buffer_bytes = bytes - transfer;
} else {
scsi_release_request(SRpnt);
st_release_request(SRpnt);
SRpnt = *aSRpnt = NULL;
if (transfer == blks) { /* We did not get anything, error */
printk(KERN_NOTICE "%s: Incorrect block size.\n", name);
@ -1929,7 +1932,7 @@ st_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
ssize_t retval = 0;
ssize_t i, transfer;
int special, do_dio = 0;
struct scsi_request *SRpnt = NULL;
struct st_request *SRpnt = NULL;
struct scsi_tape *STp = filp->private_data;
struct st_modedef *STm;
struct st_partstat *STps;
@ -2054,11 +2057,11 @@ st_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
out:
if (SRpnt != NULL) {
scsi_release_request(SRpnt);
st_release_request(SRpnt);
SRpnt = NULL;
}
if (do_dio) {
release_buffering(STp);
release_buffering(STp, 1);
STbp->buffer_bytes = 0;
}
up(&STp->lock);
@ -2284,7 +2287,7 @@ static int st_set_options(struct scsi_tape *STp, long options)
static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
{
unsigned char cmd[MAX_COMMAND_SIZE];
struct scsi_request *SRpnt = NULL;
struct st_request *SRpnt = NULL;
memset(cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = MODE_SENSE;
@ -2298,7 +2301,7 @@ static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
if (SRpnt == NULL)
return (STp->buffer)->syscall_result;
scsi_release_request(SRpnt);
st_release_request(SRpnt);
return (STp->buffer)->syscall_result;
}
@ -2310,7 +2313,7 @@ static int write_mode_page(struct scsi_tape *STp, int page, int slow)
{
int pgo;
unsigned char cmd[MAX_COMMAND_SIZE];
struct scsi_request *SRpnt = NULL;
struct st_request *SRpnt = NULL;
memset(cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = MODE_SELECT;
@ -2329,7 +2332,7 @@ static int write_mode_page(struct scsi_tape *STp, int page, int slow)
if (SRpnt == NULL)
return (STp->buffer)->syscall_result;
scsi_release_request(SRpnt);
st_release_request(SRpnt);
return (STp->buffer)->syscall_result;
}
@ -2412,7 +2415,7 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
DEB( char *name = tape_name(STp); )
unsigned char cmd[MAX_COMMAND_SIZE];
struct st_partstat *STps;
struct scsi_request *SRpnt;
struct st_request *SRpnt;
if (STp->ready != ST_READY && !load_code) {
if (STp->ready == ST_NO_TAPE)
@ -2455,7 +2458,7 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
return (STp->buffer)->syscall_result;
retval = (STp->buffer)->syscall_result;
scsi_release_request(SRpnt);
st_release_request(SRpnt);
if (!retval) { /* SCSI command successful */
@ -2503,7 +2506,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
int ioctl_result;
int chg_eof = 1;
unsigned char cmd[MAX_COMMAND_SIZE];
struct scsi_request *SRpnt;
struct st_request *SRpnt;
struct st_partstat *STps;
int fileno, blkno, at_sm, undone;
int datalen = 0, direction = DMA_NONE;
@ -2757,7 +2760,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
ioctl_result = (STp->buffer)->syscall_result;
if (!ioctl_result) { /* SCSI command successful */
scsi_release_request(SRpnt);
st_release_request(SRpnt);
SRpnt = NULL;
STps->drv_block = blkno;
STps->drv_file = fileno;
@ -2872,7 +2875,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
/* Try the other possible state of Page Format if not
already tried */
STp->use_pf = !STp->use_pf | PF_TESTED;
scsi_release_request(SRpnt);
st_release_request(SRpnt);
SRpnt = NULL;
return st_int_ioctl(STp, cmd_in, arg);
}
@ -2882,7 +2885,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
if (cmdstatp->sense_hdr.sense_key == BLANK_CHECK)
STps->eof = ST_EOD;
scsi_release_request(SRpnt);
st_release_request(SRpnt);
SRpnt = NULL;
}
@ -2898,7 +2901,7 @@ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partiti
{
int result;
unsigned char scmd[MAX_COMMAND_SIZE];
struct scsi_request *SRpnt;
struct st_request *SRpnt;
DEB( char *name = tape_name(STp); )
if (STp->ready != ST_READY)
@ -2944,7 +2947,7 @@ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partiti
DEBC(printk(ST_DEB_MSG "%s: Got tape pos. blk %d part %d.\n", name,
*block, *partition));
}
scsi_release_request(SRpnt);
st_release_request(SRpnt);
SRpnt = NULL;
return result;
@ -2961,7 +2964,7 @@ static int set_location(struct scsi_tape *STp, unsigned int block, int partition
unsigned int blk;
int timeout;
unsigned char scmd[MAX_COMMAND_SIZE];
struct scsi_request *SRpnt;
struct st_request *SRpnt;
DEB( char *name = tape_name(STp); )
if (STp->ready != ST_READY)
@ -3047,7 +3050,7 @@ static int set_location(struct scsi_tape *STp, unsigned int block, int partition
result = 0;
}
scsi_release_request(SRpnt);
st_release_request(SRpnt);
SRpnt = NULL;
return result;
@ -3577,7 +3580,7 @@ static long st_compat_ioctl(struct file *file, unsigned int cmd, unsigned long a
static struct st_buffer *
new_tape_buffer(int from_initialization, int need_dma, int max_sg)
{
int i, got = 0, segs = 0;
int i, got = 0;
gfp_t priority;
struct st_buffer *tb;
@ -3594,10 +3597,8 @@ static struct st_buffer *
return NULL;
}
memset(tb, 0, i);
tb->frp_segs = tb->orig_frp_segs = segs;
tb->frp_segs = tb->orig_frp_segs = 0;
tb->use_sg = max_sg;
if (segs > 0)
tb->b_data = page_address(tb->sg[0].page);
tb->frp = (struct st_buf_fragment *)(&(tb->sg[0]) + max_sg);
tb->in_use = 1;
@ -3628,7 +3629,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
priority = GFP_KERNEL | __GFP_NOWARN;
if (need_dma)
priority |= GFP_DMA;
for (b_size = PAGE_SIZE, order=0;
for (b_size = PAGE_SIZE, order=0; order <= 6 &&
b_size < new_size - STbuffer->buffer_size;
order++, b_size *= 2)
; /* empty */
@ -3670,6 +3671,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
}
STbuffer->frp_segs = STbuffer->orig_frp_segs;
STbuffer->frp_sg_current = 0;
STbuffer->sg_segs = 0;
}
@ -3882,7 +3884,6 @@ static int st_probe(struct device *dev)
struct st_buffer *buffer;
int i, j, mode, dev_num, error;
char *stp;
u64 bounce_limit;
if (SDp->type != TYPE_TAPE)
return -ENODEV;
@ -3892,7 +3893,8 @@ static int st_probe(struct device *dev)
return -ENODEV;
}
i = SDp->host->sg_tablesize;
i = min(SDp->request_queue->max_hw_segments,
SDp->request_queue->max_phys_segments);
if (st_max_sg_segs < i)
i = st_max_sg_segs;
buffer = new_tape_buffer(1, (SDp->host)->unchecked_isa_dma, i);
@ -3994,11 +3996,6 @@ static int st_probe(struct device *dev)
tpnt->long_timeout = ST_LONG_TIMEOUT;
tpnt->try_dio = try_direct_io && !SDp->host->unchecked_isa_dma;
bounce_limit = scsi_calculate_bounce_limit(SDp->host) >> PAGE_SHIFT;
if (bounce_limit > ULONG_MAX)
bounce_limit = ULONG_MAX;
tpnt->max_pfn = bounce_limit;
for (i = 0; i < ST_NBR_MODES; i++) {
STm = &(tpnt->modes[i]);
STm->defined = 0;
@ -4077,9 +4074,9 @@ static int st_probe(struct device *dev)
sdev_printk(KERN_WARNING, SDp,
"Attached scsi tape %s", tape_name(tpnt));
printk(KERN_WARNING "%s: try direct i/o: %s (alignment %d B), max page reachable by HBA %lu\n",
printk(KERN_WARNING "%s: try direct i/o: %s (alignment %d B)\n",
tape_name(tpnt), tpnt->try_dio ? "yes" : "no",
queue_dma_alignment(SDp->request_queue) + 1, tpnt->max_pfn);
queue_dma_alignment(SDp->request_queue) + 1);
return 0;
@ -4185,7 +4182,11 @@ static void scsi_tape_release(struct kref *kref)
static void st_intr(struct scsi_cmnd *SCpnt)
{
scsi_io_completion(SCpnt, (SCpnt->result ? 0: SCpnt->bufflen), 1);
/*
* The caller should be checking the request's errors
* value.
*/
scsi_io_completion(SCpnt, SCpnt->bufflen, 0);
}
/*
@ -4197,7 +4198,7 @@ static int st_init_command(struct scsi_cmnd *SCpnt)
if (!(SCpnt->request->flags & REQ_BLOCK_PC))
return 0;
scsi_setup_blk_pc_cmnd(SCpnt, 0);
scsi_setup_blk_pc_cmnd(SCpnt);
SCpnt->done = st_intr;
return 1;
}
@ -4390,34 +4391,6 @@ static void do_create_class_files(struct scsi_tape *STp, int dev_num, int mode)
return;
}
/* Pin down user pages and put them into a scatter gather list. Returns <= 0 if
- mapping of all pages not successful
- any page is above max_pfn
(i.e., either completely successful or fails)
*/
static int st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
unsigned long uaddr, size_t count, int rw,
unsigned long max_pfn)
{
int i, nr_pages;
nr_pages = sgl_map_user_pages(sgl, max_pages, uaddr, count, rw);
if (nr_pages <= 0)
return nr_pages;
for (i=0; i < nr_pages; i++) {
if (page_to_pfn(sgl[i].page) > max_pfn)
goto out_unmap;
}
return nr_pages;
out_unmap:
sgl_unmap_user_pages(sgl, nr_pages, 0);
return 0;
}
/* The following functions may be useful for a larger audience. */
static int sgl_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
unsigned long uaddr, size_t count, int rw)

View File

@ -4,6 +4,7 @@
#include <linux/completion.h>
#include <linux/kref.h>
#include <scsi/scsi_cmnd.h>
/* Descriptor for analyzed sense data */
struct st_cmdstatus {
@ -17,6 +18,17 @@ struct st_cmdstatus {
u8 deferred;
};
struct scsi_tape;
/* scsi tape command */
struct st_request {
unsigned char cmd[MAX_COMMAND_SIZE];
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
int result;
struct scsi_tape *stp;
struct completion *waiting;
};
/* The tape buffer descriptor. */
struct st_buffer {
unsigned char in_use;
@ -28,7 +40,7 @@ struct st_buffer {
int read_pointer;
int writing;
int syscall_result;
struct scsi_request *last_SRpnt;
struct st_request *last_SRpnt;
struct st_cmdstatus cmdstat;
unsigned char *b_data;
unsigned short use_sg; /* zero or max number of s/g segments for this adapter */

View File

@ -70,6 +70,7 @@
*
*/
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>
/*
* Further development / testing that should be done :
@ -2378,7 +2379,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
* 3..length+1 arguments
*
* Start the extended message buffer with the EXTENDED_MESSAGE
* byte, since scsi_print_msg() wants the whole thing.
* byte, since spi_print_msg() wants the whole thing.
*/
extended_msg[0] = EXTENDED_MESSAGE;
/* Accept first byte by clearing ACK */
@ -2431,7 +2432,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
default:
if (!tmp) {
printk(KERN_DEBUG "scsi%d: rejecting message ", HOSTNO);
scsi_print_msg (extended_msg);
spi_print_msg(extended_msg);
printk("\n");
} else if (tmp != EXTENDED_MESSAGE)
printk(KERN_DEBUG "scsi%d: rejecting unknown "
@ -2566,7 +2567,7 @@ static void NCR5380_reselect (struct Scsi_Host *instance)
if (!(msg[0] & 0x80)) {
printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO);
scsi_print_msg(msg);
spi_print_msg(msg);
do_abort(instance);
return;
}

View File

@ -40,7 +40,7 @@
#ifndef SYM_DEFS_H
#define SYM_DEFS_H
#define SYM_VERSION "2.2.1"
#define SYM_VERSION "2.2.2"
#define SYM_DRIVER_NAME "sym-" SYM_VERSION
/*

View File

@ -37,11 +37,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifdef __FreeBSD__
#include <dev/sym/sym_glue.h>
#else
#include "sym_glue.h"
#endif
/*
* Macros used for all firmwares.
@ -60,19 +56,12 @@
#define SYM_FWA_SCR sym_fw1a_scr
#define SYM_FWB_SCR sym_fw1b_scr
#define SYM_FWZ_SCR sym_fw1z_scr
#ifdef __FreeBSD__
#include <dev/sym/sym_fw1.h>
#else
#include "sym_fw1.h"
#endif
static struct sym_fwa_ofs sym_fw1a_ofs = {
SYM_GEN_FW_A(struct SYM_FWA_SCR)
};
static struct sym_fwb_ofs sym_fw1b_ofs = {
SYM_GEN_FW_B(struct SYM_FWB_SCR)
#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
SYM_GEN_B(struct SYM_FWB_SCR, data_io)
#endif
};
static struct sym_fwz_ofs sym_fw1z_ofs = {
SYM_GEN_FW_Z(struct SYM_FWZ_SCR)
@ -88,19 +77,12 @@ static struct sym_fwz_ofs sym_fw1z_ofs = {
#define SYM_FWA_SCR sym_fw2a_scr
#define SYM_FWB_SCR sym_fw2b_scr
#define SYM_FWZ_SCR sym_fw2z_scr
#ifdef __FreeBSD__
#include <dev/sym/sym_fw2.h>
#else
#include "sym_fw2.h"
#endif
static struct sym_fwa_ofs sym_fw2a_ofs = {
SYM_GEN_FW_A(struct SYM_FWA_SCR)
};
static struct sym_fwb_ofs sym_fw2b_ofs = {
SYM_GEN_FW_B(struct SYM_FWB_SCR)
#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
SYM_GEN_B(struct SYM_FWB_SCR, data_io)
#endif
SYM_GEN_B(struct SYM_FWB_SCR, start64)
SYM_GEN_B(struct SYM_FWB_SCR, pm_handle)
};

View File

@ -92,9 +92,6 @@ struct sym_fwa_ofs {
};
struct sym_fwb_ofs {
SYM_GEN_FW_B(u_short)
#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
SYM_GEN_B(u_short, data_io)
#endif
SYM_GEN_B(u_short, start64)
SYM_GEN_B(u_short, pm_handle)
};
@ -111,9 +108,6 @@ struct sym_fwa_ba {
};
struct sym_fwb_ba {
SYM_GEN_FW_B(u32)
#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
SYM_GEN_B(u32, data_io)
#endif
SYM_GEN_B(u32, start64);
SYM_GEN_B(u32, pm_handle);
};

View File

@ -197,12 +197,6 @@ struct SYM_FWB_SCR {
u32 bad_status [ 7];
u32 wsr_ma_helper [ 4];
#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
/* Unknown direction handling */
u32 data_io [ 2];
u32 data_io_com [ 8];
u32 data_io_out [ 7];
#endif
/* Data area */
u32 zero [ 1];
u32 scratch [ 1];
@ -1747,48 +1741,6 @@ static struct SYM_FWB_SCR SYM_FWB_SCR = {
SCR_JUMP,
PADDR_A (dispatch),
#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
}/*-------------------------< DATA_IO >--------------------------*/,{
/*
* We jump here if the data direction was unknown at the
* time we had to queue the command to the scripts processor.
* Pointers had been set as follow in this situation:
* savep --> DATA_IO
* lastp --> start pointer when DATA_IN
* wlastp --> start pointer when DATA_OUT
* This script sets savep and lastp according to the
* direction chosen by the target.
*/
SCR_JUMP ^ IFTRUE (WHEN (SCR_DATA_OUT)),
PADDR_B (data_io_out),
}/*-------------------------< DATA_IO_COM >----------------------*/,{
/*
* Direction is DATA IN.
*/
SCR_COPY (4),
HADDR_1 (ccb_head.lastp),
HADDR_1 (ccb_head.savep),
/*
* Jump to the SCRIPTS according to actual direction.
*/
SCR_COPY (4),
HADDR_1 (ccb_head.savep),
RADDR_1 (temp),
SCR_RETURN,
0,
}/*-------------------------< DATA_IO_OUT >----------------------*/,{
/*
* Direction is DATA OUT.
*/
SCR_REG_REG (HF_REG, SCR_AND, (~HF_DATA_IN)),
0,
SCR_COPY (4),
HADDR_1 (ccb_head.wlastp),
HADDR_1 (ccb_head.lastp),
SCR_JUMP,
PADDR_B(data_io_com),
#endif /* SYM_OPT_HANDLE_DIR_UNKNOWN */
}/*-------------------------< ZERO >-----------------------------*/,{
SCR_DATA_ZERO,
}/*-------------------------< SCRATCH >--------------------------*/,{

View File

@ -191,13 +191,6 @@ struct SYM_FWB_SCR {
u32 pm_wsr_handle [ 38];
u32 wsr_ma_helper [ 4];
#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
/* Unknown direction handling */
u32 data_io [ 2];
u32 data_io_in [ 2];
u32 data_io_com [ 6];
u32 data_io_out [ 8];
#endif
/* Data area */
u32 zero [ 1];
u32 scratch [ 1];
@ -1838,51 +1831,6 @@ static struct SYM_FWB_SCR SYM_FWB_SCR = {
SCR_JUMP,
PADDR_A (dispatch),
#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
}/*-------------------------< DATA_IO >--------------------------*/,{
/*
* We jump here if the data direction was unknown at the
* time we had to queue the command to the scripts processor.
* Pointers had been set as follow in this situation:
* savep --> DATA_IO
* lastp --> start pointer when DATA_IN
* wlastp --> start pointer when DATA_OUT
* This script sets savep and lastp according to the
* direction chosen by the target.
*/
SCR_JUMP ^ IFTRUE (WHEN (SCR_DATA_OUT)),
PADDR_B (data_io_out),
}/*-------------------------< DATA_IO_IN >-----------------------*/,{
/*
* Direction is DATA IN.
*/
SCR_LOAD_REL (scratcha, 4),
offsetof (struct sym_ccb, phys.head.lastp),
}/*-------------------------< DATA_IO_COM >----------------------*/,{
SCR_STORE_REL (scratcha, 4),
offsetof (struct sym_ccb, phys.head.savep),
/*
* Jump to the SCRIPTS according to actual direction.
*/
SCR_LOAD_REL (temp, 4),
offsetof (struct sym_ccb, phys.head.savep),
SCR_RETURN,
0,
}/*-------------------------< DATA_IO_OUT >----------------------*/,{
/*
* Direction is DATA OUT.
*/
SCR_REG_REG (HF_REG, SCR_AND, (~HF_DATA_IN)),
0,
SCR_LOAD_REL (scratcha, 4),
offsetof (struct sym_ccb, phys.head.wlastp),
SCR_STORE_REL (scratcha, 4),
offsetof (struct sym_ccb, phys.head.lastp),
SCR_JUMP,
PADDR_B(data_io_com),
#endif /* SYM_OPT_HANDLE_DIR_UNKNOWN */
}/*-------------------------< ZERO >-----------------------------*/,{
SCR_DATA_ZERO,
}/*-------------------------< SCRATCH >--------------------------*/,{

View File

@ -514,9 +514,10 @@ static inline int sym_setup_cdb(struct sym_hcb *np, struct scsi_cmnd *cmd, struc
*/
int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
{
int dir;
struct sym_tcb *tp = &np->target[cp->target];
struct sym_lcb *lp = sym_lp(tp, cp->lun);
u32 lastp, goalp;
int dir;
/*
* Build the CDB.
@ -534,15 +535,47 @@ int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct s
sym_set_cam_status(cmd, DID_ERROR);
goto out_abort;
}
/*
* No segments means no data.
*/
if (!cp->segments)
dir = DMA_NONE;
} else {
cp->data_len = 0;
cp->segments = 0;
}
/*
* Set data pointers.
* Set the data pointer.
*/
sym_setup_data_pointers(np, cp, dir);
switch (dir) {
case DMA_BIDIRECTIONAL:
printk("%s: got DMA_BIDIRECTIONAL command", sym_name(np));
sym_set_cam_status(cmd, DID_ERROR);
goto out_abort;
case DMA_TO_DEVICE:
goalp = SCRIPTA_BA(np, data_out2) + 8;
lastp = goalp - 8 - (cp->segments * (2*4));
break;
case DMA_FROM_DEVICE:
cp->host_flags |= HF_DATA_IN;
goalp = SCRIPTA_BA(np, data_in2) + 8;
lastp = goalp - 8 - (cp->segments * (2*4));
break;
case DMA_NONE:
default:
lastp = goalp = SCRIPTB_BA(np, no_data);
break;
}
/*
* Set all pointers values needed by SCRIPTS.
*/
cp->phys.head.lastp = cpu_to_scr(lastp);
cp->phys.head.savep = cpu_to_scr(lastp);
cp->startp = cp->phys.head.savep;
cp->goalp = cpu_to_scr(goalp);
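/*
 * Worked example (assumed values): each scatter/gather segment occupies
 * one two-word (2*4 byte) SCRIPTS table entry, so a DMA_FROM_DEVICE
 * command with 3 segments gets goalp = data_in2 + 8 and
 * lastp = goalp - 8 - 3*8 = data_in2 - 24, i.e. lastp addresses the
 * table entry that moves the first of the three segments (segments are
 * packed at the tail of the table).
 */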
/*
* When `#ifed 1', the code below makes the driver
@ -563,10 +596,7 @@ int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct s
/*
* activate this job.
*/
if (lp)
sym_start_next_ccbs(np, lp, 2);
else
sym_put_start_queue(np, cp);
sym_start_next_ccbs(np, lp, 2);
return 0;
out_abort:
@ -981,15 +1011,14 @@ static int device_queue_depth(struct sym_hcb *np, int target, int lun)
static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
{
struct sym_hcb *np;
struct sym_tcb *tp;
struct sym_hcb *np = sym_get_hcb(sdev->host);
struct sym_tcb *tp = &np->target[sdev->id];
struct sym_lcb *lp;
if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN)
return -ENXIO;
np = sym_get_hcb(sdev->host);
tp = &np->target[sdev->id];
tp->starget = sdev->sdev_target;
/*
* Fail the device init if the device is flagged NOSCAN at BOOT in
* the NVRAM. This may speed up boot and maintain coherency with
@ -999,34 +1028,40 @@ static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
* lun devices behave badly when asked for a non zero LUN.
*/
if ((tp->usrflags & SYM_SCAN_BOOT_DISABLED) ||
((tp->usrflags & SYM_SCAN_LUNS_DISABLED) && sdev->lun != 0)) {
if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) {
tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
starget_printk(KERN_INFO, tp->starget,
"Scan at boot disabled in NVRAM\n");
return -ENXIO;
}
tp->starget = sdev->sdev_target;
if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) {
if (sdev->lun != 0)
return -ENXIO;
starget_printk(KERN_INFO, tp->starget,
"Multiple LUNs disabled in NVRAM\n");
}
lp = sym_alloc_lcb(np, sdev->id, sdev->lun);
if (!lp)
return -ENOMEM;
spi_min_period(tp->starget) = tp->usr_period;
spi_max_width(tp->starget) = tp->usr_width;
return 0;
}
/*
* Linux entry point for device queue sizing.
*/
static int sym53c8xx_slave_configure(struct scsi_device *device)
static int sym53c8xx_slave_configure(struct scsi_device *sdev)
{
struct sym_hcb *np = sym_get_hcb(device->host);
struct sym_tcb *tp = &np->target[device->id];
struct sym_lcb *lp;
struct sym_hcb *np = sym_get_hcb(sdev->host);
struct sym_tcb *tp = &np->target[sdev->id];
struct sym_lcb *lp = sym_lp(tp, sdev->lun);
int reqtags, depth_to_use;
/*
* Allocate the LCB if not yet.
* If it fail, we may well be in the sh*t. :)
*/
lp = sym_alloc_lcb(np, device->id, device->lun);
if (!lp)
return -ENOMEM;
/*
* Get user flags.
*/
@ -1038,10 +1073,10 @@ static int sym53c8xx_slave_configure(struct scsi_device *device)
* Use at least 2.
* Do not use more than our maximum.
*/
reqtags = device_queue_depth(np, device->id, device->lun);
reqtags = device_queue_depth(np, sdev->id, sdev->lun);
if (reqtags > tp->usrtags)
reqtags = tp->usrtags;
if (!device->tagged_supported)
if (!sdev->tagged_supported)
reqtags = 0;
#if 1 /* Avoid to locally queue commands for no good reasons */
if (reqtags > SYM_CONF_MAX_TAG)
@ -1050,19 +1085,30 @@ static int sym53c8xx_slave_configure(struct scsi_device *device)
#else
depth_to_use = (reqtags ? SYM_CONF_MAX_TAG : 2);
#endif
scsi_adjust_queue_depth(device,
(device->tagged_supported ?
scsi_adjust_queue_depth(sdev,
(sdev->tagged_supported ?
MSG_SIMPLE_TAG : 0),
depth_to_use);
lp->s.scdev_depth = depth_to_use;
sym_tune_dev_queuing(tp, device->lun, reqtags);
sym_tune_dev_queuing(tp, sdev->lun, reqtags);
if (!spi_initial_dv(device->sdev_target))
spi_dv_device(device);
if (!spi_initial_dv(sdev->sdev_target))
spi_dv_device(sdev);
return 0;
}
static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
{
struct sym_hcb *np = sym_get_hcb(sdev->host);
struct sym_lcb *lp = sym_lp(&np->target[sdev->id], sdev->lun);
if (lp->itlq_tbl)
sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK * 4, "ITLQ_TBL");
kfree(lp->cb_tags);
sym_mfree_dma(lp, sizeof(*lp), "LCB");
}
/*
* Linux entry point for info() function
*/
@ -1497,7 +1543,7 @@ static int sym_setup_bus_dma_mask(struct sym_hcb *np)
{
#if SYM_CONF_DMA_ADDRESSING_MODE > 0
#if SYM_CONF_DMA_ADDRESSING_MODE == 1
#define DMA_DAC_MASK 0x000000ffffffffffULL /* 40-bit */
#define DMA_DAC_MASK DMA_40BIT_MASK
#elif SYM_CONF_DMA_ADDRESSING_MODE == 2
#define DMA_DAC_MASK DMA_64BIT_MASK
#endif
@ -1926,6 +1972,7 @@ static struct scsi_host_template sym2_template = {
.queuecommand = sym53c8xx_queue_command,
.slave_alloc = sym53c8xx_slave_alloc,
.slave_configure = sym53c8xx_slave_configure,
.slave_destroy = sym53c8xx_slave_destroy,
.eh_abort_handler = sym53c8xx_eh_abort_handler,
.eh_device_reset_handler = sym53c8xx_eh_device_reset_handler,
.eh_bus_reset_handler = sym53c8xx_eh_bus_reset_handler,

View File

@ -68,7 +68,6 @@
*/
#define SYM_CONF_TIMER_INTERVAL ((HZ+1)/2)
#define SYM_OPT_HANDLE_DIR_UNKNOWN
#define SYM_OPT_HANDLE_DEVICE_QUEUEING
#define SYM_OPT_LIMIT_COMMAND_REORDERING
@ -268,6 +267,5 @@ void sym_xpt_async_bus_reset(struct sym_hcb *np);
void sym_xpt_async_sent_bdr(struct sym_hcb *np, int target);
int sym_setup_data_and_start (struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp);
void sym_log_bus_error(struct sym_hcb *np);
void sym_sniff_inquiry(struct sym_hcb *np, struct scsi_cmnd *cmd, int resid);
#endif /* SYM_GLUE_H */

View File

@ -40,6 +40,7 @@
#include <linux/slab.h>
#include <asm/param.h> /* for timeouts in units of HZ */
#include <scsi/scsi_dbg.h>
#include "sym_glue.h"
#include "sym_nvram.h"
@ -70,32 +71,12 @@ static void sym_printl_hex(u_char *p, int n)
printf (".\n");
}
/*
* Print out the content of a SCSI message.
*/
static int sym_show_msg (u_char * msg)
{
u_char i;
printf ("%x",*msg);
if (*msg==M_EXTENDED) {
for (i=1;i<8;i++) {
if (i-1>msg[1]) break;
printf ("-%x",msg[i]);
}
return (i+1);
} else if ((*msg & 0xf0) == 0x20) {
printf ("-%x",msg[1]);
return (2);
}
return (1);
}
static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg)
{
sym_print_addr(cp->cmd, "%s: ", label);
sym_show_msg(msg);
printf(".\n");
spi_print_msg(msg);
printf("\n");
}
static void sym_print_nego_msg(struct sym_hcb *np, int target, char *label, u_char *msg)
@ -103,8 +84,8 @@ static void sym_print_nego_msg(struct sym_hcb *np, int target, char *label, u_ch
struct sym_tcb *tp = &np->target[target];
dev_info(&tp->starget->dev, "%s: ", label);
sym_show_msg(msg);
printf(".\n");
spi_print_msg(msg);
printf("\n");
}
/*
@ -635,29 +616,6 @@ static __inline void sym_init_burst(struct sym_hcb *np, u_char bc)
}
}
/*
* Print out the list of targets that have some flag disabled by user.
*/
static void sym_print_targets_flag(struct sym_hcb *np, int mask, char *msg)
{
int cnt;
int i;
for (cnt = 0, i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
if (i == np->myaddr)
continue;
if (np->target[i].usrflags & mask) {
if (!cnt++)
printf("%s: %s disabled for targets",
sym_name(np), msg);
printf(" %d", i);
}
}
if (cnt)
printf(".\n");
}
/*
* Save initial settings of some IO registers.
* Assumed to have been set by BIOS.
@ -962,7 +920,7 @@ static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, stru
tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);
tp->usrtags = SYM_SETUP_MAX_TAG;
sym_nvram_setup_target(np, i, nvram);
sym_nvram_setup_target(tp, i, nvram);
if (!tp->usrtags)
tp->usrflags &= ~SYM_TAGS_ENABLED;
@ -1005,13 +963,6 @@ static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, stru
sym_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl,
np->rv_ctest3, np->rv_ctest4, np->rv_ctest5);
}
/*
* Let user be aware of targets that have some disable flags set.
*/
sym_print_targets_flag(np, SYM_SCAN_BOOT_DISABLED, "SCAN AT BOOT");
if (sym_verbose)
sym_print_targets_flag(np, SYM_SCAN_LUNS_DISABLED,
"SCAN FOR LUNS");
return 0;
}
@ -1523,7 +1474,7 @@ static int sym_prepare_nego(struct sym_hcb *np, struct sym_ccb *cp, u_char *msgp
/*
* Insert a job into the start queue.
*/
void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp)
static void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp)
{
u_short qidx;
@ -3654,7 +3605,7 @@ static int sym_evaluate_dp(struct sym_hcb *np, struct sym_ccb *cp, u32 scr, int
* If result is dp_sg = SYM_CONF_MAX_SG, then we are at the
* end of the data.
*/
tmp = scr_to_cpu(sym_goalp(cp));
tmp = scr_to_cpu(cp->goalp);
dp_sg = SYM_CONF_MAX_SG;
if (dp_scr != tmp)
dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4);
@ -3761,7 +3712,7 @@ static void sym_modify_dp(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb
* And our alchemy:) allows us to easily calculate the data
* script address we want to return for the next data phase.
*/
dp_ret = cpu_to_scr(sym_goalp(cp));
dp_ret = cpu_to_scr(cp->goalp);
dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4);
/*
@ -3857,7 +3808,7 @@ int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp)
* If all data has been transferred,
* there is no residual.
*/
if (cp->phys.head.lastp == sym_goalp(cp))
if (cp->phys.head.lastp == cp->goalp)
return resid;
/*
@ -4664,30 +4615,7 @@ struct sym_ccb *sym_get_ccb (struct sym_hcb *np, struct scsi_cmnd *cmd, u_char t
goto out;
cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
/*
* If the LCB is not yet available and the LUN
* has been probed ok, try to allocate the LCB.
*/
if (!lp && sym_is_bit(tp->lun_map, ln)) {
lp = sym_alloc_lcb(np, tn, ln);
if (!lp)
goto out_free;
}
#endif
/*
* If the LCB is not available here, then the
* logical unit is not yet discovered. For those
* ones only accept 1 SCSI IO per logical unit,
* since we cannot allow disconnections.
*/
if (!lp) {
if (!sym_is_bit(tp->busy0_map, ln))
sym_set_bit(tp->busy0_map, ln);
else
goto out_free;
} else {
{
/*
* If we have been asked for a tagged command.
*/
@ -4840,12 +4768,6 @@ void sym_free_ccb (struct sym_hcb *np, struct sym_ccb *cp)
lp->head.resel_sa =
cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun));
}
/*
* Otherwise, we only accept 1 IO per LUN.
* Clear the bit that keeps track of this IO.
*/
else
sym_clr_bit(tp->busy0_map, cp->lun);
/*
* We donnot queue more than 1 ccb per target
@ -4997,20 +4919,7 @@ static void sym_init_tcb (struct sym_hcb *np, u_char tn)
struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
{
struct sym_tcb *tp = &np->target[tn];
struct sym_lcb *lp = sym_lp(tp, ln);
/*
* Already done, just return.
*/
if (lp)
return lp;
/*
* Donnot allow LUN control block
* allocation for not probed LUNs.
*/
if (!sym_is_bit(tp->lun_map, ln))
return NULL;
struct sym_lcb *lp = NULL;
/*
* Initialize the target control block if not yet.
@ -5082,13 +4991,7 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
lp->started_max = SYM_CONF_MAX_TASK;
lp->started_limit = SYM_CONF_MAX_TASK;
#endif
/*
* If we are busy, count the IO.
*/
if (sym_is_bit(tp->busy0_map, ln)) {
lp->busy_itl = 1;
sym_clr_bit(tp->busy0_map, ln);
}
fail:
return lp;
}
@ -5102,12 +5005,6 @@ static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln)
struct sym_lcb *lp = sym_lp(tp, ln);
int i;
/*
* If LCB not available, try to allocate it.
*/
if (!lp && !(lp = sym_alloc_lcb(np, tn, ln)))
goto fail;
/*
* Allocate the task table and the tag allocation
* circular buffer. We want both or none.
@ -5481,8 +5378,7 @@ finish:
/*
* Donnot start more than 1 command after an error.
*/
if (lp)
sym_start_next_ccbs(np, lp, 1);
sym_start_next_ccbs(np, lp, 1);
#endif
}
@ -5520,18 +5416,12 @@ void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp)
tp = &np->target[cp->target];
lp = sym_lp(tp, cp->lun);
/*
* Assume device discovered on first success.
*/
if (!lp)
sym_set_bit(tp->lun_map, cp->lun);
/*
* If all data have been transferred, given than no
* extended error did occur, there is no residual.
*/
resid = 0;
if (cp->phys.head.lastp != sym_goalp(cp))
if (cp->phys.head.lastp != cp->goalp)
resid = sym_compute_residual(np, cp);
/*
@ -5551,15 +5441,6 @@ if (resid)
*/
sym_set_cam_result_ok(cp, cmd, resid);
#ifdef SYM_OPT_SNIFF_INQUIRY
/*
* On standard INQUIRY response (EVPD and CmDt
* not set), sniff out device capabilities.
*/
if (cp->cdb_buf[0] == INQUIRY && !(cp->cdb_buf[1] & 0x3))
sym_sniff_inquiry(np, cmd, resid);
#endif
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
/*
* If max number of started ccbs had been reduced,
@ -5587,7 +5468,7 @@ if (resid)
/*
* Requeue a couple of awaiting scsi commands.
*/
if (lp && !sym_que_empty(&lp->waiting_ccbq))
if (!sym_que_empty(&lp->waiting_ccbq))
sym_start_next_ccbs(np, lp, 2);
#endif
/*
@ -5830,8 +5711,7 @@ void sym_hcb_free(struct sym_hcb *np)
SYM_QUEHEAD *qp;
struct sym_ccb *cp;
struct sym_tcb *tp;
struct sym_lcb *lp;
int target, lun;
int target;
if (np->scriptz0)
sym_mfree_dma(np->scriptz0, np->scriptz_sz, "SCRIPTZ0");
@ -5857,16 +5737,6 @@ void sym_hcb_free(struct sym_hcb *np)
for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) {
tp = &np->target[target];
for (lun = 0 ; lun < SYM_CONF_MAX_LUN ; lun++) {
lp = sym_lp(tp, lun);
if (!lp)
continue;
if (lp->itlq_tbl)
sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4,
"ITLQ_TBL");
kfree(lp->cb_tags);
sym_mfree_dma(lp, sizeof(*lp), "LCB");
}
#if SYM_CONF_MAX_LUN > 1
kfree(tp->lunmp);
#endif

View File

@ -48,12 +48,6 @@
* They may be defined in platform specific headers, if they
* are useful.
*
* SYM_OPT_HANDLE_DIR_UNKNOWN
* When this option is set, the SCRIPTS used by the driver
* are able to handle SCSI transfers with direction not
* supplied by user.
* (set for Linux-2.0.X)
*
* SYM_OPT_HANDLE_DEVICE_QUEUEING
* When this option is set, the driver will use a queue per
* device and handle QUEUE FULL status requeuing internally.
@ -64,7 +58,6 @@
* (set for Linux)
*/
#if 0
#define SYM_OPT_HANDLE_DIR_UNKNOWN
#define SYM_OPT_HANDLE_DEVICE_QUEUEING
#define SYM_OPT_LIMIT_COMMAND_REORDERING
#endif
@ -416,19 +409,6 @@ struct sym_tcb {
struct sym_lcb **lunmp; /* Other LCBs [1..MAX_LUN] */
#endif
/*
* Bitmap that tells about LUNs that succeeded at least
* 1 IO and therefore assumed to be a real device.
* Avoid useless allocation of the LCB structure.
*/
u32 lun_map[(SYM_CONF_MAX_LUN+31)/32];
/*
* Bitmap that tells about LUNs that haven't yet an LCB
* allocated (not discovered or LCB allocation failed).
*/
u32 busy0_map[(SYM_CONF_MAX_LUN+31)/32];
#ifdef SYM_HAVE_STCB
/*
* O/S specific data structure.
@ -454,8 +434,10 @@ struct sym_tcb {
* Other user settable limits and options.
* These limits are read from the NVRAM if present.
*/
u_char usrflags;
u_short usrtags;
unsigned char usrflags;
unsigned char usr_period;
unsigned char usr_width;
unsigned short usrtags;
struct scsi_target *starget;
};
@ -672,9 +654,6 @@ struct sym_ccbh {
*/
u32 savep; /* Jump address to saved data pointer */
u32 lastp; /* SCRIPTS address at end of data */
#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
u32 wlastp;
#endif
/*
* Status fields.
@ -804,9 +783,6 @@ struct sym_ccb {
SYM_QUEHEAD link_ccbq; /* Link to free/busy CCB queue */
u32 startp; /* Initial data pointer */
u32 goalp; /* Expected last data pointer */
#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
u32 wgoalp;
#endif
int ext_sg; /* Extreme data pointer, used */
int ext_ofs; /* to calculate the residual. */
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
@ -821,12 +797,6 @@ struct sym_ccb {
#define CCB_BA(cp,lbl) cpu_to_scr(cp->ccb_ba + offsetof(struct sym_ccb, lbl))
#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
#define sym_goalp(cp) ((cp->host_flags & HF_DATA_IN) ? cp->goalp : cp->wgoalp)
#else
#define sym_goalp(cp) (cp->goalp)
#endif
typedef struct device *m_pool_ident_t;
/*
@ -1077,7 +1047,6 @@ char *sym_driver_name(void);
void sym_print_xerr(struct scsi_cmnd *cmd, int x_status);
int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int);
struct sym_chip *sym_lookup_chip_table(u_short device_id, u_char revision);
void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp);
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn);
#endif
@ -1135,71 +1104,6 @@ bad:
#error "Unsupported DMA addressing mode"
#endif
/*
* Set up data pointers used by SCRIPTS.
* Called from O/S specific code.
*/
static inline void sym_setup_data_pointers(struct sym_hcb *np,
struct sym_ccb *cp, int dir)
{
u32 lastp, goalp;
/*
* No segments means no data.
*/
if (!cp->segments)
dir = DMA_NONE;
/*
* Set the data pointer.
*/
switch(dir) {
#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
case DMA_BIDIRECTIONAL:
#endif
case DMA_TO_DEVICE:
goalp = SCRIPTA_BA(np, data_out2) + 8;
lastp = goalp - 8 - (cp->segments * (2*4));
#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
cp->wgoalp = cpu_to_scr(goalp);
if (dir != DMA_BIDIRECTIONAL)
break;
cp->phys.head.wlastp = cpu_to_scr(lastp);
/* fall through */
#else
break;
#endif
case DMA_FROM_DEVICE:
cp->host_flags |= HF_DATA_IN;
goalp = SCRIPTA_BA(np, data_in2) + 8;
lastp = goalp - 8 - (cp->segments * (2*4));
break;
case DMA_NONE:
default:
#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
cp->host_flags |= HF_DATA_IN;
#endif
lastp = goalp = SCRIPTB_BA(np, no_data);
break;
}
/*
* Set all pointers values needed by SCRIPTS.
*/
cp->phys.head.lastp = cpu_to_scr(lastp);
cp->phys.head.savep = cpu_to_scr(lastp);
cp->startp = cp->phys.head.savep;
cp->goalp = cpu_to_scr(goalp);
#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
/*
* If direction is unknown, start at data_io.
*/
if (dir == DMA_BIDIRECTIONAL)
cp->phys.head.savep = cpu_to_scr(SCRIPTB_BA(np, data_io));
#endif
}
/*
* MEMORY ALLOCATOR.
*/

View File

@ -37,11 +37,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifdef __FreeBSD__
#include <dev/sym/sym_glue.h>
#else
#include "sym_glue.h"
#endif
/*
* Simple power of two buddy-like generic allocator.

View File

@ -92,29 +92,32 @@ void sym_nvram_setup_host(struct Scsi_Host *shost, struct sym_hcb *np, struct sy
* Get target set-up from Symbios format NVRAM.
*/
static void
sym_Symbios_setup_target(struct sym_hcb *np, int target, Symbios_nvram *nvram)
sym_Symbios_setup_target(struct sym_tcb *tp, int target, Symbios_nvram *nvram)
{
struct sym_tcb *tp = &np->target[target];
Symbios_target *tn = &nvram->target[target];
tp->usrtags =
(tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? SYM_SETUP_MAX_TAG : 0;
if (!(tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED))
tp->usrtags = 0;
if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE))
tp->usrflags &= ~SYM_DISC_ENABLED;
if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME))
tp->usrflags |= SYM_SCAN_BOOT_DISABLED;
if (!(tn->flags & SYMBIOS_SCAN_LUNS))
tp->usrflags |= SYM_SCAN_LUNS_DISABLED;
tp->usr_period = (tn->sync_period + 3) / 4;
tp->usr_width = (tn->bus_width == 0x8) ? 0 : 1;
}
static const unsigned char Tekram_sync[16] = {
25, 31, 37, 43, 50, 62, 75, 125, 12, 15, 18, 21, 6, 7, 9, 10
};
/*
* Get target set-up from Tekram format NVRAM.
*/
static void
sym_Tekram_setup_target(struct sym_hcb *np, int target, Tekram_nvram *nvram)
sym_Tekram_setup_target(struct sym_tcb *tp, int target, Tekram_nvram *nvram)
{
struct sym_tcb *tp = &np->target[target];
struct Tekram_target *tn = &nvram->target[target];
if (tn->flags & TEKRAM_TAGGED_COMMANDS) {
@ -124,22 +127,22 @@ sym_Tekram_setup_target(struct sym_hcb *np, int target, Tekram_nvram *nvram)
if (tn->flags & TEKRAM_DISCONNECT_ENABLE)
tp->usrflags |= SYM_DISC_ENABLED;
/* If any device does not support parity, we will not use this option */
if (!(tn->flags & TEKRAM_PARITY_CHECK))
np->rv_scntl0 &= ~0x0a; /* SCSI parity checking disabled */
if (tn->flags & TEKRAM_SYNC_NEGO)
tp->usr_period = Tekram_sync[tn->sync_index & 0xf];
tp->usr_width = (tn->flags & TEKRAM_WIDE_NEGO) ? 1 : 0;
}
/*
* Get target setup from NVRAM.
*/
void sym_nvram_setup_target(struct sym_hcb *np, int target, struct sym_nvram *nvp)
void sym_nvram_setup_target(struct sym_tcb *tp, int target, struct sym_nvram *nvp)
{
switch (nvp->type) {
case SYM_SYMBIOS_NVRAM:
sym_Symbios_setup_target(np, target, &nvp->data.Symbios);
sym_Symbios_setup_target(tp, target, &nvp->data.Symbios);
break;
case SYM_TEKRAM_NVRAM:
sym_Tekram_setup_target(np, target, &nvp->data.Tekram);
sym_Tekram_setup_target(tp, target, &nvp->data.Tekram);
break;
default:
break;

View File

@ -194,12 +194,12 @@ struct sym_nvram {
#if SYM_CONF_NVRAM_SUPPORT
void sym_nvram_setup_host(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram);
void sym_nvram_setup_target (struct sym_hcb *np, int target, struct sym_nvram *nvp);
void sym_nvram_setup_target (struct sym_tcb *tp, int target, struct sym_nvram *nvp);
int sym_read_nvram (struct sym_device *np, struct sym_nvram *nvp);
char *sym_nvram_type(struct sym_nvram *nvp);
#else
static inline void sym_nvram_setup_host(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram) { }
static inline void sym_nvram_setup_target(struct sym_hcb *np, struct sym_nvram *nvram) { }
static inline void sym_nvram_setup_target(struct sym_tcb *tp, struct sym_nvram *nvram) { }
static inline int sym_read_nvram(struct sym_device *np, struct sym_nvram *nvp)
{
nvp->type = 0;

View File

@ -1,792 +0,0 @@
/******************************************************************************
** High Performance device driver for the Symbios 53C896 controller.
**
** Copyright (C) 1998-2001 Gerard Roudier <groudier@free.fr>
**
** This driver also supports all the Symbios 53C8XX controller family,
** except 53C810 revisions < 16, 53C825 revisions < 16 and all
** revisions of 53C815 controllers.
**
** This driver is based on the Linux port of the FreeBSD ncr driver.
**
** Copyright (C) 1994 Wolfgang Stanglmeier
**
**-----------------------------------------------------------------------------
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
**
**-----------------------------------------------------------------------------
**
** The Linux port of the FreeBSD ncr driver has been achieved in
** november 1995 by:
**
** Gerard Roudier <groudier@free.fr>
**
** Being given that this driver originates from the FreeBSD version, and
** in order to keep synergy on both, any suggested enhancements and corrections
** received on Linux are automatically a potential candidate for the FreeBSD
** version.
**
** The original driver has been written for 386bsd and FreeBSD by
** Wolfgang Stanglmeier <wolf@cologne.de>
** Stefan Esser <se@mi.Uni-Koeln.de>
**
**-----------------------------------------------------------------------------
**
** Major contributions:
** --------------------
**
** NVRAM detection and reading.
** Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
**
*******************************************************************************
*/
/*==========================================================
**
** Debugging tags
**
**==========================================================
*/
#define DEBUG_ALLOC (0x0001)
#define DEBUG_PHASE (0x0002)
#define DEBUG_QUEUE (0x0008)
#define DEBUG_RESULT (0x0010)
#define DEBUG_POINTER (0x0020)
#define DEBUG_SCRIPT (0x0040)
#define DEBUG_TINY (0x0080)
#define DEBUG_TIMING (0x0100)
#define DEBUG_NEGO (0x0200)
#define DEBUG_TAGS (0x0400)
#define DEBUG_SCATTER (0x0800)
#define DEBUG_IC (0x1000)
/*
** Enable/Disable debug messages.
** Can be changed at runtime too.
*/
#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
static int ncr_debug = SCSI_NCR_DEBUG_FLAGS;
#define DEBUG_FLAGS ncr_debug
#else
#define DEBUG_FLAGS SCSI_NCR_DEBUG_FLAGS
#endif
static inline struct list_head *ncr_list_pop(struct list_head *head)
{
if (!list_empty(head)) {
struct list_head *elem = head->next;
list_del(elem);
return elem;
}
return NULL;
}
#ifdef __sparc__
#include <asm/irq.h>
#endif
/*==========================================================
**
** Simple power of two buddy-like allocator.
**
** This simple code is not intended to be fast, but to
** provide power of 2 aligned memory allocations.
** Since the SCRIPTS processor only supplies 8 bit
** arithmetic, this allocator allows simple and fast
** address calculations from the SCRIPTS code.
** In addition, cache line alignment is guaranteed for
** power of 2 cache line size.
** Enhanced in linux-2.3.44 to provide a memory pool
** per pcidev to support dynamic dma mapping. (I would
** have preferred a real bus abstraction, btw).
**
**==========================================================
*/
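The scheme described above is the classic power-of-two buddy allocator: a request is rounded up to the next power of two, a larger free chunk is split into halves until a chunk of the right size exists, and a freed chunk is merged with its equally-sized "buddy" when that buddy is also free. A minimal, self-contained sketch of the allocation side of that idea (toy code with hypothetical names, independent of the m_pool machinery below) might look like:
#define TOY_MIN_SHIFT	4	/* 16-byte minimum chunk, as above   */
#define TOY_MAX_SHIFT	12	/* one 4 KiB page backs the toy pool */
#define TOY_ORDERS	(TOY_MAX_SHIFT - TOY_MIN_SHIFT + 1)
struct toy_chunk {		/* a free chunk just stores a next pointer */
	struct toy_chunk *next;
};
static struct toy_chunk *toy_free[TOY_ORDERS];
static unsigned char toy_pool[1UL << TOY_MAX_SHIFT]
			__attribute__((aligned(1UL << TOY_MAX_SHIFT)));
static void toy_init(void)	/* seed the pool with one maximal chunk */
{
	struct toy_chunk *c = (struct toy_chunk *)toy_pool;
	c->next = NULL;
	toy_free[TOY_ORDERS - 1] = c;
}
static void *toy_alloc(unsigned long size)
{
	unsigned long s = 1UL << TOY_MIN_SHIFT;
	int i = 0, j;
	struct toy_chunk *a;
	while (s < size) {	/* round the request up to a power of two */
		s <<= 1;
		if (++i >= TOY_ORDERS)
			return NULL;
	}
	for (j = i; j < TOY_ORDERS && !toy_free[j]; j++)
		;		/* smallest order with a free chunk */
	if (j >= TOY_ORDERS)
		return NULL;
	a = toy_free[j];
	toy_free[j] = a->next;
	while (j > i) {		/* split, pushing each upper half back */
		struct toy_chunk *half;
		j--;
		half = (struct toy_chunk *)((unsigned char *)a +
					    (1UL << (TOY_MIN_SHIFT + j)));
		half->next = toy_free[j];
		toy_free[j] = half;
	}
	return a;		/* power-of-two sized and aligned chunk */
}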
#define MEMO_SHIFT 4 /* 16 bytes minimum memory chunk */
#if PAGE_SIZE >= 8192
#define MEMO_PAGE_ORDER 0 /* 1 PAGE maximum */
#else
#define MEMO_PAGE_ORDER 1 /* 2 PAGES maximum */
#endif
#define MEMO_FREE_UNUSED /* Free unused pages immediately */
#define MEMO_WARN 1
#define MEMO_GFP_FLAGS GFP_ATOMIC
#define MEMO_CLUSTER_SHIFT (PAGE_SHIFT+MEMO_PAGE_ORDER)
#define MEMO_CLUSTER_SIZE (1UL << MEMO_CLUSTER_SHIFT)
#define MEMO_CLUSTER_MASK (MEMO_CLUSTER_SIZE-1)
typedef u_long m_addr_t; /* Enough bits to bit-hack addresses */
typedef struct device *m_bush_t; /* Something that addresses DMAable */
typedef struct m_link { /* Link between free memory chunks */
struct m_link *next;
} m_link_s;
typedef struct m_vtob { /* Virtual to Bus address translation */
struct m_vtob *next;
m_addr_t vaddr;
m_addr_t baddr;
} m_vtob_s;
#define VTOB_HASH_SHIFT 5
#define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT)
#define VTOB_HASH_MASK (VTOB_HASH_SIZE-1)
#define VTOB_HASH_CODE(m) \
((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK)
typedef struct m_pool { /* Memory pool of a given kind */
m_bush_t bush;
m_addr_t (*getp)(struct m_pool *);
void (*freep)(struct m_pool *, m_addr_t);
int nump;
m_vtob_s *(vtob[VTOB_HASH_SIZE]);
struct m_pool *next;
struct m_link h[PAGE_SHIFT-MEMO_SHIFT+MEMO_PAGE_ORDER+1];
} m_pool_s;
static void *___m_alloc(m_pool_s *mp, int size)
{
int i = 0;
int s = (1 << MEMO_SHIFT);
int j;
m_addr_t a;
m_link_s *h = mp->h;
if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
return NULL;
while (size > s) {
s <<= 1;
++i;
}
j = i;
while (!h[j].next) {
if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
h[j].next = (m_link_s *)mp->getp(mp);
if (h[j].next)
h[j].next->next = NULL;
break;
}
++j;
s <<= 1;
}
a = (m_addr_t) h[j].next;
if (a) {
h[j].next = h[j].next->next;
while (j > i) {
j -= 1;
s >>= 1;
h[j].next = (m_link_s *) (a+s);
h[j].next->next = NULL;
}
}
#ifdef DEBUG
printk("___m_alloc(%d) = %p\n", size, (void *) a);
#endif
return (void *) a;
}
static void ___m_free(m_pool_s *mp, void *ptr, int size)
{
int i = 0;
int s = (1 << MEMO_SHIFT);
m_link_s *q;
m_addr_t a, b;
m_link_s *h = mp->h;
#ifdef DEBUG
printk("___m_free(%p, %d)\n", ptr, size);
#endif
if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
return;
while (size > s) {
s <<= 1;
++i;
}
a = (m_addr_t) ptr;
while (1) {
#ifdef MEMO_FREE_UNUSED
if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
mp->freep(mp, a);
break;
}
#endif
b = a ^ s;
q = &h[i];
while (q->next && q->next != (m_link_s *) b) {
q = q->next;
}
if (!q->next) {
((m_link_s *) a)->next = h[i].next;
h[i].next = (m_link_s *) a;
break;
}
q->next = q->next->next;
a = a & b;
s <<= 1;
++i;
}
}
static DEFINE_SPINLOCK(ncr53c8xx_lock);
static void *__m_calloc2(m_pool_s *mp, int size, char *name, int uflags)
{
void *p;
p = ___m_alloc(mp, size);
if (DEBUG_FLAGS & DEBUG_ALLOC)
printk ("new %-10s[%4d] @%p.\n", name, size, p);
if (p)
memset(p, 0, size);
else if (uflags & MEMO_WARN)
printk (NAME53C8XX ": failed to allocate %s[%d]\n", name, size);
return p;
}
#define __m_calloc(mp, s, n) __m_calloc2(mp, s, n, MEMO_WARN)
static void __m_free(m_pool_s *mp, void *ptr, int size, char *name)
{
if (DEBUG_FLAGS & DEBUG_ALLOC)
printk ("freeing %-10s[%4d] @%p.\n", name, size, ptr);
___m_free(mp, ptr, size);
}
/*
* With pci bus iommu support, we use a default pool of unmapped memory
* for memory we do not need to DMA from/to and one pool per pcidev for
* memory accessed by the PCI chip. `mp0' is the default not DMAable pool.
*/
static m_addr_t ___mp0_getp(m_pool_s *mp)
{
m_addr_t m = __get_free_pages(MEMO_GFP_FLAGS, MEMO_PAGE_ORDER);
if (m)
++mp->nump;
return m;
}
static void ___mp0_freep(m_pool_s *mp, m_addr_t m)
{
free_pages(m, MEMO_PAGE_ORDER);
--mp->nump;
}
static m_pool_s mp0 = {NULL, ___mp0_getp, ___mp0_freep};
/*
* DMAable pools.
*/
/*
* With pci bus iommu support, we maintain one pool per pcidev and a
* hashed reverse table for virtual to bus physical address translations.
*/
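Concretely, every DMA-coherent cluster handed out by such a pool is recorded in a small hash table keyed on the cluster's virtual address, so translating an arbitrary pointer back to its bus address means locating the cluster that contains it and adding the offset. A condensed sketch of that reverse lookup (hypothetical helper name, locking omitted; the real code is __vtobus() further down) reads:
static m_addr_t example_vtobus(m_pool_s *mp, void *m)
{
	/* Round down to the start of the cluster containing 'm'. */
	m_addr_t base = ((m_addr_t)m) & ~MEMO_CLUSTER_MASK;
	m_vtob_s *vp = mp->vtob[VTOB_HASH_CODE(m)];
	while (vp && vp->vaddr != base)	/* walk the hash bucket */
		vp = vp->next;
	/* Bus address of the cluster plus the offset within it. */
	return vp ? vp->baddr + (((m_addr_t)m) - base) : 0;
}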
static m_addr_t ___dma_getp(m_pool_s *mp)
{
m_addr_t vp;
m_vtob_s *vbp;
vbp = __m_calloc(&mp0, sizeof(*vbp), "VTOB");
if (vbp) {
dma_addr_t daddr;
vp = (m_addr_t) dma_alloc_coherent(mp->bush,
PAGE_SIZE<<MEMO_PAGE_ORDER,
&daddr, GFP_ATOMIC);
if (vp) {
int hc = VTOB_HASH_CODE(vp);
vbp->vaddr = vp;
vbp->baddr = daddr;
vbp->next = mp->vtob[hc];
mp->vtob[hc] = vbp;
++mp->nump;
return vp;
}
}
if (vbp)
__m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
return 0;
}
static void ___dma_freep(m_pool_s *mp, m_addr_t m)
{
m_vtob_s **vbpp, *vbp;
int hc = VTOB_HASH_CODE(m);
vbpp = &mp->vtob[hc];
while (*vbpp && (*vbpp)->vaddr != m)
vbpp = &(*vbpp)->next;
if (*vbpp) {
vbp = *vbpp;
*vbpp = (*vbpp)->next;
dma_free_coherent(mp->bush, PAGE_SIZE<<MEMO_PAGE_ORDER,
(void *)vbp->vaddr, (dma_addr_t)vbp->baddr);
__m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
--mp->nump;
}
}
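/*
 * Per-device pools are kept on a simple linked list anchored at mp0.
 * ___get_dma_pool() looks a pool up by its bus handle,
 * ___cre_dma_pool() creates and links a new one, and ___del_dma_pool()
 * unlinks and frees a pool once its last cluster has been released.
 */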
static inline m_pool_s *___get_dma_pool(m_bush_t bush)
{
m_pool_s *mp;
for (mp = mp0.next; mp && mp->bush != bush; mp = mp->next);
return mp;
}
static m_pool_s *___cre_dma_pool(m_bush_t bush)
{
m_pool_s *mp;
mp = __m_calloc(&mp0, sizeof(*mp), "MPOOL");
if (mp) {
memset(mp, 0, sizeof(*mp));
mp->bush = bush;
mp->getp = ___dma_getp;
mp->freep = ___dma_freep;
mp->next = mp0.next;
mp0.next = mp;
}
return mp;
}
static void ___del_dma_pool(m_pool_s *p)
{
struct m_pool **pp = &mp0.next;
while (*pp && *pp != p)
pp = &(*pp)->next;
if (*pp) {
*pp = (*pp)->next;
__m_free(&mp0, p, sizeof(*p), "MPOOL");
}
}
static void *__m_calloc_dma(m_bush_t bush, int size, char *name)
{
u_long flags;
struct m_pool *mp;
void *m = NULL;
spin_lock_irqsave(&ncr53c8xx_lock, flags);
mp = ___get_dma_pool(bush);
if (!mp)
mp = ___cre_dma_pool(bush);
if (mp)
m = __m_calloc(mp, size, name);
if (mp && !mp->nump)
___del_dma_pool(mp);
spin_unlock_irqrestore(&ncr53c8xx_lock, flags);
return m;
}
static void __m_free_dma(m_bush_t bush, void *m, int size, char *name)
{
u_long flags;
struct m_pool *mp;
spin_lock_irqsave(&ncr53c8xx_lock, flags);
mp = ___get_dma_pool(bush);
if (mp)
__m_free(mp, m, size, name);
if (mp && !mp->nump)
___del_dma_pool(mp);
spin_unlock_irqrestore(&ncr53c8xx_lock, flags);
}
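/*
 * Translate a CPU virtual address inside a pool cluster to the bus
 * address the chip must use, via the vtob[] hash of the device's pool.
 * Returns 0 if the address is not found.
 */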
static m_addr_t __vtobus(m_bush_t bush, void *m)
{
u_long flags;
m_pool_s *mp;
int hc = VTOB_HASH_CODE(m);
m_vtob_s *vp = NULL;
m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK;
spin_lock_irqsave(&ncr53c8xx_lock, flags);
mp = ___get_dma_pool(bush);
if (mp) {
vp = mp->vtob[hc];
while (vp && (m_addr_t) vp->vaddr != a)
vp = vp->next;
}
spin_unlock_irqrestore(&ncr53c8xx_lock, flags);
return vp ? vp->baddr + (((m_addr_t) m) - a) : 0;
}
#define _m_calloc_dma(np, s, n) __m_calloc_dma(np->dev, s, n)
#define _m_free_dma(np, p, s, n) __m_free_dma(np->dev, p, s, n)
#define m_calloc_dma(s, n) _m_calloc_dma(np, s, n)
#define m_free_dma(p, s, n) _m_free_dma(np, p, s, n)
#define _vtobus(np, p) __vtobus(np->dev, p)
#define vtobus(p) _vtobus(np, p)
/*
* Deal with DMA mapping/unmapping.
*/
/* To keep track of the dma mapping (sg/single) that has been set */
#define __data_mapped SCp.phase
#define __data_mapping SCp.have_data_in
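/*
 * __data_mapped records how the command's data buffer was mapped:
 * 0 = not mapped, 1 = single buffer (dma_map_single), 2 = scatter
 * list (dma_map_sg).  __data_mapping holds the dma address or the
 * sg entry count respectively.
 */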
static void __unmap_scsi_data(struct device *dev, struct scsi_cmnd *cmd)
{
switch(cmd->__data_mapped) {
case 2:
dma_unmap_sg(dev, cmd->buffer, cmd->use_sg,
cmd->sc_data_direction);
break;
case 1:
dma_unmap_single(dev, cmd->__data_mapping,
cmd->request_bufflen,
cmd->sc_data_direction);
break;
}
cmd->__data_mapped = 0;
}
static u_long __map_scsi_single_data(struct device *dev, struct scsi_cmnd *cmd)
{
dma_addr_t mapping;
if (cmd->request_bufflen == 0)
return 0;
mapping = dma_map_single(dev, cmd->request_buffer,
cmd->request_bufflen,
cmd->sc_data_direction);
cmd->__data_mapped = 1;
cmd->__data_mapping = mapping;
return mapping;
}
static int __map_scsi_sg_data(struct device *dev, struct scsi_cmnd *cmd)
{
int use_sg;
if (cmd->use_sg == 0)
return 0;
use_sg = dma_map_sg(dev, cmd->buffer, cmd->use_sg,
cmd->sc_data_direction);
cmd->__data_mapped = 2;
cmd->__data_mapping = use_sg;
return use_sg;
}
#define unmap_scsi_data(np, cmd) __unmap_scsi_data(np->dev, cmd)
#define map_scsi_single_data(np, cmd) __map_scsi_single_data(np->dev, cmd)
#define map_scsi_sg_data(np, cmd) __map_scsi_sg_data(np->dev, cmd)
/*==========================================================
**
** Driver setup.
**
** This structure is initialized from linux config
** options. It can be overridden at boot-up by the boot
** command line.
**
**==========================================================
*/
static struct ncr_driver_setup
driver_setup = SCSI_NCR_DRIVER_SETUP;
#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
static struct ncr_driver_setup
driver_safe_setup __initdata = SCSI_NCR_DRIVER_SAFE_SETUP;
#endif
#define initverbose (driver_setup.verbose)
#define bootverbose (np->verbose)
/*===================================================================
**
** Driver setup from the boot command line
**
**===================================================================
*/
#ifdef MODULE
#define ARG_SEP ' '
#else
#define ARG_SEP ','
#endif
#define OPT_TAGS 1
#define OPT_MASTER_PARITY 2
#define OPT_SCSI_PARITY 3
#define OPT_DISCONNECTION 4
#define OPT_SPECIAL_FEATURES 5
#define OPT_UNUSED_1 6
#define OPT_FORCE_SYNC_NEGO 7
#define OPT_REVERSE_PROBE 8
#define OPT_DEFAULT_SYNC 9
#define OPT_VERBOSE 10
#define OPT_DEBUG 11
#define OPT_BURST_MAX 12
#define OPT_LED_PIN 13
#define OPT_MAX_WIDE 14
#define OPT_SETTLE_DELAY 15
#define OPT_DIFF_SUPPORT 16
#define OPT_IRQM 17
#define OPT_PCI_FIX_UP 18
#define OPT_BUS_CHECK 19
#define OPT_OPTIMIZE 20
#define OPT_RECOVERY 21
#define OPT_SAFE_SETUP 22
#define OPT_USE_NVRAM 23
#define OPT_EXCLUDE 24
#define OPT_HOST_ID 25
#ifdef SCSI_NCR_IARB_SUPPORT
#define OPT_IARB 26
#endif
static char setup_token[] __initdata =
"tags:" "mpar:"
"spar:" "disc:"
"specf:" "ultra:"
"fsn:" "revprob:"
"sync:" "verb:"
"debug:" "burst:"
"led:" "wide:"
"settle:" "diff:"
"irqm:" "pcifix:"
"buschk:" "optim:"
"recovery:"
"safe:" "nvram:"
"excl:" "hostid:"
#ifdef SCSI_NCR_IARB_SUPPORT
"iarb:"
#endif
; /* DO NOT REMOVE THIS ';' */
#ifdef MODULE
#define ARG_SEP ' '
#else
#define ARG_SEP ','
#endif
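/*
 * Return the 1-based index of the option named at the start of 'p'
 * within the setup_token string above, or 0 if it is not recognized.
 * The index values match the OPT_* constants.
 */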
static int __init get_setup_token(char *p)
{
char *cur = setup_token;
char *pc;
int i = 0;
while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
++pc;
++i;
if (!strncmp(p, cur, pc - cur))
return i;
cur = pc;
}
return 0;
}
static int __init sym53c8xx__setup(char *str)
{
#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
char *cur = str;
char *pc, *pv;
int i, val, c;
int xi = 0;
while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
char *pe;
val = 0;
pv = pc;
c = *++pv;
if (c == 'n')
val = 0;
else if (c == 'y')
val = 1;
else
val = (int) simple_strtoul(pv, &pe, 0);
switch (get_setup_token(cur)) {
case OPT_TAGS:
driver_setup.default_tags = val;
if (pe && *pe == '/') {
i = 0;
while (*pe && *pe != ARG_SEP &&
i < sizeof(driver_setup.tag_ctrl)-1) {
driver_setup.tag_ctrl[i++] = *pe++;
}
driver_setup.tag_ctrl[i] = '\0';
}
break;
case OPT_MASTER_PARITY:
driver_setup.master_parity = val;
break;
case OPT_SCSI_PARITY:
driver_setup.scsi_parity = val;
break;
case OPT_DISCONNECTION:
driver_setup.disconnection = val;
break;
case OPT_SPECIAL_FEATURES:
driver_setup.special_features = val;
break;
case OPT_FORCE_SYNC_NEGO:
driver_setup.force_sync_nego = val;
break;
case OPT_REVERSE_PROBE:
driver_setup.reverse_probe = val;
break;
case OPT_DEFAULT_SYNC:
driver_setup.default_sync = val;
break;
case OPT_VERBOSE:
driver_setup.verbose = val;
break;
case OPT_DEBUG:
driver_setup.debug = val;
break;
case OPT_BURST_MAX:
driver_setup.burst_max = val;
break;
case OPT_LED_PIN:
driver_setup.led_pin = val;
break;
case OPT_MAX_WIDE:
driver_setup.max_wide = val? 1:0;
break;
case OPT_SETTLE_DELAY:
driver_setup.settle_delay = val;
break;
case OPT_DIFF_SUPPORT:
driver_setup.diff_support = val;
break;
case OPT_IRQM:
driver_setup.irqm = val;
break;
case OPT_PCI_FIX_UP:
driver_setup.pci_fix_up = val;
break;
case OPT_BUS_CHECK:
driver_setup.bus_check = val;
break;
case OPT_OPTIMIZE:
driver_setup.optimize = val;
break;
case OPT_RECOVERY:
driver_setup.recovery = val;
break;
case OPT_USE_NVRAM:
driver_setup.use_nvram = val;
break;
case OPT_SAFE_SETUP:
memcpy(&driver_setup, &driver_safe_setup,
sizeof(driver_setup));
break;
case OPT_EXCLUDE:
if (xi < SCSI_NCR_MAX_EXCLUDES)
driver_setup.excludes[xi++] = val;
break;
case OPT_HOST_ID:
driver_setup.host_id = val;
break;
#ifdef SCSI_NCR_IARB_SUPPORT
case OPT_IARB:
driver_setup.iarb = val;
break;
#endif
default:
printk("sym53c8xx_setup: unexpected boot option '%.*s' ignored\n", (int)(pc-cur+1), cur);
break;
}
if ((cur = strchr(cur, ARG_SEP)) != NULL)
++cur;
}
#endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */
return 1;
}
/*===================================================================
**
** Get device queue depth from boot command line.
**
**===================================================================
*/
#define DEF_DEPTH (driver_setup.default_tags)
#define ALL_TARGETS -2
#define NO_TARGET -1
#define ALL_LUNS -2
#define NO_LUN -1
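/*
 * Parse driver_setup.tag_ctrl (filled in from the "tags:" boot option)
 * to find the queue depth for a given host/target/lun.  The string is
 * a series of single-letter commands, each followed by a number:
 * '/' selects the next host, 't<n>' a target, 'u<n>' a lun, 'q<n>'
 * sets the depth for the current selection, and '-' resets the
 * target/lun selection.
 */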
static int device_queue_depth(int unit, int target, int lun)
{
int c, h, t, u, v;
char *p = driver_setup.tag_ctrl;
char *ep;
h = -1;
t = NO_TARGET;
u = NO_LUN;
while ((c = *p++) != 0) {
v = simple_strtoul(p, &ep, 0);
switch(c) {
case '/':
++h;
t = ALL_TARGETS;
u = ALL_LUNS;
break;
case 't':
if (t != target)
t = (target == v) ? v : NO_TARGET;
u = ALL_LUNS;
break;
case 'u':
if (u != lun)
u = (lun == v) ? v : NO_LUN;
break;
case 'q':
if (h == unit &&
(t == ALL_TARGETS || t == target) &&
(u == ALL_LUNS || u == lun))
return v;
break;
case '-':
t = ALL_TARGETS;
u = ALL_LUNS;
break;
default:
break;
}
p = ep;
}
return DEF_DEPTH;
}

File diff suppressed because it is too large

View File

@ -313,7 +313,8 @@ int bio_get_nr_vecs(struct block_device *bdev)
}
static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
*page, unsigned int len, unsigned int offset)
*page, unsigned int len, unsigned int offset,
unsigned short max_sectors)
{
int retried_segments = 0;
struct bio_vec *bvec;
@ -327,7 +328,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
if (bio->bi_vcnt >= bio->bi_max_vecs)
return 0;
if (((bio->bi_size + len) >> 9) > q->max_sectors)
if (((bio->bi_size + len) >> 9) > max_sectors)
return 0;
/*
@ -385,6 +386,25 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
return len;
}
/**
* bio_add_pc_page - attempt to add page to bio
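* @q: the target request queue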
* @bio: destination bio
* @page: page to add
* @len: vec entry length
* @offset: vec entry offset
*
* Attempt to add a page to the bio_vec maplist. This can fail for a
* number of reasons, such as the bio being full or target block
* device limitations. The target block device must allow bios
* smaller than PAGE_SIZE, so it is always possible to add a single
* page to an empty bio. This should only be used by REQ_PC bios.
*/
int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
unsigned int len, unsigned int offset)
{
return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
}
/**
* bio_add_page - attempt to add page to bio
* @bio: destination bio
@ -401,8 +421,8 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
unsigned int offset)
{
return __bio_add_page(bdev_get_queue(bio->bi_bdev), bio, page,
len, offset);
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
}
struct bio_map_data {
@ -514,7 +534,7 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
break;
}
if (__bio_add_page(q, bio, page, bytes, 0) < bytes) {
if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
ret = -EINVAL;
break;
}
@ -628,7 +648,8 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
/*
* sorry...
*/
if (__bio_add_page(q, bio, pages[j], bytes, offset) < bytes)
if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
bytes)
break;
len -= bytes;
@ -801,8 +822,8 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
if (bytes > len)
bytes = len;
if (__bio_add_page(q, bio, virt_to_page(data), bytes,
offset) < bytes)
if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
offset) < bytes)
break;
data += bytes;
@ -1228,6 +1249,7 @@ EXPORT_SYMBOL(bio_clone);
EXPORT_SYMBOL(bio_phys_segments);
EXPORT_SYMBOL(bio_hw_segments);
EXPORT_SYMBOL(bio_add_page);
EXPORT_SYMBOL(bio_add_pc_page);
EXPORT_SYMBOL(bio_get_nr_vecs);
EXPORT_SYMBOL(bio_map_user);
EXPORT_SYMBOL(bio_unmap_user);

View File

@ -292,6 +292,8 @@ extern struct bio *bio_clone(struct bio *, gfp_t);
extern void bio_init(struct bio *);
extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
unsigned long, unsigned int, int);

View File

@ -184,6 +184,7 @@ struct request {
void *sense;
unsigned int timeout;
int retries;
/*
* For Power Management requests
@ -558,6 +559,7 @@ extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(request_queue_t *, struct request *);
extern void blk_end_sync_rq(struct request *rq);
extern void blk_attempt_remerge(request_queue_t *, struct request *);
extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
@ -579,6 +581,10 @@ extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned
extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int);
extern int blk_execute_rq(request_queue_t *, struct gendisk *,
struct request *, int);
extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
struct request *, int,
void (*done)(struct request *));
static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
{
return bdev->bd_disk->queue;
@ -696,7 +702,8 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *);
#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
#define MAX_SECTORS 255
#define SAFE_MAX_SECTORS 255
#define BLK_DEF_MAX_SECTORS 1024
#define MAX_SEGMENT_SIZE 65536

View File

@ -15,6 +15,7 @@
#define PCI_CLASS_STORAGE_FLOPPY 0x0102
#define PCI_CLASS_STORAGE_IPI 0x0103
#define PCI_CLASS_STORAGE_RAID 0x0104
#define PCI_CLASS_STORAGE_SAS 0x0107
#define PCI_CLASS_STORAGE_OTHER 0x0180
#define PCI_BASE_CLASS_NETWORK 0x02

View File

@ -151,6 +151,6 @@ extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
extern void scsi_put_command(struct scsi_cmnd *);
extern void scsi_io_completion(struct scsi_cmnd *, unsigned int, unsigned int);
extern void scsi_finish_command(struct scsi_cmnd *cmd);
extern void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd, int retries);
extern void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd);
#endif /* _SCSI_SCSI_CMND_H */

View File

@ -16,7 +16,6 @@ extern void __scsi_print_sense(const char *name,
extern void scsi_print_driverbyte(int);
extern void scsi_print_hostbyte(int);
extern void scsi_print_status(unsigned char);
extern int scsi_print_msg(const unsigned char *);
extern const char *scsi_sense_key_string(unsigned char);
extern const char *scsi_extd_sense_format(unsigned char, unsigned char);

View File

@ -79,9 +79,9 @@ struct scsi_device {
char inq_periph_qual; /* PQ from INQUIRY data */
unsigned char inquiry_len; /* valid bytes in 'inquiry' */
unsigned char * inquiry; /* INQUIRY response data */
char * vendor; /* [back_compat] point into 'inquiry' ... */
char * model; /* ... after scan; point to static string */
char * rev; /* ... "nullnullnullnull" before scan */
const char * vendor; /* [back_compat] point into 'inquiry' ... */
const char * model; /* ... after scan; point to static string */
const char * rev; /* ... "nullnullnullnull" before scan */
unsigned char current_tag; /* current tag */
struct scsi_target *sdev_target; /* used only for single_lun */
@ -274,6 +274,12 @@ extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
extern int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
struct scsi_sense_hdr *, int timeout, int retries);
extern int scsi_execute_async(struct scsi_device *sdev,
const unsigned char *cmd, int data_direction,
void *buffer, unsigned bufflen, int use_sg,
int timeout, int retries, void *privdata,
void (*done)(void *, char *, int, int),
gfp_t gfp);
static inline unsigned int sdev_channel(struct scsi_device *sdev)
{

View File

@ -24,6 +24,9 @@
#include <linux/transport_class.h>
struct scsi_transport_template;
struct scsi_target;
struct scsi_device;
struct Scsi_Host;
struct spi_transport_attrs {
int period; /* value in the PPR/SDTR command */
@ -143,5 +146,6 @@ void spi_release_transport(struct scsi_transport_template *);
void spi_schedule_dv_device(struct scsi_device *);
void spi_dv_device(struct scsi_device *);
void spi_display_xfer_agreement(struct scsi_target *);
int spi_print_msg(const unsigned char *);
#endif /* SCSI_TRANSPORT_SPI_H */