SCSI misc on 20150416

This is the usual grab bag of driver updates (lpfc, qla2xxx, storvsc, aacraid,
 ipr) plus an assortment of minor updates.  There's also a major update to
 aic1542 which moves the driver into this millennium.
 
 Signed-off-by: James Bottomley <JBottomley@Odin.com>
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2
 
 iQEcBAABAgAGBQJVL/DEAAoJEDeqqVYsXL0MOwgIALPlgI0aMAtX5wLxzPMLB/2j
 fhNlsB9XZ6TeYIqE7syOY7geVJqsbACMGmDhGHs5Gt6jkTnwix/G49x3T1PXBODZ
 frz8GgNB6iGSqfCp+YbhJkTNHdudDIy2LrQ92EzNMb2+x0v6KTYTSq2dekgrC1zK
 8GUZ9bEzuxEGaBx9TK/Sy6H8QpvMtqqJig2eCL189U3JMMU3okWtSGya708u5Whh
 knbUgraMxFWNs+oHJHFclVYvekP+61i/TVyacQEM4KLDsmlxsLn49eRdiGMY6rpX
 LgDIvMjggQhbY2WcCXzetF7tsFFl0joJp1wFK1fUn9YN5e+J3MRWYVBDt8FMPX8=
 =OBny
 -----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "This is the usual grab bag of driver updates (lpfc, qla2xxx, storvsc,
  aacraid, ipr) plus an assortment of minor updates.  There's also a
  major update to aic1542 which moves the driver into this millennium"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (106 commits)
  change SCSI Maintainer email
  sd, mmc, virtio_blk, string_helpers: fix block size units
  ufs: add support to allow non standard behaviours (quirks)
  ufs-qcom: save controller revision info in internal structure
  qla2xxx: Update driver version to 8.07.00.18-k
  qla2xxx: Restore physical port WWPN only, when port down detected for FA-WWPN port.
  qla2xxx: Fix virtual port configuration, when switch port is disabled/enabled.
  qla2xxx: Prevent multiple firmware dump collection for ISP27XX.
  qla2xxx: Disable Interrupt handshake for ISP27XX.
  qla2xxx: Add debugging info for MBX timeout.
  qla2xxx: Add serdes read/write support for ISP27XX
  qla2xxx: Add udev notification to save fw dump for ISP27XX
  qla2xxx: Add message for successful FW dump collected for ISP27XX.
  qla2xxx: Add support to load firmware from file for ISP 26XX/27XX.
  qla2xxx: Fix beacon blink for ISP27XX.
  qla2xxx: Increase the wait time for firmware to be ready for P3P.
  qla2xxx: Fix crash due to wrong casting of reg for ISP27XX.
  qla2xxx: Fix warnings reported by static checker.
  lpfc: Update version to 10.5.0.0 for upstream patch set
  lpfc: Update copyright to 2015
  ...
Merged by Linus Torvalds on 2015-04-16 19:02:04 -04:00 (commit 7d69cff26c).
69 changed files with 3527 additions and 2237 deletions.

diff --git a/MAINTAINERS b/MAINTAINERS

@ -8653,11 +8653,9 @@ F: drivers/scsi/sg.c
F: include/scsi/sg.h
SCSI SUBSYSTEM
M: "James E.J. Bottomley" <JBottomley@parallels.com>
M: "James E.J. Bottomley" <JBottomley@odin.com>
L: linux-scsi@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-pending-2.6.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git
S: Maintained
F: drivers/scsi/
F: include/scsi/

diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c

@ -342,7 +342,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
struct request_queue *q = vblk->disk->queue;
char cap_str_2[10], cap_str_10[10];
char *envp[] = { "RESIZE=1", NULL };
u64 capacity, size;
u64 capacity;
/* Host must always specify the capacity. */
virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
@ -354,9 +354,10 @@ static void virtblk_config_changed_work(struct work_struct *work)
capacity = (sector_t)-1;
}
size = capacity * queue_logical_block_size(q);
string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
string_get_size(capacity, queue_logical_block_size(q),
STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
string_get_size(capacity, queue_logical_block_size(q),
STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
dev_notice(&vdev->dev,
"new size: %llu %d-byte logical blocks (%s/%s)\n",

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c

@ -2230,7 +2230,7 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
part_md->part_type = part_type;
list_add(&part_md->part, &md->part);
string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
cap_str, sizeof(cap_str));
pr_info("%s: %s %s partition %u %s\n",
part_md->disk->disk_name, mmc_card_id(card),
@ -2436,7 +2436,7 @@ static int mmc_blk_probe(struct device *dev)
if (IS_ERR(md))
return PTR_ERR(md);
string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
cap_str, sizeof(cap_str));
pr_info("%s: %s %s %s %s\n",
md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),

diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c

@ -474,11 +474,11 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
*/
#ifndef USLEEP_SLEEP
/* 20 ms (reasonable hard disk speed) */
#define USLEEP_SLEEP (20*HZ/1000)
#define USLEEP_SLEEP msecs_to_jiffies(20)
#endif
/* 300 RPM (floppy speed) */
#ifndef USLEEP_POLL
#define USLEEP_POLL (200*HZ/1000)
#define USLEEP_POLL msecs_to_jiffies(200)
#endif
#ifndef USLEEP_WAITLONG
/* RvC: (reasonable time to wait on select error) */
@ -576,7 +576,7 @@ static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0))
trying_irqs |= mask;
timeout = jiffies + (250 * HZ / 1000);
timeout = jiffies + msecs_to_jiffies(250);
probe_irq = NO_IRQ;
/*
@ -634,7 +634,7 @@ static void prepare_info(struct Scsi_Host *instance)
"sg_tablesize %d, this_id %d, "
"flags { %s%s%s}, "
#if defined(USLEEP_POLL) && defined(USLEEP_WAITLONG)
"USLEEP_POLL %d, USLEEP_WAITLONG %d, "
"USLEEP_POLL %lu, USLEEP_WAITLONG %lu, "
#endif
"options { %s} ",
instance->hostt->name, instance->io_port, instance->n_io_port,
@ -1346,7 +1346,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
* selection.
*/
timeout = jiffies + (250 * HZ / 1000);
timeout = jiffies + msecs_to_jiffies(250);
/*
* XXX very interesting - we're seeing a bounce where the BSY we
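
A note on the conversions in this file: msecs_to_jiffies() states the intent directly and is correct for any HZ, whereas open-coded N*HZ/1000 silently truncates toward zero. A minimal sketch of the failure mode (not triggered by the values above, which happen to divide evenly):

    unsigned long a = 5 * HZ / 1000;        /* 0 jiffies when HZ=100 */
    unsigned long b = msecs_to_jiffies(5);  /* at least 1 jiffy on any HZ */

msecs_to_jiffies() also returns unsigned long, which is why the USLEEP_POLL/USLEEP_WAITLONG printk formats above move from %d to %lu.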

diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c

@ -111,6 +111,41 @@
#define BYTE2(x) (unsigned char)((x) >> 16)
#define BYTE3(x) (unsigned char)((x) >> 24)
/* MODE_SENSE data format */
typedef struct {
struct {
u8 data_length;
u8 med_type;
u8 dev_par;
u8 bd_length;
} __attribute__((packed)) hd;
struct {
u8 dens_code;
u8 block_count[3];
u8 reserved;
u8 block_length[3];
} __attribute__((packed)) bd;
u8 mpc_buf[3];
} __attribute__((packed)) aac_modep_data;
/* MODE_SENSE_10 data format */
typedef struct {
struct {
u8 data_length[2];
u8 med_type;
u8 dev_par;
u8 rsrvd[2];
u8 bd_length[2];
} __attribute__((packed)) hd;
struct {
u8 dens_code;
u8 block_count[3];
u8 reserved;
u8 block_length[3];
} __attribute__((packed)) bd;
u8 mpc_buf[3];
} __attribute__((packed)) aac_modep10_data;
/*------------------------------------------------------------------------------
* S T R U C T S / T Y P E D E F S
*----------------------------------------------------------------------------*/
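
For reference, these typedefs model the standard SPC MODE SENSE reply layout: a parameter header (4 bytes for MODE SENSE(6), 8 bytes with a two-byte data length for MODE SENSE(10)) whose data-length field counts everything after itself, an optional 8-byte short block descriptor (density code, 3-byte block count, reserved byte, 3-byte block length), then the mode-page bytes, of which this driver only ever fills the three in mpc_buf.
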
@ -128,6 +163,48 @@ struct inquiry_data {
u8 inqd_prl[4]; /* Product Revision Level */
};
/* Added for VPD 0x83 */
typedef struct {
u8 CodeSet:4; /* VPD_CODE_SET */
u8 Reserved:4;
u8 IdentifierType:4; /* VPD_IDENTIFIER_TYPE */
u8 Reserved2:4;
u8 Reserved3;
u8 IdentifierLength;
u8 VendId[8];
u8 ProductId[16];
u8 SerialNumber[8]; /* SN in ASCII */
} TVPD_ID_Descriptor_Type_1;
typedef struct {
u8 CodeSet:4; /* VPD_CODE_SET */
u8 Reserved:4;
u8 IdentifierType:4; /* VPD_IDENTIFIER_TYPE */
u8 Reserved2:4;
u8 Reserved3;
u8 IdentifierLength;
struct TEU64Id {
u32 Serial;
/* The serial number is supposed to be 40 bits,
 * but we only support 32, so make the last byte zero. */
u8 Reserved;
u8 VendId[3];
} EU64Id;
} TVPD_ID_Descriptor_Type_2;
typedef struct {
u8 DeviceType:5;
u8 DeviceTypeQualifier:3;
u8 PageCode;
u8 Reserved;
u8 PageLength;
TVPD_ID_Descriptor_Type_1 IdDescriptorType1;
TVPD_ID_Descriptor_Type_2 IdDescriptorType2;
} TVPD_Page83;
/*
* M O D U L E G L O B A L S
*/
@ -385,6 +462,11 @@ int aac_get_containers(struct aac_dev *dev)
if (status >= 0) {
dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
AAC_OPTION_SUPPORTED_240_VOLUMES) {
maximum_num_containers =
le32_to_cpu(dresp->MaxSimpleVolumes);
}
aac_fib_complete(fibptr);
}
/* FIB should be freed only after getting the response from the F/W */
@ -438,7 +520,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
if ((le32_to_cpu(get_name_reply->status) == CT_OK)
&& (get_name_reply->data[0] != '\0')) {
char *sp = get_name_reply->data;
sp[sizeof(((struct aac_get_name_resp *)NULL)->data)-1] = '\0';
sp[sizeof(((struct aac_get_name_resp *)NULL)->data)] = '\0';
while (*sp == ' ')
++sp;
if (*sp) {
@ -539,6 +621,14 @@ static void _aac_probe_container2(void * context, struct fib * fibptr)
if ((le32_to_cpu(dresp->status) == ST_OK) &&
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
(le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 &
AAC_OPTION_VARIABLE_BLOCK_SIZE)) {
dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200;
fsa_dev_ptr->block_size = 0x200;
} else {
fsa_dev_ptr->block_size =
le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size);
}
fsa_dev_ptr->valid = 1;
/* sense_key holds the current state of the spin-up */
if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY))
@ -571,7 +661,9 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
int status;
dresp = (struct aac_mount *) fib_data(fibptr);
dresp->mnt[0].capacityhigh = 0;
if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 &
AAC_OPTION_VARIABLE_BLOCK_SIZE))
dresp->mnt[0].capacityhigh = 0;
if ((le32_to_cpu(dresp->status) != ST_OK) ||
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
_aac_probe_container2(context, fibptr);
@ -586,7 +678,12 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
dinfo = (struct aac_query_mount *)fib_data(fibptr);
dinfo->command = cpu_to_le32(VM_NameServe64);
if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
AAC_OPTION_VARIABLE_BLOCK_SIZE)
dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
else
dinfo->command = cpu_to_le32(VM_NameServe64);
dinfo->count = cpu_to_le32(scmd_id(scsicmd));
dinfo->type = cpu_to_le32(FT_FILESYS);
@ -621,7 +718,12 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
dinfo = (struct aac_query_mount *)fib_data(fibptr);
dinfo->command = cpu_to_le32(VM_NameServe);
if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
AAC_OPTION_VARIABLE_BLOCK_SIZE)
dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
else
dinfo->command = cpu_to_le32(VM_NameServe);
dinfo->count = cpu_to_le32(scmd_id(scsicmd));
dinfo->type = cpu_to_le32(FT_FILESYS);
scsicmd->SCp.ptr = (char *)callback;
@ -835,14 +937,88 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
get_serial_reply = (struct aac_get_serial_resp *) fib_data(fibptr);
/* Failure is irrelevant, using default value instead */
if (le32_to_cpu(get_serial_reply->status) == CT_OK) {
char sp[13];
/* EVPD bit set */
sp[0] = INQD_PDT_DA;
sp[1] = scsicmd->cmnd[2];
sp[2] = 0;
sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
le32_to_cpu(get_serial_reply->uid));
scsi_sg_copy_from_buffer(scsicmd, sp, sizeof(sp));
/*Check to see if it's for VPD 0x83 or 0x80 */
if (scsicmd->cmnd[2] == 0x83) {
/* vpd page 0x83 - Device Identification Page */
int i;
TVPD_Page83 VPDPage83Data;
memset(((u8 *)&VPDPage83Data), 0,
sizeof(VPDPage83Data));
/* DIRECT_ACCESS_DEVICE */
VPDPage83Data.DeviceType = 0;
/* DEVICE_CONNECTED */
VPDPage83Data.DeviceTypeQualifier = 0;
/* VPD_DEVICE_IDENTIFIERS */
VPDPage83Data.PageCode = 0x83;
VPDPage83Data.Reserved = 0;
VPDPage83Data.PageLength =
sizeof(VPDPage83Data.IdDescriptorType1) +
sizeof(VPDPage83Data.IdDescriptorType2);
/* T10 Vendor Identifier Field Format */
/* VpdCodeSetAscii */
VPDPage83Data.IdDescriptorType1.CodeSet = 2;
/* VpdIdentifierTypeVendorId */
VPDPage83Data.IdDescriptorType1.IdentifierType = 1;
VPDPage83Data.IdDescriptorType1.IdentifierLength =
sizeof(VPDPage83Data.IdDescriptorType1) - 4;
/* "ADAPTEC " for adaptec */
memcpy(VPDPage83Data.IdDescriptorType1.VendId,
"ADAPTEC ",
sizeof(VPDPage83Data.IdDescriptorType1.VendId));
memcpy(VPDPage83Data.IdDescriptorType1.ProductId,
"ARRAY ",
sizeof(
VPDPage83Data.IdDescriptorType1.ProductId));
/* Convert to ascii based serial number.
* The LSB is at the end.
*/
for (i = 0; i < 8; i++) {
u8 temp =
(u8)((get_serial_reply->uid >> ((7 - i) * 4)) & 0xF);
if (temp > 0x9) {
VPDPage83Data.IdDescriptorType1.SerialNumber[i] =
'A' + (temp - 0xA);
} else {
VPDPage83Data.IdDescriptorType1.SerialNumber[i] =
'0' + temp;
}
}
/* VpdCodeSetBinary */
VPDPage83Data.IdDescriptorType2.CodeSet = 1;
/* VpdIdentifierTypeEUI64 */
VPDPage83Data.IdDescriptorType2.IdentifierType = 2;
VPDPage83Data.IdDescriptorType2.IdentifierLength =
sizeof(VPDPage83Data.IdDescriptorType2) - 4;
VPDPage83Data.IdDescriptorType2.EU64Id.VendId[0] = 0xD0;
VPDPage83Data.IdDescriptorType2.EU64Id.VendId[1] = 0;
VPDPage83Data.IdDescriptorType2.EU64Id.VendId[2] = 0;
VPDPage83Data.IdDescriptorType2.EU64Id.Serial =
get_serial_reply->uid;
VPDPage83Data.IdDescriptorType2.EU64Id.Reserved = 0;
/* Move the inquiry data to the response buffer. */
scsi_sg_copy_from_buffer(scsicmd, &VPDPage83Data,
sizeof(VPDPage83Data));
} else {
/* It must be for VPD 0x80 */
char sp[13];
/* EVPD bit set */
sp[0] = INQD_PDT_DA;
sp[1] = scsicmd->cmnd[2];
sp[2] = 0;
sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
le32_to_cpu(get_serial_reply->uid));
scsi_sg_copy_from_buffer(scsicmd, sp,
sizeof(sp));
}
}
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
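
The hand-rolled nibble loop above just renders the 32-bit uid as eight uppercase hex digits, MSB first, equivalent to what the VPD 0x80 branch gets from snprintf(..., "%08X", ...). A standalone sketch of the same conversion:

    static void uid_to_hex8(u32 uid, u8 out[8])
    {
        int i;
        for (i = 0; i < 8; i++) {
            u8 nib = (uid >> ((7 - i) * 4)) & 0xF;
            out[i] = (nib > 9) ? 'A' + (nib - 10) : '0' + nib;
        }
    }
    /* e.g. uid 0xC0FFEE01 yields "C0FFEE01" (no NUL is stored) */
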
@ -982,7 +1158,8 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
memset(readcmd2, 0, sizeof(struct aac_raw_io2));
readcmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
readcmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
readcmd2->byteCount = cpu_to_le32(count<<9);
readcmd2->byteCount = cpu_to_le32(count *
dev->fsa_dev[scmd_id(cmd)].block_size);
readcmd2->cid = cpu_to_le16(scmd_id(cmd));
readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ);
ret = aac_build_sgraw2(cmd, readcmd2,
@ -997,7 +1174,8 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
readcmd = (struct aac_raw_io *) fib_data(fib);
readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
readcmd->count = cpu_to_le32(count<<9);
readcmd->count = cpu_to_le32(count *
dev->fsa_dev[scmd_id(cmd)].block_size);
readcmd->cid = cpu_to_le16(scmd_id(cmd));
readcmd->flags = cpu_to_le16(RIO_TYPE_READ);
readcmd->bpTotal = 0;
@ -1062,6 +1240,7 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
{
u16 fibsize;
struct aac_read *readcmd;
struct aac_dev *dev = fib->dev;
long ret;
aac_fib_init(fib);
@ -1069,7 +1248,8 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
readcmd->command = cpu_to_le32(VM_CtBlockRead);
readcmd->cid = cpu_to_le32(scmd_id(cmd));
readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
readcmd->count = cpu_to_le32(count * 512);
readcmd->count = cpu_to_le32(count *
dev->fsa_dev[scmd_id(cmd)].block_size);
ret = aac_build_sg(cmd, &readcmd->sg);
if (ret < 0)
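
The recurring count<<9 to count * block_size change is the heart of the 4 KiB-sector support: the shift hardcodes 512-byte blocks, so for a volume reporting block_size = 4096 an 8-block transfer would be described to the firmware as 8 << 9 = 4096 bytes instead of the correct 8 * 4096 = 32768. fsa_dev[...].block_size is populated at probe time (0x200 unless the adapter advertises AAC_OPTION_VARIABLE_BLOCK_SIZE, per the _aac_probe_container2 hunk above).
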
@ -1104,7 +1284,8 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
memset(writecmd2, 0, sizeof(struct aac_raw_io2));
writecmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
writecmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
writecmd2->byteCount = cpu_to_le32(count<<9);
writecmd2->byteCount = cpu_to_le32(count *
dev->fsa_dev[scmd_id(cmd)].block_size);
writecmd2->cid = cpu_to_le16(scmd_id(cmd));
writecmd2->flags = (fua && ((aac_cache & 5) != 1) &&
(((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
@ -1122,7 +1303,8 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
writecmd = (struct aac_raw_io *) fib_data(fib);
writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
writecmd->count = cpu_to_le32(count<<9);
writecmd->count = cpu_to_le32(count *
dev->fsa_dev[scmd_id(cmd)].block_size);
writecmd->cid = cpu_to_le16(scmd_id(cmd));
writecmd->flags = (fua && ((aac_cache & 5) != 1) &&
(((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
@ -1190,6 +1372,7 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
{
u16 fibsize;
struct aac_write *writecmd;
struct aac_dev *dev = fib->dev;
long ret;
aac_fib_init(fib);
@ -1197,7 +1380,8 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
writecmd->command = cpu_to_le32(VM_CtBlockWrite);
writecmd->cid = cpu_to_le32(scmd_id(cmd));
writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
writecmd->count = cpu_to_le32(count * 512);
writecmd->count = cpu_to_le32(count *
dev->fsa_dev[scmd_id(cmd)].block_size);
writecmd->sg.count = cpu_to_le32(1);
/* ->stable is not used - it did mean which type of write */
@ -2246,9 +2430,10 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
INQD_PDT_PROC : INQD_PDT_DA;
if (scsicmd->cmnd[2] == 0) {
/* supported vital product data pages */
arr[3] = 2;
arr[3] = 3;
arr[4] = 0x0;
arr[5] = 0x80;
arr[6] = 0x83;
arr[1] = scsicmd->cmnd[2];
scsi_sg_copy_from_buffer(scsicmd, &inq_data,
sizeof(inq_data));
@ -2264,7 +2449,16 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
if (aac_wwn != 2)
return aac_get_container_serial(
scsicmd);
/* SLES 10 SP1 special */
scsicmd->result = DID_OK << 16 |
COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
} else if (scsicmd->cmnd[2] == 0x83) {
/* vpd page 0x83 - Device Identification Page */
char *sno = (char *)&inq_data;
sno[3] = setinqserial(dev, &sno[4],
scmd_id(scsicmd));
if (aac_wwn != 2)
return aac_get_container_serial(
scsicmd);
scsicmd->result = DID_OK << 16 |
COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
} else {
@ -2329,10 +2523,10 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
cp[5] = (capacity >> 16) & 0xff;
cp[6] = (capacity >> 8) & 0xff;
cp[7] = (capacity >> 0) & 0xff;
cp[8] = 0;
cp[9] = 0;
cp[10] = 2;
cp[11] = 0;
cp[8] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
cp[9] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
cp[10] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
cp[11] = (fsa_dev_ptr[cid].block_size) & 0xff;
cp[12] = 0;
alloc_len = ((scsicmd->cmnd[10] << 24)
@ -2369,10 +2563,10 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
cp[1] = (capacity >> 16) & 0xff;
cp[2] = (capacity >> 8) & 0xff;
cp[3] = (capacity >> 0) & 0xff;
cp[4] = 0;
cp[5] = 0;
cp[6] = 2;
cp[7] = 0;
cp[4] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
cp[5] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
cp[6] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
cp[7] = (fsa_dev_ptr[cid].block_size) & 0xff;
scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
/* Do not cache partition table for arrays */
scsicmd->device->removable = 1;
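
The capacity and block-size fields here are big-endian per SCSI: the old hardcoded cp[6] = 2 encoded 0x00000200 = 512 bytes, while the new code serializes fsa_dev_ptr[cid].block_size, so e.g. 4096 = 0x00001000 lands as cp[4..7] = 00 00 10 00 in the READ CAPACITY(10) reply above (cp[8..11] in the 16-byte variant of the previous hunk).
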
@ -2385,30 +2579,79 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
case MODE_SENSE:
{
char mode_buf[7];
int mode_buf_length = 4;
u32 capacity;
aac_modep_data mpd;
if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
capacity = fsa_dev_ptr[cid].size - 1;
else
capacity = (u32)-1;
dprintk((KERN_DEBUG "MODE SENSE command.\n"));
mode_buf[0] = 3; /* Mode data length */
mode_buf[1] = 0; /* Medium type - default */
mode_buf[2] = 0; /* Device-specific param,
bit 8: 0/1 = write enabled/protected
bit 4: 0/1 = FUA enabled */
memset((char *)&mpd, 0, sizeof(aac_modep_data));
/* Mode data length */
mpd.hd.data_length = sizeof(mpd.hd) - 1;
/* Medium type - default */
mpd.hd.med_type = 0;
/* Device-specific param,
bit 8: 0/1 = write enabled/protected
bit 4: 0/1 = FUA enabled */
mpd.hd.dev_par = 0;
if (dev->raw_io_interface && ((aac_cache & 5) != 1))
mode_buf[2] = 0x10;
mode_buf[3] = 0; /* Block descriptor length */
mpd.hd.dev_par = 0x10;
if (scsicmd->cmnd[1] & 0x8)
mpd.hd.bd_length = 0; /* Block descriptor length */
else {
mpd.hd.bd_length = sizeof(mpd.bd);
mpd.hd.data_length += mpd.hd.bd_length;
mpd.bd.block_length[0] =
(fsa_dev_ptr[cid].block_size >> 16) & 0xff;
mpd.bd.block_length[1] =
(fsa_dev_ptr[cid].block_size >> 8) & 0xff;
mpd.bd.block_length[2] =
fsa_dev_ptr[cid].block_size & 0xff;
mpd.mpc_buf[0] = scsicmd->cmnd[2];
if (scsicmd->cmnd[2] == 0x1C) {
/* page length */
mpd.mpc_buf[1] = 0xa;
/* Mode data length */
mpd.hd.data_length = 23;
} else {
/* Mode data length */
mpd.hd.data_length = 15;
}
if (capacity > 0xffffff) {
mpd.bd.block_count[0] = 0xff;
mpd.bd.block_count[1] = 0xff;
mpd.bd.block_count[2] = 0xff;
} else {
mpd.bd.block_count[0] = (capacity >> 16) & 0xff;
mpd.bd.block_count[1] = (capacity >> 8) & 0xff;
mpd.bd.block_count[2] = capacity & 0xff;
}
}
if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
mode_buf[0] = 6;
mode_buf[4] = 8;
mode_buf[5] = 1;
mode_buf[6] = ((aac_cache & 6) == 2)
mpd.hd.data_length += 3;
mpd.mpc_buf[0] = 8;
mpd.mpc_buf[1] = 1;
mpd.mpc_buf[2] = ((aac_cache & 6) == 2)
? 0 : 0x04; /* WCE */
mode_buf_length = 7;
if (mode_buf_length > scsicmd->cmnd[4])
mode_buf_length = scsicmd->cmnd[4];
mode_buf_length = sizeof(mpd);
}
scsi_sg_copy_from_buffer(scsicmd, mode_buf, mode_buf_length);
if (mode_buf_length > scsicmd->cmnd[4])
mode_buf_length = scsicmd->cmnd[4];
else
mode_buf_length = sizeof(mpd);
scsi_sg_copy_from_buffer(scsicmd,
(char *)&mpd,
mode_buf_length);
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);
@ -2416,34 +2659,77 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
}
case MODE_SENSE_10:
{
char mode_buf[11];
u32 capacity;
int mode_buf_length = 8;
aac_modep10_data mpd10;
if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
capacity = fsa_dev_ptr[cid].size - 1;
else
capacity = (u32)-1;
dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
mode_buf[0] = 0; /* Mode data length (MSB) */
mode_buf[1] = 6; /* Mode data length (LSB) */
mode_buf[2] = 0; /* Medium type - default */
mode_buf[3] = 0; /* Device-specific param,
bit 8: 0/1 = write enabled/protected
bit 4: 0/1 = FUA enabled */
memset((char *)&mpd10, 0, sizeof(aac_modep10_data));
/* Mode data length (MSB) */
mpd10.hd.data_length[0] = 0;
/* Mode data length (LSB) */
mpd10.hd.data_length[1] = sizeof(mpd10.hd) - 1;
/* Medium type - default */
mpd10.hd.med_type = 0;
/* Device-specific param,
bit 8: 0/1 = write enabled/protected
bit 4: 0/1 = FUA enabled */
mpd10.hd.dev_par = 0;
if (dev->raw_io_interface && ((aac_cache & 5) != 1))
mode_buf[3] = 0x10;
mode_buf[4] = 0; /* reserved */
mode_buf[5] = 0; /* reserved */
mode_buf[6] = 0; /* Block descriptor length (MSB) */
mode_buf[7] = 0; /* Block descriptor length (LSB) */
mpd10.hd.dev_par = 0x10;
mpd10.hd.rsrvd[0] = 0; /* reserved */
mpd10.hd.rsrvd[1] = 0; /* reserved */
if (scsicmd->cmnd[1] & 0x8) {
/* Block descriptor length (MSB) */
mpd10.hd.bd_length[0] = 0;
/* Block descriptor length (LSB) */
mpd10.hd.bd_length[1] = 0;
} else {
mpd10.hd.bd_length[0] = 0;
mpd10.hd.bd_length[1] = sizeof(mpd10.bd);
mpd10.hd.data_length[1] += mpd10.hd.bd_length[1];
mpd10.bd.block_length[0] =
(fsa_dev_ptr[cid].block_size >> 16) & 0xff;
mpd10.bd.block_length[1] =
(fsa_dev_ptr[cid].block_size >> 8) & 0xff;
mpd10.bd.block_length[2] =
fsa_dev_ptr[cid].block_size & 0xff;
if (capacity > 0xffffff) {
mpd10.bd.block_count[0] = 0xff;
mpd10.bd.block_count[1] = 0xff;
mpd10.bd.block_count[2] = 0xff;
} else {
mpd10.bd.block_count[0] =
(capacity >> 16) & 0xff;
mpd10.bd.block_count[1] =
(capacity >> 8) & 0xff;
mpd10.bd.block_count[2] =
capacity & 0xff;
}
}
if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
mode_buf[1] = 9;
mode_buf[8] = 8;
mode_buf[9] = 1;
mode_buf[10] = ((aac_cache & 6) == 2)
mpd10.hd.data_length[1] += 3;
mpd10.mpc_buf[0] = 8;
mpd10.mpc_buf[1] = 1;
mpd10.mpc_buf[2] = ((aac_cache & 6) == 2)
? 0 : 0x04; /* WCE */
mode_buf_length = 11;
mode_buf_length = sizeof(mpd10);
if (mode_buf_length > scsicmd->cmnd[8])
mode_buf_length = scsicmd->cmnd[8];
}
scsi_sg_copy_from_buffer(scsicmd, mode_buf, mode_buf_length);
scsi_sg_copy_from_buffer(scsicmd,
(char *)&mpd10,
mode_buf_length);
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);

diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h

@ -6,13 +6,63 @@
#define nblank(x) _nblank(x)[0]
#include <linux/interrupt.h>
#include <linux/pci.h>
/*------------------------------------------------------------------------------
* D E F I N E S
*----------------------------------------------------------------------------*/
#define AAC_MAX_MSIX 8 /* vectors */
#define AAC_PCI_MSI_ENABLE 0x8000
enum {
AAC_ENABLE_INTERRUPT = 0x0,
AAC_DISABLE_INTERRUPT,
AAC_ENABLE_MSIX,
AAC_DISABLE_MSIX,
AAC_CLEAR_AIF_BIT,
AAC_CLEAR_SYNC_BIT,
AAC_ENABLE_INTX
};
#define AAC_INT_MODE_INTX (1<<0)
#define AAC_INT_MODE_MSI (1<<1)
#define AAC_INT_MODE_AIF (1<<2)
#define AAC_INT_MODE_SYNC (1<<3)
#define AAC_INT_ENABLE_TYPE1_INTX 0xfffffffb
#define AAC_INT_ENABLE_TYPE1_MSIX 0xfffffffa
#define AAC_INT_DISABLE_ALL 0xffffffff
/* Bit definitions in IOA->Host Interrupt Register */
#define PMC_TRANSITION_TO_OPERATIONAL (1<<31)
#define PMC_IOARCB_TRANSFER_FAILED (1<<28)
#define PMC_IOA_UNIT_CHECK (1<<27)
#define PMC_NO_HOST_RRQ_FOR_CMD_RESPONSE (1<<26)
#define PMC_CRITICAL_IOA_OP_IN_PROGRESS (1<<25)
#define PMC_IOARRIN_LOST (1<<4)
#define PMC_SYSTEM_BUS_MMIO_ERROR (1<<3)
#define PMC_IOA_PROCESSOR_IN_ERROR_STATE (1<<2)
#define PMC_HOST_RRQ_VALID (1<<1)
#define PMC_OPERATIONAL_STATUS (1<<31)
#define PMC_ALLOW_MSIX_VECTOR0 (1<<0)
#define PMC_IOA_ERROR_INTERRUPTS (PMC_IOARCB_TRANSFER_FAILED | \
PMC_IOA_UNIT_CHECK | \
PMC_NO_HOST_RRQ_FOR_CMD_RESPONSE | \
PMC_IOARRIN_LOST | \
PMC_SYSTEM_BUS_MMIO_ERROR | \
PMC_IOA_PROCESSOR_IN_ERROR_STATE)
#define PMC_ALL_INTERRUPT_BITS (PMC_IOA_ERROR_INTERRUPTS | \
PMC_HOST_RRQ_VALID | \
PMC_TRANSITION_TO_OPERATIONAL | \
PMC_ALLOW_MSIX_VECTOR0)
#define PMC_GLOBAL_INT_BIT2 0x00000004
#define PMC_GLOBAL_INT_BIT0 0x00000001
#ifndef AAC_DRIVER_BUILD
# define AAC_DRIVER_BUILD 30300
# define AAC_DRIVER_BUILD 40709
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@ -36,6 +86,7 @@
#define CONTAINER_TO_ID(cont) (cont)
#define CONTAINER_TO_LUN(cont) (0)
#define PMC_DEVICE_S6 0x28b
#define PMC_DEVICE_S7 0x28c
#define PMC_DEVICE_S8 0x28d
#define PMC_DEVICE_S9 0x28f
@ -434,7 +485,7 @@ enum fib_xfer_state {
struct aac_init
{
__le32 InitStructRevision;
__le32 MiniPortRevision;
__le32 Sa_MSIXVectors;
__le32 fsrev;
__le32 CommHeaderAddress;
__le32 FastIoCommAreaAddress;
@ -582,7 +633,8 @@ struct aac_queue {
spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */
struct list_head cmdq; /* A queue of FIBs which need to be processed by the FS thread. This is */
/* only valid for command queues which receive entries from the adapter. */
u32 numpending; /* Number of entries on outstanding queue. */
/* Number of entries on outstanding queue. */
atomic_t numpending;
struct aac_dev * dev; /* Back pointer to adapter structure */
};
@ -755,7 +807,8 @@ struct rkt_registers {
struct src_mu_registers {
/* PCI*| Name */
__le32 reserved0[8]; /* 00h | Reserved */
__le32 reserved0[6]; /* 00h | Reserved */
__le32 IOAR[2]; /* 18h | IOA->host interrupt register */
__le32 IDR; /* 20h | Inbound Doorbell Register */
__le32 IISR; /* 24h | Inbound Int. Status Register */
__le32 reserved1[3]; /* 28h | Reserved */
@ -767,17 +820,18 @@ struct src_mu_registers {
__le32 OMR; /* bch | Outbound Message Register */
__le32 IQ_L; /* c0h | Inbound Queue (Low address) */
__le32 IQ_H; /* c4h | Inbound Queue (High address) */
__le32 ODR_MSI; /* c8h | MSI register for sync./AIF */
};
struct src_registers {
struct src_mu_registers MUnit; /* 00h - c7h */
struct src_mu_registers MUnit; /* 00h - cbh */
union {
struct {
__le32 reserved1[130790]; /* c8h - 7fc5fh */
__le32 reserved1[130789]; /* cch - 7fc5fh */
struct src_inbound IndexRegs; /* 7fc60h */
} tupelo;
struct {
__le32 reserved1[974]; /* c8h - fffh */
__le32 reserved1[973]; /* cch - fffh */
struct src_inbound IndexRegs; /* 1000h */
} denali;
} u;
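
The reserved-array resizing checks out: with IOAR[] carved out of reserved0 and the new ODR_MSI register appended at c8h, the MU block grows from 0xc8 to 0xcc bytes, so the tupelo padding to the index registers at 7fc60h shrinks from (0x7fc60 - 0xc8)/4 = 130790 dwords to (0x7fc60 - 0xcc)/4 = 130789, and the denali padding to 1000h from (0x1000 - 0xc8)/4 = 974 to 973.
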
@ -857,6 +911,7 @@ struct fsa_dev_info {
u8 deleted;
char devname[8];
struct sense_data sense_data;
u32 block_size;
};
struct fib {
@ -960,6 +1015,10 @@ struct aac_supplement_adapter_info
#define AAC_OPTION_IGNORE_RESET cpu_to_le32(0x00000002)
#define AAC_OPTION_POWER_MANAGEMENT cpu_to_le32(0x00000004)
#define AAC_OPTION_DOORBELL_RESET cpu_to_le32(0x00004000)
/* 4KB sector size */
#define AAC_OPTION_VARIABLE_BLOCK_SIZE cpu_to_le32(0x00040000)
/* 240 simple volume support */
#define AAC_OPTION_SUPPORTED_240_VOLUMES cpu_to_le32(0x10000000)
#define AAC_SIS_VERSION_V3 3
#define AAC_SIS_SLOT_UNKNOWN 0xFF
@ -1026,6 +1085,11 @@ struct aac_bus_info_response {
#define AAC_OPT_NEW_COMM_TYPE3 cpu_to_le32(1<<30)
#define AAC_OPT_NEW_COMM_TYPE4 cpu_to_le32(1<<31)
/* MSIX context */
struct aac_msix_ctx {
int vector_no;
struct aac_dev *dev;
};
struct aac_dev
{
@ -1081,8 +1145,10 @@ struct aac_dev
* if AAC_COMM_MESSAGE_TYPE1 */
dma_addr_t host_rrq_pa; /* phys. address */
u32 host_rrq_idx; /* index into rrq buffer */
/* index into rrq buffer */
u32 host_rrq_idx[AAC_MAX_MSIX];
atomic_t rrq_outstanding[AAC_MAX_MSIX];
u32 fibs_pushed_no;
struct pci_dev *pdev; /* Our PCI interface */
void * printfbuf; /* pointer to buffer used for printf's from the adapter */
void * comm_addr; /* Base address of Comm area */
@ -1151,6 +1217,13 @@ struct aac_dev
int sync_mode;
struct fib *sync_fib;
struct list_head sync_fib_list;
u32 doorbell_mask;
u32 max_msix; /* max. MSI-X vectors */
u32 vector_cap; /* MSI-X vector capab.*/
int msi_enabled; /* MSI/MSI-X enabled */
struct msix_entry msixentry[AAC_MAX_MSIX];
struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */
u8 adapter_shutdown;
};
#define aac_adapter_interrupt(dev) \
@ -1589,6 +1662,7 @@ struct aac_srb_reply
#define VM_CtHostWrite64 20
#define VM_DrvErrTblLog 21
#define VM_NameServe64 22
#define VM_NameServeAllBlk 30
#define MAX_VMCOMMAND_NUM 23 /* used for sizing stats array - leave last */
@ -1611,8 +1685,13 @@ struct aac_fsinfo {
__le32 fsInodeDensity;
}; /* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */
struct aac_blockdevinfo {
__le32 block_size;
};
union aac_contentinfo {
struct aac_fsinfo filesys; /* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */
struct aac_fsinfo filesys;
struct aac_blockdevinfo bdevinfo;
};
/*
@ -1677,6 +1756,7 @@ struct aac_get_container_count_resp {
__le32 MaxContainers;
__le32 ContainerSwitchEntries;
__le32 MaxPartitions;
__le32 MaxSimpleVolumes;
};
@ -1951,6 +2031,8 @@ extern struct aac_common aac_config;
#define AifEnEnclosureManagement 13 /* EM_DRIVE_* */
#define EM_DRIVE_INSERTION 31
#define EM_DRIVE_REMOVAL 32
#define EM_SES_DRIVE_INSERTION 33
#define EM_SES_DRIVE_REMOVAL 26
#define AifEnBatteryEvent 14 /* Change in Battery State */
#define AifEnAddContainer 15 /* A new array was created */
#define AifEnDeleteContainer 16 /* A container was deleted */
@ -1983,6 +2065,9 @@ extern struct aac_common aac_config;
/* PMC NEW COMM: Request the event data */
#define AifReqEvent 200
/* RAW device deleted */
#define AifRawDeviceRemove 203
/*
* Adapter Initiated FIB command structures. Start with the adapter
* initiated FIBs that really come from the adapter, and get responded
@ -2025,6 +2110,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
int aac_fib_complete(struct fib * context);
#define fib_data(fibctx) ((void *)(fibctx)->hw_fib_va->data)
struct aac_dev *aac_init_adapter(struct aac_dev *dev);
void aac_src_access_devreg(struct aac_dev *dev, int mode);
int aac_get_config_status(struct aac_dev *dev, int commit_flag);
int aac_get_containers(struct aac_dev *dev);
int aac_scsi_cmd(struct scsi_cmnd *cmd);

diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c

@ -689,7 +689,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
kfree (usg);
}
srbcmd->count = cpu_to_le32(byte_count);
psg->count = cpu_to_le32(sg_indx+1);
if (user_srbcmd->sg.count)
psg->count = cpu_to_le32(sg_indx+1);
else
psg->count = 0;
status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL);
} else {
struct user_sgmap* upsg = &user_srbcmd->sg;
@ -775,7 +778,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
}
}
srbcmd->count = cpu_to_le32(byte_count);
psg->count = cpu_to_le32(sg_indx+1);
if (user_srbcmd->sg.count)
psg->count = cpu_to_le32(sg_indx+1);
else
psg->count = 0;
status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
}
if (status == -ERESTARTSYS) {

diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c

@ -43,6 +43,8 @@
#include "aacraid.h"
static void aac_define_int_mode(struct aac_dev *dev);
struct aac_common aac_config = {
.irq_mod = 1
};
@ -51,7 +53,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
{
unsigned char *base;
unsigned long size, align;
const unsigned long fibsize = 4096;
const unsigned long fibsize = dev->max_fib_size;
const unsigned long printfbufsiz = 256;
unsigned long host_rrq_size = 0;
struct aac_init *init;
@ -91,7 +93,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
if (dev->max_fib_size != sizeof(struct hw_fib))
init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4);
init->MiniPortRevision = cpu_to_le32(Sa_MINIPORT_REVISION);
init->Sa_MSIXVectors = cpu_to_le32(Sa_MINIPORT_REVISION);
init->fsrev = cpu_to_le32(dev->fsrev);
/*
@ -140,7 +142,8 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
INITFLAGS_NEW_COMM_TYPE2_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED);
init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32));
init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff));
init->MiniPortRevision = cpu_to_le32(0L); /* number of MSI-X */
/* number of MSI-X */
init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
dprintk((KERN_WARNING"aacraid: New Comm Interface type2 enabled\n"));
}
@ -179,7 +182,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
{
q->numpending = 0;
atomic_set(&q->numpending, 0);
q->dev = dev;
init_waitqueue_head(&q->cmdready);
INIT_LIST_HEAD(&q->cmdq);
@ -228,6 +231,12 @@ int aac_send_shutdown(struct aac_dev * dev)
/* FIB should be freed only after getting the response from the F/W */
if (status != -ERESTARTSYS)
aac_fib_free(fibctx);
dev->adapter_shutdown = 1;
if ((dev->pdev->device == PMC_DEVICE_S7 ||
dev->pdev->device == PMC_DEVICE_S8 ||
dev->pdev->device == PMC_DEVICE_S9) &&
dev->msi_enabled)
aac_src_access_devreg(dev, AAC_ENABLE_INTX);
return status;
}
@ -350,8 +359,10 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
dev->raw_io_interface = dev->raw_io_64 = 0;
if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
0, 0, 0, 0, 0, 0,
status+0, status+1, status+2, status+3, NULL)) &&
(status[0] == 0x00000001)) {
dev->doorbell_mask = status[3];
if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64))
dev->raw_io_64 = 1;
dev->sync_mode = aac_sync_mode;
@ -388,6 +399,9 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
}
}
}
dev->max_msix = 0;
dev->msi_enabled = 0;
dev->adapter_shutdown = 0;
if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
0, 0, 0, 0, 0, 0,
status+0, status+1, status+2, status+3, status+4))
@ -461,6 +475,11 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
if (host->can_queue > AAC_NUM_IO_FIB)
host->can_queue = AAC_NUM_IO_FIB;
if (dev->pdev->device == PMC_DEVICE_S6 ||
dev->pdev->device == PMC_DEVICE_S7 ||
dev->pdev->device == PMC_DEVICE_S8 ||
dev->pdev->device == PMC_DEVICE_S9)
aac_define_int_mode(dev);
/*
* Ok now init the communication subsystem
*/
@ -489,4 +508,79 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
return dev;
}
static void aac_define_int_mode(struct aac_dev *dev)
{
int i, msi_count;
msi_count = i = 0;
/* max. vectors from GET_COMM_PREFERRED_SETTINGS */
if (dev->max_msix == 0 ||
dev->pdev->device == PMC_DEVICE_S6 ||
dev->sync_mode) {
dev->max_msix = 1;
dev->vector_cap =
dev->scsi_host_ptr->can_queue +
AAC_NUM_MGT_FIB;
return;
}
msi_count = min(dev->max_msix,
(unsigned int)num_online_cpus());
dev->max_msix = msi_count;
if (msi_count > AAC_MAX_MSIX)
msi_count = AAC_MAX_MSIX;
for (i = 0; i < msi_count; i++)
dev->msixentry[i].entry = i;
if (msi_count > 1 &&
pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) {
i = pci_enable_msix(dev->pdev,
dev->msixentry,
msi_count);
/* Check how many MSIX vectors are allocated */
if (i >= 0) {
dev->msi_enabled = 1;
if (i) {
msi_count = i;
if (pci_enable_msix(dev->pdev,
dev->msixentry,
msi_count)) {
dev->msi_enabled = 0;
printk(KERN_ERR "%s%d: MSIX not supported!! Will try MSI 0x%x.\n",
dev->name, dev->id, i);
}
}
} else {
dev->msi_enabled = 0;
printk(KERN_ERR "%s%d: MSIX not supported!! Will try MSI 0x%x.\n",
dev->name, dev->id, i);
}
}
if (!dev->msi_enabled) {
msi_count = 1;
i = pci_enable_msi(dev->pdev);
if (!i) {
dev->msi_enabled = 1;
dev->msi = 1;
} else {
printk(KERN_ERR "%s%d: MSI not supported!! Will try INTx 0x%x.\n",
dev->name, dev->id, i);
}
}
if (!dev->msi_enabled)
dev->max_msix = msi_count = 1;
else {
if (dev->max_msix > msi_count)
dev->max_msix = msi_count;
}
dev->vector_cap =
(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) /
msi_count;
}
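
aac_define_int_mode() leans on the old pci_enable_msix() contract: 0 means all requested vectors were allocated, a positive return is the number of vectors actually available (the caller may retry with that count), and a negative return is a hard error. The classic retry idiom, of which the code above unrolls a single level, looks roughly like:

    rc = pci_enable_msix(pdev, entries, nvec);
    while (rc > 0) {        /* only rc vectors available: retry smaller */
        nvec = rc;
        rc = pci_enable_msix(pdev, entries, nvec);
    }
    /* rc == 0: success with nvec vectors; rc < 0: fall back to MSI/INTx */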

diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c

@ -208,14 +208,10 @@ struct fib *aac_fib_alloc(struct aac_dev *dev)
void aac_fib_free(struct fib *fibptr)
{
unsigned long flags, flagsv;
unsigned long flags;
spin_lock_irqsave(&fibptr->event_lock, flagsv);
if (fibptr->done == 2) {
spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
if (fibptr->done == 2)
return;
}
spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
@ -321,7 +317,7 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
/* Queue is full */
if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
qid, q->numpending);
qid, atomic_read(&q->numpending));
return 0;
} else {
*entry = q->base + *index;
@ -414,7 +410,6 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
struct aac_dev * dev = fibptr->dev;
struct hw_fib * hw_fib = fibptr->hw_fib_va;
unsigned long flags = 0;
unsigned long qflags;
unsigned long mflags = 0;
unsigned long sflags = 0;
@ -568,9 +563,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
int blink;
if (time_is_before_eq_jiffies(timeout)) {
struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
spin_lock_irqsave(q->lock, qflags);
q->numpending--;
spin_unlock_irqrestore(q->lock, qflags);
atomic_dec(&q->numpending);
if (wait == -1) {
printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
"Usually a result of a PCI interrupt routing problem;\n"
@ -775,7 +768,6 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
int aac_fib_complete(struct fib *fibptr)
{
unsigned long flags;
struct hw_fib * hw_fib = fibptr->hw_fib_va;
/*
@ -798,12 +790,6 @@ int aac_fib_complete(struct fib *fibptr)
* command is complete that we had sent to the adapter and this
* cdb could be reused.
*/
spin_lock_irqsave(&fibptr->event_lock, flags);
if (fibptr->done == 2) {
spin_unlock_irqrestore(&fibptr->event_lock, flags);
return 0;
}
spin_unlock_irqrestore(&fibptr->event_lock, flags);
if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
@ -868,7 +854,7 @@ void aac_printf(struct aac_dev *dev, u32 val)
* dispatches it to the appropriate routine for handling.
*/
#define AIF_SNIFF_TIMEOUT (30*HZ)
#define AIF_SNIFF_TIMEOUT (500*HZ)
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
struct hw_fib * hw_fib = fibptr->hw_fib_va;
@ -897,6 +883,39 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
switch (le32_to_cpu(aifcmd->command)) {
case AifCmdDriverNotify:
switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
case AifRawDeviceRemove:
container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
if ((container >> 28)) {
container = (u32)-1;
break;
}
channel = (container >> 24) & 0xF;
if (channel >= dev->maximum_num_channels) {
container = (u32)-1;
break;
}
id = container & 0xFFFF;
if (id >= dev->maximum_num_physicals) {
container = (u32)-1;
break;
}
lun = (container >> 16) & 0xFF;
container = (u32)-1;
channel = aac_phys_to_logical(channel);
device_config_needed =
(((__le32 *)aifcmd->data)[0] ==
cpu_to_le32(AifRawDeviceRemove)) ? DELETE : ADD;
if (device_config_needed == ADD) {
device = scsi_device_lookup(
dev->scsi_host_ptr,
channel, id, lun);
if (device) {
scsi_remove_device(device);
scsi_device_put(device);
}
}
break;
/*
* Morph or Expand complete
*/
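
Reading the unpacking in the AifRawDeviceRemove case above: the AIF payload word packs validity in the top nibble, channel in bits 24-27, lun in bits 16-23 and target id in the low 16 bits, so e.g. 0x01000005 decodes to channel 1, lun 0, id 5, while any word with bits set above bit 27 is rejected as invalid.
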
@ -1044,6 +1063,8 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
case EM_DRIVE_INSERTION:
case EM_DRIVE_REMOVAL:
case EM_SES_DRIVE_INSERTION:
case EM_SES_DRIVE_REMOVAL:
container = le32_to_cpu(
((__le32 *)aifcmd->data)[2]);
if ((container >> 28)) {
@ -1069,8 +1090,10 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
}
channel = aac_phys_to_logical(channel);
device_config_needed =
(((__le32 *)aifcmd->data)[3]
== cpu_to_le32(EM_DRIVE_INSERTION)) ?
((((__le32 *)aifcmd->data)[3]
== cpu_to_le32(EM_DRIVE_INSERTION)) ||
(((__le32 *)aifcmd->data)[3]
== cpu_to_le32(EM_SES_DRIVE_INSERTION))) ?
ADD : DELETE;
break;
}
@ -1247,12 +1270,13 @@ retry_next:
static int _aac_reset_adapter(struct aac_dev *aac, int forced)
{
int index, quirks;
int retval;
int retval, i;
struct Scsi_Host *host;
struct scsi_device *dev;
struct scsi_cmnd *command;
struct scsi_cmnd *command_list;
int jafo = 0;
int cpu;
/*
* Assumptions:
@ -1315,7 +1339,33 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
aac->comm_phys = 0;
kfree(aac->queues);
aac->queues = NULL;
free_irq(aac->pdev->irq, aac);
cpu = cpumask_first(cpu_online_mask);
if (aac->pdev->device == PMC_DEVICE_S6 ||
aac->pdev->device == PMC_DEVICE_S7 ||
aac->pdev->device == PMC_DEVICE_S8 ||
aac->pdev->device == PMC_DEVICE_S9) {
if (aac->max_msix > 1) {
for (i = 0; i < aac->max_msix; i++) {
if (irq_set_affinity_hint(
aac->msixentry[i].vector,
NULL)) {
printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
aac->name,
aac->id,
cpu);
}
cpu = cpumask_next(cpu,
cpu_online_mask);
free_irq(aac->msixentry[i].vector,
&(aac->aac_msix[i]));
}
pci_disable_msix(aac->pdev);
} else {
free_irq(aac->pdev->irq, &(aac->aac_msix[0]));
}
} else {
free_irq(aac->pdev->irq, aac);
}
if (aac->msi)
pci_disable_msi(aac->pdev);
kfree(aac->fsa_dev);

diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c

@ -84,7 +84,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
* continue. The caller has already been notified that
* the fib timed out.
*/
dev->queues->queue[AdapNormCmdQueue].numpending--;
atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);
if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
spin_unlock_irqrestore(q->lock, flags);
@ -354,7 +354,7 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
* continue. The caller has already been notified that
* the fib timed out.
*/
dev->queues->queue[AdapNormCmdQueue].numpending--;
atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);
if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
aac_fib_complete(fib);
@ -389,8 +389,13 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
* NOTE: we cannot touch the fib after this
* call, because it may have been deallocated.
*/
fib->flags &= FIB_CONTEXT_FLAG_FASTRESP;
fib->callback(fib->callback_data, fib);
if (likely(fib->callback && fib->callback_data)) {
fib->flags &= FIB_CONTEXT_FLAG_FASTRESP;
fib->callback(fib->callback_data, fib);
} else {
aac_fib_complete(fib);
aac_fib_free(fib);
}
} else {
unsigned long flagv;
dprintk((KERN_INFO "event_wait up\n"));
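
The likely() guard above is a straightforward NULL-dereference fix: if a completing FIB has no callback (or callback_data) attached, it is now completed and freed in place rather than being dispatched through a NULL function pointer.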

diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c

@ -56,7 +56,7 @@
#include "aacraid.h"
#define AAC_DRIVER_VERSION "1.2-0"
#define AAC_DRIVER_VERSION "1.2-1"
#ifndef AAC_DRIVER_BRANCH
#define AAC_DRIVER_BRANCH ""
#endif
@ -251,27 +251,15 @@ static struct aac_driver_ident aac_drivers[] = {
* TODO: unify with aac_scsi_cmd().
*/
static int aac_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
static int aac_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct aac_dev *dev = (struct aac_dev *)host->hostdata;
u32 count = 0;
cmd->scsi_done = done;
for (; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
struct fib * fib = &dev->fibs[count];
struct scsi_cmnd * command;
if (fib->hw_fib_va->header.XferState &&
((command = fib->callback_data)) &&
(command == cmd) &&
(cmd->SCp.phase == AAC_OWNER_FIRMWARE))
return 0; /* Already owned by Adapter */
}
int r = 0;
cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
return (aac_scsi_cmd(cmd) ? FAILED : 0);
r = (aac_scsi_cmd(cmd) ? FAILED : 0);
return r;
}
static DEF_SCSI_QCMD(aac_queuecommand)
/**
* aac_info - Returns the host adapter name
* @shost: Scsi host to report on
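
For reference, DEF_SCSI_QCMD() is the transitional macro from the host_lock pushdown work; deleting it here means aac_queuecommand() now runs without the host lock. Its approximate expansion (serial-number bookkeeping omitted) was:

    static int aac_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
    {
        unsigned long irq_flags;
        int rc;
        spin_lock_irqsave(shost->host_lock, irq_flags);
        rc = aac_queuecommand_lck(cmd, cmd->scsi_done);
        spin_unlock_irqrestore(shost->host_lock, irq_flags);
        return rc;
    }
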
@ -713,7 +701,9 @@ static long aac_cfg_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
int ret;
if (!capable(CAP_SYS_RAWIO))
struct aac_dev *aac;
aac = (struct aac_dev *)file->private_data;
if (!capable(CAP_SYS_RAWIO) || aac->adapter_shutdown)
return -EPERM;
mutex_lock(&aac_mutex);
ret = aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
@ -1082,6 +1072,9 @@ static struct scsi_host_template aac_driver_template = {
static void __aac_shutdown(struct aac_dev * aac)
{
int i;
int cpu;
if (aac->aif_thread) {
int i;
/* Clear out events first */
@ -1095,9 +1088,37 @@ static void __aac_shutdown(struct aac_dev * aac)
}
aac_send_shutdown(aac);
aac_adapter_disable_int(aac);
free_irq(aac->pdev->irq, aac);
cpu = cpumask_first(cpu_online_mask);
if (aac->pdev->device == PMC_DEVICE_S6 ||
aac->pdev->device == PMC_DEVICE_S7 ||
aac->pdev->device == PMC_DEVICE_S8 ||
aac->pdev->device == PMC_DEVICE_S9) {
if (aac->max_msix > 1) {
for (i = 0; i < aac->max_msix; i++) {
if (irq_set_affinity_hint(
aac->msixentry[i].vector,
NULL)) {
printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
aac->name,
aac->id,
cpu);
}
cpu = cpumask_next(cpu,
cpu_online_mask);
free_irq(aac->msixentry[i].vector,
&(aac->aac_msix[i]));
}
} else {
free_irq(aac->pdev->irq,
&(aac->aac_msix[0]));
}
} else {
free_irq(aac->pdev->irq, aac);
}
if (aac->msi)
pci_disable_msi(aac->pdev);
else if (aac->max_msix > 1)
pci_disable_msix(aac->pdev);
}
static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)

diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c

@ -400,16 +400,13 @@ int aac_rx_deliver_producer(struct fib * fib)
{
struct aac_dev *dev = fib->dev;
struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
unsigned long qflags;
u32 Index;
unsigned long nointr = 0;
spin_lock_irqsave(q->lock, qflags);
aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib_va, 1, fib, &nointr);
q->numpending++;
atomic_inc(&q->numpending);
*(q->headers.producer) = cpu_to_le32(Index + 1);
spin_unlock_irqrestore(q->lock, qflags);
if (!(nointr & aac_config.irq_mod))
aac_adapter_notify(dev, AdapNormCmdQueue);
@ -426,15 +423,12 @@ static int aac_rx_deliver_message(struct fib * fib)
{
struct aac_dev *dev = fib->dev;
struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
unsigned long qflags;
u32 Index;
u64 addr;
volatile void __iomem *device;
unsigned long count = 10000000L; /* 50 seconds */
spin_lock_irqsave(q->lock, qflags);
q->numpending++;
spin_unlock_irqrestore(q->lock, qflags);
atomic_inc(&q->numpending);
for(;;) {
Index = rx_readl(dev, MUnit.InboundQueue);
if (unlikely(Index == 0xFFFFFFFFL))
@ -442,9 +436,7 @@ static int aac_rx_deliver_message(struct fib * fib)
if (likely(Index != 0xFFFFFFFFL))
break;
if (--count == 0) {
spin_lock_irqsave(q->lock, qflags);
q->numpending--;
spin_unlock_irqrestore(q->lock, qflags);
atomic_dec(&q->numpending);
return -ETIMEDOUT;
}
udelay(5);

diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c

@ -44,98 +44,128 @@
#include "aacraid.h"
static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
static int aac_src_get_sync_status(struct aac_dev *dev);
irqreturn_t aac_src_intr_message(int irq, void *dev_id)
{
struct aac_dev *dev = dev_id;
struct aac_msix_ctx *ctx;
struct aac_dev *dev;
unsigned long bellbits, bellbits_shifted;
int our_interrupt = 0;
int isFastResponse;
int vector_no;
int isFastResponse, mode;
u32 index, handle;
bellbits = src_readl(dev, MUnit.ODR_R);
if (bellbits & PmDoorBellResponseSent) {
bellbits = PmDoorBellResponseSent;
/* handle async. status */
src_writel(dev, MUnit.ODR_C, bellbits);
src_readl(dev, MUnit.ODR_C);
our_interrupt = 1;
index = dev->host_rrq_idx;
ctx = (struct aac_msix_ctx *)dev_id;
dev = ctx->dev;
vector_no = ctx->vector_no;
if (dev->msi_enabled) {
mode = AAC_INT_MODE_MSI;
if (vector_no == 0) {
bellbits = src_readl(dev, MUnit.ODR_MSI);
if (bellbits & 0x40000)
mode |= AAC_INT_MODE_AIF;
if (bellbits & 0x1000)
mode |= AAC_INT_MODE_SYNC;
}
} else {
mode = AAC_INT_MODE_INTX;
bellbits = src_readl(dev, MUnit.ODR_R);
if (bellbits & PmDoorBellResponseSent) {
bellbits = PmDoorBellResponseSent;
src_writel(dev, MUnit.ODR_C, bellbits);
src_readl(dev, MUnit.ODR_C);
} else {
bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
src_writel(dev, MUnit.ODR_C, bellbits);
src_readl(dev, MUnit.ODR_C);
if (bellbits_shifted & DoorBellAifPending)
mode |= AAC_INT_MODE_AIF;
else if (bellbits_shifted & OUTBOUNDDOORBELL_0)
mode |= AAC_INT_MODE_SYNC;
}
}
if (mode & AAC_INT_MODE_SYNC) {
unsigned long sflags;
struct list_head *entry;
int send_it = 0;
extern int aac_sync_mode;
if (!aac_sync_mode && !dev->msi_enabled) {
src_writel(dev, MUnit.ODR_C, bellbits);
src_readl(dev, MUnit.ODR_C);
}
if (dev->sync_fib) {
if (dev->sync_fib->callback)
dev->sync_fib->callback(dev->sync_fib->callback_data,
dev->sync_fib);
spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
dev->management_fib_count--;
up(&dev->sync_fib->event_wait);
}
spin_unlock_irqrestore(&dev->sync_fib->event_lock,
sflags);
spin_lock_irqsave(&dev->sync_lock, sflags);
if (!list_empty(&dev->sync_fib_list)) {
entry = dev->sync_fib_list.next;
dev->sync_fib = list_entry(entry,
struct fib,
fiblink);
list_del(entry);
send_it = 1;
} else {
dev->sync_fib = NULL;
}
spin_unlock_irqrestore(&dev->sync_lock, sflags);
if (send_it) {
aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
(u32)dev->sync_fib->hw_fib_pa,
0, 0, 0, 0, 0,
NULL, NULL, NULL, NULL, NULL);
}
}
if (!dev->msi_enabled)
mode = 0;
}
if (mode & AAC_INT_MODE_AIF) {
/* handle AIF */
aac_intr_normal(dev, 0, 2, 0, NULL);
if (dev->msi_enabled)
aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
mode = 0;
}
if (mode) {
index = dev->host_rrq_idx[vector_no];
for (;;) {
isFastResponse = 0;
/* remove toggle bit (31) */
handle = le32_to_cpu(dev->host_rrq[index]) & 0x7fffffff;
handle = (dev->host_rrq[index] & 0x7fffffff);
/* check fast response bit (30) */
if (handle & 0x40000000)
isFastResponse = 1;
handle &= 0x0000ffff;
if (handle == 0)
break;
if (dev->msi_enabled && dev->max_msix > 1)
atomic_dec(&dev->rrq_outstanding[vector_no]);
aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
dev->host_rrq[index++] = 0;
if (index == dev->scsi_host_ptr->can_queue +
AAC_NUM_MGT_FIB)
index = 0;
dev->host_rrq_idx = index;
}
} else {
bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
if (bellbits_shifted & DoorBellAifPending) {
src_writel(dev, MUnit.ODR_C, bellbits);
src_readl(dev, MUnit.ODR_C);
our_interrupt = 1;
/* handle AIF */
aac_intr_normal(dev, 0, 2, 0, NULL);
} else if (bellbits_shifted & OUTBOUNDDOORBELL_0) {
unsigned long sflags;
struct list_head *entry;
int send_it = 0;
extern int aac_sync_mode;
src_writel(dev, MUnit.ODR_C, bellbits);
src_readl(dev, MUnit.ODR_C);
if (!aac_sync_mode) {
src_writel(dev, MUnit.ODR_C, bellbits);
src_readl(dev, MUnit.ODR_C);
our_interrupt = 1;
}
if (dev->sync_fib) {
our_interrupt = 1;
if (dev->sync_fib->callback)
dev->sync_fib->callback(dev->sync_fib->callback_data,
dev->sync_fib);
spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
dev->management_fib_count--;
up(&dev->sync_fib->event_wait);
}
spin_unlock_irqrestore(&dev->sync_fib->event_lock, sflags);
spin_lock_irqsave(&dev->sync_lock, sflags);
if (!list_empty(&dev->sync_fib_list)) {
entry = dev->sync_fib_list.next;
dev->sync_fib = list_entry(entry, struct fib, fiblink);
list_del(entry);
send_it = 1;
} else {
dev->sync_fib = NULL;
}
spin_unlock_irqrestore(&dev->sync_lock, sflags);
if (send_it) {
aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
(u32)dev->sync_fib->hw_fib_pa, 0, 0, 0, 0, 0,
NULL, NULL, NULL, NULL, NULL);
}
}
if (index == (vector_no + 1) * dev->vector_cap)
index = vector_no * dev->vector_cap;
dev->host_rrq_idx[vector_no] = index;
}
mode = 0;
}
if (our_interrupt) {
return IRQ_HANDLED;
}
return IRQ_NONE;
return IRQ_HANDLED;
}
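
With MSI-X, the handler's dev_id is no longer the aac_dev itself but a per-vector aac_msix_ctx, so each vector knows both the adapter and its own slice of the response queue. Registration then looks roughly like this sketch (names from the diff; error unwinding omitted):

    int i;
    for (i = 0; i < dev->max_msix; i++) {
        dev->aac_msix[i].vector_no = i;
        dev->aac_msix[i].dev = dev;
        if (request_irq(dev->msixentry[i].vector, aac_src_intr_message,
                        0, "aacraid", &dev->aac_msix[i]))
            break;  /* real code must free the already-requested vectors */
    }
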
/**
@ -155,7 +185,7 @@ static void aac_src_disable_interrupt(struct aac_dev *dev)
static void aac_src_enable_interrupt_message(struct aac_dev *dev)
{
src_writel(dev, MUnit.OIMR, dev->OIMR = 0xfffffff8);
aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT);
}
/**
@ -174,6 +204,7 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command,
u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4)
{
unsigned long start;
unsigned long delay;
int ok;
/*
@ -191,7 +222,10 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command,
/*
* Clear the synch command doorbell to start on a clean slate.
*/
src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
if (!dev->msi_enabled)
src_writel(dev,
MUnit.ODR_C,
OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
/*
* Disable doorbell interrupts
@ -213,19 +247,29 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command,
ok = 0;
start = jiffies;
/*
* Wait up to 5 minutes
*/
while (time_before(jiffies, start+300*HZ)) {
if (command == IOP_RESET_ALWAYS) {
/* Wait up to 10 sec */
delay = 10*HZ;
} else {
/* Wait up to 5 minutes */
delay = 300*HZ;
}
while (time_before(jiffies, start+delay)) {
udelay(5); /* Delay 5 microseconds to let Mon960 get info. */
/*
* Mon960 will set doorbell0 bit when it has completed the command.
*/
if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & OUTBOUNDDOORBELL_0) {
if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
/*
* Clear the doorbell.
*/
src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
if (dev->msi_enabled)
aac_src_access_devreg(dev,
AAC_CLEAR_SYNC_BIT);
else
src_writel(dev,
MUnit.ODR_C,
OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
ok = 1;
break;
}
@ -254,11 +298,16 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command,
*r3 = readl(&dev->IndexRegs->Mailbox[3]);
if (r4)
*r4 = readl(&dev->IndexRegs->Mailbox[4]);
if (command == GET_COMM_PREFERRED_SETTINGS)
dev->max_msix =
readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF;
/*
* Clear the synch command doorbell.
*/
src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
if (!dev->msi_enabled)
src_writel(dev,
MUnit.ODR_C,
OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
}
/*
@ -335,9 +384,14 @@ static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
static void aac_src_start_adapter(struct aac_dev *dev)
{
struct aac_init *init;
int i;
/* reset host_rrq_idx first */
dev->host_rrq_idx = 0;
for (i = 0; i < dev->max_msix; i++) {
dev->host_rrq_idx[i] = i * dev->vector_cap;
atomic_set(&dev->rrq_outstanding[i], 0);
}
dev->fibs_pushed_no = 0;
init = dev->init;
init->HostElapsedSeconds = cpu_to_le32(get_seconds());
@ -390,15 +444,39 @@ static int aac_src_deliver_message(struct fib *fib)
{
struct aac_dev *dev = fib->dev;
struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
unsigned long qflags;
u32 fibsize;
dma_addr_t address;
struct aac_fib_xporthdr *pFibX;
u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
spin_lock_irqsave(q->lock, qflags);
q->numpending++;
spin_unlock_irqrestore(q->lock, qflags);
atomic_inc(&q->numpending);
if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest &&
dev->max_msix > 1) {
u_int16_t vector_no, first_choice = 0xffff;
vector_no = dev->fibs_pushed_no % dev->max_msix;
do {
vector_no += 1;
if (vector_no == dev->max_msix)
vector_no = 1;
if (atomic_read(&dev->rrq_outstanding[vector_no]) <
dev->vector_cap)
break;
if (0xffff == first_choice)
first_choice = vector_no;
else if (vector_no == first_choice)
break;
} while (1);
if (vector_no == first_choice)
vector_no = 0;
atomic_inc(&dev->rrq_outstanding[vector_no]);
if (dev->fibs_pushed_no == 0xffffffff)
dev->fibs_pushed_no = 0;
else
dev->fibs_pushed_no++;
fib->hw_fib_va->header.Handle += (vector_no << 16);
}
if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
/* Encode the fib size into the fibsize bits */
@ -498,15 +576,34 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled)
if (bled)
printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
dev->name, dev->id, bled);
dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL);
if (bled || (var != 0x00000001))
return -EINVAL;
if (dev->supplement_adapter_info.SupportedOptions2 &
AAC_OPTION_DOORBELL_RESET) {
src_writel(dev, MUnit.IDR, reset_mask);
if ((bled || (var != 0x00000001)) &&
!dev->doorbell_mask)
return -EINVAL;
else if (dev->doorbell_mask) {
reset_mask = dev->doorbell_mask;
bled = 0;
var = 0x00000001;
}
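/*
 * PMC S7/S8/S9 adapters must drop back to INTx mode before the
 * doorbell reset below; the 5 second sleep lets the firmware settle.
 */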
if ((dev->pdev->device == PMC_DEVICE_S7 ||
dev->pdev->device == PMC_DEVICE_S8 ||
dev->pdev->device == PMC_DEVICE_S9) && dev->msi_enabled) {
aac_src_access_devreg(dev, AAC_ENABLE_INTX);
dev->msi_enabled = 0;
msleep(5000); /* Delay 5 seconds */
}
if (!bled && (dev->supplement_adapter_info.SupportedOptions2 &
AAC_OPTION_DOORBELL_RESET)) {
src_writel(dev, MUnit.IDR, reset_mask);
ssleep(45);
} else {
src_writel(dev, MUnit.IDR, 0x100);
ssleep(45);
}
}
if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
@ -527,7 +624,6 @@ int aac_src_select_comm(struct aac_dev *dev, int comm)
{
switch (comm) {
case AAC_COMM_MESSAGE:
dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
dev->a_ops.adapter_intr = aac_src_intr_message;
dev->a_ops.adapter_deliver = aac_src_deliver_message;
break;
@ -625,6 +721,7 @@ int aac_src_init(struct aac_dev *dev)
*/
dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
dev->a_ops.adapter_notify = aac_src_notify_adapter;
dev->a_ops.adapter_sync_cmd = src_sync_cmd;
dev->a_ops.adapter_check_health = aac_src_check_health;
@ -646,8 +743,11 @@ int aac_src_init(struct aac_dev *dev)
dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
dev->aac_msix[0].vector_no = 0;
dev->aac_msix[0].dev = dev;
if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
IRQF_SHARED, "aacraid", dev) < 0) {
IRQF_SHARED, "aacraid", &(dev->aac_msix[0])) < 0) {
if (dev->msi)
pci_disable_msi(dev->pdev);
@ -659,6 +759,7 @@ int aac_src_init(struct aac_dev *dev)
dev->dbg_base = pci_resource_start(dev->pdev, 2);
dev->dbg_base_mapped = dev->regs.src.bar1;
dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
aac_adapter_enable_int(dev);
@ -688,7 +789,9 @@ int aac_srcv_init(struct aac_dev *dev)
unsigned long status;
int restart = 0;
int instance = dev->id;
int i, j;
const char *name = dev->name;
int cpu;
dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
dev->a_ops.adapter_comm = aac_src_select_comm;
@ -784,6 +887,7 @@ int aac_srcv_init(struct aac_dev *dev)
*/
dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
dev->a_ops.adapter_notify = aac_src_notify_adapter;
dev->a_ops.adapter_sync_cmd = src_sync_cmd;
dev->a_ops.adapter_check_health = aac_src_check_health;
@ -802,18 +906,54 @@ int aac_srcv_init(struct aac_dev *dev)
goto error_iounmap;
if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE2)
goto error_iounmap;
dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
IRQF_SHARED, "aacraid", dev) < 0) {
if (dev->msi)
pci_disable_msi(dev->pdev);
printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
name, instance);
goto error_iounmap;
if (dev->msi_enabled)
aac_src_access_devreg(dev, AAC_ENABLE_MSIX);
if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
cpu = cpumask_first(cpu_online_mask);
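/*
 * Register one IRQ per MSI-X vector and spread the affinity hints
 * across the online CPUs in round-robin order.
 */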
for (i = 0; i < dev->max_msix; i++) {
dev->aac_msix[i].vector_no = i;
dev->aac_msix[i].dev = dev;
if (request_irq(dev->msixentry[i].vector,
dev->a_ops.adapter_intr,
0,
"aacraid",
&(dev->aac_msix[i]))) {
printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
name, instance, i);
for (j = 0 ; j < i ; j++)
free_irq(dev->msixentry[j].vector,
&(dev->aac_msix[j]));
pci_disable_msix(dev->pdev);
goto error_iounmap;
}
if (irq_set_affinity_hint(
dev->msixentry[i].vector,
get_cpu_mask(cpu))) {
printk(KERN_ERR "%s%d: Failed to set IRQ affinity for cpu %d\n",
name, instance, cpu);
}
cpu = cpumask_next(cpu, cpu_online_mask);
}
} else {
dev->aac_msix[0].vector_no = 0;
dev->aac_msix[0].dev = dev;
if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
IRQF_SHARED,
"aacraid",
&(dev->aac_msix[0])) < 0) {
if (dev->msi)
pci_disable_msi(dev->pdev);
printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
name, instance);
goto error_iounmap;
}
}
dev->dbg_base = dev->base_start;
dev->dbg_base_mapped = dev->base;
dev->dbg_size = dev->base_size;
dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
aac_adapter_enable_int(dev);
@ -831,3 +971,93 @@ error_iounmap:
return -1;
}
void aac_src_access_devreg(struct aac_dev *dev, int mode)
{
u_int32_t val;
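/*
 * Interrupt delivery is steered through two registers: OIMR masks
 * outbound interrupts, while individual IDR bits switch between
 * MSI-X and INTx and acknowledge the AIF and sync doorbells.
 */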
switch (mode) {
case AAC_ENABLE_INTERRUPT:
src_writel(dev,
MUnit.OIMR,
dev->OIMR = (dev->msi_enabled ?
AAC_INT_ENABLE_TYPE1_MSIX :
AAC_INT_ENABLE_TYPE1_INTX));
break;
case AAC_DISABLE_INTERRUPT:
src_writel(dev,
MUnit.OIMR,
dev->OIMR = AAC_INT_DISABLE_ALL);
break;
case AAC_ENABLE_MSIX:
/* set bit 6 */
val = src_readl(dev, MUnit.IDR);
val |= 0x40;
src_writel(dev, MUnit.IDR, val);
src_readl(dev, MUnit.IDR);
/* unmask int. */
val = PMC_ALL_INTERRUPT_BITS;
src_writel(dev, MUnit.IOAR, val);
val = src_readl(dev, MUnit.OIMR);
src_writel(dev,
MUnit.OIMR,
val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
break;
case AAC_DISABLE_MSIX:
/* reset bit 6 */
val = src_readl(dev, MUnit.IDR);
val &= ~0x40;
src_writel(dev, MUnit.IDR, val);
src_readl(dev, MUnit.IDR);
break;
case AAC_CLEAR_AIF_BIT:
/* set bit 5 */
val = src_readl(dev, MUnit.IDR);
val |= 0x20;
src_writel(dev, MUnit.IDR, val);
src_readl(dev, MUnit.IDR);
break;
case AAC_CLEAR_SYNC_BIT:
/* set bit 4 */
val = src_readl(dev, MUnit.IDR);
val |= 0x10;
src_writel(dev, MUnit.IDR, val);
src_readl(dev, MUnit.IDR);
break;
case AAC_ENABLE_INTX:
/* set bit 7 */
val = src_readl(dev, MUnit.IDR);
val |= 0x80;
src_writel(dev, MUnit.IDR, val);
src_readl(dev, MUnit.IDR);
/* unmask int. */
val = PMC_ALL_INTERRUPT_BITS;
src_writel(dev, MUnit.IOAR, val);
src_readl(dev, MUnit.IOAR);
val = src_readl(dev, MUnit.OIMR);
src_writel(dev, MUnit.OIMR,
val & (~(PMC_GLOBAL_INT_BIT2)));
break;
default:
break;
}
}
static int aac_src_get_sync_status(struct aac_dev *dev)
{
int val;
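/*
 * With MSI-X enabled the sync-command doorbell is bit 12 of ODR_MSI;
 * otherwise it is read from the shifted outbound doorbell register.
 */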
if (dev->msi_enabled)
val = src_readl(dev, MUnit.ODR_MSI) & 0x1000 ? 1 : 0;
else
val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT;
return val;
}

File diff suppressed because it is too large.


@ -1,64 +1,35 @@
#ifndef _AHA1542_H
/* $Id: aha1542.h,v 1.1 1992/07/24 06:27:38 root Exp root $
*
* Header file for the adaptec 1542 driver for Linux
*
* $Log: aha1542.h,v $
* Revision 1.1 1992/07/24 06:27:38 root
* Initial revision
*
* Revision 1.2 1992/07/04 18:41:49 root
* Replaced distribution with current drivers
*
* Revision 1.3 1992/06/23 23:58:20 root
* Fixes.
*
* Revision 1.2 1992/05/26 22:13:23 root
* Changed bug that prevented DMA above first 2 mbytes.
*
* Revision 1.1 1992/05/22 21:00:29 root
* Initial revision
*
* Revision 1.1 1992/04/24 18:01:50 root
* Initial revision
*
* Revision 1.1 1992/04/02 03:23:13 drew
* Initial revision
*
* Revision 1.3 1992/01/27 14:46:29 tthorn
* *** empty log message ***
*
*/
#ifndef _AHA1542_H_
#define _AHA1542_H_
#include <linux/types.h>
/* I/O Port interface 4.2 */
/* READ */
#define STATUS(base) base
#define STST 0x80 /* Self Test in Progress */
#define DIAGF 0x40 /* Internal Diagnostic Failure */
#define INIT 0x20 /* Mailbox Initialization Required */
#define IDLE 0x10 /* SCSI Host Adapter Idle */
#define CDF 0x08 /* Command/Data Out Port Full */
#define DF 0x04 /* Data In Port Full */
#define INVDCMD 0x01 /* Invalid H A Command */
#define STATMASK 0xfd /* 0x02 is reserved */
#define STST BIT(7) /* Self Test in Progress */
#define DIAGF BIT(6) /* Internal Diagnostic Failure */
#define INIT BIT(5) /* Mailbox Initialization Required */
#define IDLE BIT(4) /* SCSI Host Adapter Idle */
#define CDF BIT(3) /* Command/Data Out Port Full */
#define DF BIT(2) /* Data In Port Full */
/* BIT(1) is reserved */
#define INVDCMD BIT(0) /* Invalid H A Command */
#define STATMASK (STST | DIAGF | INIT | IDLE | CDF | DF | INVDCMD)
#define INTRFLAGS(base) (STATUS(base)+2)
#define ANYINTR 0x80 /* Any Interrupt */
#define SCRD 0x08 /* SCSI Reset Detected */
#define HACC 0x04 /* HA Command Complete */
#define MBOA 0x02 /* MBO Empty */
#define MBIF 0x01 /* MBI Full */
#define INTRMASK 0x8f
#define ANYINTR BIT(7) /* Any Interrupt */
#define SCRD BIT(3) /* SCSI Reset Detected */
#define HACC BIT(2) /* HA Command Complete */
#define MBOA BIT(1) /* MBO Empty */
#define MBIF BIT(0) /* MBI Full */
#define INTRMASK (ANYINTR | SCRD | HACC | MBOA | MBIF)
/* WRITE */
#define CONTROL(base) STATUS(base)
#define HRST 0x80 /* Hard Reset */
#define SRST 0x40 /* Soft Reset */
#define IRST 0x20 /* Interrupt Reset */
#define SCRST 0x10 /* SCSI Bus Reset */
#define HRST BIT(7) /* Hard Reset */
#define SRST BIT(6) /* Soft Reset */
#define IRST BIT(5) /* Interrupt Reset */
#define SCRST BIT(4) /* SCSI Bus Reset */
/* READ/WRITE */
#define DATA(base) (STATUS(base)+1)
@ -80,14 +51,14 @@
/* Mailbox Definition 5.2.1 and 5.2.2 */
struct mailbox {
unchar status; /* Command/Status */
unchar ccbptr[3]; /* msb, .., lsb */
u8 status; /* Command/Status */
u8 ccbptr[3]; /* msb, .., lsb */
};
/* This is used with scatter-gather */
struct chain {
unchar datalen[3]; /* Size of this part of chain */
unchar dataptr[3]; /* Location of data */
u8 datalen[3]; /* Size of this part of chain */
u8 dataptr[3]; /* Location of data */
};
/* These belong in scsi.h also */
@ -100,51 +71,32 @@ static inline void any2scsi(u8 *p, u32 v)
#define scsi2int(up) ( (((long)*(up)) << 16) + (((long)(up)[1]) << 8) + ((long)(up)[2]) )
#define xany2scsi(up, p) \
(up)[0] = ((long)(p)) >> 24; \
(up)[1] = ((long)(p)) >> 16; \
(up)[2] = ((long)(p)) >> 8; \
(up)[3] = ((long)(p));
#define xscsi2int(up) ( (((long)(up)[0]) << 24) + (((long)(up)[1]) << 16) \
+ (((long)(up)[2]) << 8) + ((long)(up)[3]) )
#define MAX_CDB 12
#define MAX_SENSE 14
struct ccb { /* Command Control Block 5.3 */
unchar op; /* Command Control Block Operation Code */
unchar idlun; /* op=0,2:Target Id, op=1:Initiator Id */
/* Outbound data transfer, length is checked*/
/* Inbound data transfer, length is checked */
/* Logical Unit Number */
unchar cdblen; /* SCSI Command Length */
unchar rsalen; /* Request Sense Allocation Length/Disable */
unchar datalen[3]; /* Data Length (msb, .., lsb) */
unchar dataptr[3]; /* Data Pointer */
unchar linkptr[3]; /* Link Pointer */
unchar commlinkid; /* Command Linking Identifier */
unchar hastat; /* Host Adapter Status (HASTAT) */
unchar tarstat; /* Target Device Status */
unchar reserved[2];
unchar cdb[MAX_CDB+MAX_SENSE];/* SCSI Command Descriptor Block */
/* REQUEST SENSE */
struct ccb { /* Command Control Block 5.3 */
u8 op; /* Command Control Block Operation Code */
u8 idlun; /* op=0,2:Target Id, op=1:Initiator Id */
/* Outbound data transfer, length is checked*/
/* Inbound data transfer, length is checked */
/* Logical Unit Number */
u8 cdblen; /* SCSI Command Length */
u8 rsalen; /* Request Sense Allocation Length/Disable */
u8 datalen[3]; /* Data Length (msb, .., lsb) */
u8 dataptr[3]; /* Data Pointer */
u8 linkptr[3]; /* Link Pointer */
u8 commlinkid; /* Command Linking Identifier */
u8 hastat; /* Host Adapter Status (HASTAT) */
u8 tarstat; /* Target Device Status */
u8 reserved[2];
u8 cdb[MAX_CDB+MAX_SENSE]; /* SCSI Command Descriptor Block */
/* REQUEST SENSE */
};
static int aha1542_detect(struct scsi_host_template *);
static int aha1542_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
static int aha1542_bus_reset(Scsi_Cmnd * SCpnt);
static int aha1542_dev_reset(Scsi_Cmnd * SCpnt);
static int aha1542_host_reset(Scsi_Cmnd * SCpnt);
#if 0
static int aha1542_old_abort(Scsi_Cmnd * SCpnt);
static int aha1542_old_reset(Scsi_Cmnd *, unsigned int);
#endif
static int aha1542_biosparam(struct scsi_device *, struct block_device *,
sector_t, int *);
#define AHA1542_REGION_SIZE 4
#define AHA1542_MAILBOXES 8
#define AHA1542_SCATTER 16
#define AHA1542_CMDLUN 1
#endif
#endif /* _AHA1542_H_ */


@ -10437,14 +10437,13 @@ ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
return;
}
}
lstate = kmalloc(sizeof(*lstate), GFP_ATOMIC);
lstate = kzalloc(sizeof(*lstate), GFP_ATOMIC);
if (lstate == NULL) {
xpt_print_path(ccb->ccb_h.path);
printk("Couldn't allocate lstate\n");
ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
return;
}
memset(lstate, 0, sizeof(*lstate));
status = xpt_create_path(&lstate->path, /*periph*/NULL,
xpt_path_path_id(ccb->ccb_h.path),
xpt_path_target_id(ccb->ccb_h.path),


@ -1326,10 +1326,9 @@ int
ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
{
ahd->platform_data =
kmalloc(sizeof(struct ahd_platform_data), GFP_ATOMIC);
kzalloc(sizeof(struct ahd_platform_data), GFP_ATOMIC);
if (ahd->platform_data == NULL)
return (ENOMEM);
memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data));
ahd->platform_data->irq = AHD_LINUX_NOIRQ;
ahd_lockinit(ahd);
ahd->seltime = (aic79xx_seltime & 0x3) << 4;


@ -4464,10 +4464,9 @@ ahc_softc_init(struct ahc_softc *ahc)
ahc->pause = ahc->unpause | PAUSE;
/* XXX The shared scb data stuff should be deprecated */
if (ahc->scb_data == NULL) {
ahc->scb_data = kmalloc(sizeof(*ahc->scb_data), GFP_ATOMIC);
ahc->scb_data = kzalloc(sizeof(*ahc->scb_data), GFP_ATOMIC);
if (ahc->scb_data == NULL)
return (ENOMEM);
memset(ahc->scb_data, 0, sizeof(*ahc->scb_data));
}
return (0);
@ -4780,10 +4779,10 @@ ahc_init_scbdata(struct ahc_softc *ahc)
SLIST_INIT(&scb_data->sg_maps);
/* Allocate SCB resources */
scb_data->scbarray = kmalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, GFP_ATOMIC);
scb_data->scbarray = kzalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC,
GFP_ATOMIC);
if (scb_data->scbarray == NULL)
return (ENOMEM);
memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC);
/* Determine the number of hardware SCBs and initialize them */
@ -7558,14 +7557,13 @@ ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
return;
}
}
lstate = kmalloc(sizeof(*lstate), GFP_ATOMIC);
lstate = kzalloc(sizeof(*lstate), GFP_ATOMIC);
if (lstate == NULL) {
xpt_print_path(ccb->ccb_h.path);
printk("Couldn't allocate lstate\n");
ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
return;
}
memset(lstate, 0, sizeof(*lstate));
status = xpt_create_path(&lstate->path, /*periph*/NULL,
xpt_path_path_id(ccb->ccb_h.path),
xpt_path_target_id(ccb->ccb_h.path),


@ -1214,10 +1214,9 @@ ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
{
ahc->platform_data =
kmalloc(sizeof(struct ahc_platform_data), GFP_ATOMIC);
kzalloc(sizeof(struct ahc_platform_data), GFP_ATOMIC);
if (ahc->platform_data == NULL)
return (ENOMEM);
memset(ahc->platform_data, 0, sizeof(struct ahc_platform_data));
ahc->platform_data->irq = AHC_LINUX_NOIRQ;
ahc_lockinit(ahc);
ahc->seltime = (aic7xxx_seltime & 0x3) << 4;


@ -1486,7 +1486,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
* selection.
*/
timeout = jiffies + (250 * HZ / 1000);
timeout = jiffies + msecs_to_jiffies(250);
/*
* XXX very interesting - we're seeing a bounce where the BSY we


@ -1014,7 +1014,6 @@ static struct platform_driver atari_scsi_driver = {
.remove = __exit_p(atari_scsi_remove),
.driver = {
.name = DRV_MODULE_NAME,
.owner = THIS_MODULE,
},
};


@ -57,9 +57,9 @@
*/
/* settings for DTC3181E card with only Mustek scanner attached */
#define USLEEP_POLL 1
#define USLEEP_SLEEP 20
#define USLEEP_WAITLONG 500
#define USLEEP_POLL msecs_to_jiffies(10)
#define USLEEP_SLEEP msecs_to_jiffies(200)
#define USLEEP_WAITLONG msecs_to_jiffies(5000)
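/* Polling intervals are given in milliseconds and converted to jiffies. */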
#define AUTOPROBE_IRQ
@ -723,7 +723,7 @@ module_param(ncr_53c400a, int, 0);
module_param(dtc_3181e, int, 0);
MODULE_LICENSE("GPL");
#ifndef SCSI_G_NCR5380_MEM
#if !defined(SCSI_G_NCR5380_MEM) && defined(MODULE)
static struct isapnp_device_id id_table[] = {
{
ISAPNP_ANY_ID, ISAPNP_ANY_ID,


@ -99,6 +99,7 @@ static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
@ -221,6 +222,8 @@ MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
"[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
@ -495,6 +498,10 @@ struct ipr_error_table_t ipr_error_table[] = {
"4061: Multipath redundancy level got better"},
{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
"4060: Multipath redundancy level got worse"},
{0x06808100, 0, IPR_DEFAULT_LOG_LEVEL,
"9083: Device raw mode enabled"},
{0x06808200, 0, IPR_DEFAULT_LOG_LEVEL,
"9084: Device raw mode disabled"},
{0x07270000, 0, 0,
"Failure due to other device"},
{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
@ -1462,7 +1469,8 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
if (ioasc) {
if (ioasc != IPR_IOASC_IOA_WAS_RESET)
if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
dev_err(&ioa_cfg->pdev->dev,
"Host RCB failed with IOASC: 0x%08X\n", ioasc);
@ -2566,7 +2574,8 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
ipr_handle_log_data(ioa_cfg, hostrcb);
if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
} else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
dev_err(&ioa_cfg->pdev->dev,
"Host RCB failed with IOASC: 0x%08X\n", ioasc);
}
@ -4491,11 +4500,83 @@ static struct device_attribute ipr_resource_type_attr = {
.show = ipr_show_resource_type
};
/**
* ipr_show_raw_mode - Show the adapter's raw mode
* @dev: class device struct
* @buf: buffer
*
* Return value:
* number of bytes printed to buffer
**/
static ssize_t ipr_show_raw_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
struct ipr_resource_entry *res;
unsigned long lock_flags = 0;
ssize_t len;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
res = (struct ipr_resource_entry *)sdev->hostdata;
if (res)
len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
else
len = -ENXIO;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return len;
}
/**
* ipr_store_raw_mode - Change the adapter's raw mode
* @dev: class device struct
* @buf: buffer
*
* Return value:
* number of bytes consumed from the buffer
**/
static ssize_t ipr_store_raw_mode(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
struct ipr_resource_entry *res;
unsigned long lock_flags = 0;
ssize_t len;
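/* Raw mode may only be toggled on SIS64 AF DASD resources. */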
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
res = (struct ipr_resource_entry *)sdev->hostdata;
if (res) {
if (ioa_cfg->sis64 && ipr_is_af_dasd_device(res)) {
res->raw_mode = simple_strtoul(buf, NULL, 10);
len = strlen(buf);
if (res->sdev)
sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
res->raw_mode ? "enabled" : "disabled");
} else
len = -EINVAL;
} else
len = -ENXIO;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return len;
}
static struct device_attribute ipr_raw_mode_attr = {
.attr = {
.name = "raw_mode",
.mode = S_IRUGO | S_IWUSR,
},
.show = ipr_show_raw_mode,
.store = ipr_store_raw_mode
};
static struct device_attribute *ipr_dev_attrs[] = {
&ipr_adapter_handle_attr,
&ipr_resource_path_attr,
&ipr_device_id_attr,
&ipr_resource_type_attr,
&ipr_raw_mode_attr,
NULL,
};
@ -5379,9 +5460,6 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
/* Mask the interrupt */
writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
/* Clear the interrupt */
writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
list_del(&ioa_cfg->reset_cmd->queue);
@ -6150,6 +6228,13 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
break;
case IPR_IOASC_NR_INIT_CMD_REQUIRED:
break;
case IPR_IOASC_IR_NON_OPTIMIZED:
if (res->raw_mode) {
res->raw_mode = 0;
scsi_cmd->result |= (DID_IMM_RETRY << 16);
} else
scsi_cmd->result |= (DID_ERROR << 16);
break;
default:
if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
scsi_cmd->result |= (DID_ERROR << 16);
@ -6289,6 +6374,8 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
(!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
}
if (res->raw_mode && ipr_is_af_dasd_device(res))
ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
if (ioa_cfg->sis64)
rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
@ -6402,7 +6489,6 @@ static struct scsi_host_template driver_template = {
.shost_attrs = ipr_ioa_attrs,
.sdev_attrs = ipr_dev_attrs,
.proc_name = IPR_NAME,
.no_write_same = 1,
.use_blk_tags = 1,
};
@ -8318,13 +8404,38 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
{
ENTER;
pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
ipr_cmd->job_step = ipr_reset_bist_done;
ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
LEAVE;
return IPR_RC_JOB_RETURN;
}
/**
* ipr_reset_reset_work - Pulse a PCIe fundamental reset
* @work: work struct
*
* Description: This pulses a warm reset to the slot.
*
**/
static void ipr_reset_reset_work(struct work_struct *work)
{
struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct pci_dev *pdev = ioa_cfg->pdev;
unsigned long lock_flags = 0;
ENTER;
pci_set_pcie_reset_state(pdev, pcie_warm_reset);
msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
if (ioa_cfg->reset_cmd == ipr_cmd)
ipr_reset_ioa_job(ipr_cmd);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
LEAVE;
}
/**
* ipr_reset_slot_reset - Reset the PCI slot of the adapter.
* @ipr_cmd: ipr command struct
@ -8337,12 +8448,11 @@ static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct pci_dev *pdev = ioa_cfg->pdev;
ENTER;
pci_set_pcie_reset_state(pdev, pcie_warm_reset);
INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
ipr_cmd->job_step = ipr_reset_slot_reset_done;
ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
LEAVE;
return IPR_RC_JOB_RETURN;
}
@ -8479,6 +8589,122 @@ static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
return IPR_RC_JOB_RETURN;
}
/**
* ipr_reset_quiesce_done - Complete IOA disconnect
* @ipr_cmd: ipr command struct
*
* Description: Freeze the adapter to complete quiesce processing
*
* Return value:
* IPR_RC_JOB_CONTINUE
**/
static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ENTER;
ipr_cmd->job_step = ipr_ioa_bringdown_done;
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
LEAVE;
return IPR_RC_JOB_CONTINUE;
}
/**
* ipr_reset_cancel_hcam_done - Check for outstanding commands
* @ipr_cmd: ipr command struct
*
* Description: Ensure nothing is outstanding to the IOA and
* proceed with IOA disconnect. Otherwise reset the IOA.
*
* Return value:
* IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
**/
static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_cmnd *loop_cmd;
struct ipr_hrr_queue *hrrq;
int rc = IPR_RC_JOB_CONTINUE;
int count = 0;
ENTER;
ipr_cmd->job_step = ipr_reset_quiesce_done;
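/*
 * Any command still pending on an HRRQ means the quiesce cannot
 * complete, so fall back to a full adapter reset instead.
 */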
for_each_hrrq(hrrq, ioa_cfg) {
spin_lock(&hrrq->_lock);
list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
count++;
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
rc = IPR_RC_JOB_RETURN;
break;
}
spin_unlock(&hrrq->_lock);
if (count)
break;
}
LEAVE;
return rc;
}
/**
* ipr_reset_cancel_hcam - Cancel outstanding HCAMs
* @ipr_cmd: ipr command struct
*
* Description: Cancel any outstanding HCAMs to the IOA.
*
* Return value:
* IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
**/
static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
int rc = IPR_RC_JOB_CONTINUE;
struct ipr_cmd_pkt *cmd_pkt;
struct ipr_cmnd *hcam_cmd;
struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
ENTER;
ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
if (!hrrq->ioa_is_dead) {
if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
continue;
ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
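/*
 * The 64-bit IOARCB address of the HCAM being cancelled is split
 * across the CDB: bytes 10-13 carry the high word, bytes 2-5 the
 * low word, most-significant byte first.
 */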
cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
IPR_CANCEL_TIMEOUT);
rc = IPR_RC_JOB_RETURN;
ipr_cmd->job_step = ipr_reset_cancel_hcam;
break;
}
}
} else
ipr_cmd->job_step = ipr_reset_alert;
LEAVE;
return rc;
}
/**
* ipr_reset_ucode_download_done - Microcode download completion
* @ipr_cmd: ipr command struct
@ -8561,7 +8787,9 @@ static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
int rc = IPR_RC_JOB_CONTINUE;
ENTER;
if (shutdown_type != IPR_SHUTDOWN_NONE &&
if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
ipr_cmd->job_step = ipr_reset_cancel_hcam;
else if (shutdown_type != IPR_SHUTDOWN_NONE &&
!ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
@ -8917,13 +9145,15 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
int i;
for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
if (ioa_cfg->ipr_cmnd_list[i])
dma_pool_free(ioa_cfg->ipr_cmd_pool,
ioa_cfg->ipr_cmnd_list[i],
ioa_cfg->ipr_cmnd_list_dma[i]);
if (ioa_cfg->ipr_cmnd_list) {
for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
if (ioa_cfg->ipr_cmnd_list[i])
dma_pool_free(ioa_cfg->ipr_cmd_pool,
ioa_cfg->ipr_cmnd_list[i],
ioa_cfg->ipr_cmnd_list_dma[i]);
ioa_cfg->ipr_cmnd_list[i] = NULL;
}
}
if (ioa_cfg->ipr_cmd_pool)
@ -8972,6 +9202,38 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
kfree(ioa_cfg->trace);
}
/**
* ipr_free_irqs - Free all allocated IRQs for the adapter.
* @ioa_cfg: ipr cfg struct
*
* This function frees all allocated IRQs for the
* specified adapter.
*
* Return value:
* none
**/
static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
struct pci_dev *pdev = ioa_cfg->pdev;
if (ioa_cfg->intr_flag == IPR_USE_MSI ||
ioa_cfg->intr_flag == IPR_USE_MSIX) {
int i;
for (i = 0; i < ioa_cfg->nvectors; i++)
free_irq(ioa_cfg->vectors_info[i].vec,
&ioa_cfg->hrrq[i]);
} else
free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
if (ioa_cfg->intr_flag == IPR_USE_MSI) {
pci_disable_msi(pdev);
ioa_cfg->intr_flag &= ~IPR_USE_MSI;
} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
pci_disable_msix(pdev);
ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
}
}
/**
* ipr_free_all_resources - Free all allocated resources for an adapter.
* @ipr_cmd: ipr command struct
@ -8987,23 +9249,9 @@ static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
struct pci_dev *pdev = ioa_cfg->pdev;
ENTER;
if (ioa_cfg->intr_flag == IPR_USE_MSI ||
ioa_cfg->intr_flag == IPR_USE_MSIX) {
int i;
for (i = 0; i < ioa_cfg->nvectors; i++)
free_irq(ioa_cfg->vectors_info[i].vec,
&ioa_cfg->hrrq[i]);
} else
free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
if (ioa_cfg->intr_flag == IPR_USE_MSI) {
pci_disable_msi(pdev);
ioa_cfg->intr_flag &= ~IPR_USE_MSI;
} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
pci_disable_msix(pdev);
ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
}
ipr_free_irqs(ioa_cfg);
if (ioa_cfg->reset_work_q)
destroy_workqueue(ioa_cfg->reset_work_q);
iounmap(ioa_cfg->hdw_dma_regs);
pci_release_regions(pdev);
ipr_free_mem(ioa_cfg);
@ -9823,6 +10071,14 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
(dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
ioa_cfg->needs_warm_reset = 1;
ioa_cfg->reset = ipr_reset_slot_reset;
ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
WQ_MEM_RECLAIM, host->host_no);
if (!ioa_cfg->reset_work_q) {
dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
goto out_free_irq;
}
} else
ioa_cfg->reset = ipr_reset_start_bist;
@ -9834,6 +10090,8 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
out:
return rc;
out_free_irq:
ipr_free_irqs(ioa_cfg);
cleanup_nolog:
ipr_free_mem(ioa_cfg);
out_msi_disable:
@ -9914,6 +10172,8 @@ static void __ipr_remove(struct pci_dev *pdev)
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
flush_work(&ioa_cfg->work_q);
if (ioa_cfg->reset_work_q)
flush_workqueue(ioa_cfg->reset_work_q);
INIT_LIST_HEAD(&ioa_cfg->used_res_q);
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
@ -10036,6 +10296,7 @@ static void ipr_shutdown(struct pci_dev *pdev)
{
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
unsigned long lock_flags = 0;
enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
int i;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@ -10051,9 +10312,16 @@ static void ipr_shutdown(struct pci_dev *pdev)
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
}
ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
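/*
 * On a fast reboot of a SIS64 adapter, quiesce (cancel HCAMs and
 * disconnect) rather than perform a full normal shutdown.
 */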
if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
shutdown_type = IPR_SHUTDOWN_QUIESCE;
ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
ipr_free_irqs(ioa_cfg);
pci_disable_device(ioa_cfg->pdev);
}
}
static struct pci_device_id ipr_pci_table[] = {
@ -10211,7 +10479,8 @@ static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
(ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
continue;
}


@ -39,8 +39,8 @@
/*
* Literals
*/
#define IPR_DRIVER_VERSION "2.6.0"
#define IPR_DRIVER_DATE "(November 16, 2012)"
#define IPR_DRIVER_VERSION "2.6.1"
#define IPR_DRIVER_DATE "(March 12, 2015)"
/*
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@ -138,6 +138,7 @@
#define IPR_IOASC_BUS_WAS_RESET 0x06290000
#define IPR_IOASC_BUS_WAS_RESET_BY_OTHER 0x06298000
#define IPR_IOASC_ABORTED_CMD_TERM_BY_HOST 0x0B5A0000
#define IPR_IOASC_IR_NON_OPTIMIZED 0x05258200
#define IPR_FIRST_DRIVER_IOASC 0x10000000
#define IPR_IOASC_IOA_WAS_RESET 0x10000001
@ -196,6 +197,8 @@
/*
* Adapter Commands
*/
#define IPR_CANCEL_REQUEST 0xC0
#define IPR_CANCEL_64BIT_IOARCB 0x01
#define IPR_QUERY_RSRC_STATE 0xC2
#define IPR_RESET_DEVICE 0xC3
#define IPR_RESET_TYPE_SELECT 0x80
@ -222,6 +225,7 @@
#define IPR_ABBREV_SHUTDOWN_TIMEOUT (10 * HZ)
#define IPR_DUAL_IOA_ABBR_SHUTDOWN_TO (2 * 60 * HZ)
#define IPR_DEVICE_RESET_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
#define IPR_CANCEL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
#define IPR_CANCEL_ALL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
#define IPR_ABORT_TASK_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
#define IPR_INTERNAL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
@ -518,6 +522,7 @@ struct ipr_cmd_pkt {
#define IPR_RQTYPE_IOACMD 0x01
#define IPR_RQTYPE_HCAM 0x02
#define IPR_RQTYPE_ATA_PASSTHRU 0x04
#define IPR_RQTYPE_PIPE 0x05
u8 reserved2;
@ -1271,6 +1276,7 @@ struct ipr_resource_entry {
u8 del_from_ml:1;
u8 resetting_device:1;
u8 reset_occurred:1;
u8 raw_mode:1;
u32 bus; /* AKA channel */
u32 target; /* AKA id */
@ -1402,7 +1408,8 @@ enum ipr_shutdown_type {
IPR_SHUTDOWN_NORMAL = 0x00,
IPR_SHUTDOWN_PREPARE_FOR_NORMAL = 0x40,
IPR_SHUTDOWN_ABBREV = 0x80,
IPR_SHUTDOWN_NONE = 0x100
IPR_SHUTDOWN_NONE = 0x100,
IPR_SHUTDOWN_QUIESCE = 0x101,
};
struct ipr_trace_entry {
@ -1536,6 +1543,7 @@ struct ipr_ioa_cfg {
u8 saved_mode_page_len;
struct work_struct work_q;
struct workqueue_struct *reset_work_q;
wait_queue_head_t reset_wait_q;
wait_queue_head_t msi_wait_q;
@ -1587,6 +1595,7 @@ struct ipr_cmnd {
struct ata_queued_cmd *qc;
struct completion completion;
struct timer_list timer;
struct work_struct work;
void (*fast_done) (struct ipr_cmnd *);
void (*done) (struct ipr_cmnd *);
int (*job_step) (struct ipr_cmnd *);


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -413,6 +413,9 @@ struct lpfc_vport {
uint32_t cfg_fcp_class;
uint32_t cfg_use_adisc;
uint32_t cfg_fdmi_on;
#define LPFC_FDMI_SUPPORT 1 /* bit 0 - FDMI supported? */
#define LPFC_FDMI_REG_DELAY 2 /* bit 1 - 60 sec registration delay */
#define LPFC_FDMI_ALL_ATTRIB 4 /* bit 2 - register ALL attributes? */
uint32_t cfg_discovery_threads;
uint32_t cfg_log_verbose;
uint32_t cfg_max_luns;


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -406,8 +406,13 @@ lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
char fwrev[FW_REV_STR_SIZE];
return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
if (phba->sli_rev < LPFC_SLI_REV4)
return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
lpfc_decode_firmware_rev(phba, fwrev, 1);
return snprintf(buf, PAGE_SIZE, "%s\n", fwrev);
}
/**
@ -4568,12 +4573,18 @@ LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
/*
# lpfc_fdmi_on: controls FDMI support.
# 0 = no FDMI support
# 1 = support FDMI without attribute of hostname
# 2 = support FDMI with attribute of hostname
# Value range [0,2]. Default value is 0.
# Set                           NOT Set
# bit 0 = FDMI support          no FDMI support
#         LPFC_FDMI_SUPPORT just turns basic support on/off
# bit 1 = Register delay        no register delay (60 seconds)
#         LPFC_FDMI_REG_DELAY adds a 60 sec registration delay after FDMI login
# bit 2 = All attributes        use an attribute subset
#         LPFC_FDMI_ALL_ATTRIB applies to both port and HBA attributes
# Port attributes subset: 1 thru 6 OR all: 1 thru 0xd 0x101 0x102 0x103
# HBA attributes subset: 1 thru 0xb OR all: 1 thru 0xc
# Value range [0,7]. Default value is 0.
*/
LPFC_VPORT_ATTR_RW(fdmi_on, 0, 0, 2, "Enable FDMI support");
LPFC_VPORT_ATTR_RW(fdmi_on, 0, 0, 7, "Enable FDMI support");
/*
# Specifies the maximum number of ELS cmds we can have outstanding (for


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2009-2014 Emulex. All rights reserved. *
* Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -3194,6 +3194,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
cmd->unsli3.rcvsli3.ox_id = 0xffff;
}
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK;
cmdiocbq->vport = phba->pport;
cmdiocbq->iocb_cmpl = NULL;
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
@ -4179,6 +4180,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
switch (opcode) {
case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
case COMN_OPCODE_GET_PROFILE_CONFIG:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3106 Handled SLI_CONFIG "
"subsys_comn, opcode:x%x\n",


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2010-2014 Emulex. All rights reserved. *
* Copyright (C) 2010-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -246,6 +246,7 @@ struct lpfc_sli_config_emb1_subsys {
#define lpfc_emb1_subcmnd_subsys_WORD word6
/* Subsystem COMN (0x01) OpCodes */
#define SLI_CONFIG_SUBSYS_COMN 0x01
#define COMN_OPCODE_GET_PROFILE_CONFIG 0xA4
#define COMN_OPCODE_READ_OBJECT 0xAB
#define COMN_OPCODE_WRITE_OBJECT 0xAC
#define COMN_OPCODE_READ_OBJECT_LIST 0xAD


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -284,6 +284,7 @@ void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
struct lpfc_sli_ring *, uint32_t);
void lpfc_sli4_handle_received_buffer(struct lpfc_hba *, struct hbq_dmabuf *);
void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
struct lpfc_iocbq *, uint32_t);
void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
@ -354,6 +355,7 @@ void lpfc_free_sysfs_attr(struct lpfc_vport *);
extern struct device_attribute *lpfc_hba_attrs[];
extern struct device_attribute *lpfc_vport_attrs[];
extern struct scsi_host_template lpfc_template;
extern struct scsi_host_template lpfc_template_s3;
extern struct scsi_host_template lpfc_vport_template;
extern struct fc_function_template lpfc_transport_functions;
extern struct fc_function_template lpfc_vport_transport_functions;

File diff suppressed because it is too large.


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2007-2014 Emulex. All rights reserved. *
* Copyright (C) 2007-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -2243,8 +2243,7 @@ lpfc_adisc_done(struct lpfc_vport *vport)
*/
if (vport->port_state < LPFC_VPORT_READY) {
/* If we get here, there is nothing to ADISC */
if (vport->port_type == LPFC_PHYSICAL_PORT)
lpfc_issue_clear_la(phba, vport);
lpfc_issue_clear_la(phba, vport);
if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
vport->num_disc_nodes = 0;
/* go thru NPR list, issue ELS PLOGIs */
@ -3338,7 +3337,11 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* FLOGI retry policy */
retry = 1;
/* retry FLOGI forever */
maxretry = 0;
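/* Loopback mode caps FLOGI at two retries; otherwise retry forever. */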
if (phba->link_flag != LS_LOOPBACK_MODE)
maxretry = 0;
else
maxretry = 2;
if (cmdiocb->retry >= 100)
delay = 5000;
else if (cmdiocb->retry >= 32)
@ -3701,6 +3704,11 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
if (ndlp) {
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"0006 rpi%x DID:%x flg:%x %d map:%x %p\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
atomic_read(&ndlp->kref.refcount),
ndlp->nlp_usg_map, ndlp);
if (NLP_CHK_NODE_ACT(ndlp)) {
lpfc_nlp_put(ndlp);
/* This is the end of the default RPI cleanup logic for
@ -5198,7 +5206,6 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
port_state = vport->port_state;
vport->fc_flag |= FC_PT2PT;
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
vport->port_state = LPFC_FLOGI;
spin_unlock_irq(shost->host_lock);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3311 Rcv Flogi PS x%x new PS x%x "
@ -7173,7 +7180,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
return;
}
if (vport->cfg_fdmi_on) {
if (vport->cfg_fdmi_on & LPFC_FDMI_SUPPORT) {
/* If this is the first time, allocate an ndlp and initialize
* it. Otherwise, make sure the node is enabled and then do the
* login.


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -3439,6 +3439,11 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pmb->context1 = NULL;
pmb->context2 = NULL;
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
"0002 rpi:%x DID:%x flg:%x %d map:%x %p\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
atomic_read(&ndlp->kref.refcount),
ndlp->nlp_usg_map, ndlp);
if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
@ -3855,6 +3860,11 @@ out:
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
"0003 rpi:%x DID:%x flg:%x %d map%x %p\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
atomic_read(&ndlp->kref.refcount),
ndlp->nlp_usg_map, ndlp);
if (vport->port_state < LPFC_VPORT_READY) {
/* Link up discovery requires Fabric registration. */
@ -4250,8 +4260,15 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
if (vport->phba->sli_rev == LPFC_SLI_REV4)
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0008 rpi:%x DID:%x flg:%x refcnt:%d "
"map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag,
atomic_read(&ndlp->kref.refcount),
ndlp->nlp_usg_map, ndlp);
}
if (state != NLP_STE_UNUSED_NODE)
@ -4276,9 +4293,12 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
return;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
if (vport->phba->sli_rev == LPFC_SLI_REV4)
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
lpfc_cleanup_vports_rrqs(vport, ndlp);
lpfc_nlp_put(ndlp);
lpfc_unreg_rpi(vport, ndlp);
} else {
lpfc_nlp_put(ndlp);
}
return;
}
@ -4515,7 +4535,17 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
mbox->context1 = ndlp;
mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
} else {
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
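/*
 * SLI4 if_type 2 ports that are not unloading take a node reference
 * and use the completion handler that also clears the RPI; all other
 * cases use the default mailbox completion.
 */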
if (phba->sli_rev == LPFC_SLI_REV4 &&
(!(vport->load_flag & FC_UNLOADING)) &&
(bf_get(lpfc_sli_intf_if_type,
&phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_IF_TYPE_2)) {
mbox->context1 = lpfc_nlp_get(ndlp);
mbox->mbox_cmpl =
lpfc_sli4_unreg_rpi_cmpl_clr;
} else
mbox->mbox_cmpl =
lpfc_sli_def_mbox_cmpl;
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@ -4741,6 +4771,11 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* For this case we need to cleanup the default rpi
* allocated by the firmware.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0005 rpi:%x DID:%x flg:%x %d map:%x %p\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
atomic_read(&ndlp->kref.refcount),
ndlp->nlp_usg_map, ndlp);
if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
!= NULL) {
rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
@ -5070,8 +5105,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
!(vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_RSCN_MODE) &&
(phba->sli_rev < LPFC_SLI_REV4)) {
if (vport->port_type == LPFC_PHYSICAL_PORT)
lpfc_issue_clear_la(phba, vport);
lpfc_issue_clear_la(phba, vport);
lpfc_issue_reg_vpi(phba, vport);
return;
}
@ -5082,8 +5116,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
*/
if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
/* If we get here, there is nothing to ADISC */
if (vport->port_type == LPFC_PHYSICAL_PORT)
lpfc_issue_clear_la(phba, vport);
lpfc_issue_clear_la(phba, vport);
if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
vport->num_disc_nodes = 0;
@ -5484,18 +5517,22 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
"0004 rpi:%x DID:%x flg:%x %d map:%x %p\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
atomic_read(&ndlp->kref.refcount),
ndlp->nlp_usg_map, ndlp);
/*
* Start issuing Fabric-Device Management Interface (FDMI) command to
* 0xfffffa (FDMI well known port) or delay issuing the FDMI command if
* the LPFC_FDMI_REG_DELAY bit is set (supporting RPA/hostname)
*/
if (vport->cfg_fdmi_on == 1)
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
else
if (vport->cfg_fdmi_on & LPFC_FDMI_REG_DELAY)
mod_timer(&vport->fc_fdmitmo,
jiffies + msecs_to_jiffies(1000 * 60));
else
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
/* decrement the node reference count held for this callback
* function.
@ -5650,6 +5687,13 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
INIT_LIST_HEAD(&ndlp->nlp_listp);
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0007 rpi:%x DID:%x flg:%x refcnt:%d "
"map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag,
atomic_read(&ndlp->kref.refcount),
ndlp->nlp_usg_map, ndlp);
ndlp->active_rrqs_xri_bitmap =
mempool_alloc(vport->phba->active_rrq_pool,
GFP_KERNEL);
@ -5684,9 +5728,9 @@ lpfc_nlp_release(struct kref *kref)
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"0279 lpfc_nlp_release: ndlp:x%p did %x "
"usgmap:x%x refcnt:%d\n",
"usgmap:x%x refcnt:%d rpi:%x\n",
(void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
atomic_read(&ndlp->kref.refcount));
atomic_read(&ndlp->kref.refcount), ndlp->nlp_rpi);
/* remove ndlp from action. */
lpfc_nlp_remove(ndlp->vport, ndlp);


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -107,6 +107,7 @@ struct lpfc_sli_ct_request {
uint8_t ReasonCode;
uint8_t Explanation;
uint8_t VendorUnique;
#define LPFC_CT_PREAMBLE 20 /* Size of CTReq + 4 up to here */
union {
uint32_t PortID;
@ -170,6 +171,8 @@ struct lpfc_sli_ct_request {
} un;
};
#define LPFC_MAX_CT_SIZE (60 * 4096)
#define SLI_CT_REVISION 1
#define GID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct gid))
@ -1007,78 +1010,45 @@ typedef struct _ELS_PKT { /* Structure is in Big Endian format */
} un;
} ELS_PKT;
/*
* FDMI
* HBA Management Operations Command Codes
*/
#define SLI_MGMT_GRHL 0x100 /* Get registered HBA list */
#define SLI_MGMT_GHAT 0x101 /* Get HBA attributes */
#define SLI_MGMT_GRPL 0x102 /* Get registered Port list */
#define SLI_MGMT_GPAT 0x110 /* Get Port attributes */
#define SLI_MGMT_RHBA 0x200 /* Register HBA */
#define SLI_MGMT_RHAT 0x201 /* Register HBA attributes */
#define SLI_MGMT_RPRT 0x210 /* Register Port */
#define SLI_MGMT_RPA 0x211 /* Register Port attributes */
#define SLI_MGMT_DHBA 0x300 /* De-register HBA */
#define SLI_MGMT_DPRT 0x310 /* De-register Port */
/******** FDMI ********/
/* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */
#define SLI_CT_FDMI_Subtypes 0x10 /* Management Service Subtype */
/*
* Management Service Subtypes
* Registered Port List Format
*/
#define SLI_CT_FDMI_Subtypes 0x10
/*
* HBA Management Service Reject Code
*/
#define REJECT_CODE 0x9 /* Unable to perform command request */
/*
* HBA Management Service Reject Reason Code
* Please refer to the Reason Codes above
*/
/*
* HBA Attribute Types
*/
#define NODE_NAME 0x1
#define MANUFACTURER 0x2
#define SERIAL_NUMBER 0x3
#define MODEL 0x4
#define MODEL_DESCRIPTION 0x5
#define HARDWARE_VERSION 0x6
#define DRIVER_VERSION 0x7
#define OPTION_ROM_VERSION 0x8
#define FIRMWARE_VERSION 0x9
#define OS_NAME_VERSION 0xa
#define MAX_CT_PAYLOAD_LEN 0xb
/*
* Port Attribute Types
*/
#define SUPPORTED_FC4_TYPES 0x1
#define SUPPORTED_SPEED 0x2
#define PORT_SPEED 0x3
#define MAX_FRAME_SIZE 0x4
#define OS_DEVICE_NAME 0x5
#define HOST_NAME 0x6
union AttributesDef {
/* Structure is in Big Endian format */
struct {
uint32_t AttrType:16;
uint32_t AttrLen:16;
} bits;
uint32_t word;
struct lpfc_fdmi_reg_port_list {
uint32_t EntryCnt;
uint32_t pe; /* Variable-length array */
};
/*
* HBA Attribute Entry (8 - 260 bytes)
*/
typedef struct {
union AttributesDef ad;
/* Definitions for HBA / Port attribute entries */
struct lpfc_fdmi_attr_def { /* Defined in TLV format */
/* Structure is in Big Endian format */
uint32_t AttrType:16;
uint32_t AttrLen:16;
uint32_t AttrValue; /* Marks start of Value (ATTRIBUTE_ENTRY) */
};
/* Attribute Entry */
struct lpfc_fdmi_attr_entry {
union {
uint32_t VendorSpecific;
uint32_t SupportClass;
uint32_t SupportSpeed;
uint32_t PortSpeed;
uint32_t MaxFrameSize;
uint32_t MaxCTPayloadLen;
uint32_t PortState;
uint32_t PortId;
struct lpfc_name NodeName;
struct lpfc_name PortName;
struct lpfc_name FabricName;
uint8_t FC4Types[32];
uint8_t Manufacturer[64];
uint8_t SerialNumber[64];
uint8_t Model[256];
@ -1087,97 +1057,115 @@ typedef struct {
uint8_t DriverVersion[256];
uint8_t OptionROMVersion[256];
uint8_t FirmwareVersion[256];
struct lpfc_name NodeName;
uint8_t SupportFC4Types[32];
uint32_t SupportSpeed;
uint32_t PortSpeed;
uint32_t MaxFrameSize;
uint8_t OsHostName[256];
uint8_t NodeSymName[256];
uint8_t OsDeviceName[256];
uint8_t OsNameVersion[256];
uint32_t MaxCTPayloadLen;
uint8_t HostName[256];
} un;
} ATTRIBUTE_ENTRY;
};
#define LPFC_FDMI_MAX_AE_SIZE sizeof(struct lpfc_fdmi_attr_entry)
/*
* HBA Attribute Block
*/
typedef struct {
uint32_t EntryCnt; /* Number of HBA attribute entries */
ATTRIBUTE_ENTRY Entry; /* Variable-length array */
} ATTRIBUTE_BLOCK;
struct lpfc_fdmi_attr_block {
uint32_t EntryCnt; /* Number of HBA attribute entries */
struct lpfc_fdmi_attr_entry Entry; /* Variable-length array */
};
/*
* Port Entry
*/
typedef struct {
struct lpfc_fdmi_port_entry {
struct lpfc_name PortName;
} PORT_ENTRY;
};
/*
* HBA Identifier
*/
typedef struct {
struct lpfc_fdmi_hba_ident {
struct lpfc_name PortName;
} HBA_IDENTIFIER;
/*
* Registered Port List Format
*/
typedef struct {
uint32_t EntryCnt;
PORT_ENTRY pe; /* Variable-length array */
} REG_PORT_LIST;
};
/*
* Register HBA(RHBA)
*/
typedef struct {
HBA_IDENTIFIER hi;
REG_PORT_LIST rpl; /* variable-length array */
/* ATTRIBUTE_BLOCK ab; */
} REG_HBA;
struct lpfc_fdmi_reg_hba {
struct lpfc_fdmi_hba_ident hi;
struct lpfc_fdmi_reg_port_list rpl; /* variable-length array */
/* struct lpfc_fdmi_attr_block ab; */
};
/*
* Register HBA Attributes (RHAT)
*/
typedef struct {
struct lpfc_fdmi_reg_hbaattr {
struct lpfc_name HBA_PortName;
ATTRIBUTE_BLOCK ab;
} REG_HBA_ATTRIBUTE;
struct lpfc_fdmi_attr_block ab;
};
/*
* Register Port Attributes (RPA)
*/
typedef struct {
struct lpfc_fdmi_reg_portattr {
struct lpfc_name PortName;
ATTRIBUTE_BLOCK ab;
} REG_PORT_ATTRIBUTE;
struct lpfc_fdmi_attr_block ab;
};
/*
* Get Registered HBA List (GRHL) Accept Payload Format
* HBA Management Operations Command Codes
*/
typedef struct {
uint32_t HBA__Entry_Cnt; /* Number of Registered HBA Identifiers */
struct lpfc_name HBA_PortName; /* Variable-length array */
} GRHL_ACC_PAYLOAD;
#define SLI_MGMT_GRHL 0x100 /* Get registered HBA list */
#define SLI_MGMT_GHAT 0x101 /* Get HBA attributes */
#define SLI_MGMT_GRPL 0x102 /* Get registered Port list */
#define SLI_MGMT_GPAT 0x110 /* Get Port attributes */
#define SLI_MGMT_GPAS 0x120 /* Get Port Statistics */
#define SLI_MGMT_RHBA 0x200 /* Register HBA */
#define SLI_MGMT_RHAT 0x201 /* Register HBA attributes */
#define SLI_MGMT_RPRT 0x210 /* Register Port */
#define SLI_MGMT_RPA 0x211 /* Register Port attributes */
#define SLI_MGMT_DHBA 0x300 /* De-register HBA */
#define SLI_MGMT_DHAT 0x301 /* De-register HBA attributes */
#define SLI_MGMT_DPRT 0x310 /* De-register Port */
#define SLI_MGMT_DPA 0x311 /* De-register Port attributes */
/*
* Get Registered Port List (GRPL) Accept Payload Format
* HBA Attribute Types
*/
typedef struct {
uint32_t RPL_Entry_Cnt; /* Number of Registered Port Entries */
PORT_ENTRY Reg_Port_Entry[1]; /* Variable-length array */
} GRPL_ACC_PAYLOAD;
#define RHBA_NODENAME 0x1 /* 8 byte WWNN */
#define RHBA_MANUFACTURER 0x2 /* 4 to 64 byte ASCII string */
#define RHBA_SERIAL_NUMBER 0x3 /* 4 to 64 byte ASCII string */
#define RHBA_MODEL 0x4 /* 4 to 256 byte ASCII string */
#define RHBA_MODEL_DESCRIPTION 0x5 /* 4 to 256 byte ASCII string */
#define RHBA_HARDWARE_VERSION 0x6 /* 4 to 256 byte ASCII string */
#define RHBA_DRIVER_VERSION 0x7 /* 4 to 256 byte ASCII string */
#define RHBA_OPTION_ROM_VERSION 0x8 /* 4 to 256 byte ASCII string */
#define RHBA_FIRMWARE_VERSION 0x9 /* 4 to 256 byte ASCII string */
#define RHBA_OS_NAME_VERSION 0xa /* 4 to 256 byte ASCII string */
#define RHBA_MAX_CT_PAYLOAD_LEN 0xb /* 32-bit unsigned int */
#define RHBA_SYM_NODENAME 0xc /* 4 to 256 byte ASCII string */
/*
* Get Port Attributes (GPAT) Accept Payload Format
* Port Attribute Types
*/
typedef struct {
ATTRIBUTE_BLOCK pab;
} GPAT_ACC_PAYLOAD;
#define RPRT_SUPPORTED_FC4_TYPES 0x1 /* 32 byte binary array */
#define RPRT_SUPPORTED_SPEED 0x2 /* 32-bit unsigned int */
#define RPRT_PORT_SPEED 0x3 /* 32-bit unsigned int */
#define RPRT_MAX_FRAME_SIZE 0x4 /* 32-bit unsigned int */
#define RPRT_OS_DEVICE_NAME 0x5 /* 4 to 256 byte ASCII string */
#define RPRT_HOST_NAME 0x6 /* 4 to 256 byte ASCII string */
#define RPRT_NODENAME 0x7 /* 8 byte WWNN */
#define RPRT_PORTNAME 0x8 /* 8 byte WWNN */
#define RPRT_SYM_PORTNAME 0x9 /* 4 to 256 byte ASCII string */
#define RPRT_PORT_TYPE 0xa /* 32-bit unsigned int */
#define RPRT_SUPPORTED_CLASS 0xb /* 32-bit unsigned int */
#define RPRT_FABRICNAME 0xc /* 8 byte Fabric WWNN */
#define RPRT_ACTIVE_FC4_TYPES 0xd /* 32 byte binary array */
#define RPRT_PORT_STATE 0x101 /* 32-bit unsigned int */
#define RPRT_DISC_PORT 0x102 /* 32-bit unsigned int */
#define RPRT_PORT_ID 0x103 /* 32-bit unsigned int */
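For orientation: on the wire, each of the attribute codes above is carried as a
TLV inside the FDMI CT payload -- a 16-bit attribute type, a 16-bit length that
covers the 4-byte type/length header plus the value, and the value itself padded
to a 32-bit boundary. A minimal, driver-agnostic sketch (fdmi_encode_attr is a
hypothetical helper, not lpfc API; padding rules per FC-GS):

#include <stdint.h>
#include <string.h>

/* Hypothetical encoder for one FDMI attribute TLV (big-endian on the wire).
 * The length field covers the 4-byte header plus the value, padded to a
 * 32-bit boundary.  Returns the number of bytes written. */
static size_t fdmi_encode_attr(uint8_t *buf, uint16_t type,
			       const void *val, uint16_t val_len)
{
	uint16_t padded = (val_len + 3) & ~3;
	uint16_t total = 4 + padded;

	buf[0] = type >> 8;
	buf[1] = type & 0xff;
	buf[2] = total >> 8;
	buf[3] = total & 0xff;
	memset(buf + 4, 0, padded);	/* zero the pad bytes */
	memcpy(buf + 4, val, val_len);
	return total;
}

Usage sketch: fdmi_encode_attr(buf, RPRT_HOST_NAME, "host1", 5) would emit a
12-byte TLV (4-byte header plus "host1" padded to 8 bytes).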
/*
* Begin HBA configuration parameters.

View File

@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2009-2014 Emulex. All rights reserved. *
* Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -3085,6 +3085,9 @@ struct lpfc_acqe_link {
#define LPFC_ASYNC_LINK_SPEED_100MBPS 0x2
#define LPFC_ASYNC_LINK_SPEED_1GBPS 0x3
#define LPFC_ASYNC_LINK_SPEED_10GBPS 0x4
#define LPFC_ASYNC_LINK_SPEED_20GBPS 0x5
#define LPFC_ASYNC_LINK_SPEED_25GBPS 0x6
#define LPFC_ASYNC_LINK_SPEED_40GBPS 0x7
#define lpfc_acqe_link_duplex_SHIFT 16
#define lpfc_acqe_link_duplex_MASK 0x000000FF
#define lpfc_acqe_link_duplex_WORD word0
@ -3166,7 +3169,7 @@ struct lpfc_acqe_fc_la {
#define lpfc_acqe_fc_la_speed_SHIFT 24
#define lpfc_acqe_fc_la_speed_MASK 0x000000FF
#define lpfc_acqe_fc_la_speed_WORD word0
#define LPFC_FC_LA_SPEED_UNKOWN 0x0
#define LPFC_FC_LA_SPEED_UNKNOWN 0x0
#define LPFC_FC_LA_SPEED_1G 0x1
#define LPFC_FC_LA_SPEED_2G 0x2
#define LPFC_FC_LA_SPEED_4G 0x4
@ -3244,6 +3247,7 @@ struct lpfc_acqe_sli {
#define LPFC_SLI_EVENT_TYPE_NVLOG_POST 0x4
#define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5
#define LPFC_SLI_EVENT_TYPE_MISCONFIGURED 0x9
#define LPFC_SLI_EVENT_TYPE_REMOTE_DPORT 0xA
};
/*

View File

@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -1330,13 +1330,14 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
spin_lock_irq(&phba->hbalock);
phba->link_state = LPFC_HBA_ERROR;
spin_unlock_irq(&phba->hbalock);
lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
lpfc_offline(phba);
lpfc_sli4_brdreset(phba);
lpfc_hba_down_post(phba);
lpfc_sli4_post_status_check(phba);
lpfc_unblock_mgmt_io(phba);
phba->link_state = LPFC_HBA_ERROR;
}
/**
@ -1629,6 +1630,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
uint32_t uerrlo_reg, uemasklo_reg;
uint32_t pci_rd_rc1, pci_rd_rc2;
bool en_rn_msg = true;
struct temp_event temp_event_data;
int rc;
/* If the pci channel is offline, ignore possible errors, since
@ -1636,9 +1638,6 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
*/
if (pci_channel_offline(phba->pcidev))
return;
/* If resets are disabled then leave the HBA alone and return */
if (!phba->cfg_enable_hba_reset)
return;
if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
switch (if_type) {
@ -1654,6 +1653,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
return;
lpfc_sli4_offline_eratt(phba);
break;
case LPFC_SLI_INTF_IF_TYPE_2:
pci_rd_rc1 = lpfc_readl(
phba->sli4_hba.u.if_type2.STATUSregaddr,
@ -1668,15 +1668,27 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
/* TODO: Register for Overtemp async events. */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2889 Port Overtemperature event, "
"taking port offline\n");
"taking port offline Data: x%x x%x\n",
reg_err1, reg_err2);
temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
temp_event_data.event_code = LPFC_CRIT_TEMP;
temp_event_data.data = 0xFFFFFFFF;
shost = lpfc_shost_from_vport(phba->pport);
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(temp_event_data),
(char *)&temp_event_data,
SCSI_NL_VID_TYPE_PCI
| PCI_VENDOR_ID_EMULEX);
spin_lock_irq(&phba->hbalock);
phba->over_temp_state = HBA_OVER_TEMP;
spin_unlock_irq(&phba->hbalock);
lpfc_sli4_offline_eratt(phba);
break;
return;
}
if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
@ -1693,6 +1705,10 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3145 Port Down: Provisioning\n");
/* If resets are disabled then leave the HBA alone and return */
if (!phba->cfg_enable_hba_reset)
return;
/* Check port status register for function reset */
rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
en_rn_msg);
@ -2759,9 +2775,19 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
nlp_listp) {
if (NLP_CHK_NODE_ACT(ndlp))
if (NLP_CHK_NODE_ACT(ndlp)) {
ndlp->nlp_rpi =
lpfc_sli4_alloc_rpi(phba);
lpfc_printf_vlog(ndlp->vport, KERN_INFO,
LOG_NODE,
"0009 rpi:%x DID:%x "
"flg:%x map:%x %p\n",
ndlp->nlp_rpi,
ndlp->nlp_DID,
ndlp->nlp_flag,
ndlp->nlp_usg_map,
ndlp);
}
}
}
}
@ -2925,8 +2951,18 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
* RPI. Get a new RPI when the adapter port
* comes back online.
*/
if (phba->sli_rev == LPFC_SLI_REV4)
if (phba->sli_rev == LPFC_SLI_REV4) {
lpfc_printf_vlog(ndlp->vport,
KERN_INFO, LOG_NODE,
"0011 lpfc_offline: "
"ndlp:x%p did %x "
"usgmap:x%x rpi:%x\n",
ndlp, ndlp->nlp_DID,
ndlp->nlp_usg_map,
ndlp->nlp_rpi);
lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
}
lpfc_unreg_rpi(vports[i], ndlp);
}
}
@ -3241,12 +3277,17 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
struct Scsi_Host *shost;
int error = 0;
if (dev != &phba->pcidev->dev)
if (dev != &phba->pcidev->dev) {
shost = scsi_host_alloc(&lpfc_vport_template,
sizeof(struct lpfc_vport));
else
shost = scsi_host_alloc(&lpfc_template,
} else {
if (phba->sli_rev == LPFC_SLI_REV4)
shost = scsi_host_alloc(&lpfc_template,
sizeof(struct lpfc_vport));
else
shost = scsi_host_alloc(&lpfc_template_s3,
sizeof(struct lpfc_vport));
}
if (!shost)
goto out;
@ -3685,6 +3726,11 @@ lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
case LPFC_ASYNC_LINK_SPEED_10GBPS:
link_speed = LPFC_LINK_SPEED_10GHZ;
break;
case LPFC_ASYNC_LINK_SPEED_20GBPS:
case LPFC_ASYNC_LINK_SPEED_25GBPS:
case LPFC_ASYNC_LINK_SPEED_40GBPS:
link_speed = LPFC_LINK_SPEED_UNKNOWN;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0483 Invalid link-attention link speed: x%x\n",
@ -3756,46 +3802,55 @@ lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
switch (evt_code) {
case LPFC_TRAILER_CODE_LINK:
switch (speed_code) {
case LPFC_EVT_CODE_LINK_NO_LINK:
case LPFC_ASYNC_LINK_SPEED_ZERO:
port_speed = 0;
break;
case LPFC_EVT_CODE_LINK_10_MBIT:
case LPFC_ASYNC_LINK_SPEED_10MBPS:
port_speed = 10;
break;
case LPFC_EVT_CODE_LINK_100_MBIT:
case LPFC_ASYNC_LINK_SPEED_100MBPS:
port_speed = 100;
break;
case LPFC_EVT_CODE_LINK_1_GBIT:
case LPFC_ASYNC_LINK_SPEED_1GBPS:
port_speed = 1000;
break;
case LPFC_EVT_CODE_LINK_10_GBIT:
case LPFC_ASYNC_LINK_SPEED_10GBPS:
port_speed = 10000;
break;
case LPFC_ASYNC_LINK_SPEED_20GBPS:
port_speed = 20000;
break;
case LPFC_ASYNC_LINK_SPEED_25GBPS:
port_speed = 25000;
break;
case LPFC_ASYNC_LINK_SPEED_40GBPS:
port_speed = 40000;
break;
default:
port_speed = 0;
}
break;
case LPFC_TRAILER_CODE_FC:
switch (speed_code) {
case LPFC_EVT_CODE_FC_NO_LINK:
case LPFC_FC_LA_SPEED_UNKNOWN:
port_speed = 0;
break;
case LPFC_EVT_CODE_FC_1_GBAUD:
case LPFC_FC_LA_SPEED_1G:
port_speed = 1000;
break;
case LPFC_EVT_CODE_FC_2_GBAUD:
case LPFC_FC_LA_SPEED_2G:
port_speed = 2000;
break;
case LPFC_EVT_CODE_FC_4_GBAUD:
case LPFC_FC_LA_SPEED_4G:
port_speed = 4000;
break;
case LPFC_EVT_CODE_FC_8_GBAUD:
case LPFC_FC_LA_SPEED_8G:
port_speed = 8000;
break;
case LPFC_EVT_CODE_FC_10_GBAUD:
case LPFC_FC_LA_SPEED_10G:
port_speed = 10000;
break;
case LPFC_EVT_CODE_FC_16_GBAUD:
case LPFC_FC_LA_SPEED_16G:
port_speed = 16000;
break;
default:
@ -4044,18 +4099,21 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
char port_name;
char message[128];
uint8_t status;
uint8_t evt_type;
struct temp_event temp_event_data;
struct lpfc_acqe_misconfigured_event *misconfigured;
struct Scsi_Host *shost;
/* special case misconfigured event as it contains data for all ports */
if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_2) ||
(bf_get(lpfc_trailer_type, acqe_sli) !=
LPFC_SLI_EVENT_TYPE_MISCONFIGURED)) {
evt_type = bf_get(lpfc_trailer_type, acqe_sli);
/* Special case Lancer */
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_2) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"2901 Async SLI event - Event Data1:x%08x Event Data2:"
"x%08x SLI Event Type:%d\n",
acqe_sli->event_data1, acqe_sli->event_data2,
bf_get(lpfc_trailer_type, acqe_sli));
evt_type);
return;
}
@ -4063,58 +4121,107 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
if (port_name == 0x00)
port_name = '?'; /* fetched port name is empty */
misconfigured = (struct lpfc_acqe_misconfigured_event *)
switch (evt_type) {
case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
temp_event_data.data = (uint32_t)acqe_sli->event_data1;
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"3190 Over Temperature:%d Celsius- Port Name %c\n",
acqe_sli->event_data1, port_name);
shost = lpfc_shost_from_vport(phba->pport);
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(temp_event_data),
(char *)&temp_event_data,
SCSI_NL_VID_TYPE_PCI
| PCI_VENDOR_ID_EMULEX);
break;
case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
temp_event_data.event_code = LPFC_NORMAL_TEMP;
temp_event_data.data = (uint32_t)acqe_sli->event_data1;
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3191 Normal Temperature:%d Celsius - Port Name %c\n",
acqe_sli->event_data1, port_name);
shost = lpfc_shost_from_vport(phba->pport);
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(temp_event_data),
(char *)&temp_event_data,
SCSI_NL_VID_TYPE_PCI
| PCI_VENDOR_ID_EMULEX);
break;
case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
misconfigured = (struct lpfc_acqe_misconfigured_event *)
&acqe_sli->event_data1;
/* fetch the status for this port */
switch (phba->sli4_hba.lnk_info.lnk_no) {
case LPFC_LINK_NUMBER_0:
status = bf_get(lpfc_sli_misconfigured_port0,
/* fetch the status for this port */
switch (phba->sli4_hba.lnk_info.lnk_no) {
case LPFC_LINK_NUMBER_0:
status = bf_get(lpfc_sli_misconfigured_port0,
&misconfigured->theEvent);
break;
case LPFC_LINK_NUMBER_1:
status = bf_get(lpfc_sli_misconfigured_port1,
break;
case LPFC_LINK_NUMBER_1:
status = bf_get(lpfc_sli_misconfigured_port1,
&misconfigured->theEvent);
break;
case LPFC_LINK_NUMBER_2:
status = bf_get(lpfc_sli_misconfigured_port2,
break;
case LPFC_LINK_NUMBER_2:
status = bf_get(lpfc_sli_misconfigured_port2,
&misconfigured->theEvent);
break;
case LPFC_LINK_NUMBER_3:
status = bf_get(lpfc_sli_misconfigured_port3,
break;
case LPFC_LINK_NUMBER_3:
status = bf_get(lpfc_sli_misconfigured_port3,
&misconfigured->theEvent);
break;
default:
status = ~LPFC_SLI_EVENT_STATUS_VALID;
break;
}
break;
default:
status = ~LPFC_SLI_EVENT_STATUS_VALID;
break;
}
switch (status) {
case LPFC_SLI_EVENT_STATUS_VALID:
return; /* no message if the sfp is okay */
case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
sprintf(message, "Optics faulted/incorrectly installed/not " \
"installed - Reseat optics, if issue not "
"resolved, replace.");
break;
case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
sprintf(message,
"Optics of two types installed - Remove one optic or " \
"install matching pair of optics.");
break;
case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
sprintf(message, "Incompatible optics - Replace with " \
switch (status) {
case LPFC_SLI_EVENT_STATUS_VALID:
return; /* no message if the sfp is okay */
case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
sprintf(message, "Optics faulted/incorrectly "
"installed/not installed - Reseat optics, "
"if issue not resolved, replace.");
break;
case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
sprintf(message,
"Optics of two types installed - Remove one "
"optic or install matching pair of optics.");
break;
case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
sprintf(message, "Incompatible optics - Replace with "
"compatible optics for card to function.");
break;
default:
/* firmware is reporting a status we don't know about */
sprintf(message, "Unknown event status x%02x", status);
break;
}
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3176 Misconfigured Physical Port - "
"Port Name %c %s\n", port_name, message);
break;
case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3192 Remote DPort Test Initiated - "
"Event Data1:x%08x Event Data2: x%08x\n",
acqe_sli->event_data1, acqe_sli->event_data2);
break;
default:
/* firmware is reporting a status we don't know about */
sprintf(message, "Unknown event status x%02x", status);
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3193 Async SLI event - Event Data1:x%08x Event Data2:"
"x%08x SLI Event Type:%d\n",
acqe_sli->event_data1, acqe_sli->event_data2,
evt_type);
break;
}
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3176 Misconfigured Physical Port - "
"Port Name %c %s\n", port_name, message);
}
/**
@ -5183,6 +5290,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
rc = lpfc_pci_function_reset(phba);
if (unlikely(rc))
return -ENODEV;
phba->temp_sensor_support = 1;
}
/* Create the bootstrap mailbox command */
@ -7647,6 +7755,14 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
goto out_destroy_els_rq;
}
}
/*
* Configure EQ delay multiplier for interrupt coalescing using
* MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time.
*/
for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
fcp_eqidx += LPFC_MAX_EQ_DELAY)
lpfc_modify_fcp_eq_delay(phba, fcp_eqidx);
return 0;
out_destroy_els_rq:
@ -7953,7 +8069,7 @@ wait:
* up to 30 seconds. If the port doesn't respond, treat
* it as an error.
*/
for (rdy_chk = 0; rdy_chk < 3000; rdy_chk++) {
for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
if (lpfc_readl(phba->sli4_hba.u.if_type2.
STATUSregaddr, &reg_data.word0)) {
rc = -ENODEV;

View File

@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2013 Emulex. All rights reserved. *
* Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *

View File

@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2013 Emulex. All rights reserved. *
* Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -276,6 +276,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd;
uint64_t nlp_portwwn = 0;
uint32_t *lp;
IOCB_t *icmd;
struct serv_parm *sp;
@ -332,6 +333,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
NULL);
return 0;
}
nlp_portwwn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) {
/* Reject this request because invalid parameters */
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
@ -367,7 +370,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
/* no need to reg_login if we are already in one of these states */
/* if already logged in, do implicit logout */
switch (ndlp->nlp_state) {
case NLP_STE_NPR_NODE:
if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
@ -376,8 +379,26 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
case NLP_STE_PRLI_ISSUE:
case NLP_STE_UNMAPPED_NODE:
case NLP_STE_MAPPED_NODE:
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
return 1;
/* lpfc_plogi_confirm_nport skips fabric did, handle it here */
if (!(ndlp->nlp_type & NLP_FABRIC)) {
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
ndlp, NULL);
return 1;
}
if (nlp_portwwn != 0 &&
nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0143 PLOGI recv'd from DID: x%x "
"WWPN changed: old %llx new %llx\n",
ndlp->nlp_DID,
(unsigned long long)nlp_portwwn,
(unsigned long long)
wwn_to_u64(sp->portName.u.wwn));
ndlp->nlp_prev_state = ndlp->nlp_state;
/* rport needs to be unregistered first */
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
break;
}
/* Check for Nport to NPort pt2pt protocol */

View File

@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -1129,6 +1129,25 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
phba->lpfc_release_scsi_buf(phba, psb);
}
/**
* lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
* @data: A pointer to the immediate command data portion of the IOCB.
* @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
*
* The routine copies the entire FCP command from @fcp_cmnd to @data while
* byte swapping the data to big endian format for transmission on the wire.
**/
static void
lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
{
int i, j;
for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
i += sizeof(uint32_t), j++) {
((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
}
}
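(Note: the word-at-a-time cpu_to_be32() copy above assumes
sizeof(struct fcp_cmnd) is a multiple of four bytes -- which holds for the
fixed 32-byte FCP_CMND IU layout -- so no trailing partial word needs
handling.)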
/**
* lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
* @phba: The Hba for which this call is being executed.
@ -1264,6 +1283,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
* we need to set word 4 of IOCB here
*/
iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
return 0;
}
@ -4126,24 +4146,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_release_scsi_buf(phba, lpfc_cmd);
}
/**
* lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
* @data: A pointer to the immediate command data portion of the IOCB.
* @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
*
* The routine copies the entire FCP command from @fcp_cmnd to @data while
* byte swapping the data to big endian format for transmission on the wire.
**/
static void
lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
{
int i, j;
for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
i += sizeof(uint32_t), j++) {
((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
}
}
/**
* lpfc_scsi_prep_cmnd - Wrapper func to convert a scsi cmnd to an FCP info unit
* @vport: The virtual port for which this call is being executed.
@ -4223,9 +4225,6 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
fcp_cmnd->fcpCntl3 = 0;
phba->fc4ControlRequests++;
}
if (phba->sli_rev == 3 &&
!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
/*
* Finish initializing those IOCB fields that are independent
* of the scsi_cmnd request_buffer
@ -5118,9 +5117,10 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
int status;
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata) {
if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0798 Device Reset rport failure: rdata x%p\n", rdata);
"0798 Device Reset rport failure: rdata x%p\n",
rdata);
return FAILED;
}
pnode = rdata->pnode;
@ -5202,10 +5202,12 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
if (status == FAILED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0722 Target Reset rport failure: rdata x%p\n", rdata);
spin_lock_irq(shost->host_lock);
pnode->nlp_flag &= ~NLP_NPR_ADISC;
pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
spin_unlock_irq(shost->host_lock);
if (pnode) {
spin_lock_irq(shost->host_lock);
pnode->nlp_flag &= ~NLP_NPR_ADISC;
pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
spin_unlock_irq(shost->host_lock);
}
lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_TGT);
return FAST_IO_FAIL;
@ -5857,6 +5859,31 @@ lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
return false;
}
struct scsi_host_template lpfc_template_s3 = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,
.info = lpfc_info,
.queuecommand = lpfc_queuecommand,
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
.eh_bus_reset_handler = lpfc_bus_reset_handler,
.slave_alloc = lpfc_slave_alloc,
.slave_configure = lpfc_slave_configure,
.slave_destroy = lpfc_slave_destroy,
.scan_finished = lpfc_scan_finished,
.this_id = -1,
.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
.cmd_per_lun = LPFC_CMD_PER_LUN,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = lpfc_hba_attrs,
.max_sectors = 0xFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
.change_queue_depth = scsi_change_queue_depth,
.use_blk_tags = 1,
.track_queue_depth = 1,
};
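The separate lpfc_template_s3 above pairs with the scsi_host_alloc() selection
change earlier in lpfc_create_port(): physical ports on SLI-3 HBAs now get this
template, while SLI-4 HBAs (and all vports) keep using lpfc_template and
lpfc_vport_template respectively.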
struct scsi_host_template lpfc_template = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,

View File

@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *

View File

@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -918,12 +918,16 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
ndlp = lpfc_cmd->rdata->pnode;
} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
!(piocbq->iocb_flag & LPFC_IO_LIBDFC))
!(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
ndlp = piocbq->context_un.ndlp;
else if (piocbq->iocb_flag & LPFC_IO_LIBDFC)
ndlp = piocbq->context_un.ndlp;
else
} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
ndlp = NULL;
else
ndlp = piocbq->context_un.ndlp;
} else {
ndlp = piocbq->context1;
}
list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
start_sglq = sglq;
@ -2213,6 +2217,46 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
else
mempool_free(pmb, phba->mbox_mem_pool);
}
/**
* lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
* @phba: Pointer to HBA context object.
* @pmb: Pointer to mailbox object.
*
* This function is the unreg rpi mailbox completion handler. It
* frees the memory resources associated with the completed mailbox
* command. Because an additional reference is put on the ndlp to prevent
* lpfc_nlp_release from freeing the rpi bit in the bitmask before the
* unreg mailbox command completes, this routine puts that reference
* back.
*
**/
void
lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
struct lpfc_nodelist *ndlp;
ndlp = pmb->context1;
if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
if (phba->sli_rev == LPFC_SLI_REV4 &&
(bf_get(lpfc_sli_intf_if_type,
&phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_IF_TYPE_2)) {
if (ndlp) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
"0010 UNREG_LOGIN vpi:%x "
"rpi:%x DID:%x map:%x %p\n",
vport->vpi, ndlp->nlp_rpi,
ndlp->nlp_DID,
ndlp->nlp_usg_map, ndlp);
lpfc_nlp_put(ndlp);
}
}
}
mempool_free(pmb, phba->mbox_mem_pool);
}
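A hedged sketch of how such a completion handler is typically wired onto an
UNREG_LOGIN mailbox before it is posted (illustrative call site, not a line
from this patch set):

	mbox->vport = vport;
	mbox->context1 = lpfc_nlp_get(ndlp);	/* the extra reference described above */
	mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
		mempool_free(mbox, phba->mbox_mem_pool);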
/**
* lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
@ -12842,7 +12886,7 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
* fails this function will return -ENXIO.
**/
int
lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint32_t startq)
{
struct lpfc_mbx_modify_eq_delay *eq_delay;
LPFC_MBOXQ_t *mbox;
@ -12959,11 +13003,8 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
LPFC_EQE_SIZE);
bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
/* Calculate delay multiplier from maximum interrupts per second */
if (imax > LPFC_DMULT_CONST)
dmult = 0;
else
dmult = LPFC_DMULT_CONST/imax - 1;
/* don't setup delay multiplier using EQ_CREATE */
dmult = 0;
bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
dmult);
switch (eq->entry_count) {
@ -15662,14 +15703,14 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
struct lpfc_rpi_hdr *rpi_hdr;
unsigned long iflag;
max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
rpi_limit = phba->sli4_hba.next_rpi;
/*
* Fetch the next logical rpi. Because this index is logical,
* the driver starts at 0 each time.
*/
spin_lock_irqsave(&phba->hbalock, iflag);
max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
rpi_limit = phba->sli4_hba.next_rpi;
rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
if (rpi >= rpi_limit)
rpi = LPFC_RPI_ALLOC_ERROR;
@ -15678,6 +15719,9 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
phba->sli4_hba.max_cfg_param.rpi_used++;
phba->sli4_hba.rpi_count++;
}
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0001 rpi:%x max:%x lim:%x\n",
(int) rpi, max_rpi, rpi_limit);
/*
* Don't try to allocate more rpi header regions if the device limit

View File

@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -80,6 +80,7 @@ struct lpfc_iocbq {
#define LPFC_IO_OAS 0x10000 /* OAS FCP IO */
#define LPFC_IO_FOF 0x20000 /* FOF FCP IO */
#define LPFC_IO_LOOPBACK 0x40000 /* Loopback IO */
uint32_t drvrTimeout; /* driver timeout in seconds */
uint32_t fcp_wqidx; /* index to FCP work queue */

View File

@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2009-2014 Emulex. All rights reserved. *
* Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -671,7 +671,7 @@ struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
uint32_t);
void lpfc_sli4_queue_free(struct lpfc_queue *);
int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
int lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t);
int lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint32_t);
int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t, uint32_t);
int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,

View File

@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "10.4.8000.0."
#define LPFC_DRIVER_VERSION "10.5.0.0."
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@ -30,4 +30,4 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
#define LPFC_COPYRIGHT "Copyright(c) 2004-2014 Emulex. All rights reserved."
#define LPFC_COPYRIGHT "Copyright(c) 2004-2015 Emulex. All rights reserved."

View File

@ -483,7 +483,6 @@ static struct platform_driver mac_scsi_driver = {
.remove = __exit_p(mac_scsi_remove),
.driver = {
.name = DRV_MODULE_NAME,
.owner = THIS_MODULE,
},
};

View File

@ -18,6 +18,9 @@ config SCSI_QLA_FC
2322, 6322 ql2322_fw.bin
24xx, 54xx ql2400_fw.bin
25xx ql2500_fw.bin
2031 ql2600_fw.bin
8031 ql8300_fw.bin
27xx ql2700_fw.bin
Upon request, the driver caches the firmware image until
the driver is unloaded.

View File

@ -11,9 +11,9 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
* | Module Init and Probe | 0x017d | 0x0144,0x0146 |
* | Module Init and Probe | 0x017f | 0x0146 |
* | | | 0x015b-0x0160 |
* | | | 0x016e-0x0170 |
* | | | 0x016e-0x0170 |
* | Mailbox commands | 0x118d | 0x1115-0x1116 |
* | | | 0x111a-0x111b |
* | Device Discovery | 0x2016 | 0x2020-0x2022, |
@ -60,7 +60,7 @@
* | | | 0xb13c-0xb140 |
* | | | 0xb149 |
* | MultiQ | 0xc00c | |
* | Misc | 0xd213 | 0xd011-0xd017 |
* | Misc | 0xd300 | 0xd016-0xd017 |
* | | | 0xd021,0xd024 |
* | | | 0xd025,0xd029 |
* | | | 0xd02a,0xd02e |

View File

@ -2163,7 +2163,7 @@ struct ct_fdmi_hba_attr {
uint8_t node_name[WWN_SIZE];
uint8_t manufacturer[64];
uint8_t serial_num[32];
uint8_t model[16];
uint8_t model[16+1];
uint8_t model_desc[80];
uint8_t hw_version[32];
uint8_t driver_version[32];
@ -2184,9 +2184,9 @@ struct ct_fdmiv2_hba_attr {
uint16_t len;
union {
uint8_t node_name[WWN_SIZE];
uint8_t manufacturer[32];
uint8_t manufacturer[64];
uint8_t serial_num[32];
uint8_t model[16];
uint8_t model[16+1];
uint8_t model_desc[80];
uint8_t hw_version[16];
uint8_t driver_version[32];
@ -2252,7 +2252,7 @@ struct ct_fdmiv2_port_attr {
uint32_t cur_speed;
uint32_t max_frame_size;
uint8_t os_dev_name[32];
uint8_t host_name[32];
uint8_t host_name[256];
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
uint8_t port_sym_name[128];
@ -2283,7 +2283,7 @@ struct ct_fdmi_port_attr {
uint32_t cur_speed;
uint32_t max_frame_size;
uint8_t os_dev_name[32];
uint8_t host_name[32];
uint8_t host_name[256];
} a;
};
@ -3132,7 +3132,8 @@ struct qla_hw_data {
IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
IS_QLA8044(ha) || IS_QLA27XX(ha))
#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
IS_QLA27XX(ha))
#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
IS_QLA27XX(ha))
@ -3300,6 +3301,8 @@ struct qla_hw_data {
#define RISC_RDY_AFT_RESET 3
#define RISC_SRAM_DUMP_CMPL 4
#define RISC_EXT_MEM_DUMP_CMPL 5
#define ISP_MBX_RDY 6
#define ISP_SOFT_RESET_CMPL 7
int fw_dump_reading;
int prev_minidump_failed;
dma_addr_t eft_dma;
@ -3587,6 +3590,7 @@ typedef struct scsi_qla_host {
#define VP_BIND_NEEDED 2
#define VP_DELETE_NEEDED 3
#define VP_SCR_NEEDED 4 /* State Change Request registration */
#define VP_CONFIG_OK 5 /* Flag to cfg VP, if FW is ready */
atomic_t vp_state;
#define VP_OFFLINE 0
#define VP_ACTIVE 1

View File

@ -1121,7 +1121,7 @@ qla81xx_reset_mpi(scsi_qla_host_t *vha)
*
* Returns 0 on success.
*/
static inline void
static inline int
qla24xx_reset_risc(scsi_qla_host_t *vha)
{
unsigned long flags = 0;
@ -1130,6 +1130,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
uint32_t cnt, d2;
uint16_t wd;
static int abts_cnt; /* ISP abort retry counts */
int rval = QLA_SUCCESS;
spin_lock_irqsave(&ha->hardware_lock, flags);
@ -1142,26 +1143,57 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
udelay(10);
}
if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
"HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
RD_REG_DWORD(&reg->hccr),
RD_REG_DWORD(&reg->ctrl_status),
(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
WRT_REG_DWORD(&reg->ctrl_status,
CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
udelay(100);
/* Wait for firmware to complete NVRAM accesses. */
d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
for (cnt = 10000 ; cnt && d2; cnt--) {
udelay(5);
d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
rval == QLA_SUCCESS; cnt--) {
barrier();
if (cnt)
udelay(5);
else
rval = QLA_FUNCTION_TIMEOUT;
}
if (rval == QLA_SUCCESS)
set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
"HCCR: 0x%x, MailBox0 Status 0x%x\n",
RD_REG_DWORD(&reg->hccr),
RD_REG_DWORD(&reg->mailbox0));
/* Wait for soft-reset to complete. */
d2 = RD_REG_DWORD(&reg->ctrl_status);
for (cnt = 6000000 ; cnt && (d2 & CSRX_ISP_SOFT_RESET); cnt--) {
udelay(5);
d2 = RD_REG_DWORD(&reg->ctrl_status);
for (cnt = 0; cnt < 6000000; cnt++) {
barrier();
if ((RD_REG_DWORD(&reg->ctrl_status) &
CSRX_ISP_SOFT_RESET) == 0)
break;
udelay(5);
}
if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
"HCCR: 0x%x, Soft Reset status: 0x%x\n",
RD_REG_DWORD(&reg->hccr),
RD_REG_DWORD(&reg->ctrl_status));
/* If required, do an MPI FW reset now */
if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
@ -1190,16 +1222,32 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
RD_REG_DWORD(&reg->hccr);
d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
for (cnt = 6000000 ; cnt && d2; cnt--) {
udelay(5);
d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
for (cnt = 6000000; RD_REG_WORD(&reg->mailbox0) != 0 &&
rval == QLA_SUCCESS; cnt--) {
barrier();
if (cnt)
udelay(5);
else
rval = QLA_FUNCTION_TIMEOUT;
}
if (rval == QLA_SUCCESS)
set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
"Host Risc 0x%x, mailbox0 0x%x\n",
RD_REG_DWORD(&reg->hccr),
RD_REG_WORD(&reg->mailbox0));
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
"Driver in %s mode\n",
IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");
if (IS_NOPOLLING_TYPE(ha))
ha->isp_ops->enable_intrs(ha);
return rval;
}
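With qla24xx_reset_risc() now returning a status rather than void, a caller
can react to a stuck mailbox or soft-reset timeout instead of continuing
blindly. A hypothetical caller sketch (the message id 0x00ff is made up):

	if (qla24xx_reset_risc(vha) != QLA_SUCCESS)
		ql_log(ql_log_warn, vha, 0x00ff,
		    "RISC reset timed out.\n");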
static void
@ -2243,8 +2291,11 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
rval = QLA_SUCCESS;
/* 20 seconds for loop down. */
min_wait = 20;
/* Time to wait for loop down */
if (IS_P3P_TYPE(ha))
min_wait = 30;
else
min_wait = 20;
/*
* Firmware should take at most one RATOV to login, plus 5 seconds for

View File

@ -756,11 +756,21 @@ skip_rio:
/*
* In case of loop down, restore WWPN from
* NVRAM in case of FA-WWPN capable ISP
* Restore for Physical Port only
*/
if (ha->flags.fawwpn_enabled) {
void *wwpn = ha->init_cb->port_name;
if (!vha->vp_idx) {
if (ha->flags.fawwpn_enabled) {
void *wwpn = ha->init_cb->port_name;
memcpy(vha->port_name, wwpn, WWN_SIZE);
fc_host_port_name(vha->host) =
wwn_to_u64(vha->port_name);
ql_dbg(ql_dbg_init + ql_dbg_verbose,
vha, 0x0144, "LOOP DOWN detected, "
"restore WWPN %016llx\n",
wwn_to_u64(vha->port_name));
}
memcpy(vha->port_name, wwpn, WWN_SIZE);
clear_bit(VP_CONFIG_OK, &vha->vp_flags);
}
vha->device_flags |= DFLG_NO_CABLE;
@ -947,6 +957,7 @@ skip_rio:
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(VP_CONFIG_OK, &vha->vp_flags);
qlt_async_event(mb[0], vha, mb);
break;

View File

@ -33,7 +33,7 @@
static int
qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
{
int rval;
int rval, i;
unsigned long flags = 0;
device_reg_t *reg;
uint8_t abort_active;
@ -43,10 +43,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
uint16_t __iomem *optr;
uint32_t cnt;
uint32_t mboxes;
uint16_t __iomem *mbx_reg;
unsigned long wait_time;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
if (ha->pdev->error_state > pci_channel_io_frozen) {
@ -376,6 +378,18 @@ mbx_done:
ql_dbg(ql_dbg_disc, base_vha, 0x1020,
"**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
ql_dbg(ql_dbg_disc, vha, 0x1115,
"host status: 0x%x, flags:0x%lx, intr ctrl reg:0x%x, intr status:0x%x\n",
RD_REG_DWORD(&reg->isp24.host_status),
ha->fw_dump_cap_flags,
RD_REG_DWORD(&reg->isp24.ictrl),
RD_REG_DWORD(&reg->isp24.istatus));
mbx_reg = &reg->isp24.mailbox0;
for (i = 0; i < 6; i++)
ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x1116,
"mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
} else {
ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
}
@ -2838,7 +2852,7 @@ qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
if (!IS_QLA2031(vha->hw))
if (!IS_QLA2031(vha->hw) && !IS_QLA27XX(vha->hw))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
@ -2846,7 +2860,11 @@ qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
mcp->mb[0] = MBC_WRITE_SERDES;
mcp->mb[1] = addr;
mcp->mb[2] = data & 0xff;
if (IS_QLA2031(vha->hw))
mcp->mb[2] = data & 0xff;
else
mcp->mb[2] = data;
mcp->mb[3] = 0;
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_0;
@ -2872,7 +2890,7 @@ qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
if (!IS_QLA2031(vha->hw))
if (!IS_QLA2031(vha->hw) && !IS_QLA27XX(vha->hw))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
@ -2887,7 +2905,10 @@ qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
*data = mcp->mb[1] & 0xff;
if (IS_QLA2031(vha->hw))
*data = mcp->mb[1] & 0xff;
else
*data = mcp->mb[1];
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1186,

View File

@ -306,19 +306,25 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
"Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
qla2x00_do_work(vha);
if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
/* VP acquired. complete port configuration */
ql_dbg(ql_dbg_dpc, vha, 0x4014,
"Configure VP scheduled.\n");
qla24xx_configure_vp(vha);
ql_dbg(ql_dbg_dpc, vha, 0x4015,
"Configure VP end.\n");
return 0;
/* Check if Fw is ready to configure VP first */
if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
/* VP acquired. complete port configuration */
ql_dbg(ql_dbg_dpc, vha, 0x4014,
"Configure VP scheduled.\n");
qla24xx_configure_vp(vha);
ql_dbg(ql_dbg_dpc, vha, 0x4015,
"Configure VP end.\n");
return 0;
}
}
if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {

View File

@ -5834,3 +5834,6 @@ MODULE_FIRMWARE(FW_FILE_ISP2300);
MODULE_FIRMWARE(FW_FILE_ISP2322);
MODULE_FIRMWARE(FW_FILE_ISP24XX);
MODULE_FIRMWARE(FW_FILE_ISP25XX);
MODULE_FIRMWARE(FW_FILE_ISP2031);
MODULE_FIRMWARE(FW_FILE_ISP8031);
MODULE_FIRMWARE(FW_FILE_ISP27XX);

View File

@ -1718,13 +1718,16 @@ qla83xx_beacon_blink(struct scsi_qla_host *vha)
uint16_t orig_led_cfg[6];
uint32_t led_10_value, led_43_value;
if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha))
if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha) && !IS_QLA27XX(ha))
return;
if (!ha->beacon_blink_led)
return;
if (IS_QLA2031(ha)) {
if (IS_QLA27XX(ha)) {
qla2x00_write_ram_word(vha, 0x1003, 0x40000230);
qla2x00_write_ram_word(vha, 0x1004, 0x40000230);
} else if (IS_QLA2031(ha)) {
led_select_value = qla83xx_select_led_port(ha);
qla83xx_wr_reg(vha, led_select_value, 0x40000230);
@ -1811,7 +1814,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
return QLA_FUNCTION_FAILED;
}
if (IS_QLA2031(ha))
if (IS_QLA2031(ha) || IS_QLA27XX(ha))
goto skip_gpio;
spin_lock_irqsave(&ha->hardware_lock, flags);
@ -1848,7 +1851,7 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
ha->beacon_blink_led = 0;
if (IS_QLA2031(ha))
if (IS_QLA2031(ha) || IS_QLA27XX(ha))
goto set_fw_options;
if (IS_QLA8031(ha) || IS_QLA81XX(ha))

View File

@ -190,7 +190,7 @@ static inline void
qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
uint offset, uint32_t data, void *buf)
{
__iomem void *window = reg + offset;
__iomem void *window = (void __iomem *)reg + offset;
if (buf) {
WRT_REG_DWORD(window, data);
@ -219,6 +219,8 @@ qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
{
if (buf)
ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
ql_dbg(ql_dbg_misc + ql_dbg_verbose, NULL, 0xd011,
"Skipping entry %d\n", ent->hdr.entry_type);
}
static int
@ -784,6 +786,13 @@ qla27xx_walk_template(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_misc, vha, 0xd01b,
"%s: len=%lx\n", __func__, *len);
if (buf) {
ql_log(ql_log_warn, vha, 0xd015,
"Firmware dump saved to temp buffer (%ld/%p)\n",
vha->host_no, vha->hw->fw_dump);
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
}
}
static void
@ -938,6 +947,10 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
else if (!vha->hw->fw_dump_template)
ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n");
else if (vha->hw->fw_dumped)
ql_log(ql_log_warn, vha, 0xd300,
"Firmware has been previously dumped (%p),"
" -- ignoring request\n", vha->hw->fw_dump);
else
qla27xx_execute_fwdt_template(vha);

View File

@ -7,7 +7,7 @@
/*
* Driver version
*/
#define QLA2XXX_VERSION "8.07.00.16-k"
#define QLA2XXX_VERSION "8.07.00.18-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 7

View File

@ -972,18 +972,24 @@ EXPORT_SYMBOL(scsi_report_opcode);
* Description: Gets a reference to the scsi_device and increments the use count
* of the underlying LLDD module. You must hold host_lock of the
* parent Scsi_Host or already have a reference when calling this.
*
* This will fail if a device is deleted or cancelled, or when the LLD module
* is in the process of being unloaded.
*/
int scsi_device_get(struct scsi_device *sdev)
{
if (sdev->sdev_state == SDEV_DEL)
return -ENXIO;
if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
goto fail;
if (!get_device(&sdev->sdev_gendev))
return -ENXIO;
/* We can fail try_module_get if we're doing SCSI operations
* from module exit (like cache flush) */
__module_get(sdev->host->hostt->module);
goto fail;
if (!try_module_get(sdev->host->hostt->module))
goto fail_put_device;
return 0;
fail_put_device:
put_device(&sdev->sdev_gendev);
fail:
return -ENXIO;
}
EXPORT_SYMBOL(scsi_device_get);
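The reworked scsi_device_get() fails for both SDEV_DEL and SDEV_CANCEL
devices and now backs out the device reference when try_module_get() fails.
A typical caller pairs it with scsi_device_put():

	if (scsi_device_get(sdev))
		return;		/* device going away or LLD unloading */
	/* ... safely use sdev ... */
	scsi_device_put(sdev);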

View File

@ -1570,16 +1570,15 @@ EXPORT_SYMBOL(scsi_add_device);
void scsi_rescan_device(struct device *dev)
{
if (!dev->driver)
return;
if (try_module_get(dev->driver->owner)) {
device_lock(dev);
if (dev->driver && try_module_get(dev->driver->owner)) {
struct scsi_driver *drv = to_scsi_driver(dev->driver);
if (drv->rescan)
drv->rescan(dev);
module_put(dev->driver->owner);
}
device_unlock(dev);
}
EXPORT_SYMBOL(scsi_rescan_device);

View File

@ -265,6 +265,7 @@ static const struct {
{ FC_PORTSPEED_40GBIT, "40 Gbit" },
{ FC_PORTSPEED_50GBIT, "50 Gbit" },
{ FC_PORTSPEED_100GBIT, "100 Gbit" },
{ FC_PORTSPEED_25GBIT, "25 Gbit" },
{ FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
};
fc_bitfield_name_search(port_speed, fc_port_speed_names)

View File

@ -564,10 +564,12 @@ static int sd_major(int major_idx)
}
}
static struct scsi_disk *__scsi_disk_get(struct gendisk *disk)
static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
{
struct scsi_disk *sdkp = NULL;
mutex_lock(&sd_ref_mutex);
if (disk->private_data) {
sdkp = scsi_disk(disk);
if (scsi_device_get(sdkp->device) == 0)
@ -575,27 +577,6 @@ static struct scsi_disk *__scsi_disk_get(struct gendisk *disk)
else
sdkp = NULL;
}
return sdkp;
}
static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
{
struct scsi_disk *sdkp;
mutex_lock(&sd_ref_mutex);
sdkp = __scsi_disk_get(disk);
mutex_unlock(&sd_ref_mutex);
return sdkp;
}
static struct scsi_disk *scsi_disk_get_from_dev(struct device *dev)
{
struct scsi_disk *sdkp;
mutex_lock(&sd_ref_mutex);
sdkp = dev_get_drvdata(dev);
if (sdkp)
sdkp = __scsi_disk_get(sdkp->disk);
mutex_unlock(&sd_ref_mutex);
return sdkp;
}
@ -610,8 +591,6 @@ static void scsi_disk_put(struct scsi_disk *sdkp)
mutex_unlock(&sd_ref_mutex);
}
static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
unsigned int dix, unsigned int dif)
{
@ -1525,12 +1504,9 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
static void sd_rescan(struct device *dev)
{
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
struct scsi_disk *sdkp = dev_get_drvdata(dev);
if (sdkp) {
revalidate_disk(sdkp->disk);
scsi_disk_put(sdkp);
}
revalidate_disk(sdkp->disk);
}
@ -2235,11 +2211,11 @@ got_data:
{
char cap_str_2[10], cap_str_10[10];
u64 sz = (u64)sdkp->capacity << ilog2(sector_size);
string_get_size(sz, STRING_UNITS_2, cap_str_2,
sizeof(cap_str_2));
string_get_size(sz, STRING_UNITS_10, cap_str_10,
string_get_size(sdkp->capacity, sector_size,
STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
string_get_size(sdkp->capacity, sector_size,
STRING_UNITS_10, cap_str_10,
sizeof(cap_str_10));
if (sdkp->first_scan || old_capacity != sdkp->capacity) {
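The new string_get_size() calls above pass the capacity in blocks together
with the sector size; the helper's signature changed elsewhere in this series
to take a block count plus a block size rather than a pre-multiplied byte
count. A quick illustration, assuming the post-patch prototype
void string_get_size(u64 size, u64 blk_size, enum string_size_units units,
char *buf, int len):

	char buf2[10], buf10[10];

	/* 2048 blocks of 512 bytes = 1048576 bytes */
	string_get_size(2048, 512, STRING_UNITS_2, buf2, sizeof(buf2));	/* "1.00 MiB" */
	string_get_size(2048, 512, STRING_UNITS_10, buf10, sizeof(buf10));	/* "1.04 MB" */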
@ -3149,13 +3125,13 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
*/
static void sd_shutdown(struct device *dev)
{
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
struct scsi_disk *sdkp = dev_get_drvdata(dev);
if (!sdkp)
return; /* this can happen */
if (pm_runtime_suspended(dev))
goto exit;
return;
if (sdkp->WCE && sdkp->media_present) {
sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
@ -3166,14 +3142,11 @@ static void sd_shutdown(struct device *dev)
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
sd_start_stop_device(sdkp, 0);
}
exit:
scsi_disk_put(sdkp);
}
static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
{
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
struct scsi_disk *sdkp = dev_get_drvdata(dev);
int ret = 0;
if (!sdkp)
@ -3199,7 +3172,6 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
}
done:
scsi_disk_put(sdkp);
return ret;
}
@ -3215,18 +3187,13 @@ static int sd_suspend_runtime(struct device *dev)
static int sd_resume(struct device *dev)
{
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
int ret = 0;
struct scsi_disk *sdkp = dev_get_drvdata(dev);
if (!sdkp->device->manage_start_stop)
goto done;
return 0;
sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
ret = sd_start_stop_device(sdkp, 1);
done:
scsi_disk_put(sdkp);
return ret;
return sd_start_stop_device(sdkp, 1);
}
/**

View File

@ -308,11 +308,16 @@ enum storvsc_request_type {
* This is the end of Protocol specific defines.
*/
static int storvsc_ringbuffer_size = (20 * PAGE_SIZE);
static int storvsc_ringbuffer_size = (256 * PAGE_SIZE);
static u32 max_outstanding_req_per_channel;
static int storvsc_vcpus_per_sub_channel = 4;
module_param(storvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels");
/*
* Timeout in seconds for all devices managed by this driver.
*/
@ -320,7 +325,6 @@ static int storvsc_timeout = 180;
static int msft_blist_flags = BLIST_TRY_VPD_PAGES;
#define STORVSC_MAX_IO_REQUESTS 200
static void storvsc_on_channel_callback(void *context);
@ -347,7 +351,10 @@ struct storvsc_cmd_request {
/* Synchronize the request/response if needed */
struct completion wait_event;
struct hv_multipage_buffer data_buffer;
struct vmbus_channel_packet_multipage_buffer mpb;
struct vmbus_packet_mpb_array *payload;
u32 payload_sz;
struct vstor_packet vstor_packet;
};
@ -373,6 +380,10 @@ struct storvsc_device {
unsigned char path_id;
unsigned char target_id;
/*
* Maximum I/O size the device can support.
*/
u32 max_transfer_bytes;
/* Used for vsc/vsp channel reset process */
struct storvsc_cmd_request init_request;
struct storvsc_cmd_request reset_request;
@ -618,19 +629,6 @@ cleanup:
return NULL;
}
/* Disgusting wrapper functions */
static inline unsigned long sg_kmap_atomic(struct scatterlist *sgl, int idx)
{
void *addr = kmap_atomic(sg_page(sgl + idx));
return (unsigned long)addr;
}
static inline void sg_kunmap_atomic(unsigned long addr)
{
kunmap_atomic((void *)addr);
}
/* Assume the original sgl has enough room */
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
struct scatterlist *bounce_sgl,
@ -645,32 +643,38 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
unsigned long bounce_addr = 0;
unsigned long dest_addr = 0;
unsigned long flags;
struct scatterlist *cur_dest_sgl;
struct scatterlist *cur_src_sgl;
local_irq_save(flags);
cur_dest_sgl = orig_sgl;
cur_src_sgl = bounce_sgl;
for (i = 0; i < orig_sgl_count; i++) {
dest_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
dest_addr = (unsigned long)
kmap_atomic(sg_page(cur_dest_sgl)) +
cur_dest_sgl->offset;
dest = dest_addr;
destlen = orig_sgl[i].length;
destlen = cur_dest_sgl->length;
if (bounce_addr == 0)
bounce_addr = sg_kmap_atomic(bounce_sgl,j);
bounce_addr = (unsigned long)kmap_atomic(
sg_page(cur_src_sgl));
while (destlen) {
src = bounce_addr + bounce_sgl[j].offset;
srclen = bounce_sgl[j].length - bounce_sgl[j].offset;
src = bounce_addr + cur_src_sgl->offset;
srclen = cur_src_sgl->length - cur_src_sgl->offset;
copylen = min(srclen, destlen);
memcpy((void *)dest, (void *)src, copylen);
total_copied += copylen;
bounce_sgl[j].offset += copylen;
cur_src_sgl->offset += copylen;
destlen -= copylen;
dest += copylen;
if (bounce_sgl[j].offset == bounce_sgl[j].length) {
if (cur_src_sgl->offset == cur_src_sgl->length) {
/* full */
sg_kunmap_atomic(bounce_addr);
kunmap_atomic((void *)bounce_addr);
j++;
/*
@ -684,21 +688,27 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
/*
* We are done; cleanup and return.
*/
sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
kunmap_atomic((void *)(dest_addr -
cur_dest_sgl->offset));
local_irq_restore(flags);
return total_copied;
}
/* if we need to use another bounce buffer */
if (destlen || i != orig_sgl_count - 1)
bounce_addr = sg_kmap_atomic(bounce_sgl,j);
if (destlen || i != orig_sgl_count - 1) {
cur_src_sgl = sg_next(cur_src_sgl);
bounce_addr = (unsigned long)
kmap_atomic(
sg_page(cur_src_sgl));
}
} else if (destlen == 0 && i == orig_sgl_count - 1) {
/* unmap the last bounce that is < PAGE_SIZE */
sg_kunmap_atomic(bounce_addr);
kunmap_atomic((void *)bounce_addr);
}
}
sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
kunmap_atomic((void *)(dest_addr - cur_dest_sgl->offset));
cur_dest_sgl = sg_next(cur_dest_sgl);
}
local_irq_restore(flags);
@ -719,48 +729,62 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
unsigned long bounce_addr = 0;
unsigned long src_addr = 0;
unsigned long flags;
struct scatterlist *cur_src_sgl;
struct scatterlist *cur_dest_sgl;
local_irq_save(flags);
cur_src_sgl = orig_sgl;
cur_dest_sgl = bounce_sgl;
for (i = 0; i < orig_sgl_count; i++) {
src_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
src_addr = (unsigned long)
kmap_atomic(sg_page(cur_src_sgl)) +
cur_src_sgl->offset;
src = src_addr;
srclen = orig_sgl[i].length;
srclen = cur_src_sgl->length;
if (bounce_addr == 0)
bounce_addr = sg_kmap_atomic(bounce_sgl,j);
bounce_addr = (unsigned long)
kmap_atomic(sg_page(cur_dest_sgl));
while (srclen) {
/* assume bounce offset always == 0 */
dest = bounce_addr + bounce_sgl[j].length;
destlen = PAGE_SIZE - bounce_sgl[j].length;
dest = bounce_addr + cur_dest_sgl->length;
destlen = PAGE_SIZE - cur_dest_sgl->length;
copylen = min(srclen, destlen);
memcpy((void *)dest, (void *)src, copylen);
total_copied += copylen;
bounce_sgl[j].length += copylen;
cur_dest_sgl->length += copylen;
srclen -= copylen;
src += copylen;
if (bounce_sgl[j].length == PAGE_SIZE) {
if (cur_dest_sgl->length == PAGE_SIZE) {
/* full..move to next entry */
sg_kunmap_atomic(bounce_addr);
kunmap_atomic((void *)bounce_addr);
bounce_addr = 0;
j++;
/* if we need to use another bounce buffer */
if (srclen || i != orig_sgl_count - 1)
bounce_addr = sg_kmap_atomic(bounce_sgl,j);
} else if (srclen == 0 && i == orig_sgl_count - 1) {
/* unmap the last bounce that is < PAGE_SIZE */
sg_kunmap_atomic(bounce_addr);
}
/* if we need to use another bounce buffer */
if (srclen && bounce_addr == 0) {
cur_dest_sgl = sg_next(cur_dest_sgl);
bounce_addr = (unsigned long)
kmap_atomic(
sg_page(cur_dest_sgl));
}
}
sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
kunmap_atomic((void *)(src_addr - cur_src_sgl->offset));
cur_src_sgl = sg_next(cur_src_sgl);
}
if (bounce_addr)
kunmap_atomic((void *)bounce_addr);
local_irq_restore(flags);
return total_copied;
@ -970,6 +994,8 @@ static int storvsc_channel_init(struct hv_device *device)
STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL)
process_sub_channels = true;
}
stor_device->max_transfer_bytes =
vstor_packet->storage_channel_properties.max_transfer_bytes;
memset(vstor_packet, 0, sizeof(struct vstor_packet));
vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
@ -1080,6 +1106,8 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
struct Scsi_Host *host;
struct storvsc_device *stor_dev;
struct hv_device *dev = host_dev->dev;
u32 payload_sz = cmd_request->payload_sz;
void *payload = cmd_request->payload;
stor_dev = get_in_stor_device(dev);
host = stor_dev->host;
@ -1109,10 +1137,14 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
sense_hdr.ascq);
scsi_set_resid(scmnd,
cmd_request->data_buffer.len -
cmd_request->payload->range.len -
vm_srb->data_transfer_length);
scmnd->scsi_done(scmnd);
if (payload_sz >
sizeof(struct vmbus_channel_packet_multipage_buffer))
kfree(payload);
}
static void storvsc_on_io_completion(struct hv_device *device,
@ -1314,7 +1346,7 @@ static int storvsc_dev_remove(struct hv_device *device)
}
static int storvsc_do_io(struct hv_device *device,
struct storvsc_cmd_request *request)
struct storvsc_cmd_request *request)
{
struct storvsc_device *stor_device;
struct vstor_packet *vstor_packet;
@ -1346,19 +1378,20 @@ static int storvsc_do_io(struct hv_device *device,
vstor_packet->vm_srb.data_transfer_length =
request->data_buffer.len;
request->payload->range.len;
vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;
if (request->data_buffer.len) {
ret = vmbus_sendpacket_multipagebuffer(outgoing_channel,
&request->data_buffer,
if (request->payload->range.len) {
ret = vmbus_sendpacket_mpb_desc(outgoing_channel,
request->payload, request->payload_sz,
vstor_packet,
(sizeof(struct vstor_packet) -
vmscsi_size_delta),
(unsigned long)request);
} else {
ret = vmbus_sendpacket(device->channel, vstor_packet,
ret = vmbus_sendpacket(outgoing_channel, vstor_packet,
(sizeof(struct vstor_packet) -
vmscsi_size_delta),
(unsigned long)request,
@ -1376,7 +1409,6 @@ static int storvsc_do_io(struct hv_device *device,
static int storvsc_device_configure(struct scsi_device *sdevice)
{
scsi_change_queue_depth(sdevice, STORVSC_MAX_IO_REQUESTS);
blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);
@ -1526,6 +1558,10 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
struct scatterlist *sgl;
unsigned int sg_count = 0;
struct vmscsi_request *vm_srb;
struct scatterlist *cur_sgl;
struct vmbus_packet_mpb_array *payload;
u32 payload_sz;
u32 length;
if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
/*
@ -1579,46 +1615,71 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);
cmd_request->data_buffer.len = scsi_bufflen(scmnd);
if (scsi_sg_count(scmnd)) {
sgl = (struct scatterlist *)scsi_sglist(scmnd);
sg_count = scsi_sg_count(scmnd);
sgl = (struct scatterlist *)scsi_sglist(scmnd);
sg_count = scsi_sg_count(scmnd);
length = scsi_bufflen(scmnd);
payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
payload_sz = sizeof(cmd_request->mpb);
if (sg_count) {
/* check if we need to bounce the sgl */
if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
cmd_request->bounce_sgl =
create_bounce_buffer(sgl, scsi_sg_count(scmnd),
scsi_bufflen(scmnd),
create_bounce_buffer(sgl, sg_count,
length,
vm_srb->data_in);
if (!cmd_request->bounce_sgl)
return SCSI_MLQUEUE_HOST_BUSY;
cmd_request->bounce_sgl_count =
ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >>
PAGE_SHIFT;
ALIGN(length, PAGE_SIZE) >> PAGE_SHIFT;
if (vm_srb->data_in == WRITE_TYPE)
copy_to_bounce_buffer(sgl,
cmd_request->bounce_sgl,
scsi_sg_count(scmnd));
cmd_request->bounce_sgl, sg_count);
sgl = cmd_request->bounce_sgl;
sg_count = cmd_request->bounce_sgl_count;
}
cmd_request->data_buffer.offset = sgl[0].offset;
for (i = 0; i < sg_count; i++)
cmd_request->data_buffer.pfn_array[i] =
page_to_pfn(sg_page((&sgl[i])));
if (sg_count > MAX_PAGE_BUFFER_COUNT) {
payload_sz = (sg_count * sizeof(void *) +
sizeof(struct vmbus_packet_mpb_array));
payload = kmalloc(payload_sz, GFP_ATOMIC);
if (!payload) {
if (cmd_request->bounce_sgl_count)
destroy_bounce_buffer(
cmd_request->bounce_sgl,
cmd_request->bounce_sgl_count);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
}
payload->range.len = length;
payload->range.offset = sgl[0].offset;
cur_sgl = sgl;
for (i = 0; i < sg_count; i++) {
payload->range.pfn_array[i] =
page_to_pfn(sg_page((cur_sgl)));
cur_sgl = sg_next(cur_sgl);
}
} else if (scsi_sglist(scmnd)) {
cmd_request->data_buffer.offset =
payload->range.len = length;
payload->range.offset =
virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
cmd_request->data_buffer.pfn_array[0] =
payload->range.pfn_array[0] =
virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
}
cmd_request->payload = payload;
cmd_request->payload_sz = payload_sz;
/* Invokes the vsc to start an IO */
ret = storvsc_do_io(dev, cmd_request);
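
Descriptor selection in the hunk above, restated: a request whose scatter-gather count fits in the embedded PFN array reuses it, while anything larger gets a dynamically sized descriptor that the completion path later frees. A self-contained sketch, where EMBEDDED_PFN_CAPACITY and struct mpb_array are assumptions standing in for MAX_PAGE_BUFFER_COUNT and vmbus_packet_mpb_array:

#include <stdint.h>
#include <stdlib.h>

#define EMBEDDED_PFN_CAPACITY 32	/* assumed embedded array size */

struct mpb_array {
	uint64_t len;		/* transfer length in bytes */
	uint64_t offset;	/* offset into the first page */
	uint64_t pfn_array[];	/* one page-frame number per page */
};

static struct mpb_array *build_payload(struct mpb_array *embedded,
				       unsigned int sg_count,
				       size_t *payload_sz)
{
	if (sg_count <= EMBEDDED_PFN_CAPACITY) {
		/* small I/O: reuse the descriptor inside the request */
		*payload_sz = sizeof(*embedded) +
			      EMBEDDED_PFN_CAPACITY * sizeof(uint64_t);
		return embedded;
	}
	/* large I/O: header plus one PFN slot per scatter-gather
	 * entry; the completion path must free this */
	*payload_sz = sizeof(struct mpb_array) +
		      sg_count * sizeof(uint64_t);
	return malloc(*payload_sz);
}
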
@ -1646,12 +1707,8 @@ static struct scsi_host_template scsi_driver = {
.eh_timed_out = storvsc_eh_timed_out,
.slave_configure = storvsc_device_configure,
.cmd_per_lun = 255,
.can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
.this_id = -1,
/* no use setting to 0 since ll_blk_rw resets it to 1 */
/* currently 32 */
.sg_tablesize = MAX_MULTIPAGE_BUFFER_COUNT,
.use_clustering = DISABLE_CLUSTERING,
.use_clustering = ENABLE_CLUSTERING,
/* Make sure we don't get an sg segment that crosses a page boundary */
.dma_boundary = PAGE_SIZE-1,
.no_write_same = 1,
@ -1686,6 +1743,7 @@ static int storvsc_probe(struct hv_device *device,
const struct hv_vmbus_device_id *dev_id)
{
int ret;
int num_cpus = num_online_cpus();
struct Scsi_Host *host;
struct hv_host_device *host_dev;
bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
@ -1694,6 +1752,7 @@ static int storvsc_probe(struct hv_device *device,
int max_luns_per_target;
int max_targets;
int max_channels;
int max_sub_channels = 0;
/*
* Based on the windows host we are running on,
@ -1719,12 +1778,18 @@ static int storvsc_probe(struct hv_device *device,
max_luns_per_target = STORVSC_MAX_LUNS_PER_TARGET;
max_targets = STORVSC_MAX_TARGETS;
max_channels = STORVSC_MAX_CHANNELS;
/*
* On Windows8 and above, we support sub-channels for storage.
* The number of sub-channels offered is based on the number of
* VCPUs in the guest.
*/
max_sub_channels = (num_cpus / storvsc_vcpus_per_sub_channel);
break;
}
if (dev_id->driver_data == SFC_GUID)
scsi_driver.can_queue = (STORVSC_MAX_IO_REQUESTS *
STORVSC_FC_MAX_TARGETS);
scsi_driver.can_queue = (max_outstanding_req_per_channel *
(max_sub_channels + 1));
host = scsi_host_alloc(&scsi_driver,
sizeof(struct hv_host_device));
if (!host)
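
The can_queue change above scales the host queue depth by the expected channel count: one sub-channel per group of guest VCPUs, plus the primary channel. With assumed example values:

#include <stdio.h>

int main(void)
{
	int num_cpus = 8;			   /* num_online_cpus() */
	int vcpus_per_sub_channel = 4;		   /* driver module parameter */
	int max_outstanding_req_per_channel = 186; /* from ring-buffer sizing */

	int max_sub_channels = num_cpus / vcpus_per_sub_channel;  /* 2 */
	int can_queue = max_outstanding_req_per_channel *
			(max_sub_channels + 1);	/* +1: primary channel */

	printf("can_queue = %d\n", can_queue);	/* 558 */
	return 0;
}
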
@ -1780,6 +1845,12 @@ static int storvsc_probe(struct hv_device *device,
/* max cmd length */
host->max_cmd_len = STORVSC_MAX_CMD_LEN;
/*
* set the table size based on the info we got
* from the host.
*/
host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
/* Register the HBA and start the scsi bus scan */
ret = scsi_add_host(host, &device->device);
if (ret != 0)
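
The sg_tablesize assignment above derives the scatter-gather limit from the host-reported max_transfer_bytes instead of a fixed constant. Assuming 4 KiB pages, the arithmetic is simply:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumes 4 KiB pages */

int main(void)
{
	unsigned int max_transfer_bytes = 512 * 1024; /* example host value */
	unsigned int sg_tablesize = max_transfer_bytes >> PAGE_SHIFT;

	printf("sg_tablesize = %u\n", sg_tablesize); /* 128 segments */
	return 0;
}
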
@ -1837,7 +1908,6 @@ static struct hv_driver storvsc_drv = {
static int __init storvsc_drv_init(void)
{
u32 max_outstanding_req_per_channel;
/*
* Divide the ring buffer data size (which is 1 page less
@ -1852,10 +1922,6 @@ static int __init storvsc_drv_init(void)
vmscsi_size_delta,
sizeof(u64)));
if (max_outstanding_req_per_channel <
STORVSC_MAX_IO_REQUESTS)
return -EINVAL;
return vmbus_driver_register(&storvsc_drv);
}
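
storvsc_drv_init() drops the old minimum-requests check: max_outstanding_req_per_channel is now module-scoped so storvsc_probe() can size can_queue from it directly. The ring-buffer arithmetic it relies on, sketched with placeholder constants since the exact per-request packet size is not shown in this hunk:

#include <stdio.h>

#define PAGE_SIZE 4096u
#define ALIGN_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

static unsigned int max_outstanding(unsigned int ringbuffer_size,
				    unsigned int per_request_bytes)
{
	/* one page of the ring buffer is reserved for the ring header */
	unsigned int data_bytes = ringbuffer_size - PAGE_SIZE;

	return data_bytes / ALIGN_UP(per_request_bytes, 8u /* sizeof(u64) */);
}

int main(void)
{
	/* 256 KiB ring and ~1.3 KiB per request are assumed values */
	printf("%u requests/channel\n", max_outstanding(256 * 1024, 1344));
	return 0;
}
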

View File

@ -676,7 +676,6 @@ static struct platform_driver sun3_scsi_driver = {
.remove = __exit_p(sun3_scsi_remove),
.driver = {
.name = DRV_MODULE_NAME,
.owner = THIS_MODULE,
},
};

View File

@ -214,8 +214,6 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
struct ufs_qcom_host *host = hba->priv;
struct phy *phy = host->generic_phy;
int ret = 0;
u8 major;
u16 minor, step;
bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
? true : false;
@ -224,8 +222,6 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
/* provide 1ms delay to let the reset pulse propagate */
usleep_range(1000, 1100);
ufs_qcom_get_controller_revision(hba, &major, &minor, &step);
ufs_qcom_phy_save_controller_version(phy, major, minor, step);
ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
if (ret) {
dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
@ -698,16 +694,24 @@ out:
*/
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
u8 major;
u16 minor, step;
struct ufs_qcom_host *host = hba->priv;
ufs_qcom_get_controller_revision(hba, &major, &minor, &step);
if (host->hw_ver.major == 0x1)
hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS;
/*
* TBD
* here we should be advertising controller quirks according to
* controller version.
*/
if (host->hw_ver.major >= 0x2) {
if (!ufs_qcom_cap_qunipro(host))
/* Legacy UniPro mode still needs the following quirks */
hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS;
}
}
static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = hba->priv;
if (host->hw_ver.major >= 0x2)
host->caps = UFS_QCOM_CAP_QUNIPRO;
}
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
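
Taken together, the two ufs-qcom hunks above encode a version-gated policy: v1 controllers always get the DME-command delay quirk, while v2+ controllers advertise QUniPro capability and fall back to the quirk only in legacy UniPro mode. An illustrative standalone encoding; the bit values and version struct mirror the patch but stand alone here:

#include <stdint.h>

#define BIT(n)				(1u << (n))
#define CAP_QUNIPRO			BIT(0)
#define QUIRK_DELAY_BEFORE_DME_CMDS	BIT(0)

struct hw_version { uint16_t step; uint16_t minor; uint8_t major; };

static uint32_t pick_quirks(const struct hw_version *v, uint32_t caps)
{
	uint32_t quirks = 0;

	if (v->major == 0x1)
		quirks |= QUIRK_DELAY_BEFORE_DME_CMDS;
	else if (v->major >= 0x2 && !(caps & CAP_QUNIPRO))
		/* legacy UniPro mode still needs the DME delay */
		quirks |= QUIRK_DELAY_BEFORE_DME_CMDS;

	return quirks;
}
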
@ -929,6 +933,13 @@ static int ufs_qcom_init(struct ufs_hba *hba)
if (err)
goto out_host_free;
ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
&host->hw_ver.minor, &host->hw_ver.step);
/* update phy revision information before calling phy_init() */
ufs_qcom_phy_save_controller_version(host->generic_phy,
host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
phy_init(host->generic_phy);
err = phy_power_on(host->generic_phy);
if (err)
@ -938,6 +949,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
if (err)
goto out_disable_phy;
ufs_qcom_set_caps(hba);
ufs_qcom_advertise_quirks(hba);
hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_CLK_SCALING;

View File

@ -151,7 +151,23 @@ struct ufs_qcom_bus_vote {
struct device_attribute max_bus_bw;
};
/* Host controller hardware version: major.minor.step */
struct ufs_hw_version {
u16 step;
u16 minor;
u8 major;
};
struct ufs_qcom_host {
/*
* Set this capability if the host controller supports QUniPro mode
* and the driver wants the host controller to operate in that mode.
* Note: this capability is enabled by default whenever the host
* controller supports QUniPro mode.
*/
#define UFS_QCOM_CAP_QUNIPRO UFS_BIT(0)
u32 caps;
struct phy *generic_phy;
struct ufs_hba *hba;
struct ufs_qcom_bus_vote bus_vote;
@ -161,10 +177,20 @@ struct ufs_qcom_host {
struct clk *rx_l1_sync_clk;
struct clk *tx_l1_sync_clk;
bool is_lane_clks_enabled;
struct ufs_hw_version hw_ver;
};
#define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba)
#define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba)
#define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)
static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host)
{
return host->caps & UFS_QCOM_CAP_QUNIPRO;
}
#endif /* UFS_QCOM_H_ */

View File

@ -183,6 +183,7 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
@ -972,6 +973,8 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
ufshcd_hold(hba, false);
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
spin_unlock_irqrestore(hba->host->host_lock, flags);
@ -2058,6 +2061,37 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
return ret;
}
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
{
#define MIN_DELAY_BEFORE_DME_CMDS_US 1000
unsigned long min_sleep_time_us;
if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
return;
/*
* last_dme_cmd_tstamp will be 0 only for the first call to
* this function
*/
if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
} else {
unsigned long delta =
(unsigned long) ktime_to_us(
ktime_sub(ktime_get(),
hba->last_dme_cmd_tstamp));
if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
min_sleep_time_us =
MIN_DELAY_BEFORE_DME_CMDS_US - delta;
else
return; /* no more delay required */
}
/* allow sleep for extra 50us if needed */
usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
}
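
ufshcd_add_delay_before_dme_cmd() enforces a minimum gap since the previous DME command, sleeping only for the remainder. A userspace analogue of the same pacing pattern, with clock_gettime(CLOCK_MONOTONIC) standing in for ktime_get() and usleep() for usleep_range():

#include <time.h>
#include <unistd.h>

#define MIN_GAP_US 1000L

static struct timespec last_cmd;	/* all-zero until the first command */

static void pace_before_cmd(void)
{
	struct timespec now;
	long delta_us;

	if (last_cmd.tv_sec == 0 && last_cmd.tv_nsec == 0) {
		usleep(MIN_GAP_US);	/* first call: apply the full delay */
		return;
	}
	clock_gettime(CLOCK_MONOTONIC, &now);
	delta_us = (now.tv_sec - last_cmd.tv_sec) * 1000000L +
		   (now.tv_nsec - last_cmd.tv_nsec) / 1000L;
	if (delta_us < MIN_GAP_US)
		usleep(MIN_GAP_US - delta_us);
	/* callers are expected to refresh last_cmd when a command
	 * completes, mirroring how the driver updates last_dme_cmd_tstamp */
}
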
/**
* ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
* @hba: per adapter instance
@ -2157,6 +2191,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
mutex_lock(&hba->uic_cmd_mutex);
init_completion(&uic_async_done);
ufshcd_add_delay_before_dme_cmd(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
hba->uic_async_done = &uic_async_done;

View File

@ -366,6 +366,7 @@ struct ufs_init_prefetch {
* @saved_err: sticky error mask
* @saved_uic_err: sticky UIC error mask
* @dev_cmd: ufs device management command information
* @last_dme_cmd_tstamp: time stamp of the last completed DME command
* @auto_bkops_enabled: to track whether bkops is enabled in device
* @vreg_info: UFS device voltage regulator information
* @clk_list_head: UFS host controller clocks list node head
@ -416,6 +417,13 @@ struct ufs_hba {
unsigned int irq;
bool is_irq_enabled;
/*
* A delay before each DME command is required, as the UniPro
* layer has shown instabilities.
*/
#define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS UFS_BIT(0)
unsigned int quirks; /* Deviations from standard UFSHCI spec. */
wait_queue_head_t tm_wq;
wait_queue_head_t tm_tag_wq;
@ -446,6 +454,7 @@ struct ufs_hba {
/* Device management request data */
struct ufs_dev_cmd dev_cmd;
ktime_t last_dme_cmd_tstamp;
/* Keeps information of the UFS device connected to this host */
struct ufs_dev_info dev_info;

View File

@ -10,7 +10,7 @@ enum string_size_units {
STRING_UNITS_2, /* use binary powers of 2^10 */
};
void string_get_size(u64 size, enum string_size_units units,
void string_get_size(u64 size, u64 blk_size, enum string_size_units units,
char *buf, int len);
#define UNESCAPE_SPACE 0x01

View File

@ -135,6 +135,7 @@ enum fc_vport_state {
#define FC_PORTSPEED_40GBIT 0x100
#define FC_PORTSPEED_50GBIT 0x200
#define FC_PORTSPEED_100GBIT 0x400
#define FC_PORTSPEED_25GBIT 0x800
#define FC_PORTSPEED_NOT_NEGOTIATED (1 << 15) /* Speed not established */
/*

View File

@ -4,6 +4,7 @@
* Copyright 31 August 2008 James Bottomley
* Copyright (C) 2013, Intel Corporation
*/
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/export.h>
@ -14,7 +15,8 @@
/**
* string_get_size - get the size in the specified units
* @size: The size to be converted
* @size: The size to be converted in blocks
* @blk_size: Size of the block (use 1 for size in bytes)
* @units: units to use (powers of 1000 or 1024)
* @buf: buffer to format to
* @len: length of buffer
@ -24,14 +26,14 @@
* at least 9 bytes and will always be zero terminated.
*
*/
void string_get_size(u64 size, const enum string_size_units units,
void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
char *buf, int len)
{
static const char *const units_10[] = {
"B", "kB", "MB", "GB", "TB", "PB", "EB"
"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"
};
static const char *const units_2[] = {
"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"
"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"
};
static const char *const *const units_str[] = {
[STRING_UNITS_10] = units_10,
@ -42,31 +44,57 @@ void string_get_size(u64 size, const enum string_size_units units,
[STRING_UNITS_2] = 1024,
};
int i, j;
u32 remainder = 0, sf_cap;
u32 remainder = 0, sf_cap, exp;
char tmp[8];
const char *unit;
tmp[0] = '\0';
i = 0;
if (size >= divisor[units]) {
while (size >= divisor[units]) {
remainder = do_div(size, divisor[units]);
i++;
}
if (!size)
goto out;
sf_cap = size;
for (j = 0; sf_cap*10 < 1000; j++)
sf_cap *= 10;
if (j) {
remainder *= 1000;
remainder /= divisor[units];
snprintf(tmp, sizeof(tmp), ".%03u", remainder);
tmp[j+1] = '\0';
}
while (blk_size >= divisor[units]) {
remainder = do_div(blk_size, divisor[units]);
i++;
}
exp = divisor[units] / (u32)blk_size;
if (size >= exp) {
remainder = do_div(size, divisor[units]);
remainder *= blk_size;
i++;
} else {
remainder *= size;
}
size *= blk_size;
size += remainder / divisor[units];
remainder %= divisor[units];
while (size >= divisor[units]) {
remainder = do_div(size, divisor[units]);
i++;
}
sf_cap = size;
for (j = 0; sf_cap*10 < 1000; j++)
sf_cap *= 10;
if (j) {
remainder *= 1000;
remainder /= divisor[units];
snprintf(tmp, sizeof(tmp), ".%03u", remainder);
tmp[j+1] = '\0';
}
out:
if (i >= ARRAY_SIZE(units_2))
unit = "UNK";
else
unit = units_str[units][i];
snprintf(buf, len, "%u%s %s", (u32)size,
tmp, units_str[units][i]);
tmp, unit);
}
EXPORT_SYMBOL(string_get_size);
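
The reworked helper scales blk_size into the unit table first, then folds the residual block-size factor into size, so the product is formed only after both operands have been reduced (avoiding u64 overflow for very large devices) while remainders are carried so three significant figures survive. A standalone re-implementation of the flow for decimal units only; do_div() is replaced by plain division and all names are local to this sketch:

#include <stdint.h>
#include <stdio.h>

static void get_size(uint64_t size, uint64_t blk_size, char *buf, int len)
{
	static const char *const units[] =
		{ "B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB" };
	const uint64_t div = 1000;	/* STRING_UNITS_10 only, for brevity */
	uint32_t remainder = 0, sf_cap, exp;
	const char *unit;
	char tmp[8] = "";
	int i = 0, j;

	if (!size)
		goto out;

	/* stage 1: scale blk_size into the unit table */
	while (blk_size >= div) {
		remainder = blk_size % div;
		blk_size /= div;
		i++;
	}

	/* stage 2: fold the residual block-size factor into size,
	 * carrying the remainder so precision is not lost */
	exp = div / (uint32_t)blk_size;
	if (size >= exp) {
		remainder = size % div;
		size /= div;
		remainder *= blk_size;
		i++;
	} else {
		remainder *= size;
	}
	size *= blk_size;
	size += remainder / div;
	remainder %= div;

	while (size >= div) {
		remainder = size % div;
		size /= div;
		i++;
	}

	/* keep three significant figures */
	sf_cap = size;
	for (j = 0; sf_cap * 10 < 1000; j++)
		sf_cap *= 10;
	if (j) {
		remainder = remainder * 1000 / (uint32_t)div;
		snprintf(tmp, sizeof(tmp), ".%03u", remainder);
		tmp[j + 1] = '\0';
	}
out:
	unit = i < (int)(sizeof(units) / sizeof(units[0])) ? units[i] : "UNK";
	snprintf(buf, len, "%u%s %s", (uint32_t)size, tmp, unit);
}

int main(void)
{
	char buf[16];

	get_size(3907029168ULL, 512, buf, sizeof(buf));
	printf("%s\n", buf);	/* "2.00 TB" for a 2 TB disk in 512 B sectors */
	return 0;
}
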