SCSI misc on 20220113

This series consists of the usual driver updates (ufs, pm80xx, lpfc,
 mpi3mr, mpt3sas, hisi_sas, libsas) and minor updates and bug fixes.
 The most impactful change is likely the switch from GFP_DMA to
 GFP_KERNEL in a bunch of drivers, but even that shouldn't affect too
 many people.
 
 Signed-off-by: James E.J. Bottomley <jejb@linux.ibm.com>
 -----BEGIN PGP SIGNATURE-----
 
 iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCYeCJZyYcamFtZXMuYm90
 dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pishfMnAQCsERG9
 V4yX8LDpBjD7leIccf+6krJNNWaIWYYkEdxpzQD9FShB7/yDakFq3erW2y5mVqac
 dZ065M0ckE4bxk9uMIE=
 =gPHF
 -----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "This series consists of the usual driver updates (ufs, pm80xx, lpfc,
  mpi3mr, mpt3sas, hisi_sas, libsas) and minor updates and bug fixes.

  The most impactful change is likely the switch from GFP_DMA to
  GFP_KERNEL in a bunch of drivers, but even that shouldn't affect too
  many people"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (121 commits)
  scsi: mpi3mr: Bump driver version to 8.0.0.61.0
  scsi: mpi3mr: Fixes around reply request queues
  scsi: mpi3mr: Enhanced Task Management Support Reply handling
  scsi: mpi3mr: Use TM response codes from MPI3 headers
  scsi: mpi3mr: Add io_uring interface support in I/O-polled mode
  scsi: mpi3mr: Print cable mngnt and temp threshold events
  scsi: mpi3mr: Support Prepare for Reset event
  scsi: mpi3mr: Add Event acknowledgment logic
  scsi: mpi3mr: Gracefully handle online FW update operation
  scsi: mpi3mr: Detect async reset that occurred in firmware
  scsi: mpi3mr: Add IOC reinit function
  scsi: mpi3mr: Handle offline FW activation in graceful manner
  scsi: mpi3mr: Code refactor of IOC init - part2
  scsi: mpi3mr: Code refactor of IOC init - part1
  scsi: mpi3mr: Fault IOC when internal command gets timeout
  scsi: mpi3mr: Display IOC firmware package version
  scsi: mpi3mr: Handle unaligned PLL in unmap cmnds
  scsi: mpi3mr: Increase internal cmnds timeout to 60s
  scsi: mpi3mr: Do access status validation before adding devices
  scsi: mpi3mr: Add support for PCIe Managed Switch SES device
  ...
Linus Torvalds 2022-01-14 14:37:34 +01:00
commit e1a7aa25ff
92 changed files with 3727 additions and 1641 deletions
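
A change repeated across many of the hunks below is dropping GFP_DMA from driver allocations. A minimal before/after sketch (illustrative, not taken from the tree): dma_alloc_coherent() already honors the device's dma_mask on its own, so the zone flag was redundant there, and the kmalloc() users go through the DMA mapping API, which handles addressing limits itself.

	/* before: allocations forced into the small ZONE_DMA region */
	buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
	dma.virt = dma_alloc_coherent(&pdev->dev, dma.size, &dma.phys, GFP_DMA);

	/* after: plain kernel memory; addressing limits come from dma_mask */
	buffer = kmalloc(512, GFP_KERNEL);
	dma.virt = dma_alloc_coherent(&pdev->dev, dma.size, &dma.phys, GFP_KERNEL);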


@ -7081,9 +7081,7 @@ S: Maintained
F: drivers/mmc/host/cqhci*
EMULEX 10Gbps iSCSI - OneConnect DRIVER
M: Subbu Seetharaman <subbu.seetharaman@broadcom.com>
M: Ketan Mukadam <ketan.mukadam@broadcom.com>
M: Jitendra Bhivare <jitendra.bhivare@broadcom.com>
L: linux-scsi@vger.kernel.org
S: Supported
W: http://www.broadcom.com


@ -163,27 +163,19 @@ EXPORT_SYMBOL(blk_pre_runtime_resume);
/**
* blk_post_runtime_resume - Post runtime resume processing
* @q: the queue of the device
* @err: return value of the device's runtime_resume function
*
* Description:
* Update the queue's runtime status according to the return value of the
* device's runtime_resume function. If the resume was successful, call
* blk_set_runtime_active() to do the real work of restarting the queue.
* For historical reasons, this routine merely calls blk_set_runtime_active()
* to do the real work of restarting the queue. It does this regardless of
* whether the device's runtime-resume succeeded; even if it failed the
* driver or error handler will need to communicate with the device.
*
* This function should be called near the end of the device's
* runtime_resume callback.
*/
void blk_post_runtime_resume(struct request_queue *q, int err)
void blk_post_runtime_resume(struct request_queue *q)
{
if (!q->dev)
return;
if (!err) {
blk_set_runtime_active(q);
} else {
spin_lock_irq(&q->queue_lock);
q->rpm_status = RPM_SUSPENDED;
spin_unlock_irq(&q->queue_lock);
}
blk_set_runtime_active(q);
}
EXPORT_SYMBOL(blk_post_runtime_resume);
@ -201,7 +193,7 @@ EXPORT_SYMBOL(blk_post_runtime_resume);
* runtime PM status and re-enable peeking requests from the queue. It
* should be called before first request is added to the queue.
*
* This function is also called by blk_post_runtime_resume() for successful
* This function is also called by blk_post_runtime_resume() for
* runtime resumes. It does everything necessary to restart the queue.
*/
void blk_set_runtime_active(struct request_queue *q)
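
The hunk above drops the status argument from blk_post_runtime_resume(): the queue is reactivated regardless of whether the resume worked, since even a failed device must stay reachable by the driver or error handler. A hypothetical caller showing the new calling convention (example_wake_hardware() is an invented placeholder):

	static int example_runtime_resume(struct device *dev)
	{
		struct scsi_device *sdev = to_scsi_device(dev);
		int err = example_wake_hardware(sdev);	/* hypothetical helper */

		/* was blk_post_runtime_resume(sdev->request_queue, err) */
		blk_post_runtime_resume(sdev->request_queue);
		return err;
	}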


@ -1274,8 +1274,6 @@ mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req,
static int
mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag)
{
int r = 0;
/* return if in use */
if (CHIPREG_READ32(&ioc->chip->Doorbell)
& MPI_DOORBELL_ACTIVE)
@ -1289,9 +1287,9 @@ mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int slee
(access_control_value<<12)));
/* Wait for IOC to clear Doorbell Status bit */
if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
if (WaitForDoorbellAck(ioc, 5, sleepFlag) < 0)
return -2;
}else
else
return 0;
}


@ -904,13 +904,11 @@ static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struc
/**
* inia100_queue_lck - queue command with host
* @cmd: Command block
* @done: Completion function
*
* Called by the mid layer to queue a command. Process the command
* block, build the host specific scb structures and if there is room
* queue the command down to the controller
*/
static int inia100_queue_lck(struct scsi_cmnd *cmd)
{
struct orc_scb *scb;


@ -614,7 +614,6 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
/**
* atp870u_queuecommand_lck - Queue SCSI command
* @req_p: request block
* @done: completion function
*
* Queue a command to the ATP queue. Called with the host lock held.
*/


@ -981,7 +981,7 @@ const struct attribute_group *bfad_im_host_groups[] = {
NULL
};
struct attribute *bfad_im_vport_attrs[] = {
static struct attribute *bfad_im_vport_attrs[] = {
&dev_attr_serial_number.attr,
&dev_attr_model.attr,
&dev_attr_model_description.attr,


@ -239,7 +239,7 @@ ch_read_element_status(scsi_changer *ch, u_int elem, char *data)
u_char *buffer;
int result;
buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
buffer = kmalloc(512, GFP_KERNEL);
if(!buffer)
return -ENOMEM;
@ -297,7 +297,7 @@ ch_readconfig(scsi_changer *ch)
int result,id,lun,i;
u_int elem;
buffer = kzalloc(512, GFP_KERNEL | GFP_DMA);
buffer = kzalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@ -783,7 +783,7 @@ static long ch_ioctl(struct file *file,
return -EINVAL;
elem = ch->firsts[cge.cge_type] + cge.cge_unit;
buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
buffer = kmalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
mutex_lock(&ch->lock);


@ -946,7 +946,6 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
* layer, invoke 'done' on completion
*
* @cmd: pointer to scsi command object
* @done: function pointer to be invoked on completion
*
* Returns 1 if the adapter (host) is busy, else returns 0. One
* reason for an adapter to be busy is that the number
@ -959,7 +958,7 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
* Locks: struct Scsi_Host::host_lock held on entry (with "irqsave")
* and is expected to be held on return.
*
**/
*/
static int dc395x_queue_command_lck(struct scsi_cmnd *cmd)
{
void (*done)(struct scsi_cmnd *) = scsi_done;
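
Several drivers in this series lose a stale @done line from their kernel-doc because the queuecommand_lck() helpers no longer receive a completion-function argument; completion always goes through the mid layer's scsi_done(), as the dc395x hunk above shows. A hedged sketch of the pattern (names invented; DEF_SCSI_QCMD() generates the host-lock-taking wrapper):

	static int example_queue_lck(struct scsi_cmnd *cmd)
	{
		/* build and issue the controller-specific request here */
		return 0;	/* or SCSI_MLQUEUE_HOST_BUSY if full */
	}

	static DEF_SCSI_QCMD(example_queue)

	/* the interrupt handler later completes with scsi_done(cmd) */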


@ -261,7 +261,7 @@ efct_firmware_write(struct efct *efct, const u8 *buf, size_t buf_len,
dma.size = FW_WRITE_BUFSIZE;
dma.virt = dma_alloc_coherent(&efct->pci->dev,
dma.size, &dma.phys, GFP_DMA);
dma.size, &dma.phys, GFP_KERNEL);
if (!dma.virt)
return -ENOMEM;


@ -516,7 +516,7 @@ efct_hw_setup_io(struct efct_hw *hw)
dma = &hw->xfer_rdy;
dma->size = sizeof(struct fcp_txrdy) * hw->config.n_io;
dma->virt = dma_alloc_coherent(&efct->pci->dev,
dma->size, &dma->phys, GFP_DMA);
dma->size, &dma->phys, GFP_KERNEL);
if (!dma->virt)
return -ENOMEM;
}
@ -562,7 +562,7 @@ efct_hw_setup_io(struct efct_hw *hw)
sizeof(struct sli4_sge);
dma->virt = dma_alloc_coherent(&efct->pci->dev,
dma->size, &dma->phys,
GFP_DMA);
GFP_KERNEL);
if (!dma->virt) {
efc_log_err(hw->os, "dma_alloc fail %d\n", i);
memset(&io->def_sgl, 0,
@ -618,7 +618,7 @@ efct_hw_init_prereg_io(struct efct_hw *hw)
memset(&req, 0, sizeof(struct efc_dma));
req.size = 32 + sgls_per_request * 16;
req.virt = dma_alloc_coherent(&efct->pci->dev, req.size, &req.phys,
GFP_DMA);
GFP_KERNEL);
if (!req.virt) {
kfree(sgls);
return -ENOMEM;
@ -1063,7 +1063,7 @@ efct_hw_init(struct efct_hw *hw)
dma = &hw->loop_map;
dma->size = SLI4_MIN_LOOP_MAP_BYTES;
dma->virt = dma_alloc_coherent(&hw->os->pci->dev, dma->size, &dma->phys,
GFP_DMA);
GFP_KERNEL);
if (!dma->virt)
return -EIO;
@ -1192,7 +1192,7 @@ efct_hw_rx_buffer_alloc(struct efct_hw *hw, u32 rqindex, u32 count,
prq->dma.virt = dma_alloc_coherent(&efct->pci->dev,
prq->dma.size,
&prq->dma.phys,
GFP_DMA);
GFP_KERNEL);
if (!prq->dma.virt) {
efc_log_err(hw->os, "DMA allocation failed\n");
kfree(rq_buf);


@ -48,7 +48,7 @@ efct_io_pool_create(struct efct *efct, u32 num_sgl)
io->rspbuf.size = SCSI_RSP_BUF_LENGTH;
io->rspbuf.virt = dma_alloc_coherent(&efct->pci->dev,
io->rspbuf.size,
&io->rspbuf.phys, GFP_DMA);
&io->rspbuf.phys, GFP_KERNEL);
if (!io->rspbuf.virt) {
efc_log_err(efct, "dma_alloc rspbuf failed\n");
efct_io_pool_free(io_pool);


@ -179,7 +179,7 @@ efc_nport_alloc_read_sparm64(struct efc *efc, struct efc_nport *nport)
nport->dma.size = EFC_SPARAM_DMA_SZ;
nport->dma.virt = dma_alloc_coherent(&efc->pci->dev,
nport->dma.size, &nport->dma.phys,
GFP_DMA);
GFP_KERNEL);
if (!nport->dma.virt) {
efc_log_err(efc, "Failed to allocate DMA memory\n");
efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
@ -466,7 +466,7 @@ efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf)
domain->dma.size = EFC_SPARAM_DMA_SZ;
domain->dma.virt = dma_alloc_coherent(&efc->pci->dev,
domain->dma.size,
&domain->dma.phys, GFP_DMA);
&domain->dma.phys, GFP_KERNEL);
if (!domain->dma.virt) {
efc_log_err(efc, "Failed to allocate DMA memory\n");
return -EIO;


@ -71,7 +71,7 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
/* now allocate DMA for request and response */
els->io.req.size = reqlen;
els->io.req.virt = dma_alloc_coherent(&efc->pci->dev, els->io.req.size,
&els->io.req.phys, GFP_DMA);
&els->io.req.phys, GFP_KERNEL);
if (!els->io.req.virt) {
mempool_free(els, efc->els_io_pool);
spin_unlock_irqrestore(&node->els_ios_lock, flags);
@ -80,7 +80,7 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
els->io.rsp.size = rsplen;
els->io.rsp.virt = dma_alloc_coherent(&efc->pci->dev, els->io.rsp.size,
&els->io.rsp.phys, GFP_DMA);
&els->io.rsp.phys, GFP_KERNEL);
if (!els->io.rsp.virt) {
dma_free_coherent(&efc->pci->dev, els->io.req.size,
els->io.req.virt, els->io.req.phys);


@ -445,7 +445,7 @@ sli_cmd_rq_create_v2(struct sli4 *sli4, u32 num_rqs,
dma->size = payload_size;
dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
&dma->phys, GFP_DMA);
&dma->phys, GFP_KERNEL);
if (!dma->virt)
return -EIO;
@ -508,7 +508,7 @@ __sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype,
q->dma.size = size * n_entries;
q->dma.virt = dma_alloc_coherent(&sli4->pci->dev, q->dma.size,
&q->dma.phys, GFP_DMA);
&q->dma.phys, GFP_KERNEL);
if (!q->dma.virt) {
memset(&q->dma, 0, sizeof(struct efc_dma));
efc_log_err(sli4, "%s allocation failed\n", SLI4_QNAME[qtype]);
@ -849,7 +849,7 @@ static int sli_cmd_cq_set_create(struct sli4 *sli4,
dma->size = payload_size;
dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
&dma->phys, GFP_DMA);
&dma->phys, GFP_KERNEL);
if (!dma->virt)
return -EIO;
@ -4413,7 +4413,7 @@ sli_get_ctrl_attributes(struct sli4 *sli4)
psize = sizeof(struct sli4_rsp_cmn_get_cntl_addl_attributes);
data.size = psize;
data.virt = dma_alloc_coherent(&sli4->pci->dev, data.size,
&data.phys, GFP_DMA);
&data.phys, GFP_KERNEL);
if (!data.virt) {
memset(&data, 0, sizeof(struct efc_dma));
efc_log_err(sli4, "Failed to allocate memory for GET_CNTL_ADDL_ATTR\n");
@ -4653,7 +4653,7 @@ sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev,
*/
sli4->bmbx.size = SLI4_BMBX_SIZE + sizeof(struct sli4_mcqe);
sli4->bmbx.virt = dma_alloc_coherent(&pdev->dev, sli4->bmbx.size,
&sli4->bmbx.phys, GFP_DMA);
&sli4->bmbx.phys, GFP_KERNEL);
if (!sli4->bmbx.virt) {
memset(&sli4->bmbx, 0, sizeof(struct efc_dma));
efc_log_err(sli4, "bootstrap mailbox allocation failed\n");
@ -4674,7 +4674,7 @@ sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev,
sli4->vpd_data.virt = dma_alloc_coherent(&pdev->dev,
sli4->vpd_data.size,
&sli4->vpd_data.phys,
GFP_DMA);
GFP_KERNEL);
if (!sli4->vpd_data.virt) {
memset(&sli4->vpd_data, 0, sizeof(struct efc_dma));
/* Note that failure isn't fatal in this specific case */
@ -5070,7 +5070,7 @@ sli_cmd_post_hdr_templates(struct sli4 *sli4, void *buf, struct efc_dma *dma,
payload_dma->size = payload_size;
payload_dma->virt = dma_alloc_coherent(&sli4->pci->dev,
payload_dma->size,
&payload_dma->phys, GFP_DMA);
&payload_dma->phys, GFP_KERNEL);
if (!payload_dma->virt) {
memset(payload_dma, 0, sizeof(struct efc_dma));
efc_log_err(sli4, "mbox payload memory allocation fail\n");


@ -8,7 +8,6 @@
#define _HISI_SAS_H_
#include <linux/acpi.h>
#include <linux/async.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/clk.h>
@ -134,6 +133,11 @@ struct hisi_sas_rst {
bool done;
};
struct hisi_sas_internal_abort {
unsigned int flag;
unsigned int tag;
};
#define HISI_SAS_RST_WORK_INIT(r, c) \
{ .hisi_hba = hisi_hba, \
.completion = &c, \
@ -154,6 +158,7 @@ enum hisi_sas_bit_err_type {
enum hisi_sas_phy_event {
HISI_PHYE_PHY_UP = 0U,
HISI_PHYE_LINK_RESET,
HISI_PHYE_PHY_UP_PM,
HISI_PHYES_NUM,
};


@ -158,7 +158,7 @@ static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
void *bitmap = hisi_hba->slot_index_tags;
clear_bit(slot_idx, bitmap);
__clear_bit(slot_idx, bitmap);
}
static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
@ -175,7 +175,7 @@ static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
void *bitmap = hisi_hba->slot_index_tags;
set_bit(slot_idx, bitmap);
__set_bit(slot_idx, bitmap);
}
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
@ -206,14 +206,6 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
return index;
}
static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
int i;
for (i = 0; i < hisi_hba->slot_index_count; ++i)
hisi_sas_slot_index_clear(hisi_hba, i);
}
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
struct hisi_sas_slot *slot)
{
@ -273,11 +265,11 @@ static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
}
static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot,
int device_id, int abort_flag, int tag_to_abort)
struct hisi_sas_internal_abort *abort,
struct hisi_sas_slot *slot, int device_id)
{
hisi_hba->hw->prep_abort(hisi_hba, slot,
device_id, abort_flag, tag_to_abort);
device_id, abort->flag, abort->tag);
}
static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
@ -403,26 +395,122 @@ err_out_dif_dma_unmap:
return rc;
}
static int hisi_sas_task_prep(struct sas_task *task,
struct hisi_sas_dq **dq_pointer,
bool is_tmf, struct hisi_sas_tmf_task *tmf,
int *pass)
static
void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot,
struct hisi_sas_dq *dq,
struct hisi_sas_device *sas_dev,
struct hisi_sas_internal_abort *abort,
struct hisi_sas_tmf_task *tmf)
{
struct domain_device *device = task->dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_sas_port *port;
struct hisi_sas_slot *slot;
struct hisi_sas_cmd_hdr *cmd_hdr_base;
struct asd_sas_port *sas_port = device->port;
struct device *dev = hisi_hba->dev;
int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
struct scsi_cmnd *scmd = NULL;
struct hisi_sas_dq *dq;
struct hisi_sas_cmd_hdr *cmd_hdr_base;
int dlvry_queue_slot, dlvry_queue;
struct sas_task *task = slot->task;
unsigned long flags;
int wr_q_index;
spin_lock(&dq->lock);
wr_q_index = dq->wr_point;
dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
list_add_tail(&slot->delivery, &dq->list);
spin_unlock(&dq->lock);
spin_lock(&sas_dev->lock);
list_add_tail(&slot->entry, &sas_dev->list);
spin_unlock(&sas_dev->lock);
dlvry_queue = dq->id;
dlvry_queue_slot = wr_q_index;
slot->device_id = sas_dev->device_id;
slot->dlvry_queue = dlvry_queue;
slot->dlvry_queue_slot = dlvry_queue_slot;
cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
slot->tmf = tmf;
slot->is_internal = tmf;
task->lldd_task = slot;
memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
memset(hisi_sas_status_buf_addr_mem(slot), 0,
sizeof(struct hisi_sas_err_record));
switch (task->task_proto) {
case SAS_PROTOCOL_SMP:
hisi_sas_task_prep_smp(hisi_hba, slot);
break;
case SAS_PROTOCOL_SSP:
hisi_sas_task_prep_ssp(hisi_hba, slot);
break;
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
hisi_sas_task_prep_ata(hisi_hba, slot);
break;
case SAS_PROTOCOL_NONE:
if (abort) {
hisi_sas_task_prep_abort(hisi_hba, abort, slot, sas_dev->device_id);
break;
}
fallthrough;
default:
dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n",
task->task_proto);
break;
}
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
WRITE_ONCE(slot->ready, 1);
spin_lock(&dq->lock);
hisi_hba->hw->start_delivery(dq);
spin_unlock(&dq->lock);
}
static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
struct hisi_sas_tmf_task *tmf)
{
int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
struct domain_device *device = task->dev;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct scsi_cmnd *scmd = NULL;
struct hisi_sas_dq *dq = NULL;
struct hisi_sas_port *port;
struct hisi_hba *hisi_hba;
struct hisi_sas_slot *slot;
struct device *dev;
int rc;
if (!sas_port) {
struct task_status_struct *ts = &task->task_status;
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
/*
* libsas will use dev->port, should
* not call task_done for sata
*/
if (device->dev_type != SAS_SATA_DEV)
task->task_done(task);
return -ECOMM;
}
hisi_hba = dev_to_hisi_hba(device);
dev = hisi_hba->dev;
if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
if (!gfpflags_allow_blocking(gfp_flags))
return -EINVAL;
down(&hisi_hba->sem);
up(&hisi_hba->sem);
}
if (DEV_IS_GONE(sas_dev)) {
if (sas_dev)
dev_info(dev, "task prep: device %d not ready\n",
@ -451,13 +539,13 @@ static int hisi_sas_task_prep(struct sas_task *task,
blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
*dq_pointer = dq = &hisi_hba->dq[dq_index];
dq = &hisi_hba->dq[dq_index];
} else {
struct Scsi_Host *shost = hisi_hba->shost;
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
int queue = qmap->mq_map[raw_smp_processor_id()];
*dq_pointer = dq = &hisi_hba->dq[queue];
dq = &hisi_hba->dq[queue];
}
port = to_hisi_sas_port(sas_port);
@ -489,63 +577,17 @@ static int hisi_sas_task_prep(struct sas_task *task,
if (rc < 0)
goto err_out_dif_dma_unmap;
slot_idx = rc;
slot = &hisi_hba->slot_info[slot_idx];
spin_lock(&dq->lock);
wr_q_index = dq->wr_point;
dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
list_add_tail(&slot->delivery, &dq->list);
spin_unlock(&dq->lock);
spin_lock(&sas_dev->lock);
list_add_tail(&slot->entry, &sas_dev->list);
spin_unlock(&sas_dev->lock);
dlvry_queue = dq->id;
dlvry_queue_slot = wr_q_index;
slot->device_id = sas_dev->device_id;
slot = &hisi_hba->slot_info[rc];
slot->n_elem = n_elem;
slot->n_elem_dif = n_elem_dif;
slot->dlvry_queue = dlvry_queue;
slot->dlvry_queue_slot = dlvry_queue_slot;
cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
slot->task = task;
slot->port = port;
slot->tmf = tmf;
slot->is_internal = is_tmf;
task->lldd_task = slot;
slot->is_internal = tmf;
memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
memset(hisi_sas_status_buf_addr_mem(slot), 0,
sizeof(struct hisi_sas_err_record));
switch (task->task_proto) {
case SAS_PROTOCOL_SMP:
hisi_sas_task_prep_smp(hisi_hba, slot);
break;
case SAS_PROTOCOL_SSP:
hisi_sas_task_prep_ssp(hisi_hba, slot);
break;
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
hisi_sas_task_prep_ata(hisi_hba, slot);
break;
default:
dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
task->task_proto);
break;
}
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
++(*pass);
WRITE_ONCE(slot->ready, 1);
/* protect task_prep and start_delivery sequence */
hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, NULL, tmf);
return 0;
@ -554,59 +596,9 @@ err_out_dif_dma_unmap:
hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
err_out_dma_unmap:
hisi_sas_dma_unmap(hisi_hba, task, n_elem,
n_elem_req);
n_elem_req);
prep_out:
dev_err(dev, "task prep: failed[%d]!\n", rc);
return rc;
}
static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
bool is_tmf, struct hisi_sas_tmf_task *tmf)
{
u32 rc;
u32 pass = 0;
struct hisi_hba *hisi_hba;
struct device *dev;
struct domain_device *device = task->dev;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_dq *dq = NULL;
if (!sas_port) {
struct task_status_struct *ts = &task->task_status;
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
/*
* libsas will use dev->port, should
* not call task_done for sata
*/
if (device->dev_type != SAS_SATA_DEV)
task->task_done(task);
return -ECOMM;
}
hisi_hba = dev_to_hisi_hba(device);
dev = hisi_hba->dev;
if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
if (!gfpflags_allow_blocking(gfp_flags))
return -EINVAL;
down(&hisi_hba->sem);
up(&hisi_hba->sem);
}
/* protect task_prep and start_delivery sequence */
rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
if (rc)
dev_err(dev, "task exec: failed[%d]!\n", rc);
if (likely(pass)) {
spin_lock(&dq->lock);
hisi_hba->hw->start_delivery(dq);
spin_unlock(&dq->lock);
}
dev_err(dev, "task exec: failed[%d]!\n", rc);
return rc;
}
@ -619,12 +611,6 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no,
if (!phy->phy_attached)
return;
if (test_bit(HISI_SAS_PM_BIT, &hisi_hba->flags) &&
!sas_phy->suspended) {
dev_warn(hisi_hba->dev, "phy%d during suspend filtered out\n", phy_no);
return;
}
sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags);
if (sas_phy->phy) {
@ -860,10 +846,11 @@ int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);
static void hisi_sas_phyup_work(struct work_struct *work)
static void hisi_sas_phyup_work_common(struct work_struct *work,
enum hisi_sas_phy_event event)
{
struct hisi_sas_phy *phy =
container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
container_of(work, typeof(*phy), works[event]);
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
int phy_no = sas_phy->id;
@ -874,6 +861,11 @@ static void hisi_sas_phyup_work(struct work_struct *work)
hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL);
}
static void hisi_sas_phyup_work(struct work_struct *work)
{
hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP);
}
static void hisi_sas_linkreset_work(struct work_struct *work)
{
struct hisi_sas_phy *phy =
@ -883,9 +875,21 @@ static void hisi_sas_linkreset_work(struct work_struct *work)
hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}
static void hisi_sas_phyup_pm_work(struct work_struct *work)
{
struct hisi_sas_phy *phy =
container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]);
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct device *dev = hisi_hba->dev;
hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM);
pm_runtime_put_sync(dev);
}
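/*
 * Pairs with the pm_runtime_get_noresume() taken in the v3 hw phy-up
 * interrupt path (see the phy_up_v3_hw() hunk further down): the PM
 * reference is taken in hard-IRQ context and released here in process
 * context, once the common phy-up processing has finished.
 */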
static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
[HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work,
};
bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
@ -917,10 +921,14 @@ void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct device *dev = hisi_hba->dev;
unsigned long flags;
dev_dbg(dev, "phy%d OOB ready\n", phy_no);
if (phy->phy_attached)
spin_lock_irqsave(&phy->lock, flags);
if (phy->phy_attached) {
spin_unlock_irqrestore(&phy->lock, flags);
return;
}
if (!timer_pending(&phy->timer)) {
if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) {
@ -928,13 +936,17 @@ void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
phy->timer.expires = jiffies +
HISI_SAS_WAIT_PHYUP_TIMEOUT;
add_timer(&phy->timer);
} else {
dev_warn(dev, "phy%d failed to come up %d times, giving up\n",
phy_no, phy->wait_phyup_cnt);
phy->wait_phyup_cnt = 0;
spin_unlock_irqrestore(&phy->lock, flags);
return;
}
dev_warn(dev, "phy%d failed to come up %d times, giving up\n",
phy_no, phy->wait_phyup_cnt);
phy->wait_phyup_cnt = 0;
}
spin_unlock_irqrestore(&phy->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
@ -1105,7 +1117,7 @@ static void hisi_sas_dev_gone(struct domain_device *device)
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
return hisi_sas_task_exec(task, gfp_flags, NULL);
}
static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
@ -1156,6 +1168,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
u8 sts = phy->phy_attached;
int ret = 0;
down(&hisi_hba->sem);
phy->reset_completion = &completion;
switch (func) {
@ -1199,6 +1212,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
out:
phy->reset_completion = NULL;
up(&hisi_hba->sem);
return ret;
}
@ -1259,8 +1273,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
add_timer(&task->slow_task->timer);
res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
res = hisi_sas_task_exec(task, GFP_KERNEL, tmf);
if (res) {
del_timer_sync(&task->slow_task->timer);
dev_err(dev, "abort tmf: executing internal task failed: %d\n",
@ -1433,11 +1446,13 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
sas_port = device->port;
port = to_hisi_sas_port(sas_port);
spin_lock(&sas_port->phy_list_lock);
list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
if (state & BIT(sas_phy->id)) {
phy = sas_phy->lldd_phy;
break;
}
spin_unlock(&sas_port->phy_list_lock);
if (phy) {
port->id = phy->port_id;
@ -1514,22 +1529,25 @@ static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
struct ata_link *link;
u8 fis[20] = {0};
u32 state;
int i;
state = hisi_hba->hw->get_phys_state(hisi_hba);
list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
for (i = 0; i < hisi_hba->n_phy; i++) {
if (!(state & BIT(sas_phy->id)))
continue;
if (!(sas_port->phy_mask & BIT(i)))
continue;
ata_for_each_link(link, ap, EDGE) {
int pmp = sata_srst_pmp(link);
tmf_task.phy_id = sas_phy->id;
tmf_task.phy_id = i;
hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
&tmf_task);
if (rc != TMF_RESP_FUNC_COMPLETE) {
dev_err(dev, "phy%d ata reset failed rc=%d\n",
sas_phy->id, rc);
i, rc);
break;
}
}
@ -1581,7 +1599,6 @@ void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
struct Scsi_Host *shost = hisi_hba->shost;
down(&hisi_hba->sem);
hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);
scsi_block_requests(shost);
@ -1606,9 +1623,9 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
if (hisi_hba->reject_stp_links_msk)
hisi_sas_terminate_stp_reject(hisi_hba);
hisi_sas_reset_init_all_devices(hisi_hba);
up(&hisi_hba->sem);
scsi_unblock_requests(shost);
clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
up(&hisi_hba->sem);
hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
}
@ -1619,8 +1636,11 @@ static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
if (!hisi_hba->hw->soft_reset)
return -1;
if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
down(&hisi_hba->sem);
if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
up(&hisi_hba->sem);
return -1;
}
if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);
@ -2021,19 +2041,17 @@ static int hisi_sas_query_task(struct sas_task *task)
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
struct sas_task *task, int abort_flag,
int task_tag, struct hisi_sas_dq *dq)
struct hisi_sas_internal_abort *abort,
struct sas_task *task,
struct hisi_sas_dq *dq)
{
struct domain_device *device = task->dev;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct device *dev = hisi_hba->dev;
struct hisi_sas_port *port;
struct hisi_sas_slot *slot;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_cmd_hdr *cmd_hdr_base;
int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
unsigned long flags;
int wr_q_index;
struct hisi_sas_slot *slot;
int slot_idx;
if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
return -EINVAL;
@ -2044,59 +2062,24 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
port = to_hisi_sas_port(sas_port);
/* simply get a slot and send abort command */
rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
if (rc < 0)
slot_idx = hisi_sas_slot_index_alloc(hisi_hba, NULL);
if (slot_idx < 0)
goto err_out;
slot_idx = rc;
slot = &hisi_hba->slot_info[slot_idx];
spin_lock(&dq->lock);
wr_q_index = dq->wr_point;
dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
list_add_tail(&slot->delivery, &dq->list);
spin_unlock(&dq->lock);
spin_lock(&sas_dev->lock);
list_add_tail(&slot->entry, &sas_dev->list);
spin_unlock(&sas_dev->lock);
dlvry_queue = dq->id;
dlvry_queue_slot = wr_q_index;
slot->device_id = sas_dev->device_id;
slot->n_elem = n_elem;
slot->dlvry_queue = dlvry_queue;
slot->dlvry_queue_slot = dlvry_queue_slot;
cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
slot->n_elem = 0;
slot->task = task;
slot->port = port;
slot->is_internal = true;
task->lldd_task = slot;
memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
memset(hisi_sas_status_buf_addr_mem(slot), 0,
sizeof(struct hisi_sas_err_record));
hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
abort_flag, task_tag);
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
WRITE_ONCE(slot->ready, 1);
/* send abort command to the chip */
spin_lock(&dq->lock);
hisi_hba->hw->start_delivery(dq);
spin_unlock(&dq->lock);
hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, abort, NULL);
return 0;
err_out:
dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
dev_err(dev, "internal abort task prep: failed[%d]!\n", slot_idx);
return rc;
return slot_idx;
}
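/*
 * Internal aborts are now described by struct hisi_sas_internal_abort and
 * issued with task->task_proto = SAS_PROTOCOL_NONE (see
 * _hisi_sas_internal_task_abort() below), which lets hisi_sas_task_deliver()
 * route them through its protocol switch instead of this function
 * open-coding the whole slot-delivery sequence.
 */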
/**
@ -2118,9 +2101,12 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
{
struct sas_task *task;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_sas_internal_abort abort = {
.flag = abort_flag,
.tag = tag,
};
struct device *dev = hisi_hba->dev;
int res;
/*
* The interface is not realized means this HW don't support internal
* abort, or don't need to do internal abort. Then here, we return
@ -2138,14 +2124,14 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
return -ENOMEM;
task->dev = device;
task->task_proto = device->tproto;
task->task_proto = SAS_PROTOCOL_NONE;
task->task_done = hisi_sas_task_done;
task->slow_task->timer.function = hisi_sas_tmf_timedout;
task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT;
add_timer(&task->slow_task->timer);
res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
task, abort_flag, tag, dq);
&abort, task, dq);
if (res) {
del_timer_sync(&task->slow_task->timer);
dev_err(dev, "internal task abort: executing internal task failed: %d\n",
@ -2516,9 +2502,8 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
if (!hisi_hba->breakpoint)
goto err_out;
hisi_hba->slot_index_count = max_command_entries;
s = hisi_hba->slot_index_count / BITS_PER_BYTE;
hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
s = hisi_hba->slot_index_count = max_command_entries;
hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL);
if (!hisi_hba->slot_index_tags)
goto err_out;
@ -2536,7 +2521,6 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
if (!hisi_hba->sata_breakpoint)
goto err_out;
hisi_sas_slot_index_init(hisi_hba);
hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT;
hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
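/*
 * Note the sizing change above: devm_kzalloc() was passed a byte count
 * (slot_index_count / BITS_PER_BYTE), whereas devm_bitmap_zalloc() takes
 * the number of bits directly. The bitmap also comes back zeroed, which is
 * why the explicit hisi_sas_slot_index_init() clearing loop was removed.
 */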


@ -1484,7 +1484,6 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct device *dev = hisi_hba->dev;
del_timer(&phy->timer);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
@ -1561,9 +1560,18 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
}
phy->port_id = port_id;
phy->phy_attached = 1;
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
/* Call pm_runtime_put_sync() with pairs in hisi_sas_phyup_pm_work() */
pm_runtime_get_noresume(dev);
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP_PM);
res = IRQ_HANDLED;
spin_lock(&phy->lock);
/* Delete timer and set phy_attached atomically */
del_timer(&phy->timer);
phy->phy_attached = 1;
spin_unlock(&phy->lock);
end:
if (phy->reset_completion)
complete(phy->reset_completion);
@ -4775,6 +4783,8 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
scsi_scan_host(shost);
pm_runtime_set_autosuspend_delay(dev, 5000);
pm_runtime_use_autosuspend(dev);
/*
* For the situation that there are ATA disks connected with SAS
* controller, it additionally creates ata_port which will affect the
@ -4848,6 +4858,7 @@ static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
int rc;
dev_info(dev, "FLR prepare\n");
down(&hisi_hba->sem);
set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
hisi_sas_controller_reset_prepare(hisi_hba);
@ -4897,6 +4908,8 @@ static int _suspend_v3_hw(struct device *device)
if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
return -1;
dev_warn(dev, "entering suspend state\n");
scsi_block_requests(shost);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
flush_workqueue(hisi_hba->wq);
@ -4912,11 +4925,11 @@ static int _suspend_v3_hw(struct device *device)
hisi_sas_init_mem(hisi_hba);
dev_warn(dev, "entering suspend state\n");
hisi_sas_release_tasks(hisi_hba);
sas_suspend_ha(sha);
dev_warn(dev, "end of suspending controller\n");
return 0;
}
@ -4943,9 +4956,19 @@ static int _resume_v3_hw(struct device *device)
return rc;
}
phys_init_v3_hw(hisi_hba);
sas_resume_ha(sha);
/*
* If a directly-attached disk is removed during suspend, a deadlock
* may occur, as the PHYE_RESUME_TIMEOUT processing will require the
* hisi_hba->device to be active, which can only happen when resume
* completes. So don't wait for the HA event workqueue to drain upon
* resume.
*/
sas_resume_ha_no_sync(sha);
clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
dev_warn(dev, "end of resuming controller\n");
return 0;
}


@ -61,6 +61,7 @@ static void scsi_host_cls_release(struct device *dev)
static struct class shost_class = {
.name = "scsi_host",
.dev_release = scsi_host_cls_release,
.dev_groups = scsi_shost_groups,
};
/**
@ -377,7 +378,7 @@ static struct device_type scsi_host_type = {
struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
{
struct Scsi_Host *shost;
int index, i, j = 0;
int index;
shost = kzalloc(sizeof(struct Scsi_Host) + privsize, GFP_KERNEL);
if (!shost)
@ -483,17 +484,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost->shost_dev.parent = &shost->shost_gendev;
shost->shost_dev.class = &shost_class;
dev_set_name(&shost->shost_dev, "host%d", shost->host_no);
shost->shost_dev.groups = shost->shost_dev_attr_groups;
shost->shost_dev_attr_groups[j++] = &scsi_shost_attr_group;
if (sht->shost_groups) {
for (i = 0; sht->shost_groups[i] &&
j < ARRAY_SIZE(shost->shost_dev_attr_groups);
i++, j++) {
shost->shost_dev_attr_groups[j] =
sht->shost_groups[i];
}
}
WARN_ON_ONCE(j >= ARRAY_SIZE(shost->shost_dev_attr_groups));
shost->shost_dev.groups = sht->shost_groups;
shost->ehandler = kthread_run(scsi_error_handler, shost,
"scsi_eh_%d", shost->host_no);


@ -4354,7 +4354,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
int i, ndevs_to_allocate;
int raid_ctlr_position;
bool physical_device;
DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL);
physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
@ -4368,7 +4367,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
dev_err(&h->pdev->dev, "out of memory\n");
goto out;
}
memset(lunzerobits, 0, sizeof(lunzerobits));
h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */


@ -2602,13 +2602,11 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c
/**
* i91u_queuecommand_lck - Queue a new command if possible
* @cmd: SCSI command block from the mid layer
* @done: Completion handler
*
* Attempts to queue a new command with the host adapter. Will return
* zero if successful or indicate a host busy condition if not (which
* will cause the mid layer to call us again later with the command)
*/
static int i91u_queuecommand_lck(struct scsi_cmnd *cmd)
{
struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata;
@ -2849,7 +2847,8 @@ static int initio_probe_one(struct pci_dev *pdev,
for (; num_scb >= MAX_TARGETS + 3; num_scb--) {
i = num_scb * sizeof(struct scsi_ctrl_blk);
if ((scb = kzalloc(i, GFP_DMA)) != NULL)
scb = kzalloc(i, GFP_KERNEL);
if (scb)
break;
}


@ -8,7 +8,6 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/async.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include "sas_internal.h"


@ -41,12 +41,25 @@ static int sas_queue_event(int event, struct sas_work *work,
return rc;
}
void __sas_drain_work(struct sas_ha_struct *ha)
void sas_queue_deferred_work(struct sas_ha_struct *ha)
{
struct sas_work *sw, *_sw;
int ret;
spin_lock_irq(&ha->lock);
list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
list_del_init(&sw->drain_node);
ret = sas_queue_work(ha, sw);
if (ret != 1) {
pm_runtime_put(ha->dev);
sas_free_event(to_asd_sas_event(&sw->work));
}
}
spin_unlock_irq(&ha->lock);
}
void __sas_drain_work(struct sas_ha_struct *ha)
{
set_bit(SAS_HA_DRAINING, &ha->state);
/* flush submitters */
spin_lock_irq(&ha->lock);
@ -55,16 +68,8 @@ void __sas_drain_work(struct sas_ha_struct *ha)
drain_workqueue(ha->event_q);
drain_workqueue(ha->disco_q);
spin_lock_irq(&ha->lock);
clear_bit(SAS_HA_DRAINING, &ha->state);
list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
list_del_init(&sw->drain_node);
ret = sas_queue_work(ha, sw);
if (ret != 1)
sas_free_event(to_asd_sas_event(&sw->work));
}
spin_unlock_irq(&ha->lock);
sas_queue_deferred_work(ha);
}
int sas_drain_work(struct sas_ha_struct *ha)
@ -104,11 +109,15 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
if (!test_and_clear_bit(ev, &d->pending))
continue;
if (list_empty(&port->phy_list))
spin_lock(&port->phy_list_lock);
if (list_empty(&port->phy_list)) {
spin_unlock(&port->phy_list_lock);
continue;
}
sas_phy = container_of(port->phy_list.next, struct asd_sas_phy,
port_phy_el);
spin_unlock(&port->phy_list_lock);
sas_notify_port_event(sas_phy,
PORTE_BROADCAST_RCVD, GFP_KERNEL);
}
@ -119,19 +128,43 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
static void sas_port_event_worker(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
struct sas_ha_struct *ha = phy->ha;
sas_port_event_fns[ev->event](work);
pm_runtime_put(ha->dev);
sas_free_event(ev);
}
static void sas_phy_event_worker(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
struct sas_ha_struct *ha = phy->ha;
sas_phy_event_fns[ev->event](work);
pm_runtime_put(ha->dev);
sas_free_event(ev);
}
/* defer works of new phys during suspend */
static bool sas_defer_event(struct asd_sas_phy *phy, struct asd_sas_event *ev)
{
struct sas_ha_struct *ha = phy->ha;
unsigned long flags;
bool deferred = false;
spin_lock_irqsave(&ha->lock, flags);
if (test_bit(SAS_HA_RESUMING, &ha->state) && !phy->suspended) {
struct sas_work *sw = &ev->work;
list_add_tail(&sw->drain_node, &ha->defer_q);
deferred = true;
}
spin_unlock_irqrestore(&ha->lock, flags);
return deferred;
}
int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
gfp_t gfp_flags)
{
@ -145,11 +178,19 @@ int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
if (!ev)
return -ENOMEM;
/* Call pm_runtime_put() with pairs in sas_port_event_worker() */
pm_runtime_get_noresume(ha->dev);
INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);
if (sas_defer_event(phy, ev))
return 0;
ret = sas_queue_event(event, &ev->work, ha);
if (ret != 1)
if (ret != 1) {
pm_runtime_put(ha->dev);
sas_free_event(ev);
}
return ret;
}
@ -168,11 +209,19 @@ int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
if (!ev)
return -ENOMEM;
/* Call pm_runtime_put() with pairs in sas_phy_event_worker() */
pm_runtime_get_noresume(ha->dev);
INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);
if (sas_defer_event(phy, ev))
return 0;
ret = sas_queue_event(event, &ev->work, ha);
if (ret != 1)
if (ret != 1) {
pm_runtime_put(ha->dev);
sas_free_event(ev);
}
return ret;
}
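/*
 * PM reference pairing introduced in this file: sas_notify_port_event()
 * and sas_notify_phy_event() call pm_runtime_get_noresume() before
 * queueing or deferring an event; the matching pm_runtime_put() happens in
 * sas_port_event_worker()/sas_phy_event_worker(), or immediately when
 * queueing fails. Events parked by sas_defer_event() keep their reference
 * until sas_queue_deferred_work() requeues or frees them.
 */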


@ -58,7 +58,9 @@ static int smp_execute_task_sg(struct domain_device *dev,
struct sas_task *task = NULL;
struct sas_internal *i =
to_sas_internal(dev->port->ha->core.shost->transportt);
struct sas_ha_struct *ha = dev->port->ha;
pm_runtime_get_sync(ha->dev);
mutex_lock(&dev->ex_dev.cmd_mutex);
for (retry = 0; retry < 3; retry++) {
if (test_bit(SAS_DEV_GONE, &dev->state)) {
@ -131,6 +133,7 @@ static int smp_execute_task_sg(struct domain_device *dev,
}
}
mutex_unlock(&dev->ex_dev.cmd_mutex);
pm_runtime_put_sync(ha->dev);
BUG_ON(retry == 3 && task != NULL);
sas_free_task(task);


@ -362,6 +362,7 @@ void sas_prep_resume_ha(struct sas_ha_struct *ha)
int i;
set_bit(SAS_HA_REGISTERED, &ha->state);
set_bit(SAS_HA_RESUMING, &ha->state);
/* clear out any stale link events/data from the suspension path */
for (i = 0; i < ha->num_phys; i++) {
@ -387,7 +388,31 @@ static int phys_suspended(struct sas_ha_struct *ha)
return rc;
}
void sas_resume_ha(struct sas_ha_struct *ha)
static void sas_resume_insert_broadcast_ha(struct sas_ha_struct *ha)
{
int i;
for (i = 0; i < ha->num_phys; i++) {
struct asd_sas_port *port = ha->sas_port[i];
struct domain_device *dev = port->port_dev;
if (dev && dev_is_expander(dev->dev_type)) {
struct asd_sas_phy *first_phy;
spin_lock(&port->phy_list_lock);
first_phy = list_first_entry_or_null(
&port->phy_list, struct asd_sas_phy,
port_phy_el);
spin_unlock(&port->phy_list_lock);
if (first_phy)
sas_notify_port_event(first_phy,
PORTE_BROADCAST_RCVD, GFP_KERNEL);
}
}
}
static void _sas_resume_ha(struct sas_ha_struct *ha, bool drain)
{
const unsigned long tmo = msecs_to_jiffies(25000);
int i;
@ -417,10 +442,30 @@ void sas_resume_ha(struct sas_ha_struct *ha)
* flush out disks that did not return
*/
scsi_unblock_requests(ha->core.shost);
sas_drain_work(ha);
if (drain)
sas_drain_work(ha);
clear_bit(SAS_HA_RESUMING, &ha->state);
sas_queue_deferred_work(ha);
/* send event PORTE_BROADCAST_RCVD to identify some new inserted
* disks for expander
*/
sas_resume_insert_broadcast_ha(ha);
}
void sas_resume_ha(struct sas_ha_struct *ha)
{
_sas_resume_ha(ha, true);
}
EXPORT_SYMBOL(sas_resume_ha);
/* A no-sync variant, which does not call sas_drain_ha(). */
void sas_resume_ha_no_sync(struct sas_ha_struct *ha)
{
_sas_resume_ha(ha, false);
}
EXPORT_SYMBOL(sas_resume_ha_no_sync);
void sas_suspend_ha(struct sas_ha_struct *ha)
{
int i;


@ -14,6 +14,7 @@
#include <scsi/scsi_transport_sas.h>
#include <scsi/libsas.h>
#include <scsi/sas_ata.h>
#include <linux/pm_runtime.h>
#ifdef pr_fmt
#undef pr_fmt
@ -56,6 +57,7 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha);
void sas_disable_revalidation(struct sas_ha_struct *ha);
void sas_enable_revalidation(struct sas_ha_struct *ha);
void sas_queue_deferred_work(struct sas_ha_struct *ha);
void __sas_drain_work(struct sas_ha_struct *ha);
void sas_deform_port(struct asd_sas_phy *phy, int gone);


@ -37,7 +37,8 @@
static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
{
struct task_status_struct *ts = &task->task_status;
int hs = 0, stat = 0;
enum scsi_host_status hs = DID_OK;
enum exec_status stat = SAS_SAM_STAT_GOOD;
if (ts->resp == SAS_TASK_UNDELIVERED) {
/* transport error */
@ -82,10 +83,10 @@ static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
case SAS_ABORTED_TASK:
hs = DID_ABORT;
break;
case SAM_STAT_CHECK_CONDITION:
case SAS_SAM_STAT_CHECK_CONDITION:
memcpy(sc->sense_buffer, ts->buf,
min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
stat = SAM_STAT_CHECK_CONDITION;
stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
default:
stat = ts->stat;


@ -496,52 +496,50 @@ struct lpfc_cgn_info {
__le32 cgn_alarm_hr[24];
__le32 cgn_alarm_day[LPFC_MAX_CGN_DAYS];
/* Start of congestion statistics */
uint8_t cgn_stat_npm; /* Notifications per minute */
struct_group(cgn_stat,
uint8_t cgn_stat_npm; /* Notifications per minute */
/* Start Time */
uint8_t cgn_stat_month;
uint8_t cgn_stat_day;
uint8_t cgn_stat_year;
uint8_t cgn_stat_hour;
uint8_t cgn_stat_minute;
uint8_t cgn_pad2[2];
/* Start Time */
uint8_t cgn_stat_month;
uint8_t cgn_stat_day;
uint8_t cgn_stat_year;
uint8_t cgn_stat_hour;
uint8_t cgn_stat_minute;
uint8_t cgn_pad2[2];
__le32 cgn_notification;
__le32 cgn_peer_notification;
__le32 link_integ_notification;
__le32 delivery_notification;
__le32 cgn_notification;
__le32 cgn_peer_notification;
__le32 link_integ_notification;
__le32 delivery_notification;
uint8_t cgn_stat_cgn_month; /* Last congestion notification FPIN */
uint8_t cgn_stat_cgn_day;
uint8_t cgn_stat_cgn_year;
uint8_t cgn_stat_cgn_hour;
uint8_t cgn_stat_cgn_min;
uint8_t cgn_stat_cgn_sec;
uint8_t cgn_stat_cgn_month; /* Last congestion notification FPIN */
uint8_t cgn_stat_cgn_day;
uint8_t cgn_stat_cgn_year;
uint8_t cgn_stat_cgn_hour;
uint8_t cgn_stat_cgn_min;
uint8_t cgn_stat_cgn_sec;
uint8_t cgn_stat_peer_month; /* Last peer congestion FPIN */
uint8_t cgn_stat_peer_day;
uint8_t cgn_stat_peer_year;
uint8_t cgn_stat_peer_hour;
uint8_t cgn_stat_peer_min;
uint8_t cgn_stat_peer_sec;
uint8_t cgn_stat_peer_month; /* Last peer congestion FPIN */
uint8_t cgn_stat_peer_day;
uint8_t cgn_stat_peer_year;
uint8_t cgn_stat_peer_hour;
uint8_t cgn_stat_peer_min;
uint8_t cgn_stat_peer_sec;
uint8_t cgn_stat_lnk_month; /* Last link integrity FPIN */
uint8_t cgn_stat_lnk_day;
uint8_t cgn_stat_lnk_year;
uint8_t cgn_stat_lnk_hour;
uint8_t cgn_stat_lnk_min;
uint8_t cgn_stat_lnk_sec;
uint8_t cgn_stat_lnk_month; /* Last link integrity FPIN */
uint8_t cgn_stat_lnk_day;
uint8_t cgn_stat_lnk_year;
uint8_t cgn_stat_lnk_hour;
uint8_t cgn_stat_lnk_min;
uint8_t cgn_stat_lnk_sec;
uint8_t cgn_stat_del_month; /* Last delivery notification FPIN */
uint8_t cgn_stat_del_day;
uint8_t cgn_stat_del_year;
uint8_t cgn_stat_del_hour;
uint8_t cgn_stat_del_min;
uint8_t cgn_stat_del_sec;
#define LPFC_CGN_STAT_SIZE 48
#define LPFC_CGN_DATA_SIZE (sizeof(struct lpfc_cgn_info) - \
LPFC_CGN_STAT_SIZE - sizeof(uint32_t))
uint8_t cgn_stat_del_month; /* Last delivery notification FPIN */
uint8_t cgn_stat_del_day;
uint8_t cgn_stat_del_year;
uint8_t cgn_stat_del_hour;
uint8_t cgn_stat_del_min;
uint8_t cgn_stat_del_sec;
);
__le32 cgn_info_crc;
#define LPFC_CGN_CRC32_MAGIC_NUMBER 0x1EDC6F41
@ -669,8 +667,6 @@ struct lpfc_vport {
struct timer_list els_tmofunc;
struct timer_list delayed_disc_tmo;
int unreg_vpi_cmpl;
uint8_t load_flag;
#define FC_LOADING 0x1 /* HBA in process of loading drvr */
#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
@ -1023,7 +1019,6 @@ struct lpfc_hba {
#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */
#define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */
#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */
#define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */
#define HBA_FORCED_LINK_SPEED 0x40000 /*
* Firmware supports Forced Link Speed
@ -1031,7 +1026,7 @@ struct lpfc_hba {
*/
#define HBA_PCI_ERR 0x80000 /* The PCI slot is offline */
#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
#define HBA_CGN_RSVD1 0x200000 /* Reserved CGN flag */
#define HBA_SHORT_CMF 0x200000 /* shorter CMF timer routine */
#define HBA_CGN_DAY_WRAP 0x400000 /* HBA Congestion info day wraps */
#define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */
#define HBA_SETUP 0x1000000 /* Signifies HBA setup is completed */
@ -1040,6 +1035,7 @@ struct lpfc_hba {
#define HBA_HBEAT_TMO 0x8000000 /* HBEAT initiated after timeout */
#define HBA_FLOGI_OUTSTANDING 0x10000000 /* FLOGI is outstanding */
struct completion *fw_dump_cmpl; /* cmpl event tracker for fw_dump */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@ -1604,6 +1600,7 @@ struct lpfc_hba {
#define LPFC_MAX_RXMONITOR_ENTRY 800
#define LPFC_MAX_RXMONITOR_DUMP 32
struct rxtable_entry {
uint64_t cmf_bytes; /* Total no of read bytes for CMF_SYNC_WQE */
uint64_t total_bytes; /* Total no of read bytes requested */
uint64_t rcv_bytes; /* Total no of read bytes completed */
uint64_t avg_io_size;


@ -1709,25 +1709,25 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
before_fc_flag = phba->pport->fc_flag;
sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
/* Disable SR-IOV virtual functions if enabled */
if (phba->cfg_sriov_nr_virtfn) {
pci_disable_sriov(pdev);
phba->cfg_sriov_nr_virtfn = 0;
if (opcode == LPFC_FW_DUMP) {
init_completion(&online_compl);
phba->fw_dump_cmpl = &online_compl;
} else {
/* Disable SR-IOV virtual functions if enabled */
if (phba->cfg_sriov_nr_virtfn) {
pci_disable_sriov(pdev);
phba->cfg_sriov_nr_virtfn = 0;
}
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
if (status != 0)
return status;
/* wait for the device to be quiesced before firmware reset */
msleep(100);
}
if (opcode == LPFC_FW_DUMP)
phba->hba_flag |= HBA_FW_DUMP_OP;
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
if (status != 0) {
phba->hba_flag &= ~HBA_FW_DUMP_OP;
return status;
}
/* wait for the device to be quiesced before firmware reset */
msleep(100);
reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
LPFC_CTL_PDEV_CTL_OFFSET);
@ -1756,24 +1756,42 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3153 Fail to perform the requested "
"access: x%x\n", reg_val);
if (phba->fw_dump_cmpl)
phba->fw_dump_cmpl = NULL;
return rc;
}
/* keep the original port state */
if (before_fc_flag & FC_OFFLINE_MODE)
if (before_fc_flag & FC_OFFLINE_MODE) {
if (phba->fw_dump_cmpl)
phba->fw_dump_cmpl = NULL;
goto out;
}
init_completion(&online_compl);
job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_ONLINE);
if (!job_posted)
goto out;
wait_for_completion(&online_compl);
/* Firmware dump will trigger an HA_ERATT event, and
* lpfc_handle_eratt_s4 routine already handles bringing the port back
* online.
*/
if (opcode == LPFC_FW_DUMP) {
wait_for_completion(phba->fw_dump_cmpl);
} else {
init_completion(&online_compl);
job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_ONLINE);
if (!job_posted)
goto out;
wait_for_completion(&online_compl);
}
out:
/* in any case, restore the virtual functions enabled as before */
if (sriov_nr_virtfn) {
/* If fw_dump was performed, first disable to clean up */
if (opcode == LPFC_FW_DUMP) {
pci_disable_sriov(pdev);
phba->cfg_sriov_nr_virtfn = 0;
}
sriov_err =
lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
if (!sriov_err)


@ -5484,7 +5484,7 @@ lpfc_cgn_buffer_read(struct file *file, char __user *buf, size_t nbytes,
if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) {
len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
"Truncated . . .\n");
break;
goto out;
}
len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
"%03x: %08x %08x %08x %08x "
@ -5495,6 +5495,17 @@ lpfc_cgn_buffer_read(struct file *file, char __user *buf, size_t nbytes,
cnt += 32;
ptr += 8;
}
if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) {
len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
"Truncated . . .\n");
goto out;
}
len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
"Parameter Data\n");
ptr = (uint32_t *)&phba->cgn_p;
len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
"%08x %08x %08x %08x\n",
*ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3));
out:
return simple_read_from_buffer(buf, nbytes, ppos, buffer, len);
}
@ -5561,22 +5572,24 @@ lpfc_rx_monitor_read(struct file *file, char __user *buf, size_t nbytes,
start = tail;
len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
" MaxBPI\t Total Data Cmd Total Data Cmpl "
" Latency(us) Avg IO Size\tMax IO Size IO cnt "
"Info BWutil(ms)\n");
" MaxBPI Tot_Data_CMF Tot_Data_Cmd "
"Tot_Data_Cmpl Lat(us) Avg_IO Max_IO "
"Bsy IO_cnt Info BWutil(ms)\n");
get_table:
for (i = start; i < last; i++) {
entry = &phba->rxtable[i];
len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
"%3d:%12lld %12lld\t%12lld\t"
"%8lldus\t%8lld\t%10lld "
"%8d %2d %2d(%2d)\n",
"%3d:%12lld %12lld %12lld %12lld "
"%7lldus %8lld %7lld "
"%2d %4d %2d %2d(%2d)\n",
i, entry->max_bytes_per_interval,
entry->cmf_bytes,
entry->total_bytes,
entry->rcv_bytes,
entry->avg_io_latency,
entry->avg_io_size,
entry->max_read_cnt,
entry->cmf_busy,
entry->io_cnt,
entry->cmf_info,
entry->timer_utilization,


@ -282,7 +282,7 @@ struct lpfc_idiag {
void *ptr_private;
};
#define MAX_DEBUGFS_RX_TABLE_SIZE (100 * LPFC_MAX_RXMONITOR_ENTRY)
#define MAX_DEBUGFS_RX_TABLE_SIZE (128 * LPFC_MAX_RXMONITOR_ENTRY)
struct lpfc_rx_monitor_debug {
char *i_private;
char *buffer;


@ -3538,11 +3538,6 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
return 1;
}
/* This will cause the callback-function lpfc_cmpl_els_cmd to
* trigger the release of node.
*/
if (!(vport->fc_flag & FC_PT2PT))
lpfc_nlp_put(ndlp);
return 0;
}
@ -6899,6 +6894,7 @@ static int
lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
{
LPFC_MBOXQ_t *mbox = NULL;
struct lpfc_dmabuf *mp;
int rc;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@ -6914,8 +6910,11 @@ lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
if (rc == MBX_NOT_FINISHED) {
mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
goto issue_mbox_fail;
}
return 0;
@ -10974,10 +10973,19 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_can_disctmo(vport);
}
if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
/* Wake up lpfc_vport_delete if waiting...*/
if (ndlp->logo_waitq)
wake_up(ndlp->logo_waitq);
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND);
ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
spin_unlock_irq(&ndlp->lock);
}
/* Safe to release resources now. */
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
vport->unreg_vpi_cmpl = VPORT_ERROR;
}
/**


@ -869,10 +869,16 @@ lpfc_work_done(struct lpfc_hba *phba)
if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
lpfc_sli4_post_async_mbox(phba);
-if (ha_copy & HA_ERATT)
+if (ha_copy & HA_ERATT) {
 	/* Handle the error attention event */
 	lpfc_handle_eratt(phba);
+
+	if (phba->fw_dump_cmpl) {
+		complete(phba->fw_dump_cmpl);
+		phba->fw_dump_cmpl = NULL;
+	}
+}
if (ha_copy & HA_MBATT)
lpfc_sli_handle_mb_event(phba);
@ -3928,7 +3934,6 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport->vpi_state &= ~LPFC_VPI_REGISTERED;
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
-vport->unreg_vpi_cmpl = VPORT_OK;
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_cleanup_vports_rrqs(vport, NULL);
/*
@ -3958,7 +3963,6 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1800 Could not issue unreg_vpi\n");
mempool_free(mbox, phba->mbox_mem_pool);
-vport->unreg_vpi_cmpl = VPORT_ERROR;
return rc;
}
return 0;

View File

@ -3675,19 +3675,26 @@ union sli_var {
};
 typedef struct {
+	struct_group_tagged(MAILBOX_word0, bits,
+	union {
+	struct {
 #ifdef __BIG_ENDIAN_BITFIELD
 	uint16_t mbxStatus;
 	uint8_t mbxCommand;
 	uint8_t mbxReserved:6;
 	uint8_t mbxHc:1;
 	uint8_t mbxOwner:1; /* Low order bit first word */
 #else /* __LITTLE_ENDIAN_BITFIELD */
 	uint8_t mbxOwner:1; /* Low order bit first word */
 	uint8_t mbxHc:1;
 	uint8_t mbxReserved:6;
 	uint8_t mbxCommand;
 	uint16_t mbxStatus;
 #endif
+	};
+	u32 word0;
+	};
+	);
 	MAILVARIANTS un;
 	union sli_var us;
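struct_group_tagged() (from <linux/stddef.h>) wraps the bitfields in a mirrored struct MAILBOX_word0, so the first mailbox word can be handled either field-by-field or as a single word0 value, without the old (MAILBOX_t *)&word casts that modern bounds checking objects to. A stand-alone illustration of the same layout trick using a plain tagged struct around an anonymous union (names invented; bitfield order assumes a little-endian host, matching the __LITTLE_ENDIAN_BITFIELD branch):

#include <stdint.h>
#include <stdio.h>

/* Same storage reachable as individual fields or as one 32-bit word. */
struct mbox_word0 {
	union {
		struct {
			uint8_t  owner:1;	/* low-order bit, first word */
			uint8_t  hc:1;
			uint8_t  reserved:6;
			uint8_t  command;
			uint16_t status;
		};
		uint32_t word0;
	};
};

int main(void)
{
	struct mbox_word0 mb = { .word0 = 0 };

	mb.command = 0x24;	/* e.g. a KILL_BOARD-style opcode */
	mb.owner   = 1;
	/* The whole word can now be written to a register in one store. */
	printf("word0 = 0x%08x\n", mb.word0);
	return 0;
}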
@ -3746,7 +3753,7 @@ typedef struct {
#define IOERR_ILLEGAL_COMMAND 0x06
#define IOERR_XCHG_DROPPED 0x07
#define IOERR_ILLEGAL_FIELD 0x08
-#define IOERR_BAD_CONTINUE 0x09
+#define IOERR_RPI_SUSPENDED 0x09
#define IOERR_TOO_MANY_BUFFERS 0x0A
#define IOERR_RCV_BUFFER_WAITING 0x0B
#define IOERR_NO_CONNECTION 0x0C

View File

@ -5373,8 +5373,10 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
*/
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
-if (rc == MBX_NOT_FINISHED)
+if (rc == MBX_NOT_FINISHED) {
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	goto out_free_dmabuf;
+}
return;
}
/*
@ -5925,7 +5927,7 @@ lpfc_cmf_timer(struct hrtimer *timer)
uint32_t io_cnt;
uint32_t head, tail;
uint32_t busy, max_read;
-uint64_t total, rcv, lat, mbpi, extra;
+uint64_t total, rcv, lat, mbpi, extra, cnt;
int timer_interval = LPFC_CMF_INTERVAL;
uint32_t ms;
struct lpfc_cgn_stat *cgs;
@ -5996,20 +5998,28 @@ lpfc_cmf_timer(struct hrtimer *timer)
 /* Calculate any extra bytes needed to account for the
  * timer accuracy. If we are less than LPFC_CMF_INTERVAL
- * add an extra 3% slop factor, equal to LPFC_CMF_INTERVAL
- * add an extra 2%. The goal is to equalize total with a
- * time > LPFC_CMF_INTERVAL or <= LPFC_CMF_INTERVAL + 1
+ * calculate the adjustment needed for total to reflect
+ * a full LPFC_CMF_INTERVAL.
  */
-if (ms == LPFC_CMF_INTERVAL)
-	extra = div_u64(total, 50);
-else if (ms < LPFC_CMF_INTERVAL)
-	extra = div_u64(total, 33);
+if (ms && ms < LPFC_CMF_INTERVAL) {
+	cnt = div_u64(total, ms); /* bytes per ms */
+	cnt *= LPFC_CMF_INTERVAL; /* what total should be */
+
+	/* If the timeout is scheduled to be shorter,
+	 * this value may skew the data, so cap it at mbpi.
+	 */
+	if ((phba->hba_flag & HBA_SHORT_CMF) && cnt > mbpi)
+		cnt = mbpi;
+	extra = cnt - total;
+}
lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
} else {
/* For Monitor mode or link down we want mbpi
* to be the full link speed
*/
mbpi = phba->cmf_link_byte_count;
+extra = 0;
}
phba->cmf_timer_cnt++;
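The new cnt logic replaces the fixed 2%/3% slop factors with a linear extrapolation: if the timer fired after ms < LPFC_CMF_INTERVAL milliseconds, the observed byte count is scaled up to what a full interval would have carried, capped at mbpi when a short interval was deliberately scheduled. A small worked sketch of that arithmetic (interval length and byte counts invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define CMF_INTERVAL 100	/* stand-in for LPFC_CMF_INTERVAL, in ms */

/* Extrapolate bytes seen in 'ms' milliseconds to a full interval,
 * capping at 'mbpi' when a short interval was intentional.
 */
static uint64_t cmf_extra(uint64_t total, uint32_t ms, uint64_t mbpi,
			  int short_cmf)
{
	uint64_t cnt;

	if (!ms || ms >= CMF_INTERVAL)
		return 0;
	cnt = total / ms;	/* bytes per ms (div_u64 in-kernel) */
	cnt *= CMF_INTERVAL;	/* what total should have been */
	if (short_cmf && cnt > mbpi)
		cnt = mbpi;	/* don't let a short timer skew the data */
	return cnt - total;
}

int main(void)
{
	/* 97 ms of a 100 ms interval carried 970000 bytes... */
	uint64_t extra = cmf_extra(970000, 97, 2000000, 0);

	/* ...so a full interval is credited with 30000 extra bytes. */
	printf("extra = %llu\n", (unsigned long long)extra);
	return 0;
}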
@ -6040,6 +6050,7 @@ lpfc_cmf_timer(struct hrtimer *timer)
LPFC_RXMONITOR_TABLE_IN_USE);
entry = &phba->rxtable[head];
entry->total_bytes = total;
entry->cmf_bytes = total + extra;
entry->rcv_bytes = rcv;
entry->cmf_busy = busy;
entry->cmf_info = phba->cmf_active_info;
@ -6082,6 +6093,8 @@ lpfc_cmf_timer(struct hrtimer *timer)
/* Each minute save Fabric and Driver congestion information */
lpfc_cgn_save_evt_cnt(phba);
phba->hba_flag &= ~HBA_SHORT_CMF;
/* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the
* minute, adjust our next timer interval, if needed, to ensure a
* 1 minute granularity when we get the next timer interrupt.
@ -6092,6 +6105,8 @@ lpfc_cmf_timer(struct hrtimer *timer)
jiffies);
if (timer_interval <= 0)
timer_interval = LPFC_CMF_INTERVAL;
else
phba->hba_flag |= HBA_SHORT_CMF;
/* If we adjust timer_interval, max_bytes_per_interval
* needs to be adjusted as well.
@ -6337,8 +6352,10 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
}
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
-if (rc == MBX_NOT_FINISHED)
+if (rc == MBX_NOT_FINISHED) {
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	goto out_free_dmabuf;
+}
return;
out_free_dmabuf:
@ -13464,7 +13481,7 @@ lpfc_init_congestion_buf(struct lpfc_hba *phba)
phba->cgn_evt_minute = 0;
phba->hba_flag &= ~HBA_CGN_DAY_WRAP;
-memset(cp, 0xff, LPFC_CGN_DATA_SIZE);
+memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat));
cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
cp->cgn_info_version = LPFC_CGN_INFO_V3;
@ -13523,7 +13540,7 @@ lpfc_init_congestion_stat(struct lpfc_hba *phba)
return;
cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
-memset(&cp->cgn_stat_npm, 0, LPFC_CGN_STAT_SIZE);
+memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));
ktime_get_real_ts64(&cmpl_time);
time64_to_tm(cmpl_time.tv_sec, 0, &broken);
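Switching from a hard-coded LPFC_CGN_DATA_SIZE to offsetof(struct lpfc_cgn_info, cgn_stat) ties the memset bound to the layout itself: everything in front of the statistics member gets the 0xff fill, and the stats tail is cleared separately by its own sizeof. The same idiom in miniature (struct and field names are illustrative):

#include <stddef.h>
#include <string.h>
#include <stdio.h>

struct cgn_info {
	unsigned short size;
	unsigned char  version;
	unsigned char  data[8];
	unsigned int   cgn_stat[4];	/* stats live at the tail */
};

int main(void)
{
	struct cgn_info cp;

	/* Fill only the fields in front of cgn_stat... */
	memset(&cp, 0xff, offsetof(struct cgn_info, cgn_stat));
	/* ...and clear the stats region by its own size. */
	memset(&cp.cgn_stat, 0, sizeof(cp.cgn_stat));

	printf("prefix %zu bytes, stats %zu bytes\n",
	       offsetof(struct cgn_info, cgn_stat), sizeof(cp.cgn_stat));
	return 0;
}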

View File

@ -322,6 +322,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd;
struct lpfc_dmabuf *mp;
uint64_t nlp_portwwn = 0;
uint32_t *lp;
IOCB_t *icmd;
@ -571,6 +572,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* a default RPI.
*/
if (phba->sli_rev == LPFC_SLI_REV4) {
mp = (struct lpfc_dmabuf *)login_mbox->ctx_buf;
if (mp) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
mempool_free(login_mbox, phba->mbox_mem_pool);
login_mbox = NULL;
} else {

View File

@ -4393,6 +4393,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
if (lpfc_cmd->result == IOERR_INVALID_RPI ||
lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
lpfc_cmd->result == IOERR_RPI_SUSPENDED ||
lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
cmd->result = DID_REQUEUE << 16;
break;
@ -4448,10 +4449,11 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"9039 Iodone <%d/%llu> cmd x%px, error "
"x%x SNS x%x x%x Data: x%x x%x\n",
"x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n",
cmd->device->id, cmd->device->lun, cmd,
cmd->result, *lp, *(lp + 3), cmd->retries,
scsi_get_resid(cmd));
cmd->result, *lp, *(lp + 3),
(u64)scsi_get_lba(cmd),
cmd->retries, scsi_get_resid(cmd));
}
lpfc_update_stats(vport, lpfc_cmd);

View File

@ -4749,7 +4749,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
{
uint32_t __iomem *resp_buf;
uint32_t __iomem *mbox_buf;
-volatile uint32_t mbox;
+volatile struct MAILBOX_word0 mbox;
uint32_t hc_copy, ha_copy, resp_data;
int i;
uint8_t hdrtype;
@ -4783,13 +4783,13 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
phba->pport->stopped = 1;
}
-mbox = 0;
-((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
-((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
+mbox.word0 = 0;
+mbox.mbxCommand = MBX_KILL_BOARD;
+mbox.mbxOwner = OWN_CHIP;
writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
mbox_buf = phba->MBslimaddr;
-writel(mbox, mbox_buf);
+writel(mbox.word0, mbox_buf);
for (i = 0; i < 50; i++) {
if (lpfc_readl((resp_buf + 1), &resp_data))
@ -4810,12 +4810,12 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
goto clear_errat;
}
-((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
+mbox.mbxOwner = OWN_HOST;
resp_data = 0;
for (i = 0; i < 500; i++) {
if (lpfc_readl(resp_buf, &resp_data))
return;
-if (resp_data != mbox)
+if (resp_data != mbox.word0)
mdelay(1);
else
break;
@ -5046,12 +5046,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
phba->fcf.fcf_flag = 0;
spin_unlock_irq(&phba->hbalock);
-/* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
-if (phba->hba_flag & HBA_FW_DUMP_OP) {
-	phba->hba_flag &= ~HBA_FW_DUMP_OP;
-	return rc;
-}
/* Now physically reset the device */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0389 Performing PCI function reset!\n");
@ -5091,9 +5085,8 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
static int
lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
{
-MAILBOX_t *mb;
+volatile struct MAILBOX_word0 mb;
 struct lpfc_sli *psli;
-volatile uint32_t word0;
void __iomem *to_slim;
uint32_t hba_aer_enabled;
@ -5110,24 +5103,23 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
(phba->pport) ? phba->pport->port_state : 0,
psli->sli_flag);
-word0 = 0;
-mb = (MAILBOX_t *) &word0;
-mb->mbxCommand = MBX_RESTART;
-mb->mbxHc = 1;
+mb.word0 = 0;
+mb.mbxCommand = MBX_RESTART;
+mb.mbxHc = 1;
lpfc_reset_barrier(phba);
to_slim = phba->MBslimaddr;
-writel(*(uint32_t *) mb, to_slim);
+writel(mb.word0, to_slim);
readl(to_slim); /* flush */
/* Only skip post after fc_ffinit is completed */
if (phba->pport && phba->pport->port_state)
-	word0 = 1; /* This is really setting up word1 */
+	mb.word0 = 1; /* This is really setting up word1 */
 else
-	word0 = 0; /* This is really setting up word1 */
+	mb.word0 = 0; /* This is really setting up word1 */
to_slim = phba->MBslimaddr + sizeof (uint32_t);
-writel(*(uint32_t *) mb, to_slim);
+writel(mb.word0, to_slim);
readl(to_slim); /* flush */
lpfc_sli_brdreset(phba);

View File

@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "14.0.0.3"
#define LPFC_DRIVER_VERSION "14.0.0.4"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */

View File

@ -485,23 +485,68 @@ error_out:
return rc;
}
static int
lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
int rc;
struct lpfc_hba *phba = vport->phba;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
spin_lock_irq(&ndlp->lock);
if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO) &&
!ndlp->logo_waitq) {
ndlp->logo_waitq = &waitq;
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
ndlp->nlp_flag |= NLP_ISSUE_LOGO;
ndlp->save_flags |= NLP_WAIT_FOR_LOGO;
}
spin_unlock_irq(&ndlp->lock);
rc = lpfc_issue_els_npiv_logo(vport, ndlp);
if (!rc) {
wait_event_timeout(waitq,
(!(ndlp->save_flags & NLP_WAIT_FOR_LOGO)),
msecs_to_jiffies(phba->fc_ratov * 2000));
if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO))
goto logo_cmpl;
/* LOGO wait failed. Correct status. */
rc = -EINTR;
} else {
rc = -EIO;
}
/* Error - clean up node flags. */
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
spin_unlock_irq(&ndlp->lock);
logo_cmpl:
lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
"1824 Issue LOGO completes with status %d\n",
rc);
spin_lock_irq(&ndlp->lock);
ndlp->logo_waitq = NULL;
spin_unlock_irq(&ndlp->lock);
return rc;
}
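lpfc_send_npiv_logo() replaces the old schedule_timeout() polling loops with a flag-plus-waitqueue handshake: the issuing thread sets NLP_WAIT_FOR_LOGO and sleeps in wait_event_timeout(), and the LOGO completion clears the flag and wakes logo_waitq. A user-space sketch of the same bounded handshake using a condition variable (all names invented; build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  waitq = PTHREAD_COND_INITIALIZER;
static int wait_for_logo;	/* stands in for NLP_WAIT_FOR_LOGO */

/* Completion side: clear the flag and wake the waiter. */
static void *logo_cmpl(void *arg)
{
	usleep(100 * 1000);	/* pretend the LOGO took 100 ms */
	pthread_mutex_lock(&lock);
	wait_for_logo = 0;
	pthread_cond_signal(&waitq);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec to;
	int rc = 0;

	wait_for_logo = 1;
	pthread_create(&t, NULL, logo_cmpl, NULL);

	clock_gettime(CLOCK_REALTIME, &to);
	to.tv_sec += 2;		/* analogue of fc_ratov * 2 */

	pthread_mutex_lock(&lock);
	while (wait_for_logo && rc == 0)
		rc = pthread_cond_timedwait(&waitq, &lock, &to);
	pthread_mutex_unlock(&lock);

	printf(wait_for_logo ? "LOGO wait timed out\n" : "LOGO completed\n");
	pthread_join(t, NULL);
	return 0;
}

The driver's version additionally re-checks the flag after the wait and cleans it up on the error path, exactly as the function above shows.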
static int
disable_vport(struct fc_vport *fc_vport)
{
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
-long timeout;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
/* Can't disable during an outstanding delete. */
if (vport->load_flag & FC_UNLOADING)
return 0;
ndlp = lpfc_findnode_did(vport, Fabric_DID);
-if (ndlp && phba->link_state >= LPFC_LINK_UP) {
-	vport->unreg_vpi_cmpl = VPORT_INVAL;
-	timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
-	if (!lpfc_issue_els_npiv_logo(vport, ndlp))
-		while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
-			timeout = schedule_timeout(timeout);
-}
+if (ndlp && phba->link_state >= LPFC_LINK_UP)
+	(void)lpfc_send_npiv_logo(vport, ndlp);
lpfc_sli_host_down(vport);
@ -600,7 +645,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
-long timeout;
+int rc;
if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@ -665,15 +710,14 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
if (vport->cfg_enable_da_id) {
/* Send DA_ID and wait for a completion. */
-			timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
-			if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
-				while (vport->ct_flags && timeout)
-					timeout = schedule_timeout(timeout);
-			else
+			rc = lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0);
+			if (rc) {
 				lpfc_printf_log(vport->phba, KERN_WARNING,
 						LOG_VPORT,
 						"1829 CT command failed to "
-						"delete objects on fabric\n");
+						"delete objects on fabric, "
+						"rc %d\n", rc);
+			}
}
/*
@ -688,11 +732,10 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp)
goto skip_logo;
-		vport->unreg_vpi_cmpl = VPORT_INVAL;
-		timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
-		if (!lpfc_issue_els_npiv_logo(vport, ndlp))
-			while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
-				timeout = schedule_timeout(timeout);
+		rc = lpfc_send_npiv_logo(vport, ndlp);
+		if (rc)
+			goto skip_logo;
}
if (!(phba->pport->load_flag & FC_UNLOADING))

View File

@ -1431,7 +1431,6 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb)
/**
* megaraid_queue_command_lck - generic queue entry point for all LLDs
* @scp : pointer to the scsi command to be executed
-* @done : callback routine to be called after the cmd has be completed
*
* Queue entry point for mailbox based controllers.
*/

View File

@ -8,7 +8,7 @@
#define MPI3_CONFIG_PAGETYPE_IO_UNIT (0x00)
#define MPI3_CONFIG_PAGETYPE_MANUFACTURING (0x01)
#define MPI3_CONFIG_PAGETYPE_IOC (0x02)
-#define MPI3_CONFIG_PAGETYPE_UEFI_BSD (0x03)
+#define MPI3_CONFIG_PAGETYPE_DRIVER (0x03)
#define MPI3_CONFIG_PAGETYPE_SECURITY (0x04)
#define MPI3_CONFIG_PAGETYPE_ENCLOSURE (0x11)
#define MPI3_CONFIG_PAGETYPE_DEVICE (0x12)
@ -181,8 +181,17 @@ struct mpi3_config_page_header {
#define MPI3_SAS_HWRATE_MIN_RATE_6_0 (0x0a)
#define MPI3_SAS_HWRATE_MIN_RATE_12_0 (0x0b)
#define MPI3_SAS_HWRATE_MIN_RATE_22_5 (0x0c)
#define MPI3_SLOT_INVALID (0xffff)
#define MPI3_SLOT_INDEX_INVALID (0xffff)
#define MPI3_LINK_CHANGE_COUNT_INVALID (0xffff)
#define MPI3_RATE_CHANGE_COUNT_INVALID (0xffff)
#define MPI3_TEMP_SENSOR_LOCATION_INTERNAL (0x0)
#define MPI3_TEMP_SENSOR_LOCATION_INLET (0x1)
#define MPI3_TEMP_SENSOR_LOCATION_OUTLET (0x2)
#define MPI3_TEMP_SENSOR_LOCATION_DRAM (0x3)
#define MPI3_MFGPAGE_VENDORID_BROADCOM (0x1000)
#define MPI3_MFGPAGE_DEVID_SAS4116 (0x00a5)
#define MPI3_MFGPAGE_DEVID_SAS4016 (0x00a7)
struct mpi3_man_page0 {
struct mpi3_config_page_header header;
u8 chip_revision[8];
@ -195,7 +204,7 @@ struct mpi3_man_page0 {
__le32 reserved98;
u8 oem;
u8 sub_oem;
-__le16 reserved9e;
+__le16 flags;
u8 board_mfg_day;
u8 board_mfg_month;
__le16 board_mfg_year;
@ -208,6 +217,8 @@ struct mpi3_man_page0 {
};
#define MPI3_MAN0_PAGEVERSION (0x00)
#define MPI3_MAN0_FLAGS_SWITCH_PRESENT (0x0002)
#define MPI3_MAN0_FLAGS_EXPANDER_PRESENT (0x0001)
#define MPI3_MAN1_VPD_SIZE (512)
struct mpi3_man_page1 {
struct mpi3_config_page_header header;
@ -236,7 +247,7 @@ struct mpi3_man_page5 {
#define MPI3_MAN5_PAGEVERSION (0x00)
struct mpi3_man6_gpio_entry {
u8 function_code;
-u8 reserved01;
+u8 function_flags;
__le16 flags;
u8 param1;
u8 param2;
@ -253,7 +264,6 @@ struct mpi3_man6_gpio_entry {
#define MPI3_MAN6_GPIO_FUNCTION_PORT_STATUS_YELLOW (0x06)
#define MPI3_MAN6_GPIO_FUNCTION_CABLE_MANAGEMENT (0x07)
#define MPI3_MAN6_GPIO_FUNCTION_BKPLANE_MGMT_TYPE (0x08)
-#define MPI3_MAN6_GPIO_FUNCTION_ISTWI_MUX_RESET (0x09)
#define MPI3_MAN6_GPIO_FUNCTION_ISTWI_RESET (0x0a)
#define MPI3_MAN6_GPIO_FUNCTION_BACKEND_PCIE_RESET (0x0b)
#define MPI3_MAN6_GPIO_FUNCTION_GLOBAL_FAULT (0x0c)
@ -263,6 +273,10 @@ struct mpi3_man6_gpio_entry {
#define MPI3_MAN6_GPIO_FUNCTION_CTRL_TYPE (0x10)
#define MPI3_MAN6_GPIO_FUNCTION_LICENSE (0x11)
#define MPI3_MAN6_GPIO_FUNCTION_REFCLK_CONTROL (0x12)
#define MPI3_MAN6_GPIO_FUNCTION_BACKEND_PCIE_RESET_CLAMP (0x13)
#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_MASK (0x01)
#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_ISTWI (0x00)
#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_RECEPTACLEID (0x01)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_MASK (0xf0)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_GENERIC (0x00)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_CABLE_MGMT (0x10)
@ -275,8 +289,6 @@ struct mpi3_man6_gpio_entry {
#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_MODULE_PRESENT (0x00)
#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_ACTIVE_CABLE_ENABLE (0x01)
#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_CABLE_MGMT_ENABLE (0x02)
-#define MPI3_MAN6_GPIO_ISTWI_MUX_RESET_PARAM2_SPEC_MUX (0x00)
-#define MPI3_MAN6_GPIO_ISTWI_MUX_RESET_PARAM2_ALL_MUXES (0x01)
#define MPI3_MAN6_GPIO_LICENSE_PARAM1_TYPE_IBUTTON (0x00)
#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_MASK (0x0100)
#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_FAST_EDGE (0x0100)
@ -353,6 +365,7 @@ struct mpi3_man8_phy_info {
__le32 reserved0c;
};
#define MPI3_MAN8_PHY_INFO_RECEPTACLE_ID_HOST_PHY (0xff)
#ifndef MPI3_MAN8_PHY_INFO_MAX
#define MPI3_MAN8_PHY_INFO_MAX (1)
#endif
@ -373,20 +386,22 @@ struct mpi3_man9_rsrc_entry {
};
enum mpi3_man9_resources {
-MPI3_MAN9_RSRC_OUTSTANDING_REQS = 0,
-MPI3_MAN9_RSRC_TARGET_CMDS = 1,
-MPI3_MAN9_RSRC_SAS_TARGETS = 2,
-MPI3_MAN9_RSRC_PCIE_TARGETS = 3,
-MPI3_MAN9_RSRC_INITIATORS = 4,
-MPI3_MAN9_RSRC_VDS = 5,
-MPI3_MAN9_RSRC_ENCLOSURES = 6,
-MPI3_MAN9_RSRC_ENCLOSURE_PHYS = 7,
-MPI3_MAN9_RSRC_EXPANDERS = 8,
-MPI3_MAN9_RSRC_PCIE_SWITCHES = 9,
-MPI3_MAN9_RSRC_PDS = 10,
-MPI3_MAN9_RSRC_HOST_PDS = 11,
-MPI3_MAN9_RSRC_ADV_HOST_PDS = 12,
-MPI3_MAN9_RSRC_RAID_PDS = 13,
+MPI3_MAN9_RSRC_OUTSTANDING_REQS = 0,
+MPI3_MAN9_RSRC_TARGET_CMDS = 1,
+MPI3_MAN9_RSRC_RESERVED02 = 2,
+MPI3_MAN9_RSRC_NVME = 3,
+MPI3_MAN9_RSRC_INITIATORS = 4,
+MPI3_MAN9_RSRC_VDS = 5,
+MPI3_MAN9_RSRC_ENCLOSURES = 6,
+MPI3_MAN9_RSRC_ENCLOSURE_PHYS = 7,
+MPI3_MAN9_RSRC_EXPANDERS = 8,
+MPI3_MAN9_RSRC_PCIE_SWITCHES = 9,
+MPI3_MAN9_RSRC_RESERVED10 = 10,
+MPI3_MAN9_RSRC_HOST_PD_DRIVES = 11,
+MPI3_MAN9_RSRC_ADV_HOST_PD_DRIVES = 12,
+MPI3_MAN9_RSRC_RAID_PD_DRIVES = 13,
+MPI3_MAN9_RSRC_DRV_DIAG_BUF = 14,
+MPI3_MAN9_RSRC_NAMESPACE_COUNT = 15,
MPI3_MAN9_RSRC_NUM_RESOURCES
};
@ -402,6 +417,7 @@ enum mpi3_man9_resources {
#define MPI3_MAN9_MIN_ENCLOSURES (0)
#define MPI3_MAN9_MAX_ENCLOSURES (65535)
#define MPI3_MAN9_MIN_ENCLOSURE_PHYS (0)
#define MPI3_MAN9_MIN_NAMESPACE_COUNT (1)
#define MPI3_MAN9_MIN_EXPANDERS (0)
#define MPI3_MAN9_MAX_EXPANDERS (65535)
#define MPI3_MAN9_MIN_PCIE_SWITCHES (0)
@ -422,9 +438,14 @@ struct mpi3_man_page9 {
struct mpi3_man10_istwi_ctrlr_entry {
__le16 slave_address;
__le16 flags;
-__le32 reserved04;
+u8 scl_low_override;
+u8 scl_high_override;
+__le16 reserved06;
};
#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_MASK (0x000c)
#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_100K (0x0000)
#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_400K (0x0004)
#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_SLAVE_ENABLED (0x0002)
#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_MASTER_ENABLED (0x0001)
#ifndef MPI3_MAN10_ISTWI_CTRLR_MAX
@ -451,10 +472,13 @@ struct mpi3_man11_temp_sensor_device_format {
u8 temp_channel[4];
};
 #define MPI3_MAN11_TEMP_SENSOR_TYPE_MAX6654 (0x00)
 #define MPI3_MAN11_TEMP_SENSOR_TYPE_EMC1442 (0x01)
 #define MPI3_MAN11_TEMP_SENSOR_TYPE_ADT7476 (0x02)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_SE97B (0x03)
+#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_LOCATION_MASK (0xe0)
+#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_LOCATION_SHIFT (5)
 #define MPI3_MAN11_TEMP_SENSOR_CHANNEL_ENABLED (0x01)
struct mpi3_man11_seeprom_device_format {
u8 size;
u8 page_write_size;
@ -495,31 +519,40 @@ struct mpi3_man11_bkplane_spec_ubm_format {
#define MPI3_MAN11_BKPLANE_UBM_FLAGS_MAX_FRU_SHIFT (4)
#define MPI3_MAN11_BKPLANE_UBM_FLAGS_POLL_INTERVAL_MASK (0x000f)
#define MPI3_MAN11_BKPLANE_UBM_FLAGS_POLL_INTERVAL_SHIFT (0)
-struct mpi3_man11_bkplane_spec_vpp_format {
+struct mpi3_man11_bkplane_spec_non_ubm_format {
 	__le16 flags;
-	__le16 reserved02;
+	u8 reserved02;
+	u8 type;
};
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_REFCLK_POLICY_ALWAYS_ENABLED (0x0040)
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_PRESENCE_DETECT_MASK (0x0030)
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_PRESENCE_DETECT_GPIO (0x0000)
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_PRESENCE_DETECT_REG (0x0010)
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_POLL_INTERVAL_MASK (0x000f)
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_POLL_INTERVAL_SHIFT (0)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_GROUP_MASK (0xf000)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_GROUP_SHIFT (12)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_REFCLK_POLICY_ALWAYS_ENABLED (0x0200)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_MASK (0x0030)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_GPIO (0x0000)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_REG (0x0010)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_POLL_INTERVAL_MASK (0x000f)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_POLL_INTERVAL_SHIFT (0)
+#define MPI3_MAN11_BKPLANE_NON_UBM_TYPE_VPP (0x00)
union mpi3_man11_bkplane_spec_format {
 struct mpi3_man11_bkplane_spec_ubm_format ubm;
-struct mpi3_man11_bkplane_spec_vpp_format vpp;
+struct mpi3_man11_bkplane_spec_non_ubm_format non_ubm;
};
struct mpi3_man11_bkplane_mgmt_device_format {
u8 type;
u8 receptacle_id;
-__le16 reserved02;
+u8 reset_info;
+u8 reserved03;
union mpi3_man11_bkplane_spec_format backplane_mgmt_specific;
};
#define MPI3_MAN11_BKPLANE_MGMT_TYPE_UBM (0x00)
-#define MPI3_MAN11_BKPLANE_MGMT_TYPE_VPP (0x01)
+#define MPI3_MAN11_BKPLANE_MGMT_TYPE_NON_UBM (0x01)
#define MPI3_MAN11_BACKPLANE_RESETINFO_ASSERT_TIME_MASK (0xf0)
#define MPI3_MAN11_BACKPLANE_RESETINFO_ASSERT_TIME_SHIFT (4)
#define MPI3_MAN11_BACKPLANE_RESETINFO_READY_TIME_MASK (0x0f)
#define MPI3_MAN11_BACKPLANE_RESETINFO_READY_TIME_SHIFT (0)
struct mpi3_man11_gas_gauge_device_format {
u8 type;
u8 reserved01[3];
@ -527,6 +560,11 @@ struct mpi3_man11_gas_gauge_device_format {
};
#define MPI3_MAN11_GAS_GAUGE_TYPE_STANDARD (0x00)
struct mpi3_man11_mgmt_ctrlr_device_format {
__le32 reserved00;
__le32 reserved04;
};
union mpi3_man11_device_specific_format {
struct mpi3_man11_mux_device_format mux;
struct mpi3_man11_temp_sensor_device_format temp_sensor;
@ -535,6 +573,7 @@ union mpi3_man11_device_specific_format {
struct mpi3_man11_cable_mgmt_device_format cable_mgmt;
struct mpi3_man11_bkplane_mgmt_device_format bkplane_mgmt;
struct mpi3_man11_gas_gauge_device_format gas_gauge;
struct mpi3_man11_mgmt_ctrlr_device_format mgmt_controller;
__le32 words[2];
};
@ -556,10 +595,8 @@ struct mpi3_man11_istwi_device_format {
#define MPI3_MAN11_ISTWI_DEVTYPE_CABLE_MGMT (0x04)
#define MPI3_MAN11_ISTWI_DEVTYPE_BACKPLANE_MGMT (0x05)
#define MPI3_MAN11_ISTWI_DEVTYPE_GAS_GAUGE (0x06)
#define MPI3_MAN11_ISTWI_DEVTYPE_MGMT_CONTROLLER (0x07)
#define MPI3_MAN11_ISTWI_FLAGS_MUX_PRESENT (0x01)
-#define MPI3_MAN11_ISTWI_FLAGS_BUS_SPEED_MASK (0x06)
-#define MPI3_MAN11_ISTWI_FLAGS_BUS_SPEED_100KHZ (0x00)
-#define MPI3_MAN11_ISTWI_FLAGS_BUS_SPEED_400KHZ (0x02)
#ifndef MPI3_MAN11_ISTWI_DEVICE_MAX
#define MPI3_MAN11_ISTWI_DEVICE_MAX (1)
#endif
@ -692,8 +729,8 @@ struct mpi3_man_page14 {
#define MPI3_MAN14_FLAGS_AUTH_SESSION_REQ (0x01)
#define MPI3_MAN14_FLAGS_AUTH_API_MASK (0x0e)
#define MPI3_MAN14_FLAGS_AUTH_API_NONE (0x00)
-#define MPI3_MAN14_FLAGS_AUTH_API_CEREBUS (0x02)
-#define MPI3_MAN14_FLAGS_AUTH_API_DMTF_PMCI (0x04)
+#define MPI3_MAN14_FLAGS_AUTH_API_CERBERUS (0x02)
+#define MPI3_MAN14_FLAGS_AUTH_API_SPDM (0x04)
#ifndef MPI3_MAN15_VERSION_RECORD_MAX
#define MPI3_MAN15_VERSION_RECORD_MAX 1
#endif
@ -808,7 +845,7 @@ struct mpi3_io_unit_page1 {
struct mpi3_config_page_header header;
__le32 flags;
u8 dmd_io_delay;
-u8 dmd_report_pc_ie;
+u8 dmd_report_pcie;
u8 dmd_report_sata;
u8 dmd_report_sas;
};
@ -844,26 +881,30 @@ struct mpi3_io_unit_page2 {
#define MPI3_IOUNIT2_GPIO_SETTING_ON (0x0001)
struct mpi3_io_unit3_sensor {
__le16 flags;
-__le16 reserved02;
-__le16 threshold[4];
+u8 threshold_margin;
+u8 reserved03;
+__le16 threshold[3];
+__le16 reserved0a;
__le32 reserved0c;
__le32 reserved10;
__le32 reserved14;
};
-#define MPI3_IOUNIT3_SENSOR_FLAGS_T3_ENABLE (0x0008)
-#define MPI3_IOUNIT3_SENSOR_FLAGS_T2_ENABLE (0x0004)
-#define MPI3_IOUNIT3_SENSOR_FLAGS_T1_ENABLE (0x0002)
-#define MPI3_IOUNIT3_SENSOR_FLAGS_T0_ENABLE (0x0001)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_FATAL_EVENT_ENABLED (0x0010)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_FATAL_ACTION_ENABLED (0x0008)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_CRITICAL_EVENT_ENABLED (0x0004)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_CRITICAL_ACTION_ENABLED (0x0002)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_WARNING_EVENT_ENABLED (0x0001)
#ifndef MPI3_IO_UNIT3_SENSOR_MAX
#define MPI3_IO_UNIT3_SENSOR_MAX (1)
#endif
struct mpi3_io_unit_page3 {
struct mpi3_config_page_header header;
__le32 reserved08;
u8 num_sensors;
-u8 polling_interval;
-__le16 reserved0e;
+u8 nominal_poll_interval;
+u8 warning_poll_interval;
+u8 reserved0f;
struct mpi3_io_unit3_sensor sensor[MPI3_IO_UNIT3_SENSOR_MAX];
};
@ -873,13 +914,19 @@ struct mpi3_io_unit4_sensor {
__le16 reserved02;
u8 flags;
u8 reserved05[3];
-__le32 reserved08;
+__le16 istwi_index;
+u8 channel;
+u8 reserved0b;
__le32 reserved0c;
};
#define MPI3_IOUNIT4_SENSOR_FLAGS_LOC_MASK (0xe0)
#define MPI3_IOUNIT4_SENSOR_FLAGS_LOC_SHIFT (5)
#define MPI3_IOUNIT4_SENSOR_FLAGS_TEMP_VALID (0x01)
#define MPI3_IOUNIT4_SENSOR_ISTWI_INDEX_INTERNAL (0xffff)
#define MPI3_IOUNIT4_SENSOR_CHANNEL_RESERVED (0xff)
#ifndef MPI3_IO_UNIT4_SENSOR_MAX
#define MPI3_IO_UNIT4_SENSOR_MAX (1)
#endif
struct mpi3_io_unit_page4 {
struct mpi3_config_page_header header;
@ -906,8 +953,9 @@ struct mpi3_io_unit_page5 {
struct mpi3_io_unit5_spinup_group spinup_group_parameters[4];
__le32 reserved18;
__le32 reserved1c;
-__le32 reserved20;
-u8 reserved24;
+__le16 device_shutdown;
+__le16 reserved22;
+u8 pcie_device_wait_time;
u8 sata_device_wait_time;
u8 spinup_encl_drive_count;
u8 spinup_encl_delay;
@ -919,6 +967,22 @@ struct mpi3_io_unit_page5 {
};
#define MPI3_IOUNIT5_PAGEVERSION (0x00)
#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NO_ACTION (0x00)
#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_ATTACHED (0x01)
#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_EXPANDER_ATTACHED (0x02)
#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SWITCH_ATTACHED (0x02)
#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_AND_EXPANDER (0x03)
#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_AND_SWITCH (0x03)
#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_HDD_MASK (0x0300)
#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_HDD_SHIFT (8)
#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_HDD_MASK (0x00c0)
#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_HDD_SHIFT (6)
#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NVME_SSD_MASK (0x0030)
#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NVME_SSD_SHIFT (4)
#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_SSD_MASK (0x000c)
#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_SSD_SHIFT (2)
#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_SSD_MASK (0x0003)
#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_SSD_SHIFT (0)
#define MPI3_IOUNIT5_FLAGS_POWER_CAPABLE_SPINUP (0x02)
#define MPI3_IOUNIT5_FLAGS_AUTO_PORT_ENABLE (0x01)
#define MPI3_IOUNIT5_PHY_SPINUP_GROUP_MASK (0x03)
@ -1012,7 +1076,52 @@ struct mpi3_ioc_page2 {
};
#define MPI3_IOC2_PAGEVERSION (0x00)
-struct mpi3_uefibsd_page0 {
#define MPI3_DRIVER_FLAGS_ADMINRAIDPD_BLOCKED (0x0010)
#define MPI3_DRIVER_FLAGS_OOBRAIDPD_BLOCKED (0x0008)
#define MPI3_DRIVER_FLAGS_OOBRAIDVD_BLOCKED (0x0004)
#define MPI3_DRIVER_FLAGS_OOBADVHOSTPD_BLOCKED (0x0002)
#define MPI3_DRIVER_FLAGS_OOBHOSTPD_BLOCKED (0x0001)
struct mpi3_allowed_cmd_scsi {
__le16 service_action;
u8 operation_code;
u8 command_flags;
};
struct mpi3_allowed_cmd_ata {
u8 subcommand;
u8 reserved01;
u8 command;
u8 command_flags;
};
struct mpi3_allowed_cmd_nvme {
u8 reserved00;
u8 nvme_cmd_flags;
u8 op_code;
u8 command_flags;
};
#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_MASK (0x80)
#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_IO (0x00)
#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_ADMIN (0x80)
#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_CMDSET_MASK (0x3f)
#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_CMDSET_NVM (0x00)
union mpi3_allowed_cmd {
struct mpi3_allowed_cmd_scsi scsi;
struct mpi3_allowed_cmd_ata ata;
struct mpi3_allowed_cmd_nvme nvme;
};
#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_ADMINRAIDPD_BLOCKED (0x20)
#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBRAIDPD_BLOCKED (0x10)
#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBRAIDVD_BLOCKED (0x08)
#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBADVHOSTPD_BLOCKED (0x04)
#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBHOSTPD_BLOCKED (0x02)
#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_CHECKSUBCMD_ENABLED (0x01)
#ifndef MPI3_ALLOWED_CMDS_MAX
#define MPI3_ALLOWED_CMDS_MAX (1)
#endif
+struct mpi3_driver_page0 {
struct mpi3_config_page_header header;
__le32 bsd_options;
u8 ssu_timeout;
@ -1026,13 +1135,122 @@ struct mpi3_uefibsd_page0 {
__le32 reserved18;
};
-#define MPI3_UEFIBSD_PAGEVERSION (0x00)
-#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_MASK (0x00000003)
-#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000)
-#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001)
-#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_NONE (0x00000002)
-#define MPI3_UEFIBSD_BSDOPTS_DIS_HII_CONFIG_UTIL (0x00000004)
-#define MPI3_UEFIBSD_BSDOPTS_EN_ADV_ADAPTER_CONFIG (0x00000008)
+#define MPI3_DRIVER0_PAGEVERSION (0x00)
+#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_MASK (0x00000003)
+#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000)
+#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001)
+#define MPI3_DRIVER0_BSDOPTS_DIS_HII_CONFIG_UTIL (0x00000004)
+#define MPI3_DRIVER0_BSDOPTS_EN_ADV_ADAPTER_CONFIG (0x00000008)
struct mpi3_driver_page1 {
struct mpi3_config_page_header header;
__le32 flags;
__le32 reserved0c;
__le16 host_diag_trace_max_size;
__le16 host_diag_trace_min_size;
__le16 host_diag_trace_decrement_size;
__le16 reserved16;
__le16 host_diag_fw_max_size;
__le16 host_diag_fw_min_size;
__le16 host_diag_fw_decrement_size;
__le16 reserved1e;
__le16 host_diag_driver_max_size;
__le16 host_diag_driver_min_size;
__le16 host_diag_driver_decrement_size;
__le16 reserved26;
};
#define MPI3_DRIVER1_PAGEVERSION (0x00)
#ifndef MPI3_DRIVER2_TRIGGER_MAX
#define MPI3_DRIVER2_TRIGGER_MAX (1)
#endif
struct mpi3_driver2_trigger_event {
u8 type;
u8 flags;
u8 reserved02;
u8 event;
__le32 reserved04[3];
};
struct mpi3_driver2_trigger_scsi_sense {
u8 type;
u8 flags;
__le16 reserved02;
u8 ascq;
u8 asc;
u8 sense_key;
u8 reserved07;
__le32 reserved08[2];
};
#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASCQ_MATCH_ALL (0xff)
#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASC_MATCH_ALL (0xff)
#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_SENSE_KEY_MATCH_ALL (0xff)
struct mpi3_driver2_trigger_reply {
u8 type;
u8 flags;
__le16 ioc_status;
__le32 ioc_log_info;
__le32 ioc_log_info_mask;
__le32 reserved0c;
};
#define MPI3_DRIVER2_TRIGGER_REPLY_IOCSTATUS_MATCH_ALL (0xffff)
union mpi3_driver2_trigger_element {
struct mpi3_driver2_trigger_event event;
struct mpi3_driver2_trigger_scsi_sense scsi_sense;
struct mpi3_driver2_trigger_reply reply;
};
#define MPI3_DRIVER2_TRIGGER_TYPE_EVENT (0x00)
#define MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE (0x01)
#define MPI3_DRIVER2_TRIGGER_TYPE_REPLY (0x02)
#define MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_TRACE_RELEASE (0x02)
#define MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_FW_RELEASE (0x01)
struct mpi3_driver_page2 {
struct mpi3_config_page_header header;
__le64 master_trigger;
__le32 reserved10[3];
u8 num_triggers;
u8 reserved1d[3];
union mpi3_driver2_trigger_element trigger[MPI3_DRIVER2_TRIGGER_MAX];
};
#define MPI3_DRIVER2_PAGEVERSION (0x00)
#define MPI3_DRIVER2_MASTERTRIGGER_DIAG_TRACE_RELEASE (0x8000000000000000ULL)
#define MPI3_DRIVER2_MASTERTRIGGER_DIAG_FW_RELEASE (0x4000000000000000ULL)
#define MPI3_DRIVER2_MASTERTRIGGER_SNAPDUMP (0x2000000000000000ULL)
#define MPI3_DRIVER2_MASTERTRIGGER_DEVICE_REMOVAL_ENABLED (0x0000000000000004ULL)
#define MPI3_DRIVER2_MASTERTRIGGER_TASK_MANAGEMENT_ENABLED (0x0000000000000002ULL)
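Driver Page 2 pairs a 64-bit master_trigger bitmap with per-event trigger elements; a consumer first tests the global bits, then walks num_triggers elements by type, with 0xff acting as a match-all wildcard in the SCSI-sense element. A hedged sketch of that matching (the helper and policy are invented for illustration; only the layout and wildcard semantics follow the page definition above):

#include <stdint.h>
#include <stdio.h>

#define MASTERTRIGGER_SNAPDUMP  (0x2000000000000000ULL)
#define TRIGGER_TYPE_SCSI_SENSE (0x01)

struct trigger_scsi_sense {
	uint8_t type, flags;
	uint16_t reserved02;
	uint8_t ascq, asc, sense_key, reserved07;
};

/* Return 1 if a sense triple matches a SCSI-sense trigger element. */
static int sense_matches(const struct trigger_scsi_sense *t,
			 uint8_t key, uint8_t asc, uint8_t ascq)
{
	return (t->sense_key == 0xff || t->sense_key == key) &&
	       (t->asc == 0xff || t->asc == asc) &&
	       (t->ascq == 0xff || t->ascq == ascq);
}

int main(void)
{
	uint64_t master = MASTERTRIGGER_SNAPDUMP;
	struct trigger_scsi_sense t = {
		.type = TRIGGER_TYPE_SCSI_SENSE,
		.sense_key = 0x03, .asc = 0xff, .ascq = 0xff,
	};

	if (master & MASTERTRIGGER_SNAPDUMP)
		printf("snapdump trigger armed\n");
	/* MEDIUM ERROR (key 0x03) matches via the ASC/ASCQ wildcards. */
	printf("medium error matches: %d\n",
	       sense_matches(&t, 0x03, 0x11, 0x00));
	return 0;
}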
struct mpi3_driver_page10 {
struct mpi3_config_page_header header;
__le16 flags;
__le16 reserved0a;
u8 num_allowed_commands;
u8 reserved0d[3];
union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX];
};
#define MPI3_DRIVER10_PAGEVERSION (0x00)
struct mpi3_driver_page20 {
struct mpi3_config_page_header header;
__le16 flags;
__le16 reserved0a;
u8 num_allowed_commands;
u8 reserved0d[3];
union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX];
};
#define MPI3_DRIVER20_PAGEVERSION (0x00)
struct mpi3_driver_page30 {
struct mpi3_config_page_header header;
__le16 flags;
__le16 reserved0a;
u8 num_allowed_commands;
u8 reserved0d[3];
union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX];
};
#define MPI3_DRIVER30_PAGEVERSION (0x00)
union mpi3_security_mac {
__le32 dword[16];
__le16 word[32];
@ -1102,7 +1320,7 @@ struct mpi3_security1_key_record {
#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_NOT_VALID (0x00)
#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_SAFESTORE (0x01)
#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_CERT_CHAIN (0x02)
-#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_AUTH_DEV_KEY (0x03)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_DEVICE_KEY (0x03)
#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_CACHE_OFFLOAD (0x04)
struct mpi3_security_page1 {
struct mpi3_config_page_header header;
@ -1137,16 +1355,30 @@ struct mpi3_sas_io_unit_page0 {
struct mpi3_config_page_header header;
__le32 reserved08;
u8 num_phys;
-u8 reserved0d[3];
+u8 init_status;
+__le16 reserved0e;
struct mpi3_sas_io_unit0_phy_data phy_data[MPI3_SAS_IO_UNIT0_PHY_MAX];
};
 #define MPI3_SASIOUNIT0_PAGEVERSION (0x00)
+#define MPI3_SASIOUNIT0_INITSTATUS_NO_ERRORS (0x00)
+#define MPI3_SASIOUNIT0_INITSTATUS_NEEDS_INITIALIZATION (0x01)
+#define MPI3_SASIOUNIT0_INITSTATUS_NO_TARGETS_ALLOCATED (0x02)
+#define MPI3_SASIOUNIT0_INITSTATUS_BAD_NUM_PHYS (0x04)
+#define MPI3_SASIOUNIT0_INITSTATUS_UNSUPPORTED_CONFIG (0x05)
+#define MPI3_SASIOUNIT0_INITSTATUS_HOST_PHYS_ENABLED (0x06)
+#define MPI3_SASIOUNIT0_INITSTATUS_PRODUCT_SPECIFIC_MIN (0xf0)
+#define MPI3_SASIOUNIT0_INITSTATUS_PRODUCT_SPECIFIC_MAX (0xff)
 #define MPI3_SASIOUNIT0_PORTFLAGS_DISC_IN_PROGRESS (0x08)
-#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_MASK (0x03)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_IOUNIT1 (0x00)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_DYNAMIC (0x01)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_BACKPLANE (0x02)
 #define MPI3_SASIOUNIT0_PHYFLAGS_INIT_PERSIST_CONNECT (0x40)
 #define MPI3_SASIOUNIT0_PHYFLAGS_TARG_PERSIST_CONNECT (0x20)
 #define MPI3_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+#define MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY (0x02)
+#define MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY (0x01)
struct mpi3_sas_io_unit1_phy_data {
u8 io_unit_port;
u8 port_flags;
@ -1343,6 +1575,26 @@ struct mpi3_sas_expander_page1 {
#define MPI3_SASEXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04)
#define MPI3_SASEXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02)
#define MPI3_SASEXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01)
#ifndef MPI3_SASEXPANDER2_MAX_NUM_PHYS
#define MPI3_SASEXPANDER2_MAX_NUM_PHYS (1)
#endif
struct mpi3_sasexpander2_phy_element {
u8 link_change_count;
u8 reserved01;
__le16 rate_change_count;
__le32 reserved04;
};
struct mpi3_sas_expander_page2 {
struct mpi3_config_page_header header;
u8 num_phys;
u8 reserved09;
__le16 dev_handle;
__le32 reserved0c;
struct mpi3_sasexpander2_phy_element phy[MPI3_SASEXPANDER2_MAX_NUM_PHYS];
};
#define MPI3_SASEXPANDER2_PAGEVERSION (0x00)
struct mpi3_sas_port_page0 {
struct mpi3_config_page_header header;
u8 port_number;
@ -1510,6 +1762,14 @@ struct mpi3_sas_phy_page4 {
#define MPI3_PCIE_NEG_LINK_RATE_8_0 (0x04)
#define MPI3_PCIE_NEG_LINK_RATE_16_0 (0x05)
#define MPI3_PCIE_NEG_LINK_RATE_32_0 (0x06)
#define MPI3_PCIE_ASPM_ENABLE_NONE (0x0)
#define MPI3_PCIE_ASPM_ENABLE_L0S (0x1)
#define MPI3_PCIE_ASPM_ENABLE_L1 (0x2)
#define MPI3_PCIE_ASPM_ENABLE_L0S_L1 (0x3)
#define MPI3_PCIE_ASPM_SUPPORT_NONE (0x0)
#define MPI3_PCIE_ASPM_SUPPORT_L0S (0x1)
#define MPI3_PCIE_ASPM_SUPPORT_L1 (0x2)
#define MPI3_PCIE_ASPM_SUPPORT_L0S_L1 (0x3)
struct mpi3_pcie_io_unit0_phy_data {
u8 link;
u8 link_flags;
@ -1540,7 +1800,8 @@ struct mpi3_pcie_io_unit_page0 {
__le32 reserved08;
u8 num_phys;
u8 init_status;
-__le16 reserved0e;
+u8 aspm;
+u8 reserved0f;
struct mpi3_pcie_io_unit0_phy_data phy_data[MPI3_PCIE_IO_UNIT0_PHY_MAX];
};
@ -1556,6 +1817,14 @@ struct mpi3_pcie_io_unit_page0 {
#define MPI3_PCIEIOUNIT0_INITSTATUS_BAD_CLOCKING_MODE (0x08)
#define MPI3_PCIEIOUNIT0_INITSTATUS_PROD_SPEC_START (0xf0)
#define MPI3_PCIEIOUNIT0_INITSTATUS_PROD_SPEC_END (0xff)
#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_STATES_MASK (0xc0)
#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_STATES_SHIFT (6)
#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_STATES_MASK (0x30)
#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_STATES_SHIFT (4)
#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_SUPPORT_MASK (0x0c)
#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_SUPPORT_SHIFT (2)
#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_SUPPORT_MASK (0x03)
#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_SUPPORT_SHIFT (0)
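These ASPM fields pack four two-bit values into the single aspm byte; a consumer recovers each with the matching _MASK/_SHIFT pair. A tiny sketch of that decoding (the register value is invented):

#include <stdio.h>

#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_STATES_MASK  (0xc0)
#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_STATES_SHIFT (6)
#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_STATES_MASK  (0x30)
#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_STATES_SHIFT (4)

int main(void)
{
	unsigned char aspm = 0x9c;	/* example value of the aspm byte */
	unsigned sw  = (aspm & MPI3_PCIEIOUNIT0_ASPM_SWITCH_STATES_MASK)
			>> MPI3_PCIEIOUNIT0_ASPM_SWITCH_STATES_SHIFT;
	unsigned dir = (aspm & MPI3_PCIEIOUNIT0_ASPM_DIRECT_STATES_MASK)
			>> MPI3_PCIEIOUNIT0_ASPM_DIRECT_STATES_SHIFT;

	/* 0x9c -> switch states 0x2, direct states 0x1 */
	printf("switch=%u direct=%u\n", sw, dir);
	return 0;
}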
struct mpi3_pcie_io_unit1_phy_data {
u8 link;
u8 link_flags;
@ -1569,16 +1838,16 @@ struct mpi3_pcie_io_unit1_phy_data {
#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_DIS_SEPARATE_REFCLK (0x00)
#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_EN_SRIS (0x01)
#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_EN_SRNS (0x02)
#define MPI3_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_MASK (0xf0)
#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_SHIFT (4)
#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_2_5 (0x20)
#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_5_0 (0x30)
#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_8_0 (0x40)
#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_16_0 (0x50)
#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_32_0 (0x60)
#ifndef MPI3_PCIE_IO_UNIT1_PHY_MAX
#define MPI3_PCIE_IO_UNIT1_PHY_MAX (1)
#endif
struct mpi3_pcie_io_unit_page1 {
struct mpi3_config_page_header header;
@ -1586,21 +1855,66 @@ struct mpi3_pcie_io_unit_page1 {
__le32 reserved0c;
u8 num_phys;
u8 reserved11;
-__le16 reserved12;
+u8 aspm;
+u8 reserved13;
struct mpi3_pcie_io_unit1_phy_data phy_data[MPI3_PCIE_IO_UNIT1_PHY_MAX];
};
#define MPI3_PCIEIOUNIT1_PAGEVERSION (0x00)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_OVERRIDE_DISABLE (0x80)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_DISABLE (0x40)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_MASK (0x30)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SHIFT (4)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_SRNS_DISABLED (0x00)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_ENABLED (0x10)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRNS_ENABLED (0x20)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MASK (0x0f)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_2_5 (0x02)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_5_0 (0x03)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_8_0 (0x04)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_16_0 (0x05)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_32_0 (0x06)
#define MPI3_PCIEIOUNIT1_ASPM_SWITCH_MASK (0x0c)
#define MPI3_PCIEIOUNIT1_ASPM_SWITCH_SHIFT (2)
#define MPI3_PCIEIOUNIT1_ASPM_DIRECT_MASK (0x03)
#define MPI3_PCIEIOUNIT1_ASPM_DIRECT_SHIFT (0)
struct mpi3_pcie_io_unit_page2 {
struct mpi3_config_page_header header;
-__le16 nv_me_max_queue_depth;
-__le16 reserved0a;
-u8 nv_me_abort_to;
+__le16 nvme_max_q_dx1;
+__le16 nvme_max_q_dx2;
+u8 nvme_abort_to;
 u8 reserved0d;
-__le16 reserved0e;
+__le16 nvme_max_q_dx4;
};
#define MPI3_PCIEIOUNIT2_PAGEVERSION (0x00)
#define MPI3_PCIEIOUNIT3_ERROR_RECEIVER_ERROR (0)
#define MPI3_PCIEIOUNIT3_ERROR_RECOVERY (1)
#define MPI3_PCIEIOUNIT3_ERROR_CORRECTABLE_ERROR_MSG (2)
#define MPI3_PCIEIOUNIT3_ERROR_BAD_DLLP (3)
#define MPI3_PCIEIOUNIT3_ERROR_BAD_TLP (4)
#define MPI3_PCIEIOUNIT3_NUM_ERROR_INDEX (5)
struct mpi3_pcie_io_unit3_error {
__le16 threshold_count;
__le16 reserved02;
};
struct mpi3_pcie_io_unit_page3 {
struct mpi3_config_page_header header;
u8 threshold_window;
u8 threshold_action;
u8 escalation_count;
u8 escalation_action;
u8 num_errors;
u8 reserved0d[3];
struct mpi3_pcie_io_unit3_error error[MPI3_PCIEIOUNIT3_NUM_ERROR_INDEX];
};
#define MPI3_PCIEIOUNIT3_PAGEVERSION (0x00)
#define MPI3_PCIEIOUNIT3_ACTION_NO_ACTION (0x00)
#define MPI3_PCIEIOUNIT3_ACTION_HOT_RESET (0x01)
#define MPI3_PCIEIOUNIT3_ACTION_REDUCE_LINK_RATE_ONLY (0x02)
#define MPI3_PCIEIOUNIT3_ACTION_REDUCE_LINK_RATE_NO_ACCESS (0x03)
struct mpi3_pcie_switch_page0 {
struct mpi3_config_page_header header;
u8 io_unit_port;
@ -1609,7 +1923,7 @@ struct mpi3_pcie_switch_page0 {
__le16 dev_handle;
__le16 parent_dev_handle;
u8 num_ports;
-u8 pc_ie_level;
+u8 pcie_level;
__le16 reserved12;
__le32 reserved14;
__le32 reserved18;
@ -1623,7 +1937,8 @@ struct mpi3_pcie_switch_page0 {
struct mpi3_pcie_switch_page1 {
struct mpi3_config_page_header header;
u8 io_unit_port;
-u8 reserved09[3];
+u8 flags;
+__le16 reserved0a;
u8 num_ports;
u8 port_num;
__le16 attached_dev_handle;
@ -1636,15 +1951,43 @@ struct mpi3_pcie_switch_page1 {
};
#define MPI3_PCIESWITCH1_PAGEVERSION (0x00)
#define MPI3_PCIESWITCH1_FLAGS_ASPMSTATE_MASK (0x0c)
#define MPI3_PCIESWITCH1_FLAGS_ASPMSTATE_SHIFT (2)
#define MPI3_PCIESWITCH1_FLAGS_ASPMSUPPORT_MASK (0x03)
#define MPI3_PCIESWITCH1_FLAGS_ASPMSUPPORT_SHIFT (0)
#ifndef MPI3_PCIESWITCH2_MAX_NUM_PORTS
#define MPI3_PCIESWITCH2_MAX_NUM_PORTS (1)
#endif
struct mpi3_pcieswitch2_port_element {
__le16 link_change_count;
__le16 rate_change_count;
__le32 reserved04;
};
struct mpi3_pcie_switch_page2 {
struct mpi3_config_page_header header;
u8 num_ports;
u8 reserved09;
__le16 dev_handle;
__le32 reserved0c;
struct mpi3_pcieswitch2_port_element port[MPI3_PCIESWITCH2_MAX_NUM_PORTS];
};
#define MPI3_PCIESWITCH2_PAGEVERSION (0x00)
struct mpi3_pcie_link_page0 {
struct mpi3_config_page_header header;
u8 link;
u8 reserved09[3];
-__le32 correctable_error_count;
-__le16 n_fatal_error_count;
-__le16 reserved12;
-__le16 fatal_error_count;
-__le16 reserved16;
+__le32 reserved0c;
+__le32 receiver_error_count;
+__le32 recovery_count;
+__le32 corr_error_msg_count;
+__le32 non_fatal_error_msg_count;
+__le32 fatal_error_msg_count;
+__le32 non_fatal_error_count;
+__le32 fatal_error_count;
+__le32 bad_dllp_count;
+__le32 bad_tlp_count;
};
#define MPI3_PCIELINK0_PAGEVERSION (0x00)
@ -1654,11 +1997,12 @@ struct mpi3_enclosure_page0 {
__le16 flags;
__le16 enclosure_handle;
__le16 num_slots;
-__le16 start_slot;
+__le16 reserved16;
u8 io_unit_port;
u8 enclosure_level;
__le16 sep_dev_handle;
-__le32 reserved1c;
+u8 chassis_slot;
+u8 reserved1d[3];
};
#define MPI3_ENCLOSURE0_PAGEVERSION (0x00)
@ -1666,6 +2010,7 @@ struct mpi3_enclosure_page0 {
#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_VIRTUAL (0x0000)
#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_SAS (0x4000)
#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_PCIE (0x8000)
#define MPI3_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020)
#define MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK (0x0010)
#define MPI3_ENCLS0_FLAGS_ENCL_DEV_NOT_FOUND (0x0000)
#define MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT (0x0010)
@ -1686,6 +2031,7 @@ struct mpi3_device0_sas_sata_format {
u8 zone_group;
};
#define MPI3_DEVICE0_SASSATA_FLAGS_WRITE_SAME_UNMAP_NCQ (0x0400)
#define MPI3_DEVICE0_SASSATA_FLAGS_SLUMBER_CAP (0x0200)
#define MPI3_DEVICE0_SASSATA_FLAGS_PARTIAL_CAP (0x0100)
#define MPI3_DEVICE0_SASSATA_FLAGS_ASYNC_NOTIFY (0x0080)
@ -1707,10 +2053,11 @@ struct mpi3_device0_pcie_format {
__le32 maximum_data_transfer_size;
__le32 capabilities;
__le16 noiob;
-u8 nv_me_abort_to;
+u8 nvme_abort_to;
u8 page_size;
__le16 shutdown_latency;
-__le16 reserved16;
+u8 recovery_info;
+u8 reserved17;
};
#define MPI3_DEVICE0_PCIE_LINK_RATE_32_0_SUPP (0x10)
@ -1718,16 +2065,38 @@ struct mpi3_device0_pcie_format {
#define MPI3_DEVICE0_PCIE_LINK_RATE_8_0_SUPP (0x04)
#define MPI3_DEVICE0_PCIE_LINK_RATE_5_0_SUPP (0x02)
#define MPI3_DEVICE0_PCIE_LINK_RATE_2_5_SUPP (0x01)
-#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK (0x0003)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK (0x0007)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NO_DEVICE (0x0000)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE (0x0001)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SWITCH_DEVICE (0x0002)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE (0x0003)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_ASPM_MASK (0x0030)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_ASPM_SHIFT (4)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK (0x00c0)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_SHIFT (6)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0 (0x0000)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_1 (0x0040)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_2 (0x0080)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_3 (0x00c0)
#define MPI3_DEVICE0_PCIE_CAP_SGL_EXTRA_LENGTH_SUPPORTED (0x00000020)
#define MPI3_DEVICE0_PCIE_CAP_METADATA_SEPARATED (0x00000010)
#define MPI3_DEVICE0_PCIE_CAP_SGL_DWORD_ALIGN_REQUIRED (0x00000008)
-#define MPI3_DEVICE0_PCIE_CAP_NVME_SGL_ENABLED (0x00000004)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_FORMAT_SGL (0x00000004)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_FORMAT_PRP (0x00000000)
#define MPI3_DEVICE0_PCIE_CAP_BIT_BUCKET_SGL_SUPP (0x00000002)
#define MPI3_DEVICE0_PCIE_CAP_SGL_SUPP (0x00000001)
#define MPI3_DEVICE0_PCIE_CAP_ASPM_MASK (0x000000c0)
#define MPI3_DEVICE0_PCIE_CAP_ASPM_SHIFT (6)
#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_MASK (0xe0)
#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_NS_MGMT (0x00)
#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_FORMAT (0x20)
#define MPI3_DEVICE0_PCIE_RECOVER_REASON_MASK (0x1f)
#define MPI3_DEVICE0_PCIE_RECOVER_REASON_NO_NS (0x00)
#define MPI3_DEVICE0_PCIE_RECOVER_REASON_NO_NSID_1 (0x01)
#define MPI3_DEVICE0_PCIE_RECOVER_REASON_TOO_MANY_NS (0x02)
#define MPI3_DEVICE0_PCIE_RECOVER_REASON_PROTECTION (0x03)
#define MPI3_DEVICE0_PCIE_RECOVER_REASON_METADATA_SZ (0x04)
#define MPI3_DEVICE0_PCIE_RECOVER_REASON_LBA_DATA_SZ (0x05)
struct mpi3_device0_vd_format {
u8 vd_state;
u8 raid_level;
@ -1783,6 +2152,8 @@ struct mpi3_device_page0 {
};
#define MPI3_DEVICE0_PAGEVERSION (0x00)
#define MPI3_DEVICE0_PARENT_INVALID (0xffff)
#define MPI3_DEVICE0_ENCLOSURE_HANDLE_NO_ENCLOSURE (0x0000)
#define MPI3_DEVICE0_WWID_INVALID (0xffffffffffffffff)
#define MPI3_DEVICE0_PERSISTENTID_INVALID (0xffff)
#define MPI3_DEVICE0_IOUNITPORT_INVALID (0xff)
@ -1792,9 +2163,13 @@ struct mpi3_device_page0 {
#define MPI3_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x03)
#define MPI3_DEVICE0_ASTATUS_UNAUTHORIZED (0x04)
#define MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY (0x05)
#define MPI3_DEVICE0_ASTATUS_PREPARE (0x06)
#define MPI3_DEVICE0_ASTATUS_SAFE_MODE (0x07)
#define MPI3_DEVICE0_ASTATUS_GENERIC_MAX (0x0f)
#define MPI3_DEVICE0_ASTATUS_SAS_UNKNOWN (0x10)
#define MPI3_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x11)
#define MPI3_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x12)
#define MPI3_DEVICE0_ASTATUS_SAS_MAX (0x1f)
#define MPI3_DEVICE0_ASTATUS_SIF_UNKNOWN (0x20)
#define MPI3_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x21)
#define MPI3_DEVICE0_ASTATUS_SIF_DIAG (0x22)
@ -1810,6 +2185,8 @@ struct mpi3_device_page0 {
#define MPI3_DEVICE0_ASTATUS_PCIE_MEM_SPACE_ACCESS (0x31)
#define MPI3_DEVICE0_ASTATUS_PCIE_UNSUPPORTED (0x32)
#define MPI3_DEVICE0_ASTATUS_PCIE_MSIX_REQUIRED (0x33)
#define MPI3_DEVICE0_ASTATUS_PCIE_ECRC_REQUIRED (0x34)
#define MPI3_DEVICE0_ASTATUS_PCIE_MAX (0x3f)
#define MPI3_DEVICE0_ASTATUS_NVME_UNKNOWN (0x40)
#define MPI3_DEVICE0_ASTATUS_NVME_READY_TIMEOUT (0x41)
#define MPI3_DEVICE0_ASTATUS_NVME_DEVCFG_UNSUPPORTED (0x42)
@ -1820,7 +2197,17 @@ struct mpi3_device_page0 {
#define MPI3_DEVICE0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED (0x47)
#define MPI3_DEVICE0_ASTATUS_NVME_IDLE_TIMEOUT (0x48)
#define MPI3_DEVICE0_ASTATUS_NVME_CTRL_FAILURE_STATUS (0x49)
-#define MPI3_DEVICE0_ASTATUS_VD_UNKNOWN (0x50)
#define MPI3_DEVICE0_ASTATUS_NVME_INSUFFICIENT_POWER (0x4a)
#define MPI3_DEVICE0_ASTATUS_NVME_DOORBELL_STRIDE (0x4b)
#define MPI3_DEVICE0_ASTATUS_NVME_MEM_PAGE_MIN_SIZE (0x4c)
#define MPI3_DEVICE0_ASTATUS_NVME_MEMORY_ALLOCATION (0x4d)
#define MPI3_DEVICE0_ASTATUS_NVME_COMPLETION_TIME (0x4e)
#define MPI3_DEVICE0_ASTATUS_NVME_BAR (0x4f)
#define MPI3_DEVICE0_ASTATUS_NVME_NS_DESCRIPTOR (0x50)
#define MPI3_DEVICE0_ASTATUS_NVME_INCOMPATIBLE_SETTINGS (0x51)
#define MPI3_DEVICE0_ASTATUS_NVME_MAX (0x5f)
#define MPI3_DEVICE0_ASTATUS_VD_UNKNOWN (0x80)
#define MPI3_DEVICE0_ASTATUS_VD_MAX (0x8f)
#define MPI3_DEVICE0_FLAGS_CONTROLLER_DEV_HANDLE (0x0080)
#define MPI3_DEVICE0_FLAGS_HIDDEN (0x0008)
#define MPI3_DEVICE0_FLAGS_ATT_METHOD_MASK (0x0006)
@ -1870,11 +2257,17 @@ struct mpi3_device_page1 {
struct mpi3_config_page_header header;
__le16 dev_handle;
__le16 reserved0a;
-__le32 reserved0c[12];
+__le16 link_change_count;
+__le16 rate_change_count;
+__le16 tm_count;
+__le16 reserved12;
+__le32 reserved14[10];
u8 reserved3c[3];
u8 device_form;
union mpi3_device1_dev_spec_format device_specific;
};
#define MPI3_DEVICE1_PAGEVERSION (0x00)
#define MPI3_DEVICE1_COUNTER_MAX (0xfffe)
#define MPI3_DEVICE1_COUNTER_INVALID (0xffff)
#endif

View File

@ -61,6 +61,8 @@ struct mpi3_component_image_header {
#define MPI3_IMAGE_HEADER_SIGNATURE1_SPD (0x20445053)
#define MPI3_IMAGE_HEADER_SIGNATURE1_GAS_GAUGE (0x20534147)
#define MPI3_IMAGE_HEADER_SIGNATURE1_PBLP (0x504c4250)
#define MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST (0x464e414d)
#define MPI3_IMAGE_HEADER_SIGNATURE1_OEM (0x204d454f)
#define MPI3_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_MASK (0x00000030)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_CDI (0x00000000)
@ -94,6 +96,61 @@ struct mpi3_component_image_header {
#define MPI3_IMAGE_HEADER_HASH_EXCLUSION_OFFSET (0x5c)
#define MPI3_IMAGE_HEADER_NEXT_IMAGE_HEADER_OFFSET_OFFSET (0x7c)
#define MPI3_IMAGE_HEADER_SIZE (0x100)
#ifndef MPI3_CI_MANIFEST_MPI_MAX
#define MPI3_CI_MANIFEST_MPI_MAX (1)
#endif
struct mpi3_ci_manifest_mpi_comp_image_ref {
__le32 signature1;
__le32 reserved04[3];
struct mpi3_comp_image_version component_image_version;
__le32 component_image_version_string_offset;
__le32 crc;
};
struct mpi3_ci_manifest_mpi {
u8 manifest_type;
u8 reserved01[3];
__le32 reserved04[3];
u8 num_image_references;
u8 release_level;
__le16 reserved12;
__le16 reserved14;
__le16 flags;
__le32 reserved18[2];
__le16 vendor_id;
__le16 device_id;
__le16 subsystem_vendor_id;
__le16 subsystem_id;
__le32 reserved28[2];
union mpi3_version_union package_security_version;
__le32 reserved34;
struct mpi3_comp_image_version package_version;
__le32 package_version_string_offset;
__le32 package_build_date_string_offset;
__le32 package_build_time_string_offset;
__le32 reserved4c;
__le32 diag_authorization_identifier[16];
struct mpi3_ci_manifest_mpi_comp_image_ref component_image_ref[MPI3_CI_MANIFEST_MPI_MAX];
};
#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_DEV (0x00)
#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_PREALPHA (0x10)
#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_ALPHA (0x20)
#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_BETA (0x30)
#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_RC (0x40)
#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_GCA (0x50)
#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_POINT (0x60)
#define MPI3_CI_MANIFEST_MPI_FLAGS_DIAG_AUTHORIZATION (0x01)
#define MPI3_CI_MANIFEST_MPI_SUBSYSTEMID_IGNORED (0xffff)
#define MPI3_CI_MANIFEST_MPI_PKG_VER_STR_OFF_UNSPECIFIED (0x00000000)
#define MPI3_CI_MANIFEST_MPI_PKG_BUILD_DATE_STR_OFF_UNSPECIFIED (0x00000000)
#define MPI3_CI_MANIFEST_MPI_PKG_BUILD_TIME_STR_OFF_UNSPECIFIED (0x00000000)
union mpi3_ci_manifest {
struct mpi3_ci_manifest_mpi mpi;
__le32 dword[1];
};
#define MPI3_CI_MANIFEST_TYPE_MPI (0x00)
struct mpi3_extended_image_header {
u8 image_type;
u8 reserved01[3];
@ -161,6 +218,7 @@ struct mpi3_encrypted_hash_entry {
#define MPI3_HASH_ALGORITHM_SIZE_UNUSED (0x00)
#define MPI3_HASH_ALGORITHM_SIZE_SHA256 (0x01)
#define MPI3_HASH_ALGORITHM_SIZE_SHA512 (0x02)
#define MPI3_HASH_ALGORITHM_SIZE_SHA384 (0x03)
#define MPI3_ENCRYPTION_ALGORITHM_UNUSED (0x00)
#define MPI3_ENCRYPTION_ALGORITHM_RSA256 (0x01)
#define MPI3_ENCRYPTION_ALGORITHM_RSA512 (0x02)
@ -178,7 +236,6 @@ struct mpi3_encrypted_key_with_hash_entry {
u8 reserved03;
__le32 reserved04;
__le32 public_key[MPI3_PUBLIC_KEY_MAX];
-__le32 encrypted_hash[MPI3_ENCRYPTED_HASH_MAX];
};
#ifndef MPI3_ENCRYPTED_HASH_ENTRY_MAX

View File

@ -13,7 +13,7 @@ struct mpi3_scsi_io_cdb_eedp32 {
__le32 transfer_length;
};
-union mpi3_scso_io_cdb_union {
+union mpi3_scsi_io_cdb_union {
u8 cdb32[32];
struct mpi3_scsi_io_cdb_eedp32 eedp32;
struct mpi3_sge_common sge;
@ -32,11 +32,12 @@ struct mpi3_scsi_io_request {
__le32 skip_count;
__le32 data_length;
u8 lun[8];
-union mpi3_scso_io_cdb_union cdb;
+union mpi3_scsi_io_cdb_union cdb;
union mpi3_sge_union sgl[4];
};
#define MPI3_SCSIIO_MSGFLAGS_METASGL_VALID (0x80)
#define MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE (0x40)
#define MPI3_SCSIIO_FLAGS_LARGE_CDB (0x60000000)
#define MPI3_SCSIIO_FLAGS_CDB_16_OR_LESS (0x00000000)
#define MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16 (0x20000000)
@ -155,5 +156,13 @@ struct mpi3_scsi_task_mgmt_reply {
__le32 reserved18;
};
-#define MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC (0x80)
#define MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE (0x00)
#define MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME (0x02)
#define MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED (0x04)
#define MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED (0x05)
#define MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED (0x08)
#define MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN (0x09)
#define MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG (0x0a)
#define MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC (0x80)
#define MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED (0x81)
#endif
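The expanded task-management response codes follow SAM TMF semantics; a driver typically folds them into a success/retry decision or a log string. A minimal mapping sketch (the names are local stand-ins for the MPI3_SCSITASKMGMT_RSPCODE_* values above, and the mapping policy is illustrative, not mpi3mr's):

#include <stdio.h>

#define RSPCODE_TM_COMPLETE               (0x00)
#define RSPCODE_INVALID_FRAME             (0x02)
#define RSPCODE_TM_FUNCTION_NOT_SUPPORTED (0x04)
#define RSPCODE_TM_FAILED                 (0x05)
#define RSPCODE_TM_SUCCEEDED              (0x08)
#define RSPCODE_IO_QUEUED_ON_IOC          (0x80)

static const char *tm_rsp_name(unsigned char code)
{
	switch (code) {
	case RSPCODE_TM_COMPLETE:      return "complete";
	case RSPCODE_TM_SUCCEEDED:     return "succeeded";
	case RSPCODE_IO_QUEUED_ON_IOC: return "I/O queued on IOC";
	case RSPCODE_TM_FAILED:        return "failed";
	case RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
				       return "function not supported";
	case RSPCODE_INVALID_FRAME:    return "invalid frame";
	default:                       return "unknown";
	}
}

int main(void)
{
	printf("0x08 -> %s\n", tm_rsp_name(0x08));
	return 0;
}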

View File

@ -29,10 +29,15 @@ struct mpi3_ioc_init_request {
__le64 driver_information_address;
};
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_MASK (0x03)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_NOT_USED (0x00)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_SEPARATED (0x01)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_INLINE (0x02)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_BOTH (0x03)
 #define MPI3_WHOINIT_NOT_INITIALIZED (0x00)
 #define MPI3_WHOINIT_ROM_BIOS (0x02)
 #define MPI3_WHOINIT_HOST_DRIVER (0x03)
 #define MPI3_WHOINIT_MANUFACTURER (0x04)
struct mpi3_driver_info_layout {
__le32 information_length;
u8 driver_signature[12];
@ -77,17 +82,17 @@ struct mpi3_ioc_facts_data {
u8 sge_modifier_shift;
u8 protocol_flags;
__le16 max_sas_initiators;
-__le16 max_sas_targets;
+__le16 reserved2a;
__le16 max_sas_expanders;
__le16 max_enclosures;
__le16 min_dev_handle;
__le16 max_dev_handle;
-__le16 max_pc_ie_switches;
+__le16 max_pcie_switches;
__le16 max_nvme;
-__le16 max_pds;
+__le16 reserved38;
 __le16 max_vds;
 __le16 max_host_pds;
-__le16 max_advanced_host_pds;
+__le16 max_adv_host_pds;
__le16 max_raid_pds;
__le16 max_posted_cmd_buffers;
__le32 flags;
@ -97,26 +102,41 @@ struct mpi3_ioc_facts_data {
__le16 reserved4e;
__le32 diag_trace_size;
__le32 diag_fw_size;
__le32 diag_driver_size;
u8 max_host_pd_ns_count;
u8 max_adv_host_pd_ns_count;
u8 max_raidpd_ns_count;
u8 reserved5f;
};
-#define MPI3_IOCFACTS_CAPABILITY_ADVANCED_HOST_PD (0x00000010)
+#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_MASK (0x80000000)
+#define MPI3_IOCFACTS_CAPABILITY_SUPERVISOR_IOC (0x00000000)
+#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC (0x10000000)
+#define MPI3_IOCFACTS_CAPABILITY_COMPLETE_RESET_CAPABLE (0x00000100)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_ENABLED (0x00000080)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_FW_ENABLED (0x00000040)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_DRIVER_ENABLED (0x00000020)
+#define MPI3_IOCFACTS_CAPABILITY_ADVANCED_HOST_PD_ENABLED (0x00000010)
 #define MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE (0x00000008)
-#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_GRAN_MASK (0x00000001)
-#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_IOC_GRAN (0x00000000)
-#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_REPLY_Q_GRAN (0x00000001)
+#define MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED (0x00000002)
+#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_SUPPORTED (0x00000001)
#define MPI3_IOCFACTS_PID_TYPE_MASK (0xf000)
#define MPI3_IOCFACTS_PID_TYPE_SHIFT (12)
#define MPI3_IOCFACTS_PID_PRODUCT_MASK (0x0f00)
#define MPI3_IOCFACTS_PID_PRODUCT_SHIFT (8)
#define MPI3_IOCFACTS_PID_FAMILY_MASK (0x00ff)
#define MPI3_IOCFACTS_PID_FAMILY_SHIFT (0)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_REKEY (0x2000)
#define MPI3_IOCFACTS_EXCEPT_SAS_DISABLED (0x1000)
#define MPI3_IOCFACTS_EXCEPT_SAFE_MODE (0x0800)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_MASK (0x0700)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_NONE (0x0000)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_RAID (0x0100)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_OOB (0x0200)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_RAID (0x0300)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_OOB (0x0400)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_MGMT (0x0100)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_MGMT (0x0200)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_DRIVE_EXT_VIA_MGMT (0x0300)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_OOB (0x0400)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_OOB (0x0500)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_DRIVE_EXT_VIA_OOB (0x0600)
#define MPI3_IOCFACTS_EXCEPT_PCIE_DISABLED (0x0080)
#define MPI3_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0040)
#define MPI3_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0020)
@ -175,6 +195,7 @@ struct mpi3_create_request_queue_request {
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
#define MPI3_CREATE_REQUEST_QUEUE_SIZE_MINIMUM (2)
struct mpi3_delete_request_queue_request {
__le16 host_tag;
u8 ioc_use_only02;
@ -210,6 +231,7 @@ struct mpi3_create_reply_queue_request {
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_MASK (0x01)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_DISABLE (0x00)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE (0x01)
#define MPI3_CREATE_REPLY_QUEUE_SIZE_MINIMUM (2)
struct mpi3_delete_reply_queue_request {
__le16 host_tag;
u8 ioc_use_only02;
@ -255,7 +277,9 @@ struct mpi3_port_enable_request {
#define MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x19)
#define MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST (0x20)
#define MPI3_EVENT_PCIE_ENUMERATION (0x22)
#define MPI3_EVENT_PCIE_ERROR_THRESHOLD (0x23)
#define MPI3_EVENT_HARD_RESET_RECEIVED (0x40)
#define MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE (0x50)
#define MPI3_EVENT_MIN_PRODUCT_SPECIFIC (0x60)
#define MPI3_EVENT_MAX_PRODUCT_SPECIFIC (0x7f)
#define MPI3_EVENT_NOTIFY_EVENTMASK_WORDS (4)
@ -311,10 +335,9 @@ struct mpi3_event_data_temp_threshold {
__le32 reserved0c;
};
#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD3_EXCEEDED (0x0008)
#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD2_EXCEEDED (0x0004)
#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD1_EXCEEDED (0x0002)
#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD0_EXCEEDED (0x0001)
#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_FATAL_THRESHOLD_EXCEEDED (0x0004)
#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_CRITICAL_THRESHOLD_EXCEEDED (0x0002)
#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_WARNING_THRESHOLD_EXCEEDED (0x0001)
struct mpi3_event_data_cable_management {
__le32 active_cable_power_requirement;
u8 status;
@ -398,8 +421,10 @@ struct mpi3_event_data_sas_discovery {
#define MPI3_SAS_DISC_STATUS_MAX_EXPANDERS_EXCEED (0x40000000)
#define MPI3_SAS_DISC_STATUS_MAX_DEVICES_EXCEED (0x20000000)
#define MPI3_SAS_DISC_STATUS_MAX_TOPO_PHYS_EXCEED (0x10000000)
#define MPI3_SAS_DISC_STATUS_INVALID_CEI (0x00010000)
#define MPI3_SAS_DISC_STATUS_FECEI_MISMATCH (0x00008000)
#define MPI3_SAS_DISC_STATUS_MULTIPLE_DEVICES_IN_SLOT (0x00004000)
#define MPI3_SAS_DISC_STATUS_SLOT_COUNT_MISMATCH (0x00002000)
#define MPI3_SAS_DISC_STATUS_NECEI_MISMATCH (0x00002000)
#define MPI3_SAS_DISC_STATUS_TOO_MANY_SLOTS (0x00001000)
#define MPI3_SAS_DISC_STATUS_EXP_MULTI_SUBTRACTIVE (0x00000800)
#define MPI3_SAS_DISC_STATUS_MULTI_PORT_DOMAIN (0x00000400)
@ -581,6 +606,20 @@ struct mpi3_event_data_pcie_topology_change_list {
#define MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING (0x02)
#define MPI3_EVENT_PCIE_TOPO_SS_RESPONDING (0x03)
#define MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING (0x04)
struct mpi3_event_data_pcie_error_threshold {
__le64 timestamp;
u8 reason_code;
u8 port;
__le16 switch_dev_handle;
u8 error;
u8 action;
__le16 threshold_count;
__le16 attached_dev_handle;
__le16 reserved12;
};
#define MPI3_EVENT_PCI_ERROR_RC_THRESHOLD_EXCEEDED (0x00)
#define MPI3_EVENT_PCI_ERROR_RC_ESCALATION (0x01)
struct mpi3_event_data_sas_init_dev_status_change {
u8 reason_code;
u8 io_unit_port;
@ -604,6 +643,16 @@ struct mpi3_event_data_hard_reset_received {
__le16 reserved02;
};
struct mpi3_event_data_diag_buffer_status_change {
u8 type;
u8 reason_code;
__le16 reserved02;
__le32 reserved04;
};
#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RELEASED (0x01)
#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_PAUSED (0x02)
#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RESUMED (0x03)
#define MPI3_PEL_LOCALE_FLAGS_NON_BLOCKING_BOOT_EVENT (0x0200)
#define MPI3_PEL_LOCALE_FLAGS_BLOCKING_BOOT_EVENT (0x0100)
#define MPI3_PEL_LOCALE_FLAGS_PCIE (0x0080)
@ -645,21 +694,23 @@ struct mpi3_pel_seq {
};
struct mpi3_pel_entry {
__le64 time_stamp;
__le32 sequence_number;
__le32 time_stamp[2];
__le16 log_code;
__le16 arg_type;
__le16 locale;
u8 class;
u8 reserved13;
u8 flags;
u8 ext_num;
u8 num_exts;
u8 arg_data_size;
u8 fixed_format_size;
u8 fixed_format_strings_size;
__le32 reserved18[2];
__le32 pel_info[24];
};
#define MPI3_PEL_FLAGS_COMPLETE_RESET_NEEDED (0x02)
#define MPI3_PEL_FLAGS_ACK_NEEDED (0x01)
struct mpi3_pel_list {
__le32 log_count;
__le32 reserved04;
@ -837,7 +888,10 @@ struct mpi3_pel_req_action_acknowledge {
__le32 reserved10;
};
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT (0x01)
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_MASK (0x03)
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_NO_GUIDANCE (0x00)
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_CONTINUE_OP (0x01)
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_TRANSITION_TO_FAULT (0x02)
struct mpi3_pel_reply {
__le16 host_tag;
u8 ioc_use_only02;
@ -885,6 +939,7 @@ struct mpi3_ci_download_request {
#define MPI3_CI_DOWNLOAD_ACTION_ONLINE_ACTIVATION (0x02)
#define MPI3_CI_DOWNLOAD_ACTION_OFFLINE_ACTIVATION (0x03)
#define MPI3_CI_DOWNLOAD_ACTION_GET_STATUS (0x04)
#define MPI3_CI_DOWNLOAD_ACTION_CANCEL_OFFLINE_ACTIVATION (0x05)
struct mpi3_ci_download_reply {
__le16 host_tag;
u8 ioc_use_only02;
@ -902,6 +957,7 @@ struct mpi3_ci_download_reply {
};
#define MPI3_CI_DOWNLOAD_FLAGS_DOWNLOAD_IN_PROGRESS (0x80)
#define MPI3_CI_DOWNLOAD_FLAGS_OFFLINE_ACTIVATION_REQUIRED (0x20)
#define MPI3_CI_DOWNLOAD_FLAGS_KEY_UPDATE_PENDING (0x10)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_MASK (0x0e)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_NOT_NEEDED (0x00)
@ -939,19 +995,28 @@ struct mpi3_ci_upload_request {
#define MPI3_CTRL_OP_REMOVE_DEVICE (0x10)
#define MPI3_CTRL_OP_CLOSE_PERSISTENT_CONNECTION (0x11)
#define MPI3_CTRL_OP_HIDDEN_ACK (0x12)
#define MPI3_CTRL_OP_CLEAR_DEVICE_COUNTERS (0x13)
#define MPI3_CTRL_OP_SAS_SEND_PRIMITIVE (0x20)
#define MPI3_CTRL_OP_SAS_CLEAR_ERROR_LOG (0x21)
#define MPI3_CTRL_OP_PCIE_CLEAR_ERROR_LOG (0x22)
#define MPI3_CTRL_OP_SAS_PHY_CONTROL (0x21)
#define MPI3_CTRL_OP_READ_INTERNAL_BUS (0x23)
#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS (0x24)
#define MPI3_CTRL_OP_PCIE_LINK_CONTROL (0x30)
#define MPI3_CTRL_OP_LOOKUP_MAPPING_PARAM8_LOOKUP_METHOD_INDEX (0x00)
#define MPI3_CTRL_OP_UPDATE_TIMESTAMP_PARAM64_TIMESTAMP_INDEX (0x00)
#define MPI3_CTRL_OP_REMOVE_DEVICE_PARAM16_DEVHANDLE_INDEX (0x00)
#define MPI3_CTRL_OP_CLOSE_PERSIST_CONN_PARAM16_DEVHANDLE_INDEX (0x00)
#define MPI3_CTRL_OP_HIDDEN_ACK_PARAM16_DEVHANDLE_INDEX (0x00)
#define MPI3_CTRL_OP_CLEAR_DEVICE_COUNTERS_PARAM16_DEVHANDLE_INDEX (0x00)
#define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM8_PHY_INDEX (0x00)
#define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM8_PRIMSEQ_INDEX (0x01)
#define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM32_PRIMITIVE_INDEX (0x00)
#define MPI3_CTRL_OP_SAS_CLEAR_ERR_LOG_PARAM8_PHY_INDEX (0x00)
#define MPI3_CTRL_OP_PCIE_CLEAR_ERR_LOG_PARAM8_PHY_INDEX (0x00)
#define MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_ACTION_INDEX (0x00)
#define MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_PHY_INDEX (0x01)
#define MPI3_CTRL_OP_READ_INTERNAL_BUS_PARAM64_ADDRESS_INDEX (0x00)
#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS_PARAM64_ADDRESS_INDEX (0x00)
#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS_PARAM32_VALUE_INDEX (0x00)
#define MPI3_CTRL_OP_PCIE_LINK_CONTROL_PARAM8_ACTION_INDEX (0x00)
#define MPI3_CTRL_OP_PCIE_LINK_CONTROL_PARAM8_LINK_INDEX (0x01)
#define MPI3_CTRL_LOOKUP_METHOD_WWID_ADDRESS (0x01)
#define MPI3_CTRL_LOOKUP_METHOD_ENCLOSURE_SLOT (0x02)
#define MPI3_CTRL_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03)
@ -966,9 +1031,14 @@ struct mpi3_ci_upload_request {
#define MPI3_CTRL_LOOKUP_METHOD_PERSISTID_PARAM16_PERSISTENT_ID_INDEX (1)
#define MPI3_CTRL_LOOKUP_METHOD_VALUE16_DEVH_INDEX (0)
#define MPI3_CTRL_GET_TIMESTAMP_VALUE64_TIMESTAMP_INDEX (0)
#define MPI3_CTRL_READ_INTERNAL_BUS_VALUE32_VALUE_INDEX (0)
#define MPI3_CTRL_PRIMFLAGS_SINGLE (0x01)
#define MPI3_CTRL_PRIMFLAGS_TRIPLE (0x03)
#define MPI3_CTRL_PRIMFLAGS_REDUNDANT (0x06)
#define MPI3_CTRL_ACTION_NOP (0x00)
#define MPI3_CTRL_ACTION_LINK_RESET (0x01)
#define MPI3_CTRL_ACTION_HARD_RESET (0x02)
#define MPI3_CTRL_ACTION_CLEAR_ERROR_LOG (0x05)
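The *_PARAM8/16/32/64_*_INDEX macros above name which slot of the request's parameter arrays each operation consumes. A sketch of staging a remove-device operation, assuming the truncated mpi3_iounit_control_request below carries an operation byte and a param16[] array as in the MPI 3.0 layout:

/* Hedged sketch: stage a REMOVE_DEVICE IO-unit control operation.
 * The operation and param16[] fields are assumptions; the struct body
 * is cut off in this diff. */
static void stage_remove_device(struct mpi3_iounit_control_request *req,
                                u16 dev_handle)
{
        req->operation = MPI3_CTRL_OP_REMOVE_DEVICE;
        req->param16[MPI3_CTRL_OP_REMOVE_DEVICE_PARAM16_DEVHANDLE_INDEX] =
                cpu_to_le16(dev_handle);
}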
struct mpi3_iounit_control_request {
__le16 host_tag;
u8 ioc_use_only02;


@ -0,0 +1,44 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2016-2021 Broadcom Inc. All rights reserved.
*
*/
#ifndef MPI30_PCI_H
#define MPI30_PCI_H 1
#ifndef MPI3_NVME_ENCAP_CMD_MAX
#define MPI3_NVME_ENCAP_CMD_MAX (1)
#endif
struct mpi3_nvme_encapsulated_request {
__le16 host_tag;
u8 ioc_use_only02;
u8 function;
__le16 ioc_use_only04;
u8 ioc_use_only06;
u8 msg_flags;
__le16 change_count;
__le16 dev_handle;
__le16 encapsulated_command_length;
__le16 flags;
__le32 reserved10[4];
__le32 command[MPI3_NVME_ENCAP_CMD_MAX];
};
#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_MASK (0x0002)
#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_FAIL_ONLY (0x0000)
#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_ALL (0x0002)
#define MPI3_NVME_FLAGS_SUBMISSIONQ_MASK (0x0001)
#define MPI3_NVME_FLAGS_SUBMISSIONQ_IO (0x0000)
#define MPI3_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0001)
struct mpi3_nvme_encapsulated_error_reply {
__le16 host_tag;
u8 ioc_use_only02;
u8 function;
__le16 ioc_use_only04;
u8 ioc_use_only06;
u8 msg_flags;
__le16 ioc_use_only08;
__le16 ioc_status;
__le32 ioc_log_info;
__le32 nvme_completion_entry[4];
};
#endif
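The flags word above selects the target submission queue and the error-reply policy for the encapsulated command. A minimal sketch using only the defines from this file:

/* Hedged sketch: route an encapsulated NVMe command to the admin
 * submission queue and request error replies only on failure. */
static void nvme_encap_to_admin_q(struct mpi3_nvme_encapsulated_request *req)
{
        req->flags = cpu_to_le16(MPI3_NVME_FLAGS_SUBMISSIONQ_ADMIN |
                                 MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_FAIL_ONLY);
}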


@ -30,4 +30,18 @@ struct mpi3_smp_passthrough_request {
struct mpi3_sge_common request_sge;
struct mpi3_sge_common response_sge;
};
struct mpi3_smp_passthrough_reply {
__le16 host_tag;
u8 ioc_use_only02;
u8 function;
__le16 ioc_use_only04;
u8 ioc_use_only06;
u8 msg_flags;
__le16 ioc_use_only08;
__le16 ioc_status;
__le32 ioc_log_info;
__le16 response_data_length;
__le16 reserved12;
};
#endif


@ -19,8 +19,8 @@ union mpi3_version_union {
#define MPI3_VERSION_MAJOR (3)
#define MPI3_VERSION_MINOR (0)
#define MPI3_VERSION_UNIT (0)
#define MPI3_VERSION_DEV (18)
#define MPI3_VERSION_UNIT (22)
#define MPI3_VERSION_DEV (0)
struct mpi3_sysif_oper_queue_indexes {
__le16 producer_index;
__le16 reserved02;
@ -74,6 +74,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_IOC_INFO_HIGH_OFFSET (0x00000004)
#define MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK (0xff000000)
#define MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT (24)
#define MPI3_SYSIF_IOC_INFO_LOW_HCB_DISABLED (0x00000001)
#define MPI3_SYSIF_IOC_CONFIG_OFFSET (0x00000014)
#define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ (0x00f00000)
#define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT (20)
@ -82,12 +83,13 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_MASK (0x0000c000)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NO (0x00000000)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL (0x00004000)
#define MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN (0x00002000)
#define MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ (0x00002000)
#define MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE (0x00000010)
#define MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC (0x00000001)
#define MPI3_SYSIF_IOC_STATUS_OFFSET (0x0000001c)
#define MPI3_SYSIF_IOC_STATUS_RESET_HISTORY (0x00000010)
#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK (0x0000000c)
#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_SHIFT (0x00000002)
#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_NONE (0x00000000)
#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS (0x00000004)
#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE (0x00000008)
@ -107,9 +109,9 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_NO_CHANGE (0x00000000)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_DISABLE (0x40000000)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_ENABLE (0xc0000000)
#define MPI3_SYSIF_COALESCE_CONTROL_VALID (0x30000000)
#define MPI3_SYSIF_COALESCE_CONTROL_QUEUE_ID_MASK (0x00ff0000)
#define MPI3_SYSIF_COALESCE_CONTROL_QUEUE_ID_SHIFT (16)
#define MPI3_SYSIF_COALESCE_CONTROL_VALID (0x20000000)
#define MPI3_SYSIF_COALESCE_CONTROL_MSIX_IDX_MASK (0x01ff0000)
#define MPI3_SYSIF_COALESCE_CONTROL_MSIX_IDX_SHIFT (16)
#define MPI3_SYSIF_COALESCE_CONTROL_TIMEOUT_MASK (0x0000ff00)
#define MPI3_SYSIF_COALESCE_CONTROL_TIMEOUT_SHIFT (8)
#define MPI3_SYSIF_COALESCE_CONTROL_DEPTH_MASK (0x000000ff)
@ -117,9 +119,9 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET (0x00001000)
#define MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET (0x00001004)
#define MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET (0x00001008)
#define MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(n) (MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET + (((n) - 1) * 8))
#define MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(N) (MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET + (((N) - 1) * 8))
#define MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET (0x0000100c)
#define MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(n) (MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET + (((n) - 1) * 8))
#define MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(N) (MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET + (((N) - 1) * 8))
#define MPI3_SYSIF_WRITE_SEQUENCE_OFFSET (0x00001c04)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_MASK (0x0000000f)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH (0x0)
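The renamed OPER_REQ_Q_N_PI_OFFSET()/OPER_REPLY_Q_N_CI_OFFSET() macros a few lines up stride eight bytes per operational queue, starting at queue 1. A sketch of a doorbell write built on them, assuming sysif_regs is the ioremapped system-interface register base:

/* Hedged sketch: ring the producer-index doorbell of operational
 * request queue qid (1-based). */
static void ring_oper_req_q(void __iomem *sysif_regs, u16 qid, u16 pi)
{
        writel(pi, sysif_regs + MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(qid));
}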
@ -133,7 +135,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_MASK (0x00000700)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_NO_RESET (0x00000000)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET (0x00000100)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_FLASH_RCVRY_RESET (0x00000200)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_HOST_CONTROL_BOOT_RESET (0x00000200)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_COMPLETE_RESET (0x00000300)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT (0x00000700)
#define MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS (0x00000080)
@ -153,8 +155,9 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET (0x0000f001)
#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS (0x0000f002)
#define MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED (0x0000f003)
#define MPI3_SYSIF_FAULT_CODE_SAFE_MODE_EXIT (0x0000f004)
#define MPI3_SYSIF_FAULT_CODE_FACTORY_RESET (0x0000f005)
#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_NEEDED (0x0000f004)
#define MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED (0x0000f005)
#define MPI3_SYSIF_FAULT_CODE_TEMP_THRESHOLD_EXCEEDED (0x0000f006)
#define MPI3_SYSIF_FAULT_INFO0_OFFSET (0x00001c14)
#define MPI3_SYSIF_FAULT_INFO1_OFFSET (0x00001c18)
#define MPI3_SYSIF_FAULT_INFO2_OFFSET (0x00001c1c)
@ -409,6 +412,8 @@ struct mpi3_default_reply {
#define MPI3_IOCSTATUS_INVALID_STATE (0x0008)
#define MPI3_IOCSTATUS_INSUFFICIENT_POWER (0x000a)
#define MPI3_IOCSTATUS_INVALID_CHANGE_COUNT (0x000b)
#define MPI3_IOCSTATUS_ALLOWED_CMD_BLOCK (0x000c)
#define MPI3_IOCSTATUS_SUPERVISOR_ONLY (0x000d)
#define MPI3_IOCSTATUS_FAILURE (0x001f)
#define MPI3_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020)
#define MPI3_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021)
@ -448,8 +453,10 @@ struct mpi3_default_reply {
#define MPI3_IOCSTATUS_CI_UNSUPPORTED (0x00b0)
#define MPI3_IOCSTATUS_CI_UPDATE_SEQUENCE (0x00b1)
#define MPI3_IOCSTATUS_CI_VALIDATION_FAILED (0x00b2)
#define MPI3_IOCSTATUS_CI_UPDATE_PENDING (0x00b3)
#define MPI3_IOCSTATUS_CI_KEY_UPDATE_PENDING (0x00b3)
#define MPI3_IOCSTATUS_CI_KEY_UPDATE_NOT_POSSIBLE (0x00b4)
#define MPI3_IOCSTATUS_SECURITY_KEY_REQUIRED (0x00c0)
#define MPI3_IOCSTATUS_SECURITY_VIOLATION (0x00c1)
#define MPI3_IOCSTATUS_INVALID_QUEUE_ID (0x0f00)
#define MPI3_IOCSTATUS_INVALID_QUEUE_SIZE (0x0f01)
#define MPI3_IOCSTATUS_INVALID_MSIX_VECTOR (0x0f02)


@ -45,6 +45,7 @@
#include "mpi/mpi30_init.h"
#include "mpi/mpi30_ioc.h"
#include "mpi/mpi30_sas.h"
#include "mpi/mpi30_pci.h"
#include "mpi3mr_debug.h"
/* Global list and lock for storing multiple adapters managed by the driver */
@ -52,8 +53,8 @@ extern spinlock_t mrioc_list_lock;
extern struct list_head mrioc_list;
extern int prot_mask;
#define MPI3MR_DRIVER_VERSION "00.255.45.01"
#define MPI3MR_DRIVER_RELDATE "12-December-2020"
#define MPI3MR_DRIVER_VERSION "8.0.0.61.0"
#define MPI3MR_DRIVER_RELDATE "20-December-2021"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@ -79,7 +80,8 @@ extern int prot_mask;
/* Operational queue management definitions */
#define MPI3MR_OP_REQ_Q_QD 512
#define MPI3MR_OP_REP_Q_QD 4096
#define MPI3MR_OP_REP_Q_QD 1024
#define MPI3MR_OP_REP_Q_QD4K 4096
#define MPI3MR_OP_REQ_Q_SEG_SIZE 4096
#define MPI3MR_OP_REP_Q_SEG_SIZE 4096
#define MPI3MR_MAX_SEG_LIST_SIZE 4096
@ -90,25 +92,31 @@ extern int prot_mask;
#define MPI3MR_HOSTTAG_IOCTLCMDS 2
#define MPI3MR_HOSTTAG_BLK_TMS 5
#define MPI3MR_NUM_DEVRMCMD 1
#define MPI3MR_NUM_DEVRMCMD 16
#define MPI3MR_HOSTTAG_DEVRMCMD_MIN (MPI3MR_HOSTTAG_BLK_TMS + 1)
#define MPI3MR_HOSTTAG_DEVRMCMD_MAX (MPI3MR_HOSTTAG_DEVRMCMD_MIN + \
MPI3MR_NUM_DEVRMCMD - 1)
#define MPI3MR_INTERNAL_CMDS_RESVD MPI3MR_HOSTTAG_DEVRMCMD_MAX
#define MPI3MR_INTERNAL_CMDS_RESVD MPI3MR_HOSTTAG_DEVRMCMD_MAX
#define MPI3MR_NUM_EVTACKCMD 4
#define MPI3MR_HOSTTAG_EVTACKCMD_MIN (MPI3MR_HOSTTAG_DEVRMCMD_MAX + 1)
#define MPI3MR_HOSTTAG_EVTACKCMD_MAX (MPI3MR_HOSTTAG_EVTACKCMD_MIN + \
MPI3MR_NUM_EVTACKCMD - 1)
/* Reduced resource count definition for crash kernel */
#define MPI3MR_HOST_IOS_KDUMP 128
/* command/controller interaction timeout definitions in seconds */
#define MPI3MR_INTADMCMD_TIMEOUT 10
#define MPI3MR_INTADMCMD_TIMEOUT 60
#define MPI3MR_PORTENABLE_TIMEOUT 300
#define MPI3MR_ABORTTM_TIMEOUT 30
#define MPI3MR_RESETTM_TIMEOUT 30
#define MPI3MR_ABORTTM_TIMEOUT 60
#define MPI3MR_RESETTM_TIMEOUT 60
#define MPI3MR_RESET_HOST_IOWAIT_TIMEOUT 5
#define MPI3MR_TSUPDATE_INTERVAL 900
#define MPI3MR_DEFAULT_SHUTDOWN_TIME 120
#define MPI3MR_RAID_ERRREC_RESET_TIMEOUT 180
#define MPI3MR_PREPARE_FOR_RESET_TIMEOUT 180
#define MPI3MR_RESET_ACK_TIMEOUT 30
#define MPI3MR_WATCHDOG_INTERVAL 1000 /* in milli seconds */
@ -121,7 +129,7 @@ extern int prot_mask;
/* Definitions for Event replies and sense buffer allocated per controller */
#define MPI3MR_NUM_EVT_REPLIES 64
#define MPI3MR_SENSEBUF_SZ 256
#define MPI3MR_SENSE_BUF_SZ 256
#define MPI3MR_SENSEBUF_FACTOR 3
#define MPI3MR_CHAINBUF_FACTOR 3
#define MPI3MR_CHAINBUFDIX_FACTOR 2
@ -135,17 +143,11 @@ extern int prot_mask;
/* ResponseCode definitions */
#define MPI3MR_RI_MASK_RESPCODE (0x000000FF)
#define MPI3MR_RSP_TM_COMPLETE 0x00
#define MPI3MR_RSP_INVALID_FRAME 0x02
#define MPI3MR_RSP_TM_NOT_SUPPORTED 0x04
#define MPI3MR_RSP_TM_FAILED 0x05
#define MPI3MR_RSP_TM_SUCCEEDED 0x08
#define MPI3MR_RSP_TM_INVALID_LUN 0x09
#define MPI3MR_RSP_TM_OVERLAPPED_TAG 0x0A
#define MPI3MR_RSP_IO_QUEUED_ON_IOC \
MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC
#define MPI3MR_DEFAULT_MDTS (128 * 1024)
#define MPI3MR_DEFAULT_PGSZEXP (12)
/* Command retry count definitions */
#define MPI3MR_DEV_RMHS_RETRY_COUNT 3
@ -183,20 +185,6 @@ enum mpi3mr_iocstate {
MRIOC_STATE_UNRECOVERABLE,
};
/* Init type definitions */
enum mpi3mr_init_type {
MPI3MR_IT_INIT = 0,
MPI3MR_IT_RESET,
MPI3MR_IT_RESUME,
};
/* Cleanup reason definitions */
enum mpi3mr_cleanup_reason {
MPI3MR_COMPLETE_CLEANUP = 0,
MPI3MR_REINIT_FAILURE,
MPI3MR_SUSPEND,
};
/* Reset reason code definitions*/
enum mpi3mr_reset_reason {
MPI3MR_RESET_FROM_BRINGUP = 1,
@ -222,7 +210,14 @@ enum mpi3mr_reset_reason {
MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT = 21,
MPI3MR_RESET_FROM_PELABORT_TIMEOUT = 22,
MPI3MR_RESET_FROM_SYSFS = 23,
MPI3MR_RESET_FROM_SYSFS_TIMEOUT = 24
MPI3MR_RESET_FROM_SYSFS_TIMEOUT = 24,
MPI3MR_RESET_FROM_FIRMWARE = 27,
};
/* Queue type definitions */
enum queue_type {
MPI3MR_DEFAULT_QUEUE = 0,
MPI3MR_POLL_QUEUE,
};
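With the new enum, each operational reply queue is tagged as interrupt-driven or polled; the qtype field added to op_reply_qinfo below records the choice. A sketch of the classification, under the assumption that queues past default_qcount are the user-requested poll queues:

/* Hedged sketch: classify reply queue qidx using the counters kept in
 * struct mpi3mr_ioc (default_qcount is visible later in this diff). */
static enum queue_type mpi3mr_qtype(struct mpi3mr_ioc *mrioc, u16 qidx)
{
        return qidx < mrioc->default_qcount ?
               MPI3MR_DEFAULT_QUEUE : MPI3MR_POLL_QUEUE;
}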
/**
@ -263,7 +258,7 @@ struct mpi3mr_ioc_facts {
u16 max_vds;
u16 max_hpds;
u16 max_advhpds;
u16 max_raidpds;
u16 max_raid_pds;
u16 min_devhandle;
u16 max_devhandle;
u16 max_op_req_q;
@ -336,6 +331,7 @@ struct op_req_qinfo {
* @pend_ios: Number of IOs pending in HW for this queue
* @enable_irq_poll: Flag to indicate polling is enabled
* @in_use: Queue is handled by poll/ISR
* @qtype: Type of queue (types defined in enum queue_type)
*/
struct op_reply_qinfo {
u16 ci;
@ -350,6 +346,7 @@ struct op_reply_qinfo {
atomic_t pend_ios;
bool enable_irq_poll;
atomic_t in_use;
enum queue_type qtype;
};
/**
@ -388,6 +385,7 @@ struct tgt_dev_sas_sata {
* @pgsz: Device page size
* @abort_to: Timeout for abort TM
* @reset_to: Timeout for Target/LUN reset TM
* @dev_info: Device information bits
*/
struct tgt_dev_pcie {
u32 mdts;
@ -395,6 +393,7 @@ struct tgt_dev_pcie {
u8 pgsz;
u8 abort_to;
u8 reset_to;
u16 dev_info;
};
/**
@ -499,6 +498,8 @@ static inline void mpi3mr_tgtdev_put(struct mpi3mr_tgt_dev *s)
* @dev_removedelay: Device is waiting to be removed in FW
* @dev_type: Device type
* @tgt_dev: Internal target device pointer
* @pend_count: Counter to track pending I/Os during error
* handling
*/
struct mpi3mr_stgt_priv_data {
struct scsi_target *starget;
@ -510,6 +511,7 @@ struct mpi3mr_stgt_priv_data {
u8 dev_removedelay;
u8 dev_type;
struct mpi3mr_tgt_dev *tgt_dev;
u32 pend_count;
};
/**
@ -518,11 +520,14 @@ struct mpi3mr_stgt_priv_data {
* @tgt_priv_data: Scsi_target private data pointer
* @lun_id: LUN ID of the device
* @ncq_prio_enable: NCQ priority enable for SATA device
* @pend_count: Counter to track pending I/Os during error
* handling
*/
struct mpi3mr_sdev_priv_data {
struct mpi3mr_stgt_priv_data *tgt_priv_data;
u32 lun_id;
u8 ncq_prio_enable;
u32 pend_count;
};
/**
@ -630,6 +635,7 @@ struct scmd_priv {
* @ready_timeout: Controller ready timeout
* @intr_info: Interrupt cookie pointer
* @intr_info_count: Number of interrupt cookies
* @is_intr_info_set: Flag to indicate intr info is setup
* @num_queues: Number of operational queues
* @num_op_req_q: Number of operational request queues
* @req_qinfo: Operational request queue info pointer
@ -681,17 +687,23 @@ struct scmd_priv {
* @chain_buf_lock: Chain buffer list lock
* @host_tm_cmds: Command tracker for task management commands
* @dev_rmhs_cmds: Command tracker for device removal commands
* @evtack_cmds: Command tracker for event ack commands
* @devrem_bitmap_sz: Device removal bitmap size
* @devrem_bitmap: Device removal bitmap
* @dev_handle_bitmap_sz: Device handle bitmap size
* @removepend_bitmap: Remove pending bitmap
* @delayed_rmhs_list: Delayed device removal list
* @evtack_cmds_bitmap_sz: Event Ack bitmap size
* @evtack_cmds_bitmap: Event Ack bitmap
* @delayed_evtack_cmds_list: Delayed event acknowledgment list
* @ts_update_counter: Timestamp update counter
* @fault_dbg: Fault debug flag
* @reset_in_progress: Reset in progress flag
* @unrecoverable: Controller unrecoverable flag
* @prev_reset_result: Result of previous reset
* @reset_mutex: Controller reset mutex
* @reset_waitq: Controller reset wait queue
* @prepare_for_reset: Prepare for reset event received
* @prepare_for_reset_timeout_counter: Prepare for reset timeout
* @diagsave_timeout: Diagnostic information save timeout
* @logging_level: Controller debug logging level
* @flush_io_count: I/O count to flush after reset
@ -699,6 +711,9 @@ struct scmd_priv {
* @driver_info: Driver, Kernel, OS information to firmware
* @change_count: Topology change count
* @op_reply_q_offset: Operational reply queue offset with MSIx
* @default_qcount: Total Default queues
* @active_poll_qcount: Currently active poll queue count
* @requested_poll_qcount: User requested poll queue count
*/
struct mpi3mr_ioc {
struct list_head list;
@ -739,6 +754,7 @@ struct mpi3mr_ioc {
struct mpi3mr_intr_info *intr_info;
u16 intr_info_count;
bool is_intr_info_set;
u16 num_queues;
u16 num_op_req_q;
@ -758,6 +774,7 @@ struct mpi3mr_ioc {
dma_addr_t reply_buf_dma_max_address;
u16 reply_free_qsz;
u16 reply_sz;
struct dma_pool *reply_free_q_pool;
__le64 *reply_free_q;
dma_addr_t reply_free_q_dma;
@ -805,19 +822,26 @@ struct mpi3mr_ioc {
struct mpi3mr_drv_cmd host_tm_cmds;
struct mpi3mr_drv_cmd dev_rmhs_cmds[MPI3MR_NUM_DEVRMCMD];
struct mpi3mr_drv_cmd evtack_cmds[MPI3MR_NUM_EVTACKCMD];
u16 devrem_bitmap_sz;
void *devrem_bitmap;
u16 dev_handle_bitmap_sz;
void *removepend_bitmap;
struct list_head delayed_rmhs_list;
u16 evtack_cmds_bitmap_sz;
void *evtack_cmds_bitmap;
struct list_head delayed_evtack_cmds_list;
u32 ts_update_counter;
u8 fault_dbg;
u8 reset_in_progress;
u8 unrecoverable;
int prev_reset_result;
struct mutex reset_mutex;
wait_queue_head_t reset_waitq;
u8 prepare_for_reset;
u16 prepare_for_reset_timeout_counter;
u16 diagsave_timeout;
int logging_level;
u16 flush_io_count;
@ -826,6 +850,10 @@ struct mpi3mr_ioc {
struct mpi3_driver_info_layout driver_info;
u16 change_count;
u16 op_reply_q_offset;
u16 default_qcount;
u16 active_poll_qcount;
u16 requested_poll_qcount;
};
/**
@ -867,10 +895,23 @@ struct delayed_dev_rmhs_node {
u8 iou_rc;
};
/**
* struct delayed_evt_ack_node - Delayed event ack node
* @list: list head
* @event: MPI3 event ID
* @event_ctx: event context
*/
struct delayed_evt_ack_node {
struct list_head list;
u8 event;
u32 event_ctx;
};
int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc);
void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc);
int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type);
void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason);
int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc);
int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume);
void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc);
int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async);
int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
u16 admin_req_sz, u8 ignore_reset);
@ -887,6 +928,7 @@ void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
u64 sense_buf_dma);
void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc);
void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc);
void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
struct mpi3_event_notification_reply *event_reply);
void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
@ -897,13 +939,11 @@ void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc);
int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
u32 reset_reason, u8 snapdump);
int mpi3mr_diagfault_reset_handler(struct mpi3mr_ioc *mrioc,
u32 reset_reason);
void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc);
void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc);
enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc);
int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
u32 event_ctx);
void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout);
@ -911,6 +951,12 @@ void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc);
void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc);
void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc);
void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc);
void mpi3mr_flush_delayed_rmhs_list(struct mpi3mr_ioc *mrioc);
void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc);
void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code);
void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc);
void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code);
int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
struct op_reply_qinfo *op_reply_q);
int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
#endif /*MPI3MR_H_INCLUDED*/


@ -14,27 +14,20 @@
/*
* debug levels
*/
#define MPI3_DEBUG 0x00000001
#define MPI3_DEBUG_MSG_FRAME 0x00000002
#define MPI3_DEBUG_SG 0x00000004
#define MPI3_DEBUG_EVENTS 0x00000008
#define MPI3_DEBUG_EVENT_WORK_TASK 0x00000010
#define MPI3_DEBUG_INIT 0x00000020
#define MPI3_DEBUG_EXIT 0x00000040
#define MPI3_DEBUG_FAIL 0x00000080
#define MPI3_DEBUG_TM 0x00000100
#define MPI3_DEBUG_REPLY 0x00000200
#define MPI3_DEBUG_HANDSHAKE 0x00000400
#define MPI3_DEBUG_CONFIG 0x00000800
#define MPI3_DEBUG_DL 0x00001000
#define MPI3_DEBUG_RESET 0x00002000
#define MPI3_DEBUG_SCSI 0x00004000
#define MPI3_DEBUG_IOCTL 0x00008000
#define MPI3_DEBUG_CSMISAS 0x00010000
#define MPI3_DEBUG_SAS 0x00020000
#define MPI3_DEBUG_TRANSPORT 0x00040000
#define MPI3_DEBUG_TASK_SET_FULL 0x00080000
#define MPI3_DEBUG_TRIGGER_DIAG 0x00200000
#define MPI3_DEBUG_EVENT 0x00000001
#define MPI3_DEBUG_EVENT_WORK_TASK 0x00000002
#define MPI3_DEBUG_INIT 0x00000004
#define MPI3_DEBUG_EXIT 0x00000008
#define MPI3_DEBUG_TM 0x00000010
#define MPI3_DEBUG_RESET 0x00000020
#define MPI3_DEBUG_SCSI_ERROR 0x00000040
#define MPI3_DEBUG_REPLY 0x00000080
#define MPI3_DEBUG_IOCTL_ERROR 0x00008000
#define MPI3_DEBUG_IOCTL_INFO 0x00010000
#define MPI3_DEBUG_SCSI_INFO 0x00020000
#define MPI3_DEBUG 0x01000000
#define MPI3_DEBUG_SG 0x02000000
/*
@ -50,11 +43,103 @@
#define ioc_info(ioc, fmt, ...) \
pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
#define dbgprint(IOC, FMT, ...) \
#define dprint(ioc, fmt, ...) \
do { \
if (IOC->logging_level & MPI3_DEBUG) \
pr_info("%s: " FMT, (IOC)->name, ##__VA_ARGS__); \
if (ioc->logging_level & MPI3_DEBUG) \
pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
} while (0)
#define dprint_event_th(ioc, fmt, ...) \
do { \
if (ioc->logging_level & MPI3_DEBUG_EVENT) \
pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
} while (0)
#define dprint_event_bh(ioc, fmt, ...) \
do { \
if (ioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK) \
pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
} while (0)
#define dprint_init(ioc, fmt, ...) \
do { \
if (ioc->logging_level & MPI3_DEBUG_INIT) \
pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
} while (0)
#define dprint_exit(ioc, fmt, ...) \
do { \
if (ioc->logging_level & MPI3_DEBUG_EXIT) \
pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
} while (0)
#define dprint_tm(ioc, fmt, ...) \
do { \
if (ioc->logging_level & MPI3_DEBUG_TM) \
pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
} while (0)
#define dprint_reply(ioc, fmt, ...) \
do { \
if (ioc->logging_level & MPI3_DEBUG_REPLY) \
pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
} while (0)
#define dprint_reset(ioc, fmt, ...) \
do { \
if (ioc->logging_level & MPI3_DEBUG_RESET) \
pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
} while (0)
#define dprint_scsi_info(ioc, fmt, ...) \
do { \
if (ioc->logging_level & MPI3_DEBUG_SCSI_INFO) \
pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
} while (0)
#define dprint_scsi_err(ioc, fmt, ...) \
do { \
if (ioc->logging_level & MPI3_DEBUG_SCSI_ERROR) \
pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
} while (0)
#define dprint_scsi_command(ioc, SCMD, LOG_LEVEL) \
do { \
if (ioc->logging_level & LOG_LEVEL) \
scsi_print_command(SCMD); \
} while (0)
#define dprint_ioctl_info(ioc, fmt, ...) \
do { \
if (ioc->logging_level & MPI3_DEBUG_IOCTL_INFO) \
pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
} while (0)
#define dprint_ioctl_err(ioc, fmt, ...) \
do { \
if (ioc->logging_level & MPI3_DEBUG_IOCTL_ERROR) \
pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
} while (0)
#endif /* MPT3SAS_DEBUG_H_INCLUDED */
/**
* dprint_dump_req - print message frame contents
* @req: pointer to message frame
* @sz: number of dwords
*/
static inline void
dprint_dump_req(void *req, int sz)
{
int i;
__le32 *mfp = (__le32 *)req;
pr_info("request:\n\t");
for (i = 0; i < sz; i++) {
if (i && ((i % 8) == 0))
pr_info("\n\t");
pr_info("%08x ", le32_to_cpu(mfp[i]));
}
pr_info("\n");
}
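A short usage sketch of the new level-gated macros; mrioc stands in for a struct mpi3mr_ioc pointer whose logging_level was set through the module parameter:

/* Hedged sketch: each dprint_* variant compiles to a pr_info() guarded
 * by one MPI3_DEBUG_* bit of mrioc->logging_level. */
static void example_eh_logging(struct mpi3mr_ioc *mrioc, u16 handle)
{
        dprint_tm(mrioc, "sending target reset to handle 0x%04x\n", handle);
        dprint_reset(mrioc, "soft reset requested\n");
}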

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -67,8 +67,10 @@ static struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
while (sha->sas_port[i]) {
if (sha->sas_port[i] == dev->port) {
spin_lock(&sha->sas_port[i]->phy_list_lock);
phy = container_of(sha->sas_port[i]->phy_list.next,
struct asd_sas_phy, port_phy_el);
spin_unlock(&sha->sas_port[i]->phy_list_lock);
j = 0;
while (sha->sas_phy[j]) {
if (sha->sas_phy[j] == phy)
@ -96,6 +98,8 @@ static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
while (sha->sas_port[i]) {
if (sha->sas_port[i] == dev->port) {
struct asd_sas_phy *phy;
spin_lock(&sha->sas_port[i]->phy_list_lock);
list_for_each_entry(phy,
&sha->sas_port[i]->phy_list, port_phy_el) {
j = 0;
@ -109,6 +113,7 @@ static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
num++;
n++;
}
spin_unlock(&sha->sas_port[i]->phy_list_lock);
break;
}
i++;


@ -1674,7 +1674,7 @@ static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
if (sdev->id > MYRB_MAX_TARGETS)
return -ENXIO;
pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL);
if (!pdev_info)
return -ENOMEM;


@ -538,13 +538,11 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
cs->fwstat_buf = NULL;
goto out_free;
}
cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info),
GFP_KERNEL | GFP_DMA);
cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info), GFP_KERNEL);
if (!cs->ctlr_info)
goto out_free;
cs->event_buf = kzalloc(sizeof(struct myrs_event),
GFP_KERNEL | GFP_DMA);
cs->event_buf = kzalloc(sizeof(struct myrs_event), GFP_KERNEL);
if (!cs->event_buf)
goto out_free;
@ -1805,7 +1803,7 @@ static int myrs_slave_alloc(struct scsi_device *sdev)
ldev_num = myrs_translate_ldev(cs, sdev);
ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL|GFP_DMA);
ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
if (!ldev_info)
return -ENOMEM;
@ -1867,7 +1865,7 @@ static int myrs_slave_alloc(struct scsi_device *sdev)
} else {
struct myrs_pdev_info *pdev_info;
pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL);
if (!pdev_info)
return -ENOMEM;
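These hunks drop GFP_DMA because none of the buffers need to come from the legacy 16 MB DMA zone; when one is handed to the hardware it goes through the DMA API, which bounce-buffers as needed. A sketch of that pattern, with alloc_dev_visible() as a hypothetical helper:

/* Hedged sketch: plain GFP_KERNEL memory made device-visible via a
 * streaming DMA mapping instead of a GFP_DMA allocation. */
static void *alloc_dev_visible(struct device *dev, size_t len,
                               dma_addr_t *dma_addr)
{
        void *buf = kzalloc(len, GFP_KERNEL);

        if (!buf)
                return NULL;
        *dma_addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *dma_addr)) {
                kfree(buf);
                return NULL;
        }
        return buf;
}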


@ -6,9 +6,12 @@
obj-$(CONFIG_SCSI_PM8001) += pm80xx.o
CFLAGS_pm80xx_tracepoints.o := -I$(src)
pm80xx-y += pm8001_init.o \
pm8001_sas.o \
pm8001_ctl.o \
pm8001_hwi.o \
pm80xx_hwi.o
pm80xx_hwi.o \
pm80xx_tracepoints.o


@ -889,14 +889,6 @@ static ssize_t pm8001_show_update_fw(struct device *cdev,
static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP,
pm8001_show_update_fw, pm8001_store_update_fw);
/**
* ctl_mpi_state_show - controller MPI state check
* @cdev: pointer to embedded class device
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static const char *const mpiStateText[] = {
"MPI is not initialized",
"MPI is successfully initialized",
@ -904,6 +896,14 @@ static const char *const mpiStateText[] = {
"MPI initialization failed with error in [31:16]"
};
/**
* ctl_mpi_state_show - controller MPI state check
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t ctl_mpi_state_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@ -920,11 +920,11 @@ static DEVICE_ATTR_RO(ctl_mpi_state);
/**
* ctl_hmi_error_show - controller MPI initialization fails
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t ctl_hmi_error_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@ -941,11 +941,11 @@ static DEVICE_ATTR_RO(ctl_hmi_error);
/**
* ctl_raae_count_show - controller raae count check
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t ctl_raae_count_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@ -962,11 +962,11 @@ static DEVICE_ATTR_RO(ctl_raae_count);
/**
* ctl_iop0_count_show - controller iop0 count check
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t ctl_iop0_count_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@ -983,11 +983,11 @@ static DEVICE_ATTR_RO(ctl_iop0_count);
/**
* ctl_iop1_count_show - controller iop1 count check
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t ctl_iop1_count_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{


@ -42,6 +42,7 @@
#include "pm8001_hwi.h"
#include "pm8001_chips.h"
#include "pm8001_ctl.h"
#include "pm80xx_tracepoints.h"
/**
* read_main_config_table - read the configure table and save it.
@ -1324,8 +1325,14 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
unsigned long flags;
int q_index = circularQ - pm8001_ha->inbnd_q_tbl;
int rv;
u32 htag = le32_to_cpu(*(__le32 *)payload);
trace_pm80xx_mpi_build_cmd(pm8001_ha->id, opCode, htag, q_index,
circularQ->producer_idx, le32_to_cpu(circularQ->consumer_index));
if (WARN_ON(q_index >= pm8001_ha->max_q_num))
return -EINVAL;
WARN_ON(q_index >= PM8001_MAX_INB_NUM);
spin_lock_irqsave(&circularQ->iq_lock, flags);
rv = pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size,
&pMessage);
@ -2304,21 +2311,17 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
psataPayload = (struct sata_completion_resp *)(piomb + 4);
status = le32_to_cpu(psataPayload->status);
param = le32_to_cpu(psataPayload->param);
tag = le32_to_cpu(psataPayload->tag);
if (!tag) {
pm8001_dbg(pm8001_ha, FAIL, "tag null\n");
return;
}
ccb = &pm8001_ha->ccb_info[tag];
param = le32_to_cpu(psataPayload->param);
if (ccb) {
t = ccb->task;
pm8001_dev = ccb->device;
} else {
pm8001_dbg(pm8001_ha, FAIL, "ccb null\n");
return;
}
t = ccb->task;
pm8001_dev = ccb->device;
if (t) {
if (t->dev && (t->dev->lldd_dev))
@ -2335,10 +2338,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
ts = &t->task_status;
if (!ts) {
pm8001_dbg(pm8001_ha, FAIL, "ts null\n");
return;
}
if (status)
pm8001_dbg(pm8001_ha, IOERR,
@ -2695,14 +2694,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 dev_id = le32_to_cpu(psataPayload->device_id);
unsigned long flags;
ccb = &pm8001_ha->ccb_info[tag];
if (ccb) {
t = ccb->task;
pm8001_dev = ccb->device;
} else {
pm8001_dbg(pm8001_ha, FAIL, "No CCB !!!. returning\n");
}
if (event)
pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);


@ -179,7 +179,7 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
}
PM8001_CHIP_DISP->chip_iounmap(pm8001_ha);
flush_workqueue(pm8001_wq);
kfree(pm8001_ha->tags);
bitmap_free(pm8001_ha->tags);
kfree(pm8001_ha);
}
@ -1192,7 +1192,7 @@ pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost,
can_queue = ccb_count - PM8001_RESERVE_SLOT;
shost->can_queue = can_queue;
pm8001_ha->tags = kzalloc(ccb_count, GFP_KERNEL);
pm8001_ha->tags = bitmap_zalloc(ccb_count, GFP_KERNEL);
if (!pm8001_ha->tags)
goto err_out;
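bitmap_zalloc() sizes its allocation in bits where kzalloc() counted bytes, and it pairs with the bitmap_free() call in pm8001_free() above. A sketch of how such a tag map is consumed, with example_tag_alloc() as a hypothetical helper:

/* Hedged sketch: claim a free tag from the bitmap allocated above. */
static int example_tag_alloc(unsigned long *tags, u32 ccb_count, u32 *tag_out)
{
        u32 tag = find_first_zero_bit(tags, ccb_count);

        if (tag >= ccb_count)
                return -EBUSY;
        set_bit(tag, tags);
        *tag_out = tag;
        return 0;
}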


@ -40,6 +40,7 @@
#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm80xx_tracepoints.h"
/**
* pm8001_find_tag - from sas task to find out tag that belongs to this task
@ -527,6 +528,9 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx)
{
struct ata_queued_cmd *qc;
struct pm8001_device *pm8001_dev;
if (!ccb->task)
return;
if (!sas_protocol_ata(task->task_proto))
@ -549,6 +553,18 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
/* do nothing */
break;
}
if (sas_protocol_ata(task->task_proto)) {
// For SCSI/ATA commands uldd_task points to ata_queued_cmd
qc = task->uldd_task;
pm8001_dev = ccb->device;
trace_pm80xx_request_complete(pm8001_ha->id,
pm8001_dev ? pm8001_dev->attached_phy : PM8001_MAX_PHYS,
ccb_idx, 0 /* ctlr_opcode not known */,
qc ? qc->tf.command : 0, // ata opcode
pm8001_dev ? atomic_read(&pm8001_dev->running_req) : -1);
}
task->lldd_task = NULL;
ccb->task = NULL;
ccb->ccb_tag = 0xFFFFFFFF;


@ -42,6 +42,7 @@
#include "pm80xx_hwi.h"
#include "pm8001_chips.h"
#include "pm8001_ctl.h"
#include "pm80xx_tracepoints.h"
#define SMP_DIRECT 1
#define SMP_INDIRECT 2
@ -2400,21 +2401,17 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
psataPayload = (struct sata_completion_resp *)(piomb + 4);
status = le32_to_cpu(psataPayload->status);
param = le32_to_cpu(psataPayload->param);
tag = le32_to_cpu(psataPayload->tag);
if (!tag) {
pm8001_dbg(pm8001_ha, FAIL, "tag null\n");
return;
}
ccb = &pm8001_ha->ccb_info[tag];
param = le32_to_cpu(psataPayload->param);
if (ccb) {
t = ccb->task;
pm8001_dev = ccb->device;
} else {
pm8001_dbg(pm8001_ha, FAIL, "ccb null\n");
return;
}
t = ccb->task;
pm8001_dev = ccb->device;
if (t) {
if (t->dev && (t->dev->lldd_dev))
@ -2431,10 +2428,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
}
ts = &t->task_status;
if (!ts) {
pm8001_dbg(pm8001_ha, FAIL, "ts null\n");
return;
}
if (status != IO_SUCCESS) {
pm8001_dbg(pm8001_ha, FAIL,
@ -2830,15 +2823,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
u32 dev_id = le32_to_cpu(psataPayload->device_id);
unsigned long flags;
ccb = &pm8001_ha->ccb_info[tag];
if (ccb) {
t = ccb->task;
pm8001_dev = ccb->device;
} else {
pm8001_dbg(pm8001_ha, FAIL, "No CCB !!!. returning\n");
return;
}
if (event)
pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);
@ -2852,6 +2836,10 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
return;
}
ccb = &pm8001_ha->ccb_info[tag];
t = ccb->task;
pm8001_dev = ccb->device;
if (unlikely(!t || !t->lldd_task || !t->dev)) {
pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n");
return;
@ -3522,7 +3510,7 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 status =
le32_to_cpu(pPayload->status);
u32 phy_id =
le32_to_cpu(pPayload->phyid);
le32_to_cpu(pPayload->phyid) & 0xFF;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
pm8001_dbg(pm8001_ha, INIT,
@ -4547,6 +4535,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
struct sas_task *task = ccb->task;
struct domain_device *dev = task->dev;
struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
struct ata_queued_cmd *qc = task->uldd_task;
u32 tag = ccb->ccb_tag;
int ret;
u32 q_index, cpu_id;
@ -4766,6 +4755,11 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
}
}
}
trace_pm80xx_request_issue(pm8001_ha->id,
ccb->device ? ccb->device->attached_phy : PM8001_MAX_PHYS,
ccb->ccb_tag, opc,
qc ? qc->tf.command : 0, // ata opcode
ccb->device ? atomic_read(&ccb->device->running_req) : 0);
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
&sata_cmd, sizeof(sata_cmd), q_index);
return ret;


@ -0,0 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Trace events in pm8001 driver.
*
* Copyright 2020 Google LLC
* Author: Akshat Jain <akshatzen@google.com>
*/
#define CREATE_TRACE_POINTS
#include "pm80xx_tracepoints.h"


@ -0,0 +1,113 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Trace events in pm8001 driver.
*
* Copyright 2020 Google LLC
* Author: Akshat Jain <akshatzen@google.com>
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM pm80xx
#if !defined(_TRACE_PM80XX_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_PM80XX_H
#include <linux/tracepoint.h>
#include "pm8001_sas.h"
TRACE_EVENT(pm80xx_request_issue,
TP_PROTO(u32 id, u32 phy_id, u32 htag, u32 ctlr_opcode,
u16 ata_opcode, int running_req),
TP_ARGS(id, phy_id, htag, ctlr_opcode, ata_opcode, running_req),
TP_STRUCT__entry(
__field(u32, id)
__field(u32, phy_id)
__field(u32, htag)
__field(u32, ctlr_opcode)
__field(u16, ata_opcode)
__field(int, running_req)
),
TP_fast_assign(
__entry->id = id;
__entry->phy_id = phy_id;
__entry->htag = htag;
__entry->ctlr_opcode = ctlr_opcode;
__entry->ata_opcode = ata_opcode;
__entry->running_req = running_req;
),
TP_printk("ctlr_id = %u phy_id = %u htag = %#x, ctlr_opcode = %#x ata_opcode = %#x running_req = %d",
__entry->id, __entry->phy_id, __entry->htag,
__entry->ctlr_opcode, __entry->ata_opcode,
__entry->running_req)
);
TRACE_EVENT(pm80xx_request_complete,
TP_PROTO(u32 id, u32 phy_id, u32 htag, u32 ctlr_opcode,
u16 ata_opcode, int running_req),
TP_ARGS(id, phy_id, htag, ctlr_opcode, ata_opcode, running_req),
TP_STRUCT__entry(
__field(u32, id)
__field(u32, phy_id)
__field(u32, htag)
__field(u32, ctlr_opcode)
__field(u16, ata_opcode)
__field(int, running_req)
),
TP_fast_assign(
__entry->id = id;
__entry->phy_id = phy_id;
__entry->htag = htag;
__entry->ctlr_opcode = ctlr_opcode;
__entry->ata_opcode = ata_opcode;
__entry->running_req = running_req;
),
TP_printk("ctlr_id = %u phy_id = %u htag = %#x, ctlr_opcode = %#x ata_opcode = %#x running_req = %d",
__entry->id, __entry->phy_id, __entry->htag,
__entry->ctlr_opcode, __entry->ata_opcode,
__entry->running_req)
);
TRACE_EVENT(pm80xx_mpi_build_cmd,
TP_PROTO(u32 id, u32 opc, u32 htag, u32 qi, u32 pi, u32 ci),
TP_ARGS(id, opc, htag, qi, pi, ci),
TP_STRUCT__entry(
__field(u32, id)
__field(u32, opc)
__field(u32, htag)
__field(u32, qi)
__field(u32, pi)
__field(u32, ci)
),
TP_fast_assign(
__entry->id = id;
__entry->opc = opc;
__entry->htag = htag;
__entry->qi = qi;
__entry->pi = pi;
__entry->ci = ci;
),
TP_printk("ctlr_id = %u opc = %#x htag = %#x QI = %u PI = %u CI = %u",
__entry->id, __entry->opc, __entry->htag, __entry->qi,
__entry->pi, __entry->ci)
);
#endif /* _TRACE_PM80XX_H_ */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE pm80xx_tracepoints
#include <trace/define_trace.h>


@ -3221,8 +3221,8 @@ static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
return NULL;
sglist->order = order;
sgl_alloc_order(buflen, order, false,
GFP_KERNEL | GFP_DMA | __GFP_ZERO, &sglist->num_sg);
sgl_alloc_order(buflen, order, false, GFP_KERNEL | __GFP_ZERO,
&sglist->num_sg);
return sglist;
}
@ -3302,7 +3302,6 @@ static int pmcraid_copy_sglist(
/**
* pmcraid_queuecommand_lck - Queue a mid-layer request
* @scsi_cmd: scsi command struct
* @done: done function
*
* This function queues a request generated by the mid-layer. Midlayer calls
* this routine within host->lock. Some of the functions called by queuecommand


@ -1538,7 +1538,6 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
int i;
struct scsi_bd *pbl;
u64 *list;
dma_addr_t page;
/* Alloc dma memory for BDQ buffers */
for (i = 0; i < QEDI_BDQ_NUM; i++) {
@ -1608,11 +1607,9 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size /
QEDI_PAGE_SIZE;
list = (u64 *)qedi->bdq_pbl_list;
page = qedi->bdq_pbl_list_dma;
for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) {
*list = qedi->bdq_pbl_dma;
list++;
page += QEDI_PAGE_SIZE;
}
return 0;
@ -2089,8 +2086,7 @@ static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)
rc = snprintf(buf, ip_len, fmt, gw);
break;
case ISCSI_BOOT_ETH_FLAGS:
rc = snprintf(buf, 3, "%hhd\n",
SYSFS_FLAG_FW_SEL_BOOT);
rc = snprintf(buf, 3, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_ETH_INDEX:
rc = snprintf(buf, 3, "0\n");
@ -2257,7 +2253,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
mchap_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
rc = snprintf(buf, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
rc = snprintf(buf, 3, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_TGT_NIC_ASSOC:
rc = snprintf(buf, 3, "0\n");


@ -2700,7 +2700,13 @@ qla2x00_get_starget_port_id(struct scsi_target *starget)
static inline void
qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
rport->dev_loss_tmo = timeout ? timeout : 1;
if (IS_ENABLED(CONFIG_NVME_FC) && fcport && fcport->nvme_remote_port)
nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
rport->dev_loss_tmo);
}
static void


@ -5828,13 +5828,6 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla2x00_dfs_create_rport(vha, fcport);
if (NVME_TARGET(vha->hw, fcport)) {
qla_nvme_register_remote(vha, fcport);
qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
return;
}
qla24xx_update_fcport_fcp_prio(vha, fcport);
switch (vha->host->active_mode) {
@ -5856,6 +5849,9 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
break;
}
if (NVME_TARGET(vha->hw, fcport))
qla_nvme_register_remote(vha, fcport);
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {


@ -43,7 +43,7 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
req.port_name = wwn_to_u64(fcport->port_name);
req.node_name = wwn_to_u64(fcport->node_name);
req.port_role = 0;
req.dev_loss_tmo = 0;
req.dev_loss_tmo = fcport->dev_loss_tmo;
if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
req.port_role = FC_PORT_ROLE_NVME_INITIATOR;
@ -70,6 +70,9 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
return ret;
}
nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
fcport->dev_loss_tmo);
if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
ql_log(ql_log_info, vha, 0x212a,
"PortID:%06x Supports SLER\n", req.port_id);


@ -5734,7 +5734,7 @@ static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
switch (type) {
case ISCSI_BOOT_ETH_FLAGS:
rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
rc = sprintf(str, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_ETH_INDEX:
rc = sprintf(str, "0\n");
@ -5843,7 +5843,7 @@ qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
(char *)&boot_conn->chap.intr_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
rc = sprintf(str, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_TGT_NIC_ASSOC:
rc = sprintf(str, "0\n");


@ -55,7 +55,6 @@
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/async.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
@ -201,11 +200,11 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
/*
* 1024 is big enough for saturating the fast scsi LUN now
* 1024 is big enough for saturating fast SCSI LUNs.
*/
int scsi_device_max_queue_depth(struct scsi_device *sdev)
{
return max_t(int, sdev->host->can_queue, 1024);
return min_t(int, sdev->host->can_queue, 1024);
}
/**


@ -9,6 +9,7 @@
static const char *const scsi_cmd_flags[] = {
SCSI_CMD_FLAG_NAME(TAGGED),
SCSI_CMD_FLAG_NAME(INITIALIZED),
SCSI_CMD_FLAG_NAME(LAST),
};
#undef SCSI_CMD_FLAG_NAME


@ -133,23 +133,6 @@ static bool scsi_eh_should_retry_cmd(struct scsi_cmnd *cmd)
return true;
}
static void scsi_eh_complete_abort(struct scsi_cmnd *scmd, struct Scsi_Host *shost)
{
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
list_del_init(&scmd->eh_entry);
/*
* If the abort succeeds, and there is no further
* EH action, clear the ->last_reset time.
*/
if (list_empty(&shost->eh_abort_list) &&
list_empty(&shost->eh_cmd_q))
if (shost->eh_deadline != -1)
shost->last_reset = 0;
spin_unlock_irqrestore(shost->host_lock, flags);
}
/**
* scmd_eh_abort_handler - Handle command aborts
* @work: command to be aborted.
@ -166,54 +149,72 @@ scmd_eh_abort_handler(struct work_struct *work)
struct scsi_cmnd *scmd =
container_of(work, struct scsi_cmnd, abort_work.work);
struct scsi_device *sdev = scmd->device;
struct Scsi_Host *shost = sdev->host;
enum scsi_disposition rtn;
unsigned long flags;
if (scsi_host_eh_past_deadline(sdev->host)) {
if (scsi_host_eh_past_deadline(shost)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"eh timeout, not aborting\n"));
} else {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"aborting command\n"));
rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd);
if (rtn == SUCCESS) {
set_host_byte(scmd, DID_TIME_OUT);
if (scsi_host_eh_past_deadline(sdev->host)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"eh timeout, not retrying "
"aborted command\n"));
} else if (!scsi_noretry_cmd(scmd) &&
scsi_cmd_retry_allowed(scmd) &&
scsi_eh_should_retry_cmd(scmd)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_WARNING, scmd,
"retry aborted command\n"));
scsi_eh_complete_abort(scmd, sdev->host);
scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
return;
} else {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_WARNING, scmd,
"finish aborted command\n"));
scsi_eh_complete_abort(scmd, sdev->host);
scsi_finish_command(scmd);
return;
}
} else {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"cmd abort %s\n",
(rtn == FAST_IO_FAIL) ?
"not send" : "failed"));
}
goto out;
}
spin_lock_irqsave(sdev->host->host_lock, flags);
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"aborting command\n"));
rtn = scsi_try_to_abort_cmd(shost->hostt, scmd);
if (rtn != SUCCESS) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"cmd abort %s\n",
(rtn == FAST_IO_FAIL) ?
"not send" : "failed"));
goto out;
}
set_host_byte(scmd, DID_TIME_OUT);
if (scsi_host_eh_past_deadline(shost)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"eh timeout, not retrying "
"aborted command\n"));
goto out;
}
spin_lock_irqsave(shost->host_lock, flags);
list_del_init(&scmd->eh_entry);
spin_unlock_irqrestore(sdev->host->host_lock, flags);
/*
* If the abort succeeds, and there is no further
* EH action, clear the ->last_reset time.
*/
if (list_empty(&shost->eh_abort_list) &&
list_empty(&shost->eh_cmd_q))
if (shost->eh_deadline != -1)
shost->last_reset = 0;
spin_unlock_irqrestore(shost->host_lock, flags);
if (!scsi_noretry_cmd(scmd) &&
scsi_cmd_retry_allowed(scmd) &&
scsi_eh_should_retry_cmd(scmd)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_WARNING, scmd,
"retry aborted command\n"));
scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
} else {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_WARNING, scmd,
"finish aborted command\n"));
scsi_finish_command(scmd);
}
return;
out:
spin_lock_irqsave(shost->host_lock, flags);
list_del_init(&scmd->eh_entry);
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_eh_scmd_add(scmd);
}
@ -1429,7 +1430,8 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
enum scsi_disposition rtn = NEEDS_RETRY;
for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, scmd->device->request_queue->rq_timeout, 0);
rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
scmd->device->eh_timeout, 0);
if (rtn == SUCCESS)
return 0;


@ -8,7 +8,6 @@
#include <linux/pm_runtime.h>
#include <linux/export.h>
#include <linux/async.h>
#include <linux/blk-pm.h>
#include <scsi/scsi.h>
@ -181,7 +180,7 @@ static int sdev_runtime_resume(struct device *dev)
blk_pre_runtime_resume(sdev->request_queue);
if (pm && pm->runtime_resume)
err = pm->runtime_resume(dev);
blk_post_runtime_resume(sdev->request_queue, err);
blk_post_runtime_resume(sdev->request_queue);
return err;
}


@ -3,7 +3,6 @@
#define _SCSI_PRIV_H
#include <linux/device.h>
#include <linux/async.h>
#include <scsi/scsi_device.h>
#include <linux/sbitmap.h>
@ -144,7 +143,7 @@ extern struct scsi_transport_template blank_transport_template;
extern void __scsi_remove_device(struct scsi_device *);
extern struct bus_type scsi_bus_type;
extern const struct attribute_group scsi_shost_attr_group;
extern const struct attribute_group *scsi_shost_groups[];
/* scsi_netlink.c */
#ifdef CONFIG_SCSI_NETLINK


@ -97,7 +97,7 @@ MODULE_PARM_DESC(max_luns,
#define SCSI_SCAN_TYPE_DEFAULT "sync"
#endif
char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
static char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
S_IRUGO|S_IWUSR);
@ -122,7 +122,7 @@ struct async_scan_data {
struct completion prev_finished;
};
/**
/*
* scsi_enable_async_suspend - Enable async suspend and resume
*/
void scsi_enable_async_suspend(struct device *dev)

View File

@ -424,10 +424,15 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
NULL
};
const struct attribute_group scsi_shost_attr_group = {
static const struct attribute_group scsi_shost_attr_group = {
.attrs = scsi_sysfs_shost_attrs,
};
const struct attribute_group *scsi_shost_groups[] = {
&scsi_shost_attr_group,
NULL
};
static void scsi_device_cls_release(struct device *class_dev)
{
struct scsi_device *sdev;

View File

@ -51,7 +51,6 @@
#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <linux/sed-opal.h>
#include <linux/pm_runtime.h>

View File

@ -61,10 +61,10 @@ static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
zone.len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
zone.capacity = zone.len;
zone.start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
if (zone.type != ZBC_ZONE_TYPE_CONV &&
zone.cond == ZBC_ZONE_COND_FULL)
if (zone.cond == ZBC_ZONE_COND_FULL)
zone.wp = zone.start + zone.len;
else
zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
ret = cb(&zone, idx, data);
if (ret)

View File

@ -100,7 +100,7 @@ snic_queue_report_tgt_req(struct snic *snic)
SNIC_BUG_ON(ntgts == 0);
buf_len = ntgts * sizeof(struct snic_tgt_id) + SNIC_SG_DESC_ALIGN;
buf = kzalloc(buf_len, GFP_KERNEL|GFP_DMA);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf) {
snic_req_free(snic, rqi);
SNIC_HOST_ERR(snic->shost, "Resp Buf Alloc Failed.\n");

View File

@ -855,7 +855,7 @@ static void get_capabilities(struct scsi_cd *cd)
/* allocate transfer buffer */
buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
buffer = kmalloc(512, GFP_KERNEL);
if (!buffer) {
sr_printk(KERN_ERR, cd, "out of memory.\n");
return;

View File

@ -131,7 +131,7 @@ int sr_set_blocklength(Scsi_CD *cd, int blocklength)
if (cd->vendor == VENDOR_TOSHIBA)
density = (blocklength > 2048) ? 0x81 : 0x83;
buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
buffer = kmalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@ -179,7 +179,7 @@ int sr_cd_check(struct cdrom_device_info *cdi)
if (cd->cdi.mask & CDC_MULTI_SESSION)
return 0;
buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
buffer = kmalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
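
The three hunks above belong to this merge's GFP_DMA removal: these small
command buffers have no reason to come from the tiny ISA DMA zone, and any
real addressing limit belongs to the DMA API, not the page allocator. A
minimal sketch of that idiom, assuming an illustrative PCI device pdev and
not taken from any of the drivers above:

	void *buf;
	dma_addr_t dma;

	buf = kzalloc(512, GFP_KERNEL);	/* no GFP_DMA: any zone will do */
	if (!buf)
		return -ENOMEM;

	/* The DMA API honours the device's dma_mask, bouncing if needed. */
	dma = dma_map_single(&pdev->dev, buf, 512, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		kfree(buf);
		return -EIO;
	}
	/* ... program 'dma' into the device, wait for the transfer ... */
	dma_unmap_single(&pdev->dev, dma, 512, DMA_FROM_DEVICE);
	kfree(buf);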

View File

@ -50,9 +50,11 @@ config SCSI_UFSHCD
However, do not compile this as a module if your root file system
(the one containing the directory /) is located on a UFS device.
if SCSI_UFSHCD
config SCSI_UFSHCD_PCI
tristate "PCI bus based UFS Controller support"
depends on SCSI_UFSHCD && PCI
depends on PCI
help
This selects the PCI UFS Host Controller Interface. Select this if
you have UFS Host Controller with PCI Interface.
@ -71,7 +73,6 @@ config SCSI_UFS_DWC_TC_PCI
config SCSI_UFSHCD_PLATFORM
tristate "Platform bus based UFS Controller support"
depends on SCSI_UFSHCD
depends on HAS_IOMEM
help
This selects the UFS host controller support. Select this if
@ -147,7 +148,6 @@ config SCSI_UFS_TI_J721E
config SCSI_UFS_BSG
bool "Universal Flash Storage BSG device node"
depends on SCSI_UFSHCD
select BLK_DEV_BSGLIB
help
Universal Flash Storage (UFS) is a SCSI transport specification for
@ -177,7 +177,7 @@ config SCSI_UFS_EXYNOS
config SCSI_UFS_CRYPTO
bool "UFS Crypto Engine Support"
depends on SCSI_UFSHCD && BLK_INLINE_ENCRYPTION
depends on BLK_INLINE_ENCRYPTION
help
Enable Crypto Engine Support in UFS.
Enabling this makes it possible for the kernel to use the crypto
@ -186,7 +186,6 @@ config SCSI_UFS_CRYPTO
config SCSI_UFS_HPB
bool "Support UFS Host Performance Booster"
depends on SCSI_UFSHCD
help
The UFS HPB feature improves random read performance. It caches
L2P (logical to physical) map of UFS to host DRAM. The driver uses HPB
@ -195,16 +194,18 @@ config SCSI_UFS_HPB
config SCSI_UFS_FAULT_INJECTION
bool "UFS Fault Injection Support"
depends on SCSI_UFSHCD && FAULT_INJECTION
depends on FAULT_INJECTION
help
Enable fault injection support in the UFS driver. This makes it easier
to test the UFS error handler and abort handler.
config SCSI_UFS_HWMON
bool "UFS Temperature Notification"
bool "UFS Temperature Notification"
depends on SCSI_UFSHCD=HWMON || HWMON=y
help
This provides support for UFS hardware monitoring. If enabled,
a hardware monitoring device will be created for the UFS device.
If unsure, say N.
endif

View File

@ -110,7 +110,6 @@ tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
pci_set_drvdata(pdev, hba);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);

View File

@ -853,14 +853,14 @@ static int exynos_ufs_post_pwr_mode(struct ufs_hba *hba,
}
static void exynos_ufs_specify_nexus_t_xfer_req(struct ufs_hba *hba,
int tag, bool op)
int tag, bool is_scsi_cmd)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
u32 type;
type = hci_readl(ufs, HCI_UTRL_NEXUS_TYPE);
if (op)
if (is_scsi_cmd)
hci_writel(ufs, type | (1 << tag), HCI_UTRL_NEXUS_TYPE);
else
hci_writel(ufs, type & ~(1 << tag), HCI_UTRL_NEXUS_TYPE);

View File

@ -396,6 +396,12 @@ out:
return ret;
}
static int ufs_hisi_suspend_prepare(struct device *dev)
{
/* RPM and SPM are different. Refer to ufs_hisi_suspend() */
return __ufshcd_suspend_prepare(dev, false);
}
static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
@ -578,7 +584,7 @@ static int ufs_hisi_remove(struct platform_device *pdev)
static const struct dev_pm_ops ufs_hisi_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.prepare = ufs_hisi_suspend_prepare,
.complete = ufshcd_resume_complete,
};

View File

@ -538,8 +538,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
pci_set_drvdata(pdev, hba);
hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;
err = ufshcd_init(hba, mmio_base, pdev->irq);

View File

@ -361,8 +361,6 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
goto dealloc_host;
}
platform_set_drvdata(pdev, hba);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);

View File

@ -128,8 +128,9 @@ EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
enum {
UFSHCD_MAX_CHANNEL = 0,
UFSHCD_MAX_ID = 1,
UFSHCD_CMD_PER_LUN = 32,
UFSHCD_CAN_QUEUE = 32,
UFSHCD_NUM_RESERVED = 1,
UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
};
static const char *const ufshcd_state_name[] = {
@ -1069,13 +1070,32 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
return false;
}
/*
* Determine the number of pending commands by counting the bits in the SCSI
* device budget maps. This approach has been selected because a bit is set in
* the budget map before scsi_host_queue_ready() checks the host_self_blocked
* flag. The host_self_blocked flag can be modified by calling
* scsi_block_requests() or scsi_unblock_requests().
*/
static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
{
struct scsi_device *sdev;
u32 pending = 0;
lockdep_assert_held(hba->host->host_lock);
__shost_for_each_device(sdev, hba->host)
pending += sbitmap_weight(&sdev->budget_map);
return pending;
}
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
u64 wait_timeout_us)
{
unsigned long flags;
int ret = 0;
u32 tm_doorbell;
u32 tr_doorbell;
u32 tr_pending;
bool timeout = false, do_last_check = false;
ktime_t start;
@ -1093,8 +1113,8 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
}
tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
if (!tm_doorbell && !tr_doorbell) {
tr_pending = ufshcd_pending_cmds(hba);
if (!tm_doorbell && !tr_pending) {
timeout = false;
break;
} else if (do_last_check) {
@ -1114,12 +1134,12 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
do_last_check = true;
}
spin_lock_irqsave(hba->host->host_lock, flags);
} while (tm_doorbell || tr_doorbell);
} while (tm_doorbell || tr_pending);
if (timeout) {
dev_err(hba->dev,
"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
__func__, tm_doorbell, tr_doorbell);
__func__, tm_doorbell, tr_pending);
ret = -EBUSY;
}
out:
@ -1352,25 +1372,6 @@ out:
return ret;
}
static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
{
int *busy = priv;
WARN_ON_ONCE(reserved);
(*busy)++;
return false;
}
/* Whether or not any tag is in use by a request that is in progress. */
static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
{
struct request_queue *q = hba->cmd_queue;
int busy = 0;
blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
return busy;
}
static int ufshcd_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *stat)
{
@ -1666,7 +1667,8 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
bool flush_result;
unsigned long flags;
if (!ufshcd_is_clkgating_allowed(hba))
if (!ufshcd_is_clkgating_allowed(hba) ||
!hba->clk_gating.is_initialized)
goto out;
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_gating.active_reqs++;
@ -1769,7 +1771,7 @@ static void ufshcd_gate_work(struct work_struct *work)
if (hba->clk_gating.active_reqs
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
|| ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
|| hba->outstanding_reqs || hba->outstanding_tasks
|| hba->active_uic_cmd || hba->uic_async_done)
goto rel_lock;
@ -1826,7 +1828,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
hba->outstanding_tasks ||
hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
hba->active_uic_cmd || hba->uic_async_done ||
hba->clk_gating.state == CLKS_OFF)
return;
@ -1961,11 +1963,15 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
if (!hba->clk_gating.is_initialized)
return;
ufshcd_remove_clk_gating_sysfs(hba);
cancel_work_sync(&hba->clk_gating.ungate_work);
cancel_delayed_work_sync(&hba->clk_gating.gate_work);
destroy_workqueue(hba->clk_gating.clk_gating_workq);
/* Ungate the clock if necessary. */
ufshcd_hold(hba, false);
hba->clk_gating.is_initialized = false;
ufshcd_release(hba);
destroy_workqueue(hba->clk_gating.clk_gating_workq);
}
/* Must be called with host lock acquired */
@ -2189,6 +2195,7 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
hba->nutmrs =
((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
hba->reserved_slot = hba->nutrs - 1;
/* Read crypto capabilities */
err = ufshcd_hba_init_crypto_capabilities(hba);
@ -2650,17 +2657,42 @@ static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}
static inline bool is_rpmb_wlun(struct scsi_device *sdev)
{
return sdev->lun == ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN);
}
static inline bool is_device_wlun(struct scsi_device *sdev)
{
return sdev->lun ==
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
}
/*
* Associate the UFS controller queue with the default and poll HCTX types.
* Initialize the mq_map[] arrays.
*/
static int ufshcd_map_queues(struct Scsi_Host *shost)
{
int i, ret;
for (i = 0; i < shost->nr_maps; i++) {
struct blk_mq_queue_map *map = &shost->tag_set.map[i];
switch (i) {
case HCTX_TYPE_DEFAULT:
case HCTX_TYPE_POLL:
map->nr_queues = 1;
break;
case HCTX_TYPE_READ:
map->nr_queues = 0;
break;
default:
WARN_ON_ONCE(true);
}
map->queue_offset = 0;
ret = blk_mq_map_queues(map);
WARN_ON_ONCE(ret);
}
return 0;
}
static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
{
struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
@ -2696,10 +2728,13 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
struct ufshcd_lrb *lrbp;
int err = 0;
WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
if (!down_read_trylock(&hba->clk_scaling_lock))
return SCSI_MLQUEUE_HOST_BUSY;
/*
* Allows the UFS error handler to wait for prior ufshcd_queuecommand()
* calls.
*/
rcu_read_lock();
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
@ -2779,8 +2814,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
}
ufshcd_send_command(hba, tag);
out:
up_read(&hba->clk_scaling_lock);
rcu_read_unlock();
if (ufs_trigger_eh()) {
unsigned long flags;
@ -2936,31 +2972,16 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout)
{
struct request_queue *q = hba->cmd_queue;
DECLARE_COMPLETION_ONSTACK(wait);
struct request *req;
const u32 tag = hba->reserved_slot;
struct ufshcd_lrb *lrbp;
int err;
int tag;
/* Protects use of hba->reserved_slot. */
lockdep_assert_held(&hba->dev_cmd.lock);
down_read(&hba->clk_scaling_lock);
/*
* Get free slot, sleep if slots are unavailable.
* Even though we use wait_event() which sleeps indefinitely,
* the maximum wait time is bounded by SCSI request timeout.
*/
req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto out_unlock;
}
tag = req->tag;
WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
/* Set the timeout such that the SCSI error handler is not activated. */
req->timeout = msecs_to_jiffies(2 * timeout);
blk_mq_start_request(req);
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
@ -2977,8 +2998,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
out:
blk_mq_free_request(req);
out_unlock:
up_read(&hba->clk_scaling_lock);
return err;
}
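
The rewritten ufshcd_exec_dev_cmd() above no longer allocates a block-layer
request per device-management command. Instead one transfer slot is set
aside permanently (hba->reserved_slot, the last of the nutrs slots, with
UFSHCD_NUM_RESERVED subtracted from the queue depth elsewhere in this diff)
and all users are serialized by hba->dev_cmd.lock. A rough sketch of the
convention, paraphrased from the hunks rather than quoted:

	/* At controller init: hide the last slot from the SCSI core. */
	hba->reserved_slot = hba->nutrs - 1;
	host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;

	/* Per device command: the mutex guarantees the slot is idle. */
	mutex_lock(&hba->dev_cmd.lock);
	tag = hba->reserved_slot;
	/*
	 * ... build the UPIU in hba->lrb[tag], ring the doorbell and
	 * wait for completion with a driver-private timeout ...
	 */
	mutex_unlock(&hba->dev_cmd.lock);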
@ -4960,11 +4979,7 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
*/
static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
{
struct ufs_hba *hba = shost_priv(sdev->host);
if (depth > hba->nutrs)
depth = hba->nutrs;
return scsi_change_queue_depth(sdev, depth);
return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
}
static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
@ -5256,6 +5271,18 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
return retval;
}
/* Release the resources allocated for processing a SCSI command. */
static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp)
{
struct scsi_cmnd *cmd = lrbp->cmd;
scsi_dma_unmap(cmd);
lrbp->cmd = NULL; /* Mark the command as completed. */
ufshcd_release(hba);
ufshcd_clk_scaling_update_busy(hba);
}
/**
* __ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
@ -5266,9 +5293,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
{
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
int result;
int index;
bool update_scaling = false;
for_each_set_bit(index, &completed_reqs, hba->nutrs) {
lrbp = &hba->lrb[index];
@ -5278,29 +5303,47 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_update_monitor(hba, lrbp);
ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
result = ufshcd_transfer_rsp_status(hba, lrbp);
scsi_dma_unmap(cmd);
cmd->result = result;
/* Mark completed command as NULL in LRB */
lrbp->cmd = NULL;
cmd->result = ufshcd_transfer_rsp_status(hba, lrbp);
ufshcd_release_scsi_cmd(hba, lrbp);
/* Do not touch lrbp after scsi done */
scsi_done(cmd);
ufshcd_release(hba);
update_scaling = true;
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
if (hba->dev_cmd.complete) {
ufshcd_add_command_trace(hba, index,
UFS_DEV_COMP);
complete(hba->dev_cmd.complete);
update_scaling = true;
ufshcd_clk_scaling_update_busy(hba);
}
}
if (update_scaling)
ufshcd_clk_scaling_update_busy(hba);
}
}
/*
* Returns > 0 if one or more commands have been completed or 0 if no
* requests have been completed.
*/
static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
struct ufs_hba *hba = shost_priv(shost);
unsigned long completed_reqs, flags;
u32 tr_doorbell;
spin_lock_irqsave(&hba->outstanding_lock, flags);
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
"completed: %#lx; outstanding: %#lx\n", completed_reqs,
hba->outstanding_reqs);
hba->outstanding_reqs &= ~completed_reqs;
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (completed_reqs)
__ufshcd_transfer_req_compl(hba, completed_reqs);
return completed_reqs;
}
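
ufshcd_poll() gives UFS a foreground completion path: together with the
poll queue set up in ufshcd_map_queues() above and the .mq_poll hook added
to the host template further down, polled I/O completes without waiting
for an interrupt. A userspace sketch of how that path is exercised,
assuming liburing is available; the device node and sizes are placeholders:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <liburing.h>
	#include <stdlib.h>

	int main(void)
	{
		struct io_uring ring;
		struct io_uring_cqe *cqe;
		struct io_uring_sqe *sqe;
		void *buf;
		int fd;

		/* IOPOLL rings reap completions via the driver's ->mq_poll. */
		if (io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL))
			return 1;
		fd = open("/dev/sda", O_RDONLY | O_DIRECT); /* placeholder */
		if (fd < 0 || posix_memalign(&buf, 4096, 4096))
			return 1;

		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_read(sqe, fd, buf, 4096, 0);
		io_uring_submit(&ring);
		/* Busy-polls the block layer, ending up in ufshcd_poll(). */
		io_uring_wait_cqe(&ring, &cqe);
		io_uring_cqe_seen(&ring, cqe);
		io_uring_queue_exit(&ring);
		return 0;
	}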
/**
* ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
@ -5311,9 +5354,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
*/
static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
unsigned long completed_reqs, flags;
u32 tr_doorbell;
/* Resetting interrupt aggregation counters first and reading the
* DOOR_BELL afterward allows us to handle all the completed requests.
* In order to prevent other interrupts starvation the DB is read once
@ -5328,21 +5368,13 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
if (ufs_fail_completion())
return IRQ_HANDLED;
spin_lock_irqsave(&hba->outstanding_lock, flags);
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
"completed: %#lx; outstanding: %#lx\n", completed_reqs,
hba->outstanding_reqs);
hba->outstanding_reqs &= ~completed_reqs;
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
/*
* Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
* do not want polling to trigger spurious interrupt complaints.
*/
ufshcd_poll(hba->host, 0);
if (completed_reqs) {
__ufshcd_transfer_req_compl(hba, completed_reqs);
return IRQ_HANDLED;
} else {
return IRQ_NONE;
}
return IRQ_HANDLED;
}
int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
@ -5986,8 +6018,7 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
}
ufshcd_scsi_block_requests(hba);
/* Drain ufshcd_queuecommand() */
down_write(&hba->clk_scaling_lock);
up_write(&hba->clk_scaling_lock);
synchronize_rcu();
cancel_work_sync(&hba->eeh_work);
}
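
The down_write()/up_write() pair on clk_scaling_lock that used to drain
submitters is replaced by RCU here: ufshcd_queuecommand() now wraps
submission in rcu_read_lock()/rcu_read_unlock() (see the earlier hunk), so
once new requests are blocked a single synchronize_rcu() waits out every
in-flight call. The shape of the pattern, as a sketch rather than the
driver source:

	/* Submission side: a cheap read-side critical section. */
	rcu_read_lock();
	/* ... check hba->ufshcd_state and issue the command ... */
	rcu_read_unlock();

	/* Error handler: fence off new callers, then drain old ones. */
	ufshcd_scsi_block_requests(hba);  /* no new queuecommand calls */
	synchronize_rcu();                /* returns once all readers exit */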
@ -6594,6 +6625,8 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
spin_lock_irqsave(host->host_lock, flags);
task_tag = req->tag;
WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n",
task_tag);
hba->tmf_rqs[req->tag] = req;
treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
@ -6711,29 +6744,17 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type,
enum query_opcode desc_op)
{
struct request_queue *q = hba->cmd_queue;
DECLARE_COMPLETION_ONSTACK(wait);
struct request *req;
const u32 tag = hba->reserved_slot;
struct ufshcd_lrb *lrbp;
int err = 0;
int tag;
u8 upiu_flags;
/* Protects use of hba->reserved_slot. */
lockdep_assert_held(&hba->dev_cmd.lock);
down_read(&hba->clk_scaling_lock);
req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto out_unlock;
}
tag = req->tag;
WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
err = -EBUSY;
goto out;
}
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
lrbp->cmd = NULL;
@ -6801,9 +6822,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
out:
blk_mq_free_request(req);
out_unlock:
up_read(&hba->clk_scaling_lock);
return err;
}
@ -7033,6 +7051,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
unsigned long flags;
int err = FAILED;
bool outstanding;
u32 reg;
WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
@ -7110,7 +7129,17 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
goto release;
}
lrbp->cmd = NULL;
/*
* Clear the corresponding bit from outstanding_reqs since the command
* has been aborted successfully.
*/
spin_lock_irqsave(&hba->outstanding_lock, flags);
outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (outstanding)
ufshcd_release_scsi_cmd(hba, lrbp);
err = SUCCESS;
release:
@ -7412,7 +7441,7 @@ static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
{
int ret = 0;
struct scsi_device *sdev_boot;
struct scsi_device *sdev_boot, *sdev_rpmb;
hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
@ -7423,14 +7452,14 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
}
scsi_device_put(hba->sdev_ufs_device);
hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
if (IS_ERR(hba->sdev_rpmb)) {
ret = PTR_ERR(hba->sdev_rpmb);
if (IS_ERR(sdev_rpmb)) {
ret = PTR_ERR(sdev_rpmb);
goto remove_sdev_ufs_device;
}
ufshcd_blk_pm_runtime_init(hba->sdev_rpmb);
scsi_device_put(hba->sdev_rpmb);
ufshcd_blk_pm_runtime_init(sdev_rpmb);
scsi_device_put(sdev_rpmb);
sdev_boot = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
@ -8156,7 +8185,9 @@ static struct scsi_host_template ufshcd_driver_template = {
.module = THIS_MODULE,
.name = UFSHCD,
.proc_name = UFSHCD,
.map_queues = ufshcd_map_queues,
.queuecommand = ufshcd_queuecommand,
.mq_poll = ufshcd_poll,
.slave_alloc = ufshcd_slave_alloc,
.slave_configure = ufshcd_slave_configure,
.slave_destroy = ufshcd_slave_destroy,
@ -9384,7 +9415,6 @@ void ufshcd_remove(struct ufs_hba *hba)
ufs_sysfs_remove_nodes(hba->dev);
blk_cleanup_queue(hba->tmf_queue);
blk_mq_free_tag_set(&hba->tmf_tag_set);
blk_cleanup_queue(hba->cmd_queue);
scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
@ -9445,6 +9475,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = -ENOMEM;
goto out_error;
}
host->nr_maps = HCTX_TYPE_POLL + 1;
hba = shost_priv(host);
hba->host = host;
hba->dev = dev;
@ -9486,6 +9517,13 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
struct device *dev = hba->dev;
char eh_wq_name[sizeof("ufs_eh_wq_00")];
/*
* dev_set_drvdata() must be called before any callbacks are registered
* that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
* sysfs).
*/
dev_set_drvdata(dev, hba);
if (!mmio_base) {
dev_err(hba->dev,
"Invalid memory reference for mmio_base is NULL\n");
@ -9528,8 +9566,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Configure LRB */
ufshcd_host_memory_configure(hba);
host->can_queue = hba->nutrs;
host->cmd_per_lun = hba->nutrs;
host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
host->max_id = UFSHCD_MAX_ID;
host->max_lun = UFS_MAX_LUNS;
host->max_channel = UFSHCD_MAX_CHANNEL;
@ -9597,12 +9635,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto out_disable;
}
hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
if (IS_ERR(hba->cmd_queue)) {
err = PTR_ERR(hba->cmd_queue);
goto out_remove_scsi_host;
}
hba->tmf_tag_set = (struct blk_mq_tag_set) {
.nr_hw_queues = 1,
.queue_depth = hba->nutmrs,
@ -9611,7 +9643,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
};
err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
if (err < 0)
goto free_cmd_queue;
goto out_remove_scsi_host;
hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
if (IS_ERR(hba->tmf_queue)) {
err = PTR_ERR(hba->tmf_queue);
@ -9680,8 +9712,6 @@ free_tmf_queue:
blk_cleanup_queue(hba->tmf_queue);
free_tmf_tag_set:
blk_mq_free_tag_set(&hba->tmf_tag_set);
free_cmd_queue:
blk_cleanup_queue(hba->cmd_queue);
out_remove_scsi_host:
scsi_remove_host(hba->host);
out_disable:
@ -9703,7 +9733,27 @@ void ufshcd_resume_complete(struct device *dev)
}
EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
int ufshcd_suspend_prepare(struct device *dev)
static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
{
struct device *dev = &hba->sdev_ufs_device->sdev_gendev;
enum ufs_dev_pwr_mode dev_pwr_mode;
enum uic_link_state link_state;
unsigned long flags;
bool res;
spin_lock_irqsave(&dev->power.lock, flags);
dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
res = pm_runtime_suspended(dev) &&
hba->curr_dev_pwr_mode == dev_pwr_mode &&
hba->uic_link_state == link_state &&
!hba->dev_info.b_rpm_dev_flush_capable;
spin_unlock_irqrestore(&dev->power.lock, flags);
return res;
}
int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
int ret;
@ -9715,15 +9765,30 @@ int ufshcd_suspend_prepare(struct device *dev)
* Refer to ufshcd_resume_complete()
*/
if (hba->sdev_ufs_device) {
ret = ufshcd_rpm_get_sync(hba);
if (ret < 0 && ret != -EACCES) {
ufshcd_rpm_put(hba);
return ret;
/* Prevent runtime suspend */
ufshcd_rpm_get_noresume(hba);
/*
* Check if already runtime suspended in same state as system
* suspend would be.
*/
if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
/* RPM state is not ok for SPM, so runtime resume */
ret = ufshcd_rpm_resume(hba);
if (ret < 0 && ret != -EACCES) {
ufshcd_rpm_put(hba);
return ret;
}
}
hba->complete_put = true;
}
return 0;
}
EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);
int ufshcd_suspend_prepare(struct device *dev)
{
return __ufshcd_suspend_prepare(dev, true);
}
EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
#ifdef CONFIG_PM_SLEEP

View File

@ -338,7 +338,8 @@ struct ufs_hba_variant_ops {
enum ufs_notify_change_status status,
struct ufs_pa_layer_attr *,
struct ufs_pa_layer_attr *);
void (*setup_xfer_req)(struct ufs_hba *, int, bool);
void (*setup_xfer_req)(struct ufs_hba *hba, int tag,
bool is_scsi_cmd);
void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
enum ufs_notify_change_status);
@ -737,13 +738,13 @@ struct ufs_hba_monitor {
* @host: Scsi_Host instance of the driver
* @dev: device handle
* @lrb: local reference block
* @cmd_queue: Used to allocate command tags from hba->host->tag_set.
* @outstanding_tasks: Bits representing outstanding task requests
* @outstanding_lock: Protects @outstanding_reqs.
* @outstanding_reqs: Bits representing outstanding transfer requests
* @capabilities: UFS Controller Capabilities
* @nutrs: Transfer Request Queue depth supported by controller
* @nutmrs: Task Management Queue depth supported by controller
* @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock.
* @ufs_version: UFS Version to which controller complies
* @vops: pointer to variant specific operations
* @priv: pointer to variant specific private data
@ -777,6 +778,7 @@ struct ufs_hba_monitor {
* @clk_list_head: UFS host controller clocks list node head
* @pwr_info: holds current power mode
* @max_pwr_info: keeps the device max valid pwm
* @clk_scaling_lock: used to serialize device commands and clock scaling
* @desc_size: descriptor sizes reported by device
* @urgent_bkops_lvl: keeps track of urgent bkops level for device
* @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
@ -802,13 +804,11 @@ struct ufs_hba {
struct Scsi_Host *host;
struct device *dev;
struct request_queue *cmd_queue;
/*
* This field is to keep a reference to "scsi_device" corresponding to
* "UFS device" W-LU.
*/
struct scsi_device *sdev_ufs_device;
struct scsi_device *sdev_rpmb;
#ifdef CONFIG_SCSI_UFS_HWMON
struct device *hwmon_device;
@ -836,6 +836,7 @@ struct ufs_hba {
u32 capabilities;
int nutrs;
int nutmrs;
u32 reserved_slot;
u32 ufs_version;
const struct ufs_hba_variant_ops *vops;
struct ufs_hba_variant_params *vps;
@ -1211,6 +1212,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
int ufshcd_suspend_prepare(struct device *dev);
int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm);
void ufshcd_resume_complete(struct device *dev);
/* Wrapper functions for safely calling variant operations */
@ -1420,6 +1422,16 @@ static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba)
return pm_runtime_put_sync(&hba->sdev_ufs_device->sdev_gendev);
}
static inline void ufshcd_rpm_get_noresume(struct ufs_hba *hba)
{
pm_runtime_get_noresume(&hba->sdev_ufs_device->sdev_gendev);
}
static inline int ufshcd_rpm_resume(struct ufs_hba *hba)
{
return pm_runtime_resume(&hba->sdev_ufs_device->sdev_gendev);
}
static inline int ufshcd_rpm_put(struct ufs_hba *hba)
{
return pm_runtime_put(&hba->sdev_ufs_device->sdev_gendev);

View File

@ -10,7 +10,6 @@
*/
#include <asm/unaligned.h>
#include <linux/async.h>
#include "ufshcd.h"
#include "ufshpb.h"

View File

@ -14,7 +14,7 @@ extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q, int err);
extern void blk_post_runtime_resume(struct request_queue *q);
extern void blk_set_runtime_active(struct request_queue *q);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,

View File

@ -356,6 +356,7 @@ enum sas_ha_state {
SAS_HA_DRAINING,
SAS_HA_ATA_EH_ACTIVE,
SAS_HA_FROZEN,
SAS_HA_RESUMING,
};
struct sas_ha_struct {
@ -660,6 +661,7 @@ extern int sas_register_ha(struct sas_ha_struct *);
extern int sas_unregister_ha(struct sas_ha_struct *);
extern void sas_prep_resume_ha(struct sas_ha_struct *sas_ha);
extern void sas_resume_ha(struct sas_ha_struct *sas_ha);
extern void sas_resume_ha_no_sync(struct sas_ha_struct *sas_ha);
extern void sas_suspend_ha(struct sas_ha_struct *sas_ha);
int sas_set_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates);

View File

@ -691,12 +691,6 @@ struct Scsi_Host {
/* ldm bits */
struct device shost_gendev, shost_dev;
/*
* The array size 3 provides space for one attribute group defined by
* the SCSI core, one attribute group defined by the SCSI LLD and one
* terminating NULL pointer.
*/
const struct attribute_group *shost_dev_attr_groups[3];
/*
* Points to the transport data (if any) which is allocated