dmaengine: idxd: move idxd interrupt handling to mask instead of ignore

Switch the driver to use MSIX mask and unmask instead of the ignore bit.
When the ignore bit is cleared, the driver must issue an MMIO read to
ensure all writes have arrived, and then check for and process any
additional completions. The ignore bit does not queue up pending MSIX
interrupts; the mask bit, however, does. Use the interrupt subsystem API
to mask the MSIX interrupt, since the hardware does not provide a
convenient mask bit register.

Suggested-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/159319517621.70410.11816465052708900506.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
commit 4548a6ad3d (parent 0d5c10b4c8)
Author:    Dave Jiang <dave.jiang@intel.com>
Date:      2020-06-26 11:12:56 -07:00
Committer: Vinod Koul <vkoul@kernel.org>

3 changed files with 13 additions and 46 deletions
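For context, the approach the patch adopts is the generic PCI MSI one: look up the struct irq_data behind the Linux IRQ assigned to the MSI-X entry, then let the PCI core toggle the per-vector mask bit in the device's MSI-X table. A minimal sketch of that pattern follows; the idxd field names come from the diff below, while example_mask_vector and the NULL check are illustrative additions, not the driver's code as merged:

/*
 * Mask one vector via the IRQ subsystem. pci_msi_mask_irq() sets the
 * per-vector mask bit in the MSI-X table; an interrupt raised while
 * masked is latched as pending and delivered on unmask.
 */
static void example_mask_vector(struct idxd_device *idxd, int vec_id)
{
        struct irq_data *data;

        data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
        if (data)
                pci_msi_mask_irq(data);
}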

drivers/dma/idxd/device.c

@@ -6,6 +6,8 @@
 #include <linux/pci.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/dmaengine.h>
+#include <linux/irq.h>
+#include <linux/msi.h>
 #include <uapi/linux/idxd.h>
 #include "../dmaengine.h"
 #include "idxd.h"
@@ -15,61 +17,28 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
                           u32 *status);
 
 /* Interrupt control bits */
-int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
+void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
 {
-        struct pci_dev *pdev = idxd->pdev;
-        int msixcnt = pci_msix_vec_count(pdev);
-        union msix_perm perm;
-        u32 offset;
+        struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
 
-        if (vec_id < 0 || vec_id >= msixcnt)
-                return -EINVAL;
-
-        offset = idxd->msix_perm_offset + vec_id * 8;
-        perm.bits = ioread32(idxd->reg_base + offset);
-        perm.ignore = 1;
-        iowrite32(perm.bits, idxd->reg_base + offset);
-
-        return 0;
+        pci_msi_mask_irq(data);
 }
 
 void idxd_mask_msix_vectors(struct idxd_device *idxd)
 {
         struct pci_dev *pdev = idxd->pdev;
         int msixcnt = pci_msix_vec_count(pdev);
-        int i, rc;
+        int i;
 
-        for (i = 0; i < msixcnt; i++) {
-                rc = idxd_mask_msix_vector(idxd, i);
-                if (rc < 0)
-                        dev_warn(&pdev->dev,
-                                 "Failed disabling msix vec %d\n", i);
-        }
+        for (i = 0; i < msixcnt; i++)
+                idxd_mask_msix_vector(idxd, i);
 }
 
-int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
+void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
 {
-        struct pci_dev *pdev = idxd->pdev;
-        int msixcnt = pci_msix_vec_count(pdev);
-        union msix_perm perm;
-        u32 offset;
+        struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
 
-        if (vec_id < 0 || vec_id >= msixcnt)
-                return -EINVAL;
-
-        offset = idxd->msix_perm_offset + vec_id * 8;
-        perm.bits = ioread32(idxd->reg_base + offset);
-        perm.ignore = 0;
-        iowrite32(perm.bits, idxd->reg_base + offset);
-
-        /*
-         * A readback from the device ensures that any previously generated
-         * completion record writes are visible to software based on PCI
-         * ordering rules.
-         */
-        perm.bits = ioread32(idxd->reg_base + offset);
-
-        return 0;
+        pci_msi_unmask_irq(data);
 }
 
 void idxd_unmask_error_interrupts(struct idxd_device *idxd)
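The readback deleted above is worth a note: it was a classic posted-write flush. Per PCI ordering rules, the completion for an MMIO read cannot pass writes the device issued earlier, so once the read returns, prior device writes (here, completion records in host memory) are visible to the CPU. With the mask bit this becomes unnecessary, because events raised while masked stay pending in hardware. A generic sketch of the old pattern, with a hypothetical helper name:

/*
 * Posted-write flush: the read completion cannot pass writes previously
 * issued by the device, so earlier device writes (e.g. completion
 * records) are guaranteed visible after this returns. The value read
 * is irrelevant and discarded.
 */
static inline void flush_posted_writes(void __iomem *reg)
{
        (void)ioread32(reg);
}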

drivers/dma/idxd/idxd.h

@@ -273,8 +273,8 @@ irqreturn_t idxd_wq_thread(int irq, void *data);
 void idxd_mask_error_interrupts(struct idxd_device *idxd);
 void idxd_unmask_error_interrupts(struct idxd_device *idxd);
 void idxd_mask_msix_vectors(struct idxd_device *idxd);
-int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
-int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
+void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
+void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
 
 /* device control */
 void idxd_device_init_reset(struct idxd_device *idxd);

drivers/dma/idxd/irq.c

@@ -260,8 +260,6 @@ irqreturn_t idxd_wq_thread(int irq, void *data)
 
         processed = idxd_desc_process(irq_entry);
         idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);
-        /* catch anything unprocessed after unmasking */
-        processed += idxd_desc_process(irq_entry);
 
         if (processed == 0)
                 return IRQ_NONE;
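The deleted catch-up pass is the payoff of the switch: with the ignore bit, an interrupt raised while ignored was simply dropped, so the thread had to re-scan after re-enabling; with the mask bit, the event stays pending and the handler runs again after unmask. A sketch of the resulting thread-half flow, mirroring the function this hunk modifies (example_wq_thread is an illustrative name; the hard-IRQ half that masks the vector before waking the thread is assumed):

irqreturn_t example_wq_thread(int irq, void *data)
{
        struct idxd_irq_entry *irq_entry = data;
        int processed;

        /* Drain every completion visible at this point. */
        processed = idxd_desc_process(irq_entry);

        /*
         * Completions that arrived while the vector was masked are
         * latched in the MSI-X pending bit array and will re-fire the
         * interrupt after this unmask, so no second sweep is needed.
         */
        idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);

        return processed ? IRQ_HANDLED : IRQ_NONE;
}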