crypto: qat - protect interrupt mask CSRs with a spinlock

In the PF interrupt handler, the interrupt is disabled for a set of VFs
by writing to the interrupt source mask register, ERRMSK.
The interrupt is re-enabled in the bottom half handler by writing to the
same CSR. This is done through the functions enable_vf2pf_interrupts()
and disable_vf2pf_interrupts(), which perform a read-modify-write
operation on the ERRMSK registers to mask and unmask the interrupt
source.

There can be a race condition where the top half handler for one VF
interrupt runs just as the bottom half for another VF is about to
re-enable the interrupt. Since both paths perform an unlocked
read-modify-write of the same CSR, whichever write lands last overwrites
the other update, resulting either in a spurious interrupt or in the
interrupt not being re-enabled.
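
For illustration, below is a minimal, self-contained sketch of the
unserialized read-modify-write pattern. It is not the driver code: the
errmsk variable and the two helpers stand in for the real CSR and its
accessors. If the two helpers interleave, the write that lands last is
based on a stale read and silently undoes the other update.

#include <stdint.h>

/* Shared interrupt source mask CSR (simulated by a plain variable). */
static volatile uint32_t errmsk;

/* Top half: mask the VF2PF sources in vf_mask. */
static void disable_vf2pf(uint32_t vf_mask)
{
	uint32_t val = errmsk;	/* read */

	val |= vf_mask;		/* modify: set the mask bits */
	errmsk = val;		/* write back */
}

/* Bottom half: unmask the VF2PF sources in vf_mask. */
static void enable_vf2pf(uint32_t vf_mask)
{
	uint32_t val = errmsk;	/* read */

	val &= ~vf_mask;	/* modify: clear the mask bits */
	errmsk = val;		/* write back */
}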

This patch protects accesses to the ERRMSK registers with a spinlock.

The functions adf_enable_vf2pf_interrupts() and
adf_disable_vf2pf_interrupts() have been changed to acquire a spinlock
before accessing and modifying the ERRMSK registers. These functions use
spin_lock_irqsave() to disable IRQs and avoid potential deadlocks.
In addition, the function adf_disable_vf2pf_interrupts_irq() has been
added. It uses spin_lock() and is meant to be called only from the top
half, where interrupts are already disabled.
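
For reference, a sketch of the intended calling contexts follows. The
caller names and the pending_vf2pf_sources() helper are illustrative
only; the real call sites are adf_msix_isr_ae() and adf_vf2pf_req_hndl()
in the diff below.

/* PF top half (hard IRQ context): local interrupts are already
 * disabled, so the plain spin_lock() variant is used. */
static irqreturn_t pf_isr(int irq, void *dev_ptr)
{
	struct adf_accel_dev *accel_dev = dev_ptr;
	u32 vf_mask = pending_vf2pf_sources(accel_dev);	/* illustrative helper */

	adf_disable_vf2pf_interrupts_irq(accel_dev, vf_mask);
	/* ... defer the actual message handling to the bottom half ... */
	return IRQ_HANDLED;
}

/* Bottom half: may run with interrupts enabled, so the lock is taken
 * with spin_lock_irqsave() inside adf_enable_vf2pf_interrupts(). */
static void pf_bottom_half(struct adf_accel_dev *accel_dev, u32 vf_nr)
{
	/* ... process the VF2PF message ... */
	adf_enable_vf2pf_interrupts(accel_dev, 1 << vf_nr);
}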

Signed-off-by: Kanchana Velusamy <kanchanax.velusamy@intel.com>
Co-developed-by: Marco Chiappero <marco.chiappero@intel.com>
Signed-off-by: Marco Chiappero <marco.chiappero@intel.com>
Co-developed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Fiona Trahe <fiona.trahe@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Kanchana Velusamy 2021-08-12 21:21:21 +01:00 committed by Herbert Xu
parent 9800678f05
commit 07df385e64
7 changed files with 41 additions and 4 deletions

View file

@@ -161,6 +161,8 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
 
 static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
 {
+	spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
 	return 0;
 }
 

View file

@@ -163,6 +163,8 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
 
 static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
 {
+	spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
 	return 0;
 }
 

View file

@@ -246,6 +246,8 @@ struct adf_accel_dev {
 	struct adf_accel_pci accel_pci_dev;
 	union {
 		struct {
+			/* protects VF2PF interrupts access */
+			spinlock_t vf2pf_ints_lock;
 			/* vf_info is non-zero when SR-IOV is init'ed */
 			struct adf_accel_vf_info *vf_info;
 		} pf;

View file

@@ -193,6 +193,8 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
 void adf_disable_sriov(struct adf_accel_dev *accel_dev);
 void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
 				  u32 vf_mask);
+void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
+				      u32 vf_mask);
 void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
 				 u32 vf_mask);
 void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);

View file

@@ -103,7 +103,7 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
 		int i;
 
 		/* Disable VF2PF interrupts for VFs with pending ints */
-		adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+		adf_disable_vf2pf_interrupts_irq(accel_dev, vf_mask);
 
 		/*
 		 * Handle VF2PF interrupt unless the VF is malicious and

View file

@@ -11,8 +11,8 @@
 #define ADF_DH895XCC_ERRMSK5 (ADF_DH895XCC_EP_OFFSET + 0xDC)
 #define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
 
-void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
-				 u32 vf_mask)
+static void __adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+					  u32 vf_mask)
 {
 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 	struct adf_bar *pmisc =
@@ -35,7 +35,17 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
 	}
 }
 
-void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
+	__adf_enable_vf2pf_interrupts(accel_dev, vf_mask);
+	spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
+}
+
+static void __adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+					   u32 vf_mask)
 {
 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 	struct adf_bar *pmisc =
@@ -58,6 +68,22 @@ void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
 	}
 }
 
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
+	__adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+	spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
+}
+
+void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+	spin_lock(&accel_dev->pf.vf2pf_ints_lock);
+	__adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+	spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
+}
+
 static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
 {
 	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
@@ -264,6 +290,7 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
 
 	/* re-enable interrupt on PF from this VF */
 	adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
+
 	return;
 err:
 	dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",

View file

@@ -182,6 +182,8 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
 
 static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
 {
+	spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
 	return 0;
 }
 