Merge tag 'irq-urgent-2022-02-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fix from Thomas Gleixner:
 "A single fix for a regression caused by the recent PCI/MSI rework
  which resulted in a recursive locking problem in the VMD driver.

  The cure is to cache the relevant information upfront instead of
  retrieving it at runtime"

* tag 'irq-urgent-2022-02-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  PCI: vmd: Prevent recursive locking on interrupt allocation
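
To make the one-line description more concrete, here is a minimal, hypothetical C sketch of the "cache upfront instead of retrieving at runtime" pattern; the names (struct parent_vector, cache_parent_virqs, parent_virq) are invented for illustration and are not the VMD driver's identifiers. The idea is that pci_irq_vector() is called once per vector at probe time, when no interrupt allocation is in progress, and the allocation path later only reads the cached Linux IRQ number instead of performing a lookup that re-enters MSI locking:

#include <linux/pci.h>

/* Illustrative sketch only, not the VMD driver's actual code. */
struct parent_vector {
	unsigned int virq;	/* Linux IRQ number cached at probe time */
};

/*
 * Probe time: querying pci_irq_vector() here is safe because no child
 * interrupt allocation is in flight at this point.
 */
static void cache_parent_virqs(struct pci_dev *pdev,
			       struct parent_vector *vec, int count)
{
	int i;

	for (i = 0; i < count; i++)
		vec[i].virq = pci_irq_vector(pdev, i);
}

/*
 * Child interrupt allocation: previously this path called
 * pci_irq_vector() again, which after the PCI/MSI rework re-enters the
 * MSI locking already held for the ongoing allocation and triggers the
 * recursive-locking report. Reading the cached value avoids the lookup.
 */
static unsigned int parent_virq(struct parent_vector *vec, int index)
{
	return vec[index].virq;
}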
Linus Torvalds 2022-02-27 13:07:40 -08:00
commit 52a0255467
1 changed file with 7 additions and 7 deletions


@@ -99,11 +99,13 @@ struct vmd_irq {
  * @srcu:	SRCU struct for local synchronization.
  * @count:	number of child IRQs assigned to this vector; used to track
  *		sharing.
+ * @virq:	The underlying VMD Linux interrupt number
  */
 struct vmd_irq_list {
 	struct list_head	irq_list;
 	struct srcu_struct	srcu;
 	unsigned int		count;
+	unsigned int		virq;
 };
 
 struct vmd_dev {
@@ -253,7 +255,6 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
 	struct msi_desc *desc = arg->desc;
 	struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
 	struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
-	unsigned int index, vector;
 
 	if (!vmdirq)
 		return -ENOMEM;
@@ -261,10 +262,8 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
 	INIT_LIST_HEAD(&vmdirq->node);
 	vmdirq->irq = vmd_next_irq(vmd, desc);
 	vmdirq->virq = virq;
-	index = index_from_irqs(vmd, vmdirq->irq);
-	vector = pci_irq_vector(vmd->dev, index);
 
-	irq_domain_set_info(domain, virq, vector, info->chip, vmdirq,
+	irq_domain_set_info(domain, virq, vmdirq->irq->virq, info->chip, vmdirq,
 			    handle_untracked_irq, vmd, NULL);
 	return 0;
 }
@@ -685,7 +684,8 @@ static int vmd_alloc_irqs(struct vmd_dev *vmd)
 			return err;
 
 		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
-		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
+		vmd->irqs[i].virq = pci_irq_vector(dev, i);
+		err = devm_request_irq(&dev->dev, vmd->irqs[i].virq,
 				       vmd_irq, IRQF_NO_THREAD,
 				       vmd->name, &vmd->irqs[i]);
 		if (err)
@@ -969,7 +969,7 @@ static int vmd_suspend(struct device *dev)
 	int i;
 
 	for (i = 0; i < vmd->msix_count; i++)
-		devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);
+		devm_free_irq(dev, vmd->irqs[i].virq, &vmd->irqs[i]);
 
 	return 0;
 }
@@ -981,7 +981,7 @@ static int vmd_resume(struct device *dev)
 	int err, i;
 
 	for (i = 0; i < vmd->msix_count; i++) {
-		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
+		err = devm_request_irq(dev, vmd->irqs[i].virq,
 				       vmd_irq, IRQF_NO_THREAD,
 				       vmd->name, &vmd->irqs[i]);
 		if (err)