NVMe: Add MSI support

Some devices only have support for MSI, not MSI-X.  While MSI is more
limited, it still provides better performance than line-based interrupts.

Signed-off-by: Ramachandra Gajula <rama@fastorsystems.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
This commit is contained in:
Ramachandra Rao Gajula 2013-05-11 15:19:31 -07:00 committed by Matthew Wilcox
parent cf9f123b38
commit fa08a39664

View file

@@ -1637,7 +1637,8 @@ static int set_queue_count(struct nvme_dev *dev, int count)
 
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
-	int result, cpu, i, nr_io_queues, db_bar_size, q_depth;
+	struct pci_dev *pdev = dev->pci_dev;
+	int result, cpu, i, nr_io_queues, db_bar_size, q_depth, q_count;
 
 	nr_io_queues = num_online_cpus();
 	result = set_queue_count(dev, nr_io_queues);
@@ -1646,14 +1647,14 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	if (result < nr_io_queues)
 		nr_io_queues = result;
 
+	q_count = nr_io_queues;
 	/* Deregister the admin queue's interrupt */
 	free_irq(dev->entry[0].vector, dev->queues[0]);
 
 	db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
 	if (db_bar_size > 8192) {
 		iounmap(dev->bar);
-		dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
-								db_bar_size);
+		dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size);
 		dev->dbs = ((void __iomem *)dev->bar) + 4096;
 		dev->queues[0]->q_db = dev->dbs;
 	}
@@ -1661,19 +1662,36 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	for (i = 0; i < nr_io_queues; i++)
 		dev->entry[i].entry = i;
 	for (;;) {
-		result = pci_enable_msix(dev->pci_dev, dev->entry,
-								nr_io_queues);
+		result = pci_enable_msix(pdev, dev->entry, nr_io_queues);
 		if (result == 0) {
 			break;
 		} else if (result > 0) {
 			nr_io_queues = result;
 			continue;
 		} else {
-			nr_io_queues = 1;
+			nr_io_queues = 0;
 			break;
 		}
 	}
 
+	if (nr_io_queues == 0) {
+		nr_io_queues = q_count;
+		for (;;) {
+			result = pci_enable_msi_block(pdev, nr_io_queues);
+			if (result == 0) {
+				for (i = 0; i < nr_io_queues; i++)
+					dev->entry[i].vector = i + pdev->irq;
+				break;
+			} else if (result > 0) {
+				nr_io_queues = result;
+				continue;
+			} else {
+				nr_io_queues = 1;
+				break;
+			}
+		}
+	}
+
 	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
 	/* XXX: handle failure here */
@@ -1854,7 +1872,10 @@ static void nvme_free_dev(struct kref *kref)
 {
 	struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
 	nvme_dev_remove(dev);
-	pci_disable_msix(dev->pci_dev);
+	if (dev->pci_dev->msi_enabled)
+		pci_disable_msi(dev->pci_dev);
+	else if (dev->pci_dev->msix_enabled)
+		pci_disable_msix(dev->pci_dev);
 	iounmap(dev->bar);
 	nvme_release_instance(dev);
 	nvme_release_prp_pools(dev);
@@ -1987,7 +2008,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  unmap:
 	iounmap(dev->bar);
  disable_msix:
-	pci_disable_msix(pdev);
+	if (dev->pci_dev->msi_enabled)
+		pci_disable_msi(dev->pci_dev);
+	else if (dev->pci_dev->msix_enabled)
+		pci_disable_msix(dev->pci_dev);
 	nvme_release_instance(dev);
 	nvme_release_prp_pools(dev);
  disable: