dmaengine: ioat: Support in-use unbind

Don't allocate memory using the devm infrastructure; instead, free it
with kfree() in the new dmaengine device_release callback. This ensures
the structures remain available until the last reference is dropped.
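
For reference, the lifetime pattern being adopted looks roughly like the
sketch below. The names example_release()/example_alloc() are illustrative
only; the real hook is the dma_device->device_release callback, and the
real code is in the diff that follows.

static void example_release(struct dma_device *device)
{
        struct ioatdma_device *d = to_ioatdma_device(device);

        /* Invoked by the dmaengine core only when the last reference
         * to the dma_device is dropped, which can be long after the
         * PCI ->remove() callback if a client still holds a channel.
         */
        kfree(d);
}

static struct ioatdma_device *example_alloc(struct pci_dev *pdev)
{
        /* Plain kzalloc() rather than devm_kzalloc(): the allocation
         * must be able to outlive the device unbind, so it cannot be
         * tied to the managed-resource lifetime of the PCI device.
         */
        struct ioatdma_device *d = kzalloc(sizeof(*d), GFP_KERNEL);

        if (!d)
                return NULL;
        d->dma_dev.device_release = example_release;
        return d;
}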

We also need to ensure we call ioat_shutdown() in ioat_remove() so
that all the channels are quiesced and further transactions fail.
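
ioat_shutdown() quiesces the channels by marking each one IOAT_CHAN_DOWN,
and the driver's existing prep paths refuse new descriptors once that bit
is set, along the lines of this simplified sketch (pre-existing behaviour
shown only to illustrate how further transactions fail; example_prep_memcpy()
is a hypothetical name, not part of this patch):

static struct dma_async_tx_descriptor *
example_prep_memcpy(struct dma_chan *c, dma_addr_t dst,
                    dma_addr_t src, size_t len, unsigned long flags)
{
        struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

        /* A channel taken down by ioat_shutdown() accepts no new
         * work; returning NULL makes every subsequent prep fail.
         */
        if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
                return NULL;

        /* ... otherwise build and return a descriptor as usual ... */
        return NULL;
}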

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Acked-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/20191216190120.21374-6-logang@deltatee.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>

--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -556,10 +556,6 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
         ioat_kobject_del(ioat_dma);
 
         dma_async_device_unregister(dma);
-
-        dma_pool_destroy(ioat_dma->completion_pool);
-
-        INIT_LIST_HEAD(&dma->channels);
 }
 
 /**
@@ -589,7 +585,7 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
         dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
 
         for (i = 0; i < dma->chancnt; i++) {
-                ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
+                ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
                 if (!ioat_chan)
                         break;
 
@@ -624,12 +620,16 @@ static void ioat_free_chan_resources(struct dma_chan *c)
                 return;
 
         ioat_stop(ioat_chan);
-        ioat_reset_hw(ioat_chan);
 
-        /* Put LTR to idle */
-        if (ioat_dma->version >= IOAT_VER_3_4)
-                writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
-                        ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET);
+        if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) {
+                ioat_reset_hw(ioat_chan);
+
+                /* Put LTR to idle */
+                if (ioat_dma->version >= IOAT_VER_3_4)
+                        writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
+                               ioat_chan->reg_base +
+                               IOAT_CHAN_LTR_SWSEL_OFFSET);
+        }
 
         spin_lock_bh(&ioat_chan->cleanup_lock);
         spin_lock_bh(&ioat_chan->prep_lock);
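
Note the new test_bit() gate: once ioat_remove() calls ioat_shutdown(),
every channel is already marked IOAT_CHAN_DOWN and the hardware has been
quiesced, so ioat_free_chan_resources() must not reset the channel or
touch the LTR registers a second time.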
@@ -1322,16 +1322,28 @@ static struct pci_driver ioat_pci_driver = {
         .err_handler = &ioat_err_handler,
 };
 
+static void release_ioatdma(struct dma_device *device)
+{
+        struct ioatdma_device *d = to_ioatdma_device(device);
+        int i;
+
+        for (i = 0; i < IOAT_MAX_CHANS; i++)
+                kfree(d->idx[i]);
+
+        dma_pool_destroy(d->completion_pool);
+        kfree(d);
+}
+
 static struct ioatdma_device *
 alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
 {
-        struct device *dev = &pdev->dev;
-        struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+        struct ioatdma_device *d = kzalloc(sizeof(*d), GFP_KERNEL);
 
         if (!d)
                 return NULL;
         d->pdev = pdev;
         d->reg_base = iobase;
+        d->dma_dev.device_release = release_ioatdma;
         return d;
 }
 
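
This is also where the cleanup removed from ioat_dma_remove() lands: the
per-channel structures stored in d->idx[] (now allocated with plain
kzalloc() above) and the completion pool have to stay alive until the
last dma_device reference goes away, so they are freed in
release_ioatdma() rather than at unbind time.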
@@ -1400,6 +1412,8 @@ static void ioat_remove(struct pci_dev *pdev)
         if (!device)
                 return;
 
+        ioat_shutdown(pdev);
+
         dev_err(&pdev->dev, "Removing dma and dca services\n");
         if (device->dca) {
                 unregister_dca_provider(device->dca, &pdev->dev);