dmaengine: ioatdma: adding shutdown support

The ioatdma device needs to be quiesced and all additional op submission blocked
during reboot. When NET_DMA was in use, this caused issues: ops were still being
submitted to the ioatdma during reboot even though PCI bus mastering (BME) had
already been turned off. NET_DMA has since been deprecated, but we still need to
prevent similar situations. The shutdown handler addresses that.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
commit ad4a7b5065 (parent 6ff33f3902)
Dave Jiang, 2015-08-26 13:17:24 -07:00; committed by Vinod Koul
4 changed files with 66 additions and 3 deletions
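
For context, the patch follows the standard PCI .shutdown pattern: mark each channel as down under the submission lock so later descriptor-prep calls return NULL, stop the channel timer, and quiesce the hardware before the system reboots. The sketch below illustrates that pattern in a generic, hypothetical driver; the mydrv_* names, the struct layout, and the mydrv_quiesce_hw() helper are illustrative assumptions and are not part of the ioatdma driver.

#include <linux/bitops.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

#define MYDRV_CHAN_DOWN 0       /* state bit: channel refuses new work */

struct mydrv_chan {
        spinlock_t prep_lock;           /* serializes descriptor preparation */
        unsigned long state;
        struct timer_list timer;
};

struct mydrv_device {
        struct mydrv_chan chan;         /* a single channel, for brevity */
};

/* Placeholder for a real hardware quiesce/reset sequence. */
static void mydrv_quiesce_hw(struct mydrv_chan *chan)
{
}

/* Prep path: refuse new work once the channel has been marked down. */
static struct dma_async_tx_descriptor *mydrv_prep_op(struct mydrv_chan *chan)
{
        if (test_bit(MYDRV_CHAN_DOWN, &chan->state))
                return NULL;
        /* ... allocate and return a descriptor here ... */
        return NULL;
}

/* PCI .shutdown: block new submissions, kill timers, quiesce the engine. */
static void mydrv_shutdown(struct pci_dev *pdev)
{
        struct mydrv_device *dev = pci_get_drvdata(pdev);

        if (!dev)
                return;

        spin_lock_bh(&dev->chan.prep_lock);
        set_bit(MYDRV_CHAN_DOWN, &dev->chan.state);
        spin_unlock_bh(&dev->chan.prep_lock);

        del_timer_sync(&dev->chan.timer);
        mydrv_quiesce_hw(&dev->chan);
}

static struct pci_driver mydrv_pci_driver = {
        .name     = "mydrv",
        /* .id_table, .probe and .remove omitted from this sketch */
        .shutdown = mydrv_shutdown,
};

Setting the down bit under prep_lock ensures that any prep call already holding the lock finishes before shutdown proceeds, while any later call sees the bit and bails out.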


@@ -197,7 +197,8 @@ static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
 void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
 {
         spin_lock_bh(&ioat_chan->prep_lock);
-        __ioat_start_null_desc(ioat_chan);
+        if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+                __ioat_start_null_desc(ioat_chan);
         spin_unlock_bh(&ioat_chan->prep_lock);
 }


@@ -82,8 +82,9 @@ struct ioatdma_device {
         struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
         struct dma_device dma_dev;
         u8 version;
-        struct msix_entry msix_entries[4];
-        struct ioatdma_chan *idx[4];
+#define IOAT_MAX_CHANS 4
+        struct msix_entry msix_entries[IOAT_MAX_CHANS];
+        struct ioatdma_chan *idx[IOAT_MAX_CHANS];
         struct dca_provider *dca;
         enum ioat_irq_mode irq_mode;
         u32 cap;
@@ -95,6 +96,7 @@ struct ioatdma_chan {
         dma_addr_t last_completion;
         spinlock_t cleanup_lock;
         unsigned long state;
+        #define IOAT_CHAN_DOWN 0
         #define IOAT_COMPLETION_ACK 1
         #define IOAT_RESET_PENDING 2
         #define IOAT_KOBJ_INIT_FAIL 3


@@ -1186,6 +1186,31 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
         return 0;
 }
 
+static void ioat_shutdown(struct pci_dev *pdev)
+{
+        struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);
+        struct ioatdma_chan *ioat_chan;
+        int i;
+
+        if (!ioat_dma)
+                return;
+
+        for (i = 0; i < IOAT_MAX_CHANS; i++) {
+                ioat_chan = ioat_dma->idx[i];
+                if (!ioat_chan)
+                        continue;
+
+                spin_lock_bh(&ioat_chan->prep_lock);
+                set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+                del_timer_sync(&ioat_chan->timer);
+                spin_unlock_bh(&ioat_chan->prep_lock);
+                /* this should quiesce then reset */
+                ioat_reset_hw(ioat_chan);
+        }
+
+        ioat_disable_interrupts(ioat_dma);
+}
+
 #define DRV_NAME "ioatdma"
 
 static struct pci_driver ioat_pci_driver = {
@@ -1193,6 +1218,7 @@ static struct pci_driver ioat_pci_driver = {
         .id_table = ioat_pci_tbl,
         .probe = ioat_pci_probe,
         .remove = ioat_remove,
+        .shutdown = ioat_shutdown,
 };
 
 static struct ioatdma_device *


@@ -121,6 +121,9 @@ ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
         size_t total_len = len;
         int num_descs, idx, i;
 
+        if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+                return NULL;
+
         num_descs = ioat_xferlen_to_descs(ioat_chan, len);
         if (likely(num_descs) &&
             ioat_check_space_lock(ioat_chan, num_descs) == 0)
@@ -254,6 +257,11 @@ struct dma_async_tx_descriptor *
 ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
               unsigned int src_cnt, size_t len, unsigned long flags)
 {
+        struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+        if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+                return NULL;
+
         return __ioat_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
 }
@@ -262,6 +270,11 @@ ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
                   unsigned int src_cnt, size_t len,
                   enum sum_check_flags *result, unsigned long flags)
 {
+        struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+        if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+                return NULL;
+
         /* the cleanup routine only sets bits on validate failure, it
          * does not clear bits on validate success... so clear it here
          */
@@ -574,6 +587,11 @@ ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
              unsigned int src_cnt, const unsigned char *scf, size_t len,
              unsigned long flags)
 {
+        struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+        if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+                return NULL;
+
         /* specify valid address for disabled result */
         if (flags & DMA_PREP_PQ_DISABLE_P)
                 dst[0] = dst[1];
@@ -614,6 +632,11 @@ ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
                  unsigned int src_cnt, const unsigned char *scf, size_t len,
                  enum sum_check_flags *pqres, unsigned long flags)
 {
+        struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+        if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+                return NULL;
+
         /* specify valid address for disabled result */
         if (flags & DMA_PREP_PQ_DISABLE_P)
                 pq[0] = pq[1];
@@ -638,6 +661,10 @@ ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
 {
         unsigned char scf[MAX_SCF];
         dma_addr_t pq[2];
+        struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+        if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+                return NULL;
 
         if (src_cnt > MAX_SCF)
                 return NULL;
@@ -661,6 +688,10 @@ ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
 {
         unsigned char scf[MAX_SCF];
         dma_addr_t pq[2];
+        struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+        if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+                return NULL;
 
         if (src_cnt > MAX_SCF)
                 return NULL;
@@ -689,6 +720,9 @@ ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
         struct ioat_ring_ent *desc;
         struct ioat_dma_descriptor *hw;
 
+        if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+                return NULL;
+
         if (ioat_check_space_lock(ioat_chan, 1) == 0)
                 desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
         else