b43: Fix DMA for 30/32-bit DMA engines

This patch checks whether a DMA address is larger than what the DMA controller can address.
If it is, the buffers are reallocated in the GFP_DMA zone.

Signed-off-by: Michael Buesch <mb@bu3sch.de>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
This commit is contained in:
Michael Buesch 2008-02-05 12:50:41 +01:00 committed by John W. Linville
parent 532031d7f4
commit b79caa68c0
2 changed files with 99 additions and 58 deletions

View file

@ -337,7 +337,7 @@ static inline int txring_to_priority(struct b43_dmaring *ring)
return idx_to_prio[index]; return idx_to_prio[index];
} }
u16 b43_dmacontroller_base(int dma64bit, int controller_idx) static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{ {
static const u16 map64[] = { static const u16 map64[] = {
B43_MMIO_DMA64_BASE0, B43_MMIO_DMA64_BASE0,
@ -356,7 +356,7 @@ u16 b43_dmacontroller_base(int dma64bit, int controller_idx)
B43_MMIO_DMA32_BASE5, B43_MMIO_DMA32_BASE5,
}; };
if (dma64bit) { if (type == B43_DMA_64BIT) {
B43_WARN_ON(!(controller_idx >= 0 && B43_WARN_ON(!(controller_idx >= 0 &&
controller_idx < ARRAY_SIZE(map64))); controller_idx < ARRAY_SIZE(map64)));
return map64[controller_idx]; return map64[controller_idx];
@ -437,7 +437,7 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
* 02, which uses 64-bit DMA, needs the ring buffer in very low memory, * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
* which accounts for the GFP_DMA flag below. * which accounts for the GFP_DMA flag below.
*/ */
if (ring->dma64) if (ring->type == B43_DMA_64BIT)
flags |= GFP_DMA; flags |= GFP_DMA;
ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE, ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE,
&(ring->dmabase), flags); &(ring->dmabase), flags);
@ -459,7 +459,8 @@ static void free_ringmemory(struct b43_dmaring *ring)
} }
/* Reset the RX DMA channel */ /* Reset the RX DMA channel */
int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64) static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
enum b43_dmatype type)
{ {
int i; int i;
u32 value; u32 value;
@ -467,12 +468,13 @@ int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
might_sleep(); might_sleep();
offset = dma64 ? B43_DMA64_RXCTL : B43_DMA32_RXCTL; offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
b43_write32(dev, mmio_base + offset, 0); b43_write32(dev, mmio_base + offset, 0);
for (i = 0; i < 10; i++) { for (i = 0; i < 10; i++) {
offset = dma64 ? B43_DMA64_RXSTATUS : B43_DMA32_RXSTATUS; offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
B43_DMA32_RXSTATUS;
value = b43_read32(dev, mmio_base + offset); value = b43_read32(dev, mmio_base + offset);
if (dma64) { if (type == B43_DMA_64BIT) {
value &= B43_DMA64_RXSTAT; value &= B43_DMA64_RXSTAT;
if (value == B43_DMA64_RXSTAT_DISABLED) { if (value == B43_DMA64_RXSTAT_DISABLED) {
i = -1; i = -1;
@ -496,7 +498,8 @@ int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
} }
/* Reset the TX DMA channel */ /* Reset the TX DMA channel */
int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64) static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
enum b43_dmatype type)
{ {
int i; int i;
u32 value; u32 value;
@ -505,9 +508,10 @@ int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
might_sleep(); might_sleep();
for (i = 0; i < 10; i++) { for (i = 0; i < 10; i++) {
offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS; offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
B43_DMA32_TXSTATUS;
value = b43_read32(dev, mmio_base + offset); value = b43_read32(dev, mmio_base + offset);
if (dma64) { if (type == B43_DMA_64BIT) {
value &= B43_DMA64_TXSTAT; value &= B43_DMA64_TXSTAT;
if (value == B43_DMA64_TXSTAT_DISABLED || if (value == B43_DMA64_TXSTAT_DISABLED ||
value == B43_DMA64_TXSTAT_IDLEWAIT || value == B43_DMA64_TXSTAT_IDLEWAIT ||
@ -522,12 +526,13 @@ int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
} }
msleep(1); msleep(1);
} }
offset = dma64 ? B43_DMA64_TXCTL : B43_DMA32_TXCTL; offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
b43_write32(dev, mmio_base + offset, 0); b43_write32(dev, mmio_base + offset, 0);
for (i = 0; i < 10; i++) { for (i = 0; i < 10; i++) {
offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS; offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
B43_DMA32_TXSTATUS;
value = b43_read32(dev, mmio_base + offset); value = b43_read32(dev, mmio_base + offset);
if (dma64) { if (type == B43_DMA_64BIT) {
value &= B43_DMA64_TXSTAT; value &= B43_DMA64_TXSTAT;
if (value == B43_DMA64_TXSTAT_DISABLED) { if (value == B43_DMA64_TXSTAT_DISABLED) {
i = -1; i = -1;
@ -552,6 +557,33 @@ int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
return 0; return 0;
} }
/* Check if a DMA mapping address is invalid for this ring's engine.
 * @ring:       the DMA ring whose engine type determines the address limit.
 * @addr:       the bus address returned by the DMA mapping call.
 * @buffersize: length of the mapped buffer in bytes.
 * Returns true if the mapping failed or the buffer does not fit within
 * the engine's addressable range, false if the address is usable. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize)
{
	/* First, did the core DMA API itself flag the mapping as failed? */
	if (unlikely(dma_mapping_error(addr)))
		return true;

	/* The engine can only drive a limited number of address bits
	 * (ring->type, see enum b43_dmatype). The whole buffer,
	 * addr .. addr + buffersize, must fit below that limit. */
	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			return true;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			return true;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return false;
}
static int setup_rx_descbuffer(struct b43_dmaring *ring, static int setup_rx_descbuffer(struct b43_dmaring *ring,
struct b43_dmadesc_generic *desc, struct b43_dmadesc_generic *desc,
struct b43_dmadesc_meta *meta, gfp_t gfp_flags) struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
@ -567,7 +599,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
if (unlikely(!skb)) if (unlikely(!skb))
return -ENOMEM; return -ENOMEM;
dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0); dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
if (dma_mapping_error(dmaaddr)) { if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
/* ugh. try to realloc in zone_dma */ /* ugh. try to realloc in zone_dma */
gfp_flags |= GFP_DMA; gfp_flags |= GFP_DMA;
@ -580,7 +612,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
ring->rx_buffersize, 0); ring->rx_buffersize, 0);
} }
if (dma_mapping_error(dmaaddr)) { if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
return -EIO; return -EIO;
} }
@ -645,7 +677,7 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
u32 trans = ssb_dma_translation(ring->dev->dev); u32 trans = ssb_dma_translation(ring->dev->dev);
if (ring->tx) { if (ring->tx) {
if (ring->dma64) { if (ring->type == B43_DMA_64BIT) {
u64 ringbase = (u64) (ring->dmabase); u64 ringbase = (u64) (ring->dmabase);
addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
@ -677,7 +709,7 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
err = alloc_initial_descbuffers(ring); err = alloc_initial_descbuffers(ring);
if (err) if (err)
goto out; goto out;
if (ring->dma64) { if (ring->type == B43_DMA_64BIT) {
u64 ringbase = (u64) (ring->dmabase); u64 ringbase = (u64) (ring->dmabase);
addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
@ -722,16 +754,16 @@ static void dmacontroller_cleanup(struct b43_dmaring *ring)
{ {
if (ring->tx) { if (ring->tx) {
b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base, b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
ring->dma64); ring->type);
if (ring->dma64) { if (ring->type == B43_DMA_64BIT) {
b43_dma_write(ring, B43_DMA64_TXRINGLO, 0); b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
b43_dma_write(ring, B43_DMA64_TXRINGHI, 0); b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
} else } else
b43_dma_write(ring, B43_DMA32_TXRING, 0); b43_dma_write(ring, B43_DMA32_TXRING, 0);
} else { } else {
b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base, b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
ring->dma64); ring->type);
if (ring->dma64) { if (ring->type == B43_DMA_64BIT) {
b43_dma_write(ring, B43_DMA64_RXRINGLO, 0); b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
b43_dma_write(ring, B43_DMA64_RXRINGHI, 0); b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
} else } else
@ -786,7 +818,8 @@ static u64 supported_dma_mask(struct b43_wldev *dev)
static static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
int controller_index, int controller_index,
int for_tx, int dma64) int for_tx,
enum b43_dmatype type)
{ {
struct b43_dmaring *ring; struct b43_dmaring *ring;
int err; int err;
@ -796,6 +829,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
ring = kzalloc(sizeof(*ring), GFP_KERNEL); ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring) if (!ring)
goto out; goto out;
ring->type = type;
nr_slots = B43_RXRING_SLOTS; nr_slots = B43_RXRING_SLOTS;
if (for_tx) if (for_tx)
@ -818,7 +852,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
b43_txhdr_size(dev), b43_txhdr_size(dev),
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (dma_mapping_error(dma_test)) { if (b43_dma_mapping_error(ring, dma_test, b43_txhdr_size(dev))) {
/* ugh realloc */ /* ugh realloc */
kfree(ring->txhdr_cache); kfree(ring->txhdr_cache);
ring->txhdr_cache = kcalloc(nr_slots, ring->txhdr_cache = kcalloc(nr_slots,
@ -832,7 +866,8 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
b43_txhdr_size(dev), b43_txhdr_size(dev),
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (dma_mapping_error(dma_test)) if (b43_dma_mapping_error(ring, dma_test,
b43_txhdr_size(dev)))
goto err_kfree_txhdr_cache; goto err_kfree_txhdr_cache;
} }
@ -843,10 +878,9 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
ring->dev = dev; ring->dev = dev;
ring->nr_slots = nr_slots; ring->nr_slots = nr_slots;
ring->mmio_base = b43_dmacontroller_base(dma64, controller_index); ring->mmio_base = b43_dmacontroller_base(type, controller_index);
ring->index = controller_index; ring->index = controller_index;
ring->dma64 = !!dma64; if (type == B43_DMA_64BIT)
if (dma64)
ring->ops = &dma64_ops; ring->ops = &dma64_ops;
else else
ring->ops = &dma32_ops; ring->ops = &dma32_ops;
@ -896,8 +930,8 @@ static void b43_destroy_dmaring(struct b43_dmaring *ring)
if (!ring) if (!ring)
return; return;
b43dbg(ring->dev->wl, "DMA-%s 0x%04X (%s) max used slots: %d/%d\n", b43dbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots: %d/%d\n",
(ring->dma64) ? "64" : "32", (unsigned int)(ring->type),
ring->mmio_base, ring->mmio_base,
(ring->tx) ? "TX" : "RX", ring->max_used_slots, ring->nr_slots); (ring->tx) ? "TX" : "RX", ring->max_used_slots, ring->nr_slots);
/* Device IRQs are disabled prior entering this function, /* Device IRQs are disabled prior entering this function,
@ -941,12 +975,22 @@ int b43_dma_init(struct b43_wldev *dev)
struct b43_dmaring *ring; struct b43_dmaring *ring;
int err; int err;
u64 dmamask; u64 dmamask;
int dma64 = 0; enum b43_dmatype type;
dmamask = supported_dma_mask(dev); dmamask = supported_dma_mask(dev);
if (dmamask == DMA_64BIT_MASK) switch (dmamask) {
dma64 = 1; default:
B43_WARN_ON(1);
case DMA_30BIT_MASK:
type = B43_DMA_30BIT;
break;
case DMA_32BIT_MASK:
type = B43_DMA_32BIT;
break;
case DMA_64BIT_MASK:
type = B43_DMA_64BIT;
break;
}
err = ssb_dma_set_mask(dev->dev, dmamask); err = ssb_dma_set_mask(dev->dev, dmamask);
if (err) { if (err) {
b43err(dev->wl, "The machine/kernel does not support " b43err(dev->wl, "The machine/kernel does not support "
@ -958,52 +1002,51 @@ int b43_dma_init(struct b43_wldev *dev)
err = -ENOMEM; err = -ENOMEM;
/* setup TX DMA channels. */ /* setup TX DMA channels. */
ring = b43_setup_dmaring(dev, 0, 1, dma64); ring = b43_setup_dmaring(dev, 0, 1, type);
if (!ring) if (!ring)
goto out; goto out;
dma->tx_ring0 = ring; dma->tx_ring0 = ring;
ring = b43_setup_dmaring(dev, 1, 1, dma64); ring = b43_setup_dmaring(dev, 1, 1, type);
if (!ring) if (!ring)
goto err_destroy_tx0; goto err_destroy_tx0;
dma->tx_ring1 = ring; dma->tx_ring1 = ring;
ring = b43_setup_dmaring(dev, 2, 1, dma64); ring = b43_setup_dmaring(dev, 2, 1, type);
if (!ring) if (!ring)
goto err_destroy_tx1; goto err_destroy_tx1;
dma->tx_ring2 = ring; dma->tx_ring2 = ring;
ring = b43_setup_dmaring(dev, 3, 1, dma64); ring = b43_setup_dmaring(dev, 3, 1, type);
if (!ring) if (!ring)
goto err_destroy_tx2; goto err_destroy_tx2;
dma->tx_ring3 = ring; dma->tx_ring3 = ring;
ring = b43_setup_dmaring(dev, 4, 1, dma64); ring = b43_setup_dmaring(dev, 4, 1, type);
if (!ring) if (!ring)
goto err_destroy_tx3; goto err_destroy_tx3;
dma->tx_ring4 = ring; dma->tx_ring4 = ring;
ring = b43_setup_dmaring(dev, 5, 1, dma64); ring = b43_setup_dmaring(dev, 5, 1, type);
if (!ring) if (!ring)
goto err_destroy_tx4; goto err_destroy_tx4;
dma->tx_ring5 = ring; dma->tx_ring5 = ring;
/* setup RX DMA channels. */ /* setup RX DMA channels. */
ring = b43_setup_dmaring(dev, 0, 0, dma64); ring = b43_setup_dmaring(dev, 0, 0, type);
if (!ring) if (!ring)
goto err_destroy_tx5; goto err_destroy_tx5;
dma->rx_ring0 = ring; dma->rx_ring0 = ring;
if (dev->dev->id.revision < 5) { if (dev->dev->id.revision < 5) {
ring = b43_setup_dmaring(dev, 3, 0, dma64); ring = b43_setup_dmaring(dev, 3, 0, type);
if (!ring) if (!ring)
goto err_destroy_rx0; goto err_destroy_rx0;
dma->rx_ring3 = ring; dma->rx_ring3 = ring;
} }
b43dbg(dev->wl, "%d-bit DMA initialized\n", b43dbg(dev->wl, "%u-bit DMA initialized\n",
(dmamask == DMA_64BIT_MASK) ? 64 : (unsigned int)type);
(dmamask == DMA_32BIT_MASK) ? 32 : 30);
err = 0; err = 0;
out: out:
return err; return err;
@ -1146,7 +1189,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header, meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
hdrsize, 1); hdrsize, 1);
if (dma_mapping_error(meta_hdr->dmaaddr)) { if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize)) {
ring->current_slot = old_top_slot; ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots; ring->used_slots = old_used_slots;
return -EIO; return -EIO;
@ -1165,7 +1208,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
/* create a bounce buffer in zone_dma on mapping failure. */ /* create a bounce buffer in zone_dma on mapping failure. */
if (dma_mapping_error(meta->dmaaddr)) { if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb) { if (!bounce_skb) {
ring->current_slot = old_top_slot; ring->current_slot = old_top_slot;
@ -1179,7 +1222,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
skb = bounce_skb; skb = bounce_skb;
meta->skb = skb; meta->skb = skb;
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
if (dma_mapping_error(meta->dmaaddr)) { if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
ring->current_slot = old_top_slot; ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots; ring->used_slots = old_used_slots;
err = -EIO; err = -EIO;

View file

@ -203,6 +203,12 @@ struct b43_dma_ops {
void (*set_current_rxslot) (struct b43_dmaring * ring, int slot); void (*set_current_rxslot) (struct b43_dmaring * ring, int slot);
}; };
/* The type of DMA engine on the device. The enum value is the number
 * of address bits the engine can drive, so it can be used directly as
 * a bit width in address-range checks and in debug output. */
enum b43_dmatype {
	B43_DMA_30BIT = 30,
	B43_DMA_32BIT = 32,
	B43_DMA_64BIT = 64,
};
struct b43_dmaring { struct b43_dmaring {
/* Lowlevel DMA ops. */ /* Lowlevel DMA ops. */
const struct b43_dma_ops *ops; const struct b43_dma_ops *ops;
@ -235,8 +241,8 @@ struct b43_dmaring {
int index; int index;
/* Boolean. Is this a TX ring? */ /* Boolean. Is this a TX ring? */
bool tx; bool tx;
/* Boolean. 64bit DMA if true, 32bit DMA otherwise. */ /* The type of DMA engine used. */
bool dma64; enum b43_dmatype type;
/* Boolean. Is this ring stopped at ieee80211 level? */ /* Boolean. Is this ring stopped at ieee80211 level? */
bool stopped; bool stopped;
/* Lock, only used for TX. */ /* Lock, only used for TX. */
@ -255,8 +261,7 @@ static inline u32 b43_dma_read(struct b43_dmaring *ring, u16 offset)
return b43_read32(ring->dev, ring->mmio_base + offset); return b43_read32(ring->dev, ring->mmio_base + offset);
} }
static inline static inline void b43_dma_write(struct b43_dmaring *ring, u16 offset, u32 value)
void b43_dma_write(struct b43_dmaring *ring, u16 offset, u32 value)
{ {
b43_write32(ring->dev, ring->mmio_base + offset, value); b43_write32(ring->dev, ring->mmio_base + offset, value);
} }
@ -264,13 +269,6 @@ static inline
int b43_dma_init(struct b43_wldev *dev); int b43_dma_init(struct b43_wldev *dev);
void b43_dma_free(struct b43_wldev *dev); void b43_dma_free(struct b43_wldev *dev);
int b43_dmacontroller_rx_reset(struct b43_wldev *dev,
u16 dmacontroller_mmio_base, int dma64);
int b43_dmacontroller_tx_reset(struct b43_wldev *dev,
u16 dmacontroller_mmio_base, int dma64);
u16 b43_dmacontroller_base(int dma64bit, int dmacontroller_idx);
void b43_dma_tx_suspend(struct b43_wldev *dev); void b43_dma_tx_suspend(struct b43_wldev *dev);
void b43_dma_tx_resume(struct b43_wldev *dev); void b43_dma_tx_resume(struct b43_wldev *dev);