Merge branch 'pci/dma' into next

* pci/dma:
  PCI: Remove NULL device handling from PCI DMA API
  net: tsi108: Use DMA API properly
  media: ttusb-dec: Remove pci_zalloc_coherent() abuse
  media: ttusb-budget: Remove pci_zalloc_coherent() abuse
commit ac7ab8a6b3
Bjorn Helgaas, 2018-01-31 10:10:28 -06:00 (committed by Bjorn Helgaas)
4 changed files with 41 additions and 58 deletions
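
The common thread in these patches is that the DMA API no longer papers over a NULL device, so every mapping and coherent allocation must name the real struct device (typically &pdev->dev). Below is a minimal sketch of that calling convention; it is illustration only, not code from this merge, and example_setup_ring() and its parameters are invented names.

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    /* Hypothetical helper: allocate and free one coherent buffer against the
     * real device.  Passing NULL instead of &pdev->dev is exactly what the
     * merged patches remove support for. */
    static int example_setup_ring(struct pci_dev *pdev, size_t size)
    {
        dma_addr_t handle;
        void *ring;

        ring = dma_alloc_coherent(&pdev->dev, size, &handle, GFP_KERNEL);
        if (!ring)
            return -ENOMEM;

        /* ... program 'handle' into the device, use 'ring' from the CPU ... */

        dma_free_coherent(&pdev->dev, size, ring, handle);
        return 0;
    }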

drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
@@ -102,7 +102,6 @@ struct ttusb {
 	unsigned int isoc_in_pipe;
 
 	void *iso_buffer;
-	dma_addr_t iso_dma_handle;
 
 	struct urb *iso_urb[ISO_BUF_COUNT];
@@ -792,26 +791,17 @@ static void ttusb_free_iso_urbs(struct ttusb *ttusb)
 	for (i = 0; i < ISO_BUF_COUNT; i++)
 		usb_free_urb(ttusb->iso_urb[i]);
 
-	pci_free_consistent(NULL,
-			    ISO_FRAME_SIZE * FRAMES_PER_ISO_BUF *
-			    ISO_BUF_COUNT, ttusb->iso_buffer,
-			    ttusb->iso_dma_handle);
+	kfree(ttusb->iso_buffer);
 }
 
 static int ttusb_alloc_iso_urbs(struct ttusb *ttusb)
 {
 	int i;
 
-	ttusb->iso_buffer = pci_zalloc_consistent(NULL,
-				ISO_FRAME_SIZE * FRAMES_PER_ISO_BUF * ISO_BUF_COUNT,
-				&ttusb->iso_dma_handle);
-	if (!ttusb->iso_buffer) {
-		dprintk("%s: pci_alloc_consistent - not enough memory\n",
-			__func__);
+	ttusb->iso_buffer = kcalloc(FRAMES_PER_ISO_BUF * ISO_BUF_COUNT,
+				    ISO_FRAME_SIZE, GFP_KERNEL);
+	if (!ttusb->iso_buffer)
 		return -ENOMEM;
-	}
 
 	for (i = 0; i < ISO_BUF_COUNT; i++) {
 		struct urb *urb;
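
Why a plain kcalloc() is enough for these USB drivers: the ISO buffer is only ever handed to the USB core as an URB transfer_buffer, and the USB core maps it for DMA when the URB is submitted, so the old coherent allocation against a NULL PCI device was never needed. A hedged sketch of that usage pattern follows; example_queue_iso() and its parameters are invented for illustration and are not part of the patch.

    #include <linux/slab.h>
    #include <linux/usb.h>

    /* Sketch only: an ordinary kcalloc() buffer used as an isochronous URB's
     * transfer_buffer; usb_submit_urb() takes care of DMA-mapping it. */
    static int example_queue_iso(struct usb_device *udev, unsigned int pipe,
                                 int frames, int frame_size,
                                 usb_complete_t complete_fn, void *ctx)
    {
        struct urb *urb;
        void *buf;
        int i, ret;

        buf = kcalloc(frames, frame_size, GFP_KERNEL);
        if (!buf)
            return -ENOMEM;

        urb = usb_alloc_urb(frames, GFP_KERNEL);
        if (!urb) {
            kfree(buf);
            return -ENOMEM;
        }

        urb->dev = udev;
        urb->pipe = pipe;
        urb->context = ctx;
        urb->complete = complete_fn;
        urb->transfer_flags = URB_ISO_ASAP;
        urb->transfer_buffer = buf;      /* plain kernel memory is fine here */
        urb->transfer_buffer_length = frames * frame_size;
        urb->number_of_packets = frames;
        urb->interval = 1;

        for (i = 0; i < frames; i++) {
            urb->iso_frame_desc[i].offset = i * frame_size;
            urb->iso_frame_desc[i].length = frame_size;
        }

        ret = usb_submit_urb(urb, GFP_KERNEL);
        if (ret) {
            usb_free_urb(urb);
            kfree(buf);
        }
        return ret;
    }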

drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -127,7 +127,6 @@ struct ttusb_dec {
 	struct urb *irq_urb;
 	dma_addr_t irq_dma_handle;
 	void *iso_buffer;
-	dma_addr_t iso_dma_handle;
 	struct urb *iso_urb[ISO_BUF_COUNT];
 	int iso_stream_count;
 	struct mutex iso_mutex;
@@ -1185,11 +1184,7 @@ static void ttusb_dec_free_iso_urbs(struct ttusb_dec *dec)
 	for (i = 0; i < ISO_BUF_COUNT; i++)
 		usb_free_urb(dec->iso_urb[i]);
 
-	pci_free_consistent(NULL,
-			    ISO_FRAME_SIZE * (FRAMES_PER_ISO_BUF *
-			    ISO_BUF_COUNT),
-			    dec->iso_buffer, dec->iso_dma_handle);
+	kfree(dec->iso_buffer);
 }
 
 static int ttusb_dec_alloc_iso_urbs(struct ttusb_dec *dec)
@@ -1198,15 +1193,10 @@ static int ttusb_dec_alloc_iso_urbs(struct ttusb_dec *dec)
 	dprintk("%s\n", __func__);
 
-	dec->iso_buffer = pci_zalloc_consistent(NULL,
-				ISO_FRAME_SIZE * (FRAMES_PER_ISO_BUF * ISO_BUF_COUNT),
-				&dec->iso_dma_handle);
-	if (!dec->iso_buffer) {
-		dprintk("%s: pci_alloc_consistent - not enough memory\n",
-			__func__);
+	dec->iso_buffer = kcalloc(FRAMES_PER_ISO_BUF * ISO_BUF_COUNT,
+				  ISO_FRAME_SIZE, GFP_KERNEL);
+	if (!dec->iso_buffer)
 		return -ENOMEM;
-	}
 
 	for (i = 0; i < ISO_BUF_COUNT; i++) {
 		struct urb *urb;

drivers/net/ethernet/tundra/tsi108_eth.c
@@ -152,6 +152,8 @@ struct tsi108_prv_data {
 	u32 msg_enable;		/* debug message level */
 	struct mii_if_info mii_if;
 	unsigned int init_media;
+
+	struct platform_device *pdev;
 };
 
 /* Structure for a device driver */
@@ -703,17 +705,18 @@ static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
 		data->txskbs[tx] = skb;
 
 		if (i == 0) {
-			data->txring[tx].buf0 = dma_map_single(NULL, skb->data,
-					skb_headlen(skb), DMA_TO_DEVICE);
+			data->txring[tx].buf0 = dma_map_single(&data->pdev->dev,
+					skb->data, skb_headlen(skb),
+					DMA_TO_DEVICE);
 			data->txring[tx].len = skb_headlen(skb);
 			misc |= TSI108_TX_SOF;
 		} else {
 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
 
-			data->txring[tx].buf0 = skb_frag_dma_map(NULL, frag,
-						0,
-						skb_frag_size(frag),
-						DMA_TO_DEVICE);
+			data->txring[tx].buf0 =
+				skb_frag_dma_map(&data->pdev->dev, frag,
+						 0, skb_frag_size(frag),
+						 DMA_TO_DEVICE);
 			data->txring[tx].len = skb_frag_size(frag);
 		}
@@ -808,9 +811,9 @@ static int tsi108_refill_rx(struct net_device *dev, int budget)
 		if (!skb)
 			break;
 
-		data->rxring[rx].buf0 = dma_map_single(NULL, skb->data,
-					TSI108_RX_SKB_SIZE,
-					DMA_FROM_DEVICE);
+		data->rxring[rx].buf0 = dma_map_single(&data->pdev->dev,
+					skb->data, TSI108_RX_SKB_SIZE,
+					DMA_FROM_DEVICE);
 
 		/* Sometimes the hardware sets blen to zero after packet
 		 * reception, even though the manual says that it's only ever
@@ -1308,15 +1311,15 @@ static int tsi108_open(struct net_device *dev)
 		       data->id, dev->irq, dev->name);
 	}
 
-	data->rxring = dma_zalloc_coherent(NULL, rxring_size, &data->rxdma,
-					   GFP_KERNEL);
+	data->rxring = dma_zalloc_coherent(&data->pdev->dev, rxring_size,
+					   &data->rxdma, GFP_KERNEL);
 	if (!data->rxring)
 		return -ENOMEM;
 
-	data->txring = dma_zalloc_coherent(NULL, txring_size, &data->txdma,
-					   GFP_KERNEL);
+	data->txring = dma_zalloc_coherent(&data->pdev->dev, txring_size,
+					   &data->txdma, GFP_KERNEL);
 	if (!data->txring) {
-		pci_free_consistent(NULL, rxring_size, data->rxring,
+		dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring,
 				    data->rxdma);
 		return -ENOMEM;
 	}
@@ -1428,10 +1431,10 @@ static int tsi108_close(struct net_device *dev)
 		dev_kfree_skb(skb);
 	}
 
-	dma_free_coherent(0,
+	dma_free_coherent(&data->pdev->dev,
 			  TSI108_RXRING_LEN * sizeof(rx_desc),
 			  data->rxring, data->rxdma);
-	dma_free_coherent(0,
+	dma_free_coherent(&data->pdev->dev,
 			  TSI108_TXRING_LEN * sizeof(tx_desc),
 			  data->txring, data->txdma);
@@ -1576,6 +1579,7 @@ tsi108_init_one(struct platform_device *pdev)
 	printk("tsi108_eth%d: probe...\n", pdev->id);
 	data = netdev_priv(dev);
 	data->dev = dev;
+	data->pdev = pdev;
 
 	pr_debug("tsi108_eth%d:regs:phyresgs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n",
 			pdev->id, einfo->regs, einfo->phyregs,
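
The tsi108 conversion follows a common pattern: stash the platform device in the driver's private data at probe time (data->pdev = pdev above) and then use &data->pdev->dev for every streaming mapping and coherent allocation in the data path. Below is a small sketch of that pattern with an explicit mapping-error check; the struct and function names are hypothetical, and the dma_mapping_error() check is general good practice rather than something this patch adds.

    #include <linux/dma-mapping.h>
    #include <linux/platform_device.h>
    #include <linux/skbuff.h>

    /* Hypothetical private data mirroring the pattern used by the driver. */
    struct example_priv {
        struct platform_device *pdev;   /* saved in the probe routine */
    };

    /* Sketch: map an RX buffer against the real device and check the result. */
    static int example_map_rx(struct example_priv *priv, struct sk_buff *skb,
                              size_t len, dma_addr_t *addr)
    {
        struct device *dev = &priv->pdev->dev;

        *addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *addr))
            return -ENOMEM;

        return 0;
    }

dma_zalloc_coherent(), used in the diff above, was the zeroing variant of dma_alloc_coherent() available at the time.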

include/linux/pci-dma-compat.h
@@ -17,91 +17,90 @@ static inline void *
 pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
		      dma_addr_t *dma_handle)
 {
-	return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
+	return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
 }
 
 static inline void *
 pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
		       dma_addr_t *dma_handle)
 {
-	return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
-				   size, dma_handle, GFP_ATOMIC);
+	return dma_zalloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
 }
 
 static inline void
 pci_free_consistent(struct pci_dev *hwdev, size_t size,
		     void *vaddr, dma_addr_t dma_handle)
 {
-	dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle);
+	dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle);
 }
 
 static inline dma_addr_t
 pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
 {
-	return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction);
+	return dma_map_single(&hwdev->dev, ptr, size, (enum dma_data_direction)direction);
 }
 
 static inline void
 pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
		  size_t size, int direction)
 {
-	dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
+	dma_unmap_single(&hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
 }
 
 static inline dma_addr_t
 pci_map_page(struct pci_dev *hwdev, struct page *page,
	      unsigned long offset, size_t size, int direction)
 {
-	return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
+	return dma_map_page(&hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
 }
 
 static inline void
 pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
		size_t size, int direction)
 {
-	dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
+	dma_unmap_page(&hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
 }
 
 static inline int
 pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
	    int nents, int direction)
 {
-	return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
+	return dma_map_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction);
 }
 
 static inline void
 pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
	      int nents, int direction)
 {
-	dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
+	dma_unmap_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction);
 }
 
 static inline void
 pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
			     size_t size, int direction)
 {
-	dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
+	dma_sync_single_for_cpu(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
 }
 
 static inline void
 pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
				size_t size, int direction)
 {
-	dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
+	dma_sync_single_for_device(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
 }
 
 static inline void
 pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
			 int nelems, int direction)
 {
-	dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
+	dma_sync_sg_for_cpu(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
 }
 
 static inline void
 pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
			    int nelems, int direction)
 {
-	dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
+	dma_sync_sg_for_device(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
 }
 
 static inline int