s390/cio: use dma helpers for setting masks

Bypassing the DMA API is bad style, even when we don't expect any
actual problems. Let's use the proper API helpers to set the DMA
masks and check the returned errors, so that we benefit from the
common sanity checks.
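
For reference, the pattern the patch moves to looks roughly like the
sketch below. example_probe() is a made-up name for illustration; the
point is that dma_set_mask() and dma_set_coherent_mask() validate the
requested mask against what the platform can address and return a
negative errno (-EIO) on failure, a check that a direct write to
dev->coherent_dma_mask silently skips.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Sketch only: example_probe() is hypothetical. Note that
 * dma_set_mask() requires dev->dma_mask to already point at valid
 * storage (the patch arranges this via sch->dev.dma_mask =
 * &sch->dma_mask), otherwise it fails with -EIO.
 */
static int example_probe(struct device *dev)
{
	int ret;

	/* Streaming mappings may use the full 64-bit address space. */
	ret = dma_set_mask(dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	/* Coherent allocations must fit in 31 bits here. */
	return dma_set_coherent_mask(dev, DMA_BIT_MASK(31));
}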

io_subchannel_allocate_dev() required some extra massaging so that it
can return an errno other than -ENOMEM.
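
The errno travels back through the usual ERR_PTR()/IS_ERR()/PTR_ERR()
idiom. A minimal sketch of a caller follows; example_caller() is an
invented name, the actual callers live in drivers/s390/cio/device.c.

#include <linux/err.h>

/* Sketch only: with the function returning ERR_PTR(ret) instead of a
 * hard-coded ERR_PTR(-ENOMEM), callers recover the precise errno.
 */
static int example_caller(struct subchannel *sch)
{
	struct ccw_device *cdev = io_subchannel_allocate_dev(sch);

	if (IS_ERR(cdev))
		return PTR_ERR(cdev);	/* -ENOMEM, -EIO, ... */

	/* ... hand cdev to the rest of device setup ... */
	return 0;
}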

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Reviewed-by: Halil Pasic <pasic@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Author: Julian Wiedmann <jwi@linux.ibm.com>
Date: 2020-12-09 11:24:13 +01:00
Committer: Vasily Gorbik <gor@linux.ibm.com>
parent 29c53de014
commit 4520a91a97
2 changed files with 34 additions and 12 deletions

--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c

@@ -225,18 +225,23 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
 
 	INIT_WORK(&sch->todo_work, css_sch_todo);
 	sch->dev.release = &css_subchannel_release;
+	sch->dev.dma_mask = &sch->dma_mask;
 	device_initialize(&sch->dev);
 	/*
-	 * The physical addresses of some the dma structures that can
+	 * The physical addresses for some of the dma structures that can
	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
 	 */
-	sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
+	ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
+	if (ret)
+		goto err;
 	/*
 	 * But we don't have such restrictions imposed on the stuff that
 	 * is handled by the streaming API.
 	 */
-	sch->dma_mask = DMA_BIT_MASK(64);
-	sch->dev.dma_mask = &sch->dma_mask;
+	ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
+	if (ret)
+		goto err;
+
 	return sch;
 
 err:
@@ -970,8 +975,11 @@ static int __init setup_css(int nr)
 	 * css->device as the device argument with the DMA API)
 	 * and are fine with 64 bit addresses.
 	 */
-	css->device.coherent_dma_mask = DMA_BIT_MASK(64);
-	css->device.dma_mask = &css->device.coherent_dma_mask;
+	ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
+	if (ret) {
+		kfree(css);
+		goto out_err;
+	}
 
 	mutex_init(&css->mutex);
 	ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
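
A note on dma_coerce_mask_and_coherent(), since it replaces two
assignments at once: the helper points dev->dma_mask at
dev->coherent_dma_mask and then applies the mask to both the streaming
and the coherent path. Its definition in <linux/dma-mapping.h> is
essentially the following, quoted here for reference:

static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}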

--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c

@@ -679,33 +679,47 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
 {
 	struct ccw_device *cdev;
 	struct gen_pool *dma_pool;
+	int ret;
 
 	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
-	if (!cdev)
+	if (!cdev) {
+		ret = -ENOMEM;
 		goto err_cdev;
+	}
 	cdev->private = kzalloc(sizeof(struct ccw_device_private),
 				GFP_KERNEL | GFP_DMA);
-	if (!cdev->private)
+	if (!cdev->private) {
+		ret = -ENOMEM;
 		goto err_priv;
-	cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
+	}
+
 	cdev->dev.dma_mask = sch->dev.dma_mask;
+	ret = dma_set_coherent_mask(&cdev->dev, sch->dev.coherent_dma_mask);
+	if (ret)
+		goto err_coherent_mask;
+
 	dma_pool = cio_gp_dma_create(&cdev->dev, 1);
-	if (!dma_pool)
+	if (!dma_pool) {
+		ret = -ENOMEM;
 		goto err_dma_pool;
+	}
 	cdev->private->dma_pool = dma_pool;
 	cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
 					sizeof(*cdev->private->dma_area));
-	if (!cdev->private->dma_area)
+	if (!cdev->private->dma_area) {
+		ret = -ENOMEM;
 		goto err_dma_area;
+	}
 	return cdev;
 err_dma_area:
 	cio_gp_dma_destroy(dma_pool, &cdev->dev);
 err_dma_pool:
+err_coherent_mask:
 	kfree(cdev->private);
 err_priv:
 	kfree(cdev);
 err_cdev:
-	return ERR_PTR(-ENOMEM);
+	return ERR_PTR(ret);
 }
 
 static void ccw_device_todo(struct work_struct *work);