media: videobuf2: Stop using internal dma-buf lock

All drivers that use dma-bufs have been moved to the updated locking
specification and now dma-buf reservation is guaranteed to be locked
by importers during the mapping operations. There is no need to take
the internal dma-buf lock anymore. Remove locking from the videobuf2
memory allocators.

Acked-by: Tomasz Figa <tfiga@chromium.org>
Acked-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221017172229.42269-21-dmitry.osipenko@collabora.com
This commit is contained in:
Dmitry Osipenko 2022-10-17 20:22:28 +03:00
parent ae2e7f28a1
commit 23543b3c4f
3 changed files with 3 additions and 30 deletions

drivers/media/common/videobuf2/videobuf2-dma-contig.c

@@ -382,18 +382,12 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
 	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
 {
 	struct vb2_dc_attachment *attach = db_attach->priv;
-	/* stealing dmabuf mutex to serialize map/unmap operations */
-	struct mutex *lock = &db_attach->dmabuf->lock;
 	struct sg_table *sgt;
 
-	mutex_lock(lock);
-
 	sgt = &attach->sgt;
 	/* return previously mapped sg table */
-	if (attach->dma_dir == dma_dir) {
-		mutex_unlock(lock);
+	if (attach->dma_dir == dma_dir)
 		return sgt;
-	}
 
 	/* release any previous cache */
 	if (attach->dma_dir != DMA_NONE) {
@@ -409,14 +403,11 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
 	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
 			    DMA_ATTR_SKIP_CPU_SYNC)) {
 		pr_err("failed to map scatterlist\n");
-		mutex_unlock(lock);
 		return ERR_PTR(-EIO);
 	}
 
 	attach->dma_dir = dma_dir;
 
-	mutex_unlock(lock);
-
 	return sgt;
 }

drivers/media/common/videobuf2/videobuf2-dma-sg.c

@@ -424,18 +424,12 @@ static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
 	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
 {
 	struct vb2_dma_sg_attachment *attach = db_attach->priv;
-	/* stealing dmabuf mutex to serialize map/unmap operations */
-	struct mutex *lock = &db_attach->dmabuf->lock;
 	struct sg_table *sgt;
 
-	mutex_lock(lock);
-
 	sgt = &attach->sgt;
 	/* return previously mapped sg table */
-	if (attach->dma_dir == dma_dir) {
-		mutex_unlock(lock);
+	if (attach->dma_dir == dma_dir)
 		return sgt;
-	}
 
 	/* release any previous cache */
 	if (attach->dma_dir != DMA_NONE) {
@@ -446,14 +440,11 @@ static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
 	/* mapping to the client with new direction */
 	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
 		pr_err("failed to map scatterlist\n");
-		mutex_unlock(lock);
 		return ERR_PTR(-EIO);
 	}
 
 	attach->dma_dir = dma_dir;
 
-	mutex_unlock(lock);
-
 	return sgt;
 }

drivers/media/common/videobuf2/videobuf2-vmalloc.c

@@ -267,18 +267,12 @@ static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
 	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
 {
 	struct vb2_vmalloc_attachment *attach = db_attach->priv;
-	/* stealing dmabuf mutex to serialize map/unmap operations */
-	struct mutex *lock = &db_attach->dmabuf->lock;
 	struct sg_table *sgt;
 
-	mutex_lock(lock);
-
 	sgt = &attach->sgt;
 	/* return previously mapped sg table */
-	if (attach->dma_dir == dma_dir) {
-		mutex_unlock(lock);
+	if (attach->dma_dir == dma_dir)
 		return sgt;
-	}
 
 	/* release any previous cache */
 	if (attach->dma_dir != DMA_NONE) {
@@ -289,14 +283,11 @@ static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	/* mapping to the client with new direction */
 	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
 		pr_err("failed to map scatterlist\n");
-		mutex_unlock(lock);
 		return ERR_PTR(-EIO);
 	}
 
 	attach->dma_dir = dma_dir;
 
-	mutex_unlock(lock);
-
 	return sgt;
 }