linux-stable/drivers/media/common/videobuf2/videobuf2-vmalloc.c
Dmitry Osipenko 6f508c8875 media: videobuf2: Don't assert held reservation lock for dma-buf mmapping
Don't assert that the dma-buf reservation lock is held on memory mapping
of an exported buffer.

We're going to change the dma-buf mmap() locking policy such that exporters
will have to handle the lock themselves. The previous locking policy caused
a deadlock for DRM drivers in the case of self-imported dma-bufs once those
drivers were moved to use the reservation lock universally. The problem is
solved by moving the lock down to the exporters. This patch prepares
videobuf2 for the locking policy update.

Reviewed-by: Emil Velikov <emil.l.velikov@gmail.com>
Reviewed-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230529223935.2672495-2-dmitry.osipenko@collabora.com
2023-06-21 20:22:20 +03:00


/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>
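
/*
 * Per-buffer state for this allocator: @vaddr is the kernel mapping,
 * @vec holds pinned user pages (USERPTR buffers only), @dbuf is the
 * imported dma-buf (DMABUF buffers only), and @refcount/@handler track
 * references taken by userspace mmap()s and exported dma-bufs.
 */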
struct vb2_vmalloc_buf {
	void				*vaddr;
	struct frame_vector		*vec;
	enum dma_data_direction		dma_dir;
	unsigned long			size;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct dma_buf			*dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);
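
/*
 * Allocate a @size byte buffer with vmalloc_user() and initialize the
 * refcount handler used by the common vm_area helpers.
 */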
static void *vb2_vmalloc_alloc(struct vb2_buffer *vb, struct device *dev,
			       unsigned long size)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | vb->vb2_queue->gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	if (!buf->vaddr) {
		pr_debug("vmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);
	return buf;
}
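
/*
 * Drop one reference; the vmalloc area and the bookkeeping structure are
 * freed once the last mmap()/dma-buf reference goes away.
 */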
static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (refcount_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}
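
/*
 * USERPTR support: pin the user pages with a frame vector. If the pfns
 * have no struct page backing, fall back to ioremap() of a physically
 * contiguous range; otherwise map the pages with vm_map_ram().
 */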
static void *vb2_vmalloc_get_userptr(struct vb2_buffer *vb, struct device *dev,
				     unsigned long vaddr, unsigned long size)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = vb->vb2_queue->dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size,
				  buf->dma_dir == DMA_FROM_DEVICE ||
				  buf->dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
			ioremap(__pfn_to_phys(nums[0]), size + offset);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);

	return ERR_PTR(ret);
}
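
/*
 * Undo vb2_vmalloc_get_userptr(): unmap, mark the pages dirty if the
 * device may have written to them, and release the frame vector.
 */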
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_vmalloc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}
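
/*
 * Map the buffer into a userspace VMA with remap_vmalloc_range() and hook
 * up vb2_common_vm_ops so that VMA open()/close() adjust the refcount.
 */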
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vm_flags_set(vma, VM_DONTEXPAND);

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};
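
/*
 * Build a per-attachment sg_table with one entry per page of the vmalloc
 * area; the table is mapped lazily in vb2_vmalloc_dmabuf_ops_map().
 */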
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
					 struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sgtable_sg(sgt, sg, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
					  struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}
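
/*
 * Map the cached sg_table for the importing device. The existing mapping
 * is reused while the DMA direction stays the same and is remapped when
 * the importer asks for a different direction.
 */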
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir)
		return sgt;

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
		pr_err("failed to map scatterlist\n");
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf,
				       struct iosys_map *map)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	iosys_map_set_vaddr(map, buf->vaddr);

	return 0;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};
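
/*
 * Export the buffer as a dma-buf. The dma-buf keeps its own reference to
 * the vb2 buffer, dropped in vb2_vmalloc_dmabuf_ops_release().
 */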
static struct dma_buf *vb2_vmalloc_get_dmabuf(struct vb2_buffer *vb,
					      void *buf_priv,
					      unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/
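
/*
 * Importer side: vb2 does not hold the dma-buf reservation lock when
 * these callbacks run, so the _unlocked dma-buf helpers, which take the
 * lock internally, are used here.
 */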
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct iosys_map map;
	int ret;

	ret = dma_buf_vmap_unlocked(buf->dbuf, &map);
	if (ret)
		return -EFAULT;
	buf->vaddr = map.vaddr;

	return 0;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	dma_buf_vunmap_unlocked(buf->dbuf, &map);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	if (buf->vaddr)
		dma_buf_vunmap_unlocked(buf->dbuf, &map);

	kfree(buf);
}

static void *vb2_vmalloc_attach_dmabuf(struct vb2_buffer *vb,
				       struct device *dev,
				       struct dma_buf *dbuf,
				       unsigned long size)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;

	return buf;
}

const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc		= vb2_vmalloc_alloc,
	.put		= vb2_vmalloc_put,
	.get_userptr	= vb2_vmalloc_get_userptr,
	.put_userptr	= vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf	= vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
	.vaddr		= vb2_vmalloc_vaddr,
	.mmap		= vb2_vmalloc_mmap,
	.num_users	= vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);
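
/*
 * Drivers select this allocator by pointing their vb2_queue at the ops
 * table above. A minimal sketch of the queue setup (the my_dev and
 * my_vb2_buffer names are illustrative, not from a specific driver):
 *
 *	struct vb2_queue *q = &my_dev->queue;
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	q->buf_struct_size = sizeof(struct my_vb2_buffer);
 *	ret = vb2_queue_init(q);
 */
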
MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);