accel/ivpu: Use GEM shmem helper for all buffers

Use struct drm_gem_shmem_object as a base for struct ivpu_bo.
This cuts the buffer management code roughly in half.
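
For orientation, this is the resulting BO layout and the accessors the rest of the
patch builds on, condensed from the ivpu_gem.h hunk below (a sketch, not the
complete header; all names are taken from the diff):

    struct ivpu_bo {
            struct drm_gem_shmem_object base;  /* pages, sgt and vaddr now live here */
            struct ivpu_mmu_context *ctx;
            struct list_head bo_list_node;
            struct drm_mm_node mm_node;

            struct mutex lock; /* Protects: ctx, mmu_mapped, vpu_addr */
            u64 vpu_addr;
            u32 handle;
            u32 flags;
            u32 job_status; /* Valid only for command buffer */
            bool mmu_mapped;
    };

    static inline struct ivpu_bo *to_ivpu_bo(struct drm_gem_object *obj)
    {
            return container_of(obj, struct ivpu_bo, base.base);
    }

    static inline void *ivpu_bo_vaddr(struct ivpu_bo *bo)
    {
            return bo->base.vaddr; /* kernel mapping is tracked by the shmem helper */
    }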

Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231031073156.1301669-5-stanislaw.gruszka@linux.intel.com
Author: Jacek Lawrynowicz
Date:   2023-10-31 08:31:56 +01:00
parent 48d45fac39
commit 8d88e4cdce
5 changed files with 150 additions and 466 deletions

drivers/accel/ivpu/Kconfig

@@ -6,7 +6,7 @@ config DRM_ACCEL_IVPU
depends on X86_64 && !UML
depends on PCI && PCI_MSI
select FW_LOADER
select SHMEM
select DRM_GEM_SHMEM_HELPER
select GENERIC_ALLOCATOR
help
Choose this option if you have a system with an 14th generation

drivers/accel/ivpu/ivpu_drv.c

@@ -360,7 +360,7 @@ int ivpu_boot(struct ivpu_device *vdev)
int ret;
/* Update boot params located at first 4KB of FW memory */
ivpu_fw_boot_params_setup(vdev, vdev->fw->mem->kvaddr);
ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));
ret = ivpu_hw_boot_fw(vdev);
if (ret) {
@@ -409,7 +409,9 @@ static const struct drm_driver driver = {
.open = ivpu_open,
.postclose = ivpu_postclose,
.gem_prime_import = ivpu_gem_prime_import,
.gem_create_object = ivpu_gem_create_object,
.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
.ioctls = ivpu_drm_ioctls,
.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),

drivers/accel/ivpu/ivpu_gem.c

@@ -20,224 +20,18 @@
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
MODULE_IMPORT_NS(DMA_BUF);
static const struct drm_gem_object_funcs ivpu_gem_funcs;
static struct lock_class_key prime_bo_lock_class_key;
static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action)
{
if (bo->ctx)
ivpu_dbg(vdev, BO, "%6s: size %zu has_pages %d dma_mapped %d handle %u ctx %d vpu_addr 0x%llx mmu_mapped %d\n",
action, bo->base.size, (bool)bo->pages, (bool)bo->sgt, bo->handle,
bo->ctx->id, bo->vpu_addr, bo->mmu_mapped);
action, ivpu_bo_size(bo), (bool)bo->base.pages, (bool)bo->base.sgt,
bo->handle, bo->ctx->id, bo->vpu_addr, bo->mmu_mapped);
else
ivpu_dbg(vdev, BO, "%6s: size %zu has_pages %d dma_mapped %d handle %u (not added to context)\n",
action, bo->base.size, (bool)bo->pages, (bool)bo->sgt, bo->handle);
}
static int __must_check prime_alloc_pages_locked(struct ivpu_bo *bo)
{
/* Pages are managed by the underlying dma-buf */
return 0;
}
static void prime_free_pages_locked(struct ivpu_bo *bo)
{
/* Pages are managed by the underlying dma-buf */
}
static int prime_map_pages_locked(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
struct sg_table *sgt;
sgt = dma_buf_map_attachment_unlocked(bo->base.import_attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sgt)) {
ivpu_err(vdev, "Failed to map attachment: %ld\n", PTR_ERR(sgt));
return PTR_ERR(sgt);
}
bo->sgt = sgt;
return 0;
}
static void prime_unmap_pages_locked(struct ivpu_bo *bo)
{
dma_buf_unmap_attachment_unlocked(bo->base.import_attach, bo->sgt, DMA_BIDIRECTIONAL);
bo->sgt = NULL;
}
static const struct ivpu_bo_ops prime_ops = {
.type = IVPU_BO_TYPE_PRIME,
.name = "prime",
.alloc_pages = prime_alloc_pages_locked,
.free_pages = prime_free_pages_locked,
.map_pages = prime_map_pages_locked,
.unmap_pages = prime_unmap_pages_locked,
};
static int __must_check shmem_alloc_pages_locked(struct ivpu_bo *bo)
{
int npages = ivpu_bo_size(bo) >> PAGE_SHIFT;
struct page **pages;
pages = drm_gem_get_pages(&bo->base);
if (IS_ERR(pages))
return PTR_ERR(pages);
if (bo->flags & DRM_IVPU_BO_WC)
set_pages_array_wc(pages, npages);
bo->pages = pages;
return 0;
}
static void shmem_free_pages_locked(struct ivpu_bo *bo)
{
if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
set_pages_array_wb(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);
drm_gem_put_pages(&bo->base, bo->pages, true, false);
bo->pages = NULL;
}
static int ivpu_bo_map_pages_locked(struct ivpu_bo *bo)
{
int npages = ivpu_bo_size(bo) >> PAGE_SHIFT;
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
struct sg_table *sgt;
int ret;
sgt = drm_prime_pages_to_sg(&vdev->drm, bo->pages, npages);
if (IS_ERR(sgt)) {
ivpu_err(vdev, "Failed to allocate sgtable\n");
return PTR_ERR(sgt);
}
ret = dma_map_sgtable(vdev->drm.dev, sgt, DMA_BIDIRECTIONAL, 0);
if (ret) {
ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
goto err_free_sgt;
}
bo->sgt = sgt;
return 0;
err_free_sgt:
kfree(sgt);
return ret;
}
static void ivpu_bo_unmap_pages_locked(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
dma_unmap_sgtable(vdev->drm.dev, bo->sgt, DMA_BIDIRECTIONAL, 0);
sg_free_table(bo->sgt);
kfree(bo->sgt);
bo->sgt = NULL;
}
static const struct ivpu_bo_ops shmem_ops = {
.type = IVPU_BO_TYPE_SHMEM,
.name = "shmem",
.alloc_pages = shmem_alloc_pages_locked,
.free_pages = shmem_free_pages_locked,
.map_pages = ivpu_bo_map_pages_locked,
.unmap_pages = ivpu_bo_unmap_pages_locked,
};
static int __must_check internal_alloc_pages_locked(struct ivpu_bo *bo)
{
unsigned int i, npages = ivpu_bo_size(bo) >> PAGE_SHIFT;
struct page **pages;
int ret;
pages = kvmalloc_array(npages, sizeof(*bo->pages), GFP_KERNEL);
if (!pages)
return -ENOMEM;
for (i = 0; i < npages; i++) {
pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
if (!pages[i]) {
ret = -ENOMEM;
goto err_free_pages;
}
cond_resched();
}
bo->pages = pages;
return 0;
err_free_pages:
while (i--)
put_page(pages[i]);
kvfree(pages);
return ret;
}
static void internal_free_pages_locked(struct ivpu_bo *bo)
{
unsigned int i, npages = ivpu_bo_size(bo) >> PAGE_SHIFT;
if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
set_pages_array_wb(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);
for (i = 0; i < npages; i++)
put_page(bo->pages[i]);
kvfree(bo->pages);
bo->pages = NULL;
}
static const struct ivpu_bo_ops internal_ops = {
.type = IVPU_BO_TYPE_INTERNAL,
.name = "internal",
.alloc_pages = internal_alloc_pages_locked,
.free_pages = internal_free_pages_locked,
.map_pages = ivpu_bo_map_pages_locked,
.unmap_pages = ivpu_bo_unmap_pages_locked,
};
static int __must_check ivpu_bo_alloc_and_map_pages_locked(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
int ret;
lockdep_assert_held(&bo->lock);
drm_WARN_ON(&vdev->drm, bo->sgt);
ret = bo->ops->alloc_pages(bo);
if (ret) {
ivpu_err(vdev, "Failed to allocate pages for BO: %d", ret);
return ret;
}
ret = bo->ops->map_pages(bo);
if (ret) {
ivpu_err(vdev, "Failed to map pages for BO: %d", ret);
goto err_free_pages;
}
return ret;
err_free_pages:
bo->ops->free_pages(bo);
return ret;
}
static void ivpu_bo_unmap_and_free_pages(struct ivpu_bo *bo)
{
mutex_lock(&bo->lock);
WARN_ON(!bo->sgt);
bo->ops->unmap_pages(bo);
WARN_ON(bo->sgt);
bo->ops->free_pages(bo);
WARN_ON(bo->pages);
mutex_unlock(&bo->lock);
action, ivpu_bo_size(bo), (bool)bo->base.pages, (bool)bo->base.sgt,
bo->handle);
}
/*
@@ -254,20 +48,24 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
mutex_lock(&bo->lock);
ivpu_dbg_bo(vdev, bo, "pin");
if (!bo->ctx) {
ivpu_err(vdev, "vpu_addr not allocated for BO %d\n", bo->handle);
ret = -EINVAL;
goto unlock;
}
if (!bo->sgt) {
ret = ivpu_bo_alloc_and_map_pages_locked(bo);
if (ret)
goto unlock;
}
if (!bo->mmu_mapped) {
ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, bo->sgt,
struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
goto unlock;
}
ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
ivpu_bo_is_snooped(bo));
if (ret) {
ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret);
@@ -291,13 +89,14 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
mutex_lock(&bo->lock);
ret = ivpu_mmu_context_insert_node(ctx, range, bo->base.size, &bo->mm_node);
ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
if (!ret) {
bo->ctx = ctx;
bo->vpu_addr = bo->mm_node.start;
} else {
ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
}
ivpu_dbg_bo(vdev, bo, "alloc");
mutex_unlock(&bo->lock);
@@ -314,11 +113,12 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
ivpu_dbg_bo(vdev, bo, "unbind");
/* TODO: dma_unmap */
if (bo->mmu_mapped) {
drm_WARN_ON(&vdev->drm, !bo->ctx);
drm_WARN_ON(&vdev->drm, !bo->vpu_addr);
drm_WARN_ON(&vdev->drm, !bo->sgt);
ivpu_mmu_context_unmap_sgt(vdev, bo->ctx, bo->vpu_addr, bo->sgt);
drm_WARN_ON(&vdev->drm, !bo->base.sgt);
ivpu_mmu_context_unmap_sgt(vdev, bo->ctx, bo->vpu_addr, bo->base.sgt);
bo->mmu_mapped = false;
}
@@ -353,15 +153,32 @@ void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_m
mutex_unlock(&vdev->bo_list_lock);
}
static struct ivpu_bo *
ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, const struct ivpu_bo_ops *ops)
struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size)
{
struct ivpu_bo *bo;
int ret = 0;
if (drm_WARN_ON(&vdev->drm, size == 0 || !PAGE_ALIGNED(size)))
if (size == 0 || !PAGE_ALIGNED(size))
return ERR_PTR(-EINVAL);
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
return ERR_PTR(-ENOMEM);
bo->base.base.funcs = &ivpu_gem_funcs;
bo->base.pages_mark_dirty_on_put = true; /* VPU can dirty a BO anytime */
INIT_LIST_HEAD(&bo->bo_list_node);
mutex_init(&bo->lock);
return &bo->base.base;
}
static struct ivpu_bo *
ivpu_bo_create(struct ivpu_device *vdev, u64 size, u32 flags)
{
struct drm_gem_shmem_object *shmem;
struct ivpu_bo *bo;
switch (flags & DRM_IVPU_BO_CACHE_MASK) {
case DRM_IVPU_BO_CACHED:
case DRM_IVPU_BO_WC:
@@ -370,44 +187,22 @@ ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, const struct ivpu_b
return ERR_PTR(-EINVAL);
}
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
return ERR_PTR(-ENOMEM);
shmem = drm_gem_shmem_create(&vdev->drm, size);
if (IS_ERR(shmem))
return ERR_CAST(shmem);
mutex_init(&bo->lock);
bo->base.funcs = &ivpu_gem_funcs;
bo = to_ivpu_bo(&shmem->base);
bo->base.map_wc = flags & DRM_IVPU_BO_WC;
bo->flags = flags;
bo->ops = ops;
if (ops->type == IVPU_BO_TYPE_SHMEM)
ret = drm_gem_object_init(&vdev->drm, &bo->base, size);
else
drm_gem_private_object_init(&vdev->drm, &bo->base, size);
if (ret) {
ivpu_err(vdev, "Failed to initialize drm object\n");
goto err_free;
}
if (flags & DRM_IVPU_BO_MAPPABLE) {
ret = drm_gem_create_mmap_offset(&bo->base);
if (ret) {
ivpu_err(vdev, "Failed to allocate mmap offset\n");
goto err_release;
}
}
mutex_lock(&vdev->bo_list_lock);
list_add_tail(&bo->bo_list_node, &vdev->bo_list);
mutex_unlock(&vdev->bo_list_lock);
return bo;
ivpu_dbg(vdev, BO, "create: vpu_addr 0x%llx size %zu flags 0x%x\n",
bo->vpu_addr, bo->base.base.size, flags);
err_release:
drm_gem_object_release(&bo->base);
err_free:
kfree(bo);
return ERR_PTR(ret);
return bo;
}
static int ivpu_bo_open(struct drm_gem_object *obj, struct drm_file *file)
@@ -429,8 +224,8 @@ static int ivpu_bo_open(struct drm_gem_object *obj, struct drm_file *file)
static void ivpu_bo_free(struct drm_gem_object *obj)
{
struct ivpu_device *vdev = to_ivpu_device(obj->dev);
struct ivpu_bo *bo = to_ivpu_bo(obj);
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
mutex_lock(&vdev->bo_list_lock);
list_del(&bo->bo_list_node);
@@ -441,108 +236,64 @@ static void ivpu_bo_free(struct drm_gem_object *obj)
ivpu_dbg_bo(vdev, bo, "free");
ivpu_bo_unbind(bo);
vunmap(bo->kvaddr);
if (bo->sgt)
ivpu_bo_unmap_and_free_pages(bo);
if (bo->base.import_attach)
drm_prime_gem_destroy(&bo->base, bo->sgt);
drm_gem_object_release(&bo->base);
mutex_destroy(&bo->lock);
kfree(bo);
drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
drm_gem_shmem_free(&bo->base);
}
static int ivpu_bo_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct ivpu_bo *bo = to_ivpu_bo(obj);
if (obj->import_attach) {
/* Drop the reference drm_gem_mmap_obj() acquired.*/
drm_gem_object_put(obj);
vma->vm_private_data = NULL;
return dma_buf_mmap(obj->dma_buf, vma, 0);
}
vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND);
vma->vm_page_prot = ivpu_bo_pgprot(bo, vm_get_page_prot(vma->vm_flags));
return 0;
}
static struct sg_table *ivpu_bo_get_sg_table(struct drm_gem_object *obj)
{
struct ivpu_bo *bo = to_ivpu_bo(obj);
loff_t npages = obj->size >> PAGE_SHIFT;
int ret = 0;
mutex_lock(&bo->lock);
if (!bo->sgt)
ret = ivpu_bo_alloc_and_map_pages_locked(bo);
mutex_unlock(&bo->lock);
if (ret)
return ERR_PTR(ret);
return drm_prime_pages_to_sg(obj->dev, bo->pages, npages);
}
static vm_fault_t ivpu_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct ivpu_bo *bo = to_ivpu_bo(obj);
loff_t npages = obj->size >> PAGE_SHIFT;
pgoff_t page_offset;
struct page *page;
vm_fault_t ret;
int err;
mutex_lock(&bo->lock);
if (!bo->sgt) {
err = ivpu_bo_alloc_and_map_pages_locked(bo);
if (err) {
ret = vmf_error(err);
goto unlock;
}
}
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
if (page_offset >= npages) {
ret = VM_FAULT_SIGBUS;
} else {
page = bo->pages[page_offset];
ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
}
unlock:
mutex_unlock(&bo->lock);
return ret;
}
static const struct vm_operations_struct ivpu_vm_ops = {
.fault = ivpu_vm_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
static const struct dma_buf_ops ivpu_bo_dmabuf_ops = {
.cache_sgt_mapping = true,
.attach = drm_gem_map_attach,
.detach = drm_gem_map_detach,
.map_dma_buf = drm_gem_map_dma_buf,
.unmap_dma_buf = drm_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
};
static struct dma_buf *ivpu_bo_export(struct drm_gem_object *obj, int flags)
{
struct drm_device *dev = obj->dev;
struct dma_buf_export_info exp_info = {
.exp_name = KBUILD_MODNAME,
.owner = dev->driver->fops->owner,
.ops = &ivpu_bo_dmabuf_ops,
.size = obj->size,
.flags = flags,
.priv = obj,
.resv = obj->resv,
};
void *sgt;
/*
* Make sure that pages are allocated and dma-mapped before exporting the bo.
* DMA-mapping is required if the bo will be imported to the same device.
*/
sgt = drm_gem_shmem_get_pages_sgt(to_drm_gem_shmem_obj(obj));
if (IS_ERR(sgt))
return sgt;
return drm_gem_dmabuf_export(dev, &exp_info);
}
static const struct drm_gem_object_funcs ivpu_gem_funcs = {
.free = ivpu_bo_free,
.open = ivpu_bo_open,
.mmap = ivpu_bo_mmap,
.vm_ops = &ivpu_vm_ops,
.get_sg_table = ivpu_bo_get_sg_table,
.export = ivpu_bo_export,
.print_info = drm_gem_shmem_object_print_info,
.pin = drm_gem_shmem_object_pin,
.unpin = drm_gem_shmem_object_unpin,
.get_sg_table = drm_gem_shmem_object_get_sg_table,
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
.mmap = drm_gem_shmem_object_mmap,
.vm_ops = &drm_gem_shmem_vm_ops,
};
int
ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
struct ivpu_device *vdev = file_priv->vdev;
@@ -557,20 +308,20 @@ ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (size == 0)
return -EINVAL;
bo = ivpu_bo_alloc(vdev, size, args->flags, &shmem_ops);
bo = ivpu_bo_create(vdev, size, args->flags);
if (IS_ERR(bo)) {
ivpu_err(vdev, "Failed to create BO: %pe (ctx %u size %llu flags 0x%x)",
bo, file_priv->ctx.id, args->size, args->flags);
return PTR_ERR(bo);
}
ret = drm_gem_handle_create(file, &bo->base, &bo->handle);
ret = drm_gem_handle_create(file, &bo->base.base, &bo->handle);
if (!ret) {
args->vpu_addr = bo->vpu_addr;
args->handle = bo->handle;
}
drm_gem_object_put(&bo->base);
drm_gem_object_put(&bo->base.base);
return ret;
}
@@ -580,8 +331,8 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
{
const struct ivpu_addr_range *range;
struct ivpu_addr_range fixed_range;
struct iosys_map map;
struct ivpu_bo *bo;
pgprot_t prot;
int ret;
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(vpu_addr));
@@ -595,7 +346,7 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
range = &vdev->hw->ranges.global;
}
bo = ivpu_bo_alloc(vdev, size, flags, &internal_ops);
bo = ivpu_bo_create(vdev, size, flags);
if (IS_ERR(bo)) {
ivpu_err(vdev, "Failed to create BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
bo, vpu_addr, size, flags);
@@ -610,61 +361,23 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
if (ret)
goto err_put;
if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
drm_clflush_pages(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);
if (bo->flags & DRM_IVPU_BO_WC)
set_pages_array_wc(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);
else if (bo->flags & DRM_IVPU_BO_UNCACHED)
set_pages_array_uc(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);
prot = ivpu_bo_pgprot(bo, PAGE_KERNEL);
bo->kvaddr = vmap(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT, VM_MAP, prot);
if (!bo->kvaddr) {
ivpu_err(vdev, "Failed to map BO into kernel virtual memory\n");
ret = drm_gem_shmem_vmap(&bo->base, &map);
if (ret)
goto err_put;
}
return bo;
err_put:
drm_gem_object_put(&bo->base);
drm_gem_object_put(&bo->base.base);
return NULL;
}
void ivpu_bo_free_internal(struct ivpu_bo *bo)
{
drm_gem_object_put(&bo->base);
}
struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
struct ivpu_device *vdev = to_ivpu_device(dev);
struct dma_buf_attachment *attach;
struct ivpu_bo *bo;
attach = dma_buf_attach(buf, dev->dev);
if (IS_ERR(attach))
return ERR_CAST(attach);
get_dma_buf(buf);
bo = ivpu_bo_alloc(vdev, buf->size, DRM_IVPU_BO_MAPPABLE, &prime_ops);
if (IS_ERR(bo)) {
ivpu_err(vdev, "Failed to import BO: %pe (size %lu)", bo, buf->size);
goto err_detach;
}
lockdep_set_class(&bo->lock, &prime_bo_lock_class_key);
bo->base.import_attach = attach;
return &bo->base;
err_detach:
dma_buf_detach(buf, attach);
dma_buf_put(buf);
return ERR_CAST(bo);
drm_gem_shmem_vunmap(&bo->base, &map);
drm_gem_object_put(&bo->base.base);
}
int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
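The hunk above also interleaves the removed prime-import code with the new
internal-BO teardown. Extracted from it, the teardown after this patch reads
roughly as follows (a sketch; drm_gem_shmem_vunmap() drops the helper's kernel
mapping that drm_gem_shmem_vmap() created in ivpu_bo_alloc_internal() before
the final reference is put):

    void ivpu_bo_free_internal(struct ivpu_bo *bo)
    {
            struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);

            drm_gem_shmem_vunmap(&bo->base, &map);
            drm_gem_object_put(&bo->base.base);
    }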
@@ -723,12 +436,23 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
mutex_lock(&bo->lock);
if (bo->base.dma_buf && bo->base.dma_buf->file)
dma_refcount = atomic_long_read(&bo->base.dma_buf->file->f_count);
if (bo->base.base.dma_buf && bo->base.base.dma_buf->file)
dma_refcount = atomic_long_read(&bo->base.base.dma_buf->file->f_count);
drm_printf(p, "%5u %6d %16llx %10lu %10u %12lu %14s\n",
bo->ctx ? bo->ctx->id : -1, bo->handle, bo->vpu_addr, bo->base.size,
kref_read(&bo->base.refcount), dma_refcount, bo->ops->name);
drm_printf(p, "%-3u %-6d 0x%-12llx %-10lu 0x%-8x %-4u %-8lu",
bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.base.size,
bo->flags, kref_read(&bo->base.base.refcount), dma_refcount);
if (bo->base.base.import_attach)
drm_printf(p, " imported");
if (bo->base.pages)
drm_printf(p, " has_pages");
if (bo->mmu_mapped)
drm_printf(p, " mmu_mapped");
drm_printf(p, "\n");
mutex_unlock(&bo->lock);
}
@@ -738,8 +462,8 @@ void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
struct ivpu_device *vdev = to_ivpu_device(dev);
struct ivpu_bo *bo;
drm_printf(p, "%5s %6s %16s %10s %10s %12s %14s\n",
"ctx", "handle", "vpu_addr", "size", "refcount", "dma_refcount", "type");
drm_printf(p, "%-3s %-6s %-14s %-10s %-10s %-4s %-8s %s\n",
"ctx", "handle", "vpu_addr", "size", "flags", "refs", "dma_refs", "attribs");
mutex_lock(&vdev->bo_list_lock);
list_for_each_entry(bo, &vdev->bo_list, bo_list_node)

drivers/accel/ivpu/ivpu_gem.h

@@ -6,83 +6,52 @@
#define __IVPU_GEM_H__
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_mm.h>
struct dma_buf;
struct ivpu_bo_ops;
struct ivpu_file_priv;
struct ivpu_bo {
struct drm_gem_object base;
const struct ivpu_bo_ops *ops;
struct drm_gem_shmem_object base;
struct ivpu_mmu_context *ctx;
struct list_head bo_list_node;
struct drm_mm_node mm_node;
struct mutex lock; /* Protects: pages, sgt, ctx, mmu_mapped, vpu_addr */
struct sg_table *sgt;
struct page **pages;
bool mmu_mapped;
void *kvaddr;
struct mutex lock; /* Protects: ctx, mmu_mapped, vpu_addr */
u64 vpu_addr;
u32 handle;
u32 flags;
u32 job_status;
};
enum ivpu_bo_type {
IVPU_BO_TYPE_SHMEM = 1,
IVPU_BO_TYPE_INTERNAL,
IVPU_BO_TYPE_PRIME,
};
struct ivpu_bo_ops {
enum ivpu_bo_type type;
const char *name;
int (*alloc_pages)(struct ivpu_bo *bo);
void (*free_pages)(struct ivpu_bo *bo);
int (*map_pages)(struct ivpu_bo *bo);
void (*unmap_pages)(struct ivpu_bo *bo);
u32 job_status; /* Valid only for command buffer */
bool mmu_mapped;
};
int ivpu_bo_pin(struct ivpu_bo *bo);
void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p);
void ivpu_bo_list_print(struct drm_device *dev);
struct ivpu_bo *
ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags);
struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size);
struct ivpu_bo *ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags);
void ivpu_bo_free_internal(struct ivpu_bo *bo);
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
void ivpu_bo_unmap_sgt_and_remove_from_context(struct ivpu_bo *bo);
int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p);
void ivpu_bo_list_print(struct drm_device *dev);
static inline struct ivpu_bo *to_ivpu_bo(struct drm_gem_object *obj)
{
return container_of(obj, struct ivpu_bo, base);
return container_of(obj, struct ivpu_bo, base.base);
}
static inline void *ivpu_bo_vaddr(struct ivpu_bo *bo)
{
return bo->kvaddr;
return bo->base.vaddr;
}
static inline size_t ivpu_bo_size(struct ivpu_bo *bo)
{
return bo->base.size;
}
static inline struct page *ivpu_bo_get_page(struct ivpu_bo *bo, u64 offset)
{
if (offset > ivpu_bo_size(bo) || !bo->pages)
return NULL;
return bo->pages[offset / PAGE_SIZE];
return bo->base.base.size;
}
static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
@@ -95,20 +64,9 @@ static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
}
static inline pgprot_t ivpu_bo_pgprot(struct ivpu_bo *bo, pgprot_t prot)
{
if (bo->flags & DRM_IVPU_BO_WC)
return pgprot_writecombine(prot);
if (bo->flags & DRM_IVPU_BO_UNCACHED)
return pgprot_noncached(prot);
return prot;
}
static inline struct ivpu_device *ivpu_bo_to_vdev(struct ivpu_bo *bo)
{
return to_ivpu_device(bo->base.dev);
return to_ivpu_device(bo->base.base.dev);
}
static inline void *ivpu_to_cpu_addr(struct ivpu_bo *bo, u32 vpu_addr)

drivers/accel/ivpu/ivpu_job.c

@@ -266,7 +266,7 @@ static void job_release(struct kref *ref)
for (i = 0; i < job->bo_count; i++)
if (job->bos[i])
drm_gem_object_put(&job->bos[i]->base);
drm_gem_object_put(&job->bos[i]->base.base);
dma_fence_put(job->done_fence);
ivpu_file_priv_put(&job->file_priv);
@@ -450,7 +450,7 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
}
bo = job->bos[CMD_BUF_IDX];
if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ)) {
if (!dma_resv_test_signaled(bo->base.base.resv, DMA_RESV_USAGE_READ)) {
ivpu_warn(vdev, "Buffer is already in use\n");
return -EBUSY;
}
@@ -470,7 +470,7 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
}
for (i = 0; i < buf_count; i++) {
ret = dma_resv_reserve_fences(job->bos[i]->base.resv, 1);
ret = dma_resv_reserve_fences(job->bos[i]->base.base.resv, 1);
if (ret) {
ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
goto unlock_reservations;
@@ -479,7 +479,7 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
for (i = 0; i < buf_count; i++) {
usage = (i == CMD_BUF_IDX) ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_BOOKKEEP;
dma_resv_add_fence(job->bos[i]->base.resv, job->done_fence, usage);
dma_resv_add_fence(job->bos[i]->base.base.resv, job->done_fence, usage);
}
unlock_reservations: