drm/i915: Add ww locking to dma-buf ops, v2.

vmap is using pin_pages, but needs to use ww locking;
add pin_pages_unlocked to correctly lock the mapping.

Also add ww locking to begin/end cpu access.

Changes since v1:
- Fix i915_gem_map_dma_buf by using pin_pages_unlocked().

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-38-maarten.lankhorst@linux.intel.com
This commit is contained in:
Maarten Lankhorst 2021-03-23 16:50:26 +01:00 committed by Daniel Vetter
parent c858ffa177
commit e944e3cf58

View file

@@ -25,7 +25,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 	struct scatterlist *src, *dst;
 	int ret, i;
 
-	ret = i915_gem_object_pin_pages(obj);
+	ret = i915_gem_object_pin_pages_unlocked(obj);
 	if (ret)
 		goto err;
@@ -82,7 +82,7 @@ static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	void *vaddr;
 
-	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
 	if (IS_ERR(vaddr))
 		return PTR_ERR(vaddr);
@@ -123,42 +123,48 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+	struct i915_gem_ww_ctx ww;
 	int err;
 
-	err = i915_gem_object_pin_pages(obj);
-	if (err)
-		return err;
-
-	err = i915_gem_object_lock_interruptible(obj, NULL);
-	if (err)
-		goto out;
-
-	err = i915_gem_object_set_to_cpu_domain(obj, write);
-	i915_gem_object_unlock(obj);
-
-out:
-	i915_gem_object_unpin_pages(obj);
+	i915_gem_ww_ctx_init(&ww, true);
+retry:
+	err = i915_gem_object_lock(obj, &ww);
+	if (!err)
+		err = i915_gem_object_pin_pages(obj);
+	if (!err) {
+		err = i915_gem_object_set_to_cpu_domain(obj, write);
+		i915_gem_object_unpin_pages(obj);
+	}
+	if (err == -EDEADLK) {
+		err = i915_gem_ww_ctx_backoff(&ww);
+		if (!err)
+			goto retry;
+	}
+	i915_gem_ww_ctx_fini(&ww);
 	return err;
 }
 
 static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+	struct i915_gem_ww_ctx ww;
 	int err;
 
-	err = i915_gem_object_pin_pages(obj);
-	if (err)
-		return err;
-
-	err = i915_gem_object_lock_interruptible(obj, NULL);
-	if (err)
-		goto out;
-
-	err = i915_gem_object_set_to_gtt_domain(obj, false);
-	i915_gem_object_unlock(obj);
-
-out:
-	i915_gem_object_unpin_pages(obj);
+	i915_gem_ww_ctx_init(&ww, true);
+retry:
+	err = i915_gem_object_lock(obj, &ww);
+	if (!err)
+		err = i915_gem_object_pin_pages(obj);
+	if (!err) {
+		err = i915_gem_object_set_to_gtt_domain(obj, false);
+		i915_gem_object_unpin_pages(obj);
+	}
+	if (err == -EDEADLK) {
+		err = i915_gem_ww_ctx_backoff(&ww);
+		if (!err)
+			goto retry;
+	}
+	i915_gem_ww_ctx_fini(&ww);
 	return err;
 }