Merge tag 'topic/drm-misc-2016-03-22' of git://anongit.freedesktop.org/drm-intel into drm-next

Bunch of small fixups all over. Plus a dma-buf patch that Sumit asked me
to cherry-pick since that's the only one he had in his tree.

There's a sparse issue outstanding in the color mgr stuff, but Lionel is
still working on something that actually appeases sparse.

* tag 'topic/drm-misc-2016-03-22' of git://anongit.freedesktop.org/drm-intel:
  dma-buf/fence: fix fence_is_later v2
  dma-buf: Update docs for SYNC ioctl
  drm: remove excess description
  dma-buf, drm, ion: Propagate error code from dma_buf_start_cpu_access()
  drm/atmel-hlcdc: use helper to get crtc state
  drm/atomic: use helper to get crtc state
commit 17efca93c8
Dave Airlie, 2016-03-24 08:41:59 +10:00
11 changed files with 45 additions and 40 deletions


@@ -352,7 +352,8 @@ Being able to mmap an export dma-buf buffer object has 2 main use-cases:
No special interfaces, userspace simply calls mmap on the dma-buf fd, making
sure that the cache synchronization ioctl (DMA_BUF_IOCTL_SYNC) is *always*
used when the access happens. This is discussed next paragraphs.
used when the access happens. Note that DMA_BUF_IOCTL_SYNC can fail with
-EAGAIN or -EINTR, in which case it must be restarted.
Some systems might need some sort of cache coherency management e.g. when
CPU and GPU domains are being accessed through dma-buf at the same time. To
@@ -366,10 +367,10 @@ Being able to mmap an export dma-buf buffer object has 2 main use-cases:
want (with the new data being consumed by the GPU or say scanout device)
- munmap once you don't need the buffer any more
Therefore, for correctness and optimal performance, systems with the memory
cache shared by the GPU and CPU i.e. the "coherent" and also the
"incoherent" are always required to use SYNC_START and SYNC_END before and
after, respectively, when accessing the mapped address.
For correctness and optimal performance, it is always required to use
SYNC_START and SYNC_END before and after, respectively, when accessing the
mapped address. Userspace cannot rely on coherent access, even when there
are systems where it just works without calling these ioctls.
2. Supporting existing mmap interfaces in importers
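
To make the documented protocol concrete, here is a minimal userspace sketch (not part of this series) that maps a dma-buf fd and brackets a CPU write with DMA_BUF_IOCTL_SYNC, restarting the ioctl on EAGAIN/EINTR as the text above requires. The fd and size are assumed to come from whatever driver exported the buffer; helper names like cpu_fill() are illustrative only.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>

/* Issue DMA_BUF_IOCTL_SYNC, restarting on EAGAIN/EINTR as required above. */
static int dmabuf_sync(int fd, uint64_t flags)
{
	struct dma_buf_sync sync = { .flags = flags };
	int ret;

	do {
		ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
	} while (ret == -1 && (errno == EAGAIN || errno == EINTR));

	return ret;
}

/* Fill a dma-buf of 'size' bytes from the CPU through an mmap of its fd. */
static int cpu_fill(int dmabuf_fd, size_t size)
{
	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			 dmabuf_fd, 0);
	int err = -1;

	if (map == MAP_FAILED)
		return -1;

	if (dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE))
		goto out;

	memset(map, 0, size);		/* the actual CPU access */

	if (dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE))
		goto out;

	err = 0;
out:
	munmap(map, size);
	return err;
}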


@@ -259,6 +259,7 @@ static long dma_buf_ioctl(struct file *file,
struct dma_buf *dmabuf;
struct dma_buf_sync sync;
enum dma_data_direction direction;
int ret;
dmabuf = file->private_data;
@@ -285,11 +286,11 @@ static long dma_buf_ioctl(struct file *file,
}
if (sync.flags & DMA_BUF_SYNC_END)
dma_buf_end_cpu_access(dmabuf, direction);
ret = dma_buf_end_cpu_access(dmabuf, direction);
else
dma_buf_begin_cpu_access(dmabuf, direction);
ret = dma_buf_begin_cpu_access(dmabuf, direction);
return 0;
return ret;
default:
return -ENOTTY;
}
@@ -611,15 +612,19 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
* @dmabuf: [in] buffer to complete cpu access for.
* @direction: [in] length of range for cpu access.
*
* This call must always succeed.
* Can return negative error values, returns 0 on success.
*/
void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
int ret = 0;
WARN_ON(!dmabuf);
if (dmabuf->ops->end_cpu_access)
dmabuf->ops->end_cpu_access(dmabuf, direction);
ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
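
With dma_buf_end_cpu_access() now able to fail, in-kernel importers should check and propagate its result instead of discarding it (the i915 and udl hunks below do exactly that). A rough sketch of a caller, with hypothetical function names, assuming only <linux/dma-buf.h>:

#include <linux/dma-buf.h>

/* Hypothetical importer path: bracket a CPU read of an imported buffer. */
static int my_importer_cpu_read(struct dma_buf *dmabuf)
{
	int ret;

	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* ... read the buffer contents through an existing kernel mapping ... */

	/* may now return e.g. -EINTR; pass it up instead of ignoring it */
	return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
}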


@@ -558,7 +558,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
if (!state->base.crtc || !fb)
return 0;
crtc_state = s->state->crtc_states[drm_crtc_index(s->crtc)];
crtc_state = drm_atomic_get_existing_crtc_state(s->state, s->crtc);
mode = &crtc_state->adjusted_mode;
state->src_x = s->src_x;


@@ -380,7 +380,6 @@ EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
* drm_atomic_replace_property_blob - replace a blob property
* @blob: a pointer to the member blob to be replaced
* @new_blob: the new blob to replace with
* @expected_size: the expected size of the new blob
* @replaced: whether the blob has been replaced
*
* RETURNS:


@@ -67,7 +67,8 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
struct drm_crtc_state *crtc_state;
if (plane->state->crtc) {
crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)];
crtc_state = drm_atomic_get_existing_crtc_state(state,
plane->state->crtc);
if (WARN_ON(!crtc_state))
return;
@@ -76,8 +77,8 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
}
if (plane_state->crtc) {
crtc_state =
state->crtc_states[drm_crtc_index(plane_state->crtc)];
crtc_state = drm_atomic_get_existing_crtc_state(state,
plane_state->crtc);
if (WARN_ON(!crtc_state))
return;
@@ -374,8 +375,8 @@ mode_fixup(struct drm_atomic_state *state)
if (!conn_state->crtc || !conn_state->best_encoder)
continue;
crtc_state =
state->crtc_states[drm_crtc_index(conn_state->crtc)];
crtc_state = drm_atomic_get_existing_crtc_state(state,
conn_state->crtc);
/*
* Each encoder has at most one connector (since we always steal
@@ -679,7 +680,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
if (!old_conn_state->crtc)
continue;
old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)];
old_crtc_state = drm_atomic_get_existing_crtc_state(old_state,
old_conn_state->crtc);
if (!old_crtc_state->active ||
!drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
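
The atmel-hlcdc and atomic-helper hunks above are pure refactors: the open-coded crtc_states[] lookups are replaced by the drm_atomic_get_existing_crtc_state() helper, which returns a CRTC's state only if it is already tracked in the drm_atomic_state (and NULL otherwise), without taking locks or allocating anything. Conceptually the helper amounts to the following sketch; see include/drm/drm_atomic.h for the real definition:

/* Sketch only: the behaviour the converted call sites rely on. */
static inline struct drm_crtc_state *
get_existing_crtc_state(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	return state->crtc_states[drm_crtc_index(crtc)];
}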


@@ -228,25 +228,20 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
return ret;
}
static void i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
bool was_interruptible;
int ret;
mutex_lock(&dev->struct_mutex);
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
ret = i915_gem_object_set_to_gtt_domain(obj, false);
dev_priv->mm.interruptible = was_interruptible;
mutex_unlock(&dev->struct_mutex);
if (unlikely(ret))
DRM_ERROR("unable to flush buffer following CPU access; rendering may be corrupt\n");
return ret;
}
static const struct dma_buf_ops i915_dmabuf_ops = {


@@ -97,11 +97,12 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
return omap_gem_get_pages(obj, &pages, true);
}
static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
enum dma_data_direction dir)
static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
enum dma_data_direction dir)
{
struct drm_gem_object *obj = buffer->priv;
omap_gem_put_pages(obj);
return 0;
}


@@ -423,8 +423,8 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
}
if (ufb->obj->base.import_attach) {
dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
DMA_FROM_DEVICE);
ret = dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
DMA_FROM_DEVICE);
}
unlock:


@@ -1075,14 +1075,16 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
return PTR_ERR_OR_ZERO(vaddr);
}
static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
struct ion_buffer *buffer = dmabuf->priv;
mutex_lock(&buffer->lock);
ion_buffer_kmap_put(buffer);
mutex_unlock(&buffer->lock);
return 0;
}
static struct dma_buf_ops dma_buf_ops = {


@@ -94,7 +94,7 @@ struct dma_buf_ops {
void (*release)(struct dma_buf *);
int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
void (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
void *(*kmap_atomic)(struct dma_buf *, unsigned long);
void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
void *(*kmap)(struct dma_buf *, unsigned long);
@@ -224,8 +224,8 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
enum dma_data_direction);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
void dma_buf_end_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
void *dma_buf_kmap(struct dma_buf *, unsigned long);


@@ -292,7 +292,7 @@ static inline bool fence_is_later(struct fence *f1, struct fence *f2)
if (WARN_ON(f1->context != f2->context))
return false;
return f1->seqno - f2->seqno < INT_MAX;
return (int)(f1->seqno - f2->seqno) > 0;
}
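
The old test used an unsigned comparison, so two fences with equal seqnos gave a difference of 0, 0 < INT_MAX held, and a fence was reported as later than itself. Casting the (wrapping) unsigned difference to a signed int and testing for > 0 returns false for equal seqnos while still ordering correctly across a 32-bit wraparound. A stand-alone model of the fixed comparison (seqno is a 32-bit unsigned value in struct fence):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Same arithmetic as the fixed fence_is_later(), on bare u32 seqnos. */
static bool is_later(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	assert(!is_later(5, 5));		/* equal: not later (old check said it was) */
	assert(is_later(6, 5));			/* ordinary ordering */
	assert(is_later(1, UINT32_MAX));	/* 1 was signalled after a wraparound: later */
	assert(!is_later(UINT32_MAX, 1));
	return 0;
}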
/**