drm/i915: Fix race on unreferencing the wrong mmio-flip-request

As we perform the mmio-flip without any locking and then try to acquire
the struct_mutex prior to dereferencing the request, it is possible for
userspace to queue a new pageflip before the worker can finish clearing
the old state - and then it will clear the new flip request. The result
is that the new flip could be completed before the GPU has finished
rendering.
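
To make the window concrete, the racy pattern can be modelled in a few
lines of userspace C with pthreads. This is only a sketch: shared_req,
queue_flip() and flip_worker() are illustrative stand-ins for the
per-CRTC mmio_flip.req slot and its worker, not the driver's code.

/* Minimal userspace model of the race: a single shared "request"
 * slot, cleared by an asynchronous worker. If a second flip is
 * queued before the first worker runs, the worker frees and clears
 * the *second* flip's request. Illustrative names only. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct request { int seqno; };

static struct request *shared_req;	/* models crtc->mmio_flip.req */
static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;

static void *flip_worker(void *arg)
{
	/* In the driver this first waits for the GPU; that wait is
	 * the window in which userspace can queue the next flip. */
	pthread_mutex_lock(&slot_lock);
	if (shared_req) {
		/* Clears whatever is in the slot *now*, which may be
		 * a later flip's request, not the one this worker
		 * was queued for. */
		printf("worker released request %d\n", shared_req->seqno);
		free(shared_req);
		shared_req = NULL;
	}
	pthread_mutex_unlock(&slot_lock);
	return NULL;
}

static void queue_flip(int seqno)
{
	struct request *rq = malloc(sizeof(*rq));

	if (!rq)
		return;
	rq->seqno = seqno;
	pthread_mutex_lock(&slot_lock);
	shared_req = rq;	/* silently replaces the previous request */
	pthread_mutex_unlock(&slot_lock);
}

int main(void)
{
	pthread_t worker;

	queue_flip(1);
	pthread_create(&worker, NULL, flip_worker, NULL);
	queue_flip(2);		/* may land before the first worker runs */
	pthread_join(&worker, NULL);
	/* If queue_flip(2) won the race, the worker released request 2,
	 * so flip 2 would "complete" before its rendering finished. */
	return 0;
}

Compile with gcc -pthread: whenever queue_flip(2) beats the worker,
the worker releases request 2 instead of request 1.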

The bug stems from removing the seqno checking in
commit 536f5b5e86
Author: Ander Conselvan de Oliveira <ander.conselvan.de.oliveira@intel.com>
Date:   Thu Nov 6 11:03:40 2014 +0200

    drm/i915: Make mmio flip wait for seqno in the work function

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ander Conselvan de Oliveira <ander.conselvan.de.oliveira@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
commit b2cfe0ab63 (parent d94b5030d2)
Authored by Chris Wilson on 2015-04-27 13:41:16 +01:00; committed by Daniel Vetter.
3 files changed, 25 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h

@@ -2161,10 +2161,12 @@ i915_gem_request_get_ring(struct drm_i915_gem_request *req)
 	return req ? req->ring : NULL;
 }
 
-static inline void
+static inline struct drm_i915_gem_request *
 i915_gem_request_reference(struct drm_i915_gem_request *req)
 {
-	kref_get(&req->ref);
+	if (req)
+		kref_get(&req->ref);
+	return req;
 }
 
 static inline void

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c

@@ -10793,22 +10793,18 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
 
 static void intel_mmio_flip_work_func(struct work_struct *work)
 {
-	struct intel_crtc *crtc =
-		container_of(work, struct intel_crtc, mmio_flip.work);
-	struct intel_mmio_flip *mmio_flip;
+	struct intel_mmio_flip *mmio_flip =
+		container_of(work, struct intel_mmio_flip, work);
 
-	mmio_flip = &crtc->mmio_flip;
-	if (mmio_flip->req)
-		WARN_ON(__i915_wait_request(mmio_flip->req,
-					    crtc->reset_counter,
-					    false, NULL, NULL) != 0);
+	if (mmio_flip->rq)
+		WARN_ON(__i915_wait_request(mmio_flip->rq,
+					    mmio_flip->crtc->reset_counter,
+					    false, NULL, NULL));
 
-	intel_do_mmio_flip(crtc);
-	if (mmio_flip->req) {
-		mutex_lock(&crtc->base.dev->struct_mutex);
-		i915_gem_request_assign(&mmio_flip->req, NULL);
-		mutex_unlock(&crtc->base.dev->struct_mutex);
-	}
+	intel_do_mmio_flip(mmio_flip->crtc);
+
+	i915_gem_request_unreference__unlocked(mmio_flip->rq);
+	kfree(mmio_flip);
 }
 
 static int intel_queue_mmio_flip(struct drm_device *dev,
@@ -10818,12 +10814,17 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
 					 struct intel_engine_cs *ring,
 					 uint32_t flags)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_mmio_flip *mmio_flip;
+
+	mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
+	if (mmio_flip == NULL)
+		return -ENOMEM;
 
-	i915_gem_request_assign(&intel_crtc->mmio_flip.req,
-				obj->last_write_req);
+	mmio_flip->rq = i915_gem_request_reference(obj->last_write_req);
+	mmio_flip->crtc = to_intel_crtc(crtc);
 
-	schedule_work(&intel_crtc->mmio_flip.work);
+	INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
+	schedule_work(&mmio_flip->work);
 
 	return 0;
 }
@@ -13564,8 +13565,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
 	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
 
-	INIT_WORK(&intel_crtc->mmio_flip.work, intel_mmio_flip_work_func);
-
 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
 
 	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h

@@ -459,8 +459,9 @@ struct intel_pipe_wm {
 };
 
 struct intel_mmio_flip {
-	struct drm_i915_gem_request *req;
 	struct work_struct work;
+	struct drm_i915_gem_request *rq;
+	struct intel_crtc *crtc;
 };
 
 struct skl_pipe_wm {
@@ -544,7 +545,6 @@ struct intel_crtc {
 	} wm;
 
 	int scanline_offset;
-	struct intel_mmio_flip mmio_flip;
 
 	struct intel_crtc_atomic_commit atomic;
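
The shape of the fix, in the same userspace model as above: each queued
flip now owns a heap-allocated tracking struct carrying its own
reference to the request, so a worker can only ever release the request
it was queued with. Again a sketch with illustrative names, not the
driver's code:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct request { int seqno; };

/* Per-flip tracking struct, mirroring the new intel_mmio_flip:
 * ownership travels with the work item instead of living in the CRTC. */
struct mmio_flip {
	struct request *rq;
};

static void *flip_worker(void *arg)
{
	struct mmio_flip *flip = arg;

	/* ... wait for flip->rq to complete ... */
	printf("worker released request %d\n", flip->rq->seqno);
	free(flip->rq);		/* always the request this flip owns */
	free(flip);
	return NULL;
}

static int queue_flip(int seqno)
{
	struct mmio_flip *flip = malloc(sizeof(*flip));
	pthread_t worker;

	if (!flip)
		return -1;
	flip->rq = malloc(sizeof(*flip->rq));
	if (!flip->rq) {
		free(flip);
		return -1;
	}
	flip->rq->seqno = seqno;
	pthread_create(&worker, NULL, flip_worker, flip);
	pthread_detach(&worker);
	return 0;
}

int main(void)
{
	queue_flip(1);
	queue_flip(2);	/* no longer races: each flip owns its request */
	sleep(1);	/* crude wait for the detached workers in this sketch */
	return 0;
}

The real patch additionally makes i915_gem_request_reference() NULL-safe
and return the request, so the reference can be taken inline at
assignment, and drops it via i915_gem_request_unreference__unlocked() so
the worker no longer has to take struct_mutex at all.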