Merge tag 'drm-intel-next-2013-08-09' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Daniel writes:
New pile of stuff for -next:
- Cleanup of the old crtc helper callbacks, all encoders are now converted
  to the i915 modeset infrastructure.
- Massive amount of wm patches from Ville for ilk, snb, ivb and hsw; this is
  prep work to eventually get things going for nuclear pageflips, where we
  need to adjust watermarks on the fly.
- More vm/vma patches from Ben. This refactoring isn't fully rolled out yet;
  the execbuf conversion and some of the low-level bind/unbind support code
  are still missing (see the sketch of the new VMA lookup below).
- Convert our hdmi infoframe code to use the new common helper functions
  (Damien). This contains some bugfixes for the common infoframe helpers.
- Some cruft removal from Damien.
- Various smaller bits&pieces all over, as usual.
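
As a condensed illustration of the vm/vma direction, here is a sketch based
on the struct i915_vma fields and the i915_gem_obj_to_vma() helper added in
this pull (trimmed for exposition, not the full driver structures):

struct i915_vma {
	struct drm_mm_node node;	 /* GTT space allocated for this binding */
	struct drm_i915_gem_object *obj; /* backing GEM object */
	struct i915_address_space *vm;	 /* address space the object is bound into */
	struct list_head mm_list;	 /* place on the VM's active/inactive lists */
	struct list_head vma_link;	 /* link in the object's vma_list */
};

/* An object may be bound into several address spaces at once; per-VM
 * state is found by walking the object's VMA list. */
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->vm == vm)
			return vma;

	return NULL;
}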

* tag 'drm-intel-next-2013-08-09' of git://people.freedesktop.org/~danvet/drm-intel: (105 commits)
  drm/i915: Fix FB WM for HSW
  drm/i915: expose HDMI connectors on port C on BYT
  drm/i915: fix a limit check in hsw_compute_wm_results()
  drm/i915: unbreak i915_gem_object_ggtt_unbind()
  drm/i915: Make intel_set_mode() static
  drm/i915: Remove intel_modeset_disable()
  drm/i915: Make intel_encoder_dpms() static
  drm/i915: Make i915_hangcheck_elapsed() static
  drm/i915: Fix #endif comment
  drm/i915: Remove i915_gem_object_check_coherency()
  drm/i915: Remove stale prototypes
  drm/i915: List objects allocated from stolen memory in debugfs
  drm/i915: Always call intel_update_sprite_watermarks() when disabling a plane
  drm/i915: Pass plane and crtc to intel_update_sprite_watermarks
  drm/i915: Don't try to disable plane if it's already disabled
  drm/i915: Pass crtc to our update/disable_plane hooks
  drm/i915: Split plane watermark parameters into a separate struct
  drm/i915: Pull some watermarks state into a separate structure
  drm/i915: Calculate max watermark levels for ILK+
  drm/i915: Rename hsw_lp_wm_result to intel_wm_level
  ...
commit 9712def2b3
Dave Airlie, 2013-08-21 12:48:59 +10:00
35 changed files with 1679 additions and 1142 deletions

drivers/gpu/drm/drm_edid.c

@ -3102,11 +3102,13 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
if (err < 0)
return err;
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
frame->pixel_repeat = 1;
frame->video_code = drm_match_cea_mode(mode);
if (!frame->video_code)
return 0;
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
frame->active_info_valid = 1;
frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
return 0;
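
For context, a minimal usage sketch of the helper above, as a driver might
call it; write_avi_infoframe() and the buffer handling are illustrative,
only the two helpers come from drm_edid.c and linux/hdmi.h:

#include <linux/hdmi.h>
#include <drm/drm_edid.h>

/* Illustrative caller: derive an AVI infoframe from a mode, then pack it
 * into wire format (header + checksum + payload) for the hardware. */
static ssize_t write_avi_infoframe(const struct drm_display_mode *mode)
{
	struct hdmi_avi_infoframe frame;
	u8 buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	int err;

	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
	if (err < 0)
		return err;

	/* returns the number of bytes packed, or a negative error */
	return hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
}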

drivers/gpu/drm/i915/i915_debugfs.c

@ -30,6 +30,7 @@
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
@ -89,13 +90,20 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
}
}
static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
return obj->has_global_gtt_mapping ? "g" : " ";
}
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
struct i915_vma *vma;
seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
get_global_flag(obj),
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain,
@ -111,9 +119,14 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (pinned x %d)", obj->pin_count);
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
if (i915_gem_obj_ggtt_bound(obj))
seq_printf(m, " (gtt offset: %08lx, size: %08x)",
i915_gem_obj_ggtt_offset(obj), (unsigned int)i915_gem_obj_ggtt_size(obj));
list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (!i915_is_ggtt(vma->vm))
seq_puts(m, " (pp");
else
seq_puts(m, " (g");
seq_printf(m, "gtt offset: %08lx, size: %08lx)",
vma->node.start, vma->node.size);
}
if (obj->stolen)
seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
if (obj->pin_mappable || obj->fault_mappable) {
@ -137,7 +150,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
size_t total_obj_size, total_gtt_size;
int count, ret;
@ -145,6 +158,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
if (ret)
return ret;
/* FIXME: the user of this interface might want more than just GGTT */
switch (list) {
case ACTIVE_LIST:
seq_puts(m, "Active:\n");
@ -160,14 +174,75 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
}
total_obj_size = total_gtt_size = count = 0;
list_for_each_entry(obj, head, mm_list) {
seq_puts(m, " ");
describe_obj(m, obj);
seq_putc(m, '\n');
list_for_each_entry(vma, head, mm_list) {
seq_printf(m, " ");
describe_obj(m, vma->obj);
seq_printf(m, "\n");
total_obj_size += vma->obj->base.size;
total_gtt_size += vma->node.size;
count++;
}
mutex_unlock(&dev->struct_mutex);
seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
count, total_obj_size, total_gtt_size);
return 0;
}
static int obj_rank_by_stolen(void *priv,
struct list_head *A, struct list_head *B)
{
struct drm_i915_gem_object *a =
container_of(A, struct drm_i915_gem_object, exec_list);
struct drm_i915_gem_object *b =
container_of(B, struct drm_i915_gem_object, exec_list);
return a->stolen->start - b->stolen->start;
}
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
size_t total_obj_size, total_gtt_size;
LIST_HEAD(stolen);
int count, ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
total_obj_size = total_gtt_size = count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (obj->stolen == NULL)
continue;
list_add(&obj->exec_list, &stolen);
total_obj_size += obj->base.size;
total_gtt_size += i915_gem_obj_ggtt_size(obj);
count++;
}
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
if (obj->stolen == NULL)
continue;
list_add(&obj->exec_list, &stolen);
total_obj_size += obj->base.size;
count++;
}
list_sort(NULL, &stolen, obj_rank_by_stolen);
seq_puts(m, "Stolen:\n");
while (!list_empty(&stolen)) {
obj = list_first_entry(&stolen, typeof(*obj), exec_list);
seq_puts(m, " ");
describe_obj(m, obj);
seq_putc(m, '\n');
list_del_init(&obj->exec_list);
}
mutex_unlock(&dev->struct_mutex);
seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
@ -212,7 +287,18 @@ static int per_file_stats(int id, void *ptr, void *data)
return 0;
}
static int i915_gem_object_info(struct seq_file *m, void *data)
#define count_vmas(list, member) do { \
list_for_each_entry(vma, list, member) { \
size += i915_gem_obj_ggtt_size(vma->obj); \
++count; \
if (vma->obj->map_and_fenceable) { \
mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
++mappable_count; \
} \
} \
} while (0)
static int i915_gem_object_info(struct seq_file *m, void* data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
@ -222,6 +308,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
struct drm_i915_gem_object *obj;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_file *file;
struct i915_vma *vma;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@ -238,12 +325,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
count_objects(&vm->active_list, mm_list);
count_vmas(&vm->active_list, mm_list);
seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
count_objects(&vm->inactive_list, mm_list);
count_vmas(&vm->inactive_list, mm_list);
seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
@ -1099,6 +1186,12 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
} else {
seq_puts(m, "FBC disabled: ");
switch (dev_priv->fbc.no_fbc_reason) {
case FBC_OK:
seq_puts(m, "FBC actived, but currently disabled in hardware");
break;
case FBC_UNSUPPORTED:
seq_puts(m, "unsupported by this chipset");
break;
case FBC_NO_OUTPUT:
seq_puts(m, "no outputs");
break;
@ -1756,7 +1849,8 @@ i915_drop_caches_set(void *data, u64 val)
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj, *next;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct i915_address_space *vm;
struct i915_vma *vma, *x;
int ret;
DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
@ -1777,13 +1871,17 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_retire_requests(dev);
if (val & DROP_BOUND) {
list_for_each_entry_safe(obj, next, &vm->inactive_list,
mm_list)
if (obj->pin_count == 0) {
ret = i915_gem_object_unbind(obj);
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
list_for_each_entry_safe(vma, x, &vm->inactive_list,
mm_list) {
if (vma->obj->pin_count)
continue;
ret = i915_vma_unbind(vma);
if (ret)
goto unlock;
}
}
}
if (val & DROP_UNBOUND) {
@ -2078,6 +2176,7 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
{"i915_gem_stolen", i915_gem_stolen_list_info },
{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
@ -2110,7 +2209,7 @@ static struct drm_info_list i915_debugfs_list[] = {
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
struct i915_debugfs_files {
static struct i915_debugfs_files {
const char *name;
const struct file_operations *fops;
} i915_debugfs_files[] = {

drivers/gpu/drm/i915/i915_dma.c

@ -1485,10 +1485,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
i915_dump_device_info(dev_priv);
INIT_LIST_HEAD(&dev_priv->vm_list);
INIT_LIST_HEAD(&dev_priv->gtt.base.global_link);
list_add(&dev_priv->gtt.base.global_link, &dev_priv->vm_list);
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;

drivers/gpu/drm/i915/i915_drv.h

@ -201,7 +201,6 @@ struct intel_ddi_plls {
#define DRIVER_MINOR 6
#define DRIVER_PATCHLEVEL 0
#define WATCH_COHERENCY 0
#define WATCH_LISTS 0
#define WATCH_GTT 0
@ -323,8 +322,8 @@ struct drm_i915_error_state {
u32 purgeable:1;
s32 ring:4;
u32 cache_level:2;
} *active_bo, *pinned_bo;
u32 active_bo_count, pinned_bo_count;
} **active_bo, **pinned_bo;
u32 *active_bo_count, *pinned_bo_count;
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
};
@ -359,9 +358,10 @@ struct drm_i915_display_funcs {
struct dpll *match_clock,
struct dpll *best_clock);
void (*update_wm)(struct drm_device *dev);
void (*update_sprite_wm)(struct drm_device *dev, int pipe,
void (*update_sprite_wm)(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
bool enable);
bool enable, bool scaled);
void (*modeset_global_resources)(struct drm_device *dev);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
@ -449,8 +449,11 @@ struct intel_device_info {
enum i915_cache_level {
I915_CACHE_NONE = 0,
I915_CACHE_LLC,
I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
caches, eg sampler/render caches, and the
large Last-Level-Cache. LLC is coherent with
the CPU, but L3 is only visible to the GPU. */
};
typedef uint32_t gen6_gtt_pte_t;
@ -542,7 +545,12 @@ struct i915_hw_ppgtt {
int (*enable)(struct drm_device *dev);
};
/* To make things as simple as possible (ie. no refcounting), a VMA's lifetime
/**
* A VMA represents a GEM BO that is bound into an address space. Therefore, a
* VMA's presence cannot be guaranteed before binding, or after unbinding the
* object into/from the address space.
*
* To make things as simple as possible (ie. no refcounting), a VMA's lifetime
* will always be <= an object's lifetime. So object refcounting should cover us.
*/
struct i915_vma {
@ -550,6 +558,9 @@ struct i915_vma {
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
/** This object's place on the active/inactive lists */
struct list_head mm_list;
struct list_head vma_link; /* Link in the object's VMA list */
};
@ -589,7 +600,9 @@ struct i915_fbc {
int interval;
} *fbc_work;
enum {
enum no_fbc_reason {
FBC_OK, /* FBC is enabled */
FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
FBC_NO_OUTPUT, /* no outputs enabled to compress */
FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
@ -1045,6 +1058,19 @@ struct intel_vbt_data {
struct child_device_config *child_dev;
};
enum intel_ddb_partitioning {
INTEL_DDB_PART_1_2,
INTEL_DDB_PART_5_6, /* IVB+ */
};
struct intel_wm_level {
bool enable;
uint32_t pri_val;
uint32_t spr_val;
uint32_t cur_val;
uint32_t fbc_val;
};
typedef struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
@ -1214,6 +1240,20 @@ typedef struct drm_i915_private {
struct i915_suspend_saved_registers regfile;
struct {
/*
* Raw watermark latency values:
* in 0.1us units for WM0,
* in 0.5us units for WM1+.
*/
/* primary */
uint16_t pri_latency[5];
/* sprite */
uint16_t spr_latency[5];
/* cursor */
uint16_t cur_latency[5];
} wm;
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct i915_dri1_state dri1;
@ -1221,6 +1261,11 @@ typedef struct drm_i915_private {
struct i915_ums_state ums;
} drm_i915_private_t;
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
return dev->dev_private;
}
/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
@ -1265,9 +1310,7 @@ struct drm_i915_gem_object {
struct drm_mm_node *stolen;
struct list_head global_list;
/** This object's place on the active/inactive lists */
struct list_head ring_list;
struct list_head mm_list;
/** This object's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
@ -1386,52 +1429,6 @@ struct drm_i915_gem_object {
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
/* This is a temporary define to help transition us to real VMAs. If you see
* this, you're either reviewing code, or bisecting it. */
static inline struct i915_vma *
__i915_gem_obj_to_vma(struct drm_i915_gem_object *obj)
{
if (list_empty(&obj->vma_list))
return NULL;
return list_first_entry(&obj->vma_list, struct i915_vma, vma_link);
}
/* Whether or not this object is currently mapped by the translation tables */
static inline bool
i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
{
struct i915_vma *vma = __i915_gem_obj_to_vma(o);
if (vma == NULL)
return false;
return drm_mm_node_allocated(&vma->node);
}
/* Offset of the first PTE pointing to this object */
static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
{
BUG_ON(list_empty(&o->vma_list));
return __i915_gem_obj_to_vma(o)->node.start;
}
/* The size used in the translation tables may be larger than the actual size of
* the object on GEN2/GEN3 because of the way tiling is handled. See
* i915_gem_get_gtt_size() for more details.
*/
static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
{
BUG_ON(list_empty(&o->vma_list));
return __i915_gem_obj_to_vma(o)->node.size;
}
static inline void
i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o,
enum i915_cache_level color)
{
__i915_gem_obj_to_vma(o)->node.color = color;
}
/**
* Request queue structure.
*
@ -1482,7 +1479,7 @@ struct drm_i915_file_private {
struct i915_ctx_hang_stats hang_stats;
};
#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
#define INTEL_INFO(dev) (to_i915(dev)->info)
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
@ -1576,7 +1573,7 @@ struct drm_i915_file_private {
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
@ -1668,7 +1665,6 @@ extern void intel_console_resume(struct work_struct *work);
/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
@ -1678,7 +1674,6 @@ extern void intel_pm_init(struct drm_device *dev);
extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_reset(struct drm_device *dev);
extern void intel_uncore_clear_errors(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
@ -1749,11 +1744,13 @@ struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
void i915_gem_vma_destroy(struct i915_vma *vma);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
@ -1842,9 +1839,6 @@ static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
@ -1897,6 +1891,53 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
void i915_gem_restore_fences(struct drm_device *dev);
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
static inline bool i915_is_ggtt(struct i915_address_space *vm)
{
struct i915_address_space *ggtt =
&((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
return vm == ggtt;
}
static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
}
static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
}
static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_size(obj, obj_to_ggtt(obj));
}
static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking)
{
return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
map_and_fenceable, nonblocking);
}
#undef obj_to_ggtt
/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
@ -1949,7 +1990,9 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
int __must_check i915_gem_evict_something(struct drm_device *dev,
struct i915_address_space *vm,
int min_size,
unsigned alignment,
unsigned cache_level,
bool mappable,
@ -1971,7 +2014,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
/* i915_gem_tiling.c */
inline static bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
@ -1984,17 +2027,11 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark);
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif
void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
int handle);
void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark);
/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
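
A hedged usage sketch for the GGTT convenience wrappers declared earlier in
this header (example_ggtt_pin() is illustrative; the pin flags mirror the
converted call sites in this series):

/* Pin a BO into the global GTT (alignment 0, mappable, blocking) and
 * report where it landed; the caller must unpin when done. */
static int example_ggtt_pin(struct drm_i915_gem_object *obj,
			    unsigned long *offset_out)
{
	int ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
	if (ret)
		return ret;

	*offset_out = i915_gem_obj_ggtt_offset(obj);
	return 0;
}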

drivers/gpu/drm/i915/i915_gem.c

@ -39,10 +39,12 @@
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment,
bool map_and_fenceable,
bool nonblocking);
static __must_check int
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
unsigned alignment,
bool map_and_fenceable,
bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
@ -140,7 +142,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_ggtt_bound(obj) && !obj->active;
return i915_gem_obj_bound_any(obj) && !obj->active;
}
int
@ -414,7 +416,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
* anyway again before the next pread happens. */
if (obj->cache_level == I915_CACHE_NONE)
needs_clflush = 1;
if (i915_gem_obj_ggtt_bound(obj)) {
if (i915_gem_obj_bound_any(obj)) {
ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret)
return ret;
@ -586,7 +588,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
char __user *user_data;
int page_offset, page_length, ret;
ret = i915_gem_object_pin(obj, 0, true, true);
ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
if (ret)
goto out;
@ -731,7 +733,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
* right away and we therefore have to clflush anyway. */
if (obj->cache_level == I915_CACHE_NONE)
needs_clflush_after = 1;
if (i915_gem_obj_ggtt_bound(obj)) {
if (i915_gem_obj_bound_any(obj)) {
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
return ret;
@ -1340,7 +1342,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
/* Now bind it into the GTT if needed */
ret = i915_gem_object_pin(obj, 0, true, false);
ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
if (ret)
goto unlock;
@ -1655,11 +1657,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
if (obj->pages == NULL)
return 0;
BUG_ON(i915_gem_obj_ggtt_bound(obj));
if (obj->pages_pin_count)
return -EBUSY;
BUG_ON(i915_gem_obj_bound_any(obj));
/* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
* lists early. */
@ -1679,7 +1681,6 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
bool purgeable_only)
{
struct drm_i915_gem_object *obj, *next;
struct i915_address_space *vm = &dev_priv->gtt.base;
long count = 0;
list_for_each_entry_safe(obj, next,
@ -1693,10 +1694,18 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
}
}
list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) {
if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
i915_gem_object_unbind(obj) == 0 &&
i915_gem_object_put_pages(obj) == 0) {
list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
global_list) {
struct i915_vma *vma, *v;
if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
continue;
list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
if (i915_vma_unbind(vma))
break;
if (!i915_gem_object_put_pages(obj)) {
count += obj->base.size >> PAGE_SHIFT;
if (count >= target)
return count;
@ -1864,7 +1873,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm = &dev_priv->gtt.base;
u32 seqno = intel_ring_get_seqno(ring);
BUG_ON(ring == NULL);
@ -1880,8 +1888,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
obj->active = 1;
}
/* Move from whatever list we were on to the tail of execution. */
list_move_tail(&obj->mm_list, &vm->active_list);
list_move_tail(&obj->ring_list, &ring->active_list);
obj->last_read_seqno = seqno;
@ -1903,14 +1909,14 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
list_move_tail(&obj->mm_list, &vm->inactive_list);
list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
list_del_init(&obj->ring_list);
obj->ring = NULL;
@ -2106,10 +2112,11 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
spin_unlock(&file_priv->mm.lock);
}
static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
if (acthd >= i915_gem_obj_offset(obj, vm) &&
acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
return true;
return false;
@ -2132,6 +2139,17 @@ static bool i915_head_inside_request(const u32 acthd_unmasked,
return false;
}
static struct i915_address_space *
request_to_vm(struct drm_i915_gem_request *request)
{
struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
struct i915_address_space *vm;
vm = &dev_priv->gtt.base;
return vm;
}
static bool i915_request_guilty(struct drm_i915_gem_request *request,
const u32 acthd, bool *inside)
{
@ -2139,9 +2157,9 @@ static bool i915_request_guilty(struct drm_i915_gem_request *request,
* pointing inside the ring, matches the batch_obj address range.
* However this is extremely unlikely.
*/
if (request->batch_obj) {
if (i915_head_inside_object(acthd, request->batch_obj)) {
if (i915_head_inside_object(acthd, request->batch_obj,
request_to_vm(request))) {
*inside = true;
return true;
}
@ -2161,17 +2179,21 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
{
struct i915_ctx_hang_stats *hs = NULL;
bool inside, guilty;
unsigned long offset = 0;
/* Innocent until proven guilty */
guilty = false;
if (request->batch_obj)
offset = i915_gem_obj_offset(request->batch_obj,
request_to_vm(request));
if (ring->hangcheck.action != wait &&
i915_request_guilty(request, acthd, &inside)) {
DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
ring->name,
inside ? "inside" : "flushing",
request->batch_obj ?
i915_gem_obj_ggtt_offset(request->batch_obj) : 0,
offset,
request->ctx ? request->ctx->id : 0,
acthd);
@ -2262,20 +2284,12 @@ void i915_gem_restore_fences(struct drm_device *dev)
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
struct intel_ring_buffer *ring;
int i;
for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_lists(dev_priv, ring);
/* Move everything out of the GPU domains to ensure we do any
* necessary invalidation upon reuse.
*/
list_for_each_entry(obj, &vm->inactive_list, mm_list)
obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
i915_gem_restore_fences(dev);
}
@ -2570,17 +2584,13 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
old_write_domain);
}
/**
* Unbinds an object from the GTT aperture.
*/
int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
int i915_vma_unbind(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma;
int ret;
if (!i915_gem_obj_ggtt_bound(obj))
if (list_empty(&vma->vma_link))
return 0;
if (obj->pin_count)
@ -2603,7 +2613,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (ret)
return ret;
trace_i915_gem_object_unbind(obj);
trace_i915_vma_unbind(vma);
if (obj->has_global_gtt_mapping)
i915_gem_gtt_unbind_object(obj);
@ -2614,12 +2624,11 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
i915_gem_gtt_finish_object(obj);
i915_gem_object_unpin_pages(obj);
list_del(&obj->mm_list);
list_del(&vma->mm_list);
/* Avoid an unnecessary call to unbind on rebind. */
obj->map_and_fenceable = true;
if (i915_is_ggtt(vma->vm))
obj->map_and_fenceable = true;
vma = __i915_gem_obj_to_vma(obj);
list_del(&vma->vma_link);
drm_mm_remove_node(&vma->node);
i915_gem_vma_destroy(vma);
@ -2633,6 +2642,26 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
return 0;
}
/**
* Unbinds an object from the global GTT aperture.
*/
int
i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_address_space *ggtt = &dev_priv->gtt.base;
if (!i915_gem_obj_ggtt_bound(obj))
return 0;
if (obj->pin_count)
return -EBUSY;
BUG_ON(obj->pages == NULL);
return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
}
int i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@ -3050,18 +3079,18 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
* Finds free space in the GTT aperture and binds the object there.
*/
static int
i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment,
bool map_and_fenceable,
bool nonblocking)
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
unsigned alignment,
bool map_and_fenceable,
bool nonblocking)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_address_space *vm = &dev_priv->gtt.base;
u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable;
size_t gtt_max = map_and_fenceable ?
dev_priv->gtt.mappable_end : dev_priv->gtt.base.total;
size_t gtt_max =
map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
struct i915_vma *vma;
int ret;
@ -3106,20 +3135,23 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
i915_gem_object_pin_pages(obj);
vma = i915_gem_vma_create(obj, &dev_priv->gtt.base);
/* FIXME: For now we only ever use 1 VMA per object */
BUG_ON(!i915_is_ggtt(vm));
WARN_ON(!list_empty(&obj->vma_list));
vma = i915_gem_vma_create(obj, vm);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unpin;
}
search_free:
ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
&vma->node,
ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
size, alignment,
obj->cache_level, 0, gtt_max,
DRM_MM_SEARCH_DEFAULT);
if (ret) {
ret = i915_gem_evict_something(dev, size, alignment,
ret = i915_gem_evict_something(dev, vm, size, alignment,
obj->cache_level,
map_and_fenceable,
nonblocking);
@ -3139,19 +3171,22 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
goto err_remove_node;
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &vm->inactive_list);
list_add(&vma->vma_link, &obj->vma_list);
list_add_tail(&vma->mm_list, &vm->inactive_list);
fenceable =
i915_is_ggtt(vm) &&
i915_gem_obj_ggtt_size(obj) == fence_size &&
(i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0;
mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <=
dev_priv->gtt.mappable_end;
mappable =
i915_is_ggtt(vm) &&
vma->node.start + obj->base.size <= dev_priv->gtt.mappable_end;
obj->map_and_fenceable = mappable && fenceable;
/* Map and fenceable only changes if the VM is the global GGTT */
if (i915_is_ggtt(vm))
obj->map_and_fenceable = mappable && fenceable;
trace_i915_gem_object_bind(obj, map_and_fenceable);
trace_i915_vma_bind(vma, map_and_fenceable);
i915_gem_verify_gtt(dev);
return 0;
@ -3257,7 +3292,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
int ret;
/* Not valid to be called on unbound objects. */
if (!i915_gem_obj_ggtt_bound(obj))
if (!i915_gem_obj_bound_any(obj))
return -EINVAL;
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@ -3295,9 +3330,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
old_write_domain);
/* And bump the LRU for this access */
if (i915_gem_object_is_inactive(obj))
list_move_tail(&obj->mm_list,
&dev_priv->gtt.base.inactive_list);
if (i915_gem_object_is_inactive(obj)) {
struct i915_vma *vma = i915_gem_obj_to_vma(obj,
&dev_priv->gtt.base);
if (vma)
list_move_tail(&vma->mm_list,
&dev_priv->gtt.base.inactive_list);
}
return 0;
}
@ -3307,7 +3347,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
struct i915_vma *vma;
int ret;
if (obj->cache_level == cache_level)
@ -3318,13 +3358,17 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return -EBUSY;
}
if (vma && !i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
ret = i915_vma_unbind(vma);
if (ret)
return ret;
break;
}
}
if (i915_gem_obj_ggtt_bound(obj)) {
if (i915_gem_obj_bound_any(obj)) {
ret = i915_gem_object_finish_gpu(obj);
if (ret)
return ret;
@ -3346,8 +3390,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
if (obj->has_aliasing_ppgtt_mapping)
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, cache_level);
i915_gem_obj_ggtt_set_color(obj, cache_level);
}
if (cache_level == I915_CACHE_NONE) {
@ -3373,6 +3415,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
old_write_domain);
}
list_for_each_entry(vma, &obj->vma_list, vma_link)
vma->node.color = cache_level;
obj->cache_level = cache_level;
i915_gem_verify_gtt(dev);
return 0;
@ -3476,7 +3520,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers.
*/
ret = i915_gem_object_pin(obj, alignment, true, false);
ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
if (ret)
return ret;
@ -3619,37 +3663,44 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking)
{
struct i915_vma *vma;
int ret;
if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
if (i915_gem_obj_ggtt_bound(obj)) {
if ((alignment && i915_gem_obj_ggtt_offset(obj) & (alignment - 1)) ||
WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
vma = i915_gem_obj_to_vma(obj, vm);
if (vma) {
if ((alignment &&
vma->node.start & (alignment - 1)) ||
(map_and_fenceable && !obj->map_and_fenceable)) {
WARN(obj->pin_count,
"bo is already pinned with incorrect alignment:"
" offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
i915_gem_obj_ggtt_offset(obj), alignment,
i915_gem_obj_offset(obj, vm), alignment,
map_and_fenceable,
obj->map_and_fenceable);
ret = i915_gem_object_unbind(obj);
ret = i915_vma_unbind(vma);
if (ret)
return ret;
}
}
if (!i915_gem_obj_ggtt_bound(obj)) {
if (!i915_gem_obj_bound(obj, vm)) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
ret = i915_gem_object_bind_to_gtt(obj, alignment,
map_and_fenceable,
nonblocking);
ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
map_and_fenceable,
nonblocking);
if (ret)
return ret;
@ -3670,7 +3721,7 @@ void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
BUG_ON(obj->pin_count == 0);
BUG_ON(!i915_gem_obj_ggtt_bound(obj));
BUG_ON(!i915_gem_obj_bound_any(obj));
if (--obj->pin_count == 0)
obj->pin_mappable = false;
@ -3708,7 +3759,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
}
if (obj->user_pin_count == 0) {
ret = i915_gem_object_pin(obj, args->alignment, true, false);
ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
if (ret)
goto out;
}
@ -3859,7 +3910,6 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops)
{
INIT_LIST_HEAD(&obj->mm_list);
INIT_LIST_HEAD(&obj->global_list);
INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->exec_list);
@ -3945,6 +3995,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_vma *vma, *next;
trace_i915_gem_object_destroy(obj);
@ -3952,15 +4003,21 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
i915_gem_detach_phys_object(dev, obj);
obj->pin_count = 0;
if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
bool was_interruptible;
/* NB: 0 or 1 elements */
WARN_ON(!list_empty(&obj->vma_list) &&
!list_is_singular(&obj->vma_list));
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
int ret = i915_vma_unbind(vma);
if (WARN_ON(ret == -ERESTARTSYS)) {
bool was_interruptible;
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
WARN_ON(i915_gem_object_unbind(obj));
WARN_ON(i915_vma_unbind(vma));
dev_priv->mm.interruptible = was_interruptible;
dev_priv->mm.interruptible = was_interruptible;
}
}
/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
@ -3994,15 +4051,23 @@ struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&vma->vma_link);
INIT_LIST_HEAD(&vma->mm_list);
vma->vm = vm;
vma->obj = obj;
/* Keep GGTT vmas first to make debug easier */
if (i915_is_ggtt(vm))
list_add(&vma->vma_link, &obj->vma_list);
else
list_add_tail(&vma->vma_link, &obj->vma_list);
return vma;
}
void i915_gem_vma_destroy(struct i915_vma *vma)
{
WARN_ON(vma->node.allocated);
list_del(&vma->vma_link);
kfree(vma);
}
@ -4328,6 +4393,16 @@ init_ring_lists(struct intel_ring_buffer *ring)
INIT_LIST_HEAD(&ring->request_list);
}
static void i915_init_vm(struct drm_i915_private *dev_priv,
struct i915_address_space *vm)
{
vm->dev = dev_priv->dev;
INIT_LIST_HEAD(&vm->active_list);
INIT_LIST_HEAD(&vm->inactive_list);
INIT_LIST_HEAD(&vm->global_link);
list_add(&vm->global_link, &dev_priv->vm_list);
}
void
i915_gem_load(struct drm_device *dev)
{
@ -4340,8 +4415,9 @@ i915_gem_load(struct drm_device *dev)
SLAB_HWCACHE_ALIGN,
NULL);
INIT_LIST_HEAD(&dev_priv->gtt.base.active_list);
INIT_LIST_HEAD(&dev_priv->gtt.base.inactive_list);
INIT_LIST_HEAD(&dev_priv->vm_list);
i915_init_vm(dev_priv, &dev_priv->gtt.base);
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@ -4612,7 +4688,6 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
struct drm_i915_private,
mm.inactive_shrinker);
struct drm_device *dev = dev_priv->dev;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
int nr_to_scan = sc->nr_to_scan;
bool unlock = true;
@ -4641,11 +4716,88 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &vm->inactive_list, mm_list)
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (obj->active)
continue;
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
}
if (unlock)
mutex_unlock(&dev->struct_mutex);
return cnt;
}
/* All the new VM stuff */
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
struct i915_address_space *vm)
{
struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
if (vm == &dev_priv->mm.aliasing_ppgtt->base)
vm = &dev_priv->gtt.base;
BUG_ON(list_empty(&o->vma_list));
list_for_each_entry(vma, &o->vma_list, vma_link) {
if (vma->vm == vm)
return vma->node.start;
}
return -1;
}
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
struct i915_address_space *vm)
{
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, vma_link)
if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
return true;
return false;
}
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_address_space *vm;
list_for_each_entry(vm, &dev_priv->vm_list, global_link)
if (i915_gem_obj_bound(o, vm))
return true;
return false;
}
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
struct i915_address_space *vm)
{
struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
if (vm == &dev_priv->mm.aliasing_ppgtt->base)
vm = &dev_priv->gtt.base;
BUG_ON(list_empty(&o->vma_list));
list_for_each_entry(vma, &o->vma_list, vma_link)
if (vma->vm == vm)
return vma->node.size;
return 0;
}
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (vma->vm == vm)
return vma;
return NULL;
}

drivers/gpu/drm/i915/i915_gem_context.c

@ -155,7 +155,7 @@ create_hw_context(struct drm_device *dev,
if (INTEL_INFO(dev)->gen >= 7) {
ret = i915_gem_object_set_cache_level(ctx->obj,
I915_CACHE_LLC_MLC);
I915_CACHE_L3_LLC);
/* Failure shouldn't ever happen this early */
if (WARN_ON(ret))
goto err_out;
@ -214,7 +214,7 @@ static int create_default_context(struct drm_i915_private *dev_priv)
* default context.
*/
dev_priv->ring[RCS].default_context = ctx;
ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
if (ret) {
DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
goto err_destroy;
@ -400,7 +400,7 @@ static int do_switch(struct i915_hw_context *to)
if (from == to)
return 0;
ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
if (ret)
return ret;
@ -436,7 +436,10 @@ static int do_switch(struct i915_hw_context *to)
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from != NULL) {
struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
struct i915_address_space *ggtt = &dev_priv->gtt.base;
from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
i915_gem_object_move_to_active(from->obj, ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the

drivers/gpu/drm/i915/i915_gem_debug.c

@ -115,73 +115,4 @@ i915_verify_lists(struct drm_device *dev)
return warned = err;
}
#endif /* WATCH_INACTIVE */
#if WATCH_COHERENCY
void
i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
{
struct drm_device *dev = obj->base.dev;
int page;
uint32_t *gtt_mapping;
uint32_t *backing_map = NULL;
int bad_count = 0;
DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
__func__, obj, obj->gtt_offset, handle,
obj->size / 1024);
gtt_mapping = ioremap(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
obj->base.size);
if (gtt_mapping == NULL) {
DRM_ERROR("failed to map GTT space\n");
return;
}
for (page = 0; page < obj->size / PAGE_SIZE; page++) {
int i;
backing_map = kmap_atomic(obj->pages[page]);
if (backing_map == NULL) {
DRM_ERROR("failed to map backing page\n");
goto out;
}
for (i = 0; i < PAGE_SIZE / 4; i++) {
uint32_t cpuval = backing_map[i];
uint32_t gttval = readl(gtt_mapping +
page * 1024 + i);
if (cpuval != gttval) {
DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
"0x%08x vs 0x%08x\n",
(int)(obj->gtt_offset +
page * PAGE_SIZE + i * 4),
cpuval, gttval);
if (bad_count++ >= 8) {
DRM_INFO("...\n");
goto out;
}
}
}
kunmap_atomic(backing_map);
backing_map = NULL;
}
out:
if (backing_map != NULL)
kunmap_atomic(backing_map);
iounmap(gtt_mapping);
/* give syslog time to catch up */
msleep(1);
/* Directly flush the object, since we just loaded values with the CPU
* from the backing pages and we don't want to disturb the cache
* management that we're trying to observe.
*/
i915_gem_clflush_object(obj);
}
#endif
#endif /* WATCH_LIST */

drivers/gpu/drm/i915/i915_gem_evict.c

@ -32,24 +32,21 @@
#include "i915_trace.h"
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
if (obj->pin_count)
if (vma->obj->pin_count)
return false;
list_add(&obj->exec_list, unwind);
list_add(&vma->obj->exec_list, unwind);
return drm_mm_scan_add_block(&vma->node);
}
int
i915_gem_evict_something(struct drm_device *dev, int min_size,
unsigned alignment, unsigned cache_level,
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
int min_size, unsigned alignment, unsigned cache_level,
bool mappable, bool nonblocking)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct list_head eviction_list, unwind_list;
struct i915_vma *vma;
struct drm_i915_gem_object *obj;
@ -81,16 +78,17 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
*/
INIT_LIST_HEAD(&unwind_list);
if (mappable)
if (mappable) {
BUG_ON(!i915_is_ggtt(vm));
drm_mm_init_scan_with_range(&vm->mm, min_size,
alignment, cache_level, 0,
dev_priv->gtt.mappable_end);
else
} else
drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(obj, &vm->inactive_list, mm_list) {
if (mark_free(obj, &unwind_list))
list_for_each_entry(vma, &vm->inactive_list, mm_list) {
if (mark_free(vma, &unwind_list))
goto found;
}
@ -98,8 +96,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
goto none;
/* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(obj, &vm->active_list, mm_list) {
if (mark_free(obj, &unwind_list))
list_for_each_entry(vma, &vm->active_list, mm_list) {
if (mark_free(vma, &unwind_list))
goto found;
}
@ -109,7 +107,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
vma = __i915_gem_obj_to_vma(obj);
vma = i915_gem_obj_to_vma(obj, vm);
ret = drm_mm_scan_remove_block(&vma->node);
BUG_ON(ret);
@ -130,7 +128,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
vma = __i915_gem_obj_to_vma(obj);
vma = i915_gem_obj_to_vma(obj, vm);
if (drm_mm_scan_remove_block(&vma->node)) {
list_move(&obj->exec_list, &eviction_list);
drm_gem_object_reference(&obj->base);
@ -145,7 +143,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
struct drm_i915_gem_object,
exec_list);
if (ret == 0)
ret = i915_gem_object_unbind(obj);
ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
@ -158,13 +156,18 @@ int
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj, *next;
bool lists_empty;
struct i915_address_space *vm;
struct i915_vma *vma, *next;
bool lists_empty = true;
int ret;
lists_empty = (list_empty(&vm->inactive_list) &&
list_empty(&vm->active_list));
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
lists_empty = (list_empty(&vm->inactive_list) &&
list_empty(&vm->active_list));
if (!lists_empty)
lists_empty = false;
}
if (lists_empty)
return -ENOSPC;
@ -181,9 +184,11 @@ i915_gem_evict_everything(struct drm_device *dev)
i915_gem_retire_requests(dev);
/* Having flushed everything, unbind() should never raise an error */
list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list)
if (obj->pin_count == 0)
WARN_ON(i915_gem_object_unbind(obj));
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
if (vma->obj->pin_count == 0)
WARN_ON(i915_vma_unbind(vma));
}
return 0;
}
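
The function above is one instance of the multi-VM walk this series
introduces; condensed, the idiom (also used by the drop-caches debugfs
code) looks like this:

struct i915_address_space *vm;
struct i915_vma *vma, *next;

list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list) {
		if (vma->obj->pin_count)
			continue;	/* pinned bindings must stay resident */
		WARN_ON(i915_vma_unbind(vma));
	}
}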

drivers/gpu/drm/i915/i915_gem_execbuffer.c

@ -174,7 +174,8 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct drm_i915_gem_relocation_entry *reloc)
struct drm_i915_gem_relocation_entry *reloc,
struct i915_address_space *vm)
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
@ -297,7 +298,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
struct eb_objects *eb)
struct eb_objects *eb,
struct i915_address_space *vm)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
@ -321,7 +323,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
do {
u64 offset = r->presumed_offset;
ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
vm);
if (ret)
return ret;
@ -344,13 +347,15 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct drm_i915_gem_relocation_entry *relocs)
struct drm_i915_gem_relocation_entry *relocs,
struct i915_address_space *vm)
{
const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int i, ret;
for (i = 0; i < entry->relocation_count; i++) {
ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
vm);
if (ret)
return ret;
}
@ -359,7 +364,8 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
}
static int
i915_gem_execbuffer_relocate(struct eb_objects *eb)
i915_gem_execbuffer_relocate(struct eb_objects *eb,
struct i915_address_space *vm)
{
struct drm_i915_gem_object *obj;
int ret = 0;
@ -373,7 +379,7 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb)
*/
pagefault_disable();
list_for_each_entry(obj, &eb->objects, exec_list) {
ret = i915_gem_execbuffer_relocate_object(obj, eb);
ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
if (ret)
break;
}
@ -395,6 +401,7 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
struct i915_address_space *vm,
bool *need_reloc)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
@ -409,7 +416,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
false);
if (ret)
return ret;
@ -436,8 +444,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->has_aliasing_ppgtt_mapping = 1;
}
if (entry->offset != i915_gem_obj_ggtt_offset(obj)) {
entry->offset = i915_gem_obj_ggtt_offset(obj);
if (entry->offset != i915_gem_obj_offset(obj, vm)) {
entry->offset = i915_gem_obj_offset(obj, vm);
*need_reloc = true;
}
@ -458,7 +466,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
struct drm_i915_gem_exec_object2 *entry;
if (!i915_gem_obj_ggtt_bound(obj))
if (!i915_gem_obj_bound_any(obj))
return;
entry = obj->exec_entry;
@ -475,6 +483,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct list_head *objects,
struct i915_address_space *vm,
bool *need_relocs)
{
struct drm_i915_gem_object *obj;
@ -529,32 +538,37 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool need_fence, need_mappable;
u32 obj_offset;
if (!i915_gem_obj_ggtt_bound(obj))
if (!i915_gem_obj_bound(obj, vm))
continue;
obj_offset = i915_gem_obj_offset(obj, vm);
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
WARN_ON((need_mappable || need_fence) &&
!i915_is_ggtt(vm));
if ((entry->alignment &&
i915_gem_obj_ggtt_offset(obj) & (entry->alignment - 1)) ||
obj_offset & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
else
ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
if (ret)
goto err;
}
/* Bind fresh objects */
list_for_each_entry(obj, objects, exec_list) {
if (i915_gem_obj_ggtt_bound(obj))
if (i915_gem_obj_bound(obj, vm))
continue;
ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
if (ret)
goto err;
}
@ -578,7 +592,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring,
struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *exec)
struct drm_i915_gem_exec_object2 *exec,
struct i915_address_space *vm)
{
struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_object *obj;
@ -662,14 +677,15 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
goto err;
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
if (ret)
goto err;
list_for_each_entry(obj, &eb->objects, exec_list) {
int offset = obj->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
reloc + reloc_offset[offset]);
reloc + reloc_offset[offset],
vm);
if (ret)
goto err;
}
@ -770,6 +786,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct i915_address_space *vm,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
@ -784,6 +801,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->base.read_domains = obj->base.pending_read_domains;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
/* FIXME: This lookup gets fixed later <-- danvet */
list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
i915_gem_object_move_to_active(obj, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
@ -838,7 +857,8 @@ static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct drm_i915_gem_exec_object2 *exec)
struct drm_i915_gem_exec_object2 *exec,
struct i915_address_space *vm)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct eb_objects *eb;
@ -1000,17 +1020,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
if (ret)
goto err;
/* The objects are in their final locations, apply the relocations. */
if (need_relocs)
ret = i915_gem_execbuffer_relocate(eb);
ret = i915_gem_execbuffer_relocate(eb, vm);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
eb, exec);
eb, exec, vm);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
@ -1061,7 +1081,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
exec_start = i915_gem_obj_ggtt_offset(batch_obj) + args->batch_start_offset;
exec_start = i915_gem_obj_offset(batch_obj, vm) +
args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
@ -1086,7 +1107,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
i915_gem_execbuffer_move_to_active(&eb->objects, ring);
i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
err:
@ -1107,6 +1128,7 @@ int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
@ -1162,7 +1184,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2.flags = I915_EXEC_RENDER;
i915_execbuffer2_set_context_id(exec2, 0);
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
&dev_priv->gtt.base);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++)
@ -1188,6 +1211,7 @@ int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
@ -1218,7 +1242,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EFAULT;
}
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
&dev_priv->gtt.base);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
ret = copy_to_user(to_user_ptr(args->buffers_ptr),

View file

@ -43,7 +43,7 @@
#define GEN6_PTE_UNCACHED (1 << 1)
#define HSW_PTE_UNCACHED (0)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
@ -52,18 +52,40 @@
*/
#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
(((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
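Expanding the new values by hand (arithmetic only, for illustration): bits 0-2 of the control value land in PTE bits 1-3 and bit 3 lands in PTE bit 11, so

HSW_CACHEABILITY_CONTROL(0x2) == (0x2 << 1)         == 0x004	/* WB LLC, age 3 */
HSW_CACHEABILITY_CONTROL(0x3) == (0x3 << 1)         == 0x006	/* WB LLC, age 0 */
HSW_CACHEABILITY_CONTROL(0xb) == (0x3 << 1) | 0x800 == 0x806	/* WB eLLC+LLC */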
static gen6_gtt_pte_t gen6_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
case I915_CACHE_LLC_MLC:
pte |= GEN6_PTE_CACHE_LLC_MLC;
case I915_CACHE_L3_LLC:
case I915_CACHE_LLC:
pte |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
pte |= GEN6_PTE_UNCACHED;
break;
default:
WARN_ON(1);
}
return pte;
}
static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
case I915_CACHE_L3_LLC:
pte |= GEN7_PTE_CACHE_L3_LLC;
break;
case I915_CACHE_LLC:
pte |= GEN6_PTE_CACHE_LLC;
@ -72,7 +94,7 @@ static gen6_gtt_pte_t gen6_pte_encode(dma_addr_t addr,
pte |= GEN6_PTE_UNCACHED;
break;
default:
BUG();
WARN_ON(1);
}
return pte;
@ -105,7 +127,7 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
pte |= HSW_PTE_ADDR_ENCODE(addr);
if (level != I915_CACHE_NONE)
pte |= HSW_WB_LLC_AGE0;
pte |= HSW_WB_LLC_AGE3;
return pte;
}
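The snb/ivb split exists because the 3<<1 encoding means different things on the two generations: it was GEN6_PTE_CACHE_LLC_MLC on gen6 and is GEN7_PTE_CACHE_L3_LLC on gen7. The mapping, read off the two functions above:

/* level                snb_pte_encode               ivb_pte_encode
 * I915_CACHE_L3_LLC    GEN6_PTE_CACHE_LLC (2<<1)    GEN7_PTE_CACHE_L3_LLC (3<<1)
 * I915_CACHE_LLC       GEN6_PTE_CACHE_LLC (2<<1)    GEN6_PTE_CACHE_LLC    (2<<1)
 * I915_CACHE_NONE      GEN6_PTE_UNCACHED  (1<<1)    GEN6_PTE_UNCACHED     (1<<1)
 */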
@ -298,13 +320,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
* now. */
first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
if (IS_HASWELL(dev)) {
ppgtt->base.pte_encode = hsw_pte_encode;
} else if (IS_VALLEYVIEW(dev)) {
ppgtt->base.pte_encode = byt_pte_encode;
} else {
ppgtt->base.pte_encode = gen6_pte_encode;
}
ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
ppgtt->enable = gen6_ppgtt_enable;
ppgtt->base.clear_range = gen6_ppgtt_clear_range;
@ -648,7 +664,8 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
* aperture. One page should be enough to keep any prefetching inside
* of the aperture.
*/
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
struct drm_mm_node *entry;
struct drm_i915_gem_object *obj;
unsigned long hole_start, hole_end;
@ -656,19 +673,19 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
BUG_ON(mappable_end > end);
/* Subtract the guard page ... */
drm_mm_init(&dev_priv->gtt.base.mm, start, end - start - PAGE_SIZE);
drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
if (!HAS_LLC(dev))
dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
/* Mark any preallocated objects as occupied */
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
int ret;
DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
i915_gem_obj_ggtt_offset(obj), obj->base.size);
WARN_ON(i915_gem_obj_ggtt_bound(obj));
ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &vma->node);
ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
if (ret)
DRM_DEBUG_KMS("Reservation failed\n");
obj->has_global_gtt_mapping = 1;
@ -679,19 +696,15 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
dev_priv->gtt.base.total = end - start;
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &dev_priv->gtt.base.mm,
hole_start, hole_end) {
drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
hole_start / PAGE_SIZE,
count);
ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count);
}
/* And finally clear the reserved guard page */
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
end / PAGE_SIZE - 1, 1);
ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1);
}
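To put numbers on the clearing loop (hole range invented, 4 KiB pages):

unsigned long hole_start = 0x100000, hole_end = 0x140000;
unsigned long count = (hole_end - hole_start) / PAGE_SIZE;	/* 64 PTEs */

and the final clear_range() call scrubs exactly one PTE, the guard page that was subtracted from the drm_mm range at init time.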
static bool
@ -898,8 +911,10 @@ int i915_gem_gtt_init(struct drm_device *dev)
gtt->base.pte_encode = hsw_pte_encode;
else if (IS_VALLEYVIEW(dev))
gtt->base.pte_encode = byt_pte_encode;
else if (INTEL_INFO(dev)->gen >= 7)
gtt->base.pte_encode = ivb_pte_encode;
else
gtt->base.pte_encode = gen6_pte_encode;
gtt->base.pte_encode = snb_pte_encode;
}
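Callers then dispatch through the hook instead of branching on the platform, e.g. (sketch; dma_addr stands in for a valid page address):

gen6_gtt_pte_t pte = dev_priv->gtt.base.pte_encode(dma_addr, I915_CACHE_LLC);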
ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,

View file

@ -349,7 +349,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct i915_address_space *ggtt = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
struct i915_vma *vma;
@ -393,7 +393,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
vma = i915_gem_vma_create(obj, &dev_priv->gtt.base);
vma = i915_gem_vma_create(obj, ggtt);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_out;
@ -406,8 +406,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
*/
vma->node.start = gtt_offset;
vma->node.size = size;
if (drm_mm_initialized(&dev_priv->gtt.base.mm)) {
ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &vma->node);
if (drm_mm_initialized(&ggtt->mm)) {
ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
i915_gem_vma_destroy(vma);
@ -418,7 +418,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
obj->has_global_gtt_mapping = 1;
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &vm->inactive_list);
list_add_tail(&vma->mm_list, &ggtt->inactive_list);
return obj;

View file

@ -360,17 +360,18 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
obj->map_and_fenceable =
!i915_gem_obj_ggtt_bound(obj) ||
(i915_gem_obj_ggtt_offset(obj) + obj->base.size <= dev_priv->gtt.mappable_end &&
(i915_gem_obj_ggtt_offset(obj) +
obj->base.size <= dev_priv->gtt.mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
/* Rebind if we need a change of alignment */
if (!obj->map_and_fenceable) {
u32 unfenced_alignment =
u32 unfenced_align =
i915_gem_get_gtt_alignment(dev, obj->base.size,
args->tiling_mode,
false);
if (i915_gem_obj_ggtt_offset(obj) & (unfenced_alignment - 1))
ret = i915_gem_object_unbind(obj);
if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1))
ret = i915_gem_object_ggtt_unbind(obj);
}
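The offset test works because GTT alignments are powers of two, so unfenced_align - 1 is a mask of the low bits; with illustrative numbers:

u32 unfenced_align = 0x10000;	/* 64 KiB */
u32 offset = 0x21000;
bool misaligned = offset & (unfenced_align - 1);	/* 0x1000 != 0, so unbind and rebind */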
if (ret == 0) {

View file

@ -304,13 +304,13 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (error->active_bo)
print_error_buffers(m, "Active",
error->active_bo,
error->active_bo_count);
error->active_bo[0],
error->active_bo_count[0]);
if (error->pinned_bo)
print_error_buffers(m, "Pinned",
error->pinned_bo,
error->pinned_bo_count);
error->pinned_bo[0],
error->pinned_bo_count[0]);
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
struct drm_i915_error_object *obj;
@ -556,11 +556,11 @@ static void capture_bo(struct drm_i915_error_buffer *err,
static u32 capture_active_bo(struct drm_i915_error_buffer *err,
int count, struct list_head *head)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int i = 0;
list_for_each_entry(obj, head, mm_list) {
capture_bo(err++, obj);
list_for_each_entry(vma, head, mm_list) {
capture_bo(err++, vma->obj);
if (++i == count)
break;
}
@ -622,7 +622,8 @@ static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
{
struct i915_address_space *vm = &dev_priv->gtt.base;
struct i915_address_space *vm;
struct i915_vma *vma;
struct drm_i915_gem_object *obj;
u32 seqno;
@ -642,20 +643,23 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
}
seqno = ring->get_seqno(ring, false);
list_for_each_entry(obj, &vm->active_list, mm_list) {
if (obj->ring != ring)
continue;
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
list_for_each_entry(vma, &vm->active_list, mm_list) {
obj = vma->obj;
if (obj->ring != ring)
continue;
if (i915_seqno_passed(seqno, obj->last_read_seqno))
continue;
if (i915_seqno_passed(seqno, obj->last_read_seqno))
continue;
if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
continue;
if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
continue;
/* We need to copy these to an anonymous buffer as the simplest
* method to avoid being overwritten by userspace.
*/
return i915_error_object_create(dev_priv, obj);
/* We need to copy these to an anonymous buffer as the simplest
* method to avoid being overwritten by userspace.
*/
return i915_error_object_create(dev_priv, obj);
}
}
return NULL;
@ -771,41 +775,72 @@ static void i915_gem_record_rings(struct drm_device *dev,
}
}
static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
* VM.
*/
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error,
struct i915_address_space *vm,
const int ndx)
{
struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int i;
i = 0;
list_for_each_entry(obj, &vm->active_list, mm_list)
list_for_each_entry(vma, &vm->active_list, mm_list)
i++;
error->active_bo_count = i;
error->active_bo_count[ndx] = i;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
if (obj->pin_count)
i++;
error->pinned_bo_count = i - error->active_bo_count;
error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
if (i) {
error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
GFP_ATOMIC);
if (error->active_bo)
error->pinned_bo =
error->active_bo + error->active_bo_count;
active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC);
if (active_bo)
pinned_bo = active_bo + error->active_bo_count[ndx];
}
if (error->active_bo)
error->active_bo_count =
capture_active_bo(error->active_bo,
error->active_bo_count,
if (active_bo)
error->active_bo_count[ndx] =
capture_active_bo(active_bo,
error->active_bo_count[ndx],
&vm->active_list);
if (error->pinned_bo)
error->pinned_bo_count =
capture_pinned_bo(error->pinned_bo,
error->pinned_bo_count,
if (pinned_bo)
error->pinned_bo_count[ndx] =
capture_pinned_bo(pinned_bo,
error->pinned_bo_count[ndx],
&dev_priv->mm.bound_list);
error->active_bo[ndx] = active_bo;
error->pinned_bo[ndx] = pinned_bo;
}
static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
struct i915_address_space *vm;
int cnt = 0, i = 0;
list_for_each_entry(vm, &dev_priv->vm_list, global_link)
cnt++;
if (WARN(cnt > 1, "Multiple VMs not yet supported\n"))
cnt = 1;
vm = &dev_priv->gtt.base;
error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
GFP_ATOMIC);
error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
GFP_ATOMIC);
list_for_each_entry(vm, &dev_priv->vm_list, global_link)
i915_gem_capture_vm(dev_priv, error, vm, i++);
}
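A consumer that walked every VM would then index the two arrays in step, along these lines (sketch; as the WARN above notes there is only one VM today, and the print path earlier in this diff reads index 0):

for (i = 0; i < cnt; i++)
	print_error_buffers(m, "Active",
			    error->active_bo[i],
			    error->active_bo_count[i]);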
/**
@ -938,8 +973,8 @@ const char *i915_cache_level_str(int type)
{
switch (type) {
case I915_CACHE_NONE: return " uncached";
case I915_CACHE_LLC: return " snooped (LLC)";
case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
case I915_CACHE_LLC: return " snooped or LLC";
case I915_CACHE_L3_LLC: return " L3+LLC";
default: return "";
}
}

View file

@ -1869,7 +1869,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
* we kick the ring. If we see no progress on three subsequent calls
* we assume the chip is wedged and try to fix it by resetting the chip.
*/
void i915_hangcheck_elapsed(unsigned long data)
static void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
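The three-strikes policy from the comment boils down to a per-ring score that resets whenever ACTHD makes progress; roughly (a paraphrase with illustrative names, not the driver's exact bookkeeping):

if (acthd == last_acthd) {
	if (++score >= 3)
		i915_handle_error(dev, true);	/* wedged: reset the chip */
	else
		kick_ring(ring);		/* hypothetical kick helper */
} else {
	score = 0;	/* forward progress, start over */
}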

View file

@ -3196,9 +3196,6 @@
#define MLTR_WM2_SHIFT 8
/* the unit of memory self-refresh latency time is 0.5us */
#define ILK_SRLT_MASK 0x3f
#define ILK_LATENCY(shift) (I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK)
#define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT)
#define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT)
/* define the fifo size on Ironlake */
#define ILK_DISPLAY_FIFO 128
@ -3245,12 +3242,6 @@
#define SSKPD_WM2_SHIFT 16
#define SSKPD_WM3_SHIFT 24
#define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK)
#define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT)
#define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT)
#define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT)
#define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT)
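Both banks of macros follow the same shift-and-mask idiom; they are dropped here, presumably in favor of reading the latencies directly in the wm code. A worked read for reference (register value invented, and assuming the 0.5 us unit noted for ILK above also holds):

u32 sskpd = I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD);
u32 wm1 = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;	/* e.g. 8 -> 4 us */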
/*
* The two pipe frame counter registers are not synchronized, so
* reading a stable value is somewhat tricky. The following code
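The comment is cut short by the hunk boundary here; the trick it introduces is the usual split-counter dance of sampling the high half twice and retrying until it is stable across the low read, as in this sketch (high_frame/low_frame stand in for the per-pipe register offsets):

do {
	high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	low   = I915_READ(low_frame);
	high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
} while (high1 != high2);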

View file

@ -33,47 +33,52 @@ TRACE_EVENT(i915_gem_object_create,
TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
);
TRACE_EVENT(i915_gem_object_bind,
TP_PROTO(struct drm_i915_gem_object *obj, bool mappable),
TP_ARGS(obj, mappable),
TRACE_EVENT(i915_vma_bind,
TP_PROTO(struct i915_vma *vma, bool mappable),
TP_ARGS(vma, mappable),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(struct i915_address_space *, vm)
__field(u32, offset)
__field(u32, size)
__field(bool, mappable)
),
TP_fast_assign(
__entry->obj = obj;
__entry->offset = i915_gem_obj_ggtt_offset(obj);
__entry->size = i915_gem_obj_ggtt_size(obj);
__entry->obj = vma->obj;
__entry->vm = vma->vm;
__entry->offset = vma->node.start;
__entry->size = vma->node.size;
__entry->mappable = mappable;
),
TP_printk("obj=%p, offset=%08x size=%x%s",
TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
__entry->obj, __entry->offset, __entry->size,
__entry->mappable ? ", mappable" : "")
__entry->mappable ? ", mappable" : "",
__entry->vm)
);
TRACE_EVENT(i915_gem_object_unbind,
TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj),
TRACE_EVENT(i915_vma_unbind,
TP_PROTO(struct i915_vma *vma),
TP_ARGS(vma),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(struct i915_address_space *, vm)
__field(u32, offset)
__field(u32, size)
),
TP_fast_assign(
__entry->obj = obj;
__entry->offset = i915_gem_obj_ggtt_offset(obj);
__entry->size = i915_gem_obj_ggtt_size(obj);
__entry->obj = vma->obj;
__entry->vm = vma->vm;
__entry->offset = vma->node.start;
__entry->size = vma->node.size;
),
TP_printk("obj=%p, offset=%08x size=%x",
__entry->obj, __entry->offset, __entry->size)
TP_printk("obj=%p, offset=%08x size=%x vm=%p",
__entry->obj, __entry->offset, __entry->size, __entry->vm)
);
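TRACE_EVENT generates the matching trace_* entry points, so converted call sites are expected to look like this (a sketch, not a hunk from this file; map_and_fenceable is an illustrative local for the mappable flag):

trace_i915_vma_bind(vma, map_and_fenceable);	/* on bind */
trace_i915_vma_unbind(vma);			/* before the VMA is torn down */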
TRACE_EVENT(i915_gem_object_change_domain,

View file

@ -28,7 +28,7 @@ static const u8 intel_dsm_guid[] = {
0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
};
static int intel_dsm(acpi_handle handle, int func, int arg)
static int intel_dsm(acpi_handle handle, int func)
{
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_object_list input;
@ -46,8 +46,9 @@ static int intel_dsm(acpi_handle handle, int func, int arg)
params[1].integer.value = INTEL_DSM_REVISION_ID;
params[2].type = ACPI_TYPE_INTEGER;
params[2].integer.value = func;
params[3].type = ACPI_TYPE_INTEGER;
params[3].integer.value = arg;
params[3].type = ACPI_TYPE_PACKAGE;
params[3].package.count = 0;
params[3].package.elements = NULL;
ret = acpi_evaluate_object(handle, "_DSM", &input, &output);
if (ret) {
@ -151,8 +152,9 @@ static void intel_dsm_platform_mux_info(void)
params[1].integer.value = INTEL_DSM_REVISION_ID;
params[2].type = ACPI_TYPE_INTEGER;
params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO;
params[3].type = ACPI_TYPE_INTEGER;
params[3].integer.value = 0;
params[3].type = ACPI_TYPE_PACKAGE;
params[3].package.count = 0;
params[3].package.elements = NULL;
ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input,
&output);
@ -205,7 +207,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev)
return false;
}
ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS);
if (ret < 0) {
DRM_DEBUG_KMS("failed to get supported _DSM functions\n");
return false;

View file

@ -52,17 +52,16 @@ struct intel_crt {
u32 adpa_reg;
};
static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
{
return container_of(intel_attached_encoder(connector),
struct intel_crt, base);
}
static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
{
return container_of(encoder, struct intel_crt, base);
}
static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
{
return intel_encoder_to_crt(intel_attached_encoder(connector));
}
static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
@ -238,17 +237,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
return true;
}
static void intel_crt_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static void intel_crt_mode_set(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crt *crt =
intel_encoder_to_crt(to_intel_encoder(encoder));
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = encoder->base.dev;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
u32 adpa;
if (HAS_PCH_SPLIT(dev))
@ -265,14 +261,14 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
if (HAS_PCH_LPT(dev))
; /* Those bits don't exist here */
else if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
else if (intel_crtc->pipe == 0)
adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
else if (crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
else
adpa |= ADPA_PIPE_B_SELECT;
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
I915_WRITE(BCLRPAT(crtc->pipe), 0);
I915_WRITE(crt->adpa_reg, adpa);
}
@ -711,10 +707,6 @@ static void intel_crt_reset(struct drm_connector *connector)
* Routines for controlling stuff on the analog port
*/
static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
.mode_set = intel_crt_mode_set,
};
static const struct drm_connector_funcs intel_crt_connector_funcs = {
.reset = intel_crt_reset,
.dpms = intel_crt_dpms,
@ -804,6 +796,7 @@ void intel_crt_init(struct drm_device *dev)
crt->adpa_reg = ADPA;
crt->base.compute_config = intel_crt_compute_config;
crt->base.mode_set = intel_crt_mode_set;
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
crt->base.get_config = intel_crt_get_config;
@ -815,7 +808,6 @@ void intel_crt_init(struct drm_device *dev)
crt->base.get_hw_state = intel_crt_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);

View file

@ -84,25 +84,17 @@ static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
* in either FDI or DP modes only, as HDMI connections will work with both
* of those
*/
static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
bool use_fdi_mode)
static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
int i;
const u32 *ddi_translations = ((use_fdi_mode) ?
const u32 *ddi_translations = (port == PORT_E) ?
hsw_ddi_translations_fdi :
hsw_ddi_translations_dp);
hsw_ddi_translations_dp;
DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n",
port_name(port),
use_fdi_mode ? "FDI" : "DP");
WARN((use_fdi_mode && (port != PORT_E)),
"Programming port %c in FDI mode, this probably will not work.\n",
port_name(port));
for (i=0, reg=DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
for (i = 0, reg = DDI_BUF_TRANS(port);
i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
I915_WRITE(reg, ddi_translations[i]);
reg += 4;
}
@ -118,14 +110,8 @@ void intel_prepare_ddi(struct drm_device *dev)
if (!HAS_DDI(dev))
return;
for (port = PORT_A; port < PORT_E; port++)
intel_prepare_ddi_buffers(dev, port, false);
/* DDI E is the suggested one to work in FDI mode, so program is as such
* by default. It will have to be re-programmed in case a digital DP
* output will be detected on it
*/
intel_prepare_ddi_buffers(dev, PORT_E, true);
for (port = PORT_A; port <= PORT_E; port++)
intel_prepare_ddi_buffers(dev, port);
}
static const long hsw_ddi_buf_ctl_values[] = {
@ -281,25 +267,22 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
DRM_ERROR("FDI link training failed!\n");
}
static void intel_ddi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static void intel_ddi_mode_set(struct intel_encoder *encoder)
{
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
int port = intel_ddi_get_encoder_port(intel_encoder);
int pipe = intel_crtc->pipe;
int type = intel_encoder->type;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
int port = intel_ddi_get_encoder_port(encoder);
int pipe = crtc->pipe;
int type = encoder->type;
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
DRM_DEBUG_KMS("Preparing DDI mode on port %c, pipe %c\n",
port_name(port), pipe_name(pipe));
intel_crtc->eld_vld = false;
crtc->eld_vld = false;
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(encoder);
enc_to_dig_port(&encoder->base);
intel_dp->DP = intel_dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
@ -307,17 +290,17 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
if (intel_dp->has_audio) {
DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
pipe_name(intel_crtc->pipe));
pipe_name(crtc->pipe));
/* write eld */
DRM_DEBUG_DRIVER("DP audio: write eld information\n");
intel_write_eld(encoder, adjusted_mode);
intel_write_eld(&encoder->base, adjusted_mode);
}
intel_dp_init_link_config(intel_dp);
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
if (intel_hdmi->has_audio) {
/* Proper support for digital audio needs a new logic
@ -325,14 +308,14 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
* patch bombing.
*/
DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
pipe_name(intel_crtc->pipe));
pipe_name(crtc->pipe));
/* write eld */
DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
intel_write_eld(encoder, adjusted_mode);
intel_write_eld(&encoder->base, adjusted_mode);
}
intel_hdmi->set_infoframes(encoder, adjusted_mode);
intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
}
}
@ -1311,10 +1294,6 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
.destroy = intel_ddi_destroy,
};
static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
.mode_set = intel_ddi_mode_set,
};
void intel_ddi_init(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -1339,9 +1318,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
drm_encoder_init(dev, encoder, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
intel_encoder->compute_config = intel_ddi_compute_config;
intel_encoder->mode_set = intel_ddi_mode_set;
intel_encoder->enable = intel_enable_ddi;
intel_encoder->pre_enable = intel_ddi_pre_enable;
intel_encoder->disable = intel_disable_ddi;

View file

@ -50,6 +50,10 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config);
static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *old_fb);
typedef struct {
int min, max;
} intel_range_t;
@ -59,7 +63,6 @@ typedef struct {
int p2_slow, p2_fast;
} intel_p2_t;
#define INTEL_P2_NUM 2
typedef struct intel_limit intel_limit_t;
struct intel_limit {
intel_range_t dot, vco, n, m, m1, m2, p, p1;
@ -3653,8 +3656,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
intel_update_watermarks(dev);
mutex_lock(&dev_priv->dpio_lock);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
@ -3665,10 +3666,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
/* VLV wants encoder enabling _before_ the pipe is up. */
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
i9xx_pfit_enable(intel_crtc);
intel_crtc_load_lut(crtc);
@ -3680,7 +3677,8 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_update_fbc(dev);
mutex_unlock(&dev_priv->dpio_lock);
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
}
static void i9xx_crtc_enable(struct drm_crtc *crtc)
@ -3877,16 +3875,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
}
}
void intel_modeset_disable(struct drm_device *dev)
{
struct drm_crtc *crtc;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->enabled)
intel_crtc_disable(crtc);
}
}
void intel_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
@ -3895,10 +3883,10 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_encoder);
}
/* Simple dpms helper for encodres with just one connector, no cloning and only
/* Simple dpms helper for encoders with just one connector, no cloning and only
* one kind of off state. It clamps all !ON modes to fully OFF and changes the
* state of the entire output pipe. */
void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
{
if (mode == DRM_MODE_DPMS_ON) {
encoder->connectors_active = true;
@ -4092,7 +4080,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
{
pipe_config->ips_enabled = i915_enable_ips &&
hsw_crtc_supports_ips(crtc) &&
pipe_config->pipe_bpp == 24;
pipe_config->pipe_bpp <= 24;
}
static int intel_crtc_compute_config(struct intel_crtc *crtc,
@ -4108,12 +4096,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
return -EINVAL;
}
/* All interlaced capable intel hw wants timings in frames. Note though
* that intel_lvds_mode_fixup does some funny tricks with the crtc
* timings, so we need to be careful not to clobber these.*/
if (!pipe_config->timings_set)
drm_mode_set_crtcinfo(adjusted_mode, 0);
/* Cantiga+ cannot handle modes with a hsync front porch of 0.
* WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
*/
@ -6220,11 +6202,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_encoder_helper_funcs *encoder_funcs;
struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
int pipe = intel_crtc->pipe;
int ret;
@ -6243,12 +6222,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
encoder->base.base.id,
drm_get_encoder_name(&encoder->base),
mode->base.id, mode->name);
if (encoder->mode_set) {
encoder->mode_set(encoder);
} else {
encoder_funcs = encoder->base.helper_private;
encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
}
encoder->mode_set(encoder);
}
return 0;
@ -8061,7 +8035,6 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct drm_encoder_helper_funcs *encoder_funcs;
struct intel_encoder *encoder;
struct intel_crtc_config *pipe_config;
int plane_bpp, ret = -EINVAL;
@ -8082,6 +8055,19 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
(enum transcoder) to_intel_crtc(crtc)->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
/*
* Sanitize sync polarity flags based on requested ones. If neither
* positive nor negative polarity is requested, treat this as meaning
* negative polarity.
*/
if (!(pipe_config->adjusted_mode.flags &
(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
if (!(pipe_config->adjusted_mode.flags &
(DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
/* Compute a starting value for pipe_config->pipe_bpp taking the source
* plane pixel format and any sink constraints into account. Returns the
* source plane bpp so that dithering can be selected on mismatches
@ -8096,6 +8082,9 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
pipe_config->port_clock = 0;
pipe_config->pixel_multiplier = 1;
/* Fill in default crtc timings, allow encoders to overwrite them. */
drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0);
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
@ -8106,20 +8095,8 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
if (&encoder->new_crtc->base != crtc)
continue;
if (encoder->compute_config) {
if (!(encoder->compute_config(encoder, pipe_config))) {
DRM_DEBUG_KMS("Encoder config failure\n");
goto fail;
}
continue;
}
encoder_funcs = encoder->base.helper_private;
if (!(encoder_funcs->mode_fixup(&encoder->base,
&pipe_config->requested_mode,
&pipe_config->adjusted_mode))) {
DRM_DEBUG_KMS("Encoder fixup failed\n");
if (!(encoder->compute_config(encoder, pipe_config))) {
DRM_DEBUG_KMS("Encoder config failure\n");
goto fail;
}
}
@ -8759,9 +8736,9 @@ static int __intel_set_mode(struct drm_crtc *crtc,
return ret;
}
int intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
static int intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
{
int ret;
@ -9358,8 +9335,13 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
PORT_C);
if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C,
PORT_C);
}
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
@ -9419,13 +9401,17 @@ static void intel_setup_outputs(struct drm_device *dev)
drm_helper_move_panel_connectors_to_head(dev);
}
void intel_framebuffer_fini(struct intel_framebuffer *fb)
{
drm_framebuffer_cleanup(&fb->base);
drm_gem_object_unreference_unlocked(&fb->obj->base);
}
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
drm_framebuffer_cleanup(fb);
drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
intel_framebuffer_fini(intel_fb);
kfree(intel_fb);
}

View file

@ -833,15 +833,14 @@ static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
udelay(500);
}
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static void intel_dp_mode_set(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
/*
* There are four kinds of DP registers:
@ -873,7 +872,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(crtc->pipe));
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
intel_write_eld(encoder, adjusted_mode);
intel_write_eld(&encoder->base, adjusted_mode);
}
intel_dp_init_link_config(intel_dp);
@ -1701,47 +1700,50 @@ static void intel_enable_dp(struct intel_encoder *encoder)
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
ironlake_edp_backlight_on(intel_dp);
}
if (IS_VALLEYVIEW(dev)) {
struct intel_digital_port *dport =
enc_to_dig_port(&encoder->base);
int channel = vlv_dport_to_channel(dport);
vlv_wait_port_ready(dev_priv, channel);
}
static void vlv_enable_dp(struct intel_encoder *encoder)
{
}
static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
if (dport->port == PORT_A)
ironlake_edp_pll_on(intel_dp);
}
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
int port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
u32 val;
if (dport->port == PORT_A && !IS_VALLEYVIEW(dev))
ironlake_edp_pll_on(intel_dp);
mutex_lock(&dev_priv->dpio_lock);
if (IS_VALLEYVIEW(dev)) {
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
int port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
u32 val;
val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
val = 0;
if (pipe)
val |= (1<<21);
else
val &= ~(1<<21);
val |= 0x001000c4;
vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
val = 0;
if (pipe)
val |= (1<<21);
else
val &= ~(1<<21);
val |= 0x001000c4;
vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
mutex_unlock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
0x00760018);
vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
0x00400888);
}
intel_enable_dp(encoder);
vlv_wait_port_ready(dev_priv, port);
}
static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
@ -1755,6 +1757,7 @@ static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
return;
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
@ -1768,6 +1771,7 @@ static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
mutex_unlock(&dev_priv->dpio_lock);
}
/*
@ -1979,6 +1983,7 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
return 0;
}
mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
@ -1987,6 +1992,7 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
mutex_unlock(&dev_priv->dpio_lock);
return 0;
}
@ -3035,10 +3041,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_dig_port);
}
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
.mode_set = intel_dp_mode_set,
};
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_dp_detect,
@ -3518,17 +3520,21 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
intel_encoder->compute_config = intel_dp_compute_config;
intel_encoder->enable = intel_enable_dp;
intel_encoder->pre_enable = intel_pre_enable_dp;
intel_encoder->mode_set = intel_dp_mode_set;
intel_encoder->disable = intel_disable_dp;
intel_encoder->post_disable = intel_post_disable_dp;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev)) {
intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
intel_encoder->pre_enable = vlv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
} else {
intel_encoder->pre_enable = intel_pre_enable_dp;
intel_encoder->enable = intel_enable_dp;
}
intel_dig_port->port = port;
intel_dig_port->dp.output_reg = output_reg;

View file

@ -26,6 +26,7 @@
#define __INTEL_DRV_H__
#include <linux/i2c.h>
#include <linux/hdmi.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include <drm/drm_crtc.h>
@ -208,10 +209,6 @@ struct intel_crtc_config {
struct drm_display_mode requested_mode;
struct drm_display_mode adjusted_mode;
/* This flag must be set by the encoder's compute_config callback if it
* changes the crtc timings in the mode to prevent the crtc fixup from
* overwriting them. Currently only lvds needs that. */
bool timings_set;
/* Whether to set up the PCH/FDI. Note that we never allow sharing
* between pch encoders and cpu encoders. */
bool has_pch_encoder;
@ -334,6 +331,13 @@ struct intel_crtc {
bool pch_fifo_underrun_disabled;
};
struct intel_plane_wm_parameters {
uint32_t horiz_pixels;
uint8_t bytes_per_pixel;
bool enabled;
bool scaled;
};
struct intel_plane {
struct drm_plane base;
int plane;
@ -352,20 +356,18 @@ struct intel_plane {
* as the other pieces of the struct may not reflect the values we want
* for the watermark calculations. Currently only Haswell uses this.
*/
struct {
bool enable;
uint8_t bytes_per_pixel;
uint32_t horiz_pixels;
} wm;
struct intel_plane_wm_parameters wm;
void (*update_plane)(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h);
void (*disable_plane)(struct drm_plane *plane);
void (*disable_plane)(struct drm_plane *plane,
struct drm_crtc *crtc);
int (*update_colorkey)(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key);
void (*get_colorkey)(struct drm_plane *plane,
@ -397,66 +399,6 @@ struct cxsr_latency {
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
#define DIP_HEADER_SIZE 5
#define DIP_TYPE_AVI 0x82
#define DIP_VERSION_AVI 0x2
#define DIP_LEN_AVI 13
#define DIP_AVI_PR_1 0
#define DIP_AVI_PR_2 1
#define DIP_AVI_RGB_QUANT_RANGE_DEFAULT (0 << 2)
#define DIP_AVI_RGB_QUANT_RANGE_LIMITED (1 << 2)
#define DIP_AVI_RGB_QUANT_RANGE_FULL (2 << 2)
#define DIP_TYPE_SPD 0x83
#define DIP_VERSION_SPD 0x1
#define DIP_LEN_SPD 25
#define DIP_SPD_UNKNOWN 0
#define DIP_SPD_DSTB 0x1
#define DIP_SPD_DVDP 0x2
#define DIP_SPD_DVHS 0x3
#define DIP_SPD_HDDVR 0x4
#define DIP_SPD_DVC 0x5
#define DIP_SPD_DSC 0x6
#define DIP_SPD_VCD 0x7
#define DIP_SPD_GAME 0x8
#define DIP_SPD_PC 0x9
#define DIP_SPD_BD 0xa
#define DIP_SPD_SCD 0xb
struct dip_infoframe {
uint8_t type; /* HB0 */
uint8_t ver; /* HB1 */
uint8_t len; /* HB2 - body len, not including checksum */
uint8_t ecc; /* Header ECC */
uint8_t checksum; /* PB0 */
union {
struct {
/* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */
uint8_t Y_A_B_S;
/* PB2 - C 7:6, M 5:4, R 3:0 */
uint8_t C_M_R;
/* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */
uint8_t ITC_EC_Q_SC;
/* PB4 - VIC 6:0 */
uint8_t VIC;
/* PB5 - YQ 7:6, CN 5:4, PR 3:0 */
uint8_t YQ_CN_PR;
/* PB6 to PB13 */
uint16_t top_bar_end;
uint16_t bottom_bar_start;
uint16_t left_bar_end;
uint16_t right_bar_start;
} __attribute__ ((packed)) avi;
struct {
uint8_t vn[8];
uint8_t pd[16];
uint8_t sdi;
} __attribute__ ((packed)) spd;
uint8_t payload[27];
} __attribute__ ((packed)) body;
} __attribute__((packed));
struct intel_hdmi {
u32 hdmi_reg;
int ddc_bus;
@ -467,7 +409,8 @@ struct intel_hdmi {
enum hdmi_force_audio force_audio;
bool rgb_quant_range_selectable;
void (*write_infoframe)(struct drm_encoder *encoder,
struct dip_infoframe *frame);
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len);
void (*set_infoframes)(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
};
@ -569,7 +512,6 @@ extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
extern void intel_dvo_init(struct drm_device *dev);
@ -634,14 +576,10 @@ struct intel_set_config {
bool mode_changed;
};
extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *old_fb);
extern void intel_modeset_disable(struct drm_device *dev);
extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
extern void intel_connector_dpms(struct drm_connector *, int mode);
extern bool intel_connector_get_hw_state(struct intel_connector *connector);
extern void intel_modeset_check_state(struct drm_device *dev);
@ -707,12 +645,10 @@ extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
extern void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old);
extern void intelfb_restore(void);
extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
extern void intel_enable_clock_gating(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@ -723,6 +659,7 @@ extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
extern void intel_framebuffer_fini(struct intel_framebuffer *fb);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_initial_config(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
@ -773,9 +710,10 @@ extern void intel_ddi_init(struct drm_device *dev, enum port port);
/* For use by IVB LP watermark workaround in intel_sprite.c */
extern void intel_update_watermarks(struct drm_device *dev);
extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
uint32_t sprite_width,
int pixel_size, bool enable);
extern void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
bool enabled, bool scaled);
extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
unsigned int tiling_mode,

View file

@ -100,15 +100,14 @@ struct intel_dvo {
bool panel_wants_dither;
};
static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder)
{
return container_of(encoder, struct intel_dvo, base.base);
return container_of(encoder, struct intel_dvo, base);
}
static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
{
return container_of(intel_attached_encoder(connector),
struct intel_dvo, base);
return enc_to_dvo(intel_attached_encoder(connector));
}
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
@ -123,7 +122,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp;
tmp = I915_READ(intel_dvo->dev.dvo_reg);
@ -140,7 +139,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp, flags = 0;
tmp = I915_READ(intel_dvo->dev.dvo_reg);
@ -159,7 +158,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
static void intel_disable_dvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
@ -171,7 +170,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder)
static void intel_enable_dvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
@ -241,11 +240,11 @@ static int intel_dvo_mode_valid(struct drm_connector *connector,
return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
}
static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static bool intel_dvo_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
/* If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
@ -267,20 +266,21 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
}
if (intel_dvo->dev.dev_ops->mode_fixup)
return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode);
return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev,
&pipe_config->requested_mode,
adjusted_mode);
return true;
}
static void intel_dvo_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static void intel_dvo_mode_set(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
int pipe = intel_crtc->pipe;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
int pipe = crtc->pipe;
u32 dvo_val;
u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
@ -297,7 +297,9 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
break;
}
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode);
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
&crtc->config.requested_mode,
adjusted_mode);
/* Save the data order, since I don't know what it should be set to. */
dvo_val = I915_READ(dvo_reg) &
@ -371,11 +373,6 @@ static void intel_dvo_destroy(struct drm_connector *connector)
kfree(connector);
}
static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
.mode_fixup = intel_dvo_mode_fixup,
.mode_set = intel_dvo_mode_set,
};
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.dpms = intel_dvo_dpms,
.detect = intel_dvo_detect,
@ -391,7 +388,7 @@ static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
{
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
struct intel_dvo *intel_dvo = enc_to_dvo(to_intel_encoder(encoder));
if (intel_dvo->dev.dev_ops->destroy)
intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
@ -470,6 +467,8 @@ void intel_dvo_init(struct drm_device *dev)
intel_encoder->enable = intel_enable_dvo;
intel_encoder->get_hw_state = intel_dvo_get_hw_state;
intel_encoder->get_config = intel_dvo_get_config;
intel_encoder->compute_config = intel_dvo_compute_config;
intel_encoder->mode_set = intel_dvo_mode_set;
intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
/* Now, try to find a controller */
@ -536,9 +535,6 @@ void intel_dvo_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
drm_encoder_helper_add(&intel_encoder->base,
&intel_dvo_helper_funcs);
intel_connector_attach_encoder(intel_connector, intel_encoder);
if (dvo->type == INTEL_DVO_CHIP_LVDS) {
/* For our LVDS chipsets, we should hopefully be able

View file

@ -193,26 +193,21 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
static void intel_fbdev_destroy(struct drm_device *dev,
struct intel_fbdev *ifbdev)
{
struct fb_info *info;
struct intel_framebuffer *ifb = &ifbdev->ifb;
if (ifbdev->helper.fbdev) {
info = ifbdev->helper.fbdev;
struct fb_info *info = ifbdev->helper.fbdev;
unregister_framebuffer(info);
iounmap(info->screen_base);
if (info->cmap.len)
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
drm_fb_helper_fini(&ifbdev->helper);
drm_framebuffer_unregister_private(&ifb->base);
drm_framebuffer_cleanup(&ifb->base);
if (ifb->obj) {
drm_gem_object_unreference_unlocked(&ifb->obj->base);
ifb->obj = NULL;
}
drm_framebuffer_unregister_private(&ifbdev->ifb.base);
intel_framebuffer_fini(&ifbdev->ifb);
}
int intel_fbdev_init(struct drm_device *dev)

View file

@ -29,6 +29,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/hdmi.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@ -66,89 +67,75 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
}
void intel_dip_infoframe_csum(struct dip_infoframe *frame)
static u32 g4x_infoframe_index(enum hdmi_infoframe_type type)
{
uint8_t *data = (uint8_t *)frame;
uint8_t sum = 0;
unsigned i;
frame->checksum = 0;
frame->ecc = 0;
for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++)
sum += data[i];
frame->checksum = 0x100 - sum;
}
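The helper being deleted (checksumming is now handled by the shared hdmi infoframe pack helpers) implements the standard rule that all header and body bytes of an InfoFrame must sum to zero mod 256. A worked example with made-up numbers:

sum of header + body bytes	= 0x237
sum & 0xff			= 0x37
checksum = 0x100 - 0x37		= 0xc9	/* (0x37 + 0xc9) & 0xff == 0 */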
static u32 g4x_infoframe_index(struct dip_infoframe *frame)
{
switch (frame->type) {
case DIP_TYPE_AVI:
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_SELECT_AVI;
case DIP_TYPE_SPD:
case HDMI_INFOFRAME_TYPE_SPD:
return VIDEO_DIP_SELECT_SPD;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
}
}
static u32 g4x_infoframe_enable(struct dip_infoframe *frame)
static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type)
{
switch (frame->type) {
case DIP_TYPE_AVI:
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI;
case DIP_TYPE_SPD:
case HDMI_INFOFRAME_TYPE_SPD:
return VIDEO_DIP_ENABLE_SPD;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
}
}
static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
{
switch (frame->type) {
case DIP_TYPE_AVI:
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI_HSW;
case DIP_TYPE_SPD:
case HDMI_INFOFRAME_TYPE_SPD:
return VIDEO_DIP_ENABLE_SPD_HSW;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
}
}
static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame,
static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
enum transcoder cpu_transcoder)
{
switch (frame->type) {
case DIP_TYPE_AVI:
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder);
case DIP_TYPE_SPD:
case HDMI_INFOFRAME_TYPE_SPD:
return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder);
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
}
}
static void g4x_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val = I915_READ(VIDEO_DIP_CTL);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
int i;
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val |= g4x_infoframe_index(type);
val &= ~g4x_infoframe_enable(frame);
val &= ~g4x_infoframe_enable(type);
I915_WRITE(VIDEO_DIP_CTL, val);
@ -162,7 +149,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(VIDEO_DIP_DATA, 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
@ -171,22 +158,22 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
}
static void ibx_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val |= g4x_infoframe_index(type);
val &= ~g4x_infoframe_enable(frame);
val &= ~g4x_infoframe_enable(type);
I915_WRITE(reg, val);
@ -200,7 +187,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
@ -209,25 +196,25 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
}
static void cpt_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val |= g4x_infoframe_index(type);
/* The DIP control register spec says that we need to update the AVI
* infoframe without clearing its enable bit */
if (frame->type != DIP_TYPE_AVI)
val &= ~g4x_infoframe_enable(frame);
if (type != HDMI_INFOFRAME_TYPE_AVI)
val &= ~g4x_infoframe_enable(type);
I915_WRITE(reg, val);
@ -241,7 +228,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
@ -250,22 +237,22 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
}
static void vlv_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
int i, reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val |= g4x_infoframe_index(type);
val &= ~g4x_infoframe_enable(frame);
val &= ~g4x_infoframe_enable(type);
I915_WRITE(reg, val);
@ -279,7 +266,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
@ -288,21 +275,24 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
}
static void hsw_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->config.cpu_transcoder);
unsigned int i, len = DIP_HEADER_SIZE + frame->len;
u32 data_reg;
int i;
u32 val = I915_READ(ctl_reg);
data_reg = hsw_infoframe_data_reg(type,
intel_crtc->config.cpu_transcoder);
if (data_reg == 0)
return;
val &= ~hsw_infoframe_enable(frame);
val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
mmiowb();
@ -315,18 +305,48 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(data_reg + i, 0);
mmiowb();
val |= hsw_infoframe_enable(frame);
val |= hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
POSTING_READ(ctl_reg);
}
static void intel_set_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
/*
* The data we write to the DIP data buffer registers is 1 byte bigger than the
* HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
* at 0). It's also a byte used by DisplayPort so the same DIP registers can be
* used for both technologies.
*
* DW0: Reserved/ECC/DP | HB2 | HB1 | HB0
* DW1: DB3 | DB2 | DB1 | DB0
* DW2: DB7 | DB6 | DB5 | DB4
* DW3: ...
*
* (HB is Header Byte, DB is Data Byte)
*
* The hdmi pack() functions don't know about that hardware-specific hole,
* so we trick them by giving an offset into the buffer and moving back
* the header bytes by one.
*/
static void intel_write_infoframe(struct drm_encoder *encoder,
union hdmi_infoframe *frame)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
uint8_t buffer[VIDEO_DIP_DATA_SIZE];
ssize_t len;
intel_dip_infoframe_csum(frame);
intel_hdmi->write_infoframe(encoder, frame);
/* see comment above for the reason for this offset */
len = hdmi_infoframe_pack(frame, buffer + 1, sizeof(buffer) - 1);
if (len < 0)
return;
/* Insert the 'hole' (see big comment above) at position 3 */
buffer[0] = buffer[1];
buffer[1] = buffer[2];
buffer[2] = buffer[3];
buffer[3] = 0;
len++;
intel_hdmi->write_infoframe(encoder, frame->any.type, buffer, len);
}
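
/*
 * Editor's sketch (not part of this commit): the shuffle above, worked
 * through with made-up byte values. Like the write_infoframe hooks, which
 * cast the buffer to u32 before writing it out, this assumes a
 * little-endian CPU.
 */
static u32 dip_hole_example(void)
{
	/* hdmi_infoframe_pack() output for an AVI frame, packed at offset 1:
	 * buffer[1..3] = header (type 0x82, version 2, length 13),
	 * buffer[4] = checksum (0x39 is a sample value) */
	u8 buffer[8] = { 0x00, 0x82, 0x02, 0x0d, 0x39, 0x00, 0x00, 0x00 };
	u32 dw0;

	/* insert the hole at position 3, as intel_write_infoframe() does */
	buffer[0] = buffer[1];
	buffer[1] = buffer[2];
	buffer[2] = buffer[3];
	buffer[3] = 0;

	memcpy(&dw0, buffer, sizeof(dw0));
	return dw0;	/* 0x000d0282: hole | HB2 | HB1 | HB0, as documented */
}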
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
@ -334,40 +354,42 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
.ver = DIP_VERSION_AVI,
.len = DIP_LEN_AVI,
};
union hdmi_infoframe frame;
int ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
adjusted_mode);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return;
}
if (intel_hdmi->rgb_quant_range_selectable) {
if (intel_crtc->config.limited_color_range)
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_LIMITED;
else
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_FULL;
}
avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
intel_set_infoframe(encoder, &avi_if);
intel_write_infoframe(encoder, &frame);
}
static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
{
struct dip_infoframe spd_if;
union hdmi_infoframe frame;
int ret;
memset(&spd_if, 0, sizeof(spd_if));
spd_if.type = DIP_TYPE_SPD;
spd_if.ver = DIP_VERSION_SPD;
spd_if.len = DIP_LEN_SPD;
strcpy(spd_if.body.spd.vn, "Intel");
strcpy(spd_if.body.spd.pd, "Integrated gfx");
spd_if.body.spd.sdi = DIP_SPD_PC;
ret = hdmi_spd_infoframe_init(&frame.spd, "Intel", "Integrated gfx");
if (ret < 0) {
DRM_ERROR("couldn't fill SPD infoframe\n");
return;
}
intel_set_infoframe(encoder, &spd_if);
frame.spd.sdi = HDMI_SPD_SDI_PC;
intel_write_infoframe(encoder, &frame);
}
static void g4x_set_infoframes(struct drm_encoder *encoder,
@ -591,14 +613,13 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_spd_infoframe(encoder);
}
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static void intel_hdmi_mode_set(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
u32 hdmi_val;
hdmi_val = SDVO_ENCODING_HDMI;
@ -609,7 +630,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
if (intel_crtc->config.pipe_bpp > 24)
if (crtc->config.pipe_bpp > 24)
hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
else
hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
@ -620,21 +641,21 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
if (intel_hdmi->has_audio) {
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(intel_crtc->pipe));
pipe_name(crtc->pipe));
hdmi_val |= SDVO_AUDIO_ENABLE;
hdmi_val |= HDMI_MODE_SELECT_HDMI;
intel_write_eld(encoder, adjusted_mode);
intel_write_eld(&encoder->base, adjusted_mode);
}
if (HAS_PCH_CPT(dev))
hdmi_val |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else
hdmi_val |= SDVO_PIPE_SEL(intel_crtc->pipe);
hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
POSTING_READ(intel_hdmi->hdmi_reg);
intel_hdmi->set_infoframes(encoder, adjusted_mode);
intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
}
static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
@ -719,14 +740,10 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
}
}
if (IS_VALLEYVIEW(dev)) {
struct intel_digital_port *dport =
enc_to_dig_port(&encoder->base);
int channel = vlv_dport_to_channel(dport);
vlv_wait_port_ready(dev_priv, channel);
}
static void vlv_enable_hdmi(struct intel_encoder *encoder)
{
}
static void intel_disable_hdmi(struct intel_encoder *encoder)
@ -1033,6 +1050,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
return;
/* Enable clock channels for this port */
mutex_lock(&dev_priv->dpio_lock);
val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
val = 0;
if (pipe)
@ -1063,6 +1081,11 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
0x00760018);
vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
0x00400888);
mutex_unlock(&dev_priv->dpio_lock);
intel_enable_hdmi(encoder);
vlv_wait_port_ready(dev_priv, port);
}
static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
@ -1076,6 +1099,7 @@ static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
return;
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
@ -1094,6 +1118,7 @@ static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
0x00002000);
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
DPIO_TX_OCALINIT_EN);
mutex_unlock(&dev_priv->dpio_lock);
}
static void intel_hdmi_post_disable(struct intel_encoder *encoder)
@ -1116,10 +1141,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
kfree(connector);
}
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
.mode_set = intel_hdmi_mode_set,
};
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_hdmi_detect,
@ -1242,17 +1263,19 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
intel_encoder->compute_config = intel_hdmi_compute_config;
intel_encoder->enable = intel_enable_hdmi;
intel_encoder->mode_set = intel_hdmi_mode_set;
intel_encoder->disable = intel_disable_hdmi;
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
if (IS_VALLEYVIEW(dev)) {
intel_encoder->pre_enable = intel_hdmi_pre_enable;
intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable;
intel_encoder->pre_enable = intel_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = intel_hdmi_post_disable;
} else {
intel_encoder->enable = intel_enable_hdmi;
}
intel_encoder->type = INTEL_OUTPUT_HDMI;


@ -319,14 +319,12 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
return true;
}
static void intel_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static void intel_lvds_mode_set(struct intel_encoder *encoder)
{
/*
* The LVDS pin pair will already have been turned on in the
* intel_crtc_mode_set since it has a large impact on the DPLL
* settings.
* We don't do anything here; the LVDS port is fully set up in the
* pre-enable hook - the ordering constraints for enabling the LVDS
* port vs. enabling the display PLL are too strict.
*/
}
@ -507,10 +505,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
return 0;
}
static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
.mode_set = intel_lvds_mode_set,
};
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
.get_modes = intel_lvds_get_modes,
.mode_valid = intel_lvds_mode_valid,
@ -971,6 +965,7 @@ void intel_lvds_init(struct drm_device *dev)
intel_encoder->enable = intel_enable_lvds;
intel_encoder->pre_enable = intel_pre_enable_lvds;
intel_encoder->compute_config = intel_lvds_compute_config;
intel_encoder->mode_set = intel_lvds_mode_set;
intel_encoder->disable = intel_disable_lvds;
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_encoder->get_config = intel_lvds_get_config;
@ -987,7 +982,6 @@ void intel_lvds_init(struct drm_device *dev)
else
intel_encoder->crtc_mask = (1 << 1);
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;


@ -1352,7 +1352,7 @@ void intel_setup_overlay(struct drm_device *dev)
}
overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
} else {
ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false);
ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, true, false);
if (ret) {
DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo;


@ -194,9 +194,6 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
adjusted_mode->vdisplay == mode->vdisplay)
goto out;
drm_mode_set_crtcinfo(adjusted_mode, 0);
pipe_config->timings_set = true;
switch (fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*

File diff suppressed because it is too large


@ -501,7 +501,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
ret = i915_gem_object_pin(obj, 4096, true, false);
ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
if (ret)
goto err_unref;
@ -1224,7 +1224,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
ret = i915_gem_object_pin(obj, 4096, true, false);
ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
if (ret != 0) {
goto err_unref;
}
@ -1307,7 +1307,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ring->obj = obj;
ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
if (ret)
goto err_unref;
@ -1828,7 +1828,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
return -ENOMEM;
}
ret = i915_gem_object_pin(obj, 0, true, false);
ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
if (ret != 0) {
drm_gem_object_unreference(&obj->base);
DRM_ERROR("Failed to ping batch bo\n");


@ -202,15 +202,14 @@ struct intel_sdvo_connector {
u32 cur_dot_crawl, max_dot_crawl;
};
static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder)
static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder)
{
return container_of(encoder, struct intel_sdvo, base.base);
return container_of(encoder, struct intel_sdvo, base);
}
static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
{
return container_of(intel_attached_encoder(connector),
struct intel_sdvo, base);
return to_sdvo(intel_attached_encoder(connector));
}
static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
@ -964,30 +963,32 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
const struct drm_display_mode *adjusted_mode)
{
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
.ver = DIP_VERSION_AVI,
.len = DIP_LEN_AVI,
};
uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
struct intel_crtc *intel_crtc = to_intel_crtc(intel_sdvo->base.base.crtc);
uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
union hdmi_infoframe frame;
int ret;
ssize_t len;
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
adjusted_mode);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return false;
}
if (intel_sdvo->rgb_quant_range_selectable) {
if (intel_crtc->config.limited_color_range)
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_LIMITED;
else
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_FULL;
}
avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
intel_dip_infoframe_csum(&avi_if);
/* sdvo spec says that the ecc is handled by the hw, and it looks like
* we must not send the ecc field, either. */
memcpy(sdvo_data, &avi_if, 3);
sdvo_data[3] = avi_if.checksum;
memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data));
if (len < 0)
return false;
return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
SDVO_HBUF_TX_VSYNC,
@ -1084,7 +1085,7 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
struct drm_display_mode *mode = &pipe_config->requested_mode;
@ -1154,7 +1155,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&intel_encoder->base);
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
struct intel_sdvo_dtd input_dtd, output_dtd;
@ -1292,7 +1293,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
u16 active_outputs = 0;
u32 tmp;
@ -1315,7 +1316,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_dtd dtd;
int encoder_pixel_multiplier = 0;
u32 flags = 0, sdvox;
@ -1380,7 +1381,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
static void intel_disable_sdvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
u32 temp;
intel_sdvo_set_active_outputs(intel_sdvo, 0);
@ -1422,7 +1423,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
bool input1, input2;
@ -1583,7 +1584,7 @@ static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
{
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
&intel_sdvo->hotplug_active, 2);
@ -2190,7 +2191,7 @@ static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder));
if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
drm_mode_destroy(encoder->dev,


@ -38,7 +38,8 @@
#include "i915_drv.h"
static void
vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
@ -108,14 +109,15 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
sprctl |= SP_ENABLE;
intel_update_sprite_watermarks(dplane, crtc, src_w, pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
/* Sizes are 0 based */
src_w--;
src_h--;
crtc_w--;
crtc_h--;
intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
@ -139,7 +141,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
}
static void
vlv_disable_plane(struct drm_plane *dplane)
vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
{
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -152,6 +154,8 @@ vlv_disable_plane(struct drm_plane *dplane)
/* Activate double buffered register update */
I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0);
POSTING_READ(SPSURF(pipe, plane));
intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
}
static int
@ -206,7 +210,8 @@ vlv_get_colorkey(struct drm_plane *dplane,
}
static void
ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
@ -262,14 +267,15 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
if (IS_HASWELL(dev))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
/* Sizes are 0 based */
src_w--;
src_h--;
crtc_w--;
crtc_h--;
intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
/*
* IVB workaround: must disable low power watermarks for at least
* one frame before enabling scaling. LP watermarks can be re-enabled
@ -318,7 +324,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
}
static void
ivb_disable_plane(struct drm_plane *plane)
ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -336,7 +342,7 @@ ivb_disable_plane(struct drm_plane *plane)
dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
intel_update_sprite_watermarks(dev, pipe, 0, 0, false);
intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
/* potentially re-enable LP watermarks */
if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
@ -398,7 +404,8 @@ ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
}
static void
ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
@ -450,14 +457,15 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
dvscntr |= DVS_ENABLE;
intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
/* Sizes are 0 based */
src_w--;
src_h--;
crtc_w--;
crtc_h--;
intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
dvsscale = 0;
if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
@ -485,7 +493,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
}
static void
ilk_disable_plane(struct drm_plane *plane)
ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -498,6 +506,8 @@ ilk_disable_plane(struct drm_plane *plane)
/* Flush double buffered register updates */
I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
POSTING_READ(DVSSURF(pipe));
intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
}
static void
@ -820,11 +830,11 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
intel_enable_primary(crtc);
if (visible)
intel_plane->update_plane(plane, fb, obj,
intel_plane->update_plane(plane, crtc, fb, obj,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
else
intel_plane->disable_plane(plane);
intel_plane->disable_plane(plane, crtc);
if (disable_primary)
intel_disable_primary(crtc);
@ -857,9 +867,14 @@ intel_disable_plane(struct drm_plane *plane)
struct intel_plane *intel_plane = to_intel_plane(plane);
int ret = 0;
if (plane->crtc)
intel_enable_primary(plane->crtc);
intel_plane->disable_plane(plane);
if (!plane->fb)
return 0;
if (WARN_ON(!plane->crtc))
return -EINVAL;
intel_enable_primary(plane->crtc);
intel_plane->disable_plane(plane, plane->crtc);
if (!intel_plane->obj)
goto out;


@ -823,16 +823,14 @@ static const struct tv_mode tv_modes[] = {
},
};
static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
static struct intel_tv *enc_to_tv(struct intel_encoder *encoder)
{
return container_of(encoder, struct intel_tv, base.base);
return container_of(encoder, struct intel_tv, base);
}
static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
{
return container_of(intel_attached_encoder(connector),
struct intel_tv,
base);
return enc_to_tv(intel_attached_encoder(connector));
}
static bool
@ -908,7 +906,7 @@ static bool
intel_tv_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct intel_tv *intel_tv = enc_to_intel_tv(&encoder->base);
struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
if (!tv_mode)
@ -921,15 +919,12 @@ intel_tv_compute_config(struct intel_encoder *encoder,
return true;
}
static void
intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static void intel_tv_mode_set(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
u32 tv_ctl;
u32 hctl1, hctl2, hctl3;
@ -1487,10 +1482,6 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
return ret;
}
static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
.mode_set = intel_tv_mode_set,
};
static const struct drm_connector_funcs intel_tv_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_tv_detect,
@ -1623,6 +1614,7 @@ intel_tv_init(struct drm_device *dev)
DRM_MODE_ENCODER_TVDAC);
intel_encoder->compute_config = intel_tv_compute_config;
intel_encoder->mode_set = intel_tv_mode_set;
intel_encoder->enable = intel_enable_tv;
intel_encoder->disable = intel_disable_tv;
intel_encoder->get_hw_state = intel_tv_get_hw_state;
@ -1644,7 +1636,6 @@ intel_tv_init(struct drm_device *dev)
intel_tv->tv_format = tv_modes[initial_mode].name;
drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs);
drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
connector->interlace_allowed = false;
connector->doublescan_allowed = false;


@ -22,6 +22,7 @@
*/
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/hdmi.h>
@ -52,7 +53,7 @@ int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
frame->type = HDMI_INFOFRAME_TYPE_AVI;
frame->version = 2;
frame->length = 13;
frame->length = HDMI_AVI_INFOFRAME_SIZE;
return 0;
}
@ -83,7 +84,7 @@ ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
if (size < length)
return -ENOSPC;
memset(buffer, 0, length);
memset(buffer, 0, size);
ptr[0] = frame->type;
ptr[1] = frame->version;
@ -151,7 +152,7 @@ int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
frame->type = HDMI_INFOFRAME_TYPE_SPD;
frame->version = 1;
frame->length = 25;
frame->length = HDMI_SPD_INFOFRAME_SIZE;
strncpy(frame->vendor, vendor, sizeof(frame->vendor));
strncpy(frame->product, product, sizeof(frame->product));
@ -185,7 +186,7 @@ ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
if (size < length)
return -ENOSPC;
memset(buffer, 0, length);
memset(buffer, 0, size);
ptr[0] = frame->type;
ptr[1] = frame->version;
@ -218,7 +219,7 @@ int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame)
frame->type = HDMI_INFOFRAME_TYPE_AUDIO;
frame->version = 1;
frame->length = 10;
frame->length = HDMI_AUDIO_INFOFRAME_SIZE;
return 0;
}
@ -250,7 +251,7 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
if (size < length)
return -ENOSPC;
memset(buffer, 0, length);
memset(buffer, 0, size);
if (frame->channels >= 2)
channels = frame->channels - 1;
@ -307,7 +308,7 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
if (size < length)
return -ENOSPC;
memset(buffer, 0, length);
memset(buffer, 0, size);
ptr[0] = frame->type;
ptr[1] = frame->version;
@ -321,3 +322,45 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
return length;
}
EXPORT_SYMBOL(hdmi_vendor_infoframe_pack);
/**
* hdmi_infoframe_pack() - write an HDMI infoframe to a binary buffer
* @frame: HDMI infoframe
* @buffer: destination buffer
* @size: size of buffer
*
* Packs the information contained in the @frame structure into a binary
* representation that can be written into the corresponding controller
* registers. Also computes the checksum as required by section 5.3.5 of
* the HDMI 1.4 specification.
*
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
ssize_t
hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size)
{
ssize_t length;
switch (frame->any.type) {
case HDMI_INFOFRAME_TYPE_AVI:
length = hdmi_avi_infoframe_pack(&frame->avi, buffer, size);
break;
case HDMI_INFOFRAME_TYPE_SPD:
length = hdmi_spd_infoframe_pack(&frame->spd, buffer, size);
break;
case HDMI_INFOFRAME_TYPE_AUDIO:
length = hdmi_audio_infoframe_pack(&frame->audio, buffer, size);
break;
case HDMI_INFOFRAME_TYPE_VENDOR:
length = hdmi_vendor_infoframe_pack(&frame->vendor,
buffer, size);
break;
default:
WARN(1, "Bad infoframe type %d\n", frame->any.type);
length = -EINVAL;
}
return length;
}
EXPORT_SYMBOL(hdmi_infoframe_pack);
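
/*
 * Editor's usage sketch (not from this commit; "Vendor"/"Product" are
 * placeholder strings): initialize one member of the union, then let
 * hdmi_infoframe_pack() dispatch on frame->any.type.
 */
static void example_pack_spd(void)
{
	union hdmi_infoframe frame;
	u8 buffer[HDMI_INFOFRAME_SIZE(SPD)];
	ssize_t len;

	if (hdmi_spd_infoframe_init(&frame.spd, "Vendor", "Product") < 0)
		return;

	len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer));
	/* on success, len == HDMI_INFOFRAME_SIZE(SPD), i.e. 4 header plus
	 * 25 payload bytes, and the packed bytes sum to 0 modulo 256 */
}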


@ -23,6 +23,15 @@ enum hdmi_infoframe_type {
#define HDMI_SPD_INFOFRAME_SIZE 25
#define HDMI_AUDIO_INFOFRAME_SIZE 10
#define HDMI_INFOFRAME_SIZE(type) \
(HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE)
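/* e.g. HDMI_INFOFRAME_SIZE(AVI) expands to HDMI_INFOFRAME_HEADER_SIZE +
 * HDMI_AVI_INFOFRAME_SIZE, i.e. 4 + 13 == 17 bytes (editor's note) */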
struct hdmi_any_infoframe {
enum hdmi_infoframe_type type;
unsigned char version;
unsigned char length;
};
enum hdmi_colorspace {
HDMI_COLORSPACE_RGB,
HDMI_COLORSPACE_YUV422,
@ -228,4 +237,15 @@ struct hdmi_vendor_infoframe {
ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
void *buffer, size_t size);
union hdmi_infoframe {
struct hdmi_any_infoframe any;
struct hdmi_avi_infoframe avi;
struct hdmi_spd_infoframe spd;
struct hdmi_vendor_infoframe vendor;
struct hdmi_audio_infoframe audio;
};
ssize_t
hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
#endif /* _DRM_HDMI_H */