Merge branch 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel into drm-next

drm-intel-next-2016-05-22:
- cmd-parser support for direct reg->reg loads (Ken Graunke)
- better handle DP++ smart dongles (Ville)
- bxt guc fw loading support (Nick Hoath)
- remove a bunch of struct typedefs from dpll code (Ander)
- tons of small work all over to avoid casting between drm_device and the i915
  dev struct (Tvrtko&Chris)
- untangle request retiring from other operations, also fixes reset stat corner
  cases (Chris)
- skl atomic watermark support from Matt Roper, yay!
- various wm handling bugfixes from Ville
- big pile of cdclk rework for bxt/skl (Ville)
- CABC (Content Adaptive Brightness Control) for dsi panels (Jani&Deepak M)
- nonblocking atomic commits for plane-only updates (Maarten Lankhorst)
- bunch of PSR fixes&improvements
- untangle our map/pin/sg_iter code a bit (Dave Gordon)
drm-intel-next-2016-05-08:
- refactor stolen quirks to share code between early quirks and i915 (Joonas)
- refactor gem BO/vma functions (Tvrtko&Dave)
- backlight over DPCD support (Yetunde Adebisi)
- more dsi panel sequence support (Jani)
- lots of refactoring around handling iomaps, vma, ring access and related
  topics culminating in removing the duplicated request tracking in the execlist
  code (Chris & Tvrtko) includes a small patch for core iomapping code
- hw state readout for bxt dsi (Ramalingam C)
- cdclk cleanups (Ville)
- dedupe chv pll code a bit (Ander)
- enable semaphores on gen8+ for legacy submission, to be able to have a direct
  comparison against execlists on the same platform (Chris). Not meant to be used
  for anything else but performance tuning
- lvds border bit hw state checker fix (Jani)
- rpm vs. shrinker/oom-notifier fixes (Praveen Paneri)
- l3 tuning (Imre)
- revert mst dp audio, it's totally non-functional and crash-y (Lyude)
- first official dmc for kbl (Rodrigo)
- and tons of small things all over as usual

* 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel: (194 commits)
  drm/i915: Revert async unpin and nonblocking atomic commit
  drm/i915: Update DRIVER_DATE to 20160522
  drm/i915: Inline sg_next() for the optimised SGL iterator
  drm/i915: Introduce & use new lightweight SGL iterators
  drm/i915: optimise i915_gem_object_map() for small objects
  drm/i915: refactor i915_gem_object_pin_map()
  drm/i915/psr: Implement PSR2 w/a for gen9
  drm/i915/psr: Use ->get_aux_send_ctl functions
  drm/i915/psr: Order DP aux transactions correctly
  drm/i915/psr: Make idle_frames sensible again
  drm/i915/psr: Try to program link training times correctly
  drm/i915/userptr: Convert to drm_i915_private
  drm/i915: Allow nonblocking update of pageflips.
  drm/i915: Check for unpin correctness.
  Reapply "drm/i915: Avoid stalling on pending flips for legacy cursor updates"
  drm/i915: Make unpin async.
  drm/i915: Prepare connectors for nonblocking checks.
  drm/i915: Pass atomic states to fbc update functions.
  drm/i915: Remove reset_counter from intel_crtc.
  drm/i915: Remove queue_flip pointer.
  ...
Committed by Dave Airlie on 2016-06-02 07:58:36 +10:00
commit 66fd7a66e8
72 changed files with 4579 additions and 3930 deletions


@ -223,36 +223,19 @@ static void __init intel_remapping_check(int num, int slot, int func)
* despite the efforts of the "RAM buffer" approach, which simply rounds * despite the efforts of the "RAM buffer" approach, which simply rounds
* memory boundaries up to 64M to try to catch space that may decode * memory boundaries up to 64M to try to catch space that may decode
* as RAM and so is not suitable for MMIO. * as RAM and so is not suitable for MMIO.
*
* And yes, so far on current devices the base addr is always under 4G.
*/ */
static u32 __init intel_stolen_base(int num, int slot, int func, size_t stolen_size)
{
u32 base;
/*
* For the PCI IDs in this quirk, the stolen base is always
* in 0x5c, aka the BDSM register (yes that's really what
* it's called).
*/
base = read_pci_config(num, slot, func, 0x5c);
base &= ~((1<<20) - 1);
return base;
}
#define KB(x) ((x) * 1024UL) #define KB(x) ((x) * 1024UL)
#define MB(x) (KB (KB (x))) #define MB(x) (KB (KB (x)))
#define GB(x) (MB (KB (x)))
static size_t __init i830_tseg_size(void) static size_t __init i830_tseg_size(void)
{ {
u8 tmp = read_pci_config_byte(0, 0, 0, I830_ESMRAMC); u8 esmramc = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);
if (!(tmp & TSEG_ENABLE)) if (!(esmramc & TSEG_ENABLE))
return 0; return 0;
if (tmp & I830_TSEG_SIZE_1M) if (esmramc & I830_TSEG_SIZE_1M)
return MB(1); return MB(1);
else else
return KB(512); return KB(512);
@ -260,27 +243,26 @@ static size_t __init i830_tseg_size(void)
static size_t __init i845_tseg_size(void) static size_t __init i845_tseg_size(void)
{ {
u8 tmp = read_pci_config_byte(0, 0, 0, I845_ESMRAMC); u8 esmramc = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);
u8 tseg_size = esmramc & I845_TSEG_SIZE_MASK;
if (!(tmp & TSEG_ENABLE)) if (!(esmramc & TSEG_ENABLE))
return 0; return 0;
switch (tmp & I845_TSEG_SIZE_MASK) { switch (tseg_size) {
case I845_TSEG_SIZE_512K: case I845_TSEG_SIZE_512K: return KB(512);
return KB(512); case I845_TSEG_SIZE_1M: return MB(1);
case I845_TSEG_SIZE_1M:
return MB(1);
default: default:
WARN_ON(1); WARN(1, "Unknown ESMRAMC value: %x!\n", esmramc);
return 0;
} }
return 0;
} }
static size_t __init i85x_tseg_size(void) static size_t __init i85x_tseg_size(void)
{ {
u8 tmp = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC); u8 esmramc = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);
if (!(tmp & TSEG_ENABLE)) if (!(esmramc & TSEG_ENABLE))
return 0; return 0;
return MB(1); return MB(1);
@ -300,285 +282,287 @@ static size_t __init i85x_mem_size(void)
* On 830/845/85x the stolen memory base isn't available in any * On 830/845/85x the stolen memory base isn't available in any
* register. We need to calculate it as TOM-TSEG_SIZE-stolen_size. * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
*/ */
static u32 __init i830_stolen_base(int num, int slot, int func, size_t stolen_size) static phys_addr_t __init i830_stolen_base(int num, int slot, int func,
size_t stolen_size)
{ {
return i830_mem_size() - i830_tseg_size() - stolen_size; return (phys_addr_t)i830_mem_size() - i830_tseg_size() - stolen_size;
} }
static u32 __init i845_stolen_base(int num, int slot, int func, size_t stolen_size) static phys_addr_t __init i845_stolen_base(int num, int slot, int func,
size_t stolen_size)
{ {
return i830_mem_size() - i845_tseg_size() - stolen_size; return (phys_addr_t)i830_mem_size() - i845_tseg_size() - stolen_size;
} }
static u32 __init i85x_stolen_base(int num, int slot, int func, size_t stolen_size) static phys_addr_t __init i85x_stolen_base(int num, int slot, int func,
size_t stolen_size)
{ {
return i85x_mem_size() - i85x_tseg_size() - stolen_size; return (phys_addr_t)i85x_mem_size() - i85x_tseg_size() - stolen_size;
} }
static u32 __init i865_stolen_base(int num, int slot, int func, size_t stolen_size) static phys_addr_t __init i865_stolen_base(int num, int slot, int func,
size_t stolen_size)
{ {
u16 toud;
/* /*
* FIXME is the graphics stolen memory region * FIXME is the graphics stolen memory region
* always at TOUD? Ie. is it always the last * always at TOUD? Ie. is it always the last
* one to be allocated by the BIOS? * one to be allocated by the BIOS?
*/ */
return read_pci_config_16(0, 0, 0, I865_TOUD) << 16; toud = read_pci_config_16(0, 0, 0, I865_TOUD);
return (phys_addr_t)toud << 16;
}
static phys_addr_t __init gen3_stolen_base(int num, int slot, int func,
size_t stolen_size)
{
u32 bsm;
/* Almost universally we can find the Graphics Base of Stolen Memory
* at register BSM (0x5c) in the igfx configuration space. On a few
* (desktop) machines this is also mirrored in the bridge device at
* different locations, or in the MCHBAR.
*/
bsm = read_pci_config(num, slot, func, INTEL_BSM);
return (phys_addr_t)bsm & INTEL_BSM_MASK;
} }
static size_t __init i830_stolen_size(int num, int slot, int func) static size_t __init i830_stolen_size(int num, int slot, int func)
{ {
size_t stolen_size;
u16 gmch_ctrl; u16 gmch_ctrl;
u16 gms;
gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL); gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
gms = gmch_ctrl & I830_GMCH_GMS_MASK;
switch (gmch_ctrl & I830_GMCH_GMS_MASK) { switch (gms) {
case I830_GMCH_GMS_STOLEN_512: case I830_GMCH_GMS_STOLEN_512: return KB(512);
stolen_size = KB(512); case I830_GMCH_GMS_STOLEN_1024: return MB(1);
break; case I830_GMCH_GMS_STOLEN_8192: return MB(8);
case I830_GMCH_GMS_STOLEN_1024: /* local memory isn't part of the normal address space */
stolen_size = MB(1); case I830_GMCH_GMS_LOCAL: return 0;
break;
case I830_GMCH_GMS_STOLEN_8192:
stolen_size = MB(8);
break;
case I830_GMCH_GMS_LOCAL:
/* local memory isn't part of the normal address space */
stolen_size = 0;
break;
default: default:
return 0; WARN(1, "Unknown GMCH_CTRL value: %x!\n", gmch_ctrl);
} }
return stolen_size; return 0;
} }
static size_t __init gen3_stolen_size(int num, int slot, int func) static size_t __init gen3_stolen_size(int num, int slot, int func)
{ {
size_t stolen_size;
u16 gmch_ctrl; u16 gmch_ctrl;
u16 gms;
gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL); gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
gms = gmch_ctrl & I855_GMCH_GMS_MASK;
switch (gmch_ctrl & I855_GMCH_GMS_MASK) { switch (gms) {
case I855_GMCH_GMS_STOLEN_1M: case I855_GMCH_GMS_STOLEN_1M: return MB(1);
stolen_size = MB(1); case I855_GMCH_GMS_STOLEN_4M: return MB(4);
break; case I855_GMCH_GMS_STOLEN_8M: return MB(8);
case I855_GMCH_GMS_STOLEN_4M: case I855_GMCH_GMS_STOLEN_16M: return MB(16);
stolen_size = MB(4); case I855_GMCH_GMS_STOLEN_32M: return MB(32);
break; case I915_GMCH_GMS_STOLEN_48M: return MB(48);
case I855_GMCH_GMS_STOLEN_8M: case I915_GMCH_GMS_STOLEN_64M: return MB(64);
stolen_size = MB(8); case G33_GMCH_GMS_STOLEN_128M: return MB(128);
break; case G33_GMCH_GMS_STOLEN_256M: return MB(256);
case I855_GMCH_GMS_STOLEN_16M: case INTEL_GMCH_GMS_STOLEN_96M: return MB(96);
stolen_size = MB(16); case INTEL_GMCH_GMS_STOLEN_160M:return MB(160);
break; case INTEL_GMCH_GMS_STOLEN_224M:return MB(224);
case I855_GMCH_GMS_STOLEN_32M: case INTEL_GMCH_GMS_STOLEN_352M:return MB(352);
stolen_size = MB(32);
break;
case I915_GMCH_GMS_STOLEN_48M:
stolen_size = MB(48);
break;
case I915_GMCH_GMS_STOLEN_64M:
stolen_size = MB(64);
break;
case G33_GMCH_GMS_STOLEN_128M:
stolen_size = MB(128);
break;
case G33_GMCH_GMS_STOLEN_256M:
stolen_size = MB(256);
break;
case INTEL_GMCH_GMS_STOLEN_96M:
stolen_size = MB(96);
break;
case INTEL_GMCH_GMS_STOLEN_160M:
stolen_size = MB(160);
break;
case INTEL_GMCH_GMS_STOLEN_224M:
stolen_size = MB(224);
break;
case INTEL_GMCH_GMS_STOLEN_352M:
stolen_size = MB(352);
break;
default: default:
stolen_size = 0; WARN(1, "Unknown GMCH_CTRL value: %x!\n", gmch_ctrl);
break;
} }
return stolen_size; return 0;
} }
static size_t __init gen6_stolen_size(int num, int slot, int func) static size_t __init gen6_stolen_size(int num, int slot, int func)
{ {
u16 gmch_ctrl; u16 gmch_ctrl;
u16 gms;
gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL); gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
gmch_ctrl >>= SNB_GMCH_GMS_SHIFT; gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;
gmch_ctrl &= SNB_GMCH_GMS_MASK;
return gmch_ctrl << 25; /* 32 MB units */ return (size_t)gms * MB(32);
} }
static size_t __init gen8_stolen_size(int num, int slot, int func) static size_t __init gen8_stolen_size(int num, int slot, int func)
{ {
u16 gmch_ctrl; u16 gmch_ctrl;
u16 gms;
gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL); gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
gmch_ctrl >>= BDW_GMCH_GMS_SHIFT; gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK;
gmch_ctrl &= BDW_GMCH_GMS_MASK;
return gmch_ctrl << 25; /* 32 MB units */ return (size_t)gms * MB(32);
} }
static size_t __init chv_stolen_size(int num, int slot, int func) static size_t __init chv_stolen_size(int num, int slot, int func)
{ {
u16 gmch_ctrl; u16 gmch_ctrl;
u16 gms;
gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL); gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
gmch_ctrl >>= SNB_GMCH_GMS_SHIFT; gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;
gmch_ctrl &= SNB_GMCH_GMS_MASK;
/* /*
* 0x0 to 0x10: 32MB increments starting at 0MB * 0x0 to 0x10: 32MB increments starting at 0MB
* 0x11 to 0x16: 4MB increments starting at 8MB * 0x11 to 0x16: 4MB increments starting at 8MB
* 0x17 to 0x1d: 4MB increments start at 36MB * 0x17 to 0x1d: 4MB increments start at 36MB
*/ */
if (gmch_ctrl < 0x11) if (gms < 0x11)
return gmch_ctrl << 25; return (size_t)gms * MB(32);
else if (gmch_ctrl < 0x17) else if (gms < 0x17)
return (gmch_ctrl - 0x11 + 2) << 22; return (size_t)(gms - 0x11 + 2) * MB(4);
else else
return (gmch_ctrl - 0x17 + 9) << 22; return (size_t)(gms - 0x17 + 9) * MB(4);
} }
struct intel_stolen_funcs {
size_t (*size)(int num, int slot, int func);
u32 (*base)(int num, int slot, int func, size_t size);
};
static size_t __init gen9_stolen_size(int num, int slot, int func) static size_t __init gen9_stolen_size(int num, int slot, int func)
{ {
u16 gmch_ctrl; u16 gmch_ctrl;
u16 gms;
gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL); gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
gmch_ctrl >>= BDW_GMCH_GMS_SHIFT; gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK;
gmch_ctrl &= BDW_GMCH_GMS_MASK;
if (gmch_ctrl < 0xf0) /* 0x0 to 0xef: 32MB increments starting at 0MB */
return gmch_ctrl << 25; /* 32 MB units */ /* 0xf0 to 0xfe: 4MB increments starting at 4MB */
if (gms < 0xf0)
return (size_t)gms * MB(32);
else else
/* 4MB increments starting at 0xf0 for 4MB */ return (size_t)(gms - 0xf0 + 1) * MB(4);
return (gmch_ctrl - 0xf0 + 1) << 22;
} }
typedef size_t (*stolen_size_fn)(int num, int slot, int func); struct intel_early_ops {
size_t (*stolen_size)(int num, int slot, int func);
static const struct intel_stolen_funcs i830_stolen_funcs __initconst = { phys_addr_t (*stolen_base)(int num, int slot, int func, size_t size);
.base = i830_stolen_base,
.size = i830_stolen_size,
}; };
static const struct intel_stolen_funcs i845_stolen_funcs __initconst = { static const struct intel_early_ops i830_early_ops __initconst = {
.base = i845_stolen_base, .stolen_base = i830_stolen_base,
.size = i830_stolen_size, .stolen_size = i830_stolen_size,
}; };
static const struct intel_stolen_funcs i85x_stolen_funcs __initconst = { static const struct intel_early_ops i845_early_ops __initconst = {
.base = i85x_stolen_base, .stolen_base = i845_stolen_base,
.size = gen3_stolen_size, .stolen_size = i830_stolen_size,
}; };
static const struct intel_stolen_funcs i865_stolen_funcs __initconst = { static const struct intel_early_ops i85x_early_ops __initconst = {
.base = i865_stolen_base, .stolen_base = i85x_stolen_base,
.size = gen3_stolen_size, .stolen_size = gen3_stolen_size,
}; };
static const struct intel_stolen_funcs gen3_stolen_funcs __initconst = { static const struct intel_early_ops i865_early_ops __initconst = {
.base = intel_stolen_base, .stolen_base = i865_stolen_base,
.size = gen3_stolen_size, .stolen_size = gen3_stolen_size,
}; };
static const struct intel_stolen_funcs gen6_stolen_funcs __initconst = { static const struct intel_early_ops gen3_early_ops __initconst = {
.base = intel_stolen_base, .stolen_base = gen3_stolen_base,
.size = gen6_stolen_size, .stolen_size = gen3_stolen_size,
}; };
static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = { static const struct intel_early_ops gen6_early_ops __initconst = {
.base = intel_stolen_base, .stolen_base = gen3_stolen_base,
.size = gen8_stolen_size, .stolen_size = gen6_stolen_size,
}; };
static const struct intel_stolen_funcs gen9_stolen_funcs __initconst = { static const struct intel_early_ops gen8_early_ops __initconst = {
.base = intel_stolen_base, .stolen_base = gen3_stolen_base,
.size = gen9_stolen_size, .stolen_size = gen8_stolen_size,
}; };
static const struct intel_stolen_funcs chv_stolen_funcs __initconst = { static const struct intel_early_ops gen9_early_ops __initconst = {
.base = intel_stolen_base, .stolen_base = gen3_stolen_base,
.size = chv_stolen_size, .stolen_size = gen9_stolen_size,
}; };
static const struct pci_device_id intel_stolen_ids[] __initconst = { static const struct intel_early_ops chv_early_ops __initconst = {
INTEL_I830_IDS(&i830_stolen_funcs), .stolen_base = gen3_stolen_base,
INTEL_I845G_IDS(&i845_stolen_funcs), .stolen_size = chv_stolen_size,
INTEL_I85X_IDS(&i85x_stolen_funcs),
INTEL_I865G_IDS(&i865_stolen_funcs),
INTEL_I915G_IDS(&gen3_stolen_funcs),
INTEL_I915GM_IDS(&gen3_stolen_funcs),
INTEL_I945G_IDS(&gen3_stolen_funcs),
INTEL_I945GM_IDS(&gen3_stolen_funcs),
INTEL_VLV_M_IDS(&gen6_stolen_funcs),
INTEL_VLV_D_IDS(&gen6_stolen_funcs),
INTEL_PINEVIEW_IDS(&gen3_stolen_funcs),
INTEL_I965G_IDS(&gen3_stolen_funcs),
INTEL_G33_IDS(&gen3_stolen_funcs),
INTEL_I965GM_IDS(&gen3_stolen_funcs),
INTEL_GM45_IDS(&gen3_stolen_funcs),
INTEL_G45_IDS(&gen3_stolen_funcs),
INTEL_IRONLAKE_D_IDS(&gen3_stolen_funcs),
INTEL_IRONLAKE_M_IDS(&gen3_stolen_funcs),
INTEL_SNB_D_IDS(&gen6_stolen_funcs),
INTEL_SNB_M_IDS(&gen6_stolen_funcs),
INTEL_IVB_M_IDS(&gen6_stolen_funcs),
INTEL_IVB_D_IDS(&gen6_stolen_funcs),
INTEL_HSW_D_IDS(&gen6_stolen_funcs),
INTEL_HSW_M_IDS(&gen6_stolen_funcs),
INTEL_BDW_M_IDS(&gen8_stolen_funcs),
INTEL_BDW_D_IDS(&gen8_stolen_funcs),
INTEL_CHV_IDS(&chv_stolen_funcs),
INTEL_SKL_IDS(&gen9_stolen_funcs),
INTEL_BXT_IDS(&gen9_stolen_funcs),
INTEL_KBL_IDS(&gen9_stolen_funcs),
}; };
static void __init intel_graphics_stolen(int num, int slot, int func) static const struct pci_device_id intel_early_ids[] __initconst = {
INTEL_I830_IDS(&i830_early_ops),
INTEL_I845G_IDS(&i845_early_ops),
INTEL_I85X_IDS(&i85x_early_ops),
INTEL_I865G_IDS(&i865_early_ops),
INTEL_I915G_IDS(&gen3_early_ops),
INTEL_I915GM_IDS(&gen3_early_ops),
INTEL_I945G_IDS(&gen3_early_ops),
INTEL_I945GM_IDS(&gen3_early_ops),
INTEL_VLV_M_IDS(&gen6_early_ops),
INTEL_VLV_D_IDS(&gen6_early_ops),
INTEL_PINEVIEW_IDS(&gen3_early_ops),
INTEL_I965G_IDS(&gen3_early_ops),
INTEL_G33_IDS(&gen3_early_ops),
INTEL_I965GM_IDS(&gen3_early_ops),
INTEL_GM45_IDS(&gen3_early_ops),
INTEL_G45_IDS(&gen3_early_ops),
INTEL_IRONLAKE_D_IDS(&gen3_early_ops),
INTEL_IRONLAKE_M_IDS(&gen3_early_ops),
INTEL_SNB_D_IDS(&gen6_early_ops),
INTEL_SNB_M_IDS(&gen6_early_ops),
INTEL_IVB_M_IDS(&gen6_early_ops),
INTEL_IVB_D_IDS(&gen6_early_ops),
INTEL_HSW_D_IDS(&gen6_early_ops),
INTEL_HSW_M_IDS(&gen6_early_ops),
INTEL_BDW_M_IDS(&gen8_early_ops),
INTEL_BDW_D_IDS(&gen8_early_ops),
INTEL_CHV_IDS(&chv_early_ops),
INTEL_SKL_IDS(&gen9_early_ops),
INTEL_BXT_IDS(&gen9_early_ops),
INTEL_KBL_IDS(&gen9_early_ops),
};
static void __init
intel_graphics_stolen(int num, int slot, int func,
const struct intel_early_ops *early_ops)
{ {
phys_addr_t base, end;
size_t size; size_t size;
size = early_ops->stolen_size(num, slot, func);
base = early_ops->stolen_base(num, slot, func, size);
if (!size || !base)
return;
end = base + size - 1;
printk(KERN_INFO "Reserving Intel graphics memory at %pa-%pa\n",
&base, &end);
/* Mark this space as reserved */
e820_add_region(base, size, E820_RESERVED);
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}
static void __init intel_graphics_quirks(int num, int slot, int func)
{
const struct intel_early_ops *early_ops;
u16 device;
int i; int i;
u32 start;
u16 device, subvendor, subdevice;
device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID); device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
subvendor = read_pci_config_16(num, slot, func,
PCI_SUBSYSTEM_VENDOR_ID);
subdevice = read_pci_config_16(num, slot, func, PCI_SUBSYSTEM_ID);
for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) { for (i = 0; i < ARRAY_SIZE(intel_early_ids); i++) {
if (intel_stolen_ids[i].device == device) { kernel_ulong_t driver_data = intel_early_ids[i].driver_data;
const struct intel_stolen_funcs *stolen_funcs =
(const struct intel_stolen_funcs *)intel_stolen_ids[i].driver_data; if (intel_early_ids[i].device != device)
size = stolen_funcs->size(num, slot, func); continue;
start = stolen_funcs->base(num, slot, func, size);
if (size && start) { early_ops = (typeof(early_ops))driver_data;
printk(KERN_INFO "Reserving Intel graphics stolen memory at 0x%x-0x%x\n",
start, start + (u32)size - 1); intel_graphics_stolen(num, slot, func, early_ops);
/* Mark this space as reserved */
e820_add_region(start, size, E820_RESERVED); return;
sanitize_e820_map(e820.map,
ARRAY_SIZE(e820.map),
&e820.nr_map);
}
return;
}
} }
} }
@ -627,7 +611,7 @@ static struct chipset early_qrk[] __initdata = {
{ PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST, { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID, { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
QFLAG_APPLY_ONCE, intel_graphics_stolen }, QFLAG_APPLY_ONCE, intel_graphics_quirks },
/* /*
* HPET on the current version of the Baytrail platform has accuracy * HPET on the current version of the Baytrail platform has accuracy
* problems: it will halt in deep idle state - so we disable it. * problems: it will halt in deep idle state - so we disable it.


@ -242,6 +242,37 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
store_vblank(dev, pipe, diff, &t_vblank, cur_vblank); store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
} }
/**
* drm_accurate_vblank_count - retrieve the master vblank counter
* @crtc: which counter to retrieve
*
* This function is similar to @drm_crtc_vblank_count but this
* function interpolates to handle a race with vblank irq's.
*
* This is mostly useful for hardware that can obtain the scanout
* position, but doesn't have a frame counter.
*/
u32 drm_accurate_vblank_count(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
u32 vblank;
unsigned long flags;
WARN(!dev->driver->get_vblank_timestamp,
"This function requires support for accurate vblank timestamps.");
spin_lock_irqsave(&dev->vblank_time_lock, flags);
drm_update_vblank_count(dev, pipe, 0);
vblank = drm_vblank_count(dev, pipe);
spin_unlock_irqrestore(&dev->vblank_time_lock, flags);
return vblank;
}
EXPORT_SYMBOL(drm_accurate_vblank_count);
/* /*
* Disable vblank irq's on crtc, make sure that last vblank count * Disable vblank irq's on crtc, make sure that last vblank count
* of hardware and corresponding consistent software vblank counter * of hardware and corresponding consistent software vblank counter

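The drm_accurate_vblank_count() helper added above is exported for drivers that can read the scanout position but lack a hardware frame counter. A minimal, hypothetical sketch of a caller (the wrapper function name is illustrative, not part of this patch):

	#include <drm/drmP.h>

	/*
	 * Hypothetical sketch: sample the interpolated vblank counter when a
	 * flip is queued, so its completion can later be matched to a frame.
	 */
	static u32 example_flip_queued_vblank(struct drm_crtc *crtc)
	{
		/* safe against racing with the vblank irq, per the kerneldoc above */
		return drm_accurate_vblank_count(crtc);
	}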

@ -59,6 +59,7 @@ i915-y += intel_audio.o \
intel_bios.o \ intel_bios.o \
intel_color.o \ intel_color.o \
intel_display.o \ intel_display.o \
intel_dpio_phy.o \
intel_dpll_mgr.o \ intel_dpll_mgr.o \
intel_fbc.o \ intel_fbc.o \
intel_fifo_underrun.o \ intel_fifo_underrun.o \
@ -81,10 +82,12 @@ i915-y += dvo_ch7017.o \
dvo_tfp410.o \ dvo_tfp410.o \
intel_crt.o \ intel_crt.o \
intel_ddi.o \ intel_ddi.o \
intel_dp_aux_backlight.o \
intel_dp_link_training.o \ intel_dp_link_training.o \
intel_dp_mst.o \ intel_dp_mst.o \
intel_dp.o \ intel_dp.o \
intel_dsi.o \ intel_dsi.o \
intel_dsi_dcs_backlight.o \
intel_dsi_panel_vbt.o \ intel_dsi_panel_vbt.o \
intel_dsi_pll.o \ intel_dsi_pll.o \
intel_dvo.o \ intel_dvo.o \


@ -215,7 +215,8 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
CMD( MI_RS_CONTEXT, SMI, F, 1, S ), CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ), CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ), CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, R ), CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
.reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ), CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ),
CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ), CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ),
CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ), CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ),
@ -750,12 +751,12 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
int cmd_table_count; int cmd_table_count;
int ret; int ret;
if (!IS_GEN7(engine->dev)) if (!IS_GEN7(engine->i915))
return 0; return 0;
switch (engine->id) { switch (engine->id) {
case RCS: case RCS:
if (IS_HASWELL(engine->dev)) { if (IS_HASWELL(engine->i915)) {
cmd_tables = hsw_render_ring_cmds; cmd_tables = hsw_render_ring_cmds;
cmd_table_count = cmd_table_count =
ARRAY_SIZE(hsw_render_ring_cmds); ARRAY_SIZE(hsw_render_ring_cmds);
@ -764,7 +765,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
cmd_table_count = ARRAY_SIZE(gen7_render_cmds); cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
} }
if (IS_HASWELL(engine->dev)) { if (IS_HASWELL(engine->i915)) {
engine->reg_tables = hsw_render_reg_tables; engine->reg_tables = hsw_render_reg_tables;
engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables); engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
} else { } else {
@ -780,7 +781,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break; break;
case BCS: case BCS:
if (IS_HASWELL(engine->dev)) { if (IS_HASWELL(engine->i915)) {
cmd_tables = hsw_blt_ring_cmds; cmd_tables = hsw_blt_ring_cmds;
cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds); cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
} else { } else {
@ -788,7 +789,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
cmd_table_count = ARRAY_SIZE(gen7_blt_cmds); cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
} }
if (IS_HASWELL(engine->dev)) { if (IS_HASWELL(engine->i915)) {
engine->reg_tables = hsw_blt_reg_tables; engine->reg_tables = hsw_blt_reg_tables;
engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables); engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
} else { } else {
@ -1035,7 +1036,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
if (!engine->needs_cmd_parser) if (!engine->needs_cmd_parser)
return false; return false;
if (!USES_PPGTT(engine->dev)) if (!USES_PPGTT(engine->i915))
return false; return false;
return (i915.enable_cmd_parser == 1); return (i915.enable_cmd_parser == 1);
@ -1098,6 +1099,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return false; return false;
} }
if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n");
return false;
}
if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1)) if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
*oacontrol_set = (cmd[offset + 1] != 0); *oacontrol_set = (cmd[offset + 1] != 0);
} }
@ -1113,6 +1119,12 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return false; return false;
} }
if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n",
reg_addr);
return false;
}
if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) && if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
(offset + 2 > length || (offset + 2 > length ||
(cmd[offset + 1] & reg->mask) != reg->value)) { (cmd[offset + 1] & reg->mask) != reg->value)) {
@ -1275,8 +1287,21 @@ int i915_parse_cmds(struct intel_engine_cs *engine,
* *
* Return: the current version number of the cmd parser * Return: the current version number of the cmd parser
*/ */
int i915_cmd_parser_get_version(void) int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
{ {
struct intel_engine_cs *engine;
bool active = false;
/* If the command parser is not enabled, report 0 - unsupported */
for_each_engine(engine, dev_priv) {
if (i915_needs_cmd_parser(engine)) {
active = true;
break;
}
}
if (!active)
return 0;
/* /*
* Command parser version history * Command parser version history
* *
@ -1288,6 +1313,7 @@ int i915_cmd_parser_get_version(void)
* 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3. * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
* 5. GPGPU dispatch compute indirect registers. * 5. GPGPU dispatch compute indirect registers.
* 6. TIMESTAMP register and Haswell CS GPR registers * 6. TIMESTAMP register and Haswell CS GPR registers
* 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers.
*/ */
return 6; return 7;
} }

View file

@ -89,17 +89,17 @@ static int i915_capabilities(struct seq_file *m, void *data)
return 0; return 0;
} }
static const char get_active_flag(struct drm_i915_gem_object *obj) static char get_active_flag(struct drm_i915_gem_object *obj)
{ {
return obj->active ? '*' : ' '; return obj->active ? '*' : ' ';
} }
static const char get_pin_flag(struct drm_i915_gem_object *obj) static char get_pin_flag(struct drm_i915_gem_object *obj)
{ {
return obj->pin_display ? 'p' : ' '; return obj->pin_display ? 'p' : ' ';
} }
static const char get_tiling_flag(struct drm_i915_gem_object *obj) static char get_tiling_flag(struct drm_i915_gem_object *obj)
{ {
switch (obj->tiling_mode) { switch (obj->tiling_mode) {
default: default:
@ -109,12 +109,12 @@ static const char get_tiling_flag(struct drm_i915_gem_object *obj)
} }
} }
static inline const char get_global_flag(struct drm_i915_gem_object *obj) static char get_global_flag(struct drm_i915_gem_object *obj)
{ {
return i915_gem_obj_to_ggtt(obj) ? 'g' : ' '; return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
} }
static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj) static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{ {
return obj->mapping ? 'M' : ' '; return obj->mapping ? 'M' : ' ';
} }
@ -607,18 +607,20 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
for_each_intel_crtc(dev, crtc) { for_each_intel_crtc(dev, crtc) {
const char pipe = pipe_name(crtc->pipe); const char pipe = pipe_name(crtc->pipe);
const char plane = plane_name(crtc->plane); const char plane = plane_name(crtc->plane);
struct intel_unpin_work *work; struct intel_flip_work *work;
spin_lock_irq(&dev->event_lock); spin_lock_irq(&dev->event_lock);
work = crtc->unpin_work; work = crtc->flip_work;
if (work == NULL) { if (work == NULL) {
seq_printf(m, "No flip due on pipe %c (plane %c)\n", seq_printf(m, "No flip due on pipe %c (plane %c)\n",
pipe, plane); pipe, plane);
} else { } else {
u32 pending;
u32 addr; u32 addr;
if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { pending = atomic_read(&work->pending);
seq_printf(m, "Flip queued on pipe %c (plane %c)\n", if (pending) {
seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
pipe, plane); pipe, plane);
} else { } else {
seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
@ -638,11 +640,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n", seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
work->flip_queued_vblank, work->flip_queued_vblank,
work->flip_ready_vblank, work->flip_ready_vblank,
drm_crtc_vblank_count(&crtc->base)); intel_crtc_get_vblank_counter(crtc));
if (work->enable_stall_check)
seq_puts(m, "Stall check enabled, ");
else
seq_puts(m, "Stall check waiting for page flip ioctl, ");
seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
if (INTEL_INFO(dev)->gen >= 4) if (INTEL_INFO(dev)->gen >= 4)
@ -1383,7 +1381,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
seqno[id] = engine->get_seqno(engine); seqno[id] = engine->get_seqno(engine);
} }
i915_get_extra_instdone(dev, instdone); i915_get_extra_instdone(dev_priv, instdone);
intel_runtime_pm_put(dev_priv); intel_runtime_pm_put(dev_priv);
@ -2004,7 +2002,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
ctx->legacy_hw_ctx.rcs_state == NULL) ctx->legacy_hw_ctx.rcs_state == NULL)
continue; continue;
seq_puts(m, "HW context "); seq_printf(m, "HW context %u ", ctx->hw_id);
describe_ctx(m, ctx); describe_ctx(m, ctx);
if (ctx == dev_priv->kernel_context) if (ctx == dev_priv->kernel_context)
seq_printf(m, "(kernel context) "); seq_printf(m, "(kernel context) ");
@ -2046,15 +2044,13 @@ static void i915_dump_lrc_obj(struct seq_file *m,
struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
unsigned long ggtt_offset = 0; unsigned long ggtt_offset = 0;
seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
if (ctx_obj == NULL) { if (ctx_obj == NULL) {
seq_printf(m, "Context on %s with no gem object\n", seq_puts(m, "\tNot allocated\n");
engine->name);
return; return;
} }
seq_printf(m, "CONTEXT: %s %u\n", engine->name,
intel_execlists_ctx_id(ctx, engine));
if (!i915_gem_obj_ggtt_bound(ctx_obj)) if (!i915_gem_obj_ggtt_bound(ctx_obj))
seq_puts(m, "\tNot bound in GGTT\n"); seq_puts(m, "\tNot bound in GGTT\n");
else else
@ -2100,9 +2096,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
return ret; return ret;
list_for_each_entry(ctx, &dev_priv->context_list, link) list_for_each_entry(ctx, &dev_priv->context_list, link)
if (ctx != dev_priv->kernel_context) for_each_engine(engine, dev_priv)
for_each_engine(engine, dev_priv) i915_dump_lrc_obj(m, ctx, engine);
i915_dump_lrc_obj(m, ctx, engine);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
@ -2173,8 +2168,8 @@ static int i915_execlists(struct seq_file *m, void *data)
seq_printf(m, "\t%d requests in queue\n", count); seq_printf(m, "\t%d requests in queue\n", count);
if (head_req) { if (head_req) {
seq_printf(m, "\tHead request id: %u\n", seq_printf(m, "\tHead request context: %u\n",
intel_execlists_ctx_id(head_req->ctx, engine)); head_req->ctx->hw_id);
seq_printf(m, "\tHead request tail: %u\n", seq_printf(m, "\tHead request tail: %u\n",
head_req->tail); head_req->tail);
} }
@ -2313,12 +2308,12 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
if (INTEL_INFO(dev)->gen == 6) if (IS_GEN6(dev_priv))
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
for_each_engine(engine, dev_priv) { for_each_engine(engine, dev_priv) {
seq_printf(m, "%s\n", engine->name); seq_printf(m, "%s\n", engine->name);
if (INTEL_INFO(dev)->gen == 7) if (IS_GEN7(dev_priv))
seq_printf(m, "GFX_MODE: 0x%08x\n", seq_printf(m, "GFX_MODE: 0x%08x\n",
I915_READ(RING_MODE_GEN7(engine))); I915_READ(RING_MODE_GEN7(engine)));
seq_printf(m, "PP_DIR_BASE: 0x%08x\n", seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
@ -3168,7 +3163,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
enum intel_engine_id id; enum intel_engine_id id;
int j, ret; int j, ret;
if (!i915_semaphore_is_enabled(dev)) { if (!i915_semaphore_is_enabled(dev_priv)) {
seq_puts(m, "Semaphores are disabled\n"); seq_puts(m, "Semaphores are disabled\n");
return 0; return 0;
} }
@ -4769,7 +4764,7 @@ i915_wedged_set(void *data, u64 val)
intel_runtime_pm_get(dev_priv); intel_runtime_pm_get(dev_priv);
i915_handle_error(dev, val, i915_handle_error(dev_priv, val,
"Manually setting wedged to %llu", val); "Manually setting wedged to %llu", val);
intel_runtime_pm_put(dev_priv); intel_runtime_pm_put(dev_priv);
@ -4919,7 +4914,7 @@ i915_drop_caches_set(void *data, u64 val)
} }
if (val & (DROP_RETIRE | DROP_ACTIVE)) if (val & (DROP_RETIRE | DROP_ACTIVE))
i915_gem_retire_requests(dev); i915_gem_retire_requests(dev_priv);
if (val & DROP_BOUND) if (val & DROP_BOUND)
i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND); i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
@ -4993,7 +4988,7 @@ i915_max_freq_set(void *data, u64 val)
dev_priv->rps.max_freq_softlimit = val; dev_priv->rps.max_freq_softlimit = val;
intel_set_rps(dev, val); intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
@ -5060,7 +5055,7 @@ i915_min_freq_set(void *data, u64 val)
dev_priv->rps.min_freq_softlimit = val; dev_priv->rps.min_freq_softlimit = val;
intel_set_rps(dev, val); intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);

View file

@ -186,7 +186,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = 1; value = 1;
break; break;
case I915_PARAM_HAS_SEMAPHORES: case I915_PARAM_HAS_SEMAPHORES:
value = i915_semaphore_is_enabled(dev); value = i915_semaphore_is_enabled(dev_priv);
break; break;
case I915_PARAM_HAS_PRIME_VMAP_FLUSH: case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
value = 1; value = 1;
@ -204,7 +204,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = 1; value = 1;
break; break;
case I915_PARAM_CMD_PARSER_VERSION: case I915_PARAM_CMD_PARSER_VERSION:
value = i915_cmd_parser_get_version(); value = i915_cmd_parser_get_version(dev_priv);
break; break;
case I915_PARAM_HAS_COHERENT_PHYS_GTT: case I915_PARAM_HAS_COHERENT_PHYS_GTT:
value = 1; value = 1;
@ -223,8 +223,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
return -ENODEV; return -ENODEV;
break; break;
case I915_PARAM_HAS_GPU_RESET: case I915_PARAM_HAS_GPU_RESET:
value = i915.enable_hangcheck && value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
intel_has_gpu_reset(dev);
break; break;
case I915_PARAM_HAS_RESOURCE_STREAMER: case I915_PARAM_HAS_RESOURCE_STREAMER:
value = HAS_RESOURCE_STREAMER(dev); value = HAS_RESOURCE_STREAMER(dev);
@ -425,6 +424,43 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
.can_switch = i915_switcheroo_can_switch, .can_switch = i915_switcheroo_can_switch,
}; };
static void i915_gem_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
/*
* Neither the BIOS, ourselves or any other kernel
* expects the system to be in execlists mode on startup,
* so we need to reset the GPU back to legacy mode. And the only
* known way to disable logical contexts is through a GPU reset.
*
* So in order to leave the system in a known default configuration,
* always reset the GPU upon unload. Afterwards we then clean up the
* GEM state tracking, flushing off the requests and leaving the
* system in a known idle state.
*
* Note that is of the upmost importance that the GPU is idle and
* all stray writes are flushed *before* we dismantle the backing
* storage for the pinned objects.
*
* However, since we are uncertain that reseting the GPU on older
* machines is a good idea, we don't - just in case it leaves the
* machine in an unusable condition.
*/
if (HAS_HW_CONTEXTS(dev)) {
int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
WARN_ON(reset && reset != -ENODEV);
}
mutex_lock(&dev->struct_mutex);
i915_gem_reset(dev);
i915_gem_cleanup_engines(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
WARN_ON(!list_empty(&to_i915(dev)->context_list));
}
static int i915_load_modeset_init(struct drm_device *dev) static int i915_load_modeset_init(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
@ -454,6 +490,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret) if (ret)
goto cleanup_vga_client; goto cleanup_vga_client;
/* must happen before intel_power_domains_init_hw() on VLV/CHV */
intel_update_rawclk(dev_priv);
intel_power_domains_init_hw(dev_priv, false); intel_power_domains_init_hw(dev_priv, false);
intel_csr_ucode_init(dev_priv); intel_csr_ucode_init(dev_priv);
@ -503,10 +542,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
return 0; return 0;
cleanup_gem: cleanup_gem:
mutex_lock(&dev->struct_mutex); i915_gem_fini(dev);
i915_gem_cleanup_engines(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
cleanup_irq: cleanup_irq:
intel_guc_ucode_fini(dev); intel_guc_ucode_fini(dev);
drm_irq_uninstall(dev); drm_irq_uninstall(dev);
@ -850,7 +886,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
DRM_INFO("Display disabled (module parameter)\n"); DRM_INFO("Display disabled (module parameter)\n");
info->num_pipes = 0; info->num_pipes = 0;
} else if (info->num_pipes > 0 && } else if (info->num_pipes > 0 &&
(INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) && (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
HAS_PCH_SPLIT(dev)) { HAS_PCH_SPLIT(dev)) {
u32 fuse_strap = I915_READ(FUSE_STRAP); u32 fuse_strap = I915_READ(FUSE_STRAP);
u32 sfuse_strap = I915_READ(SFUSE_STRAP); u32 sfuse_strap = I915_READ(SFUSE_STRAP);
@ -874,7 +910,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
DRM_INFO("PipeC fused off\n"); DRM_INFO("PipeC fused off\n");
info->num_pipes -= 1; info->num_pipes -= 1;
} }
} else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) { } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
u32 dfsm = I915_READ(SKL_DFSM); u32 dfsm = I915_READ(SKL_DFSM);
u8 disabled_mask = 0; u8 disabled_mask = 0;
bool invalid; bool invalid;
@ -915,9 +951,11 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
else if (INTEL_INFO(dev)->gen >= 9) else if (INTEL_INFO(dev)->gen >= 9)
gen9_sseu_info_init(dev); gen9_sseu_info_init(dev);
/* Snooping is broken on BXT A stepping. */
info->has_snoop = !info->has_llc; info->has_snoop = !info->has_llc;
info->has_snoop &= !IS_BXT_REVID(dev, 0, BXT_REVID_A1);
/* Snooping is broken on BXT A stepping. */
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
info->has_snoop = false;
DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total); DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total); DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
@ -930,6 +968,20 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
info->has_subslice_pg ? "y" : "n"); info->has_subslice_pg ? "y" : "n");
DRM_DEBUG_DRIVER("has EU power gating: %s\n", DRM_DEBUG_DRIVER("has EU power gating: %s\n",
info->has_eu_pg ? "y" : "n"); info->has_eu_pg ? "y" : "n");
i915.enable_execlists =
intel_sanitize_enable_execlists(dev_priv,
i915.enable_execlists);
/*
* i915.enable_ppgtt is read-only, so do an early pass to validate the
* user's requested state against the hardware/driver capabilities. We
* do this now so that we can print out any log messages once rather
* than every time we check intel_enable_ppgtt().
*/
i915.enable_ppgtt =
intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
} }
static void intel_init_dpio(struct drm_i915_private *dev_priv) static void intel_init_dpio(struct drm_i915_private *dev_priv)
@ -1020,6 +1072,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
memcpy(device_info, info, sizeof(dev_priv->info)); memcpy(device_info, info, sizeof(dev_priv->info));
device_info->device_id = dev->pdev->device; device_info->device_id = dev->pdev->device;
BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
device_info->gen_mask = BIT(device_info->gen - 1);
spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock); spin_lock_init(&dev_priv->gpu_error.lock);
mutex_init(&dev_priv->backlight_lock); mutex_init(&dev_priv->backlight_lock);
@ -1137,7 +1192,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
if (ret < 0) if (ret < 0)
goto put_bridge; goto put_bridge;
intel_uncore_init(dev); intel_uncore_init(dev_priv);
return 0; return 0;
@ -1155,7 +1210,7 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{ {
struct drm_device *dev = dev_priv->dev; struct drm_device *dev = dev_priv->dev;
intel_uncore_fini(dev); intel_uncore_fini(dev_priv);
i915_mmio_cleanup(dev); i915_mmio_cleanup(dev);
pci_dev_put(dev_priv->bridge_dev); pci_dev_put(dev_priv->bridge_dev);
} }
@ -1206,8 +1261,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
pci_set_master(dev->pdev); pci_set_master(dev->pdev);
/* overlay on gen2 is broken and can't address above 1G */ /* overlay on gen2 is broken and can't address above 1G */
if (IS_GEN2(dev)) if (IS_GEN2(dev)) {
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
goto out_ggtt;
}
}
/* 965GM sometimes incorrectly writes to hardware status page (HWS) /* 965GM sometimes incorrectly writes to hardware status page (HWS)
* using 32bit addressing, overwriting memory if HWS is located * using 32bit addressing, overwriting memory if HWS is located
@ -1217,8 +1279,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* behaviour if any general state is accessed within a page above 4GB, * behaviour if any general state is accessed within a page above 4GB,
* which also needs to be handled carefully. * which also needs to be handled carefully.
*/ */
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
goto out_ggtt;
}
}
aperture_size = ggtt->mappable_end; aperture_size = ggtt->mappable_end;
@ -1236,7 +1305,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE); PM_QOS_DEFAULT_VALUE);
intel_uncore_sanitize(dev); intel_uncore_sanitize(dev_priv);
intel_opregion_setup(dev); intel_opregion_setup(dev);
@ -1300,7 +1369,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
* Notify a valid surface after modesetting, * Notify a valid surface after modesetting,
* when running inside a VM. * when running inside a VM.
*/ */
if (intel_vgpu_active(dev)) if (intel_vgpu_active(dev_priv))
I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY); I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
i915_setup_sysfs(dev); i915_setup_sysfs(dev);
@ -1459,10 +1528,7 @@ int i915_driver_unload(struct drm_device *dev)
flush_workqueue(dev_priv->wq); flush_workqueue(dev_priv->wq);
intel_guc_ucode_fini(dev); intel_guc_ucode_fini(dev);
mutex_lock(&dev->struct_mutex); i915_gem_fini(dev);
i915_gem_cleanup_engines(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
intel_fbc_cleanup_cfb(dev_priv); intel_fbc_cleanup_cfb(dev_priv);
intel_power_domains_fini(dev_priv); intel_power_domains_fini(dev_priv);
@ -1570,15 +1636,15 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),

View file

@ -298,22 +298,26 @@ static const struct intel_device_info intel_haswell_m_info = {
static const struct intel_device_info intel_broadwell_d_info = { static const struct intel_device_info intel_broadwell_d_info = {
BDW_FEATURES, BDW_FEATURES,
.gen = 8, .gen = 8,
.is_broadwell = 1,
}; };
static const struct intel_device_info intel_broadwell_m_info = { static const struct intel_device_info intel_broadwell_m_info = {
BDW_FEATURES, BDW_FEATURES,
.gen = 8, .is_mobile = 1, .gen = 8, .is_mobile = 1,
.is_broadwell = 1,
}; };
static const struct intel_device_info intel_broadwell_gt3d_info = { static const struct intel_device_info intel_broadwell_gt3d_info = {
BDW_FEATURES, BDW_FEATURES,
.gen = 8, .gen = 8,
.is_broadwell = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
}; };
static const struct intel_device_info intel_broadwell_gt3m_info = { static const struct intel_device_info intel_broadwell_gt3m_info = {
BDW_FEATURES, BDW_FEATURES,
.gen = 8, .is_mobile = 1, .gen = 8, .is_mobile = 1,
.is_broadwell = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
}; };
@ -528,9 +532,9 @@ void intel_detect_pch(struct drm_device *dev)
pci_dev_put(pch); pci_dev_put(pch);
} }
bool i915_semaphore_is_enabled(struct drm_device *dev) bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
{ {
if (INTEL_INFO(dev)->gen < 6) if (INTEL_GEN(dev_priv) < 6)
return false; return false;
if (i915.semaphores >= 0) if (i915.semaphores >= 0)
@ -540,13 +544,9 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
if (i915.enable_execlists) if (i915.enable_execlists)
return false; return false;
/* Until we get further testing... */
if (IS_GEN8(dev))
return false;
#ifdef CONFIG_INTEL_IOMMU #ifdef CONFIG_INTEL_IOMMU
/* Enable semaphores on SNB when IO remapping is off */ /* Enable semaphores on SNB when IO remapping is off */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped)
return false; return false;
#endif #endif
@ -608,7 +608,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_guc_suspend(dev); intel_guc_suspend(dev);
intel_suspend_gt_powersave(dev); intel_suspend_gt_powersave(dev_priv);
intel_display_suspend(dev); intel_display_suspend(dev);
@ -628,7 +628,7 @@ static int i915_drm_suspend(struct drm_device *dev)
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
intel_opregion_notify_adapter(dev, opregion_target_state); intel_opregion_notify_adapter(dev, opregion_target_state);
intel_uncore_forcewake_reset(dev, false); intel_uncore_forcewake_reset(dev_priv, false);
intel_opregion_fini(dev); intel_opregion_fini(dev);
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
@ -775,7 +775,7 @@ static int i915_drm_resume(struct drm_device *dev)
spin_lock_irq(&dev_priv->irq_lock); spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup) if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev); dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock); spin_unlock_irq(&dev_priv->irq_lock);
intel_dp_mst_resume(dev); intel_dp_mst_resume(dev);
@ -868,9 +868,9 @@ static int i915_drm_resume_early(struct drm_device *dev)
DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
ret); ret);
intel_uncore_early_sanitize(dev, true); intel_uncore_early_sanitize(dev_priv, true);
if (IS_BROXTON(dev)) { if (IS_BROXTON(dev_priv)) {
if (!dev_priv->suspended_to_idle) if (!dev_priv->suspended_to_idle)
gen9_sanitize_dc_state(dev_priv); gen9_sanitize_dc_state(dev_priv);
bxt_disable_dc9(dev_priv); bxt_disable_dc9(dev_priv);
@ -878,7 +878,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
hsw_disable_pc8(dev_priv); hsw_disable_pc8(dev_priv);
} }
intel_uncore_sanitize(dev); intel_uncore_sanitize(dev_priv);
if (IS_BROXTON(dev_priv) || if (IS_BROXTON(dev_priv) ||
!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
@ -921,14 +921,14 @@ int i915_resume_switcheroo(struct drm_device *dev)
* - re-init interrupt state * - re-init interrupt state
* - re-init display * - re-init display
*/ */
int i915_reset(struct drm_device *dev) int i915_reset(struct drm_i915_private *dev_priv)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_device *dev = dev_priv->dev;
struct i915_gpu_error *error = &dev_priv->gpu_error; struct i915_gpu_error *error = &dev_priv->gpu_error;
unsigned reset_counter; unsigned reset_counter;
int ret; int ret;
intel_reset_gt_powersave(dev); intel_reset_gt_powersave(dev_priv);
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
@ -944,7 +944,7 @@ int i915_reset(struct drm_device *dev)
i915_gem_reset(dev); i915_gem_reset(dev);
ret = intel_gpu_reset(dev, ALL_ENGINES); ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
/* Also reset the gpu hangman. */ /* Also reset the gpu hangman. */
if (error->stop_rings != 0) { if (error->stop_rings != 0) {
@ -999,7 +999,7 @@ int i915_reset(struct drm_device *dev)
* of re-init after reset. * of re-init after reset.
*/ */
if (INTEL_INFO(dev)->gen > 5) if (INTEL_INFO(dev)->gen > 5)
intel_enable_gt_powersave(dev); intel_enable_gt_powersave(dev_priv);
return 0; return 0;
@ -1107,6 +1107,49 @@ static int i915_pm_resume(struct device *dev)
return i915_drm_resume(drm_dev); return i915_drm_resume(drm_dev);
} }
/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *dev)
{
return i915_pm_suspend(dev);
}
static int i915_pm_freeze_late(struct device *dev)
{
int ret;
ret = i915_pm_suspend_late(dev);
if (ret)
return ret;
ret = i915_gem_freeze_late(dev_to_i915(dev));
if (ret)
return ret;
return 0;
}
/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *dev)
{
return i915_pm_resume_early(dev);
}
static int i915_pm_thaw(struct device *dev)
{
return i915_pm_resume(dev);
}
/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *dev)
{
return i915_pm_resume_early(dev);
}
static int i915_pm_restore(struct device *dev)
{
return i915_pm_resume(dev);
}
/* /*
* Save all Gunit registers that may be lost after a D3 and a subsequent * Save all Gunit registers that may be lost after a D3 and a subsequent
* S0i[R123] transition. The list of registers needing a save/restore is * S0i[R123] transition. The list of registers needing a save/restore is
@ -1470,7 +1513,7 @@ static int intel_runtime_suspend(struct device *device)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int ret; int ret;
if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev)))) if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
return -ENODEV; return -ENODEV;
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
@ -1509,7 +1552,7 @@ static int intel_runtime_suspend(struct device *device)
intel_guc_suspend(dev); intel_guc_suspend(dev);
intel_suspend_gt_powersave(dev); intel_suspend_gt_powersave(dev_priv);
intel_runtime_pm_disable_interrupts(dev_priv); intel_runtime_pm_disable_interrupts(dev_priv);
ret = 0; ret = 0;
@ -1531,7 +1574,7 @@ static int intel_runtime_suspend(struct device *device)
return ret; return ret;
} }
intel_uncore_forcewake_reset(dev, false); intel_uncore_forcewake_reset(dev_priv, false);
enable_rpm_wakeref_asserts(dev_priv); enable_rpm_wakeref_asserts(dev_priv);
WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
@ -1612,7 +1655,7 @@ static int intel_runtime_resume(struct device *device)
* we can do is to hope that things will still work (and disable RPM). * we can do is to hope that things will still work (and disable RPM).
*/ */
i915_gem_init_swizzling(dev); i915_gem_init_swizzling(dev);
gen6_update_ring_freq(dev); gen6_update_ring_freq(dev_priv);
intel_runtime_pm_enable_interrupts(dev_priv); intel_runtime_pm_enable_interrupts(dev_priv);
@ -1624,7 +1667,7 @@ static int intel_runtime_resume(struct device *device)
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_init(dev_priv); intel_hpd_init(dev_priv);
intel_enable_gt_powersave(dev); intel_enable_gt_powersave(dev_priv);
enable_rpm_wakeref_asserts(dev_priv); enable_rpm_wakeref_asserts(dev_priv);
@ -1661,14 +1704,14 @@ static const struct dev_pm_ops i915_pm_ops = {
* @restore, @restore_early : called after rebooting and restoring the * @restore, @restore_early : called after rebooting and restoring the
* hibernation image [PMSG_RESTORE] * hibernation image [PMSG_RESTORE]
*/ */
.freeze = i915_pm_suspend, .freeze = i915_pm_freeze,
.freeze_late = i915_pm_suspend_late, .freeze_late = i915_pm_freeze_late,
.thaw_early = i915_pm_resume_early, .thaw_early = i915_pm_thaw_early,
.thaw = i915_pm_resume, .thaw = i915_pm_thaw,
.poweroff = i915_pm_suspend, .poweroff = i915_pm_suspend,
.poweroff_late = i915_pm_poweroff_late, .poweroff_late = i915_pm_poweroff_late,
.restore_early = i915_pm_resume_early, .restore_early = i915_pm_restore_early,
.restore = i915_pm_resume, .restore = i915_pm_restore,
/* S0ix (via runtime suspend) event handlers */ /* S0ix (via runtime suspend) event handlers */
.runtime_suspend = intel_runtime_suspend, .runtime_suspend = intel_runtime_suspend,

View file

@ -66,7 +66,7 @@
#define DRIVER_NAME "i915" #define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics" #define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20160425" #define DRIVER_DATE "20160522"
#undef WARN_ON #undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */ /* Many gcc seem to no see through this and fall over :( */
@ -324,6 +324,12 @@ struct i915_hotplug {
&dev->mode_config.plane_list, \ &dev->mode_config.plane_list, \
base.head) base.head)
#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
list_for_each_entry(intel_plane, &dev->mode_config.plane_list, \
base.head) \
for_each_if ((plane_mask) & \
(1 << drm_plane_index(&intel_plane->base)))
#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \ #define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
list_for_each_entry(intel_plane, \ list_for_each_entry(intel_plane, \
&(dev)->mode_config.plane_list, \ &(dev)->mode_config.plane_list, \
@ -333,6 +339,10 @@ struct i915_hotplug {
#define for_each_intel_crtc(dev, intel_crtc) \ #define for_each_intel_crtc(dev, intel_crtc) \
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) \
for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
#define for_each_intel_encoder(dev, intel_encoder) \ #define for_each_intel_encoder(dev, intel_encoder) \
list_for_each_entry(intel_encoder, \ list_for_each_entry(intel_encoder, \
&(dev)->mode_config.encoder_list, \ &(dev)->mode_config.encoder_list, \
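For illustration only, not part of the patch: a minimal sketch of how the new mask-based iterators added above might be used by a caller that has built a CRTC bitmask from drm_crtc_index() (the function name here is hypothetical).

/* Illustrative sketch only -- not part of this patch. */
static void example_log_crtcs_in_mask(struct drm_device *dev,
				      unsigned int crtc_mask)
{
	struct intel_crtc *intel_crtc;

	/* Visits only the CRTCs whose drm_crtc_index() bit is set in crtc_mask. */
	for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask)
		DRM_DEBUG_KMS("crtc %d is in the mask\n",
			      drm_crtc_index(&intel_crtc->base));
}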
@ -588,6 +598,7 @@ struct drm_i915_display_funcs {
struct intel_crtc_state *newstate); struct intel_crtc_state *newstate);
void (*initial_watermarks)(struct intel_crtc_state *cstate); void (*initial_watermarks)(struct intel_crtc_state *cstate);
void (*optimize_watermarks)(struct intel_crtc_state *cstate); void (*optimize_watermarks)(struct intel_crtc_state *cstate);
int (*compute_global_watermarks)(struct drm_atomic_state *state);
void (*update_wm)(struct drm_crtc *crtc); void (*update_wm)(struct drm_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state); int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state); void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
@ -612,7 +623,7 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj, struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req, struct drm_i915_gem_request *req,
uint32_t flags); uint32_t flags);
void (*hpd_irq_setup)(struct drm_device *dev); void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */ /* clock updates for mode set */
/* cursor updates */ /* cursor updates */
/* render clock increase/decrease */ /* render clock increase/decrease */
@ -735,6 +746,7 @@ struct intel_csr {
func(is_valleyview) sep \ func(is_valleyview) sep \
func(is_cherryview) sep \ func(is_cherryview) sep \
func(is_haswell) sep \ func(is_haswell) sep \
func(is_broadwell) sep \
func(is_skylake) sep \ func(is_skylake) sep \
func(is_broxton) sep \ func(is_broxton) sep \
func(is_kabylake) sep \ func(is_kabylake) sep \
@ -757,9 +769,10 @@ struct intel_csr {
struct intel_device_info { struct intel_device_info {
u32 display_mmio_offset; u32 display_mmio_offset;
u16 device_id; u16 device_id;
u8 num_pipes:3; u8 num_pipes;
u8 num_sprites[I915_MAX_PIPES]; u8 num_sprites[I915_MAX_PIPES];
u8 gen; u8 gen;
u16 gen_mask;
u8 ring_mask; /* Rings supported by the HW */ u8 ring_mask; /* Rings supported by the HW */
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
/* Register offsets for the various display pipes and transcoders */ /* Register offsets for the various display pipes and transcoders */
@ -851,6 +864,9 @@ struct intel_context {
struct i915_ctx_hang_stats hang_stats; struct i915_ctx_hang_stats hang_stats;
struct i915_hw_ppgtt *ppgtt; struct i915_hw_ppgtt *ppgtt;
/* Unique identifier for this context, used by the hw for tracking */
unsigned hw_id;
/* Legacy ring buffer submission */ /* Legacy ring buffer submission */
struct { struct {
struct drm_i915_gem_object *rcs_state; struct drm_i915_gem_object *rcs_state;
@ -865,6 +881,7 @@ struct intel_context {
struct i915_vma *lrc_vma; struct i915_vma *lrc_vma;
u64 lrc_desc; u64 lrc_desc;
uint32_t *lrc_reg_state; uint32_t *lrc_reg_state;
bool initialised;
} engine[I915_NUM_ENGINES]; } engine[I915_NUM_ENGINES];
struct list_head link; struct list_head link;
@ -1488,6 +1505,7 @@ struct intel_vbt_data {
bool present; bool present;
bool active_low_pwm; bool active_low_pwm;
u8 min_brightness; /* min_brightness/255 of max */ u8 min_brightness; /* min_brightness/255 of max */
enum intel_backlight_type type;
} backlight; } backlight;
/* MIPI DSI */ /* MIPI DSI */
@ -1580,7 +1598,7 @@ struct skl_ddb_allocation {
}; };
struct skl_wm_values { struct skl_wm_values {
bool dirty[I915_MAX_PIPES]; unsigned dirty_pipes;
struct skl_ddb_allocation ddb; struct skl_ddb_allocation ddb;
uint32_t wm_linetime[I915_MAX_PIPES]; uint32_t wm_linetime[I915_MAX_PIPES];
uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8]; uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
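For illustration only, not part of the patch: replacing the per-pipe bool array with a bitmask turns dirtiness bookkeeping into plain bit operations; the helper names below are hypothetical.

/* Illustrative sketch only -- hypothetical helpers, not part of this patch. */
static bool example_pipe_is_dirty(const struct skl_wm_values *wm, enum pipe pipe)
{
	return wm->dirty_pipes & (1 << pipe);
}

static void example_mark_pipe_dirty(struct skl_wm_values *wm, enum pipe pipe)
{
	wm->dirty_pipes |= 1 << pipe;
}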
@ -1838,6 +1856,13 @@ struct drm_i915_private {
DECLARE_HASHTABLE(mm_structs, 7); DECLARE_HASHTABLE(mm_structs, 7);
struct mutex mm_lock; struct mutex mm_lock;
/* The hw wants to have a stable context identifier for the lifetime
* of the context (for OA, PASID, faults, etc). This is limited
* in execlists to 21 bits.
*/
struct ida context_hw_ida;
#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
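For illustration only, not part of the patch: roughly how a stable hardware ID could be taken from this ida at context creation, assuming the standard ida_simple_get()/ida_simple_remove() helpers (the function name is hypothetical; the real assignment lives in the context-creation path).

/* Illustrative sketch only -- not part of this patch. */
static int example_assign_hw_id(struct drm_i915_private *dev_priv,
				struct intel_context *ctx)
{
	int id;

	/* hw_id must stay below the 21-bit limit imposed by execlists. */
	id = ida_simple_get(&dev_priv->context_hw_ida,
			    0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
	if (id < 0)
		return id;

	ctx->hw_id = id;
	return 0;
}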
/* Kernel Modesetting */ /* Kernel Modesetting */
struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
@ -1950,9 +1975,6 @@ struct drm_i915_private {
*/ */
uint16_t skl_latency[8]; uint16_t skl_latency[8];
/* Committed wm config */
struct intel_wm_config config;
/* /*
* The skl_wm_values structure is a bit too big for stack * The skl_wm_values structure is a bit too big for stack
* allocation, so we keep the staging struct where we store * allocation, so we keep the staging struct where we store
@ -1975,6 +1997,13 @@ struct drm_i915_private {
* cstate->wm.need_postvbl_update. * cstate->wm.need_postvbl_update.
*/ */
struct mutex wm_mutex; struct mutex wm_mutex;
/*
* Set during HW readout of watermarks/DDB. Some platforms
* need to know when we're still using BIOS-provided values
* (which we don't fully trust).
*/
bool distrust_bios_wm;
} wm; } wm;
struct i915_runtime_pm pm; struct i915_runtime_pm pm;
@ -2227,9 +2256,75 @@ struct drm_i915_gem_object {
}; };
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits);
/*
 * Optimised SGL iterator for GEM objects
 */
static __always_inline struct sgt_iter {
struct scatterlist *sgp;
union {
unsigned long pfn;
dma_addr_t dma;
};
unsigned int curr;
unsigned int max;
} __sgt_iter(struct scatterlist *sgl, bool dma) {
struct sgt_iter s = { .sgp = sgl };
if (s.sgp) {
s.max = s.curr = s.sgp->offset;
s.max += s.sgp->length;
if (dma)
s.dma = sg_dma_address(s.sgp);
else
s.pfn = page_to_pfn(sg_page(s.sgp));
}
return s;
}
/**
* __sg_next - return the next scatterlist entry in a list
* @sg: The current sg entry
*
* Description:
* If the entry is the last, return NULL; otherwise, step to the next
* element in the array (@sg@+1). If that's a chain pointer, follow it;
* otherwise just return the pointer to the current element.
**/
static inline struct scatterlist *__sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
return sg_is_last(sg) ? NULL :
likely(!sg_is_chain(++sg)) ? sg :
sg_chain_ptr(sg);
}
/**
* for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
* @__dmap: DMA address (output)
* @__iter: 'struct sgt_iter' (iterator state, internal)
* @__sgt: sg_table to iterate over (input)
*/
#define for_each_sgt_dma(__dmap, __iter, __sgt) \
for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
((__dmap) = (__iter).dma + (__iter).curr); \
(((__iter).curr += PAGE_SIZE) < (__iter).max) || \
((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0))
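For illustration only, not part of the patch: a small sketch of the DMA-address iterator in use, dumping each page-sized chunk of an object's backing storage.

/* Illustrative sketch only -- not part of this patch. */
static void example_dump_dma_addresses(struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	dma_addr_t addr;

	/* Steps through the table in PAGE_SIZE chunks, following sg chains. */
	for_each_sgt_dma(addr, sgt_iter, pages)
		DRM_DEBUG_DRIVER("dma addr %pad\n", &addr);
}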
/**
* for_each_sgt_page - iterate over the pages of the given sg_table
* @__pp: page pointer (output)
* @__iter: 'struct sgt_iter' (iterator state, internal)
* @__sgt: sg_table to iterate over (input)
*/
#define for_each_sgt_page(__pp, __iter, __sgt) \
for ((__iter) = __sgt_iter((__sgt)->sgl, false); \
((__pp) = (__iter).pfn == 0 ? NULL : \
pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
(((__iter).curr += PAGE_SIZE) < (__iter).max) || \
((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
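Likewise, for illustration only: the page-based variant, modelled on the release loop in i915_gem_object_put_pages_gtt() later in this patch.

/* Illustrative sketch only -- not part of this patch. */
static void example_release_pages(struct sg_table *pages, bool dirty)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (dirty)
			set_page_dirty(page);
		put_page(page);
	}
}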
/** /**
* Request queue structure. * Request queue structure.
@ -2278,6 +2373,9 @@ struct drm_i915_gem_request {
/** Position in the ringbuffer of the end of the whole request */ /** Position in the ringbuffer of the end of the whole request */
u32 tail; u32 tail;
/** Preallocate space in the ringbuffer for emitting the request */
u32 reserved_space;
/** /**
* Context and ring buffer related to this request * Context and ring buffer related to this request
* Contexts are refcounted, so when this request is associated with a * Contexts are refcounted, so when this request is associated with a
@ -2291,6 +2389,17 @@ struct drm_i915_gem_request {
struct intel_context *ctx; struct intel_context *ctx;
struct intel_ringbuffer *ringbuf; struct intel_ringbuffer *ringbuf;
/**
* Context related to the previous request.
* As the contexts are accessed by the hardware until the switch is
* completed to a new context, the hardware may still be writing
* to the context object after the breadcrumb is visible. We must
* not unpin/unbind/prune that object whilst still active and so
* we keep the previous context pinned until the following (this)
* request is retired.
*/
struct intel_context *previous_context;
/** Batch buffer related to this request if any (used for /** Batch buffer related to this request if any (used for
error state dump only) */ error state dump only) */
struct drm_i915_gem_object *batch_obj; struct drm_i915_gem_object *batch_obj;
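For illustration only, not part of the patch: the previous_context handoff described above boils down to something like the following at submission time (placement and naming here are assumptions; the real assignment lives in the submission paths).

/* Illustrative sketch only -- not part of this patch. */
static void example_track_previous_context(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;

	/* Keep the outgoing context pinned until this request retires. */
	req->previous_context = engine->last_context;
	engine->last_context = req->ctx;
}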
@ -2327,6 +2436,8 @@ struct drm_i915_gem_request {
/** Execlists no. of times this request has been sent to the ELSP */ /** Execlists no. of times this request has been sent to the ELSP */
int elsp_submitted; int elsp_submitted;
/** Execlists context hardware id. */
unsigned ctx_hw_id;
}; };
struct drm_i915_gem_request * __must_check struct drm_i915_gem_request * __must_check
@ -2359,23 +2470,9 @@ i915_gem_request_reference(struct drm_i915_gem_request *req)
static inline void static inline void
i915_gem_request_unreference(struct drm_i915_gem_request *req) i915_gem_request_unreference(struct drm_i915_gem_request *req)
{ {
WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex));
kref_put(&req->ref, i915_gem_request_free); kref_put(&req->ref, i915_gem_request_free);
} }
static inline void
i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
{
struct drm_device *dev;
if (!req)
return;
dev = req->engine->dev;
if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
mutex_unlock(&dev->struct_mutex);
}
static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst, static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
struct drm_i915_gem_request *src) struct drm_i915_gem_request *src)
{ {
@ -2503,9 +2600,29 @@ struct drm_i915_cmd_table {
#define INTEL_INFO(p) (&__I915__(p)->info) #define INTEL_INFO(p) (&__I915__(p)->info)
#define INTEL_GEN(p) (INTEL_INFO(p)->gen) #define INTEL_GEN(p) (INTEL_INFO(p)->gen)
#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
#define REVID_FOREVER 0xff #define REVID_FOREVER 0xff
#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
#define GEN_FOREVER (0)
/*
* Returns true if Gen is in inclusive range [Start, End].
*
* Use GEN_FOREVER for an unbounded start and/or end.
*/
#define IS_GEN(p, s, e) ({ \
unsigned int __s = (s), __e = (e); \
BUILD_BUG_ON(!__builtin_constant_p(s)); \
BUILD_BUG_ON(!__builtin_constant_p(e)); \
if ((__s) != GEN_FOREVER) \
__s = (s) - 1; \
if ((__e) == GEN_FOREVER) \
__e = BITS_PER_LONG - 1; \
else \
__e = (e) - 1; \
!!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \
})
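For illustration only, not part of the patch: assuming gen_mask carries BIT(gen - 1) for each device (consistent with the IS_GENx definitions below), the range test reduces to a single AND against a GENMASK; two worked expansions.

/* Illustrative sketch only -- not part of this patch.
 * A gen8 device is assumed to carry gen_mask = BIT(7), so:
 *   IS_GEN(dev, 8, 9)           tests gen_mask & GENMASK(8, 7) -> true
 *   IS_GEN(dev, GEN_FOREVER, 5) tests gen_mask & GENMASK(4, 0) -> false
 */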
/* /*
* Return true if revision is in range [since,until] inclusive. * Return true if revision is in range [since,until] inclusive.
* *
@ -2538,7 +2655,7 @@ struct drm_i915_cmd_table {
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview) #define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_cherryview && IS_GEN8(dev)) #define IS_BROADWELL(dev) (INTEL_INFO(dev)->is_broadwell)
#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) #define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton) #define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton)
#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake) #define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake)
@ -2606,14 +2723,14 @@ struct drm_i915_cmd_table {
* have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
* chips, etc.). * chips, etc.).
*/ */
#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) #define IS_GEN2(dev) (INTEL_INFO(dev)->gen_mask & BIT(1))
#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) #define IS_GEN3(dev) (INTEL_INFO(dev)->gen_mask & BIT(2))
#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) #define IS_GEN4(dev) (INTEL_INFO(dev)->gen_mask & BIT(3))
#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) #define IS_GEN5(dev) (INTEL_INFO(dev)->gen_mask & BIT(4))
#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) #define IS_GEN6(dev) (INTEL_INFO(dev)->gen_mask & BIT(5))
#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) #define IS_GEN7(dev) (INTEL_INFO(dev)->gen_mask & BIT(6))
#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8) #define IS_GEN8(dev) (INTEL_INFO(dev)->gen_mask & BIT(7))
#define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9) #define IS_GEN9(dev) (INTEL_INFO(dev)->gen_mask & BIT(8))
#define RENDER_RING (1<<RCS) #define RENDER_RING (1<<RCS)
#define BSD_RING (1<<VCS) #define BSD_RING (1<<VCS)
@ -2686,7 +2803,7 @@ struct drm_i915_cmd_table {
IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \ IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
IS_KABYLAKE(dev) || IS_BROXTON(dev)) IS_KABYLAKE(dev) || IS_BROXTON(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) #define HAS_RC6p(dev) (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
#define HAS_CSR(dev) (IS_GEN9(dev)) #define HAS_CSR(dev) (IS_GEN9(dev))
@ -2740,6 +2857,9 @@ extern int i915_max_ioctl;
extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
extern int i915_resume_switcheroo(struct drm_device *dev); extern int i915_resume_switcheroo(struct drm_device *dev);
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
int enable_ppgtt);
/* i915_dma.c */ /* i915_dma.c */
void __printf(3, 4) void __printf(3, 4)
__i915_printk(struct drm_i915_private *dev_priv, const char *level, __i915_printk(struct drm_i915_private *dev_priv, const char *level,
@ -2760,9 +2880,9 @@ extern void i915_driver_postclose(struct drm_device *dev,
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg); unsigned long arg);
#endif #endif
extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask); extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
extern bool intel_has_gpu_reset(struct drm_device *dev); extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
extern int i915_reset(struct drm_device *dev); extern int i915_reset(struct drm_i915_private *dev_priv);
extern int intel_guc_reset(struct drm_i915_private *dev_priv); extern int intel_guc_reset(struct drm_i915_private *dev_priv);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@ -2772,30 +2892,33 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
/* intel_hotplug.c */ /* intel_hotplug.c */
void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask); void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask);
void intel_hpd_init(struct drm_i915_private *dev_priv); void intel_hpd_init(struct drm_i915_private *dev_priv);
void intel_hpd_init_work(struct drm_i915_private *dev_priv); void intel_hpd_init_work(struct drm_i915_private *dev_priv);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
/* i915_irq.c */ /* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev); void i915_queue_hangcheck(struct drm_i915_private *dev_priv);
__printf(3, 4) __printf(3, 4)
void i915_handle_error(struct drm_device *dev, u32 engine_mask, void i915_handle_error(struct drm_i915_private *dev_priv,
u32 engine_mask,
const char *fmt, ...); const char *fmt, ...);
extern void intel_irq_init(struct drm_i915_private *dev_priv); extern void intel_irq_init(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv); int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv); void intel_irq_uninstall(struct drm_i915_private *dev_priv);
extern void intel_uncore_sanitize(struct drm_device *dev); extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
extern void intel_uncore_early_sanitize(struct drm_device *dev, extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
bool restore_forcewake); bool restore_forcewake);
extern void intel_uncore_init(struct drm_device *dev); extern void intel_uncore_init(struct drm_i915_private *dev_priv);
extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv); extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv); extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
extern void intel_uncore_fini(struct drm_device *dev); extern void intel_uncore_fini(struct drm_i915_private *dev_priv);
extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
bool restore);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
enum forcewake_domains domains); enum forcewake_domains domains);
@ -2811,9 +2934,9 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
static inline bool intel_vgpu_active(struct drm_device *dev) static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
{ {
return to_i915(dev)->vgpu.active; return dev_priv->vgpu.active;
} }
void void
@ -2909,7 +3032,7 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data, int i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
int i915_gem_init_userptr(struct drm_device *dev); void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *file); struct drm_file *file);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
@ -2919,11 +3042,13 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
void i915_gem_load_init(struct drm_device *dev); void i915_gem_load_init(struct drm_device *dev);
void i915_gem_load_cleanup(struct drm_device *dev); void i915_gem_load_cleanup(struct drm_device *dev);
void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
void *i915_gem_object_alloc(struct drm_device *dev); void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj); void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj, void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops); const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
size_t size); size_t size);
struct drm_i915_gem_object *i915_gem_object_create_from_data( struct drm_i915_gem_object *i915_gem_object_create_from_data(
struct drm_device *dev, const void *data, size_t size); struct drm_device *dev, const void *data, size_t size);
@ -3054,6 +3179,11 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args); struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset); uint32_t handle, uint64_t *offset);
void i915_gem_track_fb(struct drm_i915_gem_object *old,
struct drm_i915_gem_object *new,
unsigned frontbuffer_bits);
/** /**
* Returns true if seq1 is later than seq2. * Returns true if seq1 is later than seq2.
*/ */
@ -3081,13 +3211,13 @@ static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
req->seqno); req->seqno);
} }
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
struct drm_i915_gem_request * struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine); i915_gem_find_active_request(struct intel_engine_cs *engine);
bool i915_gem_retire_requests(struct drm_device *dev); bool i915_gem_retire_requests(struct drm_i915_private *dev_priv);
void i915_gem_retire_requests_ring(struct intel_engine_cs *engine); void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
static inline u32 i915_reset_counter(struct i915_gpu_error *error) static inline u32 i915_reset_counter(struct i915_gpu_error *error)
@ -3147,7 +3277,6 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_init(struct drm_device *dev); int __must_check i915_gem_init(struct drm_device *dev);
int i915_gem_init_engines(struct drm_device *dev); int i915_gem_init_engines(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev); int __must_check i915_gem_init_hw(struct drm_device *dev);
int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
void i915_gem_init_swizzling(struct drm_device *dev); void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_engines(struct drm_device *dev); void i915_gem_cleanup_engines(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev); int __must_check i915_gpu_idle(struct drm_device *dev);
@ -3215,8 +3344,6 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
bool i915_gem_obj_bound(struct drm_i915_gem_object *o, bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
struct i915_address_space *vm); struct i915_address_space *vm);
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
struct i915_vma * struct i915_vma *
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm); struct i915_address_space *vm);
@ -3251,14 +3378,8 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal); return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
} }
static inline unsigned long unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
return i915_gem_obj_size(obj, &ggtt->base);
}
static inline int __must_check static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
@ -3272,12 +3393,6 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
alignment, flags | PIN_GLOBAL); alignment, flags | PIN_GLOBAL);
} }
static inline int
i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
{
return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
}
void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view); const struct i915_ggtt_view *view);
static inline void static inline void
@ -3301,10 +3416,10 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
/* i915_gem_context.c */ /* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev); int __must_check i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_lost(struct drm_i915_private *dev_priv);
void i915_gem_context_fini(struct drm_device *dev); void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_reset(struct drm_device *dev); void i915_gem_context_reset(struct drm_device *dev);
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
int i915_gem_context_enable(struct drm_i915_gem_request *req);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct drm_i915_gem_request *req); int i915_switch_context(struct drm_i915_gem_request *req);
struct intel_context * struct intel_context *
@ -3335,6 +3450,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
/* i915_gem_evict.c */ /* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int __must_check i915_gem_evict_something(struct drm_device *dev,
@ -3349,9 +3466,9 @@ int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
/* belongs in i915_gem_gtt.h */ /* belongs in i915_gem_gtt.h */
static inline void i915_gem_chipset_flush(struct drm_device *dev) static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
{ {
if (INTEL_INFO(dev)->gen < 6) if (INTEL_GEN(dev_priv) < 6)
intel_gtt_chipset_flush(); intel_gtt_chipset_flush();
} }
@ -3430,18 +3547,19 @@ static inline void i915_error_state_buf_release(
{ {
kfree(eb->buf); kfree(eb->buf);
} }
void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, void i915_capture_error_state(struct drm_i915_private *dev_priv,
u32 engine_mask,
const char *error_msg); const char *error_msg);
void i915_error_state_get(struct drm_device *dev, void i915_error_state_get(struct drm_device *dev,
struct i915_error_state_file_priv *error_priv); struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv); void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev); void i915_destroy_error_state(struct drm_device *dev);
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
const char *i915_cache_level_str(struct drm_i915_private *i915, int type); const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
/* i915_cmd_parser.c */ /* i915_cmd_parser.c */
int i915_cmd_parser_get_version(void); int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
int i915_cmd_parser_init_ring(struct intel_engine_cs *engine); int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine); void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
bool i915_needs_cmd_parser(struct intel_engine_cs *engine); bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
@ -3492,7 +3610,7 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
extern int intel_opregion_setup(struct drm_device *dev); extern int intel_opregion_setup(struct drm_device *dev);
extern void intel_opregion_init(struct drm_device *dev); extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev); extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev); extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable); bool enable);
extern int intel_opregion_notify_adapter(struct drm_device *dev, extern int intel_opregion_notify_adapter(struct drm_device *dev,
@ -3502,7 +3620,9 @@ extern int intel_opregion_get_panel_type(struct drm_device *dev);
static inline int intel_opregion_setup(struct drm_device *dev) { return 0; } static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
static inline void intel_opregion_init(struct drm_device *dev) { return; } static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; } static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
}
static inline int static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable) intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{ {
@ -3538,26 +3658,25 @@ extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_display_resume(struct drm_device *dev); extern void intel_display_resume(struct drm_device *dev);
extern void i915_redisable_vga(struct drm_device *dev); extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev); extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val); extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev); extern void intel_init_pch_refclk(struct drm_device *dev);
extern void intel_set_rps(struct drm_device *dev, u8 val); extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable); bool enable);
extern void intel_detect_pch(struct drm_device *dev); extern void intel_detect_pch(struct drm_device *dev);
extern int intel_enable_rc6(const struct drm_device *dev);
extern bool i915_semaphore_is_enabled(struct drm_device *dev); extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
int i915_reg_read_ioctl(struct drm_device *dev, void *data, int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file); struct drm_file *file);
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
/* overlay */ /* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); extern struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
struct intel_overlay_error_state *error); struct intel_overlay_error_state *error);
extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); extern struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct drm_device *dev, struct drm_device *dev,
struct intel_display_error_state *error); struct intel_display_error_state *error);
@ -3586,6 +3705,24 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg); u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
/* intel_dpio_phy.c */
void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 deemph_reg_value, u32 margin_reg_value,
bool uniq_trans_scale);
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
bool reset);
void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
void chv_phy_release_cl2_override(struct intel_encoder *encoder);
void chv_phy_post_pll_disable(struct intel_encoder *encoder);
void vlv_set_phy_signal_level(struct intel_encoder *encoder,
u32 demph_reg_value, u32 preemph_reg_value,
u32 uniqtranscale_reg_value, u32 tx3_demph);
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
void vlv_phy_reset_lanes(struct intel_encoder *encoder);
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val); int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);

View file

@ -177,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
vaddr += PAGE_SIZE; vaddr += PAGE_SIZE;
} }
i915_gem_chipset_flush(obj->base.dev); i915_gem_chipset_flush(to_i915(obj->base.dev));
st = kmalloc(sizeof(*st), GFP_KERNEL); st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL) if (st == NULL)
@ -347,7 +347,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
} }
drm_clflush_virt_range(vaddr, args->size); drm_clflush_virt_range(vaddr, args->size);
i915_gem_chipset_flush(dev); i915_gem_chipset_flush(to_i915(dev));
out: out:
intel_fb_obj_flush(obj, false, ORIGIN_CPU); intel_fb_obj_flush(obj, false, ORIGIN_CPU);
@ -381,9 +381,9 @@ i915_gem_create(struct drm_file *file,
return -EINVAL; return -EINVAL;
/* Allocate the new object */ /* Allocate the new object */
obj = i915_gem_alloc_object(dev, size); obj = i915_gem_object_create(dev, size);
if (obj == NULL) if (IS_ERR(obj))
return -ENOMEM; return PTR_ERR(obj);
ret = drm_gem_handle_create(file, &obj->base, &handle); ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */ /* drop reference from allocate - handle holds it now */
@ -1006,7 +1006,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
} }
if (needs_clflush_after) if (needs_clflush_after)
i915_gem_chipset_flush(dev); i915_gem_chipset_flush(to_i915(dev));
else else
obj->cache_dirty = true; obj->cache_dirty = true;
@ -1230,8 +1230,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
struct intel_rps_client *rps) struct intel_rps_client *rps)
{ {
struct intel_engine_cs *engine = i915_gem_request_get_engine(req); struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = req->i915;
struct drm_i915_private *dev_priv = dev->dev_private;
const bool irq_test_in_progress = const bool irq_test_in_progress =
ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine); ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
@ -1413,6 +1412,13 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
list_del_init(&request->list); list_del_init(&request->list);
i915_gem_request_remove_from_client(request); i915_gem_request_remove_from_client(request);
if (request->previous_context) {
if (i915.enable_execlists)
intel_lr_context_unpin(request->previous_context,
request->engine);
}
i915_gem_context_unreference(request->ctx);
i915_gem_request_unreference(request); i915_gem_request_unreference(request);
} }
@ -1422,7 +1428,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
struct intel_engine_cs *engine = req->engine; struct intel_engine_cs *engine = req->engine;
struct drm_i915_gem_request *tmp; struct drm_i915_gem_request *tmp;
lockdep_assert_held(&engine->dev->struct_mutex); lockdep_assert_held(&engine->i915->dev->struct_mutex);
if (list_empty(&req->list)) if (list_empty(&req->list))
return; return;
@ -1982,7 +1988,7 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
return size; return size;
/* Previous chips need a power-of-two fence region when tiling */ /* Previous chips need a power-of-two fence region when tiling */
if (INTEL_INFO(dev)->gen == 3) if (IS_GEN3(dev))
gtt_size = 1024*1024; gtt_size = 1024*1024;
else else
gtt_size = 512*1024; gtt_size = 512*1024;
@ -2162,7 +2168,8 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
static void static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{ {
struct sg_page_iter sg_iter; struct sgt_iter sgt_iter;
struct page *page;
int ret; int ret;
BUG_ON(obj->madv == __I915_MADV_PURGED); BUG_ON(obj->madv == __I915_MADV_PURGED);
@ -2184,9 +2191,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
if (obj->madv == I915_MADV_DONTNEED) if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0; obj->dirty = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { for_each_sgt_page(page, sgt_iter, obj->pages) {
struct page *page = sg_page_iter_page(&sg_iter);
if (obj->dirty) if (obj->dirty)
set_page_dirty(page); set_page_dirty(page);
@ -2243,7 +2248,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
struct address_space *mapping; struct address_space *mapping;
struct sg_table *st; struct sg_table *st;
struct scatterlist *sg; struct scatterlist *sg;
struct sg_page_iter sg_iter; struct sgt_iter sgt_iter;
struct page *page; struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */ unsigned long last_pfn = 0; /* suppress gcc warning */
int ret; int ret;
@ -2340,8 +2345,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
err_pages: err_pages:
sg_mark_end(sg); sg_mark_end(sg);
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) for_each_sgt_page(page, sgt_iter, st)
put_page(sg_page_iter_page(&sg_iter)); put_page(page);
sg_free_table(st); sg_free_table(st);
kfree(st); kfree(st);
@ -2395,6 +2400,44 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
return 0; return 0;
} }
/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
{
unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
struct sg_table *sgt = obj->pages;
struct sgt_iter sgt_iter;
struct page *page;
struct page *stack_pages[32];
struct page **pages = stack_pages;
unsigned long i = 0;
void *addr;
/* A single page can always be kmapped */
if (n_pages == 1)
return kmap(sg_page(sgt->sgl));
if (n_pages > ARRAY_SIZE(stack_pages)) {
/* Too big for stack -- allocate temporary array instead */
pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
if (!pages)
return NULL;
}
for_each_sgt_page(page, sgt_iter, sgt)
pages[i++] = page;
/* Check that we have the expected number of pages */
GEM_BUG_ON(i != n_pages);
addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
if (pages != stack_pages)
drm_free_large(pages);
return addr;
}
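For illustration only, not part of the patch: the caller-facing pattern for the refactored pin_map path, assuming the caller already holds struct_mutex (function name hypothetical).

/* Illustrative sketch only -- not part of this patch. */
static int example_fill_object(struct drm_i915_gem_object *obj,
			       const void *data, size_t len)
{
	void *vaddr;

	/* Gets, pins and maps the backing pages; requires struct_mutex. */
	vaddr = i915_gem_object_pin_map(obj);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, data, len);

	i915_gem_object_unpin_map(obj);
	return 0;
}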
/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj) void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
{ {
int ret; int ret;
@ -2407,29 +2450,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
i915_gem_object_pin_pages(obj); i915_gem_object_pin_pages(obj);
if (obj->mapping == NULL) { if (!obj->mapping) {
struct page **pages; obj->mapping = i915_gem_object_map(obj);
if (!obj->mapping) {
pages = NULL;
if (obj->base.size == PAGE_SIZE)
obj->mapping = kmap(sg_page(obj->pages->sgl));
else
pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
sizeof(*pages),
GFP_TEMPORARY);
if (pages != NULL) {
struct sg_page_iter sg_iter;
int n;
n = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter,
obj->pages->nents, 0)
pages[n++] = sg_page_iter_page(&sg_iter);
obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
drm_free_large(pages);
}
if (obj->mapping == NULL) {
i915_gem_object_unpin_pages(obj); i915_gem_object_unpin_pages(obj);
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
@ -2502,9 +2525,8 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
} }
static int static int
i915_gem_init_seqno(struct drm_device *dev, u32 seqno) i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
{ {
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
int ret; int ret;
@ -2514,7 +2536,7 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
if (ret) if (ret)
return ret; return ret;
} }
i915_gem_retire_requests(dev); i915_gem_retire_requests(dev_priv);
/* Finally reset hw state */ /* Finally reset hw state */
for_each_engine(engine, dev_priv) for_each_engine(engine, dev_priv)
@ -2534,7 +2556,7 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
/* HWS page needs to be set less than what we /* HWS page needs to be set less than what we
* will inject to ring * will inject to ring
*/ */
ret = i915_gem_init_seqno(dev, seqno - 1); ret = i915_gem_init_seqno(dev_priv, seqno - 1);
if (ret) if (ret)
return ret; return ret;
@ -2550,13 +2572,11 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
} }
int int
i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
{ {
struct drm_i915_private *dev_priv = dev->dev_private;
/* reserve 0 for non-seqno */ /* reserve 0 for non-seqno */
if (dev_priv->next_seqno == 0) { if (dev_priv->next_seqno == 0) {
int ret = i915_gem_init_seqno(dev, 0); int ret = i915_gem_init_seqno(dev_priv, 0);
if (ret) if (ret)
return ret; return ret;
@ -2580,6 +2600,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
struct drm_i915_private *dev_priv; struct drm_i915_private *dev_priv;
struct intel_ringbuffer *ringbuf; struct intel_ringbuffer *ringbuf;
u32 request_start; u32 request_start;
u32 reserved_tail;
int ret; int ret;
if (WARN_ON(request == NULL)) if (WARN_ON(request == NULL))
@ -2594,9 +2615,10 @@ void __i915_add_request(struct drm_i915_gem_request *request,
* should already have been reserved in the ring buffer. Let the ring * should already have been reserved in the ring buffer. Let the ring
* know that it is time to use that space up. * know that it is time to use that space up.
*/ */
intel_ring_reserved_space_use(ringbuf);
request_start = intel_ring_get_tail(ringbuf); request_start = intel_ring_get_tail(ringbuf);
reserved_tail = request->reserved_space;
request->reserved_space = 0;
/* /*
* Emit any outstanding flushes - execbuf can fail to emit the flush * Emit any outstanding flushes - execbuf can fail to emit the flush
* after having emitted the batchbuffer command. Hence we need to fix * after having emitted the batchbuffer command. Hence we need to fix
@ -2652,15 +2674,21 @@ void __i915_add_request(struct drm_i915_gem_request *request,
/* Not allowed to fail! */ /* Not allowed to fail! */
WARN(ret, "emit|add_request failed: %d!\n", ret); WARN(ret, "emit|add_request failed: %d!\n", ret);
i915_queue_hangcheck(engine->dev); i915_queue_hangcheck(engine->i915);
queue_delayed_work(dev_priv->wq, queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work, &dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ)); round_jiffies_up_relative(HZ));
intel_mark_busy(dev_priv->dev); intel_mark_busy(dev_priv);
/* Sanity check that the reserved size was large enough. */ /* Sanity check that the reserved size was large enough. */
intel_ring_reserved_space_end(ringbuf); ret = intel_ring_get_tail(ringbuf) - request_start;
if (ret < 0)
ret += ringbuf->size;
WARN_ONCE(ret > reserved_tail,
"Not enough space reserved (%d bytes) "
"for adding the request (%d bytes)\n",
reserved_tail, ret);
} }
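For illustration only, not part of the patch: the sanity check above measures how many bytes the request actually consumed, allowing for the tail wrapping past the end of the ring; the same arithmetic in isolation.

/* Illustrative sketch only -- not part of this patch. */
static u32 example_ring_bytes_used(u32 start, u32 tail, u32 ring_size)
{
	s32 used = tail - start;

	/* The tail may have wrapped around the end of the ring buffer. */
	if (used < 0)
		used += ring_size;

	return used;
}

For example, with ring_size 16384, start 16320 and tail 128 this yields 192 bytes used.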
static bool i915_context_is_banned(struct drm_i915_private *dev_priv, static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
@ -2712,18 +2740,6 @@ void i915_gem_request_free(struct kref *req_ref)
{ {
struct drm_i915_gem_request *req = container_of(req_ref, struct drm_i915_gem_request *req = container_of(req_ref,
typeof(*req), ref); typeof(*req), ref);
struct intel_context *ctx = req->ctx;
if (req->file_priv)
i915_gem_request_remove_from_client(req);
if (ctx) {
if (i915.enable_execlists && ctx != req->i915->kernel_context)
intel_lr_context_unpin(ctx, req->engine);
i915_gem_context_unreference(ctx);
}
kmem_cache_free(req->i915->requests, req); kmem_cache_free(req->i915->requests, req);
} }
@ -2732,7 +2748,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
struct intel_context *ctx, struct intel_context *ctx,
struct drm_i915_gem_request **req_out) struct drm_i915_gem_request **req_out)
{ {
struct drm_i915_private *dev_priv = to_i915(engine->dev); struct drm_i915_private *dev_priv = engine->i915;
unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error); unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
struct drm_i915_gem_request *req; struct drm_i915_gem_request *req;
int ret; int ret;
@ -2754,7 +2770,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
if (req == NULL) if (req == NULL)
return -ENOMEM; return -ENOMEM;
ret = i915_gem_get_seqno(engine->dev, &req->seqno); ret = i915_gem_get_seqno(engine->i915, &req->seqno);
if (ret) if (ret)
goto err; goto err;
@ -2765,15 +2781,6 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
req->ctx = ctx; req->ctx = ctx;
i915_gem_context_reference(req->ctx); i915_gem_context_reference(req->ctx);
if (i915.enable_execlists)
ret = intel_logical_ring_alloc_request_extras(req);
else
ret = intel_ring_alloc_request_extras(req);
if (ret) {
i915_gem_context_unreference(req->ctx);
goto err;
}
/* /*
* Reserve space in the ring buffer for all the commands required to * Reserve space in the ring buffer for all the commands required to
* eventually emit this request. This is to guarantee that the * eventually emit this request. This is to guarantee that the
@ -2781,24 +2788,20 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
* to be redone if the request is not actually submitted straight * to be redone if the request is not actually submitted straight
* away, e.g. because a GPU scheduler has deferred it. * away, e.g. because a GPU scheduler has deferred it.
*/ */
req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
if (i915.enable_execlists) if (i915.enable_execlists)
ret = intel_logical_ring_reserve_space(req); ret = intel_logical_ring_alloc_request_extras(req);
else else
ret = intel_ring_reserve_space(req); ret = intel_ring_alloc_request_extras(req);
	if (ret) {
		/*
		 * At this point, the request is fully allocated even if not
		 * fully prepared. Thus it can be cleaned up using the proper
		 * free code.
		 */
		intel_ring_reserved_space_cancel(req->ringbuf);
		i915_gem_request_unreference(req);
		return ret;
	}
	if (ret)
		goto err_ctx;
*req_out = req; *req_out = req;
return 0; return 0;
err_ctx:
i915_gem_context_unreference(ctx);
err: err:
kmem_cache_free(dev_priv->requests, req); kmem_cache_free(dev_priv->requests, req);
return ret; return ret;
@ -2824,7 +2827,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
int err; int err;
if (ctx == NULL) if (ctx == NULL)
ctx = to_i915(engine->dev)->kernel_context; ctx = engine->i915->kernel_context;
err = __i915_gem_request_alloc(engine, ctx, &req); err = __i915_gem_request_alloc(engine, ctx, &req);
return err ? ERR_PTR(err) : req; return err ? ERR_PTR(err) : req;
} }
@ -2888,13 +2891,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
/* Ensure irq handler finishes or is cancelled. */ /* Ensure irq handler finishes or is cancelled. */
tasklet_kill(&engine->irq_tasklet); tasklet_kill(&engine->irq_tasklet);
spin_lock_bh(&engine->execlist_lock); intel_execlists_cancel_requests(engine);
/* list_splice_tail_init checks for empty lists */
list_splice_tail_init(&engine->execlist_queue,
&engine->execlist_retired_req_list);
spin_unlock_bh(&engine->execlist_lock);
intel_execlists_retire_requests(engine);
} }
/* /*
@ -3005,9 +3002,8 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
} }
bool bool
i915_gem_retire_requests(struct drm_device *dev) i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{ {
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
bool idle = true; bool idle = true;
@ -3018,8 +3014,6 @@ i915_gem_retire_requests(struct drm_device *dev)
spin_lock_bh(&engine->execlist_lock); spin_lock_bh(&engine->execlist_lock);
idle &= list_empty(&engine->execlist_queue); idle &= list_empty(&engine->execlist_queue);
spin_unlock_bh(&engine->execlist_lock); spin_unlock_bh(&engine->execlist_lock);
intel_execlists_retire_requests(engine);
} }
} }
@ -3042,7 +3036,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
/* Come back later if the device is busy... */ /* Come back later if the device is busy... */
idle = false; idle = false;
if (mutex_trylock(&dev->struct_mutex)) { if (mutex_trylock(&dev->struct_mutex)) {
idle = i915_gem_retire_requests(dev); idle = i915_gem_retire_requests(dev_priv);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
} }
if (!idle) if (!idle)
@ -3066,7 +3060,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
* Also locking seems to be fubar here, engine->request_list is protected * Also locking seems to be fubar here, engine->request_list is protected
* by dev->struct_mutex. */ * by dev->struct_mutex. */
intel_mark_idle(dev); intel_mark_idle(dev_priv);
if (mutex_trylock(&dev->struct_mutex)) { if (mutex_trylock(&dev->struct_mutex)) {
for_each_engine(engine, dev_priv) for_each_engine(engine, dev_priv)
@ -3096,14 +3090,8 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
if (req == NULL) if (req == NULL)
continue; continue;
if (list_empty(&req->list)) if (i915_gem_request_completed(req, true))
goto retire;
if (i915_gem_request_completed(req, true)) {
__i915_gem_request_retire__upto(req);
retire:
i915_gem_object_retire__read(obj, i); i915_gem_object_retire__read(obj, i);
}
} }
return 0; return 0;
@ -3185,7 +3173,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
ret = __i915_wait_request(req[i], true, ret = __i915_wait_request(req[i], true,
args->timeout_ns > 0 ? &args->timeout_ns : NULL, args->timeout_ns > 0 ? &args->timeout_ns : NULL,
to_rps_client(file)); to_rps_client(file));
i915_gem_request_unreference__unlocked(req[i]); i915_gem_request_unreference(req[i]);
} }
return ret; return ret;
@ -3211,7 +3199,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
if (i915_gem_request_completed(from_req, true)) if (i915_gem_request_completed(from_req, true))
return 0; return 0;
if (!i915_semaphore_is_enabled(obj->base.dev)) { if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
struct drm_i915_private *i915 = to_i915(obj->base.dev); struct drm_i915_private *i915 = to_i915(obj->base.dev);
ret = __i915_wait_request(from_req, ret = __i915_wait_request(from_req,
i915->mm.interruptible, i915->mm.interruptible,
@ -3345,6 +3333,17 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
old_write_domain); old_write_domain);
} }
static void __i915_vma_iounmap(struct i915_vma *vma)
{
GEM_BUG_ON(vma->pin_count);
if (vma->iomap == NULL)
return;
io_mapping_unmap(vma->iomap);
vma->iomap = NULL;
}
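
The new __i915_vma_iounmap() follows the usual release-and-clear idiom: it bails out when nothing is mapped and leaves the pointer NULL afterwards, so callers need not track whether a mapping exists and a repeated call stays harmless. A minimal standalone sketch of that idiom, using plain malloc/free in place of the driver's io_mapping calls (names here are illustrative only, not i915 API):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct mapping {
    int pin_count;    /* must have dropped to zero before teardown */
    void *cpu_ptr;    /* NULL whenever nothing is currently mapped */
};

/* Safe to call any number of times; only the first call does work. */
static void mapping_release(struct mapping *m)
{
    assert(m->pin_count == 0);
    if (m->cpu_ptr == NULL)
        return;
    free(m->cpu_ptr);
    m->cpu_ptr = NULL;
}

int main(void)
{
    struct mapping m = { .pin_count = 0, .cpu_ptr = malloc(4096) };

    mapping_release(&m);
    mapping_release(&m);    /* second call is a no-op, as in the unbind path */
    printf("released, cpu_ptr=%p\n", m.cpu_ptr);
    return 0;
}
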
static int __i915_vma_unbind(struct i915_vma *vma, bool wait) static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
{ {
struct drm_i915_gem_object *obj = vma->obj; struct drm_i915_gem_object *obj = vma->obj;
@ -3377,6 +3376,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
ret = i915_gem_object_put_fence(obj); ret = i915_gem_object_put_fence(obj);
if (ret) if (ret)
return ret; return ret;
__i915_vma_iounmap(vma);
} }
trace_i915_vma_unbind(vma); trace_i915_vma_unbind(vma);
@ -3731,7 +3732,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
return; return;
if (i915_gem_clflush_object(obj, obj->pin_display)) if (i915_gem_clflush_object(obj, obj->pin_display))
i915_gem_chipset_flush(obj->base.dev); i915_gem_chipset_flush(to_i915(obj->base.dev));
old_write_domain = obj->base.write_domain; old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0; obj->base.write_domain = 0;
@ -3929,7 +3930,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
obj->base.write_domain != I915_GEM_DOMAIN_CPU && obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
cpu_write_needs_clflush(obj)) { cpu_write_needs_clflush(obj)) {
if (i915_gem_clflush_object(obj, true)) if (i915_gem_clflush_object(obj, true))
i915_gem_chipset_flush(obj->base.dev); i915_gem_chipset_flush(to_i915(obj->base.dev));
} }
return 0; return 0;
@ -4198,7 +4199,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (ret == 0) if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
i915_gem_request_unreference__unlocked(target); i915_gem_request_unreference(target);
return ret; return ret;
} }
@ -4499,21 +4500,21 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
.put_pages = i915_gem_object_put_pages_gtt, .put_pages = i915_gem_object_put_pages_gtt,
}; };
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
size_t size) size_t size)
{ {
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
struct address_space *mapping; struct address_space *mapping;
gfp_t mask; gfp_t mask;
int ret;
obj = i915_gem_object_alloc(dev); obj = i915_gem_object_alloc(dev);
if (obj == NULL) if (obj == NULL)
return NULL; return ERR_PTR(-ENOMEM);
if (drm_gem_object_init(dev, &obj->base, size) != 0) { ret = drm_gem_object_init(dev, &obj->base, size);
i915_gem_object_free(obj); if (ret)
return NULL; goto fail;
}
mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) { if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
@ -4550,6 +4551,11 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
trace_i915_gem_object_create(obj); trace_i915_gem_object_create(obj);
return obj; return obj;
fail:
i915_gem_object_free(obj);
return ERR_PTR(ret);
} }
static bool discard_backing_storage(struct drm_i915_gem_object *obj) static bool discard_backing_storage(struct drm_i915_gem_object *obj)
@ -4655,16 +4661,12 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view) const struct i915_ggtt_view *view)
{ {
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma; struct i915_vma *vma;
BUG_ON(!view); GEM_BUG_ON(!view);
list_for_each_entry(vma, &obj->vma_list, obj_link) list_for_each_entry(vma, &obj->vma_list, obj_link)
if (vma->vm == &ggtt->base && if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma; return vma;
return NULL; return NULL;
} }
@ -4706,9 +4708,10 @@ i915_gem_suspend(struct drm_device *dev)
if (ret) if (ret)
goto err; goto err;
i915_gem_retire_requests(dev); i915_gem_retire_requests(dev_priv);
i915_gem_stop_engines(dev); i915_gem_stop_engines(dev);
i915_gem_context_lost(dev_priv);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
@ -4727,37 +4730,6 @@ i915_gem_suspend(struct drm_device *dev)
return ret; return ret;
} }
int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
{
struct intel_engine_cs *engine = req->engine;
struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
int i, ret;
if (!HAS_L3_DPF(dev) || !remap_info)
return 0;
ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
if (ret)
return ret;
/*
* Note: We do not worry about the concurrent register cacheline hang
* here because no other code should access these registers other than
* at initialization time.
*/
for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
intel_ring_emit(engine, remap_info[i]);
}
intel_ring_advance(engine);
return ret;
}
void i915_gem_init_swizzling(struct drm_device *dev) void i915_gem_init_swizzling(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
@ -4862,7 +4834,7 @@ i915_gem_init_hw(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
int ret, j; int ret;
/* Double layer security blanket, see i915_gem_init() */ /* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@ -4928,44 +4900,6 @@ i915_gem_init_hw(struct drm_device *dev)
* on re-initialisation * on re-initialisation
*/ */
ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100); ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
if (ret)
goto out;
/* Now it is safe to go back round and do everything else: */
for_each_engine(engine, dev_priv) {
struct drm_i915_gem_request *req;
req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
break;
}
if (engine->id == RCS) {
for (j = 0; j < NUM_L3_SLICES(dev); j++) {
ret = i915_gem_l3_remap(req, j);
if (ret)
goto err_request;
}
}
ret = i915_ppgtt_init_ring(req);
if (ret)
goto err_request;
ret = i915_gem_context_enable(req);
if (ret)
goto err_request;
err_request:
i915_add_request_no_flush(req);
if (ret) {
DRM_ERROR("Failed to enable %s, error=%d\n",
engine->name, ret);
i915_gem_cleanup_engines(dev);
break;
}
}
out: out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@ -4977,9 +4911,6 @@ int i915_gem_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int ret; int ret;
i915.enable_execlists = intel_sanitize_enable_execlists(dev,
i915.enable_execlists);
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
if (!i915.enable_execlists) { if (!i915.enable_execlists) {
@ -5002,10 +4933,7 @@ int i915_gem_init(struct drm_device *dev)
*/ */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
ret = i915_gem_init_userptr(dev); i915_gem_init_userptr(dev_priv);
if (ret)
goto out_unlock;
i915_gem_init_ggtt(dev); i915_gem_init_ggtt(dev);
ret = i915_gem_context_init(dev); ret = i915_gem_context_init(dev);
@ -5042,14 +4970,6 @@ i915_gem_cleanup_engines(struct drm_device *dev)
for_each_engine(engine, dev_priv) for_each_engine(engine, dev_priv)
dev_priv->gt.cleanup_engine(engine); dev_priv->gt.cleanup_engine(engine);
if (i915.enable_execlists)
/*
* Neither the BIOS, ourselves or any other kernel
* expects the system to be in execlists mode on startup,
* so we need to reset the GPU back to legacy mode.
*/
intel_gpu_reset(dev, ALL_ENGINES);
} }
static void static void
@ -5073,7 +4993,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
else else
dev_priv->num_fence_regs = 8; dev_priv->num_fence_regs = 8;
if (intel_vgpu_active(dev)) if (intel_vgpu_active(dev_priv))
dev_priv->num_fence_regs = dev_priv->num_fence_regs =
I915_READ(vgtif_reg(avail_rs.fence_num)); I915_READ(vgtif_reg(avail_rs.fence_num));
@ -5148,6 +5068,34 @@ void i915_gem_load_cleanup(struct drm_device *dev)
kmem_cache_destroy(dev_priv->objects); kmem_cache_destroy(dev_priv->objects);
} }
int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj;
/* Called just before we write the hibernation image.
*
* We need to update the domain tracking to reflect that the CPU
* will be accessing all the pages to create and restore from the
* hibernation, and so upon restoration those pages will be in the
* CPU domain.
*
* To make sure the hibernation image contains the latest state,
* we update that state just before writing out the image.
*/
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
return 0;
}
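
The comment above carries the whole idea: before the hibernation image is written, every tracked object, bound and unbound alike, is claimed for the CPU so the restored image already reflects CPU ownership. A toy sketch of that normalisation pass over a flat array (stand-in domain flags, not the driver's tracking structures):

#include <stdio.h>

#define DOMAIN_CPU (1u << 0)
#define DOMAIN_GTT (1u << 1)

struct object {
    const char *name;
    unsigned int read_domains;
    unsigned int write_domain;
};

/* Claim every object for the CPU before serialising the snapshot. */
static void freeze_objects(struct object *objs, int count)
{
    for (int i = 0; i < count; i++) {
        objs[i].read_domains = DOMAIN_CPU;
        objs[i].write_domain = DOMAIN_CPU;
    }
}

int main(void)
{
    struct object objs[] = {
        { "bound",   DOMAIN_GTT, DOMAIN_GTT },
        { "unbound", DOMAIN_CPU, 0 },
    };

    freeze_objects(objs, 2);
    for (int i = 0; i < 2; i++)
        printf("%s: read=%#x write=%#x\n",
               objs[i].name, objs[i].read_domains, objs[i].write_domain);
    return 0;
}
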
void i915_gem_release(struct drm_device *dev, struct drm_file *file) void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{ {
struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_file_private *file_priv = file->driver_priv;
@ -5254,13 +5202,10 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view) const struct i915_ggtt_view *view)
{ {
struct drm_i915_private *dev_priv = to_i915(o->base.dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma; struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link) list_for_each_entry(vma, &o->vma_list, obj_link)
if (vma->vm == &ggtt->base && if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma->node.start; return vma->node.start;
WARN(1, "global vma for this object not found. (view=%u)\n", view->type); WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
@ -5286,12 +5231,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view) const struct i915_ggtt_view *view)
{ {
struct drm_i915_private *dev_priv = to_i915(o->base.dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma; struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link) list_for_each_entry(vma, &o->vma_list, obj_link)
if (vma->vm == &ggtt->base && if (vma->is_ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view) && i915_ggtt_view_equal(&vma->ggtt_view, view) &&
drm_mm_node_allocated(&vma->node)) drm_mm_node_allocated(&vma->node))
return true; return true;
@ -5310,23 +5253,18 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
return false; return false;
} }
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
struct i915_address_space *vm)
{ {
struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma; struct i915_vma *vma;
WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); GEM_BUG_ON(list_empty(&o->vma_list));
BUG_ON(list_empty(&o->vma_list));
list_for_each_entry(vma, &o->vma_list, obj_link) { list_for_each_entry(vma, &o->vma_list, obj_link) {
if (vma->is_ggtt && if (vma->is_ggtt &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm)
return vma->node.size; return vma->node.size;
} }
return 0; return 0;
} }
@ -5365,8 +5303,8 @@ i915_gem_object_create_from_data(struct drm_device *dev,
size_t bytes; size_t bytes;
int ret; int ret;
obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE)); obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
if (IS_ERR_OR_NULL(obj)) if (IS_ERR(obj))
return obj; return obj;
ret = i915_gem_object_set_to_cpu_domain(obj, true); ret = i915_gem_object_set_to_cpu_domain(obj, true);
@ -134,9 +134,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
if (obj == NULL) { if (obj == NULL) {
int ret; int ret;
obj = i915_gem_alloc_object(pool->dev, size); obj = i915_gem_object_create(pool->dev, size);
if (obj == NULL) if (IS_ERR(obj))
return ERR_PTR(-ENOMEM); return obj;
ret = i915_gem_object_get_pages(obj); ret = i915_gem_object_get_pages(obj);
if (ret) if (ret)
@ -90,6 +90,8 @@
#include "i915_drv.h" #include "i915_drv.h"
#include "i915_trace.h" #include "i915_trace.h"
#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
/* This is a HW constraint. The value below is the largest known requirement /* This is a HW constraint. The value below is the largest known requirement
* I've seen in a spec to date, and that was a workaround for a non-shipping * I've seen in a spec to date, and that was a workaround for a non-shipping
* part. It should be safe to decrease this, but it's more future proof as is. * part. It should be safe to decrease this, but it's more future proof as is.
@ -97,28 +99,27 @@
#define GEN6_CONTEXT_ALIGN (64<<10) #define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096 #define GEN7_CONTEXT_ALIGN 4096
static size_t get_context_alignment(struct drm_device *dev) static size_t get_context_alignment(struct drm_i915_private *dev_priv)
{ {
if (IS_GEN6(dev)) if (IS_GEN6(dev_priv))
return GEN6_CONTEXT_ALIGN; return GEN6_CONTEXT_ALIGN;
return GEN7_CONTEXT_ALIGN; return GEN7_CONTEXT_ALIGN;
} }
static int get_context_size(struct drm_device *dev) static int get_context_size(struct drm_i915_private *dev_priv)
{ {
struct drm_i915_private *dev_priv = dev->dev_private;
int ret; int ret;
u32 reg; u32 reg;
switch (INTEL_INFO(dev)->gen) { switch (INTEL_GEN(dev_priv)) {
case 6: case 6:
reg = I915_READ(CXT_SIZE); reg = I915_READ(CXT_SIZE);
ret = GEN6_CXT_TOTAL_SIZE(reg) * 64; ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
break; break;
case 7: case 7:
reg = I915_READ(GEN7_CXT_SIZE); reg = I915_READ(GEN7_CXT_SIZE);
if (IS_HASWELL(dev)) if (IS_HASWELL(dev_priv))
ret = HSW_CXT_TOTAL_SIZE; ret = HSW_CXT_TOTAL_SIZE;
else else
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
@ -169,6 +170,8 @@ void i915_gem_context_free(struct kref *ctx_ref)
if (ctx->legacy_hw_ctx.rcs_state) if (ctx->legacy_hw_ctx.rcs_state)
drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base); drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
list_del(&ctx->link); list_del(&ctx->link);
ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
kfree(ctx); kfree(ctx);
} }
@ -178,9 +181,9 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
int ret; int ret;
obj = i915_gem_alloc_object(dev, size); obj = i915_gem_object_create(dev, size);
if (obj == NULL) if (IS_ERR(obj))
return ERR_PTR(-ENOMEM); return obj;
/* /*
* Try to make the context utilize L3 as well as LLC. * Try to make the context utilize L3 as well as LLC.
@ -209,6 +212,28 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
return obj; return obj;
} }
static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
int ret;
ret = ida_simple_get(&dev_priv->context_hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
if (ret < 0) {
/* Contexts are only released when no longer active.
* Flush any pending retires to hopefully release some
* stale contexts and try again.
*/
i915_gem_retire_requests(dev_priv);
ret = ida_simple_get(&dev_priv->context_hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
if (ret < 0)
return ret;
}
*out = ret;
return 0;
}
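
assign_hw_id() tries the allocation once, reclaims potentially stale IDs by retiring completed requests, and then retries exactly once before giving up. A self-contained sketch of that try/reclaim/retry shape around a toy bitmap allocator (this is not the ida_simple_get() API, just the control flow):

#include <stdint.h>
#include <stdio.h>

#define MAX_IDS 32

static uint32_t used_mask;     /* bit n set => id n is taken */
static uint32_t stale_mask;    /* ids a "retire" pass could release */

static int try_alloc_id(void)
{
    for (int i = 0; i < MAX_IDS; i++) {
        if (!(used_mask & (1u << i))) {
            used_mask |= 1u << i;
            return i;
        }
    }
    return -1;    /* exhausted */
}

static void retire_stale_ids(void)
{
    used_mask &= ~stale_mask;
    stale_mask = 0;
}

static int assign_id(unsigned int *out)
{
    int id = try_alloc_id();

    if (id < 0) {
        /* Exhausted: reclaim whatever is no longer in use, retry once. */
        retire_stale_ids();
        id = try_alloc_id();
        if (id < 0)
            return -1;
    }
    *out = (unsigned int)id;
    return 0;
}

int main(void)
{
    unsigned int id;

    used_mask = 0xffffffffu;     /* start fully exhausted */
    stale_mask = 0x0000000fu;    /* pretend four ids are merely stale */

    if (assign_id(&id) == 0)
        printf("got id %u after reclaiming\n", id);
    return 0;
}
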
static struct intel_context * static struct intel_context *
__create_hw_context(struct drm_device *dev, __create_hw_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv) struct drm_i915_file_private *file_priv)
@ -221,6 +246,12 @@ __create_hw_context(struct drm_device *dev,
if (ctx == NULL) if (ctx == NULL)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
ret = assign_hw_id(dev_priv, &ctx->hw_id);
if (ret) {
kfree(ctx);
return ERR_PTR(ret);
}
kref_init(&ctx->ref); kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->context_list); list_add_tail(&ctx->link, &dev_priv->context_list);
ctx->i915 = dev_priv; ctx->i915 = dev_priv;
@ -249,7 +280,7 @@ __create_hw_context(struct drm_device *dev,
/* NB: Mark all slices as needing a remap so that when the context first /* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there * loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */ * is no remap info, it will be a NOP. */
ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1; ctx->remap_slice = ALL_L3_SLICES(dev_priv);
ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD; ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
@ -288,7 +319,7 @@ i915_gem_create_context(struct drm_device *dev,
* context. * context.
*/ */
ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state, ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
get_context_alignment(dev), 0); get_context_alignment(to_i915(dev)), 0);
if (ret) { if (ret) {
DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret); DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
goto err_destroy; goto err_destroy;
@ -336,7 +367,6 @@ static void i915_gem_context_unpin(struct intel_context *ctx,
void i915_gem_context_reset(struct drm_device *dev) void i915_gem_context_reset(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int i;
if (i915.enable_execlists) { if (i915.enable_execlists) {
struct intel_context *ctx; struct intel_context *ctx;
@ -345,17 +375,7 @@ void i915_gem_context_reset(struct drm_device *dev)
intel_lr_context_reset(dev_priv, ctx); intel_lr_context_reset(dev_priv, ctx);
} }
for (i = 0; i < I915_NUM_ENGINES; i++) { i915_gem_context_lost(dev_priv);
struct intel_engine_cs *engine = &dev_priv->engine[i];
if (engine->last_context) {
i915_gem_context_unpin(engine->last_context, engine);
engine->last_context = NULL;
}
}
/* Force the GPU state to be reinitialised on enabling */
dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
} }
int i915_gem_context_init(struct drm_device *dev) int i915_gem_context_init(struct drm_device *dev)
@ -368,19 +388,25 @@ int i915_gem_context_init(struct drm_device *dev)
if (WARN_ON(dev_priv->kernel_context)) if (WARN_ON(dev_priv->kernel_context))
return 0; return 0;
if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) { if (intel_vgpu_active(dev_priv) &&
HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
if (!i915.enable_execlists) { if (!i915.enable_execlists) {
DRM_INFO("Only EXECLIST mode is supported in vgpu.\n"); DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
return -EINVAL; return -EINVAL;
} }
} }
/* Using the simple ida interface, the max is limited by sizeof(int) */
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
ida_init(&dev_priv->context_hw_ida);
if (i915.enable_execlists) { if (i915.enable_execlists) {
/* NB: intentionally left blank. We will allocate our own /* NB: intentionally left blank. We will allocate our own
* backing objects as we need them, thank you very much */ * backing objects as we need them, thank you very much */
dev_priv->hw_context_size = 0; dev_priv->hw_context_size = 0;
} else if (HAS_HW_CONTEXTS(dev)) { } else if (HAS_HW_CONTEXTS(dev_priv)) {
dev_priv->hw_context_size = round_up(get_context_size(dev), 4096); dev_priv->hw_context_size =
round_up(get_context_size(dev_priv), 4096);
if (dev_priv->hw_context_size > (1<<20)) { if (dev_priv->hw_context_size > (1<<20)) {
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n", DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
dev_priv->hw_context_size); dev_priv->hw_context_size);
@ -403,61 +429,35 @@ int i915_gem_context_init(struct drm_device *dev)
return 0; return 0;
} }
void i915_gem_context_lost(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
for_each_engine(engine, dev_priv) {
if (engine->last_context == NULL)
continue;
i915_gem_context_unpin(engine->last_context, engine);
engine->last_context = NULL;
}
/* Force the GPU state to be reinitialised on enabling */
dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
dev_priv->kernel_context->remap_slice = ALL_L3_SLICES(dev_priv);
}
void i915_gem_context_fini(struct drm_device *dev) void i915_gem_context_fini(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_context *dctx = dev_priv->kernel_context; struct intel_context *dctx = dev_priv->kernel_context;
int i;
if (dctx->legacy_hw_ctx.rcs_state) {
/* The only known way to stop the gpu from accessing the hw context is
* to reset it. Do this as the very last operation to avoid confusing
* other code, leading to spurious errors. */
intel_gpu_reset(dev, ALL_ENGINES);
/* When default context is created and switched to, base object refcount
* will be 2 (+1 from object creation and +1 from do_switch()).
* i915_gem_context_fini() will be called after gpu_idle() has switched
* to default context. So we need to unreference the base object once
* to offset the do_switch part, so that i915_gem_context_unreference()
* can then free the base object correctly. */
WARN_ON(!dev_priv->engine[RCS].last_context);
if (dctx->legacy_hw_ctx.rcs_state)
i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state); i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
}
for (i = I915_NUM_ENGINES; --i >= 0;) {
struct intel_engine_cs *engine = &dev_priv->engine[i];
if (engine->last_context) {
i915_gem_context_unpin(engine->last_context, engine);
engine->last_context = NULL;
}
}
i915_gem_context_unreference(dctx); i915_gem_context_unreference(dctx);
dev_priv->kernel_context = NULL; dev_priv->kernel_context = NULL;
}
int i915_gem_context_enable(struct drm_i915_gem_request *req) ida_destroy(&dev_priv->context_hw_ida);
{
struct intel_engine_cs *engine = req->engine;
int ret;
if (i915.enable_execlists) {
if (engine->init_context == NULL)
return 0;
ret = engine->init_context(req);
} else
ret = i915_switch_context(req);
if (ret) {
DRM_ERROR("ring init context: %d\n", ret);
return ret;
}
return 0;
} }
static int context_idr_cleanup(int id, void *p, void *data) static int context_idr_cleanup(int id, void *p, void *data)
@ -510,12 +510,13 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
static inline int static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{ {
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine = req->engine; struct intel_engine_cs *engine = req->engine;
u32 flags = hw_flags | MI_MM_SPACE_GTT; u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings = const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */ /* Use an extended w/a on ivb+ if signalling from other rings */
i915_semaphore_is_enabled(engine->dev) ? i915_semaphore_is_enabled(dev_priv) ?
hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 : hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 :
0; 0;
int len, ret; int len, ret;
@ -524,21 +525,21 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
* explicitly, so we rely on the value at ring init, stored in * explicitly, so we rely on the value at ring init, stored in
* itlb_before_ctx_switch. * itlb_before_ctx_switch.
*/ */
if (IS_GEN6(engine->dev)) { if (IS_GEN6(dev_priv)) {
ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0); ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
if (ret) if (ret)
return ret; return ret;
} }
/* These flags are for resource streamer on HSW+ */ /* These flags are for resource streamer on HSW+ */
if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8) if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN); flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
else if (INTEL_INFO(engine->dev)->gen < 8) else if (INTEL_GEN(dev_priv) < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN); flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
len = 4; len = 4;
if (INTEL_INFO(engine->dev)->gen >= 7) if (INTEL_GEN(dev_priv) >= 7)
len += 2 + (num_rings ? 4*num_rings + 6 : 0); len += 2 + (num_rings ? 4*num_rings + 6 : 0);
ret = intel_ring_begin(req, len); ret = intel_ring_begin(req, len);
@ -546,14 +547,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
return ret; return ret;
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
if (INTEL_INFO(engine->dev)->gen >= 7) { if (INTEL_GEN(dev_priv) >= 7) {
intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE); intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
if (num_rings) { if (num_rings) {
struct intel_engine_cs *signaller; struct intel_engine_cs *signaller;
intel_ring_emit(engine, intel_ring_emit(engine,
MI_LOAD_REGISTER_IMM(num_rings)); MI_LOAD_REGISTER_IMM(num_rings));
for_each_engine(signaller, to_i915(engine->dev)) { for_each_engine(signaller, dev_priv) {
if (signaller == engine) if (signaller == engine)
continue; continue;
@ -576,14 +577,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
*/ */
intel_ring_emit(engine, MI_NOOP); intel_ring_emit(engine, MI_NOOP);
if (INTEL_INFO(engine->dev)->gen >= 7) { if (INTEL_GEN(dev_priv) >= 7) {
if (num_rings) { if (num_rings) {
struct intel_engine_cs *signaller; struct intel_engine_cs *signaller;
i915_reg_t last_reg = {}; /* keep gcc quiet */ i915_reg_t last_reg = {}; /* keep gcc quiet */
intel_ring_emit(engine, intel_ring_emit(engine,
MI_LOAD_REGISTER_IMM(num_rings)); MI_LOAD_REGISTER_IMM(num_rings));
for_each_engine(signaller, to_i915(engine->dev)) { for_each_engine(signaller, dev_priv) {
if (signaller == engine) if (signaller == engine)
continue; continue;
@ -609,7 +610,37 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
return ret; return ret;
} }
static inline bool skip_rcs_switch(struct intel_engine_cs *engine, static int remap_l3(struct drm_i915_gem_request *req, int slice)
{
u32 *remap_info = req->i915->l3_parity.remap_info[slice];
struct intel_engine_cs *engine = req->engine;
int i, ret;
if (!remap_info)
return 0;
ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
if (ret)
return ret;
/*
* Note: We do not worry about the concurrent register cacheline hang
* here because no other code should access these registers other than
* at initialization time.
*/
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
intel_ring_emit(engine, remap_info[i]);
}
intel_ring_emit(engine, MI_NOOP);
intel_ring_advance(engine);
return 0;
}
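
Where the removed i915_gem_l3_remap() emitted a separate MI_LOAD_REGISTER_IMM(1) for every L3 log register (three dwords per entry), remap_l3() reserves GEN7_L3LOG_SIZE/4 * 2 + 2 dwords: one header covering all entries, one offset/value pair per register, and a trailing MI_NOOP. A rough standalone sketch of packing such a batch into an array of dwords (the opcode encodings and register offsets below are placeholders, not real hardware values):

#include <stdint.h>
#include <stdio.h>

#define N_REGS       4                      /* stand-in for GEN7_L3LOG_SIZE/4 */
#define FAKE_LRI(n)  (0x11000000u | (n))    /* placeholder "load n registers" header */
#define FAKE_NOOP    0x00000000u            /* placeholder no-op dword */

int main(void)
{
    const uint32_t reg_base = 0xb030;       /* placeholder register offset */
    const uint32_t values[N_REGS] = { 0xa, 0xb, 0xc, 0xd };
    uint32_t batch[1 + 2 * N_REGS + 1];     /* header + pairs + pad */
    unsigned int n = 0;

    batch[n++] = FAKE_LRI(N_REGS);
    for (int i = 0; i < N_REGS; i++) {
        batch[n++] = reg_base + 4 * i;      /* register offset */
        batch[n++] = values[i];             /* value to load */
    }
    batch[n++] = FAKE_NOOP;                 /* round out the reserved space */

    for (unsigned int i = 0; i < n; i++)
        printf("dword %2u: 0x%08x\n", i, batch[i]);
    return 0;
}
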
static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *engine,
struct intel_context *to) struct intel_context *to)
{ {
if (to->remap_slice) if (to->remap_slice)
@ -618,36 +649,44 @@ static inline bool skip_rcs_switch(struct intel_engine_cs *engine,
if (!to->legacy_hw_ctx.initialized) if (!to->legacy_hw_ctx.initialized)
return false; return false;
if (to->ppgtt && if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
!(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
return false; return false;
return to == engine->last_context; return to == engine->last_context;
} }
static bool static bool
needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to) needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *engine,
struct intel_context *to)
{ {
if (!to->ppgtt) if (!ppgtt)
return false; return false;
/* Always load the ppgtt on first use */
if (!engine->last_context)
return true;
/* Same context without new entries, skip */
if (engine->last_context == to && if (engine->last_context == to &&
!(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
return false; return false;
if (engine->id != RCS) if (engine->id != RCS)
return true; return true;
if (INTEL_INFO(engine->dev)->gen < 8) if (INTEL_GEN(engine->i915) < 8)
return true; return true;
return false; return false;
} }
static bool static bool
needs_pd_load_post(struct intel_context *to, u32 hw_flags) needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
struct intel_context *to,
u32 hw_flags)
{ {
if (!to->ppgtt) if (!ppgtt)
return false; return false;
if (!IS_GEN8(to->i915)) if (!IS_GEN8(to->i915))
@ -663,16 +702,17 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
{ {
struct intel_context *to = req->ctx; struct intel_context *to = req->ctx;
struct intel_engine_cs *engine = req->engine; struct intel_engine_cs *engine = req->engine;
struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
struct intel_context *from; struct intel_context *from;
u32 hw_flags; u32 hw_flags;
int ret, i; int ret, i;
if (skip_rcs_switch(engine, to)) if (skip_rcs_switch(ppgtt, engine, to))
return 0; return 0;
/* Trying to pin first makes error handling easier. */ /* Trying to pin first makes error handling easier. */
ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state, ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
get_context_alignment(engine->dev), get_context_alignment(engine->i915),
0); 0);
if (ret) if (ret)
return ret; return ret;
@ -698,13 +738,13 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
if (ret) if (ret)
goto unpin_out; goto unpin_out;
if (needs_pd_load_pre(engine, to)) { if (needs_pd_load_pre(ppgtt, engine, to)) {
/* Older GENs and non render rings still want the load first, /* Older GENs and non render rings still want the load first,
* "PP_DCLV followed by PP_DIR_BASE register through Load * "PP_DCLV followed by PP_DIR_BASE register through Load
* Register Immediate commands in Ring Buffer before submitting * Register Immediate commands in Ring Buffer before submitting
* a context."*/ * a context."*/
trace_switch_mm(engine, to); trace_switch_mm(engine, to);
ret = to->ppgtt->switch_mm(to->ppgtt, req); ret = ppgtt->switch_mm(ppgtt, req);
if (ret) if (ret)
goto unpin_out; goto unpin_out;
} }
@ -715,16 +755,11 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
* space. This means we must enforce that a page table load * space. This means we must enforce that a page table load
* occur when this occurs. */ * occur when this occurs. */
hw_flags = MI_RESTORE_INHIBIT; hw_flags = MI_RESTORE_INHIBIT;
else if (to->ppgtt && else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)
hw_flags = MI_FORCE_RESTORE; hw_flags = MI_FORCE_RESTORE;
else else
hw_flags = 0; hw_flags = 0;
/* We should never emit switch_mm more than once */
WARN_ON(needs_pd_load_pre(engine, to) &&
needs_pd_load_post(to, hw_flags));
if (to != from || (hw_flags & MI_FORCE_RESTORE)) { if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
ret = mi_set_context(req, hw_flags); ret = mi_set_context(req, hw_flags);
if (ret) if (ret)
@ -759,9 +794,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
/* GEN8 does *not* require an explicit reload if the PDPs have been /* GEN8 does *not* require an explicit reload if the PDPs have been
* setup, and we do not wish to move them. * setup, and we do not wish to move them.
*/ */
if (needs_pd_load_post(to, hw_flags)) { if (needs_pd_load_post(ppgtt, to, hw_flags)) {
trace_switch_mm(engine, to); trace_switch_mm(engine, to);
ret = to->ppgtt->switch_mm(to->ppgtt, req); ret = ppgtt->switch_mm(ppgtt, req);
/* The hardware context switch is emitted, but we haven't /* The hardware context switch is emitted, but we haven't
* actually changed the state - so it's probably safe to bail * actually changed the state - so it's probably safe to bail
* here. Still, let the user know something dangerous has * here. Still, let the user know something dangerous has
@ -771,14 +806,14 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
return ret; return ret;
} }
if (to->ppgtt) if (ppgtt)
to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
for (i = 0; i < MAX_L3_SLICES; i++) { for (i = 0; i < MAX_L3_SLICES; i++) {
if (!(to->remap_slice & (1<<i))) if (!(to->remap_slice & (1<<i)))
continue; continue;
ret = i915_gem_l3_remap(req, i); ret = remap_l3(req, i);
if (ret) if (ret)
return ret; return ret;
@ -825,17 +860,18 @@ int i915_switch_context(struct drm_i915_gem_request *req)
if (engine->id != RCS || if (engine->id != RCS ||
req->ctx->legacy_hw_ctx.rcs_state == NULL) { req->ctx->legacy_hw_ctx.rcs_state == NULL) {
struct intel_context *to = req->ctx; struct intel_context *to = req->ctx;
struct i915_hw_ppgtt *ppgtt =
to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
if (needs_pd_load_pre(engine, to)) { if (needs_pd_load_pre(ppgtt, engine, to)) {
int ret; int ret;
trace_switch_mm(engine, to); trace_switch_mm(engine, to);
ret = to->ppgtt->switch_mm(to->ppgtt, req); ret = ppgtt->switch_mm(ppgtt, req);
if (ret) if (ret)
return ret; return ret;
/* Doing a PD load always reloads the page dirs */ ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
} }
if (to != engine->last_context) { if (to != engine->last_context) {
@ -1004,3 +1040,42 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
return ret; return ret;
} }
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_reset_stats *args = data;
struct i915_ctx_hang_stats *hs;
struct intel_context *ctx;
int ret;
if (args->flags || args->pad)
return -EINVAL;
if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
return -EPERM;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
return PTR_ERR(ctx);
}
hs = &ctx->hang_stats;
if (capable(CAP_SYS_ADMIN))
args->reset_count = i915_reset_count(&dev_priv->gpu_error);
else
args->reset_count = 0;
args->batch_active = hs->batch_active;
args->batch_pending = hs->batch_pending;
mutex_unlock(&dev->struct_mutex);
return 0;
}
@ -154,7 +154,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
if (ret) if (ret)
return ret; return ret;
i915_gem_retire_requests(dev); i915_gem_retire_requests(to_i915(dev));
goto search_again; goto search_again;
} }
@ -265,7 +265,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
if (ret) if (ret)
return ret; return ret;
i915_gem_retire_requests(vm->dev); i915_gem_retire_requests(to_i915(vm->dev));
WARN_ON(!list_empty(&vm->active_list)); WARN_ON(!list_empty(&vm->active_list));
} }
@ -722,7 +722,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
struct i915_address_space *vm; struct i915_address_space *vm;
struct list_head ordered_vmas; struct list_head ordered_vmas;
struct list_head pinned_vmas; struct list_head pinned_vmas;
bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4; bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
int retry; int retry;
i915_gem_retire_requests_ring(engine); i915_gem_retire_requests_ring(engine);
@ -963,7 +963,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
} }
if (flush_chipset) if (flush_chipset)
i915_gem_chipset_flush(req->engine->dev); i915_gem_chipset_flush(req->engine->i915);
if (flush_domains & I915_GEM_DOMAIN_GTT) if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb(); wmb();
@ -1083,14 +1083,6 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
return ERR_PTR(-EIO); return ERR_PTR(-EIO);
} }
if (i915.enable_execlists && !ctx->engine[engine->id].state) {
int ret = intel_lr_context_deferred_alloc(ctx, engine);
if (ret) {
DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
return ERR_PTR(ret);
}
}
return ctx; return ctx;
} }
@ -1125,7 +1117,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
i915_gem_request_assign(&obj->last_fenced_req, req); i915_gem_request_assign(&obj->last_fenced_req, req);
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
struct drm_i915_private *dev_priv = to_i915(engine->dev); struct drm_i915_private *dev_priv = engine->i915;
list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list, list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
&dev_priv->mm.fence_list); &dev_priv->mm.fence_list);
} }
@ -745,15 +745,15 @@ i915_gem_swizzle_page(struct page *page)
void void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{ {
struct sg_page_iter sg_iter; struct sgt_iter sgt_iter;
struct page *page;
int i; int i;
if (obj->bit_17 == NULL) if (obj->bit_17 == NULL)
return; return;
i = 0; i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { for_each_sgt_page(page, sgt_iter, obj->pages) {
struct page *page = sg_page_iter_page(&sg_iter);
char new_bit_17 = page_to_phys(page) >> 17; char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) != if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) { (test_bit(i, obj->bit_17) != 0)) {
@ -775,7 +775,8 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
void void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{ {
struct sg_page_iter sg_iter; struct sgt_iter sgt_iter;
struct page *page;
int page_count = obj->base.size >> PAGE_SHIFT; int page_count = obj->base.size >> PAGE_SHIFT;
int i; int i;
@ -790,8 +791,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
} }
i = 0; i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17)) for_each_sgt_page(page, sgt_iter, obj->pages) {
if (page_to_phys(page) & (1 << 17))
__set_bit(i, obj->bit_17); __set_bit(i, obj->bit_17);
else else
__clear_bit(i, obj->bit_17); __clear_bit(i, obj->bit_17);
@ -93,6 +93,13 @@
* *
*/ */
static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
GEM_BUG_ON(!i915_is_ggtt(vm));
return container_of(vm, struct i915_ggtt, base);
}
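
i915_vm_to_ggtt() is a checked downcast: the address space is embedded as the base member of struct i915_ggtt, so container_of() recovers the enclosing structure from a pointer to that member. The same idiom in standalone form, with generic types rather than the driver's:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Classic container_of: step back from a member to its enclosing struct. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct address_space {
    int is_ggtt;               /* discriminator checked before downcasting */
    unsigned long total;
};

struct ggtt {
    void *mappable;            /* state only the global GTT variant carries */
    struct address_space base;
};

static struct ggtt *vm_to_ggtt(struct address_space *vm)
{
    assert(vm->is_ggtt);       /* mirrors the GEM_BUG_ON(!i915_is_ggtt(vm)) */
    return container_of(vm, struct ggtt, base);
}

int main(void)
{
    struct ggtt g = { .mappable = NULL, .base = { .is_ggtt = 1, .total = 1 << 20 } };
    struct address_space *vm = &g.base;    /* callers only ever see the base */

    printf("recovered %p from %p (expect %p)\n",
           (void *)vm_to_ggtt(vm), (void *)vm, (void *)&g);
    return 0;
}
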
static int static int
i915_get_ggtt_vma_pages(struct i915_vma *vma); i915_get_ggtt_vma_pages(struct i915_vma *vma);
@ -103,25 +110,29 @@ const struct i915_ggtt_view i915_ggtt_view_rotated = {
.type = I915_GGTT_VIEW_ROTATED, .type = I915_GGTT_VIEW_ROTATED,
}; };
static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
int enable_ppgtt)
{ {
bool has_aliasing_ppgtt; bool has_aliasing_ppgtt;
bool has_full_ppgtt; bool has_full_ppgtt;
bool has_full_48bit_ppgtt; bool has_full_48bit_ppgtt;
has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6; has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6;
has_full_ppgtt = INTEL_INFO(dev)->gen >= 7; has_full_ppgtt = INTEL_GEN(dev_priv) >= 7;
has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9; has_full_48bit_ppgtt =
IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
if (intel_vgpu_active(dev)) if (intel_vgpu_active(dev_priv))
has_full_ppgtt = false; /* emulation is too hard */ has_full_ppgtt = false; /* emulation is too hard */
if (!has_aliasing_ppgtt)
return 0;
/* /*
* We don't allow disabling PPGTT for gen9+ as it's a requirement for * We don't allow disabling PPGTT for gen9+ as it's a requirement for
* execlists, the sole mechanism available to submit work. * execlists, the sole mechanism available to submit work.
*/ */
if (INTEL_INFO(dev)->gen < 9 && if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
(enable_ppgtt == 0 || !has_aliasing_ppgtt))
return 0; return 0;
if (enable_ppgtt == 1) if (enable_ppgtt == 1)
@ -135,19 +146,19 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
#ifdef CONFIG_INTEL_IOMMU #ifdef CONFIG_INTEL_IOMMU
/* Disable ppgtt on SNB if VT-d is on. */ /* Disable ppgtt on SNB if VT-d is on. */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) { if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
DRM_INFO("Disabling PPGTT because VT-d is on\n"); DRM_INFO("Disabling PPGTT because VT-d is on\n");
return 0; return 0;
} }
#endif #endif
/* Early VLV doesn't have this */ /* Early VLV doesn't have this */
if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) { if (IS_VALLEYVIEW(dev_priv) && dev_priv->dev->pdev->revision < 0xb) {
DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
return 0; return 0;
} }
if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists) if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists)
return has_full_48bit_ppgtt ? 3 : 2; return has_full_48bit_ppgtt ? 3 : 2;
else else
return has_aliasing_ppgtt ? 1 : 0; return has_aliasing_ppgtt ? 1 : 0;
@ -866,6 +877,7 @@ static void gen8_free_page_tables(struct drm_device *dev,
static int gen8_init_scratch(struct i915_address_space *vm) static int gen8_init_scratch(struct i915_address_space *vm)
{ {
struct drm_device *dev = vm->dev; struct drm_device *dev = vm->dev;
int ret;
vm->scratch_page = alloc_scratch_page(dev); vm->scratch_page = alloc_scratch_page(dev);
if (IS_ERR(vm->scratch_page)) if (IS_ERR(vm->scratch_page))
@ -873,24 +885,21 @@ static int gen8_init_scratch(struct i915_address_space *vm)
vm->scratch_pt = alloc_pt(dev); vm->scratch_pt = alloc_pt(dev);
if (IS_ERR(vm->scratch_pt)) { if (IS_ERR(vm->scratch_pt)) {
free_scratch_page(dev, vm->scratch_page); ret = PTR_ERR(vm->scratch_pt);
return PTR_ERR(vm->scratch_pt); goto free_scratch_page;
} }
vm->scratch_pd = alloc_pd(dev); vm->scratch_pd = alloc_pd(dev);
if (IS_ERR(vm->scratch_pd)) { if (IS_ERR(vm->scratch_pd)) {
free_pt(dev, vm->scratch_pt); ret = PTR_ERR(vm->scratch_pd);
free_scratch_page(dev, vm->scratch_page); goto free_pt;
return PTR_ERR(vm->scratch_pd);
} }
if (USES_FULL_48BIT_PPGTT(dev)) { if (USES_FULL_48BIT_PPGTT(dev)) {
vm->scratch_pdp = alloc_pdp(dev); vm->scratch_pdp = alloc_pdp(dev);
if (IS_ERR(vm->scratch_pdp)) { if (IS_ERR(vm->scratch_pdp)) {
free_pd(dev, vm->scratch_pd); ret = PTR_ERR(vm->scratch_pdp);
free_pt(dev, vm->scratch_pt); goto free_pd;
free_scratch_page(dev, vm->scratch_page);
return PTR_ERR(vm->scratch_pdp);
} }
} }
@ -900,6 +909,15 @@ static int gen8_init_scratch(struct i915_address_space *vm)
gen8_initialize_pdp(vm, vm->scratch_pdp); gen8_initialize_pdp(vm, vm->scratch_pdp);
return 0; return 0;
free_pd:
free_pd(dev, vm->scratch_pd);
free_pt:
free_pt(dev, vm->scratch_pt);
free_scratch_page:
free_scratch_page(dev, vm->scratch_page);
return ret;
} }
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
@ -978,7 +996,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{ {
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
if (intel_vgpu_active(vm->dev)) if (intel_vgpu_active(to_i915(vm->dev)))
gen8_ppgtt_notify_vgt(ppgtt, false); gen8_ppgtt_notify_vgt(ppgtt, false);
if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
@ -1529,14 +1547,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
0, 0, 0, 0,
GEN8_PML4E_SHIFT); GEN8_PML4E_SHIFT);
if (intel_vgpu_active(ppgtt->base.dev)) { if (intel_vgpu_active(to_i915(ppgtt->base.dev))) {
ret = gen8_preallocate_top_level_pdps(ppgtt); ret = gen8_preallocate_top_level_pdps(ppgtt);
if (ret) if (ret)
goto free_scratch; goto free_scratch;
} }
} }
if (intel_vgpu_active(ppgtt->base.dev)) if (intel_vgpu_active(to_i915(ppgtt->base.dev)))
gen8_ppgtt_notify_vgt(ppgtt, true); gen8_ppgtt_notify_vgt(ppgtt, true);
return 0; return 0;
@ -1821,20 +1839,19 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
enum i915_cache_level cache_level, u32 flags) enum i915_cache_level cache_level, u32 flags)
{ {
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
gen6_pte_t *pt_vaddr;
unsigned first_entry = start >> PAGE_SHIFT; unsigned first_entry = start >> PAGE_SHIFT;
unsigned act_pt = first_entry / GEN6_PTES; unsigned act_pt = first_entry / GEN6_PTES;
unsigned act_pte = first_entry % GEN6_PTES; unsigned act_pte = first_entry % GEN6_PTES;
struct sg_page_iter sg_iter; gen6_pte_t *pt_vaddr = NULL;
struct sgt_iter sgt_iter;
dma_addr_t addr;
pt_vaddr = NULL; for_each_sgt_dma(addr, sgt_iter, pages) {
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
if (pt_vaddr == NULL) if (pt_vaddr == NULL)
pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]); pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
pt_vaddr[act_pte] = pt_vaddr[act_pte] =
vm->pte_encode(sg_page_iter_dma_address(&sg_iter), vm->pte_encode(addr, cache_level, true, flags);
cache_level, true, flags);
if (++act_pte == GEN6_PTES) { if (++act_pte == GEN6_PTES) {
kunmap_px(ppgtt, pt_vaddr); kunmap_px(ppgtt, pt_vaddr);
@ -1843,6 +1860,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
act_pte = 0; act_pte = 0;
} }
} }
if (pt_vaddr) if (pt_vaddr)
kunmap_px(ppgtt, pt_vaddr); kunmap_px(ppgtt, pt_vaddr);
} }
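
The rewritten loop walks DMA addresses with for_each_sgt_dma() instead of the heavier sg_page_iter machinery: the iterator expands each contiguous (address, length) segment into page-sized steps. A simplified, self-contained version of such an iterator macro over a plain segment array (toy types, not the i915 sgt_iter implementation):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct segment {
    uint64_t dma;          /* start of a contiguous DMA chunk */
    unsigned int length;   /* chunk length in bytes, page aligned here */
};

struct seg_iter {
    const struct segment *seg;
    const struct segment *end;
    unsigned int offset;   /* byte offset within the current segment */
};

/* Yield one page-sized DMA address per iteration across all segments. */
#define for_each_seg_dma(addr, it, segs, count)                               \
    for ((it) = (struct seg_iter){ (segs), (segs) + (count), 0 };             \
         (it).seg < (it).end && ((addr) = (it).seg->dma + (it).offset, 1);    \
         (it).offset += PAGE_SIZE,                                            \
             (void)((it).offset >= (it).seg->length &&                        \
                    ((it).seg++, (it).offset = 0, 1)))

int main(void)
{
    const struct segment table[] = {
        { 0x100000, 2 * PAGE_SIZE },
        { 0x800000, 1 * PAGE_SIZE },
    };
    struct seg_iter it;
    uint64_t addr;

    for_each_seg_dma(addr, it, table, 2)
        printf("pte -> 0x%llx\n", (unsigned long long)addr);
    return 0;
}
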
@ -2064,7 +2082,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
} else } else
BUG(); BUG();
if (intel_vgpu_active(dev)) if (intel_vgpu_active(dev_priv))
ppgtt->switch_mm = vgpu_mm_switch; ppgtt->switch_mm = vgpu_mm_switch;
ret = gen6_ppgtt_alloc(ppgtt); ret = gen6_ppgtt_alloc(ppgtt);
@ -2140,7 +2158,7 @@ static void gtt_write_workarounds(struct drm_device *dev)
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
} }
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0; int ret = 0;
@ -2179,20 +2197,6 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
return 0; return 0;
} }
int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
{
struct drm_i915_private *dev_priv = req->i915;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
if (i915.enable_execlists)
return 0;
if (!ppgtt)
return 0;
return ppgtt->switch_mm(ppgtt, req);
}
struct i915_hw_ppgtt * struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv) i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
{ {
@ -2275,12 +2279,11 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
dev_priv->mm.interruptible = interruptible; dev_priv->mm.interruptible = interruptible;
} }
void i915_check_and_clear_faults(struct drm_device *dev) void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
{ {
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
if (INTEL_INFO(dev)->gen < 6) if (INTEL_INFO(dev_priv)->gen < 6)
return; return;
for_each_engine(engine, dev_priv) { for_each_engine(engine, dev_priv) {
@ -2324,7 +2327,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 6) if (INTEL_INFO(dev)->gen < 6)
return; return;
i915_check_and_clear_faults(dev); i915_check_and_clear_faults(dev_priv);
ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total, ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
true); true);
@ -2358,23 +2361,21 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
enum i915_cache_level level, u32 unused) enum i915_cache_level level, u32 unused)
{ {
struct drm_i915_private *dev_priv = to_i915(vm->dev); struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT; struct sgt_iter sgt_iter;
gen8_pte_t __iomem *gtt_entries = gen8_pte_t __iomem *gtt_entries;
(gen8_pte_t __iomem *)ggtt->gsm + first_entry; gen8_pte_t gtt_entry;
int i = 0; dma_addr_t addr;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0; /* shut up gcc */
int rpm_atomic_seq; int rpm_atomic_seq;
int i = 0;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
addr = sg_dma_address(sg_iter.sg) +
(sg_iter.sg_pgoffset << PAGE_SHIFT); for_each_sgt_dma(addr, sgt_iter, st) {
gen8_set_pte(&gtt_entries[i], gtt_entry = gen8_pte_encode(addr, level, true);
gen8_pte_encode(addr, level, true)); gen8_set_pte(&gtt_entries[i++], gtt_entry);
i++;
} }
/* /*
@ -2385,8 +2386,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
* hardware should work, we must keep this posting read for paranoia. * hardware should work, we must keep this posting read for paranoia.
*/ */
if (i != 0) if (i != 0)
WARN_ON(readq(&gtt_entries[i-1]) WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry);
!= gen8_pte_encode(addr, level, true));
/* This next bit makes the above posting read even more important. We /* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates * want to flush the TLBs only after we're certain all the PTE updates
@ -2436,21 +2436,21 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
enum i915_cache_level level, u32 flags) enum i915_cache_level level, u32 flags)
{ {
struct drm_i915_private *dev_priv = to_i915(vm->dev); struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT; struct sgt_iter sgt_iter;
gen6_pte_t __iomem *gtt_entries = gen6_pte_t __iomem *gtt_entries;
(gen6_pte_t __iomem *)ggtt->gsm + first_entry; gen6_pte_t gtt_entry;
int i = 0; dma_addr_t addr;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0;
int rpm_atomic_seq; int rpm_atomic_seq;
int i = 0;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
addr = sg_page_iter_dma_address(&sg_iter);
iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]); for_each_sgt_dma(addr, sgt_iter, st) {
i++; gtt_entry = vm->pte_encode(addr, level, true, flags);
iowrite32(gtt_entry, &gtt_entries[i++]);
} }
/* XXX: This serves as a posting read to make sure that the PTE has /* XXX: This serves as a posting read to make sure that the PTE has
@ -2459,10 +2459,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
* of NUMA access patterns. Therefore, even with the way we assume * of NUMA access patterns. Therefore, even with the way we assume
* hardware should work, we must keep this posting read for paranoia. * hardware should work, we must keep this posting read for paranoia.
*/ */
if (i != 0) { if (i != 0)
unsigned long gtt = readl(&gtt_entries[i-1]); WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry);
WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
}
/* This next bit makes the above posting read even more important. We /* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates * want to flush the TLBs only after we're certain all the PTE updates
@ -2474,13 +2472,20 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
} }
static void nop_clear_range(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
bool use_scratch)
{
}
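
Installing nop_clear_range() lets every caller keep invoking vm->clear_range() unconditionally while the probe code decides once, at init time, whether clearing has to do anything at all (below, the real clear is only kept when full PPGTT is not in use). A small sketch of that function-pointer dispatch with a no-op default (illustrative names, not the driver's vtable):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct addr_space {
    uint8_t backing[16];
    /* Chosen once at init; call sites never branch on the mode. */
    void (*clear_range)(struct addr_space *vm, unsigned int start, unsigned int len);
};

static void nop_clear_range(struct addr_space *vm, unsigned int start, unsigned int len)
{
    /* Intentionally empty: clearing is unnecessary in this configuration. */
    (void)vm; (void)start; (void)len;
}

static void real_clear_range(struct addr_space *vm, unsigned int start, unsigned int len)
{
    memset(vm->backing + start, 0, len);
}

static void init_addr_space(struct addr_space *vm, int needs_clearing)
{
    memset(vm->backing, 0xff, sizeof(vm->backing));
    vm->clear_range = needs_clearing ? real_clear_range : nop_clear_range;
}

int main(void)
{
    struct addr_space vm;

    init_addr_space(&vm, 1);
    vm.clear_range(&vm, 0, 8);    /* identical call regardless of the mode chosen */
    printf("byte 0 = %#x, byte 8 = %#x\n",
           (unsigned int)vm.backing[0], (unsigned int)vm.backing[8]);
    return 0;
}
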
static void gen8_ggtt_clear_range(struct i915_address_space *vm, static void gen8_ggtt_clear_range(struct i915_address_space *vm,
uint64_t start, uint64_t start,
uint64_t length, uint64_t length,
bool use_scratch) bool use_scratch)
{ {
struct drm_i915_private *dev_priv = to_i915(vm->dev); struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT; unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT;
gen8_pte_t scratch_pte, __iomem *gtt_base = gen8_pte_t scratch_pte, __iomem *gtt_base =
@ -2512,7 +2517,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
bool use_scratch) bool use_scratch)
{ {
struct drm_i915_private *dev_priv = to_i915(vm->dev); struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT; unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT;
gen6_pte_t scratch_pte, __iomem *gtt_base = gen6_pte_t scratch_pte, __iomem *gtt_base =
@ -2727,7 +2732,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
i915_address_space_init(&ggtt->base, dev_priv); i915_address_space_init(&ggtt->base, dev_priv);
ggtt->base.total += PAGE_SIZE; ggtt->base.total += PAGE_SIZE;
if (intel_vgpu_active(dev)) { if (intel_vgpu_active(dev_priv)) {
ret = intel_vgt_balloon(dev); ret = intel_vgt_balloon(dev);
if (ret) if (ret)
return ret; return ret;
@ -2831,7 +2836,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev)
i915_gem_cleanup_stolen(dev); i915_gem_cleanup_stolen(dev);
if (drm_mm_initialized(&ggtt->base.mm)) { if (drm_mm_initialized(&ggtt->base.mm)) {
if (intel_vgpu_active(dev)) if (intel_vgpu_active(dev_priv))
intel_vgt_deballoon(); intel_vgt_deballoon();
drm_mm_takedown(&ggtt->base.mm); drm_mm_takedown(&ggtt->base.mm);
@@ -3069,14 +3074,17 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
	ret = ggtt_probe_common(dev, ggtt->size);

-	ggtt->base.clear_range = gen8_ggtt_clear_range;
-	if (IS_CHERRYVIEW(dev_priv))
-		ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
-	else
-		ggtt->base.insert_entries = gen8_ggtt_insert_entries;
	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;
+	ggtt->base.clear_range = nop_clear_range;
+	if (!USES_FULL_PPGTT(dev_priv))
+		ggtt->base.clear_range = gen8_ggtt_clear_range;
+
+	ggtt->base.insert_entries = gen8_ggtt_insert_entries;
+	if (IS_CHERRYVIEW(dev_priv))
+		ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;

	return ret;
}
@@ -3219,14 +3227,6 @@ int i915_ggtt_init_hw(struct drm_device *dev)
	if (intel_iommu_gfx_mapped)
		DRM_INFO("VT-d active for gfx access\n");
#endif

-	/*
-	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
-	 * user's requested state against the hardware/driver capabilities. We
-	 * do this now so that we can print out any log messages once rather
-	 * than every time we check intel_enable_ppgtt().
-	 */
-	i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
-	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);

	return 0;
@@ -3250,9 +3250,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
-	bool flush;

-	i915_check_and_clear_faults(dev);
+	i915_check_and_clear_faults(dev_priv);

	/* First fill our portion of the GTT with scratch pages */
	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
@@ -3260,19 +3259,16 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)

	/* Cache flush objects bound into GGTT and rebind them. */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		flush = false;
		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			if (vma->vm != &ggtt->base)
				continue;

			WARN_ON(i915_vma_bind(vma, obj->cache_level,
					      PIN_UPDATE));
-			flush = true;
		}

-		if (flush)
-			i915_gem_clflush_object(obj, obj->pin_display);
+		if (obj->pin_display)
+			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
	}

	if (INTEL_INFO(dev)->gen >= 8) {
@@ -3398,9 +3394,11 @@ static struct sg_table *
intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
			  struct drm_i915_gem_object *obj)
{
+	const size_t n_pages = obj->base.size / PAGE_SIZE;
	unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height;
	unsigned int size_pages_uv;
-	struct sg_page_iter sg_iter;
+	struct sgt_iter sgt_iter;
+	dma_addr_t dma_addr;
	unsigned long i;
	dma_addr_t *page_addr_list;
	struct sg_table *st;
@@ -3409,7 +3407,7 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
	int ret = -ENOMEM;

	/* Allocate a temporary list of source pages for random access. */
-	page_addr_list = drm_malloc_gfp(obj->base.size / PAGE_SIZE,
+	page_addr_list = drm_malloc_gfp(n_pages,
					sizeof(dma_addr_t),
					GFP_TEMPORARY);
	if (!page_addr_list)
@@ -3432,11 +3430,10 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
	/* Populate source page list from the object. */
	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		page_addr_list[i] = sg_page_iter_dma_address(&sg_iter);
-		i++;
-	}
+	for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
+		page_addr_list[i++] = dma_addr;
+	GEM_BUG_ON(i != n_pages);

	st->nents = 0;
	sg = st->sgl;
@@ -3634,3 +3631,29 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
		return obj->base.size;
	}
}
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
void __iomem *ptr;
lockdep_assert_held(&vma->vm->dev->struct_mutex);
if (WARN_ON(!vma->obj->map_and_fenceable))
return ERR_PTR(-ENODEV);
GEM_BUG_ON(!vma->is_ggtt);
GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0);
ptr = vma->iomap;
if (ptr == NULL) {
ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable,
vma->node.start,
vma->node.size);
if (ptr == NULL)
return ERR_PTR(-ENOMEM);
vma->iomap = ptr;
}
vma->pin_count++;
return ptr;
}
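
For readers following along, here is a minimal usage sketch of the new helper pair (illustration only, not part of the commit; the helper name write_dword_through_aperture() is made up, and it assumes the VMA is already bound into the mappable GGTT and that struct_mutex is held):

/* Sketch only: the caller holds dev->struct_mutex and vma is pinned into
 * the mappable GGTT, as i915_vma_pin_iomap() requires.
 */
static int write_dword_through_aperture(struct i915_vma *vma,
					unsigned long offset, u32 value)
{
	void __iomem *ptr;

	ptr = i915_vma_pin_iomap(vma);	/* takes an extra pin on the vma */
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	writel(value, ptr + offset);	/* WC write through the GGTT aperture */

	i915_vma_unpin_iomap(vma);	/* drop the pin; the mapping stays cached */
	return 0;
}

The cached vma->iomap is reused on later pins and is only torn down together with the VMA, which is what the vmap shrinker notifier further below relies on when it unbinds VMAs that still hold an iomap.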

@@ -34,6 +34,8 @@
#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__

+#include <linux/io-mapping.h>
+
struct drm_i915_file_private;

typedef uint32_t gen6_pte_t;
@@ -175,6 +177,7 @@ struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
+	void __iomem *iomap;

	/** Flags and address space this VMA is bound to */
#define GLOBAL_BIND	(1<<0)
@@ -518,9 +521,7 @@ int i915_ggtt_enable_hw(struct drm_device *dev);
void i915_gem_init_ggtt(struct drm_device *dev);
void i915_ggtt_cleanup_hw(struct drm_device *dev);

-int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_ppgtt_init_hw(struct drm_device *dev);
-int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
					struct drm_i915_file_private *fpriv);
@@ -535,7 +536,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
	kref_put(&ppgtt->ref, i915_ppgtt_release);
}

-void i915_check_and_clear_faults(struct drm_device *dev);
+void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
@@ -560,4 +561,36 @@ size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
		    const struct i915_ggtt_view *view);
/**
* i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
* @vma: VMA to iomap
*
* The passed in VMA has to be pinned in the global GTT mappable region.
* An extra pinning of the VMA is acquired for the return iomapping,
* the caller must call i915_vma_unpin_iomap to relinquish the pinning
* after the iomapping is no longer required.
*
* Callers must hold the struct_mutex.
*
* Returns a valid iomapped pointer or ERR_PTR.
*/
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
/**
* i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_iomap
* @vma: VMA to unpin
*
* Unpins the previously iomapped VMA from i915_vma_pin_iomap().
*
* Callers must hold the struct_mutex. This function is only valid to be
* called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
*/
static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
{
lockdep_assert_held(&vma->vm->dev->struct_mutex);
GEM_BUG_ON(vma->pin_count == 0);
GEM_BUG_ON(vma->iomap == NULL);
vma->pin_count--;
}
#endif

@@ -29,7 +29,7 @@
#include "intel_renderstate.h"

static const struct intel_renderstate_rodata *
-render_state_get_rodata(struct drm_device *dev, const int gen)
+render_state_get_rodata(const int gen)
{
	switch (gen) {
	case 6:
@@ -45,21 +45,22 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
	return NULL;
}

-static int render_state_init(struct render_state *so, struct drm_device *dev)
+static int render_state_init(struct render_state *so,
+			     struct drm_i915_private *dev_priv)
{
	int ret;

-	so->gen = INTEL_INFO(dev)->gen;
-	so->rodata = render_state_get_rodata(dev, so->gen);
+	so->gen = INTEL_GEN(dev_priv);
+	so->rodata = render_state_get_rodata(so->gen);
	if (so->rodata == NULL)
		return 0;

	if (so->rodata->batch_items * 4 > 4096)
		return -EINVAL;

-	so->obj = i915_gem_alloc_object(dev, 4096);
-	if (so->obj == NULL)
-		return -ENOMEM;
+	so->obj = i915_gem_object_create(dev_priv->dev, 4096);
+	if (IS_ERR(so->obj))
+		return PTR_ERR(so->obj);

	ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
	if (ret)
@@ -177,7 +178,7 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
	if (WARN_ON(engine->id != RCS))
		return -ENOENT;

-	ret = render_state_init(so, engine->dev);
+	ret = render_state_init(so, engine->i915);
	if (ret)
		return ret;

@@ -131,7 +131,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
	unsigned long count = 0;

	trace_i915_gem_shrink(dev_priv, target, flags);
-	i915_gem_retire_requests(dev_priv->dev);
+	i915_gem_retire_requests(dev_priv);
+
+	/*
+	 * Unbinding of objects will require HW access; Let us not wake the
+	 * device just to recover a little memory. If absolutely necessary,
+	 * we will force the wake during oom-notifier.
+	 */
+	if ((flags & I915_SHRINK_BOUND) &&
+	    !intel_runtime_pm_get_if_in_use(dev_priv))
+		flags &= ~I915_SHRINK_BOUND;

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
@@ -197,7 +206,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
		list_splice(&still_in_list, phase->list);
	}

-	i915_gem_retire_requests(dev_priv->dev);
+	if (flags & I915_SHRINK_BOUND)
+		intel_runtime_pm_put(dev_priv);
+
+	i915_gem_retire_requests(dev_priv);

	return count;
}
@@ -345,7 +357,9 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
		return NOTIFY_DONE;

+	intel_runtime_pm_get(dev_priv);
	freed_pages = i915_gem_shrink_all(dev_priv);
+	intel_runtime_pm_put(dev_priv);
/* Because we may be allocating inside our own driver, we cannot /* Because we may be allocating inside our own driver, we cannot
* assert that there are no objects with pinned pages that are not * assert that there are no objects with pinned pages that are not
@@ -386,17 +400,35 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct shrinker_lock_uninterruptible slu;
-	unsigned long freed_pages;
+	struct i915_vma *vma, *next;
+	unsigned long freed_pages = 0;
+	int ret;

	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
		return NOTIFY_DONE;

-	freed_pages = i915_gem_shrink(dev_priv, -1UL,
-				      I915_SHRINK_BOUND |
-				      I915_SHRINK_UNBOUND |
-				      I915_SHRINK_ACTIVE |
-				      I915_SHRINK_VMAPS);
+	/* Force everything onto the inactive lists */
+	ret = i915_gpu_idle(dev_priv->dev);
+	if (ret)
+		goto out;
+
+	intel_runtime_pm_get(dev_priv);
+	freed_pages += i915_gem_shrink(dev_priv, -1UL,
+				       I915_SHRINK_BOUND |
+				       I915_SHRINK_UNBOUND |
+				       I915_SHRINK_ACTIVE |
+				       I915_SHRINK_VMAPS);
+	intel_runtime_pm_put(dev_priv);
+
+	/* We also want to clear any cached iomaps as they wrap vmap */
+	list_for_each_entry_safe(vma, next,
+				 &dev_priv->ggtt.base.inactive_list, vm_link) {
+		unsigned long count = vma->node.size >> PAGE_SHIFT;
+		if (vma->iomap && i915_vma_unbind(vma) == 0)
+			freed_pages += count;
+	}
+
+out:
	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
	*(unsigned long *)ptr += freed_pages;

@ -56,7 +56,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
/* See the comment at the drm_mm_init() call for more about this check. /* See the comment at the drm_mm_init() call for more about this check.
* WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */ * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096) if (IS_GEN8(dev_priv) && start < 4096)
start = 4096; start = 4096;
mutex_lock(&dev_priv->mm.stolen_lock); mutex_lock(&dev_priv->mm.stolen_lock);
@ -109,9 +109,9 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 3) { if (INTEL_INFO(dev)->gen >= 3) {
u32 bsm; u32 bsm;
pci_read_config_dword(dev->pdev, BSM, &bsm); pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm);
base = bsm & BSM_MASK; base = bsm & INTEL_BSM_MASK;
} else if (IS_I865G(dev)) { } else if (IS_I865G(dev)) {
u16 toud = 0; u16 toud = 0;

@ -125,7 +125,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
if (INTEL_INFO(obj->base.dev)->gen >= 4) if (INTEL_INFO(obj->base.dev)->gen >= 4)
return true; return true;
if (INTEL_INFO(obj->base.dev)->gen == 3) { if (IS_GEN3(obj->base.dev)) {
if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
return false; return false;
} else { } else {
@ -229,7 +229,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
*/ */
if (obj->map_and_fenceable && if (obj->map_and_fenceable &&
!i915_gem_object_fence_ok(obj, args->tiling_mode)) !i915_gem_object_fence_ok(obj, args->tiling_mode))
ret = i915_gem_object_ggtt_unbind(obj); ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
if (ret == 0) { if (ret == 0) {
if (obj->pages && if (obj->pages &&

@ -706,7 +706,8 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
static void static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{ {
struct sg_page_iter sg_iter; struct sgt_iter sgt_iter;
struct page *page;
BUG_ON(obj->userptr.work != NULL); BUG_ON(obj->userptr.work != NULL);
__i915_gem_userptr_set_active(obj, false); __i915_gem_userptr_set_active(obj, false);
@ -716,9 +717,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
i915_gem_gtt_finish_object(obj); i915_gem_gtt_finish_object(obj);
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { for_each_sgt_page(page, sgt_iter, obj->pages) {
struct page *page = sg_page_iter_page(&sg_iter);
if (obj->dirty) if (obj->dirty)
set_page_dirty(page); set_page_dirty(page);
@ -855,11 +854,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
return 0; return 0;
} }
int void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
i915_gem_init_userptr(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = to_i915(dev);
mutex_init(&dev_priv->mm_lock); mutex_init(&dev_priv->mm_lock);
hash_init(dev_priv->mm_structs); hash_init(dev_priv->mm_structs);
return 0;
} }

@ -411,7 +411,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
} }
if (INTEL_INFO(dev)->gen == 7) if (IS_GEN7(dev))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
for (i = 0; i < ARRAY_SIZE(error->ring); i++) for (i = 0; i < ARRAY_SIZE(error->ring); i++)
@ -824,19 +824,18 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
return error_code; return error_code;
} }
static void i915_gem_record_fences(struct drm_device *dev, static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error) struct drm_i915_error_state *error)
{ {
struct drm_i915_private *dev_priv = dev->dev_private;
int i; int i;
if (IS_GEN3(dev) || IS_GEN2(dev)) { if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
for (i = 0; i < dev_priv->num_fence_regs; i++) for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ(FENCE_REG(i)); error->fence[i] = I915_READ(FENCE_REG(i));
} else if (IS_GEN5(dev) || IS_GEN4(dev)) { } else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
for (i = 0; i < dev_priv->num_fence_regs; i++) for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_965_LO(i)); error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
} else if (INTEL_INFO(dev)->gen >= 6) { } else if (INTEL_GEN(dev_priv) >= 6) {
for (i = 0; i < dev_priv->num_fence_regs; i++) for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i)); error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
} }
@ -851,7 +850,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
struct intel_engine_cs *to; struct intel_engine_cs *to;
enum intel_engine_id id; enum intel_engine_id id;
if (!i915_semaphore_is_enabled(dev_priv->dev)) if (!i915_semaphore_is_enabled(dev_priv))
return; return;
if (!error->semaphore_obj) if (!error->semaphore_obj)
@ -893,31 +892,29 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
} }
} }
static void i915_record_ring_state(struct drm_device *dev, static void i915_record_ring_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error, struct drm_i915_error_state *error,
struct intel_engine_cs *engine, struct intel_engine_cs *engine,
struct drm_i915_error_ring *ering) struct drm_i915_error_ring *ering)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; if (INTEL_GEN(dev_priv) >= 6) {
if (INTEL_INFO(dev)->gen >= 6) {
ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base)); ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
ering->fault_reg = I915_READ(RING_FAULT_REG(engine)); ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
if (INTEL_INFO(dev)->gen >= 8) if (INTEL_GEN(dev_priv) >= 8)
gen8_record_semaphore_state(dev_priv, error, engine, gen8_record_semaphore_state(dev_priv, error, engine,
ering); ering);
else else
gen6_record_semaphore_state(dev_priv, engine, ering); gen6_record_semaphore_state(dev_priv, engine, ering);
} }
if (INTEL_INFO(dev)->gen >= 4) { if (INTEL_GEN(dev_priv) >= 4) {
ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base)); ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base)); ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base)); ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
ering->instps = I915_READ(RING_INSTPS(engine->mmio_base)); ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base)); ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
if (INTEL_INFO(dev)->gen >= 8) { if (INTEL_GEN(dev_priv) >= 8) {
ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32; ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32; ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
} }
@ -939,10 +936,10 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->tail = I915_READ_TAIL(engine); ering->tail = I915_READ_TAIL(engine);
ering->ctl = I915_READ_CTL(engine); ering->ctl = I915_READ_CTL(engine);
if (I915_NEED_GFX_HWS(dev)) { if (I915_NEED_GFX_HWS(dev_priv)) {
i915_reg_t mmio; i915_reg_t mmio;
if (IS_GEN7(dev)) { if (IS_GEN7(dev_priv)) {
switch (engine->id) { switch (engine->id) {
default: default:
case RCS: case RCS:
@ -958,7 +955,7 @@ static void i915_record_ring_state(struct drm_device *dev,
mmio = VEBOX_HWS_PGA_GEN7; mmio = VEBOX_HWS_PGA_GEN7;
break; break;
} }
} else if (IS_GEN6(engine->dev)) { } else if (IS_GEN6(engine->i915)) {
mmio = RING_HWS_PGA_GEN6(engine->mmio_base); mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
} else { } else {
/* XXX: gen8 returns to sanity */ /* XXX: gen8 returns to sanity */
@ -971,18 +968,18 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->hangcheck_score = engine->hangcheck.score; ering->hangcheck_score = engine->hangcheck.score;
ering->hangcheck_action = engine->hangcheck.action; ering->hangcheck_action = engine->hangcheck.action;
if (USES_PPGTT(dev)) { if (USES_PPGTT(dev_priv)) {
int i; int i;
ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine)); ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
if (IS_GEN6(dev)) if (IS_GEN6(dev_priv))
ering->vm_info.pp_dir_base = ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE_READ(engine)); I915_READ(RING_PP_DIR_BASE_READ(engine));
else if (IS_GEN7(dev)) else if (IS_GEN7(dev_priv))
ering->vm_info.pp_dir_base = ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE(engine)); I915_READ(RING_PP_DIR_BASE(engine));
else if (INTEL_INFO(dev)->gen >= 8) else if (INTEL_GEN(dev_priv) >= 8)
for (i = 0; i < 4; i++) { for (i = 0; i < 4; i++) {
ering->vm_info.pdp[i] = ering->vm_info.pdp[i] =
I915_READ(GEN8_RING_PDP_UDW(engine, i)); I915_READ(GEN8_RING_PDP_UDW(engine, i));
@ -998,7 +995,7 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
struct drm_i915_error_state *error, struct drm_i915_error_state *error,
struct drm_i915_error_ring *ering) struct drm_i915_error_ring *ering)
{ {
struct drm_i915_private *dev_priv = engine->dev->dev_private; struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
/* Currently render ring is the only HW context user */ /* Currently render ring is the only HW context user */
@ -1016,10 +1013,9 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
} }
} }
static void i915_gem_record_rings(struct drm_device *dev, static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error) struct drm_i915_error_state *error)
{ {
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_request *request; struct drm_i915_gem_request *request;
int i, count; int i, count;
@ -1030,12 +1026,12 @@ static void i915_gem_record_rings(struct drm_device *dev,
error->ring[i].pid = -1; error->ring[i].pid = -1;
if (engine->dev == NULL) if (!intel_engine_initialized(engine))
continue; continue;
error->ring[i].valid = true; error->ring[i].valid = true;
i915_record_ring_state(dev, error, engine, &error->ring[i]); i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
request = i915_gem_find_active_request(engine); request = i915_gem_find_active_request(engine);
if (request) { if (request) {
@ -1301,15 +1297,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
error->eir = I915_READ(EIR); error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER); error->pgtbl_er = I915_READ(PGTBL_ER);
i915_get_extra_instdone(dev, error->extra_instdone); i915_get_extra_instdone(dev_priv, error->extra_instdone);
} }
static void i915_error_capture_msg(struct drm_device *dev, static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error, struct drm_i915_error_state *error,
u32 engine_mask, u32 engine_mask,
const char *error_msg) const char *error_msg)
{ {
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ecode; u32 ecode;
int ring_id = -1, len; int ring_id = -1, len;
@ -1317,7 +1312,7 @@ static void i915_error_capture_msg(struct drm_device *dev,
len = scnprintf(error->error_msg, sizeof(error->error_msg), len = scnprintf(error->error_msg, sizeof(error->error_msg),
"GPU HANG: ecode %d:%d:0x%08x", "GPU HANG: ecode %d:%d:0x%08x",
INTEL_INFO(dev)->gen, ring_id, ecode); INTEL_GEN(dev_priv), ring_id, ecode);
if (ring_id != -1 && error->ring[ring_id].pid != -1) if (ring_id != -1 && error->ring[ring_id].pid != -1)
len += scnprintf(error->error_msg + len, len += scnprintf(error->error_msg + len,
@ -1352,11 +1347,11 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
* out a structure which becomes available in debugfs for user level tools * out a structure which becomes available in debugfs for user level tools
* to pick up. * to pick up.
*/ */
void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, void i915_capture_error_state(struct drm_i915_private *dev_priv,
u32 engine_mask,
const char *error_msg) const char *error_msg)
{ {
static bool warned; static bool warned;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error; struct drm_i915_error_state *error;
unsigned long flags; unsigned long flags;
@ -1372,15 +1367,15 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
i915_capture_gen_state(dev_priv, error); i915_capture_gen_state(dev_priv, error);
i915_capture_reg_state(dev_priv, error); i915_capture_reg_state(dev_priv, error);
i915_gem_capture_buffers(dev_priv, error); i915_gem_capture_buffers(dev_priv, error);
i915_gem_record_fences(dev, error); i915_gem_record_fences(dev_priv, error);
i915_gem_record_rings(dev, error); i915_gem_record_rings(dev_priv, error);
do_gettimeofday(&error->time); do_gettimeofday(&error->time);
error->overlay = intel_overlay_capture_error_state(dev); error->overlay = intel_overlay_capture_error_state(dev_priv);
error->display = intel_display_capture_error_state(dev); error->display = intel_display_capture_error_state(dev_priv);
i915_error_capture_msg(dev, error, engine_mask, error_msg); i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
DRM_INFO("%s\n", error->error_msg); DRM_INFO("%s\n", error->error_msg);
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
@ -1400,7 +1395,7 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n"); DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n"); DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index); DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev_priv->dev->primary->index);
warned = true; warned = true;
} }
} }
@ -1450,17 +1445,17 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
} }
/* NB: please notice the memset */ /* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone) void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
uint32_t *instdone)
{ {
struct drm_i915_private *dev_priv = dev->dev_private;
memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
if (IS_GEN2(dev) || IS_GEN3(dev)) if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
instdone[0] = I915_READ(GEN2_INSTDONE); instdone[0] = I915_READ(GEN2_INSTDONE);
else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE)); instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
instdone[1] = I915_READ(GEN4_INSTDONE1); instdone[1] = I915_READ(GEN4_INSTDONE1);
} else if (INTEL_INFO(dev)->gen >= 7) { } else if (INTEL_GEN(dev_priv) >= 7) {
instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE)); instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
instdone[1] = I915_READ(GEN7_SC_INSTDONE); instdone[1] = I915_READ(GEN7_SC_INSTDONE);
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);

@ -67,11 +67,11 @@
#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */ #define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */
#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4) #define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4)
/* Defines WOPCM space available to GuC firmware */
#define GUC_WOPCM_SIZE _MMIO(0xc050) #define GUC_WOPCM_SIZE _MMIO(0xc050)
#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */
/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */ /* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
#define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE) #define GUC_WOPCM_TOP (0x80 << 12) /* 512KB */
#define BXT_GUC_WOPCM_RC6_RESERVED (0x10 << 12) /* 64KB */
#define GEN8_GT_PM_CONFIG _MMIO(0x138140) #define GEN8_GT_PM_CONFIG _MMIO(0x138140)
#define GEN9LP_GT_PM_CONFIG _MMIO(0x138140) #define GEN9LP_GT_PM_CONFIG _MMIO(0x138140)

@ -158,8 +158,7 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE; data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
/* WaRsDisableCoarsePowerGating:skl,bxt */ /* WaRsDisableCoarsePowerGating:skl,bxt */
if (!intel_enable_rc6(dev) || if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev))
NEEDS_WaRsDisableCoarsePowerGating(dev))
data[1] = 0; data[1] = 0;
else else
/* bit 0 and 1 are for Render and Media domain separately */ /* bit 0 and 1 are for Render and Media domain separately */
@ -587,8 +586,8 @@ static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
obj = i915_gem_alloc_object(dev, size); obj = i915_gem_object_create(dev, size);
if (!obj) if (IS_ERR(obj))
return NULL; return NULL;
if (i915_gem_object_get_pages(obj)) { if (i915_gem_object_get_pages(obj)) {

File diff suppressed because it is too large

@ -58,6 +58,7 @@ struct i915_params i915 __read_mostly = {
.guc_log_level = -1, .guc_log_level = -1,
.enable_dp_mst = true, .enable_dp_mst = true,
.inject_load_failure = 0, .inject_load_failure = 0,
.enable_dpcd_backlight = false,
}; };
module_param_named(modeset, i915.modeset, int, 0400); module_param_named(modeset, i915.modeset, int, 0400);
@ -210,3 +211,6 @@ MODULE_PARM_DESC(enable_dp_mst,
module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400); module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400);
MODULE_PARM_DESC(inject_load_failure, MODULE_PARM_DESC(inject_load_failure,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)"); "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600);
MODULE_PARM_DESC(enable_dpcd_backlight,
"Enable support for DPCD backlight control (default:false)");

@ -61,6 +61,7 @@ struct i915_params {
bool verbose_state_checks; bool verbose_state_checks;
bool nuclear_pageflip; bool nuclear_pageflip;
bool enable_dp_mst; bool enable_dp_mst;
bool enable_dpcd_backlight;
}; };
extern struct i915_params i915 __read_mostly; extern struct i915_params i915 __read_mostly;

@ -2449,6 +2449,8 @@ enum skl_disp_power_wells {
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
#define RAWCLK_FREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6024)
#define _FPA0 0x6040 #define _FPA0 0x6040
#define _FPA1 0x6044 #define _FPA1 0x6044
#define _FPB0 0x6048 #define _FPB0 0x6048
@ -6031,6 +6033,7 @@ enum skl_disp_power_wells {
#define CHICKEN_PAR1_1 _MMIO(0x42080) #define CHICKEN_PAR1_1 _MMIO(0x42080)
#define DPA_MASK_VBLANK_SRD (1 << 15) #define DPA_MASK_VBLANK_SRD (1 << 15)
#define FORCE_ARB_IDLE_PLANES (1 << 14) #define FORCE_ARB_IDLE_PLANES (1 << 14)
#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3)
#define _CHICKEN_PIPESL_1_A 0x420b0 #define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4 #define _CHICKEN_PIPESL_1_B 0x420b4
@@ -6089,7 +6092,14 @@ enum skl_disp_power_wells {
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000

#define GEN8_L3SQCREG1 _MMIO(0xB100)
-#define BDW_WA_L3SQCREG1_DEFAULT 0x784000
+/*
+ * Note that on CHV the following has an off-by-one error wrt. to BSpec.
+ * Using the formula in BSpec leads to a hang, while the formula here works
+ * fine and matches the formulas for all other platforms. A BSpec change
+ * request has been filed to clarify this.
+ */
+#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19)
+#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14)

#define GEN7_L3CNTLREG1 _MMIO(0xB01C)
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
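
As a side note, a quick worked example of how these credit macros pack a register value (illustration only; the 30/2 split is an arbitrary choice for the example, not a value taken from this diff):

/* Illustrative only: ask for 30 general-priority and 2 high-priority credits.
 * L3_GENERAL_PRIO_CREDITS(30) = (30 >> 1) << 19 = 15 << 19
 * L3_HIGH_PRIO_CREDITS(2)     = (2 >> 1)  << 14 =  1 << 14
 */
u32 l3sqc1 = L3_GENERAL_PRIO_CREDITS(30) | L3_HIGH_PRIO_CREDITS(2);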
@ -7557,14 +7567,15 @@ enum skl_disp_power_wells {
#define CDCLK_FREQ_540 (1<<26) #define CDCLK_FREQ_540 (1<<26)
#define CDCLK_FREQ_337_308 (2<<26) #define CDCLK_FREQ_337_308 (2<<26)
#define CDCLK_FREQ_675_617 (3<<26) #define CDCLK_FREQ_675_617 (3<<26)
#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3<<22) #define BXT_CDCLK_CD2X_DIV_SEL_MASK (3<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_1 (0<<22) #define BXT_CDCLK_CD2X_DIV_SEL_1 (0<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1<<22) #define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22) #define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22) #define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22)
#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20)
#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16) #define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
/* LCPLL_CTL */ /* LCPLL_CTL */
#define LCPLL1_CTL _MMIO(0x46010) #define LCPLL1_CTL _MMIO(0x46010)

@ -43,7 +43,7 @@ static u32 calc_residency(struct drm_device *dev,
u64 units = 128ULL, div = 100000ULL; u64 units = 128ULL, div = 100000ULL;
u32 ret; u32 ret;
if (!intel_enable_rc6(dev)) if (!intel_enable_rc6())
return 0; return 0;
intel_runtime_pm_get(dev_priv); intel_runtime_pm_get(dev_priv);
@ -70,8 +70,7 @@ static u32 calc_residency(struct drm_device *dev,
static ssize_t static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{ {
struct drm_minor *dminor = dev_to_drm_minor(kdev); return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6());
return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
} }
static ssize_t static ssize_t
@ -397,7 +396,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
/* We still need *_set_rps to process the new max_delay and /* We still need *_set_rps to process the new max_delay and
* update the interrupt limits and PMINTRMSK even though * update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */ * frequency request may be unchanged. */
intel_set_rps(dev, val); intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
@ -461,7 +460,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
/* We still need *_set_rps to process the new min_delay and /* We still need *_set_rps to process the new min_delay and
* update the interrupt limits and PMINTRMSK even though * update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */ * frequency request may be unchanged. */
intel_set_rps(dev, val); intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);

@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = from->dev->primary->index; __entry->dev = from->i915->dev->primary->index;
__entry->sync_from = from->id; __entry->sync_from = from->id;
__entry->sync_to = to_req->engine->id; __entry->sync_to = to_req->engine->id;
__entry->seqno = i915_gem_request_get_seqno(req); __entry->seqno = i915_gem_request_get_seqno(req);
@ -486,13 +486,11 @@ TRACE_EVENT(i915_gem_ring_dispatch,
), ),
TP_fast_assign( TP_fast_assign(
struct intel_engine_cs *engine = __entry->dev = req->i915->dev->primary->index;
i915_gem_request_get_engine(req); __entry->ring = req->engine->id;
__entry->dev = engine->dev->primary->index; __entry->seqno = req->seqno;
__entry->ring = engine->id;
__entry->seqno = i915_gem_request_get_seqno(req);
__entry->flags = flags; __entry->flags = flags;
i915_trace_irq_get(engine, req); i915_trace_irq_get(req->engine, req);
), ),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@ -511,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush,
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = req->engine->dev->primary->index; __entry->dev = req->i915->dev->primary->index;
__entry->ring = req->engine->id; __entry->ring = req->engine->id;
__entry->invalidate = invalidate; __entry->invalidate = invalidate;
__entry->flush = flush; __entry->flush = flush;
@ -533,11 +531,9 @@ DECLARE_EVENT_CLASS(i915_gem_request,
), ),
TP_fast_assign( TP_fast_assign(
struct intel_engine_cs *engine = __entry->dev = req->i915->dev->primary->index;
i915_gem_request_get_engine(req); __entry->ring = req->engine->id;
__entry->dev = engine->dev->primary->index; __entry->seqno = req->seqno;
__entry->ring = engine->id;
__entry->seqno = i915_gem_request_get_seqno(req);
), ),
TP_printk("dev=%u, ring=%u, seqno=%u", TP_printk("dev=%u, ring=%u, seqno=%u",
@ -560,7 +556,7 @@ TRACE_EVENT(i915_gem_request_notify,
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = engine->dev->primary->index; __entry->dev = engine->i915->dev->primary->index;
__entry->ring = engine->id; __entry->ring = engine->id;
__entry->seqno = engine->get_seqno(engine); __entry->seqno = engine->get_seqno(engine);
), ),
@ -597,13 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin,
* less desirable. * less desirable.
*/ */
TP_fast_assign( TP_fast_assign(
struct intel_engine_cs *engine = __entry->dev = req->i915->dev->primary->index;
i915_gem_request_get_engine(req); __entry->ring = req->engine->id;
__entry->dev = engine->dev->primary->index; __entry->seqno = req->seqno;
__entry->ring = engine->id;
__entry->seqno = i915_gem_request_get_seqno(req);
__entry->blocking = __entry->blocking =
mutex_is_locked(&engine->dev->struct_mutex); mutex_is_locked(&req->i915->dev->struct_mutex);
), ),
TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s", TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
@ -792,7 +786,7 @@ TRACE_EVENT(switch_mm,
__entry->ring = engine->id; __entry->ring = engine->id;
__entry->to = to; __entry->to = to;
__entry->vm = to->ppgtt? &to->ppgtt->base : NULL; __entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
__entry->dev = engine->dev->primary->index; __entry->dev = engine->i915->dev->primary->index;
), ),
TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p", TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",

@ -58,15 +58,14 @@
* This function is called at the initialization stage, to detect whether * This function is called at the initialization stage, to detect whether
* running on a vGPU. * running on a vGPU.
*/ */
void i915_check_vgpu(struct drm_device *dev) void i915_check_vgpu(struct drm_i915_private *dev_priv)
{ {
struct drm_i915_private *dev_priv = to_i915(dev);
uint64_t magic; uint64_t magic;
uint32_t version; uint32_t version;
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
if (!IS_HASWELL(dev)) if (!IS_HASWELL(dev_priv))
return; return;
magic = __raw_i915_read64(dev_priv, vgtif_reg(magic)); magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
@ -136,7 +135,7 @@ static int vgt_balloon_space(struct drm_mm *mm,
/** /**
* intel_vgt_balloon - balloon out reserved graphics address trunks * intel_vgt_balloon - balloon out reserved graphics address trunks
* @dev: drm device * @dev_priv: i915 device
* *
* This function is called at the initialization stage, to balloon out the * This function is called at the initialization stage, to balloon out the
* graphic address space allocated to other vGPUs, by marking these spaces as * graphic address space allocated to other vGPUs, by marking these spaces as

@ -110,7 +110,7 @@ struct vgt_if {
#define VGT_DRV_DISPLAY_NOT_READY 0 #define VGT_DRV_DISPLAY_NOT_READY 0
#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */ #define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */
extern void i915_check_vgpu(struct drm_device *dev); extern void i915_check_vgpu(struct drm_i915_private *dev_priv);
extern int intel_vgt_balloon(struct drm_device *dev); extern int intel_vgt_balloon(struct drm_device *dev);
extern void intel_vgt_deballoon(void); extern void intel_vgt_deballoon(void);

@ -621,17 +621,11 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
static int i915_audio_component_get_cdclk_freq(struct device *dev) static int i915_audio_component_get_cdclk_freq(struct device *dev)
{ {
struct drm_i915_private *dev_priv = dev_to_i915(dev); struct drm_i915_private *dev_priv = dev_to_i915(dev);
int ret;
if (WARN_ON_ONCE(!HAS_DDI(dev_priv))) if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
return -ENODEV; return -ENODEV;
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO); return dev_priv->cdclk_freq;
ret = dev_priv->display.get_display_clock_speed(dev_priv->dev);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
return ret;
} }
static int i915_audio_component_sync_audio_rate(struct device *dev, static int i915_audio_component_sync_audio_rate(struct device *dev,

@ -318,6 +318,15 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
return; return;
} }
dev_priv->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
if (bdb->version >= 191 &&
get_blocksize(backlight_data) >= sizeof(*backlight_data)) {
const struct bdb_lfp_backlight_control_method *method;
method = &backlight_data->backlight_control[panel_type];
dev_priv->vbt.backlight.type = method->type;
}
dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm; dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
dev_priv->vbt.backlight.min_brightness = entry->min_brightness; dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
@ -763,6 +772,16 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
return; return;
} }
/*
* These fields are introduced from the VBT version 197 onwards,
* so making sure that these bits are set zero in the previous
* versions.
*/
if (dev_priv->vbt.dsi.config->dual_link && bdb->version < 197) {
dev_priv->vbt.dsi.config->dl_dcs_cabc_ports = 0;
dev_priv->vbt.dsi.config->dl_dcs_backlight_ports = 0;
}
/* We have mandatory mipi config blocks. Initialize as generic panel */ /* We have mandatory mipi config blocks. Initialize as generic panel */
dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID; dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
} }

@ -30,6 +30,14 @@
#ifndef _INTEL_BIOS_H_ #ifndef _INTEL_BIOS_H_
#define _INTEL_BIOS_H_ #define _INTEL_BIOS_H_
enum intel_backlight_type {
INTEL_BACKLIGHT_PMIC,
INTEL_BACKLIGHT_LPSS,
INTEL_BACKLIGHT_DISPLAY_DDI,
INTEL_BACKLIGHT_DSI_DCS,
INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE,
};
struct edp_power_seq { struct edp_power_seq {
u16 t1_t3; u16 t1_t3;
u16 t8; u16 t8;
@ -113,7 +121,13 @@ struct mipi_config {
u16 dual_link:2; u16 dual_link:2;
u16 lane_cnt:2; u16 lane_cnt:2;
u16 pixel_overlap:3; u16 pixel_overlap:3;
u16 rsvd3:9; u16 rgb_flip:1;
#define DL_DCS_PORT_A 0x00
#define DL_DCS_PORT_C 0x01
#define DL_DCS_PORT_A_AND_C 0x02
u16 dl_dcs_cabc_ports:2;
u16 dl_dcs_backlight_ports:2;
u16 rsvd3:4;
u16 rsvd4; u16 rsvd4;

@ -41,16 +41,22 @@
* be moved to FW_FAILED. * be moved to FW_FAILED.
*/ */
#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
MODULE_FIRMWARE(I915_CSR_KBL);
#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
#define I915_CSR_SKL "i915/skl_dmc_ver1.bin" #define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
MODULE_FIRMWARE(I915_CSR_SKL);
#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin" #define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
MODULE_FIRMWARE(I915_CSR_BXT);
#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares" #define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
MODULE_FIRMWARE(I915_CSR_SKL);
MODULE_FIRMWARE(I915_CSR_BXT);
#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
#define CSR_MAX_FW_SIZE 0x2FFF #define CSR_MAX_FW_SIZE 0x2FFF
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF #define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
@ -169,12 +175,10 @@ struct stepping_info {
char substepping; char substepping;
}; };
/*
* Kabylake derivated from Skylake H0, so SKL H0
* is the right firmware for KBL A0 (revid 0).
*/
static const struct stepping_info kbl_stepping_info[] = { static const struct stepping_info kbl_stepping_info[] = {
{'H', '0'}, {'I', '0'} {'A', '0'}, {'B', '0'}, {'C', '0'},
{'D', '0'}, {'E', '0'}, {'F', '0'},
{'G', '0'}, {'H', '0'}, {'I', '0'},
}; };
static const struct stepping_info skl_stepping_info[] = { static const struct stepping_info skl_stepping_info[] = {
@ -298,7 +302,9 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
csr->version = css_header->version; csr->version = css_header->version;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { if (IS_KABYLAKE(dev_priv)) {
required_min_version = KBL_CSR_VERSION_REQUIRED;
} else if (IS_SKYLAKE(dev_priv)) {
required_min_version = SKL_CSR_VERSION_REQUIRED; required_min_version = SKL_CSR_VERSION_REQUIRED;
} else if (IS_BROXTON(dev_priv)) { } else if (IS_BROXTON(dev_priv)) {
required_min_version = BXT_CSR_VERSION_REQUIRED; required_min_version = BXT_CSR_VERSION_REQUIRED;
@ -446,7 +452,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (!HAS_CSR(dev_priv)) if (!HAS_CSR(dev_priv))
return; return;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) if (IS_KABYLAKE(dev_priv))
csr->fw_path = I915_CSR_KBL;
else if (IS_SKYLAKE(dev_priv))
csr->fw_path = I915_CSR_SKL; csr->fw_path = I915_CSR_SKL;
else if (IS_BROXTON(dev_priv)) else if (IS_BROXTON(dev_priv))
csr->fw_path = I915_CSR_BXT; csr->fw_path = I915_CSR_BXT;

@ -948,7 +948,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
{ {
struct intel_shared_dpll *pll; struct intel_shared_dpll *pll;
struct intel_dpll_hw_state *state; struct intel_dpll_hw_state *state;
intel_clock_t clock; struct dpll clock;
/* For DDI ports we always use a shared PLL. */ /* For DDI ports we always use a shared PLL. */
if (WARN_ON(dpll == DPLL_ID_PRIVATE)) if (WARN_ON(dpll == DPLL_ID_PRIVATE))

File diff suppressed because it is too large

@ -131,11 +131,6 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe); enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp); static void intel_dp_unset_edid(struct intel_dp *intel_dp);
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
return ~((1 << lane_count) - 1) & 0xf;
}
static int static int
intel_dp_max_link_bw(struct intel_dp *intel_dp) intel_dp_max_link_bw(struct intel_dp *intel_dp)
{ {
@ -775,6 +770,7 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_TIME_OUT_1600us | DP_AUX_CH_CTL_TIME_OUT_1600us |
DP_AUX_CH_CTL_RECEIVE_ERROR | DP_AUX_CH_CTL_RECEIVE_ERROR |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
} }
@ -2460,50 +2456,6 @@ static void vlv_post_disable_dp(struct intel_encoder *encoder)
intel_dp_link_down(intel_dp); intel_dp_link_down(intel_dp);
} }
static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
bool reset)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
enum pipe pipe = crtc->pipe;
uint32_t val;
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
else
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
if (crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
else
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
}
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
val &= ~DPIO_PCS_CLK_SOFT_RESET;
else
val |= DPIO_PCS_CLK_SOFT_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
if (crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
val &= ~DPIO_PCS_CLK_SOFT_RESET;
else
val |= DPIO_PCS_CLK_SOFT_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
}
}
static void chv_post_disable_dp(struct intel_encoder *encoder) static void chv_post_disable_dp(struct intel_encoder *encoder)
{ {
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
@ -2811,266 +2763,38 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
static void vlv_pre_enable_dp(struct intel_encoder *encoder) static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{ {
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); vlv_phy_pre_encoder_enable(encoder);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
u32 val;
mutex_lock(&dev_priv->sb_lock);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
val = 0;
if (pipe)
val |= (1<<21);
else
val &= ~(1<<21);
val |= 0x001000c4;
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
mutex_unlock(&dev_priv->sb_lock);
intel_enable_dp(encoder); intel_enable_dp(encoder);
} }
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder) static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{ {
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
intel_dp_prepare(encoder); intel_dp_prepare(encoder);
/* Program Tx lane resets to default */ vlv_phy_pre_pll_enable(encoder);
mutex_lock(&dev_priv->sb_lock);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
mutex_unlock(&dev_priv->sb_lock);
} }
static void chv_pre_enable_dp(struct intel_encoder *encoder) static void chv_pre_enable_dp(struct intel_encoder *encoder)
{ {
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); chv_phy_pre_encoder_enable(encoder);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
int data, i, stagger;
u32 val;
mutex_lock(&dev_priv->sb_lock);
/* allow hardware to manage TX FIFO reset source */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
if (intel_crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
}
/* Program Tx lane latency optimal setting*/
for (i = 0; i < intel_crtc->config->lane_count; i++) {
/* Set the upar bit */
if (intel_crtc->config->lane_count == 1)
data = 0x0;
else
data = (i == 1) ? 0x0 : 0x1;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
data << DPIO_UPAR_SHIFT);
}
/* Data lane stagger programming */
if (intel_crtc->config->port_clock > 270000)
stagger = 0x18;
else if (intel_crtc->config->port_clock > 135000)
stagger = 0xd;
else if (intel_crtc->config->port_clock > 67500)
stagger = 0x7;
else if (intel_crtc->config->port_clock > 33750)
stagger = 0x4;
else
stagger = 0x2;
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
if (intel_crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
}
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
DPIO_TX1_STAGGER_MASK(0x1f) |
DPIO_TX1_STAGGER_MULT(6) |
DPIO_TX2_STAGGER_MULT(0));
if (intel_crtc->config->lane_count > 2) {
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
DPIO_TX1_STAGGER_MASK(0x1f) |
DPIO_TX1_STAGGER_MULT(7) |
DPIO_TX2_STAGGER_MULT(5));
}
/* Deassert data lane reset */
chv_data_lane_soft_reset(encoder, false);
mutex_unlock(&dev_priv->sb_lock);
intel_enable_dp(encoder); intel_enable_dp(encoder);
/* Second common lane will stay alive on its own now */ /* Second common lane will stay alive on its own now */
if (dport->release_cl2_override) { chv_phy_release_cl2_override(encoder);
chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
dport->release_cl2_override = false;
}
} }
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder) static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{ {
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
unsigned int lane_mask =
intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
u32 val;
intel_dp_prepare(encoder); intel_dp_prepare(encoder);
/* chv_phy_pre_pll_enable(encoder);
* Must trick the second common lane into life.
* Otherwise we can't even access the PLL.
*/
if (ch == DPIO_CH0 && pipe == PIPE_B)
dport->release_cl2_override =
!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
chv_phy_powergate_lanes(encoder, true, lane_mask);
mutex_lock(&dev_priv->sb_lock);
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, true);
/* program left/right clock distribution */
if (pipe != PIPE_B) {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA1_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA1_FORCE;
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
} else {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA2_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA2_FORCE;
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
}
/* program clock channel usage */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
val &= ~CHV_PCS_USEDCLKCHANNEL;
else
val |= CHV_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
if (intel_crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
val &= ~CHV_PCS_USEDCLKCHANNEL;
else
val |= CHV_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
}
/*
 * This is a bit weird since generally CL
* matches the pipe, but here we need to
* pick the CL based on the port.
*/
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
if (pipe != PIPE_B)
val &= ~CHV_CMN_USEDCLKCHANNEL;
else
val |= CHV_CMN_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
mutex_unlock(&dev_priv->sb_lock);
} }
static void chv_dp_post_pll_disable(struct intel_encoder *encoder) static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{ {
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); chv_phy_post_pll_disable(encoder);
enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
u32 val;
mutex_lock(&dev_priv->sb_lock);
/* disable left/right clock distribution */
if (pipe != PIPE_B) {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
} else {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
}
mutex_unlock(&dev_priv->sb_lock);
/*
* Leave the power down bit cleared for at least one
* lane so that chv_powergate_phy_ch() will power
* on something when the channel is otherwise unused.
* When the port is off and the override is removed
* the lanes power down anyway, so otherwise it doesn't
* really matter what the state of power down bits is
* after this.
*/
chv_phy_powergate_lanes(encoder, false, 0x0);
} }
/* /*
@ -3178,16 +2902,10 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp) static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{ {
struct drm_device *dev = intel_dp_to_dev(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct intel_crtc *intel_crtc =
to_intel_crtc(dport->base.base.crtc);
unsigned long demph_reg_value, preemph_reg_value, unsigned long demph_reg_value, preemph_reg_value,
uniqtranscale_reg_value; uniqtranscale_reg_value;
uint8_t train_set = intel_dp->train_set[0]; uint8_t train_set = intel_dp->train_set[0];
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPH_LEVEL_0: case DP_TRAIN_PRE_EMPH_LEVEL_0:
@ -3262,37 +2980,18 @@ static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
return 0; return 0;
} }
mutex_lock(&dev_priv->sb_lock); vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000); uniqtranscale_reg_value, 0);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
uniqtranscale_reg_value);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
mutex_unlock(&dev_priv->sb_lock);
return 0; return 0;
} }
static bool chv_need_uniq_trans_scale(uint8_t train_set)
{
return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
(train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
}
static uint32_t chv_signal_levels(struct intel_dp *intel_dp) static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{ {
struct drm_device *dev = intel_dp_to_dev(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = dev->dev_private; u32 deemph_reg_value, margin_reg_value;
struct intel_digital_port *dport = dp_to_dig_port(intel_dp); bool uniq_trans_scale = false;
struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
u32 deemph_reg_value, margin_reg_value, val;
uint8_t train_set = intel_dp->train_set[0]; uint8_t train_set = intel_dp->train_set[0];
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
int i;
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPH_LEVEL_0: case DP_TRAIN_PRE_EMPH_LEVEL_0:
@ -3312,7 +3011,7 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
deemph_reg_value = 128; deemph_reg_value = 128;
margin_reg_value = 154; margin_reg_value = 154;
/* FIXME extra to set for 1200 */ uniq_trans_scale = true;
break; break;
default: default:
return 0; return 0;
@ -3364,88 +3063,8 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
return 0; return 0;
} }
mutex_lock(&dev_priv->sb_lock); chv_set_phy_signal_level(encoder, deemph_reg_value,
margin_reg_value, uniq_trans_scale);
/* Clear calc init */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
if (intel_crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
}
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
if (intel_crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
}
/* Program swing deemph */
for (i = 0; i < intel_crtc->config->lane_count; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
val &= ~DPIO_SWING_DEEMPH9P5_MASK;
val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
}
/* Program swing margin */
for (i = 0; i < intel_crtc->config->lane_count; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
val &= ~DPIO_SWING_MARGIN000_MASK;
val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
/*
* Supposedly this value shouldn't matter when unique transition
* scale is disabled, but in fact it does matter. Let's just
* always program the same value and hope it's OK.
*/
val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
}
/*
* The document said it needs to set bit 27 for ch0 and bit 26
* for ch1. Might be a typo in the doc.
* For now, for this unique transition scale selection, set bit
* 27 for ch0 and ch1.
*/
for (i = 0; i < intel_crtc->config->lane_count; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
if (chv_need_uniq_trans_scale(train_set))
val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
else
val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
}
/* Start swing calculation */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
if (intel_crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
}
mutex_unlock(&dev_priv->sb_lock);
return 0; return 0;
} }
@ -3714,7 +3333,6 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev; struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
uint8_t rev;
if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd, if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) < 0) sizeof(intel_dp->dpcd)) < 0)
@ -3771,6 +3389,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("PSR2 %s on sink", DRM_DEBUG_KMS("PSR2 %s on sink",
dev_priv->psr.psr2_support ? "supported" : "not supported"); dev_priv->psr.psr2_support ? "supported" : "not supported");
} }
/* Read the eDP Display control capabilities registers */
memset(intel_dp->edp_dpcd, 0, sizeof(intel_dp->edp_dpcd));
if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
(drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
sizeof(intel_dp->edp_dpcd)))
DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
intel_dp->edp_dpcd);
} }
DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n", DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
@ -3778,10 +3405,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
yesno(drm_dp_tps3_supported(intel_dp->dpcd))); yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
/* Intermediate frequency support */ /* Intermediate frequency support */
if (is_edp(intel_dp) && if (is_edp(intel_dp) && (intel_dp->edp_dpcd[0] >= 0x03)) { /* eDp v1.4 or higher */
(intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
(drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
(rev >= 0x03)) { /* eDp v1.4 or higher */
__le16 sink_rates[DP_MAX_SUPPORTED_RATES]; __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
int i; int i;


@ -0,0 +1,172 @@
/*
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include "intel_drv.h"
static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
{
uint8_t reg_val = 0;
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
&reg_val) < 0) {
DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
DP_EDP_DISPLAY_CONTROL_REGISTER);
return;
}
if (enable)
reg_val |= DP_EDP_BACKLIGHT_ENABLE;
else
reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE);
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
reg_val) != 1) {
DRM_DEBUG_KMS("Failed to %s aux backlight\n",
enable ? "enable" : "disable");
}
}
/*
 * Read the current backlight value from the DPCD register(s), based
 * on whether 8-bit (MSB only) or 16-bit (MSB and LSB) values are supported
*/
static uint32_t intel_dp_aux_get_backlight(struct intel_connector *connector)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
uint8_t read_val[2] = { 0x0 };
uint16_t level = 0;
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
&read_val, sizeof(read_val)) < 0) {
DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
return 0;
}
level = read_val[0];
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
level = (read_val[0] << 8 | read_val[1]);
return level;
}
/*
 * Sends the current backlight level over the aux channel, checking whether
 * the panel uses an 8-bit or a 16-bit value (MSB and LSB)
*/
static void
intel_dp_aux_set_backlight(struct intel_connector *connector, u32 level)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
uint8_t vals[2] = { 0x0 };
vals[0] = level;
/* Write the MSB and/or LSB */
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) {
vals[0] = (level & 0xFF00) >> 8;
vals[1] = (level & 0xFF);
}
if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
vals, sizeof(vals)) < 0) {
DRM_DEBUG_KMS("Failed to write aux backlight level\n");
return;
}
}
static void intel_dp_aux_enable_backlight(struct intel_connector *connector)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
uint8_t dpcd_buf = 0;
set_aux_backlight_enable(intel_dp, true);
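	/* If the sink is currently in the preset brightness mode, switch it to DPCD (AUX) control */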
if ((drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) == 1) &&
((dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) ==
DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET))
drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
(dpcd_buf | DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD));
}
static void intel_dp_aux_disable_backlight(struct intel_connector *connector)
{
set_aux_backlight_enable(enc_to_intel_dp(&connector->encoder->base), false);
}
static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
enum pipe pipe)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
struct intel_panel *panel = &connector->panel;
intel_dp_aux_enable_backlight(connector);
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
panel->backlight.max = 0xFFFF;
else
panel->backlight.max = 0xFF;
panel->backlight.min = 0;
panel->backlight.level = intel_dp_aux_get_backlight(connector);
panel->backlight.enabled = panel->backlight.level != 0;
return 0;
}
static bool
intel_dp_aux_display_control_capable(struct intel_connector *connector)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
	/*
	 * Check the eDP Display control capabilities registers to determine
	 * whether the panel supports backlight control over the AUX channel.
	 */
if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
(intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
!((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_PIN_ENABLE_CAP) ||
(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP))) {
DRM_DEBUG_KMS("AUX Backlight Control Supported!\n");
return true;
}
return false;
}
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct intel_panel *panel = &intel_connector->panel;
if (!i915.enable_dpcd_backlight)
return -ENODEV;
if (!intel_dp_aux_display_control_capable(intel_connector))
return -ENODEV;
panel->backlight.setup = intel_dp_aux_setup_backlight;
panel->backlight.enable = intel_dp_aux_enable_backlight;
panel->backlight.disable = intel_dp_aux_disable_backlight;
panel->backlight.set = intel_dp_aux_set_backlight;
panel->backlight.get = intel_dp_aux_get_backlight;
return 0;
}


@ -0,0 +1,470 @@
/*
* Copyright © 2014-2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "intel_drv.h"
void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 deemph_reg_value, u32 margin_reg_value,
bool uniq_trans_scale)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
u32 val;
int i;
mutex_lock(&dev_priv->sb_lock);
/* Clear calc init */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
if (intel_crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
}
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
if (intel_crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
}
/* Program swing deemph */
for (i = 0; i < intel_crtc->config->lane_count; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
val &= ~DPIO_SWING_DEEMPH9P5_MASK;
val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
}
/* Program swing margin */
for (i = 0; i < intel_crtc->config->lane_count; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
val &= ~DPIO_SWING_MARGIN000_MASK;
val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
/*
* Supposedly this value shouldn't matter when unique transition
* scale is disabled, but in fact it does matter. Let's just
* always program the same value and hope it's OK.
*/
val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
}
/*
* The document said it needs to set bit 27 for ch0 and bit 26
* for ch1. Might be a typo in the doc.
* For now, for this unique transition scale selection, set bit
* 27 for ch0 and ch1.
*/
for (i = 0; i < intel_crtc->config->lane_count; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
if (uniq_trans_scale)
val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
else
val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
}
/* Start swing calculation */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
if (intel_crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
}
mutex_unlock(&dev_priv->sb_lock);
}
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
bool reset)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
enum pipe pipe = crtc->pipe;
uint32_t val;
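	/* reset == true asserts the data lane soft reset, reset == false deasserts it */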
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
else
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
if (crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
else
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
}
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
val &= ~DPIO_PCS_CLK_SOFT_RESET;
else
val |= DPIO_PCS_CLK_SOFT_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
if (crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
val &= ~DPIO_PCS_CLK_SOFT_RESET;
else
val |= DPIO_PCS_CLK_SOFT_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
}
}
void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
unsigned int lane_mask =
intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
u32 val;
/*
* Must trick the second common lane into life.
* Otherwise we can't even access the PLL.
*/
if (ch == DPIO_CH0 && pipe == PIPE_B)
dport->release_cl2_override =
!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
chv_phy_powergate_lanes(encoder, true, lane_mask);
mutex_lock(&dev_priv->sb_lock);
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, true);
/* program left/right clock distribution */
if (pipe != PIPE_B) {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA1_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA1_FORCE;
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
} else {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA2_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA2_FORCE;
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
}
/* program clock channel usage */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
val &= ~CHV_PCS_USEDCLKCHANNEL;
else
val |= CHV_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
if (intel_crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
val &= ~CHV_PCS_USEDCLKCHANNEL;
else
val |= CHV_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
}
/*
 * This is a bit weird since generally CL
* matches the pipe, but here we need to
* pick the CL based on the port.
*/
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
if (pipe != PIPE_B)
val &= ~CHV_CMN_USEDCLKCHANNEL;
else
val |= CHV_CMN_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
mutex_unlock(&dev_priv->sb_lock);
}
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
int data, i, stagger;
u32 val;
mutex_lock(&dev_priv->sb_lock);
/* allow hardware to manage TX FIFO reset source */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
if (intel_crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
}
	/* Program Tx lane latency optimal setting */
for (i = 0; i < intel_crtc->config->lane_count; i++) {
/* Set the upar bit */
if (intel_crtc->config->lane_count == 1)
data = 0x0;
else
data = (i == 1) ? 0x0 : 0x1;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
data << DPIO_UPAR_SHIFT);
}
/* Data lane stagger programming */
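	/* The stagger value scales with the port clock; each threshold below is half the previous one */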
if (intel_crtc->config->port_clock > 270000)
stagger = 0x18;
else if (intel_crtc->config->port_clock > 135000)
stagger = 0xd;
else if (intel_crtc->config->port_clock > 67500)
stagger = 0x7;
else if (intel_crtc->config->port_clock > 33750)
stagger = 0x4;
else
stagger = 0x2;
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
if (intel_crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
}
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
DPIO_TX1_STAGGER_MASK(0x1f) |
DPIO_TX1_STAGGER_MULT(6) |
DPIO_TX2_STAGGER_MULT(0));
if (intel_crtc->config->lane_count > 2) {
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
DPIO_TX1_STAGGER_MASK(0x1f) |
DPIO_TX1_STAGGER_MULT(7) |
DPIO_TX2_STAGGER_MULT(5));
}
/* Deassert data lane reset */
chv_data_lane_soft_reset(encoder, false);
mutex_unlock(&dev_priv->sb_lock);
}
void chv_phy_release_cl2_override(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (dport->release_cl2_override) {
chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
dport->release_cl2_override = false;
}
}
void chv_phy_post_pll_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
u32 val;
mutex_lock(&dev_priv->sb_lock);
/* disable left/right clock distribution */
if (pipe != PIPE_B) {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
} else {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
}
mutex_unlock(&dev_priv->sb_lock);
/*
* Leave the power down bit cleared for at least one
* lane so that chv_powergate_phy_ch() will power
* on something when the channel is otherwise unused.
* When the port is off and the override is removed
* the lanes power down anyway, so otherwise it doesn't
* really matter what the state of power down bits is
* after this.
*/
chv_phy_powergate_lanes(encoder, false, 0x0);
}
void vlv_set_phy_signal_level(struct intel_encoder *encoder,
u32 demph_reg_value, u32 preemph_reg_value,
u32 uniqtranscale_reg_value, u32 tx3_demph)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
mutex_lock(&dev_priv->sb_lock);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
uniqtranscale_reg_value);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
if (tx3_demph)
vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
mutex_unlock(&dev_priv->sb_lock);
}
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->sb_lock);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
mutex_unlock(&dev_priv->sb_lock);
}
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
u32 val;
mutex_lock(&dev_priv->sb_lock);
/* Enable clock channels for this port */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
val = 0;
if (pipe)
val |= (1<<21);
else
val &= ~(1<<21);
val |= 0x001000c4;
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
/* Program lane clock */
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
mutex_unlock(&dev_priv->sb_lock);
}
void vlv_phy_reset_lanes(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
mutex_lock(&dev_priv->sb_lock);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
mutex_unlock(&dev_priv->sb_lock);
}


@ -1508,7 +1508,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
int clock = crtc_state->port_clock; int clock = crtc_state->port_clock;
if (encoder->type == INTEL_OUTPUT_HDMI) { if (encoder->type == INTEL_OUTPUT_HDMI) {
intel_clock_t best_clock; struct dpll best_clock;
/* Calculate HDMI div */ /* Calculate HDMI div */
/* /*


@ -266,7 +266,7 @@ struct intel_connector {
struct intel_dp *mst_port; struct intel_dp *mst_port;
}; };
typedef struct dpll { struct dpll {
/* given values */ /* given values */
int n; int n;
int m1, m2; int m1, m2;
@ -276,7 +276,7 @@ typedef struct dpll {
int vco; int vco;
int m; int m;
int p; int p;
} intel_clock_t; };
struct intel_atomic_state { struct intel_atomic_state {
struct drm_atomic_state base; struct drm_atomic_state base;
@ -291,17 +291,29 @@ struct intel_atomic_state {
bool dpll_set, modeset; bool dpll_set, modeset;
/*
* Does this transaction change the pipes that are active? This mask
* tracks which CRTC's have changed their active state at the end of
* the transaction (not counting the temporary disable during modesets).
* This mask should only be non-zero when intel_state->modeset is true,
* but the converse is not necessarily true; simply changing a mode may
* not flip the final active status of any CRTC's
*/
unsigned int active_pipe_changes;
unsigned int active_crtcs; unsigned int active_crtcs;
unsigned int min_pixclk[I915_MAX_PIPES]; unsigned int min_pixclk[I915_MAX_PIPES];
struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS]; struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
struct intel_wm_config wm_config;
/* /*
* Current watermarks can't be trusted during hardware readout, so * Current watermarks can't be trusted during hardware readout, so
* don't bother calculating intermediate watermarks. * don't bother calculating intermediate watermarks.
*/ */
bool skip_intermediate_wm; bool skip_intermediate_wm;
/* Gen9+ only */
struct skl_wm_values wm_results;
}; };
struct intel_plane_state { struct intel_plane_state {
@ -405,6 +417,48 @@ struct skl_pipe_wm {
uint32_t linetime; uint32_t linetime;
}; };
struct intel_crtc_wm_state {
union {
struct {
/*
* Intermediate watermarks; these can be
* programmed immediately since they satisfy
* both the current configuration we're
* switching away from and the new
* configuration we're switching to.
*/
struct intel_pipe_wm intermediate;
/*
* Optimal watermarks, programmed post-vblank
* when this state is committed.
*/
struct intel_pipe_wm optimal;
} ilk;
struct {
/* gen9+ only needs 1-step wm programming */
struct skl_pipe_wm optimal;
/* cached plane data rate */
unsigned plane_data_rate[I915_MAX_PLANES];
unsigned plane_y_data_rate[I915_MAX_PLANES];
/* minimum block allocation */
uint16_t minimum_blocks[I915_MAX_PLANES];
uint16_t minimum_y_blocks[I915_MAX_PLANES];
} skl;
};
/*
* Platforms with two-step watermark programming will need to
* update watermark programming post-vblank to switch from the
* safe intermediate watermarks to the optimal final
* watermarks.
*/
bool need_postvbl_update;
};
struct intel_crtc_state { struct intel_crtc_state {
struct drm_crtc_state base; struct drm_crtc_state base;
@ -558,32 +612,7 @@ struct intel_crtc_state {
/* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */ /* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */
bool disable_lp_wm; bool disable_lp_wm;
struct { struct intel_crtc_wm_state wm;
/*
* Optimal watermarks, programmed post-vblank when this state
* is committed.
*/
union {
struct intel_pipe_wm ilk;
struct skl_pipe_wm skl;
} optimal;
/*
* Intermediate watermarks; these can be programmed immediately
* since they satisfy both the current configuration we're
* switching away from and the new configuration we're switching
* to.
*/
struct intel_pipe_wm intermediate;
/*
* Platforms with two-step watermark programming will need to
* update watermark programming post-vblank to switch from the
* safe intermediate watermarks to the optimal final
* watermarks.
*/
bool need_postvbl_update;
} wm;
/* Gamma mode programmed on the pipe */ /* Gamma mode programmed on the pipe */
uint32_t gamma_mode; uint32_t gamma_mode;
@ -598,14 +627,6 @@ struct vlv_wm_state {
bool cxsr; bool cxsr;
}; };
struct intel_mmio_flip {
struct work_struct work;
struct drm_i915_private *i915;
struct drm_i915_gem_request *req;
struct intel_crtc *crtc;
unsigned int rotation;
};
struct intel_crtc { struct intel_crtc {
struct drm_crtc base; struct drm_crtc base;
enum pipe pipe; enum pipe pipe;
@ -620,7 +641,7 @@ struct intel_crtc {
unsigned long enabled_power_domains; unsigned long enabled_power_domains;
bool lowfreq_avail; bool lowfreq_avail;
struct intel_overlay *overlay; struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work; struct intel_flip_work *flip_work;
atomic_t unpin_work_count; atomic_t unpin_work_count;
@ -815,6 +836,7 @@ struct intel_dp {
uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
/* sink rates as reported by DP_SUPPORTED_LINK_RATES */ /* sink rates as reported by DP_SUPPORTED_LINK_RATES */
uint8_t num_sink_rates; uint8_t num_sink_rates;
int sink_rates[DP_MAX_SUPPORTED_RATES]; int sink_rates[DP_MAX_SUPPORTED_RATES];
@ -947,22 +969,21 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
return dev_priv->plane_to_crtc_mapping[plane]; return dev_priv->plane_to_crtc_mapping[plane];
} }
struct intel_unpin_work { struct intel_flip_work {
struct work_struct work; struct work_struct unpin_work;
struct work_struct mmio_work;
struct drm_crtc *crtc; struct drm_crtc *crtc;
struct drm_framebuffer *old_fb; struct drm_framebuffer *old_fb;
struct drm_i915_gem_object *pending_flip_obj; struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event; struct drm_pending_vblank_event *event;
atomic_t pending; atomic_t pending;
#define INTEL_FLIP_INACTIVE 0
#define INTEL_FLIP_PENDING 1
#define INTEL_FLIP_COMPLETE 2
u32 flip_count; u32 flip_count;
u32 gtt_offset; u32 gtt_offset;
struct drm_i915_gem_request *flip_queued_req; struct drm_i915_gem_request *flip_queued_req;
u32 flip_queued_vblank; u32 flip_queued_vblank;
u32 flip_ready_vblank; u32 flip_ready_vblank;
bool enable_stall_check; unsigned int rotation;
}; };
struct intel_load_detect_pipe { struct intel_load_detect_pipe {
@ -1031,9 +1052,9 @@ void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_reset_rps_interrupts(struct drm_device *dev); void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_enable_rps_interrupts(struct drm_device *dev); void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_disable_rps_interrupts(struct drm_device *dev); void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask); u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv); void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv); void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
@ -1112,14 +1133,15 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv); void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
/* intel_display.c */ /* intel_display.c */
void intel_update_rawclk(struct drm_i915_private *dev_priv);
int vlv_get_cck_clock(struct drm_i915_private *dev_priv, int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq); const char *name, u32 reg, int ref_freq);
extern const struct drm_plane_funcs intel_plane_funcs; extern const struct drm_plane_funcs intel_plane_funcs;
void intel_init_display_hooks(struct drm_i915_private *dev_priv); void intel_init_display_hooks(struct drm_i915_private *dev_priv);
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info); unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
bool intel_has_pending_fb_unpin(struct drm_device *dev); bool intel_has_pending_fb_unpin(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev); void intel_mark_busy(struct drm_i915_private *dev_priv);
void intel_mark_idle(struct drm_device *dev); void intel_mark_idle(struct drm_i915_private *dev_priv);
void intel_crtc_restore_mode(struct drm_crtc *crtc); void intel_crtc_restore_mode(struct drm_crtc *crtc);
int intel_display_suspend(struct drm_device *dev); int intel_display_suspend(struct drm_device *dev);
void intel_encoder_destroy(struct drm_encoder *encoder); void intel_encoder_destroy(struct drm_encoder *encoder);
@ -1151,6 +1173,9 @@ intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe)
if (crtc->active) if (crtc->active)
intel_wait_for_vblank(dev, pipe); intel_wait_for_vblank(dev, pipe);
} }
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv, void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport, struct intel_digital_port *dport,
@ -1164,14 +1189,14 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx); struct drm_modeset_acquire_ctx *ctx);
int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
unsigned int rotation); unsigned int rotation);
void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
struct drm_framebuffer * struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev, __intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd, struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj); struct drm_i915_gem_object *obj);
void intel_prepare_page_flip(struct drm_device *dev, int plane); void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe);
void intel_finish_page_flip(struct drm_device *dev, int pipe); void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe);
void intel_finish_page_flip_plane(struct drm_device *dev, int plane); void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe);
void intel_check_page_flip(struct drm_device *dev, int pipe);
int intel_prepare_plane_fb(struct drm_plane *plane, int intel_prepare_plane_fb(struct drm_plane *plane,
const struct drm_plane_state *new_state); const struct drm_plane_state *new_state);
void intel_cleanup_plane_fb(struct drm_plane *plane, void intel_cleanup_plane_fb(struct drm_plane *plane,
@ -1228,8 +1253,8 @@ u32 intel_compute_tile_offset(int *x, int *y,
const struct drm_framebuffer *fb, int plane, const struct drm_framebuffer *fb, int plane,
unsigned int pitch, unsigned int pitch,
unsigned int rotation); unsigned int rotation);
void intel_prepare_reset(struct drm_device *dev); void intel_prepare_reset(struct drm_i915_private *dev_priv);
void intel_finish_reset(struct drm_device *dev); void intel_finish_reset(struct drm_i915_private *dev_priv);
void hsw_enable_pc8(struct drm_i915_private *dev_priv); void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv); void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void broxton_init_cdclk(struct drm_i915_private *dev_priv); void broxton_init_cdclk(struct drm_i915_private *dev_priv);
@ -1252,8 +1277,8 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n); void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
intel_clock_t *best_clock); struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock); int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
bool intel_crtc_active(struct drm_crtc *crtc); bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc); void hsw_enable_ips(struct intel_crtc *crtc);
@ -1339,12 +1364,22 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
bool bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]); intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
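/*
 * Returns the mask of unused lanes, e.g. for lane_count == 2 this
 * evaluates to 0xc, i.e. lanes 2 and 3 are unused.
 */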
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
{
return ~((1 << lane_count) - 1) & 0xf;
}
/* intel_dp_aux_backlight.c */
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
/* intel_dp_mst.c */ /* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
/* intel_dsi.c */ /* intel_dsi.c */
void intel_dsi_init(struct drm_device *dev); void intel_dsi_init(struct drm_device *dev);
/* intel_dsi_dcs_backlight.c */
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
/* intel_dvo.c */ /* intel_dvo.c */
void intel_dvo_init(struct drm_device *dev); void intel_dvo_init(struct drm_device *dev);
@ -1424,13 +1459,13 @@ void intel_attach_aspect_ratio_property(struct drm_connector *connector);
/* intel_overlay.c */ /* intel_overlay.c */
void intel_setup_overlay(struct drm_device *dev); void intel_setup_overlay(struct drm_i915_private *dev_priv);
void intel_cleanup_overlay(struct drm_device *dev); void intel_cleanup_overlay(struct drm_i915_private *dev_priv);
int intel_overlay_switch_off(struct intel_overlay *overlay); int intel_overlay_switch_off(struct intel_overlay *overlay);
int intel_overlay_put_image(struct drm_device *dev, void *data, int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
int intel_overlay_attrs(struct drm_device *dev, void *data, int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
void intel_overlay_reset(struct drm_i915_private *dev_priv); void intel_overlay_reset(struct drm_i915_private *dev_priv);
@ -1601,21 +1636,20 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
void intel_pm_setup(struct drm_device *dev); void intel_pm_setup(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv); void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void); void intel_gpu_ips_teardown(void);
void intel_init_gt_powersave(struct drm_device *dev); void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
void intel_cleanup_gt_powersave(struct drm_device *dev); void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
void intel_enable_gt_powersave(struct drm_device *dev); void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_disable_gt_powersave(struct drm_device *dev); void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_suspend_gt_powersave(struct drm_device *dev); void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
void intel_reset_gt_powersave(struct drm_device *dev); void intel_reset_gt_powersave(struct drm_i915_private *dev_priv);
void gen6_update_ring_freq(struct drm_device *dev); void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
void gen6_rps_busy(struct drm_i915_private *dev_priv); void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv); void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv, void gen6_rps_boost(struct drm_i915_private *dev_priv,
struct intel_rps_client *rps, struct intel_rps_client *rps,
unsigned long submitted); unsigned long submitted);
void intel_queue_rps_boost_for_request(struct drm_device *dev, void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req);
struct drm_i915_gem_request *req);
void vlv_wm_get_hw_state(struct drm_device *dev); void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev); void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev); void skl_wm_get_hw_state(struct drm_device *dev);
@ -1623,7 +1657,11 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */); struct skl_ddb_allocation *ddb /* out */);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config); uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
bool ilk_disable_lp_wm(struct drm_device *dev); bool ilk_disable_lp_wm(struct drm_device *dev);
int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6); int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
static inline int intel_enable_rc6(void)
{
return i915.enable_rc6;
}
/* intel_sdvo.c */ /* intel_sdvo.c */
bool intel_sdvo_init(struct drm_device *dev, bool intel_sdvo_init(struct drm_device *dev,
@ -1635,7 +1673,7 @@ int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data, int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
void intel_pipe_update_start(struct intel_crtc *crtc); void intel_pipe_update_start(struct intel_crtc *crtc);
void intel_pipe_update_end(struct intel_crtc *crtc); void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work);
/* intel_tv.c */ /* intel_tv.c */
void intel_tv_init(struct drm_device *dev); void intel_tv_init(struct drm_device *dev);


@ -532,7 +532,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
enum port port; enum port port;
u32 tmp;
DRM_DEBUG_KMS("\n"); DRM_DEBUG_KMS("\n");
@ -551,11 +550,13 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
msleep(intel_dsi->panel_on_delay); msleep(intel_dsi->panel_on_delay);
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 val;
/* Disable DPOunit clock gating, can stall pipe */ /* Disable DPOunit clock gating, can stall pipe */
tmp = I915_READ(DSPCLK_GATE_D); val = I915_READ(DSPCLK_GATE_D);
tmp |= DPOUNIT_CLOCK_GATE_DISABLE; val |= DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, tmp); I915_WRITE(DSPCLK_GATE_D, val);
} }
/* put device in ready state */ /* put device in ready state */
@ -693,7 +694,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
intel_dsi_clear_device_ready(encoder); intel_dsi_clear_device_ready(encoder);
if (!IS_BROXTON(dev_priv)) { if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 val; u32 val;
val = I915_READ(DSPCLK_GATE_D); val = I915_READ(DSPCLK_GATE_D);
@ -1473,10 +1474,42 @@ void intel_dsi_init(struct drm_device *dev)
else else
intel_encoder->crtc_mask = BIT(PIPE_B); intel_encoder->crtc_mask = BIT(PIPE_B);
if (dev_priv->vbt.dsi.config->dual_link) if (dev_priv->vbt.dsi.config->dual_link) {
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C); intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
else
switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
case DL_DCS_PORT_A:
intel_dsi->dcs_backlight_ports = BIT(PORT_A);
break;
case DL_DCS_PORT_C:
intel_dsi->dcs_backlight_ports = BIT(PORT_C);
break;
default:
case DL_DCS_PORT_A_AND_C:
intel_dsi->dcs_backlight_ports = BIT(PORT_A) | BIT(PORT_C);
break;
}
switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
case DL_DCS_PORT_A:
intel_dsi->dcs_cabc_ports = BIT(PORT_A);
break;
case DL_DCS_PORT_C:
intel_dsi->dcs_cabc_ports = BIT(PORT_C);
break;
default:
case DL_DCS_PORT_A_AND_C:
intel_dsi->dcs_cabc_ports = BIT(PORT_A) | BIT(PORT_C);
break;
}
} else {
intel_dsi->ports = BIT(port); intel_dsi->ports = BIT(port);
intel_dsi->dcs_backlight_ports = BIT(port);
intel_dsi->dcs_cabc_ports = BIT(port);
}
if (!dev_priv->vbt.dsi.config->cabc_supported)
intel_dsi->dcs_cabc_ports = 0;
/* Create a DSI host (and a device) for each port. */ /* Create a DSI host (and a device) for each port. */
for_each_dsi_port(port, intel_dsi->ports) { for_each_dsi_port(port, intel_dsi->ports) {


@ -78,6 +78,10 @@ struct intel_dsi {
u8 escape_clk_div; u8 escape_clk_div;
u8 dual_link; u8 dual_link;
u16 dcs_backlight_ports;
u16 dcs_cabc_ports;
u8 pixel_overlap; u8 pixel_overlap;
u32 port_bits; u32 port_bits;
u32 bw_timer; u32 bw_timer;


@ -0,0 +1,179 @@
/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Deepak M <m.deepak at intel.com>
*/
#include "intel_drv.h"
#include "intel_dsi.h"
#include "i915_drv.h"
#include <video/mipi_display.h>
#include <drm/drm_mipi_dsi.h>
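/*
 * Bits used with MIPI_DCS_WRITE_CONTROL_DISPLAY/GET_CONTROL_DISPLAY, plus
 * CABC power-save levels written via MIPI_DCS_WRITE_POWER_SAVE.
 */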
#define CONTROL_DISPLAY_BCTRL (1 << 5)
#define CONTROL_DISPLAY_DD (1 << 3)
#define CONTROL_DISPLAY_BL (1 << 2)
#define POWER_SAVE_OFF (0 << 0)
#define POWER_SAVE_LOW (1 << 0)
#define POWER_SAVE_MEDIUM (2 << 0)
#define POWER_SAVE_HIGH (3 << 0)
#define POWER_SAVE_OUTDOOR_MODE (4 << 0)
#define PANEL_PWM_MAX_VALUE 0xFF
static u32 dcs_get_backlight(struct intel_connector *connector)
{
struct intel_encoder *encoder = connector->encoder;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct mipi_dsi_device *dsi_device;
u8 data;
enum port port;
/* FIXME: Need to take care of 16 bit brightness level */
for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
dsi_device = intel_dsi->dsi_hosts[port]->device;
mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
&data, sizeof(data));
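		/* All enabled ports are assumed to report the same level, so one read is enough */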
break;
}
return data;
}
static void dcs_set_backlight(struct intel_connector *connector, u32 level)
{
struct intel_encoder *encoder = connector->encoder;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct mipi_dsi_device *dsi_device;
u8 data = level;
enum port port;
/* FIXME: Need to take care of 16 bit brightness level */
for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
dsi_device = intel_dsi->dsi_hosts[port]->device;
mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
&data, sizeof(data));
}
}
static void dcs_disable_backlight(struct intel_connector *connector)
{
struct intel_encoder *encoder = connector->encoder;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct mipi_dsi_device *dsi_device;
enum port port;
dcs_set_backlight(connector, 0);
for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
u8 cabc = POWER_SAVE_OFF;
dsi_device = intel_dsi->dsi_hosts[port]->device;
mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE,
&cabc, sizeof(cabc));
}
for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
u8 ctrl = 0;
dsi_device = intel_dsi->dsi_hosts[port]->device;
mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY,
&ctrl, sizeof(ctrl));
ctrl &= ~CONTROL_DISPLAY_BL;
ctrl &= ~CONTROL_DISPLAY_DD;
ctrl &= ~CONTROL_DISPLAY_BCTRL;
mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY,
&ctrl, sizeof(ctrl));
}
}
static void dcs_enable_backlight(struct intel_connector *connector)
{
struct intel_encoder *encoder = connector->encoder;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct intel_panel *panel = &connector->panel;
struct mipi_dsi_device *dsi_device;
enum port port;
for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
u8 ctrl = 0;
dsi_device = intel_dsi->dsi_hosts[port]->device;
mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY,
&ctrl, sizeof(ctrl));
ctrl |= CONTROL_DISPLAY_BL;
ctrl |= CONTROL_DISPLAY_DD;
ctrl |= CONTROL_DISPLAY_BCTRL;
mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY,
&ctrl, sizeof(ctrl));
}
for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
u8 cabc = POWER_SAVE_MEDIUM;
dsi_device = intel_dsi->dsi_hosts[port]->device;
mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE,
&cabc, sizeof(cabc));
}
dcs_set_backlight(connector, panel->backlight.level);
}
static int dcs_setup_backlight(struct intel_connector *connector,
enum pipe unused)
{
struct intel_panel *panel = &connector->panel;
panel->backlight.max = PANEL_PWM_MAX_VALUE;
panel->backlight.level = PANEL_PWM_MAX_VALUE;
return 0;
}
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct drm_device *dev = intel_connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder = intel_connector->encoder;
struct intel_panel *panel = &intel_connector->panel;
if (dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
return -ENODEV;
if (WARN_ON(encoder->type != INTEL_OUTPUT_DSI))
return -EINVAL;
panel->backlight.setup = dcs_setup_backlight;
panel->backlight.enable = dcs_enable_backlight;
panel->backlight.disable = dcs_disable_backlight;
panel->backlight.set = dcs_set_backlight;
panel->backlight.get = dcs_get_backlight;
return 0;
}


@@ -95,6 +95,24 @@ static struct gpio_map vlv_gpio_table[] = {
{ VLV_GPIO_NC_11_PANEL1_BKLTCTL },
};
#define CHV_GPIO_IDX_START_N 0
#define CHV_GPIO_IDX_START_E 73
#define CHV_GPIO_IDX_START_SW 100
#define CHV_GPIO_IDX_START_SE 198
#define CHV_VBT_MAX_PINS_PER_FMLY 15
#define CHV_GPIO_PAD_CFG0(f, i) (0x4400 + (f) * 0x400 + (i) * 8)
#define CHV_GPIO_GPIOEN (1 << 15)
#define CHV_GPIO_GPIOCFG_GPIO (0 << 8)
#define CHV_GPIO_GPIOCFG_GPO (1 << 8)
#define CHV_GPIO_GPIOCFG_GPI (2 << 8)
#define CHV_GPIO_GPIOCFG_HIZ (3 << 8)
#define CHV_GPIO_GPIOTXSTATE(state) ((!!(state)) << 1)
#define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4)
#define CHV_GPIO_CFGLOCK (1 << 31)
static inline enum port intel_dsi_seq_port_to_port(u8 port)
{
return port ? PORT_C : PORT_A;
@@ -203,13 +221,14 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
map = &vlv_gpio_table[gpio_index];
if (dev_priv->vbt.dsi.seq_version >= 3) {
- DRM_DEBUG_KMS("GPIO element v3 not supported\n");
- return;
+ /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
+ port = IOSF_PORT_GPIO_NC;
} else {
if (gpio_source == 0) {
port = IOSF_PORT_GPIO_NC;
} else if (gpio_source == 1) {
- port = IOSF_PORT_GPIO_SC;
+ DRM_DEBUG_KMS("SC gpio not supported\n");
+ return;
} else {
DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
return;
@ -231,6 +250,56 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
mutex_unlock(&dev_priv->sb_lock);
}
static void chv_exec_gpio(struct drm_i915_private *dev_priv,
u8 gpio_source, u8 gpio_index, bool value)
{
u16 cfg0, cfg1;
u16 family_num;
u8 port;
if (dev_priv->vbt.dsi.seq_version >= 3) {
if (gpio_index >= CHV_GPIO_IDX_START_SE) {
/* XXX: it's unclear whether 255->57 is part of SE. */
gpio_index -= CHV_GPIO_IDX_START_SE;
port = CHV_IOSF_PORT_GPIO_SE;
} else if (gpio_index >= CHV_GPIO_IDX_START_SW) {
gpio_index -= CHV_GPIO_IDX_START_SW;
port = CHV_IOSF_PORT_GPIO_SW;
} else if (gpio_index >= CHV_GPIO_IDX_START_E) {
gpio_index -= CHV_GPIO_IDX_START_E;
port = CHV_IOSF_PORT_GPIO_E;
} else {
port = CHV_IOSF_PORT_GPIO_N;
}
} else {
/* XXX: The spec is unclear about CHV GPIO on seq v2 */
if (gpio_source != 0) {
DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
return;
}
if (gpio_index >= CHV_GPIO_IDX_START_E) {
DRM_DEBUG_KMS("invalid gpio index %u for GPIO N\n",
gpio_index);
return;
}
port = CHV_IOSF_PORT_GPIO_N;
}
family_num = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY;
gpio_index = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY;
cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index);
cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index);
mutex_lock(&dev_priv->sb_lock);
vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
vlv_iosf_sb_write(dev_priv, port, cfg0,
CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value));
mutex_unlock(&dev_priv->sb_lock);
}
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
struct drm_device *dev = intel_dsi->base.base.dev;
@@ -254,6 +323,8 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
if (IS_VALLEYVIEW(dev_priv))
vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+ else if (IS_CHERRYVIEW(dev_priv))
+ chv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
else
DRM_DEBUG_KMS("GPIO element not supported on this platform\n");


@@ -740,7 +740,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
/* FIXME: We lack the proper locking here, so only run this on the
* platforms that need. */
- if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7)
+ if (IS_GEN(dev_priv, 5, 6))
cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
cache->fb.pixel_format = fb->pixel_format;
cache->fb.stride = fb->pitches[0];
@@ -827,7 +827,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
bool enable_by_default = IS_HASWELL(dev_priv) ||
IS_BROADWELL(dev_priv);
- if (intel_vgpu_active(dev_priv->dev)) {
+ if (intel_vgpu_active(dev_priv)) {
fbc->no_fbc_reason = "VGPU is active";
return false;
}


@@ -150,10 +150,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
if (size * 2 < ggtt->stolen_usable_size)
obj = i915_gem_object_create_stolen(dev, size);
if (obj == NULL)
- obj = i915_gem_alloc_object(dev, size);
- if (!obj) {
+ obj = i915_gem_object_create(dev, size);
+ if (IS_ERR(obj)) {
DRM_ERROR("failed to allocate framebuffer\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(obj);
goto out;
}
@@ -186,9 +186,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct fb_info *info;
struct drm_framebuffer *fb;
+ struct i915_vma *vma;
struct drm_i915_gem_object *obj;
- int size, ret;
bool prealloc = false;
+ void *vaddr;
+ int ret;
if (intel_fb &&
(sizes->fb_width > intel_fb->base.width ||
@@ -214,7 +216,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
}
obj = intel_fb->obj;
- size = obj->base.size;
mutex_lock(&dev->struct_mutex);
@@ -244,22 +245,23 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &intelfb_ops;
+ vma = i915_gem_obj_to_ggtt(obj);
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = ggtt->mappable_end;
- info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
- info->fix.smem_len = size;
+ info->fix.smem_start = dev->mode_config.fb_base + vma->node.start;
+ info->fix.smem_len = vma->node.size;
- info->screen_base =
- ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj),
- size);
- if (!info->screen_base) {
+ vaddr = i915_vma_pin_iomap(vma);
+ if (IS_ERR(vaddr)) {
DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
- ret = -ENOSPC;
+ ret = PTR_ERR(vaddr);
goto out_destroy_fbi;
}
- info->screen_size = size;
+ info->screen_base = vaddr;
+ info->screen_size = vma->node.size;
/* This driver doesn't need a VT switch to restore the mode on resume */
info->skip_vt_switch = true;
@@ -287,7 +289,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_unpin:
- i915_gem_object_ggtt_unpin(obj);
+ intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -551,6 +553,11 @@ static void intel_fbdev_destroy(struct drm_device *dev,
if (ifbdev->fb) {
drm_framebuffer_unregister_private(&ifbdev->fb->base);
+ mutex_lock(&dev->struct_mutex);
+ intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
+ mutex_unlock(&dev->struct_mutex);
drm_framebuffer_remove(&ifbdev->fb->base);
}
}


@@ -59,9 +59,12 @@
*
*/
- #define I915_SKL_GUC_UCODE "i915/skl_guc_ver6.bin"
+ #define I915_SKL_GUC_UCODE "i915/skl_guc_ver6_1.bin"
MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
+ #define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin"
+ MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
/* User-friendly representation of an enum */
const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
{
@@ -281,6 +284,17 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
return ret;
}
+ static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
+ {
+ u32 wopcm_size = GUC_WOPCM_TOP;
+ /* On BXT, the top of WOPCM is reserved for RC6 context */
+ if (IS_BROXTON(dev_priv))
+ wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
+ return wopcm_size;
+ }
/*
* Load the GuC firmware blob into the MinuteIA.
*/
@@ -308,7 +322,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* init WOPCM */
- I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE);
+ I915_WRITE(GUC_WOPCM_SIZE, guc_wopcm_size(dev_priv));
I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);
/* Enable MIA caching. GuC clock gating is disabled. */
@@ -552,9 +566,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
/* Header and uCode will be loaded to WOPCM. Size of the two. */
size = guc_fw->header_size + guc_fw->ucode_size;
- /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
- if (size > GUC_WOPCM_SIZE_VALUE - 0x8000) {
+ if (size > guc_wopcm_size(dev->dev_private)) {
DRM_ERROR("Firmware is too large to fit in WOPCM\n");
goto fail;
}
@@ -640,6 +652,10 @@ void intel_guc_ucode_init(struct drm_device *dev)
fw_path = I915_SKL_GUC_UCODE;
guc_fw->guc_fw_major_wanted = 6;
guc_fw->guc_fw_minor_wanted = 1;
+ } else if (IS_BROXTON(dev)) {
+ fw_path = I915_BXT_GUC_UCODE;
+ guc_fw->guc_fw_major_wanted = 8;
+ guc_fw->guc_fw_minor_wanted = 7;
} else {
i915.enable_guc_submission = false;
fw_path = ""; /* unknown device */


@ -1678,35 +1678,12 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
struct intel_crtc *intel_crtc = struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc); to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
u32 val;
/* Enable clock channels for this port */ vlv_phy_pre_encoder_enable(encoder);
mutex_lock(&dev_priv->sb_lock);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
val = 0;
if (pipe)
val |= (1<<21);
else
val &= ~(1<<21);
val |= 0x001000c4;
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
/* HDMI 1.0V-2dB */ /* HDMI 1.0V-2dB */
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0); vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f); 0x2b247878);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040);
vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
/* Program lane clock */
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
mutex_unlock(&dev_priv->sb_lock);
intel_hdmi->set_infoframes(&encoder->base, intel_hdmi->set_infoframes(&encoder->base,
intel_crtc->config->has_hdmi_sink, intel_crtc->config->has_hdmi_sink,
@ -1719,207 +1696,27 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder) static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
{ {
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
intel_hdmi_prepare(encoder); intel_hdmi_prepare(encoder);
/* Program Tx lane resets to default */ vlv_phy_pre_pll_enable(encoder);
mutex_lock(&dev_priv->sb_lock);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
mutex_unlock(&dev_priv->sb_lock);
}
static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
bool reset)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
enum pipe pipe = crtc->pipe;
uint32_t val;
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
else
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
if (crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
else
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
}
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
val &= ~DPIO_PCS_CLK_SOFT_RESET;
else
val |= DPIO_PCS_CLK_SOFT_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
if (crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
val &= ~DPIO_PCS_CLK_SOFT_RESET;
else
val |= DPIO_PCS_CLK_SOFT_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
}
} }
static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder) static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
{ {
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
u32 val;
intel_hdmi_prepare(encoder); intel_hdmi_prepare(encoder);
/* chv_phy_pre_pll_enable(encoder);
* Must trick the second common lane into life.
* Otherwise we can't even access the PLL.
*/
if (ch == DPIO_CH0 && pipe == PIPE_B)
dport->release_cl2_override =
!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
chv_phy_powergate_lanes(encoder, true, 0x0);
mutex_lock(&dev_priv->sb_lock);
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, true);
/* program left/right clock distribution */
if (pipe != PIPE_B) {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA1_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA1_FORCE;
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
} else {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA2_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA2_FORCE;
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
}
/* program clock channel usage */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
val &= ~CHV_PCS_USEDCLKCHANNEL;
else
val |= CHV_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
val &= ~CHV_PCS_USEDCLKCHANNEL;
else
val |= CHV_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
/*
* This a a bit weird since generally CL
* matches the pipe, but here we need to
* pick the CL based on the port.
*/
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
if (pipe != PIPE_B)
val &= ~CHV_CMN_USEDCLKCHANNEL;
else
val |= CHV_CMN_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
mutex_unlock(&dev_priv->sb_lock);
} }
static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder) static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder)
{ {
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); chv_phy_post_pll_disable(encoder);
enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
u32 val;
mutex_lock(&dev_priv->sb_lock);
/* disable left/right clock distribution */
if (pipe != PIPE_B) {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
} else {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
}
mutex_unlock(&dev_priv->sb_lock);
/*
* Leave the power down bit cleared for at least one
* lane so that chv_powergate_phy_ch() will power
* on something when the channel is otherwise unused.
* When the port is off and the override is removed
* the lanes power down anyway, so otherwise it doesn't
* really matter what the state of power down bits is
* after this.
*/
chv_phy_powergate_lanes(encoder, false, 0x0);
} }
static void vlv_hdmi_post_disable(struct intel_encoder *encoder) static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
{ {
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
/* Reset lanes to avoid HDMI flicker (VLV w/a) */ /* Reset lanes to avoid HDMI flicker (VLV w/a) */
mutex_lock(&dev_priv->sb_lock); vlv_phy_reset_lanes(encoder);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
mutex_unlock(&dev_priv->sb_lock);
} }
static void chv_hdmi_post_disable(struct intel_encoder *encoder) static void chv_hdmi_post_disable(struct intel_encoder *encoder)
@ -1944,138 +1741,12 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
struct intel_crtc *intel_crtc = struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc); to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
enum dpio_channel ch = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
int data, i, stagger;
u32 val;
mutex_lock(&dev_priv->sb_lock); chv_phy_pre_encoder_enable(encoder);
/* allow hardware to manage TX FIFO reset source */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
/* Program Tx latency optimal setting */
for (i = 0; i < 4; i++) {
/* Set the upar bit */
data = (i == 1) ? 0x0 : 0x1;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
data << DPIO_UPAR_SHIFT);
}
/* Data lane stagger programming */
if (intel_crtc->config->port_clock > 270000)
stagger = 0x18;
else if (intel_crtc->config->port_clock > 135000)
stagger = 0xd;
else if (intel_crtc->config->port_clock > 67500)
stagger = 0x7;
else if (intel_crtc->config->port_clock > 33750)
stagger = 0x4;
else
stagger = 0x2;
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
DPIO_TX1_STAGGER_MASK(0x1f) |
DPIO_TX1_STAGGER_MULT(6) |
DPIO_TX2_STAGGER_MULT(0));
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
DPIO_TX1_STAGGER_MASK(0x1f) |
DPIO_TX1_STAGGER_MULT(7) |
DPIO_TX2_STAGGER_MULT(5));
/* Deassert data lane reset */
chv_data_lane_soft_reset(encoder, false);
/* Clear calc init */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
/* FIXME: Program the support xxx V-dB */ /* FIXME: Program the support xxx V-dB */
/* Use 800mV-0dB */ /* Use 800mV-0dB */
for (i = 0; i < 4; i++) { chv_set_phy_signal_level(encoder, 128, 102, false);
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
val &= ~DPIO_SWING_DEEMPH9P5_MASK;
val |= 128 << DPIO_SWING_DEEMPH9P5_SHIFT;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
}
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
val &= ~DPIO_SWING_MARGIN000_MASK;
val |= 102 << DPIO_SWING_MARGIN000_SHIFT;
/*
* Supposedly this value shouldn't matter when unique transition
* scale is disabled, but in fact it does matter. Let's just
* always program the same value and hope it's OK.
*/
val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
}
/*
* The document said it needs to set bit 27 for ch0 and bit 26
* for ch1. Might be a typo in the doc.
* For now, for this unique transition scale selection, set bit
* 27 for ch0 and ch1.
*/
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
}
/* Start swing calculation */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
mutex_unlock(&dev_priv->sb_lock);
intel_hdmi->set_infoframes(&encoder->base, intel_hdmi->set_infoframes(&encoder->base,
intel_crtc->config->has_hdmi_sink, intel_crtc->config->has_hdmi_sink,
@ -2086,10 +1757,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
vlv_wait_port_ready(dev_priv, dport, 0x0); vlv_wait_port_ready(dev_priv, dport, 0x0);
/* Second common lane will stay alive on its own now */ /* Second common lane will stay alive on its own now */
if (dport->release_cl2_override) { chv_phy_release_cl2_override(encoder);
chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
dport->release_cl2_override = false;
}
} }
static void intel_hdmi_destroy(struct drm_connector *connector) static void intel_hdmi_destroy(struct drm_connector *connector)


@@ -220,7 +220,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
}
}
if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
+ dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
intel_runtime_pm_put(dev_priv);
@@ -346,7 +346,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
/**
* intel_hpd_irq_handler - main hotplug irq handler
- * @dev: drm device
+ * @dev_priv: drm_i915_private
* @pin_mask: a mask of hpd pins that have triggered the irq
* @long_mask: a mask of hpd pins that may be long hpd pulses
*
@@ -360,10 +360,9 @@ static void i915_hotplug_work_func(struct work_struct *work)
* Here, we do hotplug irq storm detection and mitigation, and pass further
* processing to appropriate bottom halves.
*/
- void intel_hpd_irq_handler(struct drm_device *dev,
+ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int i;
enum port port;
bool storm_detected = false;
@@ -407,7 +406,7 @@ void intel_hpd_irq_handler(struct drm_device *dev,
* hotplug bits itself. So only WARN about unexpected
* interrupts on saner platforms.
*/
- WARN_ONCE(!HAS_GMCH_DISPLAY(dev),
+ WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv),
"Received HPD interrupt on pin %d although disabled\n", i);
continue;
}
@@ -427,7 +426,7 @@ void intel_hpd_irq_handler(struct drm_device *dev,
}
if (storm_detected)
- dev_priv->display.hpd_irq_setup(dev);
+ dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock(&dev_priv->irq_lock);
/*
@@ -485,7 +484,7 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
*/
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
+ dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}

File diff suppressed because it is too large


@@ -101,8 +101,6 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
void intel_lr_context_free(struct intel_context *ctx);
uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
- int intel_lr_context_deferred_alloc(struct intel_context *ctx,
- struct intel_engine_cs *engine);
void intel_lr_context_unpin(struct intel_context *ctx,
struct intel_engine_cs *engine);
@@ -113,16 +111,14 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
struct intel_engine_cs *engine);
- u32 intel_execlists_ctx_id(struct intel_context *ctx,
- struct intel_engine_cs *engine);
/* Execlists */
- int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
+ int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
+ int enable_execlists);
struct i915_execbuffer_params;
int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
- void intel_execlists_retire_requests(struct intel_engine_cs *engine);
+ void intel_execlists_cancel_requests(struct intel_engine_cs *engine);
#endif /* _INTEL_LRC_H_ */


@@ -190,7 +190,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
/* Set the dithering flag on LVDS as needed, note that there is no
* special lvds dither control bit on pch-split platforms, dithering is
* only controlled through the PIPECONF reg. */
- if (INTEL_INFO(dev)->gen == 4) {
+ if (IS_GEN4(dev_priv)) {
/* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels. */
if (crtc->config->dither && crtc->config->pipe_bpp == 18)


@@ -189,7 +189,7 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
*/
int intel_mocs_init_engine(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_mocs_table table;
unsigned int index;


@@ -574,10 +574,8 @@ static void asle_work(struct work_struct *work)
asle->aslc = aslc_stat;
}
- void intel_opregion_asle_intr(struct drm_device *dev)
+ void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->opregion.asle)
schedule_work(&dev_priv->opregion.asle_work);
} }


@ -168,7 +168,7 @@ struct overlay_registers {
}; };
struct intel_overlay { struct intel_overlay {
struct drm_device *dev; struct drm_i915_private *i915;
struct intel_crtc *crtc; struct intel_crtc *crtc;
struct drm_i915_gem_object *vid_bo; struct drm_i915_gem_object *vid_bo;
struct drm_i915_gem_object *old_vid_bo; struct drm_i915_gem_object *old_vid_bo;
@ -190,15 +190,15 @@ struct intel_overlay {
static struct overlay_registers __iomem * static struct overlay_registers __iomem *
intel_overlay_map_regs(struct intel_overlay *overlay) intel_overlay_map_regs(struct intel_overlay *overlay)
{ {
struct drm_i915_private *dev_priv = to_i915(overlay->dev); struct drm_i915_private *dev_priv = overlay->i915;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct overlay_registers __iomem *regs; struct overlay_registers __iomem *regs;
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr; regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
else else
regs = io_mapping_map_wc(ggtt->mappable, regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
i915_gem_obj_ggtt_offset(overlay->reg_bo)); overlay->flip_addr,
PAGE_SIZE);
return regs; return regs;
} }
@ -206,7 +206,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
static void intel_overlay_unmap_regs(struct intel_overlay *overlay, static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
struct overlay_registers __iomem *regs) struct overlay_registers __iomem *regs)
{ {
if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
io_mapping_unmap(regs); io_mapping_unmap(regs);
} }
@ -232,14 +232,13 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
/* overlay needs to be disable in OCMD reg */ /* overlay needs to be disable in OCMD reg */
static int intel_overlay_on(struct intel_overlay *overlay) static int intel_overlay_on(struct intel_overlay *overlay)
{ {
struct drm_device *dev = overlay->dev; struct drm_i915_private *dev_priv = overlay->i915;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req; struct drm_i915_gem_request *req;
int ret; int ret;
WARN_ON(overlay->active); WARN_ON(overlay->active);
WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
req = i915_gem_request_alloc(engine, NULL); req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req)) if (IS_ERR(req))
@ -266,8 +265,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
static int intel_overlay_continue(struct intel_overlay *overlay, static int intel_overlay_continue(struct intel_overlay *overlay,
bool load_polyphase_filter) bool load_polyphase_filter)
{ {
struct drm_device *dev = overlay->dev; struct drm_i915_private *dev_priv = overlay->i915;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req; struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr; u32 flip_addr = overlay->flip_addr;
@ -335,8 +333,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
/* overlay needs to be disabled in OCMD reg */ /* overlay needs to be disabled in OCMD reg */
static int intel_overlay_off(struct intel_overlay *overlay) static int intel_overlay_off(struct intel_overlay *overlay)
{ {
struct drm_device *dev = overlay->dev; struct drm_i915_private *dev_priv = overlay->i915;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req; struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr; u32 flip_addr = overlay->flip_addr;
@ -365,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
intel_ring_emit(engine, flip_addr); intel_ring_emit(engine, flip_addr);
intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */ /* turn overlay off */
if (IS_I830(dev)) { if (IS_I830(dev_priv)) {
/* Workaround: Don't disable the overlay fully, since otherwise /* Workaround: Don't disable the overlay fully, since otherwise
* it dies on the next OVERLAY_ON cmd. */ * it dies on the next OVERLAY_ON cmd. */
intel_ring_emit(engine, MI_NOOP); intel_ring_emit(engine, MI_NOOP);
@ -408,12 +405,11 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
*/ */
static int intel_overlay_release_old_vid(struct intel_overlay *overlay) static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{ {
struct drm_device *dev = overlay->dev; struct drm_i915_private *dev_priv = overlay->i915;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct intel_engine_cs *engine = &dev_priv->engine[RCS];
int ret; int ret;
WARN_ON(!mutex_is_locked(&dev->struct_mutex)); lockdep_assert_held(&dev_priv->dev->struct_mutex);
/* Only wait if there is actually an old frame to release to /* Only wait if there is actually an old frame to release to
* guarantee forward progress. * guarantee forward progress.
@ -537,10 +533,10 @@ static int uv_vsubsampling(u32 format)
} }
} }
static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width)
{ {
u32 mask, shift, ret; u32 mask, shift, ret;
if (IS_GEN2(dev)) { if (IS_GEN2(dev_priv)) {
mask = 0x1f; mask = 0x1f;
shift = 5; shift = 5;
} else { } else {
@ -548,7 +544,7 @@ static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
shift = 6; shift = 6;
} }
ret = ((offset + width + mask) >> shift) - (offset >> shift); ret = ((offset + width + mask) >> shift) - (offset >> shift);
if (!IS_GEN2(dev)) if (!IS_GEN2(dev_priv))
ret <<= 1; ret <<= 1;
ret -= 1; ret -= 1;
return ret << 2; return ret << 2;
@ -741,12 +737,12 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
int ret, tmp_width; int ret, tmp_width;
struct overlay_registers __iomem *regs; struct overlay_registers __iomem *regs;
bool scale_changed = false; bool scale_changed = false;
struct drm_device *dev = overlay->dev; struct drm_i915_private *dev_priv = overlay->i915;
u32 swidth, swidthsw, sheight, ostride; u32 swidth, swidthsw, sheight, ostride;
enum pipe pipe = overlay->crtc->pipe; enum pipe pipe = overlay->crtc->pipe;
WARN_ON(!mutex_is_locked(&dev->struct_mutex)); lockdep_assert_held(&dev_priv->dev->struct_mutex);
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex));
ret = intel_overlay_release_old_vid(overlay); ret = intel_overlay_release_old_vid(overlay);
if (ret != 0) if (ret != 0)
@ -769,7 +765,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin; goto out_unpin;
} }
oconfig = OCONF_CC_OUT_8BIT; oconfig = OCONF_CC_OUT_8BIT;
if (IS_GEN4(overlay->dev)) if (IS_GEN4(dev_priv))
oconfig |= OCONF_CSC_MODE_BT709; oconfig |= OCONF_CSC_MODE_BT709;
oconfig |= pipe == 0 ? oconfig |= pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B; OCONF_PIPE_A : OCONF_PIPE_B;
@ -796,7 +792,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
tmp_width = params->src_w; tmp_width = params->src_w;
swidth = params->src_w; swidth = params->src_w;
swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width); swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
sheight = params->src_h; sheight = params->src_h;
iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y); iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
ostride = params->stride_Y; ostride = params->stride_Y;
@ -806,9 +802,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
int uv_vscale = uv_vsubsampling(params->format); int uv_vscale = uv_vsubsampling(params->format);
u32 tmp_U, tmp_V; u32 tmp_U, tmp_V;
swidth |= (params->src_w/uv_hscale) << 16; swidth |= (params->src_w/uv_hscale) << 16;
tmp_U = calc_swidthsw(overlay->dev, params->offset_U, tmp_U = calc_swidthsw(dev_priv, params->offset_U,
params->src_w/uv_hscale); params->src_w/uv_hscale);
tmp_V = calc_swidthsw(overlay->dev, params->offset_V, tmp_V = calc_swidthsw(dev_priv, params->offset_V,
params->src_w/uv_hscale); params->src_w/uv_hscale);
swidthsw |= max_t(u32, tmp_U, tmp_V) << 16; swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
sheight |= (params->src_h/uv_vscale) << 16; sheight |= (params->src_h/uv_vscale) << 16;
@ -840,8 +836,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
overlay->old_vid_bo = overlay->vid_bo; overlay->old_vid_bo = overlay->vid_bo;
overlay->vid_bo = new_bo; overlay->vid_bo = new_bo;
intel_frontbuffer_flip(dev, intel_frontbuffer_flip(dev_priv->dev, INTEL_FRONTBUFFER_OVERLAY(pipe));
INTEL_FRONTBUFFER_OVERLAY(pipe));
return 0; return 0;
@ -852,12 +847,12 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
int intel_overlay_switch_off(struct intel_overlay *overlay) int intel_overlay_switch_off(struct intel_overlay *overlay)
{ {
struct drm_i915_private *dev_priv = overlay->i915;
struct overlay_registers __iomem *regs; struct overlay_registers __iomem *regs;
struct drm_device *dev = overlay->dev;
int ret; int ret;
WARN_ON(!mutex_is_locked(&dev->struct_mutex)); lockdep_assert_held(&dev_priv->dev->struct_mutex);
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex));
ret = intel_overlay_recover_from_interrupt(overlay); ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0) if (ret != 0)
@ -897,15 +892,14 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
static void update_pfit_vscale_ratio(struct intel_overlay *overlay) static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
{ {
struct drm_device *dev = overlay->dev; struct drm_i915_private *dev_priv = overlay->i915;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pfit_control = I915_READ(PFIT_CONTROL); u32 pfit_control = I915_READ(PFIT_CONTROL);
u32 ratio; u32 ratio;
/* XXX: This is not the same logic as in the xorg driver, but more in /* XXX: This is not the same logic as in the xorg driver, but more in
* line with the intel documentation for the i965 * line with the intel documentation for the i965
*/ */
if (INTEL_INFO(dev)->gen >= 4) { if (INTEL_GEN(dev_priv) >= 4) {
/* on i965 use the PGM reg to read out the autoscaler values */ /* on i965 use the PGM reg to read out the autoscaler values */
ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965; ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
} else { } else {
@ -948,7 +942,7 @@ static int check_overlay_scaling(struct put_image_params *rec)
return 0; return 0;
} }
static int check_overlay_src(struct drm_device *dev, static int check_overlay_src(struct drm_i915_private *dev_priv,
struct drm_intel_overlay_put_image *rec, struct drm_intel_overlay_put_image *rec,
struct drm_i915_gem_object *new_bo) struct drm_i915_gem_object *new_bo)
{ {
@ -959,7 +953,7 @@ static int check_overlay_src(struct drm_device *dev,
u32 tmp; u32 tmp;
/* check src dimensions */ /* check src dimensions */
if (IS_845G(dev) || IS_I830(dev)) { if (IS_845G(dev_priv) || IS_I830(dev_priv)) {
if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY || if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
rec->src_width > IMAGE_MAX_WIDTH_LEGACY) rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
return -EINVAL; return -EINVAL;
@ -1011,14 +1005,14 @@ static int check_overlay_src(struct drm_device *dev,
return -EINVAL; return -EINVAL;
/* stride checking */ /* stride checking */
if (IS_I830(dev) || IS_845G(dev)) if (IS_I830(dev_priv) || IS_845G(dev_priv))
stride_mask = 255; stride_mask = 255;
else else
stride_mask = 63; stride_mask = 63;
if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
return -EINVAL; return -EINVAL;
if (IS_GEN4(dev) && rec->stride_Y < 512) if (IS_GEN4(dev_priv) && rec->stride_Y < 512)
return -EINVAL; return -EINVAL;
tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ? tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
@ -1063,13 +1057,13 @@ static int check_overlay_src(struct drm_device *dev,
* Return the pipe currently connected to the panel fitter, * Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use * or -1 if the panel fitter is not present or not in use
*/ */
static int intel_panel_fitter_pipe(struct drm_device *dev) static int intel_panel_fitter_pipe(struct drm_i915_private *dev_priv)
{ {
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pfit_control; u32 pfit_control;
/* i830 doesn't have a panel fitter */ /* i830 doesn't have a panel fitter */
if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev))) if (INTEL_GEN(dev_priv) <= 3 &&
(IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
return -1; return -1;
pfit_control = I915_READ(PFIT_CONTROL); pfit_control = I915_READ(PFIT_CONTROL);
@ -1079,15 +1073,15 @@ static int intel_panel_fitter_pipe(struct drm_device *dev)
return -1; return -1;
/* 965 can place panel fitter on either pipe */ /* 965 can place panel fitter on either pipe */
if (IS_GEN4(dev)) if (IS_GEN4(dev_priv))
return (pfit_control >> 29) & 0x3; return (pfit_control >> 29) & 0x3;
/* older chips can only use pipe 1 */ /* older chips can only use pipe 1 */
return 1; return 1;
} }
int intel_overlay_put_image(struct drm_device *dev, void *data, int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file_priv)
{ {
struct drm_intel_overlay_put_image *put_image_rec = data; struct drm_intel_overlay_put_image *put_image_rec = data;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
@ -1162,7 +1156,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
/* line too wide, i.e. one-line-mode */ /* line too wide, i.e. one-line-mode */
if (mode->hdisplay > 1024 && if (mode->hdisplay > 1024 &&
intel_panel_fitter_pipe(dev) == crtc->pipe) { intel_panel_fitter_pipe(dev_priv) == crtc->pipe) {
overlay->pfit_active = true; overlay->pfit_active = true;
update_pfit_vscale_ratio(overlay); update_pfit_vscale_ratio(overlay);
} else } else
@ -1196,7 +1190,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
goto out_unlock; goto out_unlock;
} }
ret = check_overlay_src(dev, put_image_rec, new_bo); ret = check_overlay_src(dev_priv, put_image_rec, new_bo);
if (ret != 0) if (ret != 0)
goto out_unlock; goto out_unlock;
params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK; params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
@ -1284,8 +1278,8 @@ static int check_gamma(struct drm_intel_overlay_attrs *attrs)
return 0; return 0;
} }
int intel_overlay_attrs(struct drm_device *dev, void *data, int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file_priv)
{ {
struct drm_intel_overlay_attrs *attrs = data; struct drm_intel_overlay_attrs *attrs = data;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
@ -1309,7 +1303,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
attrs->contrast = overlay->contrast; attrs->contrast = overlay->contrast;
attrs->saturation = overlay->saturation; attrs->saturation = overlay->saturation;
if (!IS_GEN2(dev)) { if (!IS_GEN2(dev_priv)) {
attrs->gamma0 = I915_READ(OGAMC0); attrs->gamma0 = I915_READ(OGAMC0);
attrs->gamma1 = I915_READ(OGAMC1); attrs->gamma1 = I915_READ(OGAMC1);
attrs->gamma2 = I915_READ(OGAMC2); attrs->gamma2 = I915_READ(OGAMC2);
@ -1341,7 +1335,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
intel_overlay_unmap_regs(overlay, regs); intel_overlay_unmap_regs(overlay, regs);
if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
if (IS_GEN2(dev)) if (IS_GEN2(dev_priv))
goto out_unlock; goto out_unlock;
if (overlay->active) { if (overlay->active) {
@ -1371,37 +1365,36 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
return ret; return ret;
} }
void intel_setup_overlay(struct drm_device *dev) void intel_setup_overlay(struct drm_i915_private *dev_priv)
{ {
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_overlay *overlay; struct intel_overlay *overlay;
struct drm_i915_gem_object *reg_bo; struct drm_i915_gem_object *reg_bo;
struct overlay_registers __iomem *regs; struct overlay_registers __iomem *regs;
int ret; int ret;
if (!HAS_OVERLAY(dev)) if (!HAS_OVERLAY(dev_priv))
return; return;
overlay = kzalloc(sizeof(*overlay), GFP_KERNEL); overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay) if (!overlay)
return; return;
mutex_lock(&dev->struct_mutex); mutex_lock(&dev_priv->dev->struct_mutex);
if (WARN_ON(dev_priv->overlay)) if (WARN_ON(dev_priv->overlay))
goto out_free; goto out_free;
overlay->dev = dev; overlay->i915 = dev_priv;
reg_bo = NULL; reg_bo = NULL;
if (!OVERLAY_NEEDS_PHYSICAL(dev)) if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE); reg_bo = i915_gem_object_create_stolen(dev_priv->dev, PAGE_SIZE);
if (reg_bo == NULL)
reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
if (reg_bo == NULL) if (reg_bo == NULL)
reg_bo = i915_gem_object_create(dev_priv->dev, PAGE_SIZE);
if (IS_ERR(reg_bo))
goto out_free; goto out_free;
overlay->reg_bo = reg_bo; overlay->reg_bo = reg_bo;
-	if (OVERLAY_NEEDS_PHYSICAL(dev)) {
+	if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) {
 		ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
 		if (ret) {
 			DRM_ERROR("failed to attach phys overlay regs\n");
@@ -1441,25 +1434,23 @@ void intel_setup_overlay(struct drm_device *dev)
 	intel_overlay_unmap_regs(overlay, regs);
 	dev_priv->overlay = overlay;
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev_priv->dev->struct_mutex);
 	DRM_INFO("initialized overlay support\n");
 	return;
 out_unpin_bo:
-	if (!OVERLAY_NEEDS_PHYSICAL(dev))
+	if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
 		i915_gem_object_ggtt_unpin(reg_bo);
 out_free_bo:
 	drm_gem_object_unreference(&reg_bo->base);
 out_free:
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev_priv->dev->struct_mutex);
 	kfree(overlay);
 	return;
 }
-void intel_cleanup_overlay(struct drm_device *dev)
+void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	if (!dev_priv->overlay)
 		return;
@@ -1482,18 +1473,17 @@ struct intel_overlay_error_state {
 static struct overlay_registers __iomem *
 intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
 {
-	struct drm_i915_private *dev_priv = to_i915(overlay->dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct drm_i915_private *dev_priv = overlay->i915;
 	struct overlay_registers __iomem *regs;
-	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+	if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
 		/* Cast to make sparse happy, but it's wc memory anyway, so
 		 * equivalent to the wc io mapping on X86. */
 		regs = (struct overlay_registers __iomem *)
 			overlay->reg_bo->phys_handle->vaddr;
 	else
-		regs = io_mapping_map_atomic_wc(ggtt->mappable,
-						i915_gem_obj_ggtt_offset(overlay->reg_bo));
+		regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
+						overlay->flip_addr);
 	return regs;
 }
@@ -1501,15 +1491,13 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
 static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
 					    struct overlay_registers __iomem *regs)
 {
-	if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+	if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
 		io_mapping_unmap_atomic(regs);
 }
 struct intel_overlay_error_state *
-intel_overlay_capture_error_state(struct drm_device *dev)
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_overlay *overlay = dev_priv->overlay;
 	struct intel_overlay_error_state *error;
 	struct overlay_registers __iomem *regs;
@@ -1523,10 +1511,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
 	error->dovsta = I915_READ(DOVSTA);
 	error->isr = I915_READ(ISR);
-	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-		error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
-	else
-		error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
+	error->base = overlay->flip_addr;
 	regs = intel_overlay_map_regs_atomic(overlay);
 	if (!regs)

@@ -1724,6 +1724,14 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
 		container_of(panel, struct intel_connector, panel);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
+	    intel_dp_aux_init_backlight_funcs(connector) == 0)
+		return;
+	if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
+	    intel_dsi_dcs_init_backlight_funcs(connector) == 0)
+		return;
 	if (IS_BROXTON(dev_priv)) {
 		panel->backlight.setup = bxt_setup_backlight;
 		panel->backlight.enable = bxt_enable_backlight;

File diff suppressed because it is too large

@@ -176,7 +176,6 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t aux_clock_divider;
 	i915_reg_t aux_ctl_reg;
-	int precharge = 0x3;
 	static const uint8_t aux_msg[] = {
 		[0] = DP_AUX_NATIVE_WRITE << 4,
 		[1] = DP_SET_POWER >> 8,
@@ -185,6 +184,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 		[4] = DP_SET_POWER_D0,
 	};
 	enum port port = dig_port->port;
+	u32 aux_ctl;
 	int i;
 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
@@ -197,6 +197,13 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 			   DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
 			   DP_AUX_FRAME_SYNC_ENABLE);
+	if (dev_priv->psr.link_standby)
+		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
+	else
+		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+				   DP_PSR_ENABLE);
 	aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
 	/* Setup AUX registers */
@@ -204,33 +211,9 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 		I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
 			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
-	if (INTEL_INFO(dev)->gen >= 9) {
-		uint32_t val;
-		val = I915_READ(aux_ctl_reg);
-		val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK;
-		val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
-		val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
-		val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
-		/* Use hardcoded data values for PSR, frame sync and GTC */
-		val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
-		val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL;
-		val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL;
-		I915_WRITE(aux_ctl_reg, val);
-	} else {
-		I915_WRITE(aux_ctl_reg,
-			   DP_AUX_CH_CTL_TIME_OUT_400us |
-			   (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
-			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
-			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
-	}
-	if (dev_priv->psr.link_standby)
-		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
-				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
-	else
-		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
-				   DP_PSR_ENABLE);
+	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
+					     aux_clock_divider);
+	I915_WRITE(aux_ctl_reg, aux_ctl);
 }
 static void vlv_psr_enable_source(struct intel_dp *intel_dp)
@@ -272,14 +255,14 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t max_sleep_time = 0x1f;
-	/*
-	 * Let's respect VBT in case VBT asks a higher idle_frame value.
-	 * Let's use 6 as the minimum to cover all known cases including
-	 * the off-by-one issue that HW has in some cases. Also there are
-	 * cases where sink should be able to train
-	 * with the 5 or 6 idle patterns.
+	/* Lately it was identified that depending on panel idle frame count
+	 * calculated at HW can be off by 1. So let's use what came
+	 * from VBT + 1.
+	 * There are also other cases where panel demands at least 4
+	 * but VBT is not being set. To cover these 2 cases lets use
+	 * at least 5 when VBT isn't set to be on the safest side.
 	 */
-	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+	uint32_t idle_frames = dev_priv->vbt.psr.idle_frames + 1;
 	uint32_t val = EDP_PSR_ENABLE;
 	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;

File diff suppressed because it is too large

@@ -107,7 +107,6 @@ struct intel_ringbuffer {
 	int space;
 	int size;
 	int effective_size;
-	int reserved_size;
 	/** We track the position of the requests in the ring buffer, and
 	 * when each is retired we increment last_retired_head as the GPU
@@ -142,7 +141,8 @@ struct i915_ctx_workarounds {
 	struct drm_i915_gem_object *obj;
 };
 struct intel_engine_cs {
+	struct drm_i915_private *i915;
 	const char *name;
 	enum intel_engine_id {
 		RCS = 0,
@@ -157,7 +157,6 @@ struct intel_engine_cs {
 	unsigned int hw_id;
 	unsigned int guc_id; /* XXX same as hw_id? */
 	u32 mmio_base;
-	struct drm_device *dev;
 	struct intel_ringbuffer *buffer;
 	struct list_head buffers;
@@ -268,7 +267,6 @@ struct intel_engine_cs {
 	struct tasklet_struct irq_tasklet;
 	spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
 	struct list_head execlist_queue;
-	struct list_head execlist_retired_req_list;
 	unsigned int fw_domains;
 	unsigned int next_context_status_buffer;
 	unsigned int idle_lite_restore_wa;
@@ -352,7 +350,7 @@ struct intel_engine_cs {
 static inline bool
 intel_engine_initialized(struct intel_engine_cs *engine)
 {
-	return engine->dev != NULL;
+	return engine->i915 != NULL;
 }
 static inline unsigned
@@ -427,7 +425,7 @@ intel_write_status_page(struct intel_engine_cs *engine,
 struct intel_ringbuffer *
 intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
-int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
 				     struct intel_ringbuffer *ringbuf);
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
 void intel_ringbuffer_free(struct intel_ringbuffer *ring);
@@ -486,26 +484,15 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
 /*
  * Arbitrary size for largest possible 'add request' sequence. The code paths
  * are complex and variable. Empirical measurement shows that the worst case
- * is ILK at 136 words. Reserving too much is better than reserving too little
- * as that allows for corner cases that might have been missed. So the figure
- * has been rounded up to 160 words.
+ * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
+ * we need to allocate double the largest single packet within that emission
+ * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
  */
-#define MIN_SPACE_FOR_ADD_REQUEST 160
-/*
- * Reserve space in the ring to guarantee that the i915_add_request() call
- * will always have sufficient room to do its stuff. The request creation
- * code calls this automatically.
- */
-void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
-/* Cancel the reservation, e.g. because the request is being discarded. */
-void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
-/* Use the reserved space - for use by i915_add_request() only. */
-void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
-/* Finish with the reserved space - for use by i915_add_request() only. */
-void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
-/* Legacy ringbuffer specific portion of reservation code: */
-int intel_ring_reserve_space(struct drm_i915_gem_request *request);
+#define MIN_SPACE_FOR_ADD_REQUEST 336
+static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
+{
+	return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
+}
 #endif /* _INTEL_RINGBUFFER_H_ */
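
The new MIN_SPACE_FOR_ADD_REQUEST value follows from the arithmetic in the updated comment: the BDW worst case of 6 + 6 + 36 dwords is 192 bytes, and doubling the largest packet to cover tail wraparound gives 6 + 6 + 72 = 84 dwords, i.e. 336 bytes. A minimal, standalone C11 check of that arithmetic (not part of the patch):

#include <assert.h>

/* 6 + 6 + 36 dwords at 4 bytes each is the quoted BDW worst case. */
static_assert((6 + 6 + 36) * 4 == 192, "worst-case add-request emission");

/* Doubling the largest packet for tail wraparound gives the new figure. */
static_assert((6 + 6 + 72) * 4 == 336, "matches MIN_SPACE_FOR_ADD_REQUEST");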

@@ -948,6 +948,11 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
 	 */
 	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
 	I915_WRITE(CBR1_VLV, 0);
+	WARN_ON(dev_priv->rawclk_freq == 0);
+	I915_WRITE(RAWCLK_FREQ_VLV,
+		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
 }
 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)

@@ -80,9 +80,7 @@ static int usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
  */
 void intel_pipe_update_start(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->base.dev;
 	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
-	enum pipe pipe = crtc->pipe;
 	long timeout = msecs_to_jiffies_timeout(1);
 	int scanline, min, max, vblank_start;
 	wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
@@ -139,8 +137,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
 	crtc->debug.scanline_start = scanline;
 	crtc->debug.start_vbl_time = ktime_get();
-	crtc->debug.start_vbl_count =
-		dev->driver->get_vblank_counter(dev, pipe);
+	crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
 	trace_i915_pipe_update_vblank_evaded(crtc);
 }
@@ -154,14 +151,19 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
  * re-enables interrupts and verifies the update was actually completed
  * before a vblank using the value of @start_vbl_count.
  */
-void intel_pipe_update_end(struct intel_crtc *crtc)
+void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work)
 {
-	struct drm_device *dev = crtc->base.dev;
 	enum pipe pipe = crtc->pipe;
 	int scanline_end = intel_get_crtc_scanline(crtc);
-	u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
+	u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
 	ktime_t end_vbl_time = ktime_get();
+	if (work) {
+		work->flip_queued_vblank = end_vbl_count;
+		smp_mb__before_atomic();
+		atomic_set(&work->pending, 1);
+	}
 	trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
 	local_irq_enable();
@@ -203,8 +205,6 @@ skl_update_plane(struct drm_plane *drm_plane,
 	uint32_t y = plane_state->src.y1 >> 16;
 	uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
 	uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
-	const struct intel_scaler *scaler =
-		&crtc_state->scaler_state.scalers[plane_state->scaler_id];
 	plane_ctl = PLANE_CTL_ENABLE |
 		    PLANE_CTL_PIPE_GAMMA_ENABLE |
@@ -260,13 +260,16 @@ skl_update_plane(struct drm_plane *drm_plane,
 	/* program plane scaler */
 	if (plane_state->scaler_id >= 0) {
-		uint32_t ps_ctrl = 0;
 		int scaler_id = plane_state->scaler_id;
+		const struct intel_scaler *scaler;
 		DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
 			      PS_PLANE_SEL(plane));
-		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode;
-		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
+		scaler = &crtc_state->scaler_state.scalers[scaler_id];
+		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
+			   PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode);
 		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
 		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
 		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id),

@@ -248,9 +248,9 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
-void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
+void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
+				  bool restore)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long irqflags;
 	struct intel_uncore_forcewake_domain *domain;
 	int retry_count = 100;
@@ -304,7 +304,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
 		if (fw)
 			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
-		if (IS_GEN6(dev) || IS_GEN7(dev))
+		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
 			dev_priv->uncore.fifo_count =
 				fifo_free_entries(dev_priv);
 	}
@@ -400,43 +400,42 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
 	return false;
 }
-static void __intel_uncore_early_sanitize(struct drm_device *dev,
+static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
 					  bool restore_forcewake)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	/* clear out unclaimed reg detection bit */
 	if (check_for_unclaimed_mmio(dev_priv))
 		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
 	/* clear out old GT FIFO errors */
-	if (IS_GEN6(dev) || IS_GEN7(dev))
+	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
 		__raw_i915_write32(dev_priv, GTFIFODBG,
 				   __raw_i915_read32(dev_priv, GTFIFODBG));
 	/* WaDisableShadowRegForCpd:chv */
-	if (IS_CHERRYVIEW(dev)) {
+	if (IS_CHERRYVIEW(dev_priv)) {
 		__raw_i915_write32(dev_priv, GTFIFOCTL,
 				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
 				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
 				   GT_FIFO_CTL_RC6_POLICY_STALL);
 	}
-	intel_uncore_forcewake_reset(dev, restore_forcewake);
+	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
 }
-void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
+void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
+				 bool restore_forcewake)
 {
-	__intel_uncore_early_sanitize(dev, restore_forcewake);
-	i915_check_and_clear_faults(dev);
+	__intel_uncore_early_sanitize(dev_priv, restore_forcewake);
+	i915_check_and_clear_faults(dev_priv);
 }
-void intel_uncore_sanitize(struct drm_device *dev)
+void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
 {
-	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
+	i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
 	/* BIOS often leaves RC6 enabled, but disable it for hw init */
-	intel_disable_gt_powersave(dev);
+	intel_disable_gt_powersave(dev_priv);
 }
 static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
@@ -1233,14 +1232,12 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
 	fw_domain_reset(d);
 }
-static void intel_uncore_fw_domains_init(struct drm_device *dev)
+static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	if (INTEL_INFO(dev_priv)->gen <= 5)
 		return;
-	if (IS_GEN9(dev)) {
+	if (IS_GEN9(dev_priv)) {
 		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
 		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
@@ -1251,9 +1248,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
 			       FORCEWAKE_ACK_BLITTER_GEN9);
 		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
 			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
-	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
-		if (!IS_CHERRYVIEW(dev))
+		if (!IS_CHERRYVIEW(dev_priv))
 			dev_priv->uncore.funcs.force_wake_put =
 				fw_domains_put_with_fifo;
 		else
@@ -1262,17 +1259,17 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
 			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
 		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
 			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
-	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		dev_priv->uncore.funcs.force_wake_get =
 			fw_domains_get_with_thread_status;
-		if (IS_HASWELL(dev))
+		if (IS_HASWELL(dev_priv))
 			dev_priv->uncore.funcs.force_wake_put =
 				fw_domains_put_with_fifo;
 		else
 			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
-	} else if (IS_IVYBRIDGE(dev)) {
+	} else if (IS_IVYBRIDGE(dev_priv)) {
 		u32 ecobus;
 		/* IVB configs may use multi-threaded forcewake */
@@ -1302,11 +1299,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
-		mutex_lock(&dev->struct_mutex);
 		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
 		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
 		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
-		mutex_unlock(&dev->struct_mutex);
 		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
 			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
@@ -1314,7 +1309,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
 			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
 				       FORCEWAKE, FORCEWAKE_ACK);
 		}
-	} else if (IS_GEN6(dev)) {
+	} else if (IS_GEN6(dev_priv)) {
 		dev_priv->uncore.funcs.force_wake_get =
 			fw_domains_get_with_thread_status;
 		dev_priv->uncore.funcs.force_wake_put =
@@ -1327,26 +1322,24 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
 	WARN_ON(dev_priv->uncore.fw_domains == 0);
 }
-void intel_uncore_init(struct drm_device *dev)
+void intel_uncore_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	i915_check_vgpu(dev);
+	i915_check_vgpu(dev_priv);
 	intel_uncore_edram_detect(dev_priv);
-	intel_uncore_fw_domains_init(dev);
-	__intel_uncore_early_sanitize(dev, false);
+	intel_uncore_fw_domains_init(dev_priv);
+	__intel_uncore_early_sanitize(dev_priv, false);
 	dev_priv->uncore.unclaimed_mmio_check = 1;
-	switch (INTEL_INFO(dev)->gen) {
+	switch (INTEL_INFO(dev_priv)->gen) {
 	default:
 	case 9:
 		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
 		ASSIGN_READ_MMIO_VFUNCS(gen9);
 		break;
 	case 8:
-		if (IS_CHERRYVIEW(dev)) {
+		if (IS_CHERRYVIEW(dev_priv)) {
 			ASSIGN_WRITE_MMIO_VFUNCS(chv);
 			ASSIGN_READ_MMIO_VFUNCS(chv);
@@ -1357,13 +1350,13 @@ void intel_uncore_init(struct drm_device *dev)
 		break;
 	case 7:
 	case 6:
-		if (IS_HASWELL(dev)) {
+		if (IS_HASWELL(dev_priv)) {
 			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
 		} else {
 			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
 		}
-		if (IS_VALLEYVIEW(dev)) {
+		if (IS_VALLEYVIEW(dev_priv)) {
 			ASSIGN_READ_MMIO_VFUNCS(vlv);
 		} else {
 			ASSIGN_READ_MMIO_VFUNCS(gen6);
@@ -1381,24 +1374,24 @@ void intel_uncore_init(struct drm_device *dev)
 		break;
 	}
-	if (intel_vgpu_active(dev)) {
+	if (intel_vgpu_active(dev_priv)) {
 		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
 		ASSIGN_READ_MMIO_VFUNCS(vgpu);
 	}
-	i915_check_and_clear_faults(dev);
+	i915_check_and_clear_faults(dev_priv);
 }
 #undef ASSIGN_WRITE_MMIO_VFUNCS
 #undef ASSIGN_READ_MMIO_VFUNCS
-void intel_uncore_fini(struct drm_device *dev)
+void intel_uncore_fini(struct drm_i915_private *dev_priv)
 {
 	/* Paranoia: make sure we have disabled everything before we exit. */
-	intel_uncore_sanitize(dev);
-	intel_uncore_forcewake_reset(dev, false);
+	intel_uncore_sanitize(dev_priv);
+	intel_uncore_forcewake_reset(dev_priv, false);
 }
-#define GEN_RANGE(l, h) GENMASK(h, l)
+#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)
 static const struct register_whitelist {
 	i915_reg_t offset_ldw, offset_udw;
@@ -1423,7 +1416,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
 	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
 		if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
-		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+		    (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask))
 			break;
 	}
@@ -1467,83 +1460,47 @@ int i915_reg_read_ioctl(struct drm_device *dev,
 	return ret;
 }
-int i915_get_reset_stats_ioctl(struct drm_device *dev,
-			       void *data, struct drm_file *file)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_reset_stats *args = data;
-	struct i915_ctx_hang_stats *hs;
-	struct intel_context *ctx;
-	int ret;
-	if (args->flags || args->pad)
-		return -EINVAL;
-	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
-		return -EPERM;
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
-	if (IS_ERR(ctx)) {
-		mutex_unlock(&dev->struct_mutex);
-		return PTR_ERR(ctx);
-	}
-	hs = &ctx->hang_stats;
-	if (capable(CAP_SYS_ADMIN))
-		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
-	else
-		args->reset_count = 0;
-	args->batch_active = hs->batch_active;
-	args->batch_pending = hs->batch_pending;
-	mutex_unlock(&dev->struct_mutex);
-	return 0;
-}
-static int i915_reset_complete(struct drm_device *dev)
+static int i915_reset_complete(struct pci_dev *pdev)
 {
 	u8 gdrst;
-	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
+	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
 	return (gdrst & GRDOM_RESET_STATUS) == 0;
 }
-static int i915_do_reset(struct drm_device *dev, unsigned engine_mask)
+static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 {
-	/* assert reset for at least 20 usec */
-	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
-	udelay(20);
-	pci_write_config_byte(dev->pdev, I915_GDRST, 0);
-	return wait_for(i915_reset_complete(dev), 500);
+	struct pci_dev *pdev = dev_priv->dev->pdev;
+	/* assert reset for at least 20 usec */
+	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+	udelay(20);
+	pci_write_config_byte(pdev, I915_GDRST, 0);
+	return wait_for(i915_reset_complete(pdev), 500);
 }
-static int g4x_reset_complete(struct drm_device *dev)
+static int g4x_reset_complete(struct pci_dev *pdev)
 {
 	u8 gdrst;
-	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
+	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
 	return (gdrst & GRDOM_RESET_ENABLE) == 0;
 }
-static int g33_do_reset(struct drm_device *dev, unsigned engine_mask)
+static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 {
-	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
-	return wait_for(g4x_reset_complete(dev), 500);
+	struct pci_dev *pdev = dev_priv->dev->pdev;
+	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+	return wait_for(g4x_reset_complete(pdev), 500);
 }
-static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
+static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = dev_priv->dev->pdev;
 	int ret;
-	pci_write_config_byte(dev->pdev, I915_GDRST,
+	pci_write_config_byte(pdev, I915_GDRST,
 			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
-	ret = wait_for(g4x_reset_complete(dev), 500);
+	ret = wait_for(g4x_reset_complete(pdev), 500);
 	if (ret)
 		return ret;
@@ -1551,9 +1508,9 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
 	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
 	POSTING_READ(VDECCLK_GATE_D);
-	pci_write_config_byte(dev->pdev, I915_GDRST,
+	pci_write_config_byte(pdev, I915_GDRST,
 			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
-	ret = wait_for(g4x_reset_complete(dev), 500);
+	ret = wait_for(g4x_reset_complete(pdev), 500);
 	if (ret)
 		return ret;
@@ -1561,14 +1518,14 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
 	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
 	POSTING_READ(VDECCLK_GATE_D);
-	pci_write_config_byte(dev->pdev, I915_GDRST, 0);
+	pci_write_config_byte(pdev, I915_GDRST, 0);
 	return 0;
 }
-static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask)
+static int ironlake_do_reset(struct drm_i915_private *dev_priv,
+			     unsigned engine_mask)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 	I915_WRITE(ILK_GDSR,
@@ -1612,7 +1569,7 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
 /**
  * gen6_reset_engines - reset individual engines
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
  *
  * This function will reset the individual engines that are set in engine_mask.
@@ -1623,9 +1580,9 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
 *
 * Returns 0 on success, nonzero on error.
 */
-static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask)
+static int gen6_reset_engines(struct drm_i915_private *dev_priv,
+			      unsigned engine_mask)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *engine;
 	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
 		[RCS] = GEN6_GRDOM_RENDER,
@@ -1647,7 +1604,7 @@ static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask)
 	ret = gen6_hw_domain_reset(dev_priv, hw_mask);
-	intel_uncore_forcewake_reset(dev, true);
+	intel_uncore_forcewake_reset(dev_priv, true);
 	return ret;
 }
@@ -1663,8 +1620,8 @@ static int wait_for_register_fw(struct drm_i915_private *dev_priv,
 static int gen8_request_engine_reset(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *dev_priv = engine->i915;
 	int ret;
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
 		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
@@ -1682,22 +1639,22 @@ static int gen8_request_engine_reset(struct intel_engine_cs *engine)
 static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
 		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
 }
-static int gen8_reset_engines(struct drm_device *dev, unsigned engine_mask)
+static int gen8_reset_engines(struct drm_i915_private *dev_priv,
+			      unsigned engine_mask)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *engine;
 	for_each_engine_masked(engine, dev_priv, engine_mask)
 		if (gen8_request_engine_reset(engine))
 			goto not_ready;
-	return gen6_reset_engines(dev, engine_mask);
+	return gen6_reset_engines(dev_priv, engine_mask);
 not_ready:
 	for_each_engine_masked(engine, dev_priv, engine_mask)
@@ -1706,35 +1663,35 @@ static int gen8_reset_engines(struct drm_device *dev, unsigned engine_mask)
 	return -EIO;
 }
-static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *,
-							   unsigned engine_mask)
+typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
+static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
 {
 	if (!i915.reset)
 		return NULL;
-	if (INTEL_INFO(dev)->gen >= 8)
+	if (INTEL_INFO(dev_priv)->gen >= 8)
 		return gen8_reset_engines;
-	else if (INTEL_INFO(dev)->gen >= 6)
+	else if (INTEL_INFO(dev_priv)->gen >= 6)
 		return gen6_reset_engines;
-	else if (IS_GEN5(dev))
+	else if (IS_GEN5(dev_priv))
 		return ironlake_do_reset;
-	else if (IS_G4X(dev))
+	else if (IS_G4X(dev_priv))
 		return g4x_do_reset;
-	else if (IS_G33(dev))
+	else if (IS_G33(dev_priv))
 		return g33_do_reset;
-	else if (INTEL_INFO(dev)->gen >= 3)
+	else if (INTEL_INFO(dev_priv)->gen >= 3)
 		return i915_do_reset;
 	else
 		return NULL;
 }
-int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask)
+int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	int (*reset)(struct drm_device *, unsigned);
+	reset_func reset;
 	int ret;
-	reset = intel_get_gpu_reset(dev);
+	reset = intel_get_gpu_reset(dev_priv);
 	if (reset == NULL)
 		return -ENODEV;
@@ -1742,15 +1699,15 @@ int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask)
 	 * request may be dropped and never completes (causing -EIO).
 	 */
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
-	ret = reset(dev, engine_mask);
+	ret = reset(dev_priv, engine_mask);
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 	return ret;
 }
-bool intel_has_gpu_reset(struct drm_device *dev)
+bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
 {
-	return intel_get_gpu_reset(dev) != NULL;
+	return intel_get_gpu_reset(dev_priv) != NULL;
 }
 int intel_guc_reset(struct drm_i915_private *dev_priv)
@@ -1802,10 +1759,10 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
 {
 	enum forcewake_domains fw_domains;
-	if (intel_vgpu_active(dev_priv->dev))
+	if (intel_vgpu_active(dev_priv))
 		return 0;
-	switch (INTEL_INFO(dev_priv)->gen) {
+	switch (INTEL_GEN(dev_priv)) {
 	case 9:
 		fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
 		break;
@@ -1842,10 +1799,10 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
 {
 	enum forcewake_domains fw_domains;
-	if (intel_vgpu_active(dev_priv->dev))
+	if (intel_vgpu_active(dev_priv))
 		return 0;
-	switch (INTEL_INFO(dev_priv)->gen) {
+	switch (INTEL_GEN(dev_priv)) {
 	case 9:
 		fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
 		break;
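
The GEN_RANGE change above pairs with the whitelist test moving from re-deriving 1 << gen to testing a precomputed gen_mask. A small userspace sketch of the same filtering logic, assuming gen_mask encodes BIT(gen - 1) as the shifted GENMASK arguments imply; the GENMASK definition here is a local stand-in, not the kernel's:

#include <stdio.h>

#define BIT(n)          (1u << (n))
#define GENMASK(h, l)   (((~0u) >> (31 - (h))) & (~0u << (l)))
#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)

int main(void)
{
	/* A hypothetical whitelist entry valid on gen6..gen9 parts. */
	unsigned int entry_gens = GEN_RANGE(6, 9);

	for (unsigned int gen = 4; gen <= 10; gen++) {
		unsigned int gen_mask = BIT(gen - 1);	/* assumed encoding */

		printf("gen%u: %s\n", gen,
		       (gen_mask & entry_gens) ? "matches" : "filtered");
	}
	return 0;
}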

@@ -446,10 +446,16 @@ struct bdb_lfp_backlight_data_entry {
 	u8 obsolete3;
 } __packed;
+struct bdb_lfp_backlight_control_method {
+	u8 type:4;
+	u8 controller:4;
+} __packed;
 struct bdb_lfp_backlight_data {
 	u8 entry_size;
 	struct bdb_lfp_backlight_data_entry data[16];
 	u8 level[16];
+	struct bdb_lfp_backlight_control_method backlight_control[16];
 } __packed;
 struct aimdb_header {

@@ -205,7 +205,9 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
 		goto free_uar;
 	}
-	uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT);
+	uar->bf_map = io_mapping_map_wc(priv->bf_mapping,
+					uar->index << PAGE_SHIFT,
+					PAGE_SIZE);
 	if (!uar->bf_map) {
 		err = -ENOMEM;
 		goto unamp_uar;

@@ -990,6 +990,7 @@ extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
 extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
 extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
 extern void drm_vblank_cleanup(struct drm_device *dev);
+extern u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
 extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
 extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,

@@ -622,6 +622,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
 #define DP_BRANCH_OUI_HEADER_SIZE 0xc
 #define DP_RECEIVER_CAP_SIZE 0xf
 #define EDP_PSR_RECEIVER_CAP_SIZE 2
+#define EDP_DISPLAY_CTL_CAP_SIZE 3
 void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
 void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);

@@ -92,4 +92,7 @@ extern bool i915_gpu_turbo_disable(void);
 #define I845_TSEG_SIZE_512K (2 << 1)
 #define I845_TSEG_SIZE_1M (3 << 1)
+#define INTEL_BSM 0x5c
+#define INTEL_BSM_MASK (0xFFFF << 20)
 #endif /* _I915_DRM_H_ */

@@ -100,14 +100,16 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
 }
 static inline void __iomem *
-io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
+io_mapping_map_wc(struct io_mapping *mapping,
+		  unsigned long offset,
+		  unsigned long size)
 {
 	resource_size_t phys_addr;
 	BUG_ON(offset >= mapping->size);
 	phys_addr = mapping->base + offset;
-	return ioremap_wc(phys_addr, PAGE_SIZE);
+	return ioremap_wc(phys_addr, size);
 }
 static inline void
@@ -155,7 +157,9 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
 /* Non-atomic map/unmap */
 static inline void __iomem *
-io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
+io_mapping_map_wc(struct io_mapping *mapping,
+		  unsigned long offset,
+		  unsigned long size)
 {
 	return ((char __force __iomem *) mapping) + offset;
 }
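
The io_mapping_map_wc() change makes the length of the write-combining mapping an explicit argument instead of an implicit PAGE_SIZE, which is why the mlx4 caller above gains two extra parameters. A hedged sketch of the adjustment any other caller would need; the wrapper name is illustrative and not from the patch:

#include <linux/io-mapping.h>

/* Preserve the old single-page behaviour by passing PAGE_SIZE explicitly. */
static void __iomem *map_one_page_wc(struct io_mapping *mapping,
				     unsigned long offset)
{
	return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}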