Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux

Pull drm updates from Dave Airlie:
 "Here's the main drm pull request for 4.7, it's been a busy one, and
  I've been a bit more distracted in real life this merge window.  Lots
  more ARM drivers, not sure if it'll ever end.  I think I've at least
  one more coming in the next merge window.

  But changes are all over the place, support for AMD Polaris GPUs is in
  here, some missing GM108 support for nouveau (found in some Lenovos),
  a bunch of MST and skylake fixes.

  I've also noticed a few fixes from Arnd in my inbox, that I'll try and
  get in asap, but I didn't think they should hold this up.

  New drivers:
   - Hisilicon kirin display driver
   - Mediatek MT8173 display driver
   - ARC PGU - bitstreamer on Synopsys ARC SDP boards
   - Allwinner A13 initial RGB output driver
   - Analogix driver for DisplayPort IP found in exynos and rockchip

  DRM Core:
   - UAPI headers fixes and C++ safety
   - DRM connector reference counting
   - DisplayID mode parsing for Dell 5K monitors
   - Removal of struct_mutex from drivers
   - Connector registration cleanups
   - MST robustness fixes
   - MAINTAINERS updates
   - Lockless GEM object freeing
   - Generic fbdev deferred IO support

  panel:
   - Support for a bunch of new panels

  i915:
   - VBT refactoring
   - PLL computation cleanups
   - DSI support for BXT
   - Color manager support
   - More atomic patches
   - GEM improvements
   - GuC fw loading fixes
   - DP detection fixes
   - SKL GPU hang fixes
   - Lots of BXT fixes

  radeon/amdgpu:
   - Initial Polaris support
   - GPUVM/Scheduler/Clock/Power improvements
   - ASYNC pageflip support
   - New mesa feature support

  nouveau:
   - GM108 support
   - Power sensor support improvements
   - GR init + ucode fixes.
   - Use GPU provided topology information

  vmwgfx:
   - Add host messaging support

  gma500:
   - Some cleanups and fixes

  atmel:
   - Bridge support
   - Async atomic commit support

  fsl-dcu:
   - Timing controller for LCD support
   - Pixel clock polarity support

  rcar-du:
   - Misc fixes

  exynos:
   - Pipeline clock support
   - Exynos5433 SoC support
   - HW trigger mode support
   - export HDMI_PHY clock
   - DECON5433 fixes
   - Use generic prime functions
   - use DMA mapping APIs

  rockchip:
   - Lots of little fixes

  vc4:
   - Render node support
   - Gamma ramp support
   - DPI output support

  msm:
   - Mostly cleanups and fixes
   - Conversion to generic struct fence

  etnaviv:
   - Fix for prime buffer handling
   - Allow hangcheck to be coalesced with other wakeups

  tegra:
   - Gamma table size fix"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (1050 commits)
  drm/edid: add displayid detailed 1 timings to the modelist. (v1.1)
  drm/edid: move displayid validation to it's own function.
  drm/displayid: Iterate over all DisplayID blocks
  drm/edid: move displayid tiled block parsing into separate function.
  drm: Nuke ->vblank_disable_allowed
  drm/vmwgfx: Report vmwgfx version to vmware.log
  drm/vmwgfx: Add VMWare host messaging capability
  drm/vmwgfx: Kill some lockdep warnings
  drm/nouveau/gr/gf100-: fix race condition in fecs/gpccs ucode
  drm/nouveau/core: recognise GM108 chipsets
  drm/nouveau/gr/gm107-: fix touching non-existent ppcs in attrib cb setup
  drm/nouveau/gr/gk104-: share implementation of ppc exception init
  drm/nouveau/gr/gk104-: move rop_active_fbps init to nonctx
  drm/nouveau/bios/pll: check BIT table version before trying to parse it
  drm/nouveau/bios/pll: prevent oops when limits table can't be parsed
  drm/nouveau/volt/gk104: round up in gk104_volt_set
  drm/nouveau/fb/gm200: setup mmu debug buffer registers at init()
  drm/nouveau/fb/gk20a,gm20b: setup mmu debug buffer registers at init()
  drm/nouveau/fb/gf100-: allocate mmu debug buffers
  drm/nouveau/fb: allow chipset-specific actions for oneinit()
  ...
Committed by Linus Torvalds on 2016-05-23 11:48:48 -07:00
commit 1d6da87a32
849 changed files with 103510 additions and 21076 deletions


@@ -1615,6 +1615,11 @@ void intel_crt_init(struct drm_device *dev)
!Pdrivers/gpu/drm/drm_fb_helper.c fbdev helpers
!Edrivers/gpu/drm/drm_fb_helper.c
!Iinclude/drm/drm_fb_helper.h
</sect2>
<sect2>
<title>Framebuffer CMA Helper Functions Reference</title>
!Pdrivers/gpu/drm/drm_fb_cma_helper.c framebuffer cma helper functions
!Edrivers/gpu/drm/drm_fb_cma_helper.c
</sect2>
<sect2>
<title>Display Port Helper Functions Reference</title>
@@ -1671,17 +1676,23 @@ void intel_crt_init(struct drm_device *dev)
!Pdrivers/gpu/drm/drm_crtc.c Tile group
</sect2>
<sect2>
<title>Bridges</title>
<sect3>
<title>Overview</title>
!Pdrivers/gpu/drm/drm_bridge.c overview
</sect3>
<sect3>
<title>Default bridge callback sequence</title>
!Pdrivers/gpu/drm/drm_bridge.c bridge callbacks
</sect3>
!Edrivers/gpu/drm/drm_bridge.c
</sect2>
<sect2>
<title>Panel Helper Reference</title>
!Iinclude/drm/drm_panel.h
!Edrivers/gpu/drm/drm_panel.c
!Pdrivers/gpu/drm/drm_panel.c drm panel
</sect2>
</sect1>
<!-- Internals: kms properties -->
@@ -1817,7 +1828,7 @@ void intel_crt_init(struct drm_device *dev)
</tr>
<tr>
<td rowspan="42" valign="top" >DRM</td>
<td valign="top" >Generic</td>
<td rowspan="2" valign="top" >Generic</td>
<td valign="top" >“rotation”</td>
<td valign="top" >BITMASK</td>
<td valign="top" >{ 0, "rotate-0" },
@@ -1832,6 +1843,13 @@ void intel_crt_init(struct drm_device *dev)
image along the specified axis prior to rotation</td>
</tr>
<tr>
<td valign="top" >“scaling mode”</td>
<td valign="top" >ENUM</td>
<td valign="top" >{ "None", "Full", "Center", "Full aspect" }</td>
<td valign="top" >Connector</td>
<td valign="top" >Supported by: amdgpu, gma500, i915, nouveau and radeon.</td>
</tr>
<tr>
<td rowspan="5" valign="top" >Connector</td>
<td valign="top" >“EDID”</td>
<td valign="top" >BLOB | IMMUTABLE</td>
@@ -2068,21 +2086,12 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >property to suggest an Y offset for a connector</td>
</tr>
<tr>
<td rowspan="8" valign="top" >Optional</td>
<td valign="top" >“scaling mode”</td>
<td valign="top" >ENUM</td>
<td valign="top" >{ "None", "Full", "Center", "Full aspect" }</td>
<td valign="top" >Connector</td>
<td valign="top" >TBD</td>
</tr>
<tr>
<td rowspan="7" valign="top" >Optional</td>
<td valign="top" >"aspect ratio"</td>
<td valign="top" >ENUM</td>
<td valign="top" >{ "None", "4:3", "16:9" }</td>
<td valign="top" >Connector</td>
<td valign="top" >DRM property to set aspect ratio from user space app.
This enum is made generic to allow addition of custom aspect
ratios.</td>
<td valign="top" >TDB</td>
</tr>
<tr>
<td valign="top" >“dirty”</td>
@@ -2153,7 +2162,11 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >ENUM</td>
<td valign="top" >{ "Automatic", "Full", "Limited 16:235" }</td>
<td valign="top" >Connector</td>
<td valign="top" >TBD</td>
<td valign="top" >When this property is set to Limited 16:235
and CTM is set, the hardware will be programmed with the
result of the multiplication of CTM by the limited range
matrix to ensure the pixels normally in the range 0..1.0 are
remapped to the range 16/255..235/255.</td>
</tr>
<tr>
<td valign="top" >“audio”</td>
@@ -3334,7 +3347,7 @@ int num_ioctls;</synopsis>
<title>Video BIOS Table (VBT)</title>
!Pdrivers/gpu/drm/i915/intel_bios.c Video BIOS Table (VBT)
!Idrivers/gpu/drm/i915/intel_bios.c
!Idrivers/gpu/drm/i915/intel_bios.h
!Idrivers/gpu/drm/i915/intel_vbt_defs.h
</sect2>
</sect1>


@@ -35,12 +35,22 @@ Optional properties for HDMI:
as an interrupt/status bit in the HDMI controller
itself). See bindings/pinctrl/brcm,bcm2835-gpio.txt
Required properties for DPI:
- compatible: Should be "brcm,bcm2835-dpi"
- reg: Physical base address and length of the registers
- clocks: a) core: The core clock the unit runs on
b) pixel: The pixel clock that feeds the pixelvalve
- port: Port node with a single endpoint connecting to the panel
device, as defined in [1]
Required properties for V3D:
- compatible: Should be "brcm,bcm2835-v3d"
- reg: Physical base address and length of the V3D's registers
- interrupts: The interrupt number
See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
[1] Documentation/devicetree/bindings/media/video-interfaces.txt
Example:
pixelvalve@7e807000 {
compatible = "brcm,bcm2835-pixelvalve2";
@@ -66,6 +76,22 @@ hdmi: hdmi@7e902000 {
clock-names = "pixel", "hdmi";
};
dpi: dpi@7e208000 {
compatible = "brcm,bcm2835-dpi";
reg = <0x7e208000 0x8c>;
clocks = <&clocks BCM2835_CLOCK_VPU>,
<&clocks BCM2835_CLOCK_DPI>;
clock-names = "core", "pixel";
#address-cells = <1>;
#size-cells = <0>;
port {
dpi_out: endpoint@0 {
remote-endpoint = <&panel_in>;
};
};
};
v3d: v3d@7ec00000 {
compatible = "brcm,bcm2835-v3d";
reg = <0x7ec00000 0x1000>;
@@ -75,3 +101,13 @@ v3d: v3d@7ec00000 {
vc4: gpu {
compatible = "brcm,bcm2835-vc4";
};
panel: panel {
compatible = "ontat,yx700wv03", "simple-panel";
port {
panel_in: endpoint {
remote-endpoint = <&dpi_out>;
};
};
};


@@ -0,0 +1,52 @@
Analogix Display Port bridge bindings
Required properties for dp-controller:
-compatible:
platform specific such as:
* "samsung,exynos5-dp"
* "rockchip,rk3288-dp"
-reg:
physical base address of the controller and length
of memory mapped region.
-interrupts:
interrupt combiner values.
-clocks:
from common clock binding: handle to dp clock.
-clock-names:
from common clock binding: Shall be "dp".
-interrupt-parent:
phandle to Interrupt combiner node.
-phys:
from general PHY binding: the phandle for the PHY device.
-phy-names:
from general PHY binding: Should be "dp".
Optional properties for dp-controller:
-force-hpd:
Indicates that the driver should force hotplug detection when HPD
detection fails; used for some eDP screens which don't have an HPD signal.
-hpd-gpios:
Hotplug detect GPIO.
Indicates which GPIO should be used for hotplug detection
-port@[X]: SoC specific port nodes with endpoint definitions as defined
in Documentation/devicetree/bindings/media/video-interfaces.txt,
please refer to the SoC specific binding document:
* Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
* Documentation/devicetree/bindings/video/analogix_dp-rockchip.txt
[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
-------------------------------------------------------------------------------
Example:
dp-controller {
compatible = "samsung,exynos5-dp";
reg = <0x145b0000 0x10000>;
interrupts = <10 3>;
interrupt-parent = <&combiner>;
clocks = <&clock 342>;
clock-names = "dp";
phys = <&dp_phy>;
phy-names = "dp";
};


@@ -5,7 +5,8 @@ Exynos series of SoCs which transfers the image data from a video memory
buffer to an external LCD interface.
Required properties:
- compatible: value should be "samsung,exynos5433-decon";
- compatible: value should be one of:
"samsung,exynos5433-decon", "samsung,exynos5433-decon-tv";
- reg: physical base address and length of the DECON registers set.
- interrupts: should contain a list of all DECON IP block interrupts in the
order: VSYNC, LCD_SYSTEM. The interrupt specifier format
@@ -16,7 +17,7 @@ Required properties:
- clocks: must include clock specifiers corresponding to entries in the
clock-names property.
- clock-names: list of clock names sorted in the same order as the clocks
property. Must contain "aclk_decon", "aclk_smmu_decon0x",
property. Must contain "pclk", "aclk_decon", "aclk_smmu_decon0x",
"aclk_xiu_decon0x", "pclk_smmu_decon0x", clk_decon_vclk",
"sclk_decon_eclk"
- ports: contains a port which is connected to mic node. address-cells and


@@ -1,20 +1,3 @@
Device-Tree bindings for Samsung Exynos Embedded DisplayPort Transmitter(eDP)
DisplayPort is an industry standard to accommodate the broad adoption
of digital display technology within the PC and CE industries.
It consolidates the internal and external connection methods to reduce device
complexity and cost. It also supports necessary features for important cross
industry applications and provides performance scalability to enable the next
generation of displays that feature higher color depths, refresh rates, and
display resolutions.
eDP (embedded display port) device is compliant with Embedded DisplayPort
standard as follows,
- DisplayPort standard 1.1a for Exynos5250 and Exynos5260.
- DisplayPort standard 1.3 for Exynos5422s and Exynos5800.
eDP resides between FIMD and panel or FIMD and bridge such as LVDS.
The Exynos display port interface should be configured based on
the type of panel connected to it.
@@ -48,26 +31,6 @@ Required properties for dp-controller:
from general PHY binding: the phandle for the PHY device.
-phy-names:
from general PHY binding: Should be "dp".
-samsung,color-space:
input video data format.
COLOR_RGB = 0, COLOR_YCBCR422 = 1, COLOR_YCBCR444 = 2
-samsung,dynamic-range:
dynamic range for input video data.
VESA = 0, CEA = 1
-samsung,ycbcr-coeff:
YCbCr co-efficients for input video.
COLOR_YCBCR601 = 0, COLOR_YCBCR709 = 1
-samsung,color-depth:
number of bits per colour component.
COLOR_6 = 0, COLOR_8 = 1, COLOR_10 = 2, COLOR_12 = 3
-samsung,link-rate:
link rate supported by the panel.
LINK_RATE_1_62GBPS = 0x6, LINK_RATE_2_70GBPS = 0x0A
-samsung,lane-count:
number of lanes supported by the panel.
LANE_COUNT1 = 1, LANE_COUNT2 = 2, LANE_COUNT4 = 4
- display-timings: timings for the connected panel as described by
Documentation/devicetree/bindings/display/display-timing.txt
Optional properties for dp-controller:
-interlaced:
@@ -83,17 +46,31 @@ Optional properties for dp-controller:
Hotplug detect GPIO.
Indicates which GPIO should be used for hotplug
detection
Video interfaces:
Device node can contain video interface port nodes according to [1].
The following are properties specific to those nodes:
-video interfaces: Device node can contain video interface port
nodes according to [1].
- display-timings: timings for the connected panel as described by
Documentation/devicetree/bindings/display/panel/display-timing.txt
endpoint node connected to bridge or panel node:
- remote-endpoint: specifies the endpoint in panel or bridge node.
This node is required in all kinds of exynos dp
to represent the connection between dp and bridge
or dp and panel.
For the below properties, please refer to Analogix DP binding document:
* Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
-phys (required)
-phy-names (required)
-hpd-gpios (optional)
-force-hpd (optional)
[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
Deprecated properties for DisplayPort:
-interlaced: deprecated prop that can be parsed from drm_display_mode.
-vsync-active-high: deprecated prop that can be parsed from drm_display_mode.
-hsync-active-high: deprecated prop that can be parsed from drm_display_mode.
-samsung,ycbcr-coeff: deprecated prop that can be parsed from drm_display_mode.
-samsung,dynamic-range: deprecated prop that can be parsed from drm_display_mode.
-samsung,color-space: deprecated prop that can be parsed from drm_display_info.
-samsung,color-depth: deprecated prop that can be parsed from drm_display_info.
-samsung,link-rate: deprecated prop that can be read from the monitor via DPCD.
-samsung,lane-count: deprecated prop that can be read from the monitor via DPCD.
-samsung,hpd-gpio: deprecated name for hpd-gpios.
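For context, a hedged sketch of why the link properties above could be
dropped: the same parameters are readable from the sink's DPCD
receiver-capability registers through the DRM DP helpers (read_link_caps is
hypothetical; error handling omitted):

#include <drm/drm_dp_helper.h>

static void read_link_caps(struct drm_dp_aux *aux)
{
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	u8 link_rate, lane_count;

	drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
	link_rate = dpcd[DP_MAX_LINK_RATE];	/* e.g. 0x0a = 2.7 Gbps */
	lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
}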
-------------------------------------------------------------------------------
Example:
@@ -112,13 +89,6 @@ SOC specific portion:
Board Specific portion:
dp-controller {
samsung,color-space = <0>;
samsung,dynamic-range = <0>;
samsung,ycbcr-coeff = <0>;
samsung,color-depth = <1>;
samsung,link-rate = <0x0a>;
samsung,lane-count = <4>;
display-timings {
native-mode = <&lcd_timing>;
lcd_timing: 1366x768 {
@@ -135,18 +105,9 @@ Board Specific portion:
};
ports {
port {
port@0 {
dp_out: endpoint {
remote-endpoint = <&dp_in>;
};
};
};
panel {
...
port {
dp_in: endpoint {
remote-endpoint = <&dp_out>;
remote-endpoint = <&bridge_in>;
};
};
};


@@ -5,6 +5,7 @@ Required properties:
1) "samsung,exynos4210-hdmi"
2) "samsung,exynos4212-hdmi"
3) "samsung,exynos5420-hdmi"
4) "samsung,exynos5433-hdmi"
- reg: physical base address of the hdmi and length of memory mapped
region.
- interrupts: interrupt number to the cpu.
@@ -12,6 +13,11 @@ Required properties:
a) phandle of the gpio controller node.
b) pin number within the gpio controller.
c) optional flags and pull up/down.
- ddc: phandle to the hdmi ddc node
- phy: phandle to the hdmi phy node
- samsung,syscon-phandle: phandle for system controller node for PMU.
Required properties for Exynos 4210, 4212, 5420 and 5433:
- clocks: list of clock IDs from SoC clock driver.
a) hdmi: Gate of HDMI IP bus clock.
b) sclk_hdmi: Gate of HDMI special clock.
@@ -25,9 +31,24 @@ Required properties:
sclk_pixel.
- clock-names: aliases as per driver requirements for above clock IDs:
"hdmi", "sclk_hdmi", "sclk_pixel", "sclk_hdmiphy" and "mout_hdmi".
- ddc: phandle to the hdmi ddc node
- phy: phandle to the hdmi phy node
- samsung,syscon-phandle: phandle for system controller node for PMU.
Required properties for Exynos 5433:
- clocks: list of clock specifiers according to common clock bindings.
a) hdmi_pclk: Gate of HDMI IP APB bus.
b) hdmi_i_pclk: Gate of HDMI-PHY IP APB bus.
c) i_tmds_clk: Gate of HDMI TMDS clock.
d) i_pixel_clk: Gate of HDMI pixel clock.
e) i_spdif_clk: Gate of HDMI SPDIF clock.
f) oscclk: Oscillator clock, used as parent of following *_user clocks
in case HDMI-PHY is not operational.
g) tmds_clko: TMDS clock generated by HDMI-PHY.
h) tmds_clko_user: MUX used to switch between oscclk and tmds_clko,
respectively if HDMI-PHY is off and operational.
i) pixel_clko: Pixel clock generated by HDMI-PHY.
j) pixel_clko_user: MUX used to switch between oscclk and pixel_clko,
respectively if HDMI-PHY is off and operational.
- clock-names: aliases for above clock specifiers.
- samsung,sysreg: handle to syscon used to control the system registers.
Example:


@@ -6,17 +6,24 @@ Required properties:
* "fsl,vf610-dcu".
- reg: Address and length of the register set for dcu.
- clocks: From common clock binding: handle to dcu clock.
- clock-names: From common clock binding: Shall be "dcu".
- clocks: Handle to "dcu" and "pix" clock (in the order below)
This can be the same clock (e.g. LS1021a)
See ../clocks/clock-bindings.txt for details.
- clock-names: Should be "dcu" and "pix"
See ../clocks/clock-bindings.txt for details.
- big-endian: Boolean property; LS1021A DCU registers are big-endian.
- fsl,panel: The phandle to panel node.
Optional properties:
- fsl,tcon: The phandle to the timing controller node.
Examples:
dcu: dcu@2ce0000 {
compatible = "fsl,ls1021a-dcu";
reg = <0x0 0x2ce0000 0x0 0x10000>;
clocks = <&platform_clk 0>;
clock-names = "dcu";
clocks = <&platform_clk 0>, <&platform_clk 0>;
clock-names = "dcu", "pix";
big-endian;
fsl,panel = <&panel>;
fsl,tcon = <&tcon>;
};


@@ -0,0 +1,18 @@
Device Tree bindings for Freescale TCON Driver
Required properties:
- compatible: Should be one of
* "fsl,vf610-tcon".
- reg: Address and length of the register set for tcon.
- clocks: From common clock binding: handle to tcon ipg clock.
- clock-names: From common clock binding: Shall be "ipg".
Examples:
timing-controller@4003d000 {
compatible = "fsl,vf610-tcon";
reg = <0x4003d000 0x1000>;
clocks = <&clks VF610_CLK_TCON0>;
clock-names = "ipg";
status = "okay";
};


@@ -0,0 +1,72 @@
Device-Tree bindings for DesignWare DSI Host Controller v1.20a driver
A DSI host controller sits between the display controller and an external
HDMI converter or panel.
Required properties:
- compatible: value should be "hisilicon,hi6220-dsi".
- reg: physical base address and length of dsi controller's registers.
- clocks: contains APB clock phandle + clock-specifier pair.
- clock-names: should be "pclk".
- ports: contains the DSI controller input and output sub-ports.
The input port, with reg value "0", connects to the ADE output port.
The output port, with reg value "1", can connect to a panel or
any other bridge endpoint.
See Documentation/devicetree/bindings/graph.txt for more device graph info.
An example of HiKey board hi6220 SoC and board specific DT entries:
Example:
SoC specific:
dsi: dsi@f4107800 {
compatible = "hisilicon,hi6220-dsi";
reg = <0x0 0xf4107800 0x0 0x100>;
clocks = <&media_ctrl HI6220_DSI_PCLK>;
clock-names = "pclk";
status = "disabled";
ports {
#address-cells = <1>;
#size-cells = <0>;
/* 0 for input port */
port@0 {
reg = <0>;
dsi_in: endpoint {
remote-endpoint = <&ade_out>;
};
};
};
};
Board specific:
&dsi {
status = "ok";
ports {
/* 1 for output port */
port@1 {
reg = <1>;
dsi_out0: endpoint@0 {
remote-endpoint = <&adv7533_in>;
};
};
};
};
&i2c2 {
...
adv7533: adv7533@39 {
...
port {
adv7533_in: endpoint {
remote-endpoint = <&dsi_out0>;
};
};
};
};


@@ -0,0 +1,64 @@
Device-Tree bindings for hisilicon ADE display controller driver
ADE (Advanced Display Engine) is the display controller. It grabs image
data from memory, does composition and post image processing, generates the
RGB timing stream and transfers it to DSI.
Required properties:
- compatible: value should be "hisilicon,hi6220-ade".
- reg: physical base address and length of the ADE controller's registers.
- hisilicon,noc-syscon: ADE NOC QoS syscon.
- resets: The ADE reset controller node.
- interrupts: the LDI vblank interrupt number used.
- clocks: a list of phandle + clock-specifier pairs, one for each entry
in clock-names.
- clock-names: should contain:
"clk_ade_core" for the ADE core clock.
"clk_codec_jpeg" for the media NOC QoS clock, which use the same clock with
jpeg codec.
"clk_ade_pix" for the ADE pixel clok.
- assigned-clocks: Should contain "clk_ade_core" and "clk_codec_jpeg" clocks'
phandle + clock-specifier pairs.
- assigned-clock-rates: clock rates, one for each entry in assigned-clocks.
The rate of "clk_ade_core" could be "360000000" or "180000000";
The rate of "clk_codec_jpeg" could be or less than "1440000000".
These rate values could be configured according to performance and power
consumption.
- port: the output port. This contains one endpoint subnode, with its
remote-endpoint set to the phandle of the connected DSI input endpoint.
See Documentation/devicetree/bindings/graph.txt for more device graph info.
Optional properties:
- dma-coherent: Present if dma operations are coherent.
An example of a HiKey board hi6220 SoC specific DT entry:
Example:
ade: ade@f4100000 {
compatible = "hisilicon,hi6220-ade";
reg = <0x0 0xf4100000 0x0 0x7800>;
reg-names = "ade_base";
hisilicon,noc-syscon = <&medianoc_ade>;
resets = <&media_ctrl MEDIA_ADE>;
interrupts = <0 115 4>; /* ldi interrupt */
clocks = <&media_ctrl HI6220_ADE_CORE>,
<&media_ctrl HI6220_CODEC_JPEG>,
<&media_ctrl HI6220_ADE_PIX_SRC>;
/*clock name*/
clock-names = "clk_ade_core",
"clk_codec_jpeg",
"clk_ade_pix";
assigned-clocks = <&media_ctrl HI6220_ADE_CORE>,
<&media_ctrl HI6220_CODEC_JPEG>;
assigned-clock-rates = <360000000>, <288000000>;
dma-coherent;
port {
ade_out: endpoint {
remote-endpoint = <&dsi_in>;
};
};
};


@@ -0,0 +1,203 @@
Mediatek display subsystem
==========================
The Mediatek display subsystem consists of various DISP function blocks in the
MMSYS register space. The connections between them can be configured by output
and input selectors in the MMSYS_CONFIG register space. Pixel clock and start
of frame signal are distributed to the other function blocks by a DISP_MUTEX
function block.
All DISP device tree nodes must be siblings to the central MMSYS_CONFIG node.
For a description of the MMSYS_CONFIG binding, see
Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt.
DISP function blocks
====================
A display stream starts at a source function block that reads pixel data from
memory and ends with a sink function block that drives pixels on a display
interface, or writes pixels back to memory. All DISP function blocks have
their own register space, interrupt, and clock gate. The blocks that can
access memory additionally have to list the IOMMU and local arbiter they are
connected to.
For a description of the display interface sink function blocks, see
Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt and
Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt.
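As a hedged illustration of such a stream (component ids assumed from the
mt8173 driver's mtk_drm_ddp_comp.h; they are not part of this binding), the
primary path chains function blocks from source to sink roughly like this:

static const enum mtk_ddp_comp_id mt8173_main_path[] = {
	DDP_COMPONENT_OVL0,	/* source: overlay reads pixels from memory */
	DDP_COMPONENT_COLOR0,	/* color processor */
	DDP_COMPONENT_AAL,	/* adaptive ambient light */
	DDP_COMPONENT_OD,	/* overdrive */
	DDP_COMPONENT_RDMA0,	/* read DMA / line buffer */
	DDP_COMPONENT_UFOE,	/* data compression engine */
	DDP_COMPONENT_DSI0,	/* sink: DSI controller */
};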
Required properties (all function blocks):
- compatible: "mediatek,<chip>-disp-<function>", one of
"mediatek,<chip>-disp-ovl" - overlay (4 layers, blending, csc)
"mediatek,<chip>-disp-rdma" - read DMA / line buffer
"mediatek,<chip>-disp-wdma" - write DMA
"mediatek,<chip>-disp-color" - color processor
"mediatek,<chip>-disp-aal" - adaptive ambient light controller
"mediatek,<chip>-disp-gamma" - gamma correction
"mediatek,<chip>-disp-merge" - merge streams from two RDMA sources
"mediatek,<chip>-disp-split" - split stream to two encoders
"mediatek,<chip>-disp-ufoe" - data compression engine
"mediatek,<chip>-dsi" - DSI controller, see mediatek,dsi.txt
"mediatek,<chip>-dpi" - DPI controller, see mediatek,dpi.txt
"mediatek,<chip>-disp-mutex" - display mutex
"mediatek,<chip>-disp-od" - overdrive
- reg: Physical base address and length of the function block register space
- interrupts: The interrupt signal from the function block (required, except for
merge and split function blocks).
- clocks: device clocks
See Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
For most function blocks this is just a single clock input. Only the DSI and
DPI controller nodes have multiple clock inputs. These are documented in
mediatek,dsi.txt and mediatek,dpi.txt, respectively.
Required properties (DMA function blocks):
- compatible: Should be one of
"mediatek,<chip>-disp-ovl"
"mediatek,<chip>-disp-rdma"
"mediatek,<chip>-disp-wdma"
- larb: Should contain a phandle pointing to the local arbiter device as defined
in Documentation/devicetree/bindings/soc/mediatek/mediatek,smi-larb.txt
- iommus: Should point to the respective IOMMU block with master port as
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
for details.
Examples:
mmsys: clock-controller@14000000 {
compatible = "mediatek,mt8173-mmsys", "syscon";
reg = <0 0x14000000 0 0x1000>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
#clock-cells = <1>;
};
ovl0: ovl@1400c000 {
compatible = "mediatek,mt8173-disp-ovl";
reg = <0 0x1400c000 0 0x1000>;
interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
clocks = <&mmsys CLK_MM_DISP_OVL0>;
iommus = <&iommu M4U_PORT_DISP_OVL0>;
mediatek,larb = <&larb0>;
};
ovl1: ovl@1400d000 {
compatible = "mediatek,mt8173-disp-ovl";
reg = <0 0x1400d000 0 0x1000>;
interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
clocks = <&mmsys CLK_MM_DISP_OVL1>;
iommus = <&iommu M4U_PORT_DISP_OVL1>;
mediatek,larb = <&larb4>;
};
rdma0: rdma@1400e000 {
compatible = "mediatek,mt8173-disp-rdma";
reg = <0 0x1400e000 0 0x1000>;
interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
clocks = <&mmsys CLK_MM_DISP_RDMA0>;
iommus = <&iommu M4U_PORT_DISP_RDMA0>;
mediatek,larb = <&larb0>;
};
rdma1: rdma@1400f000 {
compatible = "mediatek,mt8173-disp-rdma";
reg = <0 0x1400f000 0 0x1000>;
interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
clocks = <&mmsys CLK_MM_DISP_RDMA1>;
iommus = <&iommu M4U_PORT_DISP_RDMA1>;
mediatek,larb = <&larb4>;
};
rdma2: rdma@14010000 {
compatible = "mediatek,mt8173-disp-rdma";
reg = <0 0x14010000 0 0x1000>;
interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
clocks = <&mmsys CLK_MM_DISP_RDMA2>;
iommus = <&iommu M4U_PORT_DISP_RDMA2>;
mediatek,larb = <&larb4>;
};
wdma0: wdma@14011000 {
compatible = "mediatek,mt8173-disp-wdma";
reg = <0 0x14011000 0 0x1000>;
interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
clocks = <&mmsys CLK_MM_DISP_WDMA0>;
iommus = <&iommu M4U_PORT_DISP_WDMA0>;
mediatek,larb = <&larb0>;
};
wdma1: wdma@14012000 {
compatible = "mediatek,mt8173-disp-wdma";
reg = <0 0x14012000 0 0x1000>;
interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
clocks = <&mmsys CLK_MM_DISP_WDMA1>;
iommus = <&iommu M4U_PORT_DISP_WDMA1>;
mediatek,larb = <&larb4>;
};
color0: color@14013000 {
compatible = "mediatek,mt8173-disp-color";
reg = <0 0x14013000 0 0x1000>;
interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
clocks = <&mmsys CLK_MM_DISP_COLOR0>;
};
color1: color@14014000 {
compatible = "mediatek,mt8173-disp-color";
reg = <0 0x14014000 0 0x1000>;
interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
clocks = <&mmsys CLK_MM_DISP_COLOR1>;
};
aal@14015000 {
compatible = "mediatek,mt8173-disp-aal";
reg = <0 0x14015000 0 0x1000>;
interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
clocks = <&mmsys CLK_MM_DISP_AAL>;
};
gamma@14016000 {
compatible = "mediatek,mt8173-disp-gamma";
reg = <0 0x14016000 0 0x1000>;
interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
clocks = <&mmsys CLK_MM_DISP_GAMMA>;
};
ufoe@1401a000 {
compatible = "mediatek,mt8173-disp-ufoe";
reg = <0 0x1401a000 0 0x1000>;
interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
clocks = <&mmsys CLK_MM_DISP_UFOE>;
};
dsi0: dsi@1401b000 {
/* See mediatek,dsi.txt for details */
};
dpi0: dpi@1401d000 {
/* See mediatek,dpi.txt for details */
};
mutex: mutex@14020000 {
compatible = "mediatek,mt8173-disp-mutex";
reg = <0 0x14020000 0 0x1000>;
interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
clocks = <&mmsys CLK_MM_MUTEX_32K>;
};
od@14023000 {
compatible = "mediatek,mt8173-disp-od";
reg = <0 0x14023000 0 0x1000>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
clocks = <&mmsys CLK_MM_DISP_OD>;
};


@@ -0,0 +1,35 @@
Mediatek DPI Device
===================
The Mediatek DPI function block is a sink of the display subsystem and
provides 8-bit RGB/YUV444 or 8/10/10-bit YUV422 pixel data on a parallel
output bus.
Required properties:
- compatible: "mediatek,<chip>-dpi"
- reg: Physical base address and length of the controller's registers
- interrupts: The interrupt signal from the function block.
- clocks: device clocks
See Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
- clock-names: must contain "pixel", "engine", and "pll"
- port: Output port node with endpoint definitions as described in
Documentation/devicetree/bindings/graph.txt. This port should be connected
to the input port of an attached HDMI or LVDS encoder chip.
Example:
dpi0: dpi@1401d000 {
compatible = "mediatek,mt8173-dpi";
reg = <0 0x1401d000 0 0x1000>;
interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>;
clocks = <&mmsys CLK_MM_DPI_PIXEL>,
<&mmsys CLK_MM_DPI_ENGINE>,
<&apmixedsys CLK_APMIXED_TVDPLL>;
clock-names = "pixel", "engine", "pll";
port {
dpi0_out: endpoint {
remote-endpoint = <&hdmi0_in>;
};
};
};


@@ -0,0 +1,60 @@
Mediatek DSI Device
===================
The Mediatek DSI function block is a sink of the display subsystem and can
drive up to 4-lane MIPI DSI output. Two DSIs can be synchronized for dual-
channel output.
Required properties:
- compatible: "mediatek,<chip>-dsi"
- reg: Physical base address and length of the controller's registers
- interrupts: The interrupt signal from the function block.
- clocks: device clocks
See Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
- clock-names: must contain "engine", "digital", and "hs"
- phys: phandle link to the MIPI D-PHY controller.
- phy-names: must contain "dphy"
- port: Output port node with endpoint definitions as described in
Documentation/devicetree/bindings/graph.txt. This port should be connected
to the input port of an attached DSI panel or DSI-to-eDP encoder chip.
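A hedged sketch of how a driver would resolve the resources named above
(example_get_dsi_resources is hypothetical; error handling omitted):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/phy/phy.h>

static void example_get_dsi_resources(struct device *dev)
{
	struct phy *dphy = devm_phy_get(dev, "dphy");
	struct clk *engine = devm_clk_get(dev, "engine");
	struct clk *digital = devm_clk_get(dev, "digital");
	struct clk *hs = devm_clk_get(dev, "hs");

	/* configure the D-PHY and enable the three clocks here */
}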
MIPI TX Configuration Module
============================
The MIPI TX configuration module controls the MIPI D-PHY.
Required properties:
- compatible: "mediatek,<chip>-mipi-tx"
- reg: Physical base address and length of the controller's registers
- clocks: PLL reference clock
- clock-output-names: name of the output clock line to the DSI encoder
- #clock-cells: must be <0>;
- #phy-cells: must be <0>.
Example:
mipi_tx0: mipi-dphy@10215000 {
compatible = "mediatek,mt8173-mipi-tx";
reg = <0 0x10215000 0 0x1000>;
clocks = <&clk26m>;
clock-output-names = "mipi_tx0_pll";
#clock-cells = <0>;
#phy-cells = <0>;
};
dsi0: dsi@1401b000 {
compatible = "mediatek,mt8173-dsi";
reg = <0 0x1401b000 0 0x1000>;
interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_LOW>;
clocks = <&mmsys MM_DSI0_ENGINE>, <&mmsys MM_DSI0_DIGITAL>,
<&mipi_tx0>;
clock-names = "engine", "digital", "hs";
phys = <&mipi_tx0>;
phy-names = "dphy";
port {
dsi0_out: endpoint {
remote-endpoint = <&panel_in>;
};
};
};


@@ -0,0 +1,7 @@
Innolux AT070TN92 7.0" WQVGA TFT LCD panel
Required properties:
- compatible: should be "innolux,at070tn92"
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@@ -0,0 +1,7 @@
Olimex 4.3" TFT LCD panel
Required properties:
- compatible: should be "olimex,lcd-olinuxino-43-ts"
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@@ -0,0 +1,7 @@
On Tat Industrial Company 7" DPI TFT panel.
Required properties:
- compatible: should be "ontat,yx700wv03"
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@@ -0,0 +1,8 @@
TPK U.S.A. LLC Fusion 7" integrated projected capacitive touch display with
an 800 x 480 (WVGA) LCD panel.
Required properties:
- compatible: should be "tpk,f07a-0102"
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@@ -0,0 +1,8 @@
TPK U.S.A. LLC Fusion 10.1" integrated projected capacitive touch display with
a 1024 x 600 (WSVGA) LCD panel.
Required properties:
- compatible: should be "tpk,f10a-0102"
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@@ -0,0 +1,92 @@
Rockchip RK3288 specific extensions to the Analogix Display Port
================================
Required properties:
- compatible: "rockchip,rk3288-edp";
- reg: physical base address of the controller and length
of memory mapped region.
- clocks: from common clock binding: handle to dp clock.
- clock-names: from common clock binding:
Required elements: "dp" "pclk"
- resets: Must contain an entry for each entry in reset-names.
See ../reset/reset.txt for details.
- pinctrl-names: Names corresponding to the chip hotplug pinctrl states.
- pinctrl-0: pin-control mode. Should be <&edp_hpd>.
- reset-names: Must include the name "dp"
- rockchip,grf: phandle to the GRF (general register files) syscon, used to
set SoC-specific display registers.
- ports: there are 2 port nodes with endpoint definitions as defined in
Documentation/devicetree/bindings/media/video-interfaces.txt.
Port 0: contains 2 endpoints, connecting to the output of the vop.
Port 1: contains 1 endpoint, connecting to the input of the panel.
For the below properties, please refer to Analogix DP binding document:
* Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
- phys (required)
- phy-names (required)
- hpd-gpios (optional)
- force-hpd (optional)
-------------------------------------------------------------------------------
Example:
dp-controller: dp@ff970000 {
compatible = "rockchip,rk3288-dp";
reg = <0xff970000 0x4000>;
interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_EDP>, <&cru PCLK_EDP_CTRL>;
clock-names = "dp", "pclk";
phys = <&dp_phy>;
phy-names = "dp";
rockchip,grf = <&grf>;
resets = <&cru 111>;
reset-names = "dp";
pinctrl-names = "default";
pinctrl-0 = <&edp_hpd>;
status = "disabled";
ports {
#address-cells = <1>;
#size-cells = <0>;
edp_in: port@0 {
reg = <0>;
#address-cells = <1>;
#size-cells = <0>;
edp_in_vopb: endpoint@0 {
reg = <0>;
remote-endpoint = <&vopb_out_edp>;
};
edp_in_vopl: endpoint@1 {
reg = <1>;
remote-endpoint = <&vopl_out_edp>;
};
};
edp_out: port@1 {
reg = <1>;
#address-cells = <1>;
#size-cells = <0>;
edp_out_panel: endpoint {
reg = <0>;
remote-endpoint = <&panel_in_edp>;
};
};
};
};
pinctrl {
edp {
edp_hpd: edp-hpd {
rockchip,pins = <7 11 RK_FUNC_2 &pcfg_pull_none>;
};
};
};


@@ -0,0 +1,35 @@
ARC PGU
This is a display controller found on several development boards produced
by Synopsys. The ARC PGU is an RGB streamer that reads the data from a
framebuffer and sends it to a single digital encoder (usually HDMI).
Required properties:
- compatible: "snps,arcpgu"
- reg: Physical base address and length of the controller's registers.
- clocks: A list of phandle + clock-specifier pairs, one for each
entry in 'clock-names'.
- clock-names: A list of clock names. For ARC PGU it should contain:
- "pxlclk" for the clock feeding the output PLL of the controller.
Required sub-nodes:
- port: The PGU connection to an encoder chip.
Example:
/ {
...
pgu@XXXXXXXX {
compatible = "snps,arcpgu";
reg = <0xXXXXXXXX 0x400>;
clocks = <&clock_node>;
clock-names = "pxlclk";
port {
pgu_output: endpoint {
remote-endpoint = <&hdmi_enc_input>;
};
};
};
};


@@ -0,0 +1,258 @@
Allwinner A10 Display Pipeline
==============================
The Allwinner A10 Display pipeline is composed of several components
that are going to be documented below:
TV Encoder
----------
The TV Encoder supports the composite and VGA output. It is one end of
the pipeline.
Required properties:
- compatible: value should be "allwinner,sun4i-a10-tv-encoder".
- reg: base address and size of memory-mapped region
- clocks: the clocks driving the TV encoder
- resets: phandle to the reset controller driving the encoder
- ports: A ports node with endpoint definitions as defined in
Documentation/devicetree/bindings/media/video-interfaces.txt. The
first port should be the input endpoint.
TCON
----
The TCON acts as a timing controller for RGB, LVDS and TV interfaces.
Required properties:
- compatible: value should be "allwinner,sun5i-a13-tcon".
- reg: base address and size of memory-mapped region
- interrupts: interrupt associated to this IP
- clocks: phandles to the clocks feeding the TCON. Three are needed:
- 'ahb': the interface clocks
- 'tcon-ch0': The clock driving the TCON channel 0
- 'tcon-ch1': The clock driving the TCON channel 1
- resets: phandles to the reset controllers driving the encoder
- "lcd": the reset line for the TCON channel 0
- clock-names: the clock names mentioned above
- reset-names: the reset names mentioned above
- clock-output-names: Name of the pixel clock created
- ports: A ports node with endpoint definitions as defined in
Documentation/devicetree/bindings/media/video-interfaces.txt. The
first port should be the input endpoint, the second one the output
The output should have two endpoints. The first is the block
connected to the TCON channel 0 (usually a panel or a bridge), the
second the block connected to the TCON channel 1 (usually the TV
encoder)
Display Engine Backend
----------------------
The display engine backend exposes layers and sprites to the
system.
Required properties:
- compatible: value must be one of:
* allwinner,sun5i-a13-display-backend
- reg: base address and size of the memory-mapped region.
- clocks: phandles to the clocks feeding the backend
* ahb: the backend interface clock
* mod: the backend module clock
* ram: the backend DRAM clock
- clock-names: the clock names mentioned above
- resets: phandles to the reset controllers driving the backend
- ports: A ports node with endpoint definitions as defined in
Documentation/devicetree/bindings/media/video-interfaces.txt. The
first port should be the input endpoint, the second one the output.
Display Engine Frontend
-----------------------
The display engine frontend does formats conversion, scaling,
deinterlacing and color space conversion.
Required properties:
- compatible: value must be one of:
* allwinner,sun5i-a13-display-frontend
- reg: base address and size of the memory-mapped region.
- interrupts: interrupt associated to this IP
- clocks: phandles to the clocks feeding the frontend
* ahb: the frontend interface clock
* mod: the frontend module clock
* ram: the frontend DRAM clock
- clock-names: the clock names mentioned above
- resets: phandles to the reset controllers driving the frontend
- ports: A ports node with endpoint definitions as defined in
Documentation/devicetree/bindings/media/video-interfaces.txt. The
first port should be the input endpoint, the second one the outputs.
Display Engine Pipeline
-----------------------
The display engine pipeline (and its entry point, since it can be
either directly the backend or the frontend) is represented as an
extra node.
Required properties:
- compatible: value must be one of:
* allwinner,sun5i-a13-display-engine
- allwinner,pipelines: list of phandles to the available display engine
frontends.
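A hedged sketch of how a driver could walk that property
(example_walk_pipelines is hypothetical):

#include <linux/of.h>

static void example_walk_pipelines(struct device_node *np)
{
	struct device_node *pipeline;
	int i = 0;

	while ((pipeline = of_parse_phandle(np, "allwinner,pipelines", i++))) {
		/* bind the frontend (or backend) found at this entry point */
		of_node_put(pipeline);
	}
}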
Example:
panel: panel {
compatible = "olimex,lcd-olinuxino-43-ts";
#address-cells = <1>;
#size-cells = <0>;
port {
#address-cells = <1>;
#size-cells = <0>;
panel_input: endpoint {
remote-endpoint = <&tcon0_out_panel>;
};
};
};
tve0: tv-encoder@01c0a000 {
compatible = "allwinner,sun4i-a10-tv-encoder";
reg = <0x01c0a000 0x1000>;
clocks = <&ahb_gates 34>;
resets = <&tcon_ch0_clk 0>;
port {
#address-cells = <1>;
#size-cells = <0>;
tve0_in_tcon0: endpoint@0 {
reg = <0>;
remote-endpoint = <&tcon0_out_tve0>;
};
};
};
tcon0: lcd-controller@1c0c000 {
compatible = "allwinner,sun5i-a13-tcon";
reg = <0x01c0c000 0x1000>;
interrupts = <44>;
resets = <&tcon_ch0_clk 1>;
reset-names = "lcd";
clocks = <&ahb_gates 36>,
<&tcon_ch0_clk>,
<&tcon_ch1_clk>;
clock-names = "ahb",
"tcon-ch0",
"tcon-ch1";
clock-output-names = "tcon-pixel-clock";
ports {
#address-cells = <1>;
#size-cells = <0>;
tcon0_in: port@0 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
tcon0_in_be0: endpoint@0 {
reg = <0>;
remote-endpoint = <&be0_out_tcon0>;
};
};
tcon0_out: port@1 {
#address-cells = <1>;
#size-cells = <0>;
reg = <1>;
tcon0_out_panel: endpoint@0 {
reg = <0>;
remote-endpoint = <&panel_input>;
};
tcon0_out_tve0: endpoint@1 {
reg = <1>;
remote-endpoint = <&tve0_in_tcon0>;
};
};
};
};
fe0: display-frontend@1e00000 {
compatible = "allwinner,sun5i-a13-display-frontend";
reg = <0x01e00000 0x20000>;
interrupts = <47>;
clocks = <&ahb_gates 46>, <&de_fe_clk>,
<&dram_gates 25>;
clock-names = "ahb", "mod",
"ram";
resets = <&de_fe_clk>;
ports {
#address-cells = <1>;
#size-cells = <0>;
fe0_out: port@1 {
#address-cells = <1>;
#size-cells = <0>;
reg = <1>;
fe0_out_be0: endpoint {
remote-endpoint = <&be0_in_fe0>;
};
};
};
};
be0: display-backend@1e60000 {
compatible = "allwinner,sun5i-a13-display-backend";
reg = <0x01e60000 0x10000>;
clocks = <&ahb_gates 44>, <&de_be_clk>,
<&dram_gates 26>;
clock-names = "ahb", "mod",
"ram";
resets = <&de_be_clk>;
ports {
#address-cells = <1>;
#size-cells = <0>;
be0_in: port@0 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
be0_in_fe0: endpoint@0 {
reg = <0>;
remote-endpoint = <&fe0_out_be0>;
};
};
be0_out: port@1 {
#address-cells = <1>;
#size-cells = <0>;
reg = <1>;
be0_out_tcon0: endpoint@0 {
reg = <0>;
remote-endpoint = <&tcon0_in_be0>;
};
};
};
};
display-engine {
compatible = "allwinner,sun5i-a13-display-engine";
allwinner,pipelines = <&fe0>;
};


@@ -23,6 +23,7 @@ amlogic Amlogic, Inc.
ampire Ampire Co., Ltd.
ams AMS AG
amstaos AMS-Taos Inc.
analogix Analogix Semiconductor, Inc.
apm Applied Micro Circuits Corporation (APM)
aptina Aptina Imaging
arasan Arasan Chip Systems
@@ -185,6 +186,7 @@ okaya Okaya Electric America, Inc.
olimex OLIMEX Ltd.
onion Onion Corporation
onnn ON Semiconductor Corp.
ontat On Tat Industrial Company
opencores OpenCores.org
option Option NV
ortustech Ortus Technology Co., Ltd.
@@ -261,6 +263,7 @@ toradex Toradex AG
toshiba Toshiba Corporation
toumaz Toumaz
tplink TP-LINK Technologies Co., Ltd.
tpk TPK U.S.A. LLC
tronfy Tronfy
tronsmart Tronsmart
truly Truly Semiconductors Limited


@@ -0,0 +1,40 @@
Analogix ANX7814 SlimPort (Full-HD Transmitter)
-----------------------------------------------
The ANX7814 is an ultra-low power Full-HD (1080p60) SlimPort transmitter
designed for portable devices.
Required properties:
- compatible : "analogix,anx7814"
- reg : I2C address of the device
- interrupt-parent : Should be the phandle of the interrupt controller
that services interrupts for this device
- interrupts : Should contain the INTP interrupt
- hpd-gpios : Which GPIO to use for hpd
- pd-gpios : Which GPIO to use for power down
- reset-gpios : Which GPIO to use for reset
Optional properties:
- dvdd10-supply : Regulator for 1.0V digital core power.
- Video port for HDMI input, using the DT bindings defined in [1].
[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
Example:
anx7814: anx7814@38 {
compatible = "analogix,anx7814";
reg = <0x38>;
interrupt-parent = <&gpio0>;
interrupts = <99 IRQ_TYPE_LEVEL_LOW>; /* INTP */
hpd-gpios = <&pio 36 GPIO_ACTIVE_HIGH>;
pd-gpios = <&pio 33 GPIO_ACTIVE_HIGH>;
reset-gpios = <&pio 98 GPIO_ACTIVE_HIGH>;
port {
anx7814_in: endpoint {
remote-endpoint = <&hdmi0_out>;
};
};
};


@@ -856,6 +856,12 @@ S: Maintained
F: drivers/net/arcnet/
F: include/uapi/linux/if_arcnet.h
ARC PGU DRM DRIVER
M: Alexey Brodkin <abrodkin@synopsys.com>
S: Supported
F: drivers/gpu/drm/arc/
F: Documentation/devicetree/bindings/display/snps,arcpgu.txt
ARM HDLCD DRM DRIVER
M: Liviu Dudau <liviu.dudau@arm.com>
S: Supported
@@ -3837,9 +3843,25 @@ T: git git://people.freedesktop.org/~airlied/linux
S: Maintained
F: drivers/gpu/drm/
F: drivers/gpu/vga/
F: Documentation/DocBook/gpu.*
F: include/drm/
F: include/uapi/drm/
DRM DRIVER FOR AST SERVER GRAPHICS CHIPS
M: Dave Airlie <airlied@redhat.com>
S: Odd Fixes
F: drivers/gpu/drm/ast/
DRM DRIVER FOR BOCHS VIRTUAL GPU
M: Gerd Hoffmann <kraxel@redhat.com>
S: Odd Fixes
F: drivers/gpu/drm/bochs/
DRM DRIVER FOR QEMU'S CIRRUS DEVICE
M: Dave Airlie <airlied@redhat.com>
S: Odd Fixes
F: drivers/gpu/drm/cirrus/
RADEON and AMDGPU DRM DRIVERS
M: Alex Deucher <alexander.deucher@amd.com>
M: Christian König <christian.koenig@amd.com>
@@ -3847,9 +3869,9 @@ L: dri-devel@lists.freedesktop.org
T: git git://people.freedesktop.org/~agd5f/linux
S: Supported
F: drivers/gpu/drm/radeon/
F: include/uapi/drm/radeon*
F: include/uapi/drm/radeon_drm.h
F: drivers/gpu/drm/amd/
F: include/uapi/drm/amdgpu*
F: include/uapi/drm/amdgpu_drm.h
DRM PANEL DRIVERS
M: Thierry Reding <thierry.reding@gmail.com>
@@ -3872,7 +3894,7 @@ T: git git://anongit.freedesktop.org/drm-intel
S: Supported
F: drivers/gpu/drm/i915/
F: include/drm/i915*
F: include/uapi/drm/i915*
F: include/uapi/drm/i915_drm.h
DRM DRIVERS FOR ATMEL HLCDC
M: Boris Brezillon <boris.brezillon@free-electrons.com>
@@ -3881,6 +3903,13 @@ S: Supported
F: drivers/gpu/drm/atmel-hlcdc/
F: Documentation/devicetree/bindings/drm/atmel/
DRM DRIVERS FOR ALLWINNER A10
M: Maxime Ripard <maxime.ripard@free-electrons.com>
L: dri-devel@lists.freedesktop.org
S: Supported
F: drivers/gpu/drm/sun4i/
F: Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
DRM DRIVERS FOR EXYNOS
M: Inki Dae <inki.dae@samsung.com>
M: Joonyoung Shim <jy0922.shim@samsung.com>
@@ -3890,8 +3919,8 @@ L: dri-devel@lists.freedesktop.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos.git
S: Supported
F: drivers/gpu/drm/exynos/
F: include/drm/exynos*
F: include/uapi/drm/exynos*
F: include/uapi/drm/exynos_drm.h
F: Documentation/devicetree/bindings/display/exynos/
DRM DRIVERS FOR FREESCALE DCU
M: Stefan Agner <stefan@agner.ch>
@@ -3900,6 +3929,7 @@ L: dri-devel@lists.freedesktop.org
S: Supported
F: drivers/gpu/drm/fsl-dcu/
F: Documentation/devicetree/bindings/display/fsl,dcu.txt
F: Documentation/devicetree/bindings/display/fsl,tcon.txt
F: Documentation/devicetree/bindings/display/panel/nec,nl4827hc19_05b.txt
DRM DRIVERS FOR FREESCALE IMX
@@ -3915,12 +3945,45 @@ M: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
L: dri-devel@lists.freedesktop.org
T: git git://github.com/patjak/drm-gma500
S: Maintained
F: drivers/gpu/drm/gma500
F: include/drm/gma500*
F: drivers/gpu/drm/gma500/
DRM DRIVERS FOR HISILICON
M: Xinliang Liu <z.liuxinliang@hisilicon.com>
R: Xinwei Kong <kong.kongxinwei@hisilicon.com>
R: Chen Feng <puck.chen@hisilicon.com>
L: dri-devel@lists.freedesktop.org
T: git git://github.com/xin3liang/linux.git
S: Maintained
F: drivers/gpu/drm/hisilicon/
F: Documentation/devicetree/bindings/display/hisilicon/
DRM DRIVER FOR INTEL I810 VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/i810/
F: include/uapi/drm/i810_drm.h
DRM DRIVER FOR MSM ADRENO GPU
M: Rob Clark <robdclark@gmail.com>
L: linux-arm-msm@vger.kernel.org
L: dri-devel@lists.freedesktop.org
L: freedreno@lists.freedesktop.org
T: git git://people.freedesktop.org/~robclark/linux
S: Maintained
F: drivers/gpu/drm/msm/
F: include/uapi/drm/msm_drm.h
F: Documentation/devicetree/bindings/display/msm/
DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
M: Ben Skeggs <bskeggs@redhat.com>
L: dri-devel@lists.freedesktop.org
L: nouveau@lists.freedesktop.org
T: git git://github.com/skeggsb/linux
S: Supported
F: drivers/gpu/drm/nouveau/
F: include/uapi/drm/nouveau_drm.h
DRM DRIVERS FOR NVIDIA TEGRA
M: Thierry Reding <thierry.reding@gmail.com>
M: Terje Bergström <tbergstrom@nvidia.com>
L: dri-devel@lists.freedesktop.org
L: linux-tegra@vger.kernel.org
T: git git://anongit.freedesktop.org/tegra/linux.git
@@ -3931,22 +3994,54 @@ F: include/linux/host1x.h
F: include/uapi/drm/tegra_drm.h
F: Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
DRM DRIVER FOR MATROX G200/G400 GRAPHICS CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/mga/
F: include/uapi/drm/mga_drm.h
DRM DRIVER FOR MGA G200 SERVER GRAPHICS CHIPS
M: Dave Airlie <airlied@redhat.com>
S: Odd Fixes
F: drivers/gpu/drm/mgag200/
DRM DRIVER FOR RAGE 128 VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/r128/
F: include/uapi/drm/r128_drm.h
DRM DRIVERS FOR RENESAS
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
L: linux-renesas-soc@vger.kernel.org
T: git git://people.freedesktop.org/~airlied/linux
T: git git://linuxtv.org/pinchartl/fbdev
S: Supported
F: drivers/gpu/drm/rcar-du/
F: drivers/gpu/drm/shmobile/
F: include/linux/platform_data/shmob_drm.h
F: Documentation/devicetree/bindings/display/renesas,du.txt
DRM DRIVER FOR QXL VIRTUAL GPU
M: Dave Airlie <airlied@redhat.com>
S: Odd Fixes
F: drivers/gpu/drm/qxl/
F: include/uapi/drm/qxl_drm.h
DRM DRIVERS FOR ROCKCHIP
M: Mark Yao <mark.yao@rock-chips.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
F: drivers/gpu/drm/rockchip/
F: Documentation/devicetree/bindings/display/rockchip*
F: Documentation/devicetree/bindings/display/rockchip/
DRM DRIVER FOR SAVAGE VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/savage/
F: include/uapi/drm/savage_drm.h
DRM DRIVER FOR SIS VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/sis/
F: include/uapi/drm/sis_drm.h
DRM DRIVERS FOR STI
M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
@@ -3957,14 +4052,43 @@ S: Maintained
F: drivers/gpu/drm/sti
F: Documentation/devicetree/bindings/display/st,stih4xx.txt
DRM DRIVER FOR TDFX VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/tdfx/
DRM DRIVER FOR USB DISPLAYLINK VIDEO ADAPTERS
M: Dave Airlie <airlied@redhat.com>
S: Odd Fixes
F: drivers/gpu/drm/udl/
DRM DRIVERS FOR VIVANTE GPU IP
M: Lucas Stach <l.stach@pengutronix.de>
R: Russell King <linux+etnaviv@armlinux.org.uk>
R: Christian Gmeiner <christian.gmeiner@gmail.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
F: drivers/gpu/drm/etnaviv
F: Documentation/devicetree/bindings/display/etnaviv
F: drivers/gpu/drm/etnaviv/
F: include/uapi/drm/etnaviv_drm.h
F: Documentation/devicetree/bindings/display/etnaviv/
DRM DRIVER FOR VMWARE VIRTUAL GPU
M: "VMware Graphics" <linux-graphics-maintainer@vmware.com>
M: Sinclair Yeh <syeh@vmware.com>
M: Thomas Hellstrom <thellstrom@vmware.com>
L: dri-devel@lists.freedesktop.org
T: git git://people.freedesktop.org/~syeh/repos_linux
T: git git://people.freedesktop.org/~thomash/linux
S: Supported
F: drivers/gpu/drm/vmwgfx/
F: include/uapi/drm/vmwgfx_drm.h
DRM DRIVERS FOR VC4
M: Eric Anholt <eric@anholt.net>
T: git git://github.com/anholt/linux
S: Supported
F: drivers/gpu/drm/vc4/
F: include/uapi/drm/vc4_drm.h
F: Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
DSBR100 USB FM RADIO DRIVER
M: Alexey Klimov <klimov.linux@gmail.com>
@@ -7005,6 +7129,8 @@ MARVELL ARMADA DRM SUPPORT
M: Russell King <rmk+kernel@armlinux.org.uk>
S: Maintained
F: drivers/gpu/drm/armada/
F: include/uapi/drm/armada_drm.h
F: Documentation/devicetree/bindings/display/armada/
MARVELL 88E6352 DSA support
M: Guenter Roeck <linux@roeck-us.net>


@@ -101,8 +101,26 @@
memory {
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00000000 0x80000000 0x40000000>;
ranges = <0x00000000 0x80000000 0x20000000>;
device_type = "memory";
reg = <0x80000000 0x20000000>; /* 512MiB */
reg = <0x80000000 0x1b000000>; /* (512 - 32) MiB */
};
reserved-memory {
#address-cells = <1>;
#size-cells = <1>;
ranges;
/*
* We just move the frame buffer area to the very end of
* available DDR. And even though in case of ARC770 there's
* no strict requirement for a frame buffer to be in any
* particular location, it allows us to use the same
* base board's DT node for ARC PGU as for ARC HS38.
*/
frame_buffer: frame_buffer@9e000000 {
compatible = "shared-dma-pool";
reg = <0x9e000000 0x2000000>;
no-map;
};
};
};


@@ -108,4 +108,18 @@
device_type = "memory";
reg = <0x80000000 0x20000000>; /* 512MiB */
};
reserved-memory {
#address-cells = <1>;
#size-cells = <1>;
ranges;
/*
* Move frame buffer out of IOC aperture (0x8z-0xAz).
*/
frame_buffer: frame_buffer@be000000 {
compatible = "shared-dma-pool";
reg = <0xbe000000 0x2000000>;
no-map;
};
};
};


@@ -131,4 +131,18 @@
device_type = "memory";
reg = <0x80000000 0x20000000>; /* 512MiB */
};
reserved-memory {
#address-cells = <1>;
#size-cells = <1>;
ranges;
/*
* Move frame buffer out of IOC aperture (0x8z-0xAz).
*/
frame_buffer: frame_buffer@be000000 {
compatible = "shared-dma-pool";
reg = <0xbe000000 0x2000000>;
no-map;
};
};
};


@@ -47,6 +47,12 @@
clock-frequency = <50000000>;
#clock-cells = <0>;
};
pguclk: pguclk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <74440000>;
};
};
ethernet@0x18000 {
@@ -160,6 +166,37 @@
clocks = <&i2cclk>;
interrupts = <16>;
adv7511:adv7511@39{
compatible="adi,adv7511";
reg = <0x39>;
interrupts = <23>;
adi,input-depth = <8>;
adi,input-colorspace = "rgb";
adi,input-clock = "1x";
adi,clock-delay = <0x03>;
ports {
#address-cells = <1>;
#size-cells = <0>;
/* RGB/YUV input */
port@0 {
reg = <0>;
adv7511_input:endpoint {
remote-endpoint = <&pgu_output>;
};
};
/* HDMI output */
port@1 {
reg = <1>;
adv7511_output: endpoint {
remote-endpoint = <&hdmi_connector_in>;
};
};
};
};
eeprom@0x54{
compatible = "24c01";
reg = <0x54>;
@@ -173,6 +210,16 @@
};
};
hdmi0: connector {
compatible = "hdmi-connector";
type = "a";
port {
hdmi_connector_in: endpoint {
remote-endpoint = <&adv7511_output>;
};
};
};
gpio0:gpio@13000 {
compatible = "snps,dw-apb-gpio";
reg = <0x13000 0x1000>;
@@ -234,5 +281,19 @@
reg = <2>;
};
};
pgu@17000 {
compatible = "snps,arcpgu";
reg = <0x17000 0x400>;
encoder-slave = <&adv7511>;
clocks = <&pguclk>;
clock-names = "pxlclk";
memory-region = <&frame_buffer>;
port {
pgu_output: endpoint {
remote-endpoint = <&adv7511_input>;
};
};
};
};
};


@@ -124,8 +124,6 @@
&dp {
status = "okay";
samsung,color-space = <0>;
samsung,dynamic-range = <0>;
samsung,ycbcr-coeff = <0>;
samsung,color-depth = <1>;
samsung,link-rate = <0x0a>;
samsung,lane-count = <4>;


@@ -80,8 +80,6 @@
&dp {
samsung,color-space = <0>;
samsung,dynamic-range = <0>;
samsung,ycbcr-coeff = <0>;
samsung,color-depth = <1>;
samsung,link-rate = <0x0a>;
samsung,lane-count = <4>;


@@ -236,12 +236,10 @@
pinctrl-names = "default";
pinctrl-0 = <&dp_hpd>;
samsung,color-space = <0>;
samsung,dynamic-range = <0>;
samsung,ycbcr-coeff = <0>;
samsung,color-depth = <1>;
samsung,link-rate = <0x0a>;
samsung,lane-count = <2>;
samsung,hpd-gpio = <&gpx0 7 GPIO_ACTIVE_HIGH>;
hpd-gpios = <&gpx0 7 GPIO_ACTIVE_HIGH>;
ports {
port0 {


@@ -74,12 +74,10 @@
pinctrl-names = "default";
pinctrl-0 = <&dp_hpd_gpio>;
samsung,color-space = <0>;
samsung,dynamic-range = <0>;
samsung,ycbcr-coeff = <0>;
samsung,color-depth = <1>;
samsung,link-rate = <0x0a>;
samsung,lane-count = <1>;
samsung,hpd-gpio = <&gpc3 0 GPIO_ACTIVE_HIGH>;
hpd-gpios = <&gpc3 0 GPIO_ACTIVE_HIGH>;
};
&ehci {


@ -157,12 +157,10 @@
pinctrl-names = "default";
pinctrl-0 = <&dp_hpd_gpio>;
samsung,color-space = <0>;
samsung,dynamic-range = <0>;
samsung,ycbcr-coeff = <0>;
samsung,color-depth = <1>;
samsung,link-rate = <0x06>;
samsung,lane-count = <2>;
samsung,hpd-gpio = <&gpx2 6 GPIO_ACTIVE_HIGH>;
hpd-gpios = <&gpx2 6 GPIO_ACTIVE_HIGH>;
ports {
port0 {


@ -102,8 +102,6 @@
pinctrl-names = "default";
pinctrl-0 = <&dp_hpd>;
samsung,color-space = <0>;
samsung,dynamic-range = <0>;
samsung,ycbcr-coeff = <0>;
samsung,color-depth = <1>;
samsung,link-rate = <0x0a>;
samsung,lane-count = <4>;


@ -157,8 +157,6 @@
pinctrl-names = "default";
pinctrl-0 = <&dp_hpd_gpio>;
samsung,color-space = <0>;
samsung,dynamic-range = <0>;
samsung,ycbcr-coeff = <0>;
samsung,color-depth = <1>;
samsung,link-rate = <0x0a>;
samsung,lane-count = <2>;


@ -61,7 +61,6 @@ static const struct mtk_fixed_factor top_divs[] __initconst = {
FACTOR(CLK_TOP_CLKRTC_INT, "clkrtc_int", "clk26m", 1, 793),
FACTOR(CLK_TOP_FPC, "fpc_ck", "clk26m", 1, 1),
FACTOR(CLK_TOP_HDMITX_DIG_CTS, "hdmitx_dig_cts", "tvdpll_445p5m", 1, 3),
FACTOR(CLK_TOP_HDMITXPLL_D2, "hdmitxpll_d2", "hdmitx_dig_cts", 1, 2),
FACTOR(CLK_TOP_HDMITXPLL_D3, "hdmitxpll_d3", "hdmitx_dig_cts", 1, 3),
@ -558,7 +557,11 @@ static const struct mtk_composite top_muxes[] __initconst = {
MUX_GATE(CLK_TOP_ATB_SEL, "atb_sel", atb_parents, 0x0090, 16, 2, 23),
MUX_GATE(CLK_TOP_VENC_LT_SEL, "venclt_sel", venc_lt_parents, 0x0090, 24, 4, 31),
/* CLK_CFG_6 */
MUX_GATE(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_parents, 0x00a0, 0, 3, 7),
/*
* The dpi0_sel clock should not propagate rate changes to its parent
* clock so the dpi driver can have full control over PLL and divider.
*/
MUX_GATE_FLAGS(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_parents, 0x00a0, 0, 3, 7, 0),
MUX_GATE(CLK_TOP_IRDA_SEL, "irda_sel", irda_parents, 0x00a0, 8, 2, 15),
MUX_GATE(CLK_TOP_CCI400_SEL, "cci400_sel", cci400_parents, 0x00a0, 16, 3, 23),
MUX_GATE(CLK_TOP_AUD_1_SEL, "aud_1_sel", aud_1_parents, 0x00a0, 24, 2, 31),
@ -1091,6 +1094,11 @@ static void __init mtk_apmixedsys_init(struct device_node *node)
clk_data->clks[cku->id] = clk;
}
clk = clk_register_divider(NULL, "hdmi_ref", "tvdpll_594m", 0,
base + 0x40, 16, 3, CLK_DIVIDER_POWER_OF_TWO,
NULL);
clk_data->clks[CLK_APMIXED_HDMI_REF] = clk;
r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",


@ -83,7 +83,11 @@ struct mtk_composite {
signed char num_parents;
};
#define MUX_GATE(_id, _name, _parents, _reg, _shift, _width, _gate) { \
/*
* In case rate change propagation to the parent clocks is undesirable,
* this macro allows the clock flags to be specified manually.
*/
#define MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, _gate, _flags) { \
.id = _id, \
.name = _name, \
.mux_reg = _reg, \
@ -94,9 +98,16 @@ struct mtk_composite {
.divider_shift = -1, \
.parent_names = _parents, \
.num_parents = ARRAY_SIZE(_parents), \
.flags = CLK_SET_RATE_PARENT, \
.flags = _flags, \
}
/*
* By default, all MUX_GATE clocks propagate rate changes to their
* parent clock; use MUX_GATE_FLAGS where that is not desired.
*/
#define MUX_GATE(_id, _name, _parents, _reg, _shift, _width, _gate) \
MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, _gate, CLK_SET_RATE_PARENT)
#define MUX(_id, _name, _parents, _reg, _shift, _width) { \
.id = _id, \
.name = _name, \

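Putting the two macros together, a mux that must leave its parent PLL alone simply passes explicit flags. A usage sketch (the dpi0_sel line mirrors the clk-mt8173.c hunk above; irda_sel is copied from the same table):

static const struct mtk_composite example_muxes[] __initconst = {
	/* default behaviour: rate requests propagate up to the parent */
	MUX_GATE(CLK_TOP_IRDA_SEL, "irda_sel", irda_parents, 0x00a0, 8, 2, 15),
	/* flags = 0: the framework only picks among existing parent rates */
	MUX_GATE_FLAGS(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_parents, 0x00a0, 0, 3, 7, 0),
};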

@ -52,6 +52,7 @@ config DRM_KMS_FB_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_DEFERRED_IO
help
FBDEV helpers for KMS drivers.
@ -252,6 +253,8 @@ source "drivers/gpu/drm/rcar-du/Kconfig"
source "drivers/gpu/drm/shmobile/Kconfig"
source "drivers/gpu/drm/sun4i/Kconfig"
source "drivers/gpu/drm/omapdrm/Kconfig"
source "drivers/gpu/drm/tilcdc/Kconfig"
@ -281,3 +284,9 @@ source "drivers/gpu/drm/imx/Kconfig"
source "drivers/gpu/drm/vc4/Kconfig"
source "drivers/gpu/drm/etnaviv/Kconfig"
source "drivers/gpu/drm/arc/Kconfig"
source "drivers/gpu/drm/hisilicon/Kconfig"
source "drivers/gpu/drm/mediatek/Kconfig"


@ -1,4 +1,4 @@
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
@ -65,6 +65,7 @@ obj-$(CONFIG_DRM_ATMEL_HLCDC) += atmel-hlcdc/
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/
obj-$(CONFIG_DRM_SHMOBILE) += shmobile/
obj-y += omapdrm/
obj-$(CONFIG_DRM_SUN4I) += sun4i/
obj-y += tilcdc/
obj-$(CONFIG_DRM_QXL) += qxl/
obj-$(CONFIG_DRM_BOCHS) += bochs/
@ -73,8 +74,11 @@ obj-$(CONFIG_DRM_MSM) += msm/
obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-$(CONFIG_DRM_STI) += sti/
obj-$(CONFIG_DRM_IMX) += imx/
obj-$(CONFIG_DRM_MEDIATEK) += mediatek/
obj-y += i2c/
obj-y += panel/
obj-y += bridge/
obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
obj-$(CONFIG_DRM_ARCPGU) += arc/
obj-y += hisilicon/


@ -34,7 +34,7 @@
#define mmACP_AZALIA_I2S_SELECT 0x51d4
int amd_acp_hw_init(void *cgs_device,
int amd_acp_hw_init(struct cgs_device *cgs_device,
unsigned acp_version_major, unsigned acp_version_minor)
{
unsigned int acp_mode = ACP_MODE_I2S;


@ -28,7 +28,7 @@
#include "cgs_linux.h"
#include "cgs_common.h"
int amd_acp_hw_init(void *cgs_device,
int amd_acp_hw_init(struct cgs_device *cgs_device,
unsigned acp_version_major, unsigned acp_version_minor);
#endif /* _ACP_GFX_IF_H */


@ -15,3 +15,13 @@ config DRM_AMDGPU_USERPTR
help
This option selects CONFIG_MMU_NOTIFIER if it isn't already
selected, to enable full userptr support.
config DRM_AMDGPU_GART_DEBUGFS
bool "Allow GART access through debugfs"
depends on DRM_AMDGPU
depends on DEBUG_FS
default n
help
Selecting this option creates a debugfs file to inspect the mapped
pages. It uses more memory for housekeeping; enable it only for debugging.
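The housekeeping the help text refers to is the pages[]/pages_addr[] bookkeeping that the amdgpu_gart hunk further down guards with the same symbol. A sketch of the pattern (field names from that hunk; the wrapper function is hypothetical):

static void example_gart_track(struct amdgpu_device *adev,
			       unsigned page_idx, struct page *page)
{
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	/* remembered purely so the debugfs file can dump the mapping */
	adev->gart.pages[page_idx] = page;
#endif
}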


@ -283,7 +283,8 @@ struct amdgpu_ring_funcs {
int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
/* command emit functions */
void (*emit_ib)(struct amdgpu_ring *ring,
struct amdgpu_ib *ib);
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch);
void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
uint64_t seq, unsigned flags);
void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
@ -302,6 +303,8 @@ struct amdgpu_ring_funcs {
void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
/* pad the indirect buffer to the necessary number of dw */
void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
};
/*
@ -365,13 +368,6 @@ struct amdgpu_fence_driver {
#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
#define AMDGPU_FENCE_FLAG_INT (1 << 1)
struct amdgpu_user_fence {
/* write-back bo */
struct amdgpu_bo *bo;
/* write-back address offset to bo start */
uint32_t offset;
};
int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
@ -391,6 +387,14 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
/*
* TTM.
*/
#define AMDGPU_TTM_LRU_SIZE 20
struct amdgpu_mman_lru {
struct list_head *lru[TTM_NUM_MEM_TYPES];
struct list_head *swap_lru;
};
struct amdgpu_mman {
struct ttm_bo_global_ref bo_global_ref;
struct drm_global_reference mem_global_ref;
@ -408,6 +412,9 @@ struct amdgpu_mman {
struct amdgpu_ring *buffer_funcs_ring;
/* Scheduler entity for buffer moves */
struct amd_sched_entity entity;
/* custom LRU management */
struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE];
};
int amdgpu_copy_buffer(struct amdgpu_ring *ring,
@ -494,9 +501,10 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sg);
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gobj,
int flags);
@ -586,6 +594,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
void *owner);
bool amdgpu_sync_is_idle(struct amdgpu_sync *sync);
int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
struct fence *fence);
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
int amdgpu_sync_wait(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_sync *sync);
@ -609,8 +620,9 @@ struct amdgpu_gart {
unsigned num_gpu_pages;
unsigned num_cpu_pages;
unsigned table_size;
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
struct page **pages;
dma_addr_t *pages_addr;
#endif
bool ready;
const struct amdgpu_gart_funcs *gart_funcs;
};
@ -709,6 +721,7 @@ struct amdgpu_flip_work {
unsigned shared_count;
struct fence **shared;
struct fence_cb cb;
bool async;
};
@ -721,17 +734,7 @@ struct amdgpu_ib {
uint32_t length_dw;
uint64_t gpu_addr;
uint32_t *ptr;
struct amdgpu_user_fence *user;
struct amdgpu_vm *vm;
unsigned vm_id;
uint64_t vm_pd_addr;
struct amdgpu_ctx *ctx;
uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size;
uint32_t oa_base, oa_size;
uint32_t flags;
/* resulting sequence number */
uint64_t sequence;
};
enum amdgpu_ring_type {
@ -742,22 +745,25 @@ enum amdgpu_ring_type {
AMDGPU_RING_TYPE_VCE
};
extern struct amd_sched_backend_ops amdgpu_sched_ops;
extern const struct amd_sched_backend_ops amdgpu_sched_ops;
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
struct amdgpu_job **job);
struct amdgpu_job **job, struct amdgpu_vm *vm);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
struct amdgpu_job **job);
void amdgpu_job_free(struct amdgpu_job *job);
void amdgpu_job_free_func(struct kref *refcount);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
struct fence **f);
void amdgpu_job_timeout_func(struct work_struct *work);
struct amdgpu_ring {
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
struct amdgpu_fence_driver fence_drv;
struct amd_gpu_scheduler sched;
spinlock_t fence_lock;
struct amdgpu_bo *ring_obj;
@ -785,9 +791,12 @@ struct amdgpu_ring {
unsigned wptr_offs;
unsigned next_rptr_offs;
unsigned fence_offs;
struct amdgpu_ctx *current_ctx;
uint64_t current_ctx;
enum amdgpu_ring_type type;
char name[16];
unsigned cond_exe_offs;
u64 cond_exe_gpu_addr;
volatile u32 *cond_exe_cpu_addr;
};
/*
@ -830,13 +839,6 @@ struct amdgpu_vm_pt {
uint64_t addr;
};
struct amdgpu_vm_id {
struct amdgpu_vm_manager_id *mgr_id;
uint64_t pd_gpu_addr;
/* last flushed PD/PT update */
struct fence *flushed_updates;
};
struct amdgpu_vm {
/* tree of virtual addresses mapped */
struct rb_root va;
@ -862,19 +864,29 @@ struct amdgpu_vm {
struct amdgpu_vm_pt *page_tables;
/* for id and flush management per ring */
struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS];
struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
/* protecting freed */
spinlock_t freed_lock;
/* Scheduler entity for page table updates */
struct amd_sched_entity entity;
/* client id */
u64 client_id;
};
struct amdgpu_vm_manager_id {
struct amdgpu_vm_id {
struct list_head list;
struct fence *active;
atomic_long_t owner;
struct fence *first;
struct amdgpu_sync active;
struct fence *last_flush;
struct amdgpu_ring *last_user;
atomic64_t owner;
uint64_t pd_gpu_addr;
/* last flushed PD/PT update */
struct fence *flushed_updates;
uint32_t gds_base;
uint32_t gds_size;
@ -889,7 +901,7 @@ struct amdgpu_vm_manager {
struct mutex lock;
unsigned num_ids;
struct list_head ids_lru;
struct amdgpu_vm_manager_id ids[AMDGPU_NUM_VM];
struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
uint32_t max_pfn;
/* vram base address for page table entry */
@ -901,6 +913,8 @@ struct amdgpu_vm_manager {
struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
unsigned vm_pte_num_rings;
atomic_t vm_pte_next_ring;
/* client id counter */
atomic64_t client_counter;
};
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
@ -916,11 +930,11 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct fence *fence,
unsigned *vm_id, uint64_t *vm_pd_addr);
void amdgpu_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
uint32_t oa_base, uint32_t oa_size);
int amdgpu_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
uint32_t oa_base, uint32_t oa_size);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
@ -1026,6 +1040,11 @@ void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
*/
#include "clearstate_defs.h"
struct amdgpu_rlc_funcs {
void (*enter_safe_mode)(struct amdgpu_device *adev);
void (*exit_safe_mode)(struct amdgpu_device *adev);
};
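The pair is meant to bracket clock- and power-gating register updates while the RLC is parked in a safe state. A hedged sketch of the expected call pattern (the caller is illustrative, not code from this series):

static void example_update_gating(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs->enter_safe_mode(adev);
	/* ... reprogram CG/PG registers while the RLC is parked ... */
	adev->gfx.rlc.funcs->exit_safe_mode(adev);
}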
struct amdgpu_rlc {
/* for power gating */
struct amdgpu_bo *save_restore_obj;
@ -1044,6 +1063,24 @@ struct amdgpu_rlc {
uint64_t cp_table_gpu_addr;
volatile uint32_t *cp_table_ptr;
u32 cp_table_size;
/* safe mode for updating CG/PG state */
bool in_safe_mode;
const struct amdgpu_rlc_funcs *funcs;
/* for firmware data */
u32 save_and_restore_offset;
u32 clear_state_descriptor_offset;
u32 avail_scratch_ram_locations;
u32 reg_restore_list_size;
u32 reg_list_format_start;
u32 reg_list_format_separate_start;
u32 starting_offsets_start;
u32 reg_list_format_size_bytes;
u32 reg_list_size_bytes;
u32 *register_list_format;
u32 *register_restore;
};
struct amdgpu_mec {
@ -1097,6 +1134,12 @@ struct amdgpu_gca_config {
uint32_t macrotile_mode_array[16];
};
struct amdgpu_cu_info {
uint32_t number; /* total active CU number */
uint32_t ao_cu_mask;
uint32_t bitmap[4][4];
};
struct amdgpu_gfx {
struct mutex gpu_clock_mutex;
struct amdgpu_gca_config config;
@ -1129,17 +1172,19 @@ struct amdgpu_gfx {
struct amdgpu_irq_src priv_reg_irq;
struct amdgpu_irq_src priv_inst_irq;
/* gfx status */
uint32_t gfx_current_status;
/* ce ram size*/
unsigned ce_ram_size;
struct amdgpu_cu_info cu_info;
};
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
struct fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_ib *ib, struct fence *last_vm_update,
struct fence **f);
struct amdgpu_job *job, struct fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
@ -1164,7 +1209,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring);
struct amdgpu_cs_chunk {
uint32_t chunk_id;
uint32_t length_dw;
uint32_t *kdata;
void *kdata;
};
struct amdgpu_cs_parser {
@ -1195,13 +1240,25 @@ struct amdgpu_cs_parser {
struct amdgpu_job {
struct amd_sched_job base;
struct amdgpu_device *adev;
struct amdgpu_vm *vm;
struct amdgpu_ring *ring;
struct amdgpu_sync sync;
struct amdgpu_ib *ibs;
struct fence *fence; /* the hw fence */
uint32_t num_ibs;
void *owner;
struct amdgpu_user_fence uf;
uint64_t ctx;
unsigned vm_id;
uint64_t vm_pd_addr;
uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size;
uint32_t oa_base, oa_size;
/* user fence handling */
struct amdgpu_bo *uf_bo;
uint32_t uf_offset;
uint64_t uf_sequence;
};
#define to_amdgpu_job(sched_job) \
container_of((sched_job), struct amdgpu_job, base)
@ -1582,10 +1639,12 @@ void amdgpu_get_pcie_info(struct amdgpu_device *adev);
/*
* UVD
*/
#define AMDGPU_MAX_UVD_HANDLES 10
#define AMDGPU_UVD_STACK_SIZE (1024*1024)
#define AMDGPU_UVD_HEAP_SIZE (1024*1024)
#define AMDGPU_UVD_FIRMWARE_OFFSET 256
#define AMDGPU_DEFAULT_UVD_HANDLES 10
#define AMDGPU_MAX_UVD_HANDLES 40
#define AMDGPU_UVD_STACK_SIZE (200*1024)
#define AMDGPU_UVD_HEAP_SIZE (256*1024)
#define AMDGPU_UVD_SESSION_SIZE (50*1024)
#define AMDGPU_UVD_FIRMWARE_OFFSET 256
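With session state split out and max_handles now a runtime value (see the new struct field below), the VCPU BO sizing roughly follows the constants above. A hypothetical sizing sketch only; the exact formula lives in amdgpu_uvd.c:

static unsigned long example_uvd_bo_size(struct amdgpu_device *adev,
					 unsigned long fw_size)
{
	return AMDGPU_UVD_FIRMWARE_OFFSET + fw_size
		+ AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
		+ adev->uvd.max_handles * AMDGPU_UVD_SESSION_SIZE;
}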
struct amdgpu_uvd {
struct amdgpu_bo *vcpu_bo;
@ -1593,6 +1652,7 @@ struct amdgpu_uvd {
uint64_t gpu_addr;
unsigned fw_version;
void *saved_bo;
unsigned max_handles;
atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
struct delayed_work idle_work;
@ -1645,7 +1705,7 @@ struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq;
int num_instances;
};
/*
@ -1691,12 +1751,12 @@ static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
* Debugfs
*/
struct amdgpu_debugfs {
struct drm_info_list *files;
const struct drm_info_list *files;
unsigned num_files;
};
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
struct drm_info_list *files,
const struct drm_info_list *files,
unsigned nfiles);
int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
@ -1738,13 +1798,6 @@ struct amdgpu_allowed_register_entry {
bool grbm_indexed;
};
struct amdgpu_cu_info {
uint32_t number; /* total active CU number */
uint32_t ao_cu_mask;
uint32_t bitmap[4][4];
};
/*
* ASIC specific functions.
*/
@ -1762,7 +1815,6 @@ struct amdgpu_asic_funcs {
u32 (*get_xclk)(struct amdgpu_device *adev);
/* get the gpu clock counter */
uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
int (*get_cu_info)(struct amdgpu_device *adev, struct amdgpu_cu_info *info);
/* MM block clocks */
int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
@ -1855,15 +1907,8 @@ struct amdgpu_atcs {
/*
* CGS
*/
void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(void *cgs_device);
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
/* GPU virtualization */
@ -1904,16 +1949,15 @@ struct amdgpu_device {
int usec_timeout;
const struct amdgpu_asic_funcs *asic_funcs;
bool shutdown;
bool suspend;
bool need_dma32;
bool accel_working;
struct work_struct reset_work;
struct notifier_block acpi_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
unsigned debugfs_count;
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_regs;
struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
#endif
struct amdgpu_atif atif;
struct amdgpu_atcs atcs;
@ -1926,7 +1970,6 @@ struct amdgpu_device {
/* BIOS */
uint8_t *bios;
bool is_atom_bios;
uint16_t bios_header_start;
struct amdgpu_bo *stollen_vga_memory;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@ -2163,7 +2206,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_cu_info(adev, info) (adev)->asic_funcs->get_cu_info((adev), (info))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
@ -2175,7 +2217,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib))
#define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
@ -2183,6 +2225,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
@ -2196,7 +2240,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
#define amdgpu_display_page_flip(adev, crtc, base) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base))
#define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async))
#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
@ -2339,7 +2383,7 @@ static inline void amdgpu_unregister_atpx_handler(void) {}
* KMS
*/
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern int amdgpu_max_kms_ioctl;
extern const int amdgpu_max_kms_ioctl;
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
int amdgpu_driver_unload_kms(struct drm_device *dev);
@ -2398,5 +2442,4 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
uint64_t addr, struct amdgpu_bo **bo);
#include "amdgpu_object.h"
#endif
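The widened emit_ib() signature and the uint64_t current_ctx field combine in the submission path: the ring backend is told the VM id and whether a context-switch preamble is needed, instead of digging both out of the IB. A sketch of the loop this implies (illustrative, assuming amdgpu_ib_schedule() works along these lines):

static void example_emit_ibs(struct amdgpu_ring *ring, struct amdgpu_job *job)
{
	unsigned i;

	for (i = 0; i < job->num_ibs; ++i) {
		/* only the first IB after a context change needs the preamble */
		bool ctx_switch = ring->current_ctx != job->ctx;

		amdgpu_ring_emit_ib(ring, &job->ibs[i], job->vm_id, ctx_switch);
		ring->current_ctx = job->ctx;
	}
}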


@ -467,13 +467,6 @@ static int acp_soft_reset(void *handle)
return 0;
}
static void acp_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "ACP STATUS\n");
}
static int acp_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@ -487,6 +480,7 @@ static int acp_set_powergating_state(void *handle,
}
const struct amd_ip_funcs acp_ip_funcs = {
.name = "acp_ip",
.early_init = acp_early_init,
.late_init = NULL,
.sw_init = acp_sw_init,
@ -498,7 +492,6 @@ const struct amd_ip_funcs acp_ip_funcs = {
.is_idle = acp_is_idle,
.wait_for_idle = acp_wait_for_idle,
.soft_reset = acp_soft_reset,
.print_status = acp_print_status,
.set_clockgating_state = acp_set_clockgating_state,
.set_powergating_state = acp_set_powergating_state,
};


@ -30,7 +30,7 @@
struct amdgpu_acp {
struct device *parent;
void *cgs_device;
struct cgs_device *cgs_device;
struct amd_acp_private *private;
struct mfd_cell *acp_cell;
struct resource *acp_res;


@ -234,16 +234,6 @@ amdgpu_atombios_get_hpd_info_from_gpio(struct amdgpu_device *adev,
return hpd;
}
static bool amdgpu_atombios_apply_quirks(struct amdgpu_device *adev,
uint32_t supported_device,
int *connector_type,
struct amdgpu_i2c_bus_rec *i2c_bus,
uint16_t *line_mux,
struct amdgpu_hpd *hpd)
{
return true;
}
static const int object_connector_convert[] = {
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_DVII,
@ -514,11 +504,6 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
conn_id = le16_to_cpu(path->usConnObjectId);
if (!amdgpu_atombios_apply_quirks
(adev, le16_to_cpu(path->usDeviceTag), &connector_type,
&ddc_bus, &conn_id, &hpd))
continue;
amdgpu_display_add_connector(adev,
conn_id,
le16_to_cpu(path->usDeviceTag),
@ -699,6 +684,36 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
return ret;
}
union gfx_info {
ATOM_GFX_INFO_V2_1 info;
};
int amdgpu_atombios_get_gfx_info(struct amdgpu_device *adev)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
int index = GetIndexIntoMasterTable(DATA, GFX_Info);
uint8_t frev, crev;
uint16_t data_offset;
int ret = -EINVAL;
if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
union gfx_info *gfx_info = (union gfx_info *)
(mode_info->atom_context->bios + data_offset);
adev->gfx.config.max_shader_engines = gfx_info->info.max_shader_engines;
adev->gfx.config.max_tile_pipes = gfx_info->info.max_tile_pipes;
adev->gfx.config.max_cu_per_sh = gfx_info->info.max_cu_per_sh;
adev->gfx.config.max_sh_per_se = gfx_info->info.max_sh_per_se;
adev->gfx.config.max_backends_per_se = gfx_info->info.max_backends_per_se;
adev->gfx.config.max_texture_channel_caches =
gfx_info->info.max_texture_channel_caches;
ret = 0;
}
return ret;
}
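A presumable call site (illustrative only; the point is that adev->gfx.config gets populated from the GFX_Info table before the GFX IP block is brought up):

static int example_gfx_early_init(struct amdgpu_device *adev)
{
	return amdgpu_atombios_get_gfx_info(adev);
}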
union igp_info {
struct _ATOM_INTEGRATED_SYSTEM_INFO info;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;


@ -144,6 +144,8 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atombios_get_gfx_info(struct amdgpu_device *adev);
bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
struct amdgpu_atom_ss *ss,
int id, u32 clock);


@ -141,7 +141,7 @@ out_cleanup:
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
{
int i;
int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
static const int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
640 * 480 * 4,
720 * 480 * 4,
800 * 600 * 4,


@ -349,7 +349,7 @@ static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
bool amdgpu_get_bios(struct amdgpu_device *adev)
{
bool r;
uint16_t tmp;
uint16_t tmp, bios_header_start;
r = amdgpu_atrm_get_bios(adev);
if (r == false)
@ -383,11 +383,11 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
goto free_bios;
}
adev->bios_header_start = RBIOS16(0x48);
if (!adev->bios_header_start) {
bios_header_start = RBIOS16(0x48);
if (!bios_header_start) {
goto free_bios;
}
tmp = adev->bios_header_start + 4;
tmp = bios_header_start + 4;
if (!memcmp(adev->bios + tmp, "ATOM", 4) ||
!memcmp(adev->bios + tmp, "MOTA", 4)) {
adev->is_atom_bios = true;


@ -106,7 +106,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
struct amdgpu_bo *bo;
struct mm_struct *usermm;
gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle);
gobj = drm_gem_object_lookup(filp, info[i].bo_handle);
if (!gobj) {
r = -ENOENT;
goto error_free;
@ -263,7 +263,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
for (i = 0; i < args->in.bo_number; ++i) {
if (copy_from_user(&info[i], uptr, bytes))
goto error_free;
uptr += args->in.bo_info_size;
}
}
@ -271,7 +271,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
switch (args->in.operation) {
case AMDGPU_BO_LIST_OP_CREATE:
r = amdgpu_bo_list_create(fpriv, &list, &handle);
if (r)
goto error_free;
r = amdgpu_bo_list_set(adev, filp, list, info,
@ -281,7 +281,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
goto error_free;
break;
case AMDGPU_BO_LIST_OP_DESTROY:
amdgpu_bo_list_destroy(fpriv, handle);
handle = 0;


@ -42,7 +42,7 @@ struct amdgpu_cgs_device {
struct amdgpu_device *adev = \
((struct amdgpu_cgs_device *)cgs_device)->adev
static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
static int amdgpu_cgs_gpu_mem_info(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
uint64_t *mc_start, uint64_t *mc_size,
uint64_t *mem_size)
{
@ -73,7 +73,7 @@ static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
return 0;
}
static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
static int amdgpu_cgs_gmap_kmem(struct cgs_device *cgs_device, void *kmem,
uint64_t size,
uint64_t min_offset, uint64_t max_offset,
cgs_handle_t *kmem_handle, uint64_t *mcaddr)
@ -102,7 +102,7 @@ static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
return ret;
}
static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
static int amdgpu_cgs_gunmap_kmem(struct cgs_device *cgs_device, cgs_handle_t kmem_handle)
{
struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;
@ -118,7 +118,7 @@ static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
return 0;
}
static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
enum cgs_gpu_mem_type type,
uint64_t size, uint64_t align,
uint64_t min_offset, uint64_t max_offset,
@ -208,7 +208,7 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
return ret;
}
static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
@ -225,7 +225,7 @@ static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
return 0;
}
static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
uint64_t *mcaddr)
{
int r;
@ -246,7 +246,7 @@ static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
return r;
}
static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
int r;
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
@ -258,7 +258,7 @@ static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
return r;
}
static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
void **map)
{
int r;
@ -271,7 +271,7 @@ static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
return r;
}
static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
int r;
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
@ -283,20 +283,20 @@ static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
return r;
}
static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset)
static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
{
CGS_FUNC_ADEV;
return RREG32(offset);
}
static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset,
static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
uint32_t value)
{
CGS_FUNC_ADEV;
WREG32(offset, value);
}
static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
enum cgs_ind_reg space,
unsigned index)
{
@ -320,7 +320,7 @@ static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
return 0;
}
static void amdgpu_cgs_write_ind_register(void *cgs_device,
static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
enum cgs_ind_reg space,
unsigned index, uint32_t value)
{
@ -343,7 +343,7 @@ static void amdgpu_cgs_write_ind_register(void *cgs_device,
WARN(1, "Invalid indirect register space");
}
static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
static uint8_t amdgpu_cgs_read_pci_config_byte(struct cgs_device *cgs_device, unsigned addr)
{
CGS_FUNC_ADEV;
uint8_t val;
@ -353,7 +353,7 @@ static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
return val;
}
static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
static uint16_t amdgpu_cgs_read_pci_config_word(struct cgs_device *cgs_device, unsigned addr)
{
CGS_FUNC_ADEV;
uint16_t val;
@ -363,7 +363,7 @@ static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
return val;
}
static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
static uint32_t amdgpu_cgs_read_pci_config_dword(struct cgs_device *cgs_device,
unsigned addr)
{
CGS_FUNC_ADEV;
@ -374,7 +374,7 @@ static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
return val;
}
static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
static void amdgpu_cgs_write_pci_config_byte(struct cgs_device *cgs_device, unsigned addr,
uint8_t value)
{
CGS_FUNC_ADEV;
@ -382,7 +382,7 @@ static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
WARN(ret, "pci_write_config_byte error");
}
static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
static void amdgpu_cgs_write_pci_config_word(struct cgs_device *cgs_device, unsigned addr,
uint16_t value)
{
CGS_FUNC_ADEV;
@ -390,7 +390,7 @@ static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
WARN(ret, "pci_write_config_word error");
}
static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
static void amdgpu_cgs_write_pci_config_dword(struct cgs_device *cgs_device, unsigned addr,
uint32_t value)
{
CGS_FUNC_ADEV;
@ -399,7 +399,7 @@ static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
}
static int amdgpu_cgs_get_pci_resource(void *cgs_device,
static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
enum cgs_resource_type resource_type,
uint64_t size,
uint64_t offset,
@ -433,7 +433,7 @@ static int amdgpu_cgs_get_pci_resource(void *cgs_device,
}
}
static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
unsigned table, uint16_t *size,
uint8_t *frev, uint8_t *crev)
{
@ -449,7 +449,7 @@ static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
return NULL;
}
static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
uint8_t *frev, uint8_t *crev)
{
CGS_FUNC_ADEV;
@ -462,7 +462,7 @@ static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
return -EINVAL;
}
static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
void *args)
{
CGS_FUNC_ADEV;
@ -471,33 +471,33 @@ static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
adev->mode_info.atom_context, table, args);
}
static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request)
static int amdgpu_cgs_create_pm_request(struct cgs_device *cgs_device, cgs_handle_t *request)
{
/* TODO */
return 0;
}
static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request)
static int amdgpu_cgs_destroy_pm_request(struct cgs_device *cgs_device, cgs_handle_t request)
{
/* TODO */
return 0;
}
static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request,
static int amdgpu_cgs_set_pm_request(struct cgs_device *cgs_device, cgs_handle_t request,
int active)
{
/* TODO */
return 0;
}
static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request,
static int amdgpu_cgs_pm_request_clock(struct cgs_device *cgs_device, cgs_handle_t request,
enum cgs_clock clock, unsigned freq)
{
/* TODO */
return 0;
}
static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
static int amdgpu_cgs_pm_request_engine(struct cgs_device *cgs_device, cgs_handle_t request,
enum cgs_engine engine, int powered)
{
/* TODO */
@ -506,7 +506,7 @@ static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
static int amdgpu_cgs_pm_query_clock_limits(struct cgs_device *cgs_device,
enum cgs_clock clock,
struct cgs_clock_limits *limits)
{
@ -514,7 +514,7 @@ static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
return 0;
}
static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask,
static int amdgpu_cgs_set_camera_voltages(struct cgs_device *cgs_device, uint32_t mask,
const uint32_t *voltages)
{
DRM_ERROR("not implemented");
@ -565,7 +565,7 @@ static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
.process = cgs_process_irq,
};
static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device, unsigned src_id,
unsigned num_types,
cgs_irq_source_set_func_t set,
cgs_irq_handler_func_t handler,
@ -600,19 +600,19 @@ static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
return ret;
}
static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type)
static int amdgpu_cgs_irq_get(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
{
CGS_FUNC_ADEV;
return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
}
static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type)
static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
{
CGS_FUNC_ADEV;
return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
}
int amdgpu_cgs_set_clockgating_state(void *cgs_device,
int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
enum amd_ip_block_type block_type,
enum amd_clockgating_state state)
{
@ -633,7 +633,7 @@ int amdgpu_cgs_set_clockgating_state(void *cgs_device,
return r;
}
int amdgpu_cgs_set_powergating_state(void *cgs_device,
int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
enum amd_ip_block_type block_type,
enum amd_powergating_state state)
{
@ -655,7 +655,7 @@ int amdgpu_cgs_set_powergating_state(void *cgs_device,
}
static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
{
CGS_FUNC_ADEV;
enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
@ -681,9 +681,10 @@ static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
result = AMDGPU_UCODE_ID_CP_MEC1;
break;
case CGS_UCODE_ID_CP_MEC_JT2:
if (adev->asic_type == CHIP_TONGA)
if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_POLARIS11
|| adev->asic_type == CHIP_POLARIS10)
result = AMDGPU_UCODE_ID_CP_MEC2;
else if (adev->asic_type == CHIP_CARRIZO)
else
result = AMDGPU_UCODE_ID_CP_MEC1;
break;
case CGS_UCODE_ID_RLC_G:
@ -695,13 +696,13 @@ static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
return result;
}
static int amdgpu_cgs_get_firmware_info(void *cgs_device,
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info)
{
CGS_FUNC_ADEV;
if (CGS_UCODE_ID_SMU != type) {
if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) {
uint64_t gpu_addr;
uint32_t data_size;
const struct gfx_firmware_header_v1_0 *header;
@ -734,30 +735,44 @@ static int amdgpu_cgs_get_firmware_info(void *cgs_device,
const uint8_t *src;
const struct smc_firmware_header_v1_0 *hdr;
switch (adev->asic_type) {
case CHIP_TONGA:
strcpy(fw_name, "amdgpu/tonga_smc.bin");
break;
case CHIP_FIJI:
strcpy(fw_name, "amdgpu/fiji_smc.bin");
break;
default:
DRM_ERROR("SMC firmware not supported\n");
return -EINVAL;
}
if (!adev->pm.fw) {
switch (adev->asic_type) {
case CHIP_TONGA:
strcpy(fw_name, "amdgpu/tonga_smc.bin");
break;
case CHIP_FIJI:
strcpy(fw_name, "amdgpu/fiji_smc.bin");
break;
case CHIP_POLARIS11:
if (type == CGS_UCODE_ID_SMU)
strcpy(fw_name, "amdgpu/polaris11_smc.bin");
else if (type == CGS_UCODE_ID_SMU_SK)
strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
break;
case CHIP_POLARIS10:
if (type == CGS_UCODE_ID_SMU)
strcpy(fw_name, "amdgpu/polaris10_smc.bin");
else if (type == CGS_UCODE_ID_SMU_SK)
strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
break;
default:
DRM_ERROR("SMC firmware not supported\n");
return -EINVAL;
}
err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
if (err) {
DRM_ERROR("Failed to request firmware\n");
return err;
}
err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
if (err) {
DRM_ERROR("Failed to request firmware\n");
return err;
}
err = amdgpu_ucode_validate(adev->pm.fw);
if (err) {
DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
release_firmware(adev->pm.fw);
adev->pm.fw = NULL;
return err;
err = amdgpu_ucode_validate(adev->pm.fw);
if (err) {
DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
release_firmware(adev->pm.fw);
adev->pm.fw = NULL;
return err;
}
}
hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
@ -774,7 +789,7 @@ static int amdgpu_cgs_get_firmware_info(void *cgs_device,
return 0;
}
static int amdgpu_cgs_query_system_info(void *cgs_device,
static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
struct cgs_system_info *sys_info)
{
CGS_FUNC_ADEV;
@ -801,6 +816,9 @@ static int amdgpu_cgs_query_system_info(void *cgs_device,
case CGS_SYSTEM_INFO_PG_FLAGS:
sys_info->value = adev->pg_flags;
break;
case CGS_SYSTEM_INFO_GFX_CU_INFO:
sys_info->value = adev->gfx.cu_info.number;
break;
default:
return -ENODEV;
}
@ -808,7 +826,7 @@ static int amdgpu_cgs_query_system_info(void *cgs_device,
return 0;
}
static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
struct cgs_display_info *info)
{
CGS_FUNC_ADEV;
@ -851,7 +869,7 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
}
static int amdgpu_cgs_notify_dpm_enabled(void *cgs_device, bool enabled)
static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
{
CGS_FUNC_ADEV;
@ -867,7 +885,7 @@ static int amdgpu_cgs_notify_dpm_enabled(void *cgs_device, bool enabled)
*/
#if defined(CONFIG_ACPI)
static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
struct cgs_acpi_method_info *info)
{
CGS_FUNC_ADEV;
@ -1030,14 +1048,14 @@ error:
return result;
}
#else
static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
struct cgs_acpi_method_info *info)
{
return -EIO;
}
#endif
int amdgpu_cgs_call_acpi_method(void *cgs_device,
int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
uint32_t acpi_method,
uint32_t acpi_function,
void *pinput, void *poutput,
@ -1121,7 +1139,7 @@ static const struct cgs_os_ops amdgpu_cgs_os_ops = {
amdgpu_cgs_irq_put
};
void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
struct amdgpu_cgs_device *cgs_device =
kmalloc(sizeof(*cgs_device), GFP_KERNEL);
@ -1135,10 +1153,10 @@ void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
cgs_device->adev = adev;
return cgs_device;
return (struct cgs_device *)cgs_device;
}
void amdgpu_cgs_destroy_device(void *cgs_device)
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device)
{
kfree(cgs_device);
}
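The net effect of this file's conversion: every void *cgs_device becomes a struct cgs_device *, so handing a CGS handle to the wrong API now fails at compile time instead of at runtime. A minimal sketch of the pattern (type names as in the diff; the layout is simplified):

struct cgs_device {
	const struct cgs_ops *ops;	/* callback table, as in cgs_common.h */
};

struct amdgpu_cgs_device {
	struct cgs_device base;		/* must stay the first member */
	struct amdgpu_device *adev;	/* driver-private back-pointer */
};

/* mirrors the CGS_FUNC_ADEV macro at the top of the file */
static inline struct amdgpu_device *example_adev(struct cgs_device *cgs_device)
{
	return ((struct amdgpu_cgs_device *)cgs_device)->adev;
}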


@ -439,7 +439,7 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
struct drm_display_mode *mode = NULL;
struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
int i;
struct mode_size {
static const struct mode_size {
int w;
int h;
} common_modes[17] = {


@ -24,7 +24,6 @@
* Authors:
* Jerome Glisse <glisse@freedesktop.org>
*/
#include <linux/list_sort.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
@ -88,44 +87,41 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
}
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
struct amdgpu_user_fence *uf,
struct drm_amdgpu_cs_chunk_fence *fence_data)
struct drm_amdgpu_cs_chunk_fence *data,
uint32_t *offset)
{
struct drm_gem_object *gobj;
uint32_t handle;
handle = fence_data->handle;
gobj = drm_gem_object_lookup(p->adev->ddev, p->filp,
fence_data->handle);
gobj = drm_gem_object_lookup(p->filp, data->handle);
if (gobj == NULL)
return -EINVAL;
uf->bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
uf->offset = fence_data->offset;
if (amdgpu_ttm_tt_get_usermm(uf->bo->tbo.ttm)) {
drm_gem_object_unreference_unlocked(gobj);
return -EINVAL;
}
p->uf_entry.robj = amdgpu_bo_ref(uf->bo);
p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
p->uf_entry.priority = 0;
p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
p->uf_entry.tv.shared = true;
p->uf_entry.user_pages = NULL;
*offset = data->offset;
drm_gem_object_unreference_unlocked(gobj);
if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
amdgpu_bo_unref(&p->uf_entry.robj);
return -EINVAL;
}
return 0;
}
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
union drm_amdgpu_cs *cs = data;
uint64_t *chunk_array_user;
uint64_t *chunk_array;
struct amdgpu_user_fence uf = {};
unsigned size, num_ibs = 0;
uint32_t uf_offset = 0;
int i;
int ret;
@ -200,7 +196,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
goto free_partial_kdata;
}
ret = amdgpu_cs_user_fence_chunk(p, &uf, (void *)p->chunks[i].kdata);
ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
&uf_offset);
if (ret)
goto free_partial_kdata;
@ -215,11 +212,14 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
}
}
ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job);
ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
if (ret)
goto free_all_kdata;
p->job->uf = uf;
if (p->uf_entry.robj) {
p->job->uf_bo = amdgpu_bo_ref(p->uf_entry.robj);
p->job->uf_offset = uf_offset;
}
kfree(chunk_array);
return 0;
@ -377,7 +377,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
INIT_LIST_HEAD(&duplicates);
amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
if (p->job->uf.bo)
if (p->uf_entry.robj)
list_add(&p->uf_entry.tv.head, &p->validated);
if (need_mmap_lock)
@ -473,6 +473,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
goto error_validate;
if (p->bo_list) {
struct amdgpu_bo *gds = p->bo_list->gds_obj;
struct amdgpu_bo *gws = p->bo_list->gws_obj;
struct amdgpu_bo *oa = p->bo_list->oa_obj;
struct amdgpu_vm *vm = &fpriv->vm;
unsigned i;
@ -481,6 +484,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
}
if (gds) {
p->job->gds_base = amdgpu_bo_gpu_offset(gds);
p->job->gds_size = amdgpu_bo_size(gds);
}
if (gws) {
p->job->gws_base = amdgpu_bo_gpu_offset(gws);
p->job->gws_size = amdgpu_bo_size(gws);
}
if (oa) {
p->job->oa_base = amdgpu_bo_gpu_offset(oa);
p->job->oa_size = amdgpu_bo_size(oa);
}
}
error_validate:
@ -527,16 +543,6 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
return 0;
}
static int cmp_size_smaller_first(void *priv, struct list_head *a,
struct list_head *b)
{
struct amdgpu_bo_list_entry *la = list_entry(a, struct amdgpu_bo_list_entry, tv.head);
struct amdgpu_bo_list_entry *lb = list_entry(b, struct amdgpu_bo_list_entry, tv.head);
/* Sort A before B if A is smaller. */
return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}
/**
* cs_parser_fini() - clean parser states
* @parser: parser structure holding parsing context.
@ -553,18 +559,6 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
if (!error) {
amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);
/* Sort the buffer list from the smallest to largest buffer,
* which affects the order of buffers in the LRU list.
* This assures that the smallest buffers are added first
* to the LRU list, so they are likely to be later evicted
* first, instead of large buffers whose eviction is more
* expensive.
*
* This slightly lowers the number of bytes moved by TTM
* per frame under memory pressure.
*/
list_sort(NULL, &parser->validated, cmp_size_smaller_first);
ttm_eu_fence_buffer_objects(&parser->ticket,
&parser->validated,
parser->fence);
@ -763,41 +757,14 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
ib->length_dw = chunk_ib->ib_bytes / 4;
ib->flags = chunk_ib->flags;
ib->ctx = parser->ctx;
j++;
}
/* add GDS resources to first IB */
if (parser->bo_list) {
struct amdgpu_bo *gds = parser->bo_list->gds_obj;
struct amdgpu_bo *gws = parser->bo_list->gws_obj;
struct amdgpu_bo *oa = parser->bo_list->oa_obj;
struct amdgpu_ib *ib = &parser->job->ibs[0];
if (gds) {
ib->gds_base = amdgpu_bo_gpu_offset(gds);
ib->gds_size = amdgpu_bo_size(gds);
}
if (gws) {
ib->gws_base = amdgpu_bo_gpu_offset(gws);
ib->gws_size = amdgpu_bo_size(gws);
}
if (oa) {
ib->oa_base = amdgpu_bo_gpu_offset(oa);
ib->oa_size = amdgpu_bo_size(oa);
}
}
/* wrap the last IB with user fence */
if (parser->job->uf.bo) {
struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1];
/* UVD & VCE fw doesn't support user fences */
if (parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
parser->job->ring->type == AMDGPU_RING_TYPE_VCE)
return -EINVAL;
ib->user = &parser->job->uf;
}
/* UVD & VCE fw doesn't support user fences */
if (parser->job->uf_bo && (
parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
return -EINVAL;
return 0;
}
@ -862,28 +829,28 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
struct amdgpu_ring *ring = p->job->ring;
struct amd_sched_fence *fence;
struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
struct fence *fence;
struct amdgpu_job *job;
int r;
job = p->job;
p->job = NULL;
job->base.sched = &ring->sched;
job->base.s_entity = &p->ctx->rings[ring->idx].entity;
job->owner = p->filp;
fence = amd_sched_fence_create(job->base.s_entity, p->filp);
if (!fence) {
r = amd_sched_job_init(&job->base, &ring->sched,
entity, amdgpu_job_timeout_func,
amdgpu_job_free_func,
p->filp, &fence);
if (r) {
amdgpu_job_free(job);
return -ENOMEM;
return r;
}
job->base.s_fence = fence;
p->fence = fence_get(&fence->base);
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring,
&fence->base);
job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
job->owner = p->filp;
job->ctx = entity->fence_context;
p->fence = fence_get(fence);
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence);
job->uf_sequence = cs->out.handle;
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);


@ -59,6 +59,8 @@ static const char *amdgpu_asic_name[] = {
"FIJI",
"CARRIZO",
"STONEY",
"POLARIS10",
"POLARIS11",
"LAST",
};
@ -346,7 +348,7 @@ static int amdgpu_doorbell_init(struct amdgpu_device *adev)
adev->doorbell.base = pci_resource_start(adev->pdev, 2);
adev->doorbell.size = pci_resource_len(adev->pdev, 2);
adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
if (adev->doorbell.num_doorbells == 0)
return -EINVAL;
@ -936,15 +938,11 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
}
if (amdgpu_gart_size != -1) {
/* gtt size must be power of two and greater or equal to 32M */
/* gtt size must be greater or equal to 32M */
if (amdgpu_gart_size < 32) {
dev_warn(adev->dev, "gart size (%d) too small\n",
amdgpu_gart_size);
amdgpu_gart_size = -1;
} else if (!amdgpu_check_pot_argument(amdgpu_gart_size)) {
dev_warn(adev->dev, "gart size (%d) must be a power of 2\n",
amdgpu_gart_size);
amdgpu_gart_size = -1;
}
}
@ -1144,6 +1142,8 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
case CHIP_TOPAZ:
case CHIP_TONGA:
case CHIP_FIJI:
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_CARRIZO:
case CHIP_STONEY:
if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
@ -1196,7 +1196,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
if (r == -ENOENT) {
adev->ip_block_status[i].valid = false;
} else if (r) {
DRM_ERROR("early_init %d failed %d\n", i, r);
DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r;
} else {
adev->ip_block_status[i].valid = true;
@ -1219,7 +1219,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
continue;
r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
if (r) {
DRM_ERROR("sw_init %d failed %d\n", i, r);
DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r;
}
adev->ip_block_status[i].sw = true;
@ -1252,7 +1252,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
continue;
r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
if (r) {
DRM_ERROR("hw_init %d failed %d\n", i, r);
DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r;
}
adev->ip_block_status[i].hw = true;
@ -1272,13 +1272,13 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_GATE);
if (r) {
DRM_ERROR("set_clockgating_state(gate) %d failed %d\n", i, r);
DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r;
}
if (adev->ip_blocks[i].funcs->late_init) {
r = adev->ip_blocks[i].funcs->late_init((void *)adev);
if (r) {
DRM_ERROR("late_init %d failed %d\n", i, r);
DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r;
}
}
@ -1302,13 +1302,13 @@ static int amdgpu_fini(struct amdgpu_device *adev)
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_UNGATE);
if (r) {
DRM_ERROR("set_clockgating_state(ungate) %d failed %d\n", i, r);
DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r;
}
r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
/* XXX handle errors */
if (r) {
DRM_DEBUG("hw_fini %d failed %d\n", i, r);
DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
}
adev->ip_block_status[i].hw = false;
}
@ -1319,7 +1319,7 @@ static int amdgpu_fini(struct amdgpu_device *adev)
r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
/* XXX handle errors */
if (r) {
DRM_DEBUG("sw_fini %d failed %d\n", i, r);
DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
}
adev->ip_block_status[i].sw = false;
adev->ip_block_status[i].valid = false;
@ -1332,20 +1332,29 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
{
int i, r;
/* ungate SMC block first */
r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
AMD_CG_STATE_UNGATE);
if (r) {
DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r);
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_block_status[i].valid)
continue;
/* ungate blocks so that suspend can properly shut them down */
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_UNGATE);
if (r) {
DRM_ERROR("set_clockgating_state(ungate) %d failed %d\n", i, r);
if (i != AMD_IP_BLOCK_TYPE_SMC) {
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_UNGATE);
if (r) {
DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
}
}
/* XXX handle errors */
r = adev->ip_blocks[i].funcs->suspend(adev);
/* XXX handle errors */
if (r) {
DRM_ERROR("suspend %d failed %d\n", i, r);
DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
}
}
@ -1361,7 +1370,7 @@ static int amdgpu_resume(struct amdgpu_device *adev)
continue;
r = adev->ip_blocks[i].funcs->resume(adev);
if (r) {
DRM_ERROR("resume %d failed %d\n", i, r);
DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r;
}
}
@ -2007,7 +2016,7 @@ void amdgpu_get_pcie_info(struct amdgpu_device *adev)
* Debugfs
*/
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
struct drm_info_list *files,
const struct drm_info_list *files,
unsigned nfiles)
{
unsigned i;
@ -2119,32 +2128,246 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
return result;
}
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
while (size) {
uint32_t value;
value = RREG32_PCIE(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
if (r)
return r;
result += 4;
buf += 4;
*pos += 4;
size -= 4;
}
return result;
}
static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
while (size) {
uint32_t value;
r = get_user(value, (uint32_t *)buf);
if (r)
return r;
WREG32_PCIE(*pos >> 2, value);
result += 4;
buf += 4;
*pos += 4;
size -= 4;
}
return result;
}
static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
while (size) {
uint32_t value;
value = RREG32_DIDT(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
if (r)
return r;
result += 4;
buf += 4;
*pos += 4;
size -= 4;
}
return result;
}
static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
while (size) {
uint32_t value;
r = get_user(value, (uint32_t *)buf);
if (r)
return r;
WREG32_DIDT(*pos >> 2, value);
result += 4;
buf += 4;
*pos += 4;
size -= 4;
}
return result;
}
static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
while (size) {
uint32_t value;
value = RREG32_SMC(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
if (r)
return r;
result += 4;
buf += 4;
*pos += 4;
size -= 4;
}
return result;
}
static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
while (size) {
uint32_t value;
r = get_user(value, (uint32_t *)buf);
if (r)
return r;
WREG32_SMC(*pos >> 2, value);
result += 4;
buf += 4;
*pos += 4;
size -= 4;
}
return result;
}
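
For context, a minimal userspace sketch of how these register files are driven once they land in debugfs: both the offset and the size must be 4-byte aligned, or the handlers above return -EINVAL. The debugfs path, the card index, and the register offset are assumptions for illustration, not part of this diff.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t value;
	/* assumed path: card 0 with debugfs mounted */
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* read the 32-bit register at byte offset 0x30 (dword 0xc) */
	if (pread(fd, &value, sizeof(value), 0x30) != sizeof(value)) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf("reg 0xc = 0x%08x\n", value);
	close(fd);
	return 0;
}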
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_read,
.write = amdgpu_debugfs_regs_write,
.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_didt_read,
.write = amdgpu_debugfs_regs_didt_write,
.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_pcie_read,
.write = amdgpu_debugfs_regs_pcie_write,
.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_smc_read,
.write = amdgpu_debugfs_regs_smc_write,
.llseek = default_llseek
};
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs_didt_fops,
&amdgpu_debugfs_regs_pcie_fops,
&amdgpu_debugfs_regs_smc_fops,
};
static const char *debugfs_regs_names[] = {
"amdgpu_regs",
"amdgpu_regs_didt",
"amdgpu_regs_pcie",
"amdgpu_regs_smc",
};
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
struct drm_minor *minor = adev->ddev->primary;
struct dentry *ent, *root = minor->debugfs_root;
unsigned i, j;
ent = debugfs_create_file("amdgpu_regs", S_IFREG | S_IRUGO, root,
adev, &amdgpu_debugfs_regs_fops);
if (IS_ERR(ent))
return PTR_ERR(ent);
i_size_write(ent->d_inode, adev->rmmio_size);
adev->debugfs_regs = ent;
for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
ent = debugfs_create_file(debugfs_regs_names[i],
S_IFREG | S_IRUGO, root,
adev, debugfs_regs[i]);
if (IS_ERR(ent)) {
for (j = 0; j < i; j++) {
debugfs_remove(adev->debugfs_regs[j]);
adev->debugfs_regs[j] = NULL;
}
return PTR_ERR(ent);
}
if (!i)
i_size_write(ent->d_inode, adev->rmmio_size);
adev->debugfs_regs[i] = ent;
}
return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
{
debugfs_remove(adev->debugfs_regs);
adev->debugfs_regs = NULL;
unsigned i;
for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
if (adev->debugfs_regs[i]) {
debugfs_remove(adev->debugfs_regs[i]);
adev->debugfs_regs[i] = NULL;
}
}
}
int amdgpu_debugfs_init(struct drm_minor *minor)


@ -131,12 +131,17 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
vblank->framedur_ns / 1000,
vblank->linedur_ns / 1000, stat, vpos, hpos);
/* set the flip status */
/* Do the flip (mmio) */
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
/* Set the flip status */
amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
/* Do the flip (mmio) */
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
amdgpuCrtc->crtc_id, amdgpuCrtc, work);
}
/*
@ -192,6 +197,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
work->event = event;
work->adev = adev;
work->crtc_id = amdgpu_crtc->crtc_id;
work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
/* schedule unpin of the old buffer */
old_amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
@ -252,6 +258,9 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
amdgpu_crtc->pflip_works = work;
DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
amdgpu_crtc->crtc_id, amdgpu_crtc, work);
/* update crtc fb */
crtc->primary->fb = fb;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
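
A hedged userspace sketch of what feeds the new work->async flag above: a client passes DRM_MODE_PAGE_FLIP_ASYNC alongside the usual flip-event flag. drmModePageFlip() is the standard libdrm entry point; crtc_id and fb_id are assumed to come from earlier modesetting.

#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int request_async_flip(int fd, uint32_t crtc_id, uint32_t fb_id)
{
	/* DRM_MODE_PAGE_FLIP_ASYNC is what sets work->async above */
	int ret = drmModePageFlip(fd, crtc_id, fb_id,
				  DRM_MODE_PAGE_FLIP_EVENT |
				  DRM_MODE_PAGE_FLIP_ASYNC,
				  NULL /* user_data for the flip event */);

	if (ret)
		fprintf(stderr, "page flip failed: %d\n", ret);
	return ret;
}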
@ -554,7 +563,7 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
struct amdgpu_framebuffer *amdgpu_fb;
int ret;
obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (obj == NULL) {
dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
"can't create framebuffer\n", mode_cmd->handles[0]);
@ -588,20 +597,20 @@ const struct drm_mode_config_funcs amdgpu_mode_funcs = {
.output_poll_changed = amdgpu_output_poll_changed
};
static struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
{ { UNDERSCAN_OFF, "off" },
{ UNDERSCAN_ON, "on" },
{ UNDERSCAN_AUTO, "auto" },
};
static struct drm_prop_enum_list amdgpu_audio_enum_list[] =
static const struct drm_prop_enum_list amdgpu_audio_enum_list[] =
{ { AMDGPU_AUDIO_DISABLE, "off" },
{ AMDGPU_AUDIO_ENABLE, "on" },
{ AMDGPU_AUDIO_AUTO, "auto" },
};
/* XXX support different dither options? spatial, temporal, both, etc. */
static struct drm_prop_enum_list amdgpu_dither_enum_list[] =
static const struct drm_prop_enum_list amdgpu_dither_enum_list[] =
{ { AMDGPU_FMT_DITHER_DISABLE, "off" },
{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};


@ -150,7 +150,7 @@ u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
vrefresh = amdgpu_crtc->hw_mode.vrefresh;
vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
break;
}
}


@ -166,7 +166,7 @@ module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
static struct pci_device_id pciidlist[] = {
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */
{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
@ -277,6 +277,16 @@ static struct pci_device_id pciidlist[] = {
{0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
/* stoney */
{0x1002, 0x98E4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_STONEY|AMD_IS_APU},
/* Polaris11 */
{0x1002, 0x67E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
{0x1002, 0x67E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
{0x1002, 0x67E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
{0x1002, 0x67E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
{0x1002, 0x67EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
{0x1002, 0x67FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
/* Polaris10 */
{0x1002, 0x67C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0x1002, 0x67DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0, 0, 0}
};
@ -514,7 +524,7 @@ static struct drm_driver kms_driver = {
.irq_uninstall = amdgpu_irq_uninstall,
.irq_handler = amdgpu_irq_handler,
.ioctls = amdgpu_ioctls_kms,
.gem_free_object = amdgpu_gem_object_free,
.gem_free_object_unlocked = amdgpu_gem_object_free,
.gem_open_object = amdgpu_gem_object_open,
.gem_close_object = amdgpu_gem_object_close,
.dumb_create = amdgpu_mode_dumb_create,
@ -556,12 +566,10 @@ static struct pci_driver amdgpu_kms_pci_driver = {
static int __init amdgpu_init(void)
{
amdgpu_sync_init();
#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force()) {
DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
return -EINVAL;
}
#endif
DRM_INFO("amdgpu kernel modesetting enabled.\n");
driver = &kms_driver;
pdriver = &amdgpu_kms_pci_driver;


@ -198,7 +198,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
/* There is always exactly one thread signaling this fence slot */
fence = rcu_dereference_protected(*ptr, 1);
rcu_assign_pointer(*ptr, NULL);
RCU_INIT_POINTER(*ptr, NULL);
BUG_ON(!fence);
@ -352,9 +352,9 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
(unsigned long)ring);
ring->fence_drv.num_fences_mask = num_hw_submission - 1;
ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
spin_lock_init(&ring->fence_drv.lock);
ring->fence_drv.fences = kcalloc(num_hw_submission, sizeof(void *),
ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
GFP_KERNEL);
if (!ring->fence_drv.fences)
return -ENOMEM;
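
A minimal standalone sketch of why the mask is now num_hw_submission * 2 - 1, assuming num_hw_submission is a power of two (128 is just an example value): seq & mask indexes the doubled fence array and wraps cleanly.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t num_hw_submission = 128;	/* example; must be a power of two */
	const uint32_t mask = num_hw_submission * 2 - 1;
	uint32_t seq;

	for (seq = 254; seq < 259; seq++)
		printf("seq %u -> slot %u\n", seq, seq & mask);
	/* 254 -> 254, 255 -> 255, 256 -> 0, 257 -> 1, 258 -> 2 */
	return 0;
}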
@ -639,7 +639,7 @@ static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
return 0;
}
static struct drm_info_list amdgpu_debugfs_fence_list[] = {
static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
{"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
};


@ -238,18 +238,17 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
t = offset / AMDGPU_GPU_PAGE_SIZE;
p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
if (adev->gart.pages[p]) {
adev->gart.pages[p] = NULL;
adev->gart.pages_addr[p] = adev->dummy_page.addr;
page_base = adev->gart.pages_addr[p];
if (!adev->gart.ptr)
continue;
#ifdef CONFIG_AMDGPU_GART_DEBUGFS
adev->gart.pages[p] = NULL;
#endif
page_base = adev->dummy_page.addr;
if (!adev->gart.ptr)
continue;
for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
t, page_base, flags);
page_base += AMDGPU_GPU_PAGE_SIZE;
}
for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
t, page_base, flags);
page_base += AMDGPU_GPU_PAGE_SIZE;
}
}
mb();
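
A worked example of the t/p index math used in the bind/unbind loops, assuming AMDGPU_GPU_PAGE_SIZE is 4096; the 64K case illustrates why p divides by the page-size ratio. Only a sketch of the arithmetic, not driver code.

#include <stdio.h>

int main(void)
{
	const unsigned gpu_page = 4096;		/* AMDGPU_GPU_PAGE_SIZE */
	const unsigned offset = 0x10000;	/* 64 KiB into the GART */
	unsigned page_size;

	for (page_size = 4096; page_size <= 65536; page_size *= 16) {
		unsigned t = offset / gpu_page;			/* GPU page index */
		unsigned p = t / (page_size / gpu_page);	/* CPU page index */

		printf("PAGE_SIZE=%u: t=%u p=%u\n", page_size, t, p);
	}
	/* PAGE_SIZE=4096: t=16 p=16; PAGE_SIZE=65536: t=16 p=1 */
	return 0;
}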
@ -287,10 +286,11 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
adev->gart.pages_addr[p] = dma_addr[i];
#ifdef CONFIG_AMDGPU_GART_DEBUGFS
adev->gart.pages[p] = pagelist[i];
#endif
if (adev->gart.ptr) {
page_base = adev->gart.pages_addr[p];
page_base = dma_addr[i];
for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, t, page_base, flags);
page_base += AMDGPU_GPU_PAGE_SIZE;
@ -312,11 +312,11 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
*/
int amdgpu_gart_init(struct amdgpu_device *adev)
{
int r, i;
int r;
if (adev->gart.pages) {
if (adev->dummy_page.page)
return 0;
}
/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
DRM_ERROR("Page size is smaller than GPU page size!\n");
@ -330,22 +330,16 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE;
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
#ifdef CONFIG_AMDGPU_GART_DEBUGFS
/* Allocate pages table */
adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
if (adev->gart.pages == NULL) {
amdgpu_gart_fini(adev);
return -ENOMEM;
}
adev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
adev->gart.num_cpu_pages);
if (adev->gart.pages_addr == NULL) {
amdgpu_gart_fini(adev);
return -ENOMEM;
}
/* set GART entry to point to the dummy page by default */
for (i = 0; i < adev->gart.num_cpu_pages; i++) {
adev->gart.pages_addr[i] = adev->dummy_page.addr;
}
#endif
return 0;
}
@ -358,15 +352,14 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
*/
void amdgpu_gart_fini(struct amdgpu_device *adev)
{
if (adev->gart.pages && adev->gart.pages_addr && adev->gart.ready) {
if (adev->gart.ready) {
/* unbind pages */
amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
}
adev->gart.ready = false;
#ifdef CONFIG_AMDGPU_GART_DEBUGFS
vfree(adev->gart.pages);
vfree(adev->gart.pages_addr);
adev->gart.pages = NULL;
adev->gart.pages_addr = NULL;
#endif
amdgpu_dummy_page_fini(adev);
}


@ -43,7 +43,7 @@ struct amdgpu_ring;
struct amdgpu_bo;
struct amdgpu_gds_asic_info {
uint32_t total_size;
uint32_t gfx_partition_size;
uint32_t cs_partition_size;
};
@ -52,8 +52,8 @@ struct amdgpu_gds {
struct amdgpu_gds_asic_info mem;
struct amdgpu_gds_asic_info gws;
struct amdgpu_gds_asic_info oa;
/* At present, GDS, GWS and OA resources for gfx (graphics)
* are always pre-allocated and available for graphics operation.
* Such resource is shared between all gfx clients.
* TODO: move this operation to user space
* */


@ -93,7 +93,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
struct drm_device *ddev = adev->ddev;
struct drm_file *file;
mutex_lock(&ddev->struct_mutex);
mutex_lock(&ddev->filelist_mutex);
list_for_each_entry(file, &ddev->filelist, lhead) {
struct drm_gem_object *gobj;
@ -103,13 +103,13 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
spin_lock(&file->table_lock);
idr_for_each_entry(&file->object_idr, gobj, handle) {
WARN_ONCE(1, "And also active allocations!\n");
drm_gem_object_unreference(gobj);
drm_gem_object_unreference_unlocked(gobj);
}
idr_destroy(&file->object_idr);
spin_unlock(&file->table_lock);
}
mutex_unlock(&ddev->struct_mutex);
mutex_unlock(&ddev->filelist_mutex);
}
/*
@ -338,7 +338,7 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
struct drm_gem_object *gobj;
struct amdgpu_bo *robj;
gobj = drm_gem_object_lookup(dev, filp, handle);
gobj = drm_gem_object_lookup(filp, handle);
if (gobj == NULL) {
return -ENOENT;
}
@ -402,7 +402,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
int r = 0;
long ret;
gobj = drm_gem_object_lookup(dev, filp, handle);
gobj = drm_gem_object_lookup(filp, handle);
if (gobj == NULL) {
return -ENOENT;
}
@ -436,7 +436,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
int r = -1;
DRM_DEBUG("%d \n", args->handle);
gobj = drm_gem_object_lookup(dev, filp, args->handle);
gobj = drm_gem_object_lookup(filp, args->handle);
if (gobj == NULL)
return -ENOENT;
robj = gem_to_amdgpu_bo(gobj);
@ -584,7 +584,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
gobj = drm_gem_object_lookup(dev, filp, args->handle);
gobj = drm_gem_object_lookup(filp, args->handle);
if (gobj == NULL)
return -ENOENT;
rbo = gem_to_amdgpu_bo(gobj);
@ -646,7 +646,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct amdgpu_bo *robj;
int r;
gobj = drm_gem_object_lookup(dev, filp, args->handle);
gobj = drm_gem_object_lookup(filp, args->handle);
if (gobj == NULL) {
return -ENOENT;
}
@ -769,7 +769,7 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
struct drm_file *file;
int r;
r = mutex_lock_interruptible(&dev->struct_mutex);
r = mutex_lock_interruptible(&dev->filelist_mutex);
if (r)
return r;
@ -793,11 +793,11 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
spin_unlock(&file->table_lock);
}
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->filelist_mutex);
return 0;
}
static struct drm_info_list amdgpu_debugfs_gem_list[] = {
static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif


@ -74,9 +74,6 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
}
ib->vm = vm;
ib->vm_id = 0;
return 0;
}
@ -89,7 +86,8 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*
* Free an IB (all asics).
*/
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f)
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
struct fence *f)
{
amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}
@ -117,28 +115,37 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fen
*/
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_ib *ibs, struct fence *last_vm_update,
struct fence **f)
struct amdgpu_job *job, struct fence **f)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
struct amdgpu_ctx *ctx, *old_ctx;
bool skip_preamble, need_ctx_switch;
unsigned patch_offset = ~0;
struct amdgpu_vm *vm;
struct fence *hwf;
uint64_t ctx;
unsigned i;
int r = 0;
if (num_ibs == 0)
return -EINVAL;
ctx = ibs->ctx;
vm = ibs->vm;
/* ring tests don't use a job */
if (job) {
vm = job->vm;
ctx = job->ctx;
} else {
vm = NULL;
ctx = 0;
}
if (!ring->ready) {
dev_err(adev->dev, "couldn't schedule ib\n");
return -EINVAL;
}
if (vm && !ibs->vm_id) {
if (vm && !job->vm_id) {
dev_err(adev->dev, "VM IB without ID\n");
return -EINVAL;
}
@ -149,58 +156,68 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return r;
}
if (vm) {
/* do context switch */
amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr,
ib->gds_base, ib->gds_size,
ib->gws_base, ib->gws_size,
ib->oa_base, ib->oa_size);
if (ring->type == AMDGPU_RING_TYPE_SDMA && ring->funcs->init_cond_exec)
patch_offset = amdgpu_ring_init_cond_exec(ring);
if (ring->funcs->emit_hdp_flush)
amdgpu_ring_emit_hdp_flush(ring);
if (vm) {
r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
job->gds_base, job->gds_size,
job->gws_base, job->gws_size,
job->oa_base, job->oa_size);
if (r) {
amdgpu_ring_undo(ring);
return r;
}
}
old_ctx = ring->current_ctx;
if (ring->funcs->emit_hdp_flush)
amdgpu_ring_emit_hdp_flush(ring);
/* always set cond_exec_polling to CONTINUE */
*ring->cond_exe_cpu_addr = 1;
skip_preamble = ring->current_ctx == ctx;
need_ctx_switch = ring->current_ctx != ctx;
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];
if (ib->ctx != ctx || ib->vm != vm) {
ring->current_ctx = old_ctx;
if (ib->vm_id)
amdgpu_vm_reset_id(adev, ib->vm_id);
amdgpu_ring_undo(ring);
return -EINVAL;
}
amdgpu_ring_emit_ib(ring, ib);
ring->current_ctx = ctx;
/* drop preamble IBs if we don't have a context switch */
if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
continue;
amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
need_ctx_switch);
need_ctx_switch = false;
}
if (vm) {
if (ring->funcs->emit_hdp_invalidate)
amdgpu_ring_emit_hdp_invalidate(ring);
}
if (ring->funcs->emit_hdp_invalidate)
amdgpu_ring_emit_hdp_invalidate(ring);
r = amdgpu_fence_emit(ring, &hwf);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
ring->current_ctx = old_ctx;
if (ib->vm_id)
amdgpu_vm_reset_id(adev, ib->vm_id);
if (job && job->vm_id)
amdgpu_vm_reset_id(adev, job->vm_id);
amdgpu_ring_undo(ring);
return r;
}
/* wrap the last IB with fence */
if (ib->user) {
uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
addr += ib->user->offset;
amdgpu_ring_emit_fence(ring, addr, ib->sequence,
if (job && job->uf_bo) {
uint64_t addr = amdgpu_bo_gpu_offset(job->uf_bo);
addr += job->uf_offset;
amdgpu_ring_emit_fence(ring, addr, job->uf_sequence,
AMDGPU_FENCE_FLAG_64BIT);
}
if (f)
*f = fence_get(hwf);
if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
amdgpu_ring_patch_cond_exec(ring, patch_offset);
ring->current_ctx = ctx;
amdgpu_ring_commit(ring);
return 0;
}
@ -315,7 +332,7 @@ static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
}
static struct drm_info_list amdgpu_debugfs_sa_list[] = {
static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
};


@ -219,7 +219,6 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
if (r) {
return r;
}
adev->ddev->vblank_disable_allowed = true;
/* enable msi */
adev->irq.msi_enabled = false;
@ -498,7 +497,7 @@ static int amdgpu_irqdomain_map(struct irq_domain *d,
return 0;
}
static struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
.map = amdgpu_irqdomain_map,
};


@ -28,8 +28,25 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
static void amdgpu_job_free_handler(struct work_struct *ws)
{
struct amdgpu_job *job = container_of(ws, struct amdgpu_job, base.work_free_job);
amd_sched_job_put(&job->base);
}
void amdgpu_job_timeout_func(struct work_struct *work)
{
struct amdgpu_job *job = container_of(work, struct amdgpu_job, base.work_tdr.work);
DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
job->base.sched->name,
(uint32_t)atomic_read(&job->ring->fence_drv.last_seq),
job->ring->fence_drv.sync_seq);
amd_sched_job_put(&job->base);
}
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
struct amdgpu_job **job)
struct amdgpu_job **job, struct amdgpu_vm *vm)
{
size_t size = sizeof(struct amdgpu_job);
@ -43,8 +60,10 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
return -ENOMEM;
(*job)->adev = adev;
(*job)->vm = vm;
(*job)->ibs = (void *)&(*job)[1];
(*job)->num_ibs = num_ibs;
INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler);
amdgpu_sync_create(&(*job)->sync);
@ -56,7 +75,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
{
int r;
r = amdgpu_job_alloc(adev, 1, job);
r = amdgpu_job_alloc(adev, 1, job, NULL);
if (r)
return r;
@ -78,8 +97,16 @@ void amdgpu_job_free(struct amdgpu_job *job)
amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f);
fence_put(job->fence);
amdgpu_bo_unref(&job->uf.bo);
amdgpu_bo_unref(&job->uf_bo);
amdgpu_sync_free(&job->sync);
if (!job->base.use_sched)
kfree(job);
}
void amdgpu_job_free_func(struct kref *refcount)
{
struct amdgpu_job *job = container_of(refcount, struct amdgpu_job, base.refcount);
kfree(job);
}
@ -87,16 +114,22 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
struct fence **f)
{
struct fence *fence;
int r;
job->ring = ring;
job->base.sched = &ring->sched;
job->base.s_entity = entity;
job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
if (!job->base.s_fence)
return -ENOMEM;
*f = fence_get(&job->base.s_fence->base);
if (!f)
return -EINVAL;
r = amd_sched_job_init(&job->base, &ring->sched,
entity, amdgpu_job_timeout_func,
amdgpu_job_free_func, owner, &fence);
if (r)
return r;
job->owner = owner;
job->ctx = entity->fence_context;
*f = fence_get(fence);
amd_sched_entity_push_job(&job->base);
return 0;
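
An illustrative (non-authoritative) flow for the reworked job interface, using only the signatures visible in this diff; IB contents, sync setup, and most error handling are elided. The function name is hypothetical.

static int example_submit(struct amdgpu_device *adev,
			  struct amdgpu_ring *ring,
			  struct amd_sched_entity *entity,
			  struct amdgpu_vm *vm, void *owner)
{
	struct amdgpu_job *job;
	struct fence *f;
	int r;

	/* one IB, bound to the VM that amdgpu_job_dependency() will use */
	r = amdgpu_job_alloc(adev, 1, &job, vm);
	if (r)
		return r;

	/* ... fill job->ibs[0] and job->sync here ... */

	r = amdgpu_job_submit(job, ring, entity, owner, &f);
	if (r) {
		amdgpu_job_free(job);
		return r;
	}
	fence_put(f);	/* keep the reference if the caller wants to wait */
	return 0;
}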
@ -105,27 +138,19 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
{
struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct amdgpu_vm *vm = job->ibs->vm;
struct amdgpu_vm *vm = job->vm;
struct fence *fence = amdgpu_sync_get_fence(&job->sync);
if (fence == NULL && vm && !job->ibs->vm_id) {
if (fence == NULL && vm && !job->vm_id) {
struct amdgpu_ring *ring = job->ring;
unsigned i, vm_id;
uint64_t vm_pd_addr;
int r;
r = amdgpu_vm_grab_id(vm, ring, &job->sync,
&job->base.s_fence->base,
&vm_id, &vm_pd_addr);
&job->vm_id, &job->vm_pd_addr);
if (r)
DRM_ERROR("Error getting VM ID (%d)\n", r);
else {
for (i = 0; i < job->num_ibs; ++i) {
job->ibs[i].vm_id = vm_id;
job->ibs[i].vm_pd_addr = vm_pd_addr;
}
}
fence = amdgpu_sync_get_fence(&job->sync);
}
@ -153,7 +178,7 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
trace_amdgpu_sched_run_job(job);
r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
job->sync.last_vm_update, &fence);
job->sync.last_vm_update, job, &fence);
if (r) {
DRM_ERROR("Error scheduling IBs (%d)\n", r);
goto err;
@ -165,7 +190,9 @@ err:
return fence;
}
struct amd_sched_backend_ops amdgpu_sched_ops = {
const struct amd_sched_backend_ops amdgpu_sched_ops = {
.dependency = amdgpu_job_dependency,
.run_job = amdgpu_job_run,
.begin_job = amd_sched_job_begin,
.finish_job = amd_sched_job_finish,
};


@ -427,7 +427,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device dev_info = {};
struct amdgpu_cu_info cu_info;
dev_info.device_id = dev->pdev->device;
dev_info.chip_rev = adev->rev_id;
@ -461,11 +460,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
AMDGPU_GPU_PAGE_SIZE;
dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
amdgpu_asic_get_cu_info(adev, &cu_info);
dev_info.cu_active_number = cu_info.number;
dev_info.cu_ao_mask = cu_info.ao_cu_mask;
dev_info.cu_active_number = adev->gfx.cu_info.number;
dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
dev_info.ce_ram_size = adev->gfx.ce_ram_size;
memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap));
memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
sizeof(adev->gfx.cu_info.bitmap));
dev_info.vram_type = adev->mc.vram_type;
dev_info.vram_bit_width = adev->mc.vram_width;
dev_info.vce_harvest_config = adev->vce.harvest_config;
@ -755,4 +754,4 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);


@ -283,7 +283,7 @@ struct amdgpu_display_funcs {
u32 (*hpd_get_gpio_reg)(struct amdgpu_device *adev);
/* pageflipping */
void (*page_flip)(struct amdgpu_device *adev,
int crtc_id, u64 crtc_base);
int crtc_id, u64 crtc_base, bool async);
int (*page_flip_get_scanoutpos)(struct amdgpu_device *adev, int crtc,
u32 *vbl, u32 *position);
/* display topology setup */
@ -530,7 +530,7 @@ struct amdgpu_framebuffer {
((em) == ATOM_ENCODER_MODE_DP_MST))
/* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */
#define USE_REAL_VBLANKSTART (1 << 30)
#define GET_DISTANCE_TO_VBLANKSTART (1 << 31)
void amdgpu_link_encoder_connector(struct drm_device *dev);


@ -71,7 +71,7 @@ static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
int r;
r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
dev_err(bo->adev->dev, "%p reserve failed\n", bo);


@ -362,16 +362,23 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int ret;
long level;
uint32_t i, mask = 0;
char sub_str[2];
ret = kstrtol(buf, 0, &level);
for (i = 0; i < strlen(buf) - 1; i++) {
sub_str[0] = *(buf + i);
sub_str[1] = '\0';
ret = kstrtol(sub_str, 0, &level);
if (ret) {
count = -EINVAL;
goto fail;
if (ret) {
count = -EINVAL;
goto fail;
}
mask |= 1 << level;
}
if (adev->pp_enabled)
amdgpu_dpm_force_clock_level(adev, PP_SCLK, level);
amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
fail:
return count;
}
@ -399,16 +406,23 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int ret;
long level;
uint32_t i, mask = 0;
char sub_str[2];
ret = kstrtol(buf, 0, &level);
for (i = 0; i < strlen(buf) - 1; i++) {
sub_str[0] = *(buf + i);
sub_str[1] = '\0';
ret = kstrtol(sub_str, 0, &level);
if (ret) {
count = -EINVAL;
goto fail;
if (ret) {
count = -EINVAL;
goto fail;
}
mask |= 1 << level;
}
if (adev->pp_enabled)
amdgpu_dpm_force_clock_level(adev, PP_MCLK, level);
amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
fail:
return count;
}
@ -436,16 +450,23 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int ret;
long level;
uint32_t i, mask = 0;
char sub_str[2];
ret = kstrtol(buf, 0, &level);
for (i = 0; i < strlen(buf) - 1; i++) {
sub_str[0] = *(buf + i);
sub_str[1] = '\0';
ret = kstrtol(sub_str, 0, &level);
if (ret) {
count = -EINVAL;
goto fail;
if (ret) {
count = -EINVAL;
goto fail;
}
mask |= 1 << level;
}
if (adev->pp_enabled)
amdgpu_dpm_force_clock_level(adev, PP_PCIE, level);
amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
fail:
return count;
}
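
A standalone sketch of the mask computation shared by the three sysfs handlers above, assuming the written string is a run of single-digit level indices plus a trailing newline (e.g. "013\n"); each digit sets one bit of the mask handed to amdgpu_dpm_force_clock_level().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t parse_level_mask(const char *buf)
{
	size_t i, len = strlen(buf);
	uint32_t mask = 0;

	if (len < 2)	/* need at least one digit plus the newline */
		return 0;
	for (i = 0; i + 1 < len; i++) {
		if (buf[i] < '0' || buf[i] > '9')
			return 0;	/* the kernel handler returns -EINVAL */
		mask |= 1u << (buf[i] - '0');
	}
	return mask;
}

int main(void)
{
	printf("0x%x\n", parse_level_mask("013\n"));	/* prints 0xb */
	return 0;
}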
@ -1212,7 +1233,7 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
return 0;
}
static struct drm_info_list amdgpu_pm_info_list[] = {
static const struct drm_info_list amdgpu_pm_info_list[] = {
{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif


@ -99,6 +99,10 @@ static int amdgpu_pp_early_init(void *handle)
#ifdef CONFIG_DRM_AMD_POWERPLAY
switch (adev->asic_type) {
case CHIP_POLARIS11:
case CHIP_POLARIS10:
adev->pp_enabled = true;
break;
case CHIP_TONGA:
case CHIP_FIJI:
adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
@ -299,16 +303,8 @@ static int amdgpu_pp_soft_reset(void *handle)
return ret;
}
static void amdgpu_pp_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->print_status)
adev->powerplay.ip_funcs->print_status(
adev->powerplay.pp_handle);
}
const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.name = "amdgpu_powerplay",
.early_init = amdgpu_pp_early_init,
.late_init = amdgpu_pp_late_init,
.sw_init = amdgpu_pp_sw_init,
@ -320,7 +316,6 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.is_idle = amdgpu_pp_is_idle,
.wait_for_idle = amdgpu_pp_wait_for_idle,
.soft_reset = amdgpu_pp_soft_reset,
.print_status = amdgpu_pp_print_status,
.set_clockgating_state = amdgpu_pp_set_clockgating_state,
.set_powergating_state = amdgpu_pp_set_powergating_state,
};


@ -57,9 +57,10 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
ttm_bo_kunmap(&bo->dma_buf_vmap);
}
struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sg)
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sg)
{
struct reservation_object *resv = attach->dmabuf->resv;
struct amdgpu_device *adev = dev->dev_private;


@ -46,7 +46,8 @@
* wptr. The GPU then starts fetching commands and executes
* them until the pointers are equal again.
*/
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
/**
* amdgpu_ring_alloc - allocate space on the ring buffer
@ -215,18 +216,17 @@ int amdgpu_ring_restore(struct amdgpu_ring *ring,
*
* @adev: amdgpu_device pointer
* @ring: amdgpu_ring structure holding ring information
* @ring_size: size of the ring
* @max_dw: maximum number of dw for ring alloc
* @nop: nop packet for this ring
*
* Initialize the driver information for the selected ring (all asics).
* Returns 0 on success, error on failure.
*/
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned ring_size, u32 nop, u32 align_mask,
unsigned max_dw, u32 nop, u32 align_mask,
struct amdgpu_irq_src *irq_src, unsigned irq_type,
enum amdgpu_ring_type ring_type)
{
u32 rb_bufsz;
int r;
if (ring->adev == NULL) {
@ -265,8 +265,17 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r);
return r;
}
ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4);
ring->next_rptr_gpu_addr = adev->wb.gpu_addr + ring->next_rptr_offs * 4;
ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
if (r) {
dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
return r;
}
ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
spin_lock_init(&ring->fence_lock);
r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
if (r) {
@ -274,10 +283,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
return r;
}
/* Align ring size */
rb_bufsz = order_base_2(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
ring->ring_size = ring_size;
ring->ring_size = roundup_pow_of_two(max_dw * 4 *
amdgpu_sched_hw_submission);
ring->align_mask = align_mask;
ring->nop = nop;
ring->type = ring_type;
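
A worked example of the new sizing, with max_dw = 1024 and amdgpu_sched_hw_submission = 2 (both assumed values, not fixed by this diff): the rounded ring size keeps the ptr_mask arithmetic a power of two.

#include <stdio.h>

static unsigned roundup_pow_of_two(unsigned v)
{
	unsigned r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned max_dw = 1024, hw_submission = 2;	/* assumed values */
	unsigned ring_size = roundup_pow_of_two(max_dw * 4 * hw_submission);

	printf("ring_size=%u bytes, ptr_mask=0x%x\n",
	       ring_size, ring_size / 4 - 1);
	/* ring_size=8192 bytes, ptr_mask=0x7ff */
	return 0;
}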
@ -310,8 +317,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
}
}
ring->ptr_mask = (ring->ring_size / 4) - 1;
ring->max_dw = DIV_ROUND_UP(ring->ring_size / 4,
amdgpu_sched_hw_submission);
ring->max_dw = max_dw;
if (amdgpu_debugfs_ring_init(adev, ring)) {
DRM_ERROR("Failed to register debugfs file for rings !\n");
@ -363,9 +369,8 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
int roffset = *(int*)node->info_ent->data;
int roffset = (unsigned long)node->info_ent->data;
struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset);
uint32_t rptr, wptr, rptr_next;
unsigned i;
@ -408,46 +413,37 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
return 0;
}
/* TODO: clean this up !*/
static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]);
static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]);
static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]);
static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma.instance[0].ring);
static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma.instance[1].ring);
static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring);
static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]);
static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]);
static struct drm_info_list amdgpu_debugfs_ring_info_list[] = {
{"amdgpu_ring_gfx", amdgpu_debugfs_ring_info, 0, &amdgpu_gfx_index},
{"amdgpu_ring_cp1", amdgpu_debugfs_ring_info, 0, &cayman_cp1_index},
{"amdgpu_ring_cp2", amdgpu_debugfs_ring_info, 0, &cayman_cp2_index},
{"amdgpu_ring_dma1", amdgpu_debugfs_ring_info, 0, &amdgpu_dma1_index},
{"amdgpu_ring_dma2", amdgpu_debugfs_ring_info, 0, &amdgpu_dma2_index},
{"amdgpu_ring_uvd", amdgpu_debugfs_ring_info, 0, &r600_uvd_index},
{"amdgpu_ring_vce1", amdgpu_debugfs_ring_info, 0, &si_vce1_index},
{"amdgpu_ring_vce2", amdgpu_debugfs_ring_info, 0, &si_vce2_index},
};
static struct drm_info_list amdgpu_debugfs_ring_info_list[AMDGPU_MAX_RINGS];
static char amdgpu_debugfs_ring_names[AMDGPU_MAX_RINGS][32];
#endif
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring)
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
unsigned offset = (uint8_t*)ring - (uint8_t*)adev;
unsigned i;
struct drm_info_list *info;
char *name;
for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) {
struct drm_info_list *info = &amdgpu_debugfs_ring_info_list[i];
int roffset = *(int*)amdgpu_debugfs_ring_info_list[i].data;
struct amdgpu_ring *other = (void *)(((uint8_t*)adev) + roffset);
unsigned r;
if (other != ring)
continue;
r = amdgpu_debugfs_add_files(adev, info, 1);
if (r)
return r;
info = &amdgpu_debugfs_ring_info_list[i];
if (!info->data)
break;
}
if (i == ARRAY_SIZE(amdgpu_debugfs_ring_info_list))
return -ENOSPC;
name = &amdgpu_debugfs_ring_names[i][0];
sprintf(name, "amdgpu_ring_%s", ring->name);
info->name = name;
info->show = amdgpu_debugfs_ring_info;
info->driver_features = 0;
info->data = (void*)(uintptr_t)offset;
return amdgpu_debugfs_add_files(adev, info, 1);
#endif
return 0;
}


@ -108,6 +108,29 @@ static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
*keep = fence_get(fence);
}
/**
* amdgpu_sync_add_later - add the fence to the hash
*
* @sync: sync object to add the fence to
* @f: fence to add
*
* Tries to add the fence to an existing hash entry. Returns true when an entry
* was found, false otherwise.
*/
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
{
struct amdgpu_sync_entry *e;
hash_for_each_possible(sync->fences, e, node, f->context) {
if (unlikely(e->fence->context != f->context))
continue;
amdgpu_sync_keep_later(&e->fence, f);
return true;
}
return false;
}
/**
* amdgpu_sync_fence - remember to sync to this fence
*
@ -127,13 +150,8 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
amdgpu_sync_keep_later(&sync->last_vm_update, f);
hash_for_each_possible(sync->fences, e, node, f->context) {
if (unlikely(e->fence->context != f->context))
continue;
amdgpu_sync_keep_later(&e->fence, f);
if (amdgpu_sync_add_later(sync, f))
return 0;
}
e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
if (!e)
@ -204,6 +222,81 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
return r;
}
/**
* amdgpu_sync_is_idle - test if all fences are signaled
*
* @sync: the sync object
*
* Returns true if all fences in the sync object are signaled.
*/
bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
int i;
hash_for_each_safe(sync->fences, i, tmp, e, node) {
struct fence *f = e->fence;
if (fence_is_signaled(f)) {
hash_del(&e->node);
fence_put(f);
kmem_cache_free(amdgpu_sync_slab, e);
continue;
}
return false;
}
return true;
}
/**
* amdgpu_sync_cycle_fences - move fences from one sync object into another
*
* @dst: the destination sync object
* @src: the source sync object
* @fence: fence to add to source
*
* Remove all fences from source and put them into destination, then add
* fence as the new one into source.
*/
int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
struct fence *fence)
{
struct amdgpu_sync_entry *e, *newone;
struct hlist_node *tmp;
int i;
/* Allocate the new entry before moving the old ones */
newone = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
if (!newone)
return -ENOMEM;
hash_for_each_safe(src->fences, i, tmp, e, node) {
struct fence *f = e->fence;
hash_del(&e->node);
if (fence_is_signaled(f)) {
fence_put(f);
kmem_cache_free(amdgpu_sync_slab, e);
continue;
}
if (amdgpu_sync_add_later(dst, f)) {
kmem_cache_free(amdgpu_sync_slab, e);
continue;
}
hash_add(dst->fences, &e->node, f->context);
}
hash_add(src->fences, &newone->node, fence->context);
newone->fence = fence_get(fence);
return 0;
}
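
A rough, illustrative fragment of how the two new helpers appear intended to combine (the VM ID code later in this series is the real consumer): test a per-ID sync for idleness, then swap its accumulated fences into the job's sync while seeding the ID with the new fence. The function name is hypothetical and locking is left to the caller.

static int example_reuse_id(struct amdgpu_sync *job_sync,
			    struct amdgpu_sync *id_active,
			    struct fence *new_fence)
{
	if (!amdgpu_sync_is_idle(id_active))
		return -EBUSY;	/* ID still in flight, try another one */

	/* move the old fences into the job, remember new_fence in the ID */
	return amdgpu_sync_cycle_fences(job_sync, id_active, new_fence);
}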
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
struct amdgpu_sync_entry *e;


@ -911,6 +911,52 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
return flags;
}
static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
unsigned i, j;
for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];
for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
if (&tbo->lru == lru->lru[j])
lru->lru[j] = tbo->lru.prev;
if (&tbo->swap == lru->swap_lru)
lru->swap_lru = tbo->swap.prev;
}
}
static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
unsigned log2_size = min(ilog2(tbo->num_pages),
AMDGPU_TTM_LRU_SIZE - 1);
return &adev->mman.log2_size[log2_size];
}
static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
{
struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo);
struct list_head *res = lru->lru[tbo->mem.mem_type];
lru->lru[tbo->mem.mem_type] = &tbo->lru;
return res;
}
static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
{
struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo);
struct list_head *res = lru->swap_lru;
lru->swap_lru = &tbo->swap;
return res;
}
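
A worked example of the bucket selection above: buffers are binned by ilog2(num_pages), clamped to AMDGPU_TTM_LRU_SIZE - 1. The define's value is not shown in this hunk, so 8 is assumed purely for illustration.

#include <stdio.h>

#define AMDGPU_TTM_LRU_SIZE 8	/* assumed value, for illustration only */

static unsigned ilog2_u(unsigned long v)
{
	unsigned r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned long num_pages[] = { 1, 16, 300, 1UL << 20 };
	unsigned i;

	for (i = 0; i < 4; i++) {
		unsigned bucket = ilog2_u(num_pages[i]);

		if (bucket > AMDGPU_TTM_LRU_SIZE - 1)
			bucket = AMDGPU_TTM_LRU_SIZE - 1;
		printf("%lu pages -> bucket %u\n", num_pages[i], bucket);
	}
	/* 1 -> 0, 16 -> 4, 300 -> 7 (clamped), 1M -> 7 (clamped) */
	return 0;
}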
static struct ttm_bo_driver amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
@ -924,10 +970,14 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
.io_mem_free = &amdgpu_ttm_io_mem_free,
.lru_removal = &amdgpu_ttm_lru_removal,
.lru_tail = &amdgpu_ttm_lru_tail,
.swap_lru_tail = &amdgpu_ttm_swap_lru_tail,
};
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
unsigned i, j;
int r;
r = amdgpu_ttm_global_init(adev);
@ -945,6 +995,15 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
}
for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];
for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
lru->lru[j] = &adev->mman.bdev.man[j].lru;
lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
}
adev->mman.initialized = true;
r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
adev->mc.real_vram_size >> PAGE_SHIFT);
@ -1167,7 +1226,7 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;
static struct drm_info_list amdgpu_ttm_debugfs_list[] = {
static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
@ -1218,6 +1277,8 @@ static const struct file_operations amdgpu_ttm_vram_fops = {
.llseek = default_llseek
};
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@ -1265,6 +1326,8 @@ static const struct file_operations amdgpu_ttm_gtt_fops = {
#endif
#endif
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
@ -1280,6 +1343,7 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
i_size_write(ent->d_inode, adev->mc.mc_vram_size);
adev->mman.vram = ent;
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
adev, &amdgpu_ttm_gtt_fops);
if (IS_ERR(ent))
@ -1287,6 +1351,7 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
i_size_write(ent->d_inode, adev->mc.gtt_size);
adev->mman.gtt = ent;
#endif
count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
#ifdef CONFIG_SWIOTLB
@ -1308,7 +1373,10 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
debugfs_remove(adev->mman.vram);
adev->mman.vram = NULL;
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
debugfs_remove(adev->mman.gtt);
adev->mman.gtt = NULL;
#endif
#endif
}


@ -41,19 +41,23 @@
/* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS 1000
/* Polaris10/11 firmware version */
#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8))
/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE "radeon/bonaire_uvd.bin"
#define FIRMWARE_KABINI "radeon/kabini_uvd.bin"
#define FIRMWARE_KAVERI "radeon/kaveri_uvd.bin"
#define FIRMWARE_HAWAII "radeon/hawaii_uvd.bin"
#define FIRMWARE_MULLINS "radeon/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO "amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI "amdgpu/fiji_uvd.bin"
#define FIRMWARE_STONEY "amdgpu/stoney_uvd.bin"
#define FIRMWARE_POLARIS10 "amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11 "amdgpu/polaris11_uvd.bin"
/**
* amdgpu_uvd_cs_ctx - Command submission parser context
@ -85,6 +89,8 @@ MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
@ -131,6 +137,12 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
case CHIP_STONEY:
fw_name = FIRMWARE_STONEY;
break;
case CHIP_POLARIS10:
fw_name = FIRMWARE_POLARIS10;
break;
case CHIP_POLARIS11:
fw_name = FIRMWARE_POLARIS11;
break;
default:
return -EINVAL;
}
@ -151,6 +163,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
return r;
}
/* Set the default number of UVD handles supported by the firmware */
adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;
hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
@ -158,11 +173,28 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
version_major, version_minor, family_id);
/*
* Limit the number of UVD handles depending on microcode major
* and minor versions. The first firmware version to support 40 UVD
* instances is 1.80, so all subsequent versions also have that
* support.
*/
if ((version_major > 0x01) ||
((version_major == 0x01) && (version_minor >= 0x50)))
adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
(family_id << 8));
if ((adev->asic_type == CHIP_POLARIS10 ||
adev->asic_type == CHIP_POLARIS11) &&
(adev->uvd.fw_version < FW_1_66_16))
DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
version_major, version_minor);
bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
+ AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
+ AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
+ AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
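
A standalone sketch of the version packing used above (major in bits 31:24, minor in 23:16, family id in 15:8), decoding the new FW_1_66_16 constant:

#include <stdint.h>
#include <stdio.h>

#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8))

int main(void)
{
	uint32_t fw = FW_1_66_16;

	printf("version %u.%u, family 0x%02x (raw 0x%08x)\n",
	       (fw >> 24) & 0xff, (fw >> 16) & 0xff, (fw >> 8) & 0xff, fw);
	/* version 1.66, family 0x10 (raw 0x01421000) */
	return 0;
}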
@ -205,7 +237,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
return r;
}
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
for (i = 0; i < adev->uvd.max_handles; ++i) {
atomic_set(&adev->uvd.handles[i], 0);
adev->uvd.filp[i] = NULL;
}
@ -251,7 +283,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (adev->uvd.vcpu_bo == NULL)
return 0;
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
for (i = 0; i < adev->uvd.max_handles; ++i)
if (atomic_read(&adev->uvd.handles[i]))
break;
@ -308,7 +340,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
struct amdgpu_ring *ring = &adev->uvd.ring;
int i, r;
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
for (i = 0; i < adev->uvd.max_handles; ++i) {
uint32_t handle = atomic_read(&adev->uvd.handles[i]);
if (handle != 0 && adev->uvd.filp[i] == filp) {
struct fence *fence;
@ -390,7 +422,8 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
*
* Peek into the decode message and calculate the necessary buffer sizes.
*/
static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
unsigned buf_sizes[])
{
unsigned stream_type = msg[4];
unsigned width = msg[6];
@ -412,7 +445,6 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
switch (stream_type) {
case 0: /* H264 */
case 7: /* H264 Perf */
switch(level) {
case 30:
num_dpb_buffer = 8100 / fs_in_mb;
@ -490,6 +522,54 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
break;
case 7: /* H264 Perf */
switch(level) {
case 30:
num_dpb_buffer = 8100 / fs_in_mb;
break;
case 31:
num_dpb_buffer = 18000 / fs_in_mb;
break;
case 32:
num_dpb_buffer = 20480 / fs_in_mb;
break;
case 41:
num_dpb_buffer = 32768 / fs_in_mb;
break;
case 42:
num_dpb_buffer = 34816 / fs_in_mb;
break;
case 50:
num_dpb_buffer = 110400 / fs_in_mb;
break;
case 51:
num_dpb_buffer = 184320 / fs_in_mb;
break;
default:
num_dpb_buffer = 184320 / fs_in_mb;
break;
}
num_dpb_buffer++;
if (num_dpb_buffer > 17)
num_dpb_buffer = 17;
/* reference picture buffer */
min_dpb_size = image_size * num_dpb_buffer;
if (adev->asic_type < CHIP_POLARIS10){
/* macroblock context buffer */
min_dpb_size +=
width_in_mb * height_in_mb * num_dpb_buffer * 192;
/* IT surface buffer */
min_dpb_size += width_in_mb * height_in_mb * 32;
} else {
/* macroblock context buffer */
min_ctx_size =
width_in_mb * height_in_mb * num_dpb_buffer * 192;
}
break;
case 16: /* H265 */
image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
image_size = ALIGN(image_size, 256);
@ -568,7 +648,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
amdgpu_bo_kunmap(bo);
/* try to alloc a new handle */
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
for (i = 0; i < adev->uvd.max_handles; ++i) {
if (atomic_read(&adev->uvd.handles[i]) == handle) {
DRM_ERROR("Handle 0x%x already in use!\n", handle);
return -EINVAL;
@ -585,13 +665,13 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
case 1:
/* it's a decode msg, calc buffer sizes */
r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes);
amdgpu_bo_kunmap(bo);
if (r)
return r;
/* validate the handle */
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
for (i = 0; i < adev->uvd.max_handles; ++i) {
if (atomic_read(&adev->uvd.handles[i]) == handle) {
if (adev->uvd.filp[i] != ctx->parser->filp) {
DRM_ERROR("UVD handle collision detected!\n");
@ -606,7 +686,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
case 2:
/* it's a destroy msg, free the handle */
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
for (i = 0; i < adev->uvd.max_handles; ++i)
atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
amdgpu_bo_kunmap(bo);
return 0;
@ -886,7 +966,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16;
if (direct) {
r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
job->fence = f;
if (r)
goto err_free;
@ -1018,7 +1098,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
for (i = 0; i < adev->uvd.max_handles; ++i)
if (atomic_read(&adev->uvd.handles[i]))
++handles;


@ -41,15 +41,17 @@
/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE "radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI "radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI "radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII "radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS "radeon/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA "amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO "amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI "amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY "amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10 "amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11 "amdgpu/polaris11_vce.bin"
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@ -62,6 +64,8 @@ MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
static void amdgpu_vce_idle_work_handler(struct work_struct *work);
@ -113,6 +117,12 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
case CHIP_STONEY:
fw_name = FIRMWARE_STONEY;
break;
case CHIP_POLARIS10:
fw_name = FIRMWARE_POLARIS10;
break;
case CHIP_POLARIS11:
fw_name = FIRMWARE_POLARIS11;
break;
default:
return -EINVAL;
@ -426,7 +436,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
job->fence = f;
if (r)
goto err;
@ -488,7 +498,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[i] = 0x0;
if (direct) {
r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
job->fence = f;
if (r)
goto err;
@ -752,7 +762,8 @@ out:
* @ib: the IB to execute
*
*/
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
amdgpu_ring_write(ring, VCE_CMD_IB);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));


@ -34,7 +34,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct fence **fence);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch);
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags);
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);


@ -166,74 +166,109 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
{
uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vm_id *id = &vm->ids[ring->idx];
struct fence *updates = sync->last_vm_update;
struct amdgpu_vm_id *id;
unsigned i = ring->idx;
int r;
mutex_lock(&adev->vm_manager.lock);
/* check if the id is still valid */
if (id->mgr_id) {
struct fence *flushed = id->flushed_updates;
bool is_later;
long owner;
/* Check if we can use a VMID already assigned to this VM */
do {
struct fence *flushed;
if (!flushed)
is_later = true;
else if (!updates)
is_later = false;
else
is_later = fence_is_later(updates, flushed);
id = vm->ids[i++];
if (i == AMDGPU_MAX_RINGS)
i = 0;
owner = atomic_long_read(&id->mgr_id->owner);
if (!is_later && owner == (long)id &&
pd_addr == id->pd_gpu_addr) {
/* Check all the prerequisites to using this VMID */
if (!id)
continue;
if (atomic64_read(&id->owner) != vm->client_id)
continue;
if (pd_addr != id->pd_gpu_addr)
continue;
if (id->last_user != ring &&
(!id->last_flush || !fence_is_signaled(id->last_flush)))
continue;
flushed = id->flushed_updates;
if (updates && (!flushed || fence_is_later(updates, flushed)))
continue;
/* Good we can use this VMID */
if (id->last_user == ring) {
r = amdgpu_sync_fence(ring->adev, sync,
id->mgr_id->active);
if (r) {
mutex_unlock(&adev->vm_manager.lock);
return r;
}
fence_put(id->mgr_id->active);
id->mgr_id->active = fence_get(fence);
list_move_tail(&id->mgr_id->list,
&adev->vm_manager.ids_lru);
*vm_id = id->mgr_id - adev->vm_manager.ids;
*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id,
*vm_pd_addr);
mutex_unlock(&adev->vm_manager.lock);
return 0;
id->first);
if (r)
goto error;
}
}
id->mgr_id = list_first_entry(&adev->vm_manager.ids_lru,
struct amdgpu_vm_manager_id,
list);
/* And remember this submission as user of the VMID */
r = amdgpu_sync_fence(ring->adev, &id->active, fence);
if (r)
goto error;
r = amdgpu_sync_fence(ring->adev, sync, id->mgr_id->active);
if (!r) {
fence_put(id->mgr_id->active);
id->mgr_id->active = fence_get(fence);
list_move_tail(&id->list, &adev->vm_manager.ids_lru);
vm->ids[ring->idx] = id;
fence_put(id->flushed_updates);
id->flushed_updates = fence_get(updates);
id->pd_gpu_addr = pd_addr;
list_move_tail(&id->mgr_id->list, &adev->vm_manager.ids_lru);
atomic_long_set(&id->mgr_id->owner, (long)id);
*vm_id = id->mgr_id - adev->vm_manager.ids;
*vm_pd_addr = pd_addr;
*vm_id = id - adev->vm_manager.ids;
*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
mutex_unlock(&adev->vm_manager.lock);
return 0;
} while (i != ring->idx);
id = list_first_entry(&adev->vm_manager.ids_lru,
struct amdgpu_vm_id,
list);
if (!amdgpu_sync_is_idle(&id->active)) {
struct list_head *head = &adev->vm_manager.ids_lru;
struct amdgpu_vm_id *tmp;
list_for_each_entry_safe(id, tmp, &adev->vm_manager.ids_lru,
list) {
if (amdgpu_sync_is_idle(&id->active)) {
list_move(&id->list, head);
head = &id->list;
}
}
id = list_first_entry(&adev->vm_manager.ids_lru,
struct amdgpu_vm_id,
list);
}
r = amdgpu_sync_cycle_fences(sync, &id->active, fence);
if (r)
goto error;
fence_put(id->first);
id->first = fence_get(fence);
fence_put(id->last_flush);
id->last_flush = NULL;
fence_put(id->flushed_updates);
id->flushed_updates = fence_get(updates);
id->pd_gpu_addr = pd_addr;
list_move_tail(&id->list, &adev->vm_manager.ids_lru);
id->last_user = ring;
atomic64_set(&id->owner, vm->client_id);
vm->ids[ring->idx] = id;
*vm_id = id - adev->vm_manager.ids;
*vm_pd_addr = pd_addr;
trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
error:
mutex_unlock(&adev->vm_manager.lock);
return r;
}
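For reference, the VMID-reuse test above reads as a single predicate; a condensed sketch (the helper name is hypothetical, the fields are the ones shown in the diff):

static bool amdgpu_vm_id_compatible(struct amdgpu_vm *vm,
				    struct amdgpu_ring *ring,
				    struct amdgpu_vm_id *id,
				    uint64_t pd_addr,
				    struct fence *updates)
{
	struct fence *flushed = id->flushed_updates;

	if (atomic64_read(&id->owner) != vm->client_id)
		return false;	/* VMID was taken over by another client */
	if (pd_addr != id->pd_gpu_addr)
		return false;	/* page directory has moved */
	if (id->last_user != ring &&
	    (!id->last_flush || !fence_is_signaled(id->last_flush)))
		return false;	/* flush issued from another ring not done yet */
	if (updates && (!flushed || fence_is_later(updates, flushed)))
		return false;	/* PD updates are newer than the last flush */
	return true;
}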
@ -247,43 +282,62 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
*
* Emit a VM flush when it is necessary.
*/
void amdgpu_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
uint32_t oa_base, uint32_t oa_size)
int amdgpu_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
uint32_t oa_base, uint32_t oa_size)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id];
struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
bool gds_switch_needed = ring->funcs->emit_gds_switch && (
mgr_id->gds_base != gds_base ||
mgr_id->gds_size != gds_size ||
mgr_id->gws_base != gws_base ||
mgr_id->gws_size != gws_size ||
mgr_id->oa_base != oa_base ||
mgr_id->oa_size != oa_size);
id->gds_base != gds_base ||
id->gds_size != gds_size ||
id->gws_base != gws_base ||
id->gws_size != gws_size ||
id->oa_base != oa_base ||
id->oa_size != oa_size);
int r;
if (ring->funcs->emit_pipeline_sync && (
pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed))
pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed ||
ring->type == AMDGPU_RING_TYPE_COMPUTE))
amdgpu_ring_emit_pipeline_sync(ring);
if (pd_addr != AMDGPU_VM_NO_FLUSH) {
if (ring->funcs->emit_vm_flush &&
pd_addr != AMDGPU_VM_NO_FLUSH) {
struct fence *fence;
trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
mutex_lock(&adev->vm_manager.lock);
if ((id->pd_gpu_addr == pd_addr) && (id->last_user == ring)) {
r = amdgpu_fence_emit(ring, &fence);
if (r) {
mutex_unlock(&adev->vm_manager.lock);
return r;
}
fence_put(id->last_flush);
id->last_flush = fence;
}
mutex_unlock(&adev->vm_manager.lock);
}
if (gds_switch_needed) {
mgr_id->gds_base = gds_base;
mgr_id->gds_size = gds_size;
mgr_id->gws_base = gws_base;
mgr_id->gws_size = gws_size;
mgr_id->oa_base = oa_base;
mgr_id->oa_size = oa_size;
id->gds_base = gds_base;
id->gds_size = gds_size;
id->gws_base = gws_base;
id->gws_size = gws_size;
id->oa_base = oa_base;
id->oa_size = oa_size;
amdgpu_ring_emit_gds_switch(ring, vm_id,
gds_base, gds_size,
gws_base, gws_size,
oa_base, oa_size);
}
return 0;
}
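amdgpu_vm_flush() now returns an error code because emitting the flush may allocate a fence for id->last_flush; a minimal sketch of a call site (hypothetical, not part of the diff):

	r = amdgpu_vm_flush(ring, vm_id, pd_addr,
			    gds_base, gds_size, gws_base, gws_size,
			    oa_base, oa_size);
	if (r)
		return r;	/* fence allocation for last_flush failed */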
/**
@ -296,14 +350,14 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
*/
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
{
struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id];
struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
mgr_id->gds_base = 0;
mgr_id->gds_size = 0;
mgr_id->gws_base = 0;
mgr_id->gws_size = 0;
mgr_id->oa_base = 0;
mgr_id->oa_size = 0;
id->gds_base = 0;
id->gds_size = 0;
id->gws_base = 0;
id->gws_size = 0;
id->oa_base = 0;
id->oa_size = 0;
}
/**
@ -335,8 +389,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
* amdgpu_vm_update_pages - helper to call the right asic function
*
* @adev: amdgpu_device pointer
* @gtt: GART instance to use for mapping
* @gtt_flags: GTT hw access flags
* @src: address where to copy page table entries from
* @pages_addr: DMA addresses to use for mapping
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @addr: dst addr to write into pe
@ -348,8 +402,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
* to setup the page table using the DMA.
*/
static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
struct amdgpu_gart *gtt,
uint32_t gtt_flags,
uint64_t src,
dma_addr_t *pages_addr,
struct amdgpu_ib *ib,
uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr,
@ -357,12 +411,11 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
{
trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
if ((gtt == &adev->gart) && (flags == gtt_flags)) {
uint64_t src = gtt->table_addr + (addr >> 12) * 8;
if (src) {
src += (addr >> 12) * 8;
amdgpu_vm_copy_pte(adev, ib, pe, src, count);
} else if (gtt) {
dma_addr_t *pages_addr = gtt->pages_addr;
} else if (pages_addr) {
amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
count, incr, flags);
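/* Dispatch summary for the helper above: a non-zero src copies entries with
 * the copy-PTE command, pages_addr writes them one at a time from the DMA
 * address array, and the remaining branch (not shown in this hunk)
 * presumably generates the PTEs directly. */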
@ -412,7 +465,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (r)
goto error;
amdgpu_vm_update_pages(adev, NULL, 0, &job->ibs[0], addr, 0, entries,
amdgpu_vm_update_pages(adev, 0, NULL, &job->ibs[0], addr, 0, entries,
0, 0);
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
@ -522,7 +575,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
((last_pt + incr * count) != pt)) {
if (count) {
amdgpu_vm_update_pages(adev, NULL, 0, ib,
amdgpu_vm_update_pages(adev, 0, NULL, ib,
last_pde, last_pt,
count, incr,
AMDGPU_PTE_VALID);
@ -537,7 +590,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
}
if (count)
amdgpu_vm_update_pages(adev, NULL, 0, ib, last_pde, last_pt,
amdgpu_vm_update_pages(adev, 0, NULL, ib, last_pde, last_pt,
count, incr, AMDGPU_PTE_VALID);
if (ib->length_dw != 0) {
@ -570,8 +623,8 @@ error_free:
* amdgpu_vm_frag_ptes - add fragment information to PTEs
*
* @adev: amdgpu_device pointer
* @gtt: GART instance to use for mapping
* @gtt_flags: GTT hw mapping flags
* @src: address where to copy page table entries from
* @pages_addr: DMA addresses to use for mapping
* @ib: IB for the update
* @pe_start: first PTE to handle
* @pe_end: last PTE to handle
@ -579,8 +632,8 @@ error_free:
* @flags: hw mapping flags
*/
static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
struct amdgpu_gart *gtt,
uint32_t gtt_flags,
uint64_t src,
dma_addr_t *pages_addr,
struct amdgpu_ib *ib,
uint64_t pe_start, uint64_t pe_end,
uint64_t addr, uint32_t flags)
@ -618,10 +671,11 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
return;
/* system pages are non-contiguous */
if (gtt || !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
if (src || pages_addr || !(flags & AMDGPU_PTE_VALID) ||
(frag_start >= frag_end)) {
count = (pe_end - pe_start) / 8;
amdgpu_vm_update_pages(adev, gtt, gtt_flags, ib, pe_start,
amdgpu_vm_update_pages(adev, src, pages_addr, ib, pe_start,
addr, count, AMDGPU_GPU_PAGE_SIZE,
flags);
return;
@ -630,21 +684,21 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
/* handle the 4K area at the beginning */
if (pe_start != frag_start) {
count = (frag_start - pe_start) / 8;
amdgpu_vm_update_pages(adev, NULL, 0, ib, pe_start, addr,
amdgpu_vm_update_pages(adev, 0, NULL, ib, pe_start, addr,
count, AMDGPU_GPU_PAGE_SIZE, flags);
addr += AMDGPU_GPU_PAGE_SIZE * count;
}
/* handle the area in the middle */
count = (frag_end - frag_start) / 8;
amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_start, addr, count,
amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_start, addr, count,
AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
/* handle the 4K area at the end */
if (frag_end != pe_end) {
addr += AMDGPU_GPU_PAGE_SIZE * count;
count = (pe_end - frag_end) / 8;
amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_end, addr,
amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_end, addr,
count, AMDGPU_GPU_PAGE_SIZE, flags);
}
}
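Each PTE is eight bytes, which is where the "/ 8" conversions above come from; a worked example with illustrative addresses:

/* pe_start = 0x1000, frag_start = 0x1040, frag_end = 0x1200, pe_end = 0x1240:
 *   head  : (0x1040 - 0x1000) / 8 =  8 PTEs, written without fragment flags
 *   middle: (0x1200 - 0x1040) / 8 = 56 PTEs, written with flags | frag_flags
 *   tail  : (0x1240 - 0x1200) / 8 =  8 PTEs, written without fragment flags */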
@ -653,8 +707,8 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
* amdgpu_vm_update_ptes - make sure that page tables are valid
*
* @adev: amdgpu_device pointer
* @gtt: GART instance to use for mapping
* @gtt_flags: GTT hw mapping flags
* @src: address where to copy page table entries from
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
@ -664,8 +718,8 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
* Update the page tables in the range @start - @end.
*/
static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
struct amdgpu_gart *gtt,
uint32_t gtt_flags,
uint64_t src,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
struct amdgpu_ib *ib,
uint64_t start, uint64_t end,
@ -693,7 +747,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
if (last_pe_end != pe_start) {
amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
amdgpu_vm_frag_ptes(adev, src, pages_addr, ib,
last_pe_start, last_pe_end,
last_dst, flags);
@ -708,17 +762,16 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
dst += nptes * AMDGPU_GPU_PAGE_SIZE;
}
amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
last_pe_start, last_pe_end,
last_dst, flags);
amdgpu_vm_frag_ptes(adev, src, pages_addr, ib, last_pe_start,
last_pe_end, last_dst, flags);
}
/**
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
*
* @adev: amdgpu_device pointer
* @gtt: GART instance to use for mapping
* @gtt_flags: flags as they are used for GTT
* @src: address where to copy page table entries from
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @start: start of mapped range
* @last: last mapped entry
@ -730,8 +783,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
struct amdgpu_gart *gtt,
uint32_t gtt_flags,
uint64_t src,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
uint64_t start, uint64_t last,
uint32_t flags, uint64_t addr,
@ -762,11 +815,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
/* padding, etc. */
ndw = 64;
if ((gtt == &adev->gart) && (flags == gtt_flags)) {
if (src) {
/* only copy commands needed */
ndw += ncmds * 7;
} else if (gtt) {
} else if (pages_addr) {
/* header for write data commands */
ndw += ncmds * 4;
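/* Rough dword budget from the branches above, e.g. for ncmds = 2: the copy
 * path reserves 2 * 7 = 14 dwords, while the write path reserves 2 * 4
 * header dwords plus the PTE payload accounted for further down (not shown
 * in this hunk). */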
@ -796,8 +849,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
amdgpu_vm_update_ptes(adev, gtt, gtt_flags, vm, ib, start, last + 1,
addr, flags);
amdgpu_vm_update_ptes(adev, src, pages_addr, vm, ib, start,
last + 1, addr, flags);
amdgpu_ring_pad_ib(ring, ib);
WARN_ON(ib->length_dw > ndw);
@ -823,11 +876,12 @@ error_free:
* amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
*
* @adev: amdgpu_device pointer
* @gtt: GART instance to use for mapping
* @gtt_flags: flags as they are used for GTT
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @mapping: mapped range and flags to use for the update
* @addr: addr to set the area to
* @gtt_flags: flags as they are used for GTT
* @flags: HW flags for the mapping
* @fence: optional resulting fence
*
* Split the mapping into smaller chunks so that each update fits
@ -835,16 +889,16 @@ error_free:
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
struct amdgpu_gart *gtt,
uint32_t gtt_flags,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
uint64_t addr, struct fence **fence)
uint32_t flags, uint64_t addr,
struct fence **fence)
{
const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
uint64_t start = mapping->it.start;
uint32_t flags = gtt_flags;
uint64_t src = 0, start = mapping->it.start;
int r;
/* normally, bo_va->flags only contains the READABLE and WRITEABLE bits, which go here
@ -857,10 +911,15 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_update(mapping);
if (pages_addr) {
if (flags == gtt_flags)
src = adev->gart.table_addr + (addr >> 12) * 8;
addr = 0;
}
addr += mapping->offset;
if (!gtt || ((gtt == &adev->gart) && (flags == gtt_flags)))
return amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
if (!pages_addr || src)
return amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
start, mapping->it.last,
flags, addr, fence);
@ -868,7 +927,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
uint64_t last;
last = min((uint64_t)mapping->it.last, start + max_size - 1);
r = amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
r = amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
start, last, flags, addr,
fence);
if (r)
@ -899,16 +958,20 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
{
struct amdgpu_vm *vm = bo_va->vm;
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_gart *gtt = NULL;
uint32_t flags;
dma_addr_t *pages_addr = NULL;
uint32_t gtt_flags, flags;
uint64_t addr;
int r;
if (mem) {
struct ttm_dma_tt *ttm;
addr = (u64)mem->start << PAGE_SHIFT;
switch (mem->mem_type) {
case TTM_PL_TT:
gtt = &bo_va->bo->adev->gart;
ttm = container_of(bo_va->bo->tbo.ttm, struct
ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
break;
case TTM_PL_VRAM:
@ -923,6 +986,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
}
flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
gtt_flags = (adev == bo_va->bo->adev) ? flags : 0;
spin_lock(&vm->status_lock);
if (!list_empty(&bo_va->vm_status))
@ -930,7 +994,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
spin_unlock(&vm->status_lock);
list_for_each_entry(mapping, &bo_va->invalids, list) {
r = amdgpu_vm_bo_split_mapping(adev, gtt, flags, vm, mapping, addr,
r = amdgpu_vm_bo_split_mapping(adev, gtt_flags, pages_addr, vm,
mapping, flags, addr,
&bo_va->last_pt_update);
if (r)
return r;
@ -976,8 +1041,8 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, vm, mapping,
0, NULL);
r = amdgpu_vm_bo_split_mapping(adev, 0, NULL, vm, mapping,
0, 0, NULL);
kfree(mapping);
if (r)
return r;
@ -1320,11 +1385,10 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
struct amd_sched_rq *rq;
int i, r;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
vm->ids[i].mgr_id = NULL;
vm->ids[i].flushed_updates = NULL;
}
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
vm->ids[i] = NULL;
vm->va = RB_ROOT;
vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->invalidated);
INIT_LIST_HEAD(&vm->cleared);
@ -1416,15 +1480,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unref(&vm->page_directory);
fence_put(vm->page_directory_fence);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_vm_id *id = &vm->ids[i];
if (id->mgr_id)
atomic_long_cmpxchg(&id->mgr_id->owner,
(long)id, 0);
fence_put(id->flushed_updates);
}
}
/**
@ -1443,11 +1498,13 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
/* skip over VMID 0, since it is the system VM */
for (i = 1; i < adev->vm_manager.num_ids; ++i) {
amdgpu_vm_reset_id(adev, i);
amdgpu_sync_create(&adev->vm_manager.ids[i].active);
list_add_tail(&adev->vm_manager.ids[i].list,
&adev->vm_manager.ids_lru);
}
atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
atomic64_set(&adev->vm_manager.client_counter, 0);
}
/**
@ -1461,6 +1518,11 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
unsigned i;
for (i = 0; i < AMDGPU_NUM_VM; ++i)
fence_put(adev->vm_manager.ids[i].active);
for (i = 0; i < AMDGPU_NUM_VM; ++i) {
struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
fence_put(adev->vm_manager.ids[i].first);
amdgpu_sync_free(&adev->vm_manager.ids[i].active);
fence_put(id->flushed_updates);
}
}


@ -92,7 +92,7 @@
#define ATOM_WS_AND_MASK 0x45
#define ATOM_WS_FB_WINDOW 0x46
#define ATOM_WS_ATTRIBUTES 0x47
#define ATOM_WS_REGPTR 0x48
#define ATOM_IIO_NOP 0
#define ATOM_IIO_START 1


@ -461,13 +461,14 @@ union set_pixel_clock {
PIXEL_CLOCK_PARAMETERS_V3 v3;
PIXEL_CLOCK_PARAMETERS_V5 v5;
PIXEL_CLOCK_PARAMETERS_V6 v6;
PIXEL_CLOCK_PARAMETERS_V7 v7;
};
/* on DCE5, make sure the voltage is high enough to support the
* required disp clk.
*/
void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
u32 dispclk)
{
u8 frev, crev;
int index;
@ -510,6 +511,49 @@ void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}
union set_dce_clock {
SET_DCE_CLOCK_PS_ALLOCATION_V1_1 v1_1;
SET_DCE_CLOCK_PS_ALLOCATION_V2_1 v2_1;
};
u32 amdgpu_atombios_crtc_set_dce_clock(struct amdgpu_device *adev,
u32 freq, u8 clk_type, u8 clk_src)
{
u8 frev, crev;
int index;
union set_dce_clock args;
u32 ret_freq = 0;
memset(&args, 0, sizeof(args));
index = GetIndexIntoMasterTable(COMMAND, SetDCEClock);
if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev,
&crev))
return 0;
switch (frev) {
case 2:
switch (crev) {
case 1:
args.v2_1.asParam.ulDCEClkFreq = cpu_to_le32(freq); /* 10 kHz units */
args.v2_1.asParam.ucDCEClkType = clk_type;
args.v2_1.asParam.ucDCEClkSrc = clk_src;
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
ret_freq = le32_to_cpu(args.v2_1.asParam.ulDCEClkFreq) * 10;
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return 0;
}
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return 0;
}
return ret_freq;
}
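A hedged usage sketch for the new helper; freq is in 10 kHz units per the comment above, the helper hands back the table's value multiplied by ten, and the two constant names here are assumptions, not taken from the diff:

	/* ask for a 600 MHz display clock and read back what was granted */
	u32 granted = amdgpu_atombios_crtc_set_dce_clock(adev, 60000,
							 DCE_CLOCK_TYPE_DISPCLK,
							 ATOM_GCK_DFS);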
static bool is_pixel_clock_source_from_pll(u32 encoder_mode, int pll_id)
{
if (ENCODER_MODE_IS_DP(encoder_mode)) {
@ -523,18 +567,18 @@ static bool is_pixel_clock_source_from_pll(u32 encoder_mode, int pll_id)
}
void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
u32 crtc_id,
int pll_id,
u32 encoder_mode,
u32 encoder_id,
u32 clock,
u32 ref_div,
u32 fb_div,
u32 frac_fb_div,
u32 post_div,
int bpc,
bool ss_enabled,
struct amdgpu_atom_ss *ss)
{
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
@ -652,6 +696,34 @@ void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
args.v6.ucEncoderMode = encoder_mode;
args.v6.ucPpll = pll_id;
break;
case 7:
args.v7.ulPixelClock = cpu_to_le32(clock * 10); /* 100 Hz units */
args.v7.ucMiscInfo = 0;
if ((encoder_mode == ATOM_ENCODER_MODE_DVI) &&
(clock > 165000))
args.v7.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN;
args.v7.ucCRTC = crtc_id;
if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
switch (bpc) {
case 8:
default:
args.v7.ucDeepColorRatio = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_DIS;
break;
case 10:
args.v7.ucDeepColorRatio = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_5_4;
break;
case 12:
args.v7.ucDeepColorRatio = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_3_2;
break;
case 16:
args.v7.ucDeepColorRatio = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_2_1;
break;
}
}
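/* Deep-color ratios above, i.e. how much faster the TMDS clock runs than
 * the 8 bpc pixel clock: 8 bpc 1:1 (disabled), 10 bpc 5:4, 12 bpc 3:2,
 * 16 bpc 2:1. */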
args.v7.ucTransmitterID = encoder_id;
args.v7.ucEncoderMode = encoder_mode;
args.v7.ucPpll = pll_id;
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return;


@ -37,6 +37,8 @@ void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc,
struct drm_display_mode *mode);
void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
u32 dispclk);
u32 amdgpu_atombios_crtc_set_dce_clock(struct amdgpu_device *adev,
u32 freq, u8 clk_type, u8 clk_src);
void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
u32 crtc_id,
int pll_id,


@ -567,6 +567,7 @@ union dig_encoder_control {
DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
DIG_ENCODER_CONTROL_PARAMETERS_V5 v5;
};
void
@ -694,6 +695,47 @@ amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder,
else
args.v4.ucHPD_ID = hpd_id + 1;
break;
case 5:
switch (action) {
case ATOM_ENCODER_CMD_SETUP_PANEL_MODE:
args.v5.asDPPanelModeParam.ucAction = action;
args.v5.asDPPanelModeParam.ucPanelMode = panel_mode;
args.v5.asDPPanelModeParam.ucDigId = dig->dig_encoder;
break;
case ATOM_ENCODER_CMD_STREAM_SETUP:
args.v5.asStreamParam.ucAction = action;
args.v5.asStreamParam.ucDigId = dig->dig_encoder;
args.v5.asStreamParam.ucDigMode =
amdgpu_atombios_encoder_get_encoder_mode(encoder);
if (ENCODER_MODE_IS_DP(args.v5.asStreamParam.ucDigMode))
args.v5.asStreamParam.ucLaneNum = dp_lane_count;
else if (amdgpu_dig_monitor_is_duallink(encoder,
amdgpu_encoder->pixel_clock))
args.v5.asStreamParam.ucLaneNum = 8;
else
args.v5.asStreamParam.ucLaneNum = 4;
args.v5.asStreamParam.ulPixelClock =
cpu_to_le32(amdgpu_encoder->pixel_clock / 10);
args.v5.asStreamParam.ucBitPerColor =
amdgpu_atombios_encoder_get_bpc(encoder);
args.v5.asStreamParam.ucLinkRateIn270Mhz = dp_clock / 27000;
break;
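/* ucLinkRateIn270Mhz counts 270 MHz steps, e.g. dp_clock = 270000 gives
 * 270000 / 27000 = 10, i.e. a 2.7 GHz link. */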
case ATOM_ENCODER_CMD_DP_LINK_TRAINING_START:
case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1:
case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2:
case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3:
case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN4:
case ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE:
case ATOM_ENCODER_CMD_DP_VIDEO_OFF:
case ATOM_ENCODER_CMD_DP_VIDEO_ON:
args.v5.asCmdParam.ucAction = action;
args.v5.asCmdParam.ucDigId = dig->dig_encoder;
break;
default:
DRM_ERROR("Unsupported action 0x%x\n", action);
break;
}
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
@ -714,11 +756,12 @@ union dig_transmitter_control {
DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 v5;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_6 v6;
};
void
amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int action,
uint8_t lane_num, uint8_t lane_set)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = dev->dev_private;
@ -1070,6 +1113,54 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a
args.v5.ucDigEncoderSel = 1 << dig_encoder;
args.v5.ucDPLaneSet = lane_set;
break;
case 6:
args.v6.ucAction = action;
if (is_dp)
args.v6.ulSymClock = cpu_to_le32(dp_clock / 10);
else
args.v6.ulSymClock = cpu_to_le32(amdgpu_encoder->pixel_clock / 10);
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (dig->linkb)
args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYB;
else
args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYA;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
if (dig->linkb)
args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYD;
else
args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYC;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (dig->linkb)
args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYF;
else
args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYE;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYG;
break;
}
if (is_dp)
args.v6.ucLaneNum = dp_lane_count;
else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
args.v6.ucLaneNum = 8;
else
args.v6.ucLaneNum = 4;
args.v6.ucConnObjId = connector_object_id;
if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH)
args.v6.ucDPLaneSet = lane_set;
else
args.v6.ucDigMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
if (hpd_id == AMDGPU_HPD_NONE)
args.v6.ucHPDSel = 0;
else
args.v6.ucHPDSel = hpd_id + 1;
args.v6.ucDigEncoderSel = 1 << dig_encoder;
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;


@ -2549,19 +2549,17 @@ static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
return 0;
}
static u8 ci_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
u32 sclk, u32 min_sclk_in_sr)
static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr)
{
u32 i;
u32 tmp;
u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK);
if (sclk < min)
return 0;
for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
tmp = sclk / (1 << i);
tmp = sclk >> i;
if (tmp >= min || i == 0)
break;
}
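The loop walks the divider down from the maximum until the divided clock first meets the floor; a worked example, with illustrative clock values:

/* sclk = 80000, min = 25000: 80000 >> 2 = 20000 is still below the floor,
 * but 80000 >> 1 = 40000 >= 25000, so the loop stops at i = 1. */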
@ -3358,8 +3356,7 @@ static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
graphic_level->PowerThrottle = 0;
if (pi->caps_sclk_ds)
graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(adev,
engine_clock,
graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock,
CISLAND_MINIMUM_ENGINE_CLOCK);
graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
@ -6309,215 +6306,6 @@ static int ci_dpm_wait_for_idle(void *handle)
return 0;
}
static void ci_dpm_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "CIK DPM registers\n");
dev_info(adev->dev, " BIOS_SCRATCH_4=0x%08X\n",
RREG32(mmBIOS_SCRATCH_4));
dev_info(adev->dev, " MC_ARB_DRAM_TIMING=0x%08X\n",
RREG32(mmMC_ARB_DRAM_TIMING));
dev_info(adev->dev, " MC_ARB_DRAM_TIMING2=0x%08X\n",
RREG32(mmMC_ARB_DRAM_TIMING2));
dev_info(adev->dev, " MC_ARB_BURST_TIME=0x%08X\n",
RREG32(mmMC_ARB_BURST_TIME));
dev_info(adev->dev, " MC_ARB_DRAM_TIMING_1=0x%08X\n",
RREG32(mmMC_ARB_DRAM_TIMING_1));
dev_info(adev->dev, " MC_ARB_DRAM_TIMING2_1=0x%08X\n",
RREG32(mmMC_ARB_DRAM_TIMING2_1));
dev_info(adev->dev, " MC_CG_CONFIG=0x%08X\n",
RREG32(mmMC_CG_CONFIG));
dev_info(adev->dev, " MC_ARB_CG=0x%08X\n",
RREG32(mmMC_ARB_CG));
dev_info(adev->dev, " DIDT_SQ_CTRL0=0x%08X\n",
RREG32_DIDT(ixDIDT_SQ_CTRL0));
dev_info(adev->dev, " DIDT_DB_CTRL0=0x%08X\n",
RREG32_DIDT(ixDIDT_DB_CTRL0));
dev_info(adev->dev, " DIDT_TD_CTRL0=0x%08X\n",
RREG32_DIDT(ixDIDT_TD_CTRL0));
dev_info(adev->dev, " DIDT_TCP_CTRL0=0x%08X\n",
RREG32_DIDT(ixDIDT_TCP_CTRL0));
dev_info(adev->dev, " CG_THERMAL_INT=0x%08X\n",
RREG32_SMC(ixCG_THERMAL_INT));
dev_info(adev->dev, " CG_THERMAL_CTRL=0x%08X\n",
RREG32_SMC(ixCG_THERMAL_CTRL));
dev_info(adev->dev, " GENERAL_PWRMGT=0x%08X\n",
RREG32_SMC(ixGENERAL_PWRMGT));
dev_info(adev->dev, " MC_SEQ_CNTL_3=0x%08X\n",
RREG32(mmMC_SEQ_CNTL_3));
dev_info(adev->dev, " LCAC_MC0_CNTL=0x%08X\n",
RREG32_SMC(ixLCAC_MC0_CNTL));
dev_info(adev->dev, " LCAC_MC1_CNTL=0x%08X\n",
RREG32_SMC(ixLCAC_MC1_CNTL));
dev_info(adev->dev, " LCAC_CPL_CNTL=0x%08X\n",
RREG32_SMC(ixLCAC_CPL_CNTL));
dev_info(adev->dev, " SCLK_PWRMGT_CNTL=0x%08X\n",
RREG32_SMC(ixSCLK_PWRMGT_CNTL));
dev_info(adev->dev, " BIF_LNCNT_RESET=0x%08X\n",
RREG32(mmBIF_LNCNT_RESET));
dev_info(adev->dev, " FIRMWARE_FLAGS=0x%08X\n",
RREG32_SMC(ixFIRMWARE_FLAGS));
dev_info(adev->dev, " CG_SPLL_FUNC_CNTL=0x%08X\n",
RREG32_SMC(ixCG_SPLL_FUNC_CNTL));
dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_2=0x%08X\n",
RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2));
dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_3=0x%08X\n",
RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3));
dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_4=0x%08X\n",
RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4));
dev_info(adev->dev, " CG_SPLL_SPREAD_SPECTRUM=0x%08X\n",
RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM));
dev_info(adev->dev, " CG_SPLL_SPREAD_SPECTRUM_2=0x%08X\n",
RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2));
dev_info(adev->dev, " DLL_CNTL=0x%08X\n",
RREG32(mmDLL_CNTL));
dev_info(adev->dev, " MCLK_PWRMGT_CNTL=0x%08X\n",
RREG32(mmMCLK_PWRMGT_CNTL));
dev_info(adev->dev, " MPLL_AD_FUNC_CNTL=0x%08X\n",
RREG32(mmMPLL_AD_FUNC_CNTL));
dev_info(adev->dev, " MPLL_DQ_FUNC_CNTL=0x%08X\n",
RREG32(mmMPLL_DQ_FUNC_CNTL));
dev_info(adev->dev, " MPLL_FUNC_CNTL=0x%08X\n",
RREG32(mmMPLL_FUNC_CNTL));
dev_info(adev->dev, " MPLL_FUNC_CNTL_1=0x%08X\n",
RREG32(mmMPLL_FUNC_CNTL_1));
dev_info(adev->dev, " MPLL_FUNC_CNTL_2=0x%08X\n",
RREG32(mmMPLL_FUNC_CNTL_2));
dev_info(adev->dev, " MPLL_SS1=0x%08X\n",
RREG32(mmMPLL_SS1));
dev_info(adev->dev, " MPLL_SS2=0x%08X\n",
RREG32(mmMPLL_SS2));
dev_info(adev->dev, " CG_DISPLAY_GAP_CNTL=0x%08X\n",
RREG32_SMC(ixCG_DISPLAY_GAP_CNTL));
dev_info(adev->dev, " CG_DISPLAY_GAP_CNTL2=0x%08X\n",
RREG32_SMC(ixCG_DISPLAY_GAP_CNTL2));
dev_info(adev->dev, " CG_STATIC_SCREEN_PARAMETER=0x%08X\n",
RREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER));
dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_0=0x%08X\n",
RREG32_SMC(ixCG_FREQ_TRAN_VOTING_0));
dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_1=0x%08X\n",
RREG32_SMC(ixCG_FREQ_TRAN_VOTING_1));
dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_2=0x%08X\n",
RREG32_SMC(ixCG_FREQ_TRAN_VOTING_2));
dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_3=0x%08X\n",
RREG32_SMC(ixCG_FREQ_TRAN_VOTING_3));
dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_4=0x%08X\n",
RREG32_SMC(ixCG_FREQ_TRAN_VOTING_4));
dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_5=0x%08X\n",
RREG32_SMC(ixCG_FREQ_TRAN_VOTING_5));
dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_6=0x%08X\n",
RREG32_SMC(ixCG_FREQ_TRAN_VOTING_6));
dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_7=0x%08X\n",
RREG32_SMC(ixCG_FREQ_TRAN_VOTING_7));
dev_info(adev->dev, " RCU_UC_EVENTS=0x%08X\n",
RREG32_SMC(ixRCU_UC_EVENTS));
dev_info(adev->dev, " DPM_TABLE_475=0x%08X\n",
RREG32_SMC(ixDPM_TABLE_475));
dev_info(adev->dev, " MC_SEQ_RAS_TIMING_LP=0x%08X\n",
RREG32(mmMC_SEQ_RAS_TIMING_LP));
dev_info(adev->dev, " MC_SEQ_RAS_TIMING=0x%08X\n",
RREG32(mmMC_SEQ_RAS_TIMING));
dev_info(adev->dev, " MC_SEQ_CAS_TIMING_LP=0x%08X\n",
RREG32(mmMC_SEQ_CAS_TIMING_LP));
dev_info(adev->dev, " MC_SEQ_CAS_TIMING=0x%08X\n",
RREG32(mmMC_SEQ_CAS_TIMING));
dev_info(adev->dev, " MC_SEQ_DLL_STBY_LP=0x%08X\n",
RREG32(mmMC_SEQ_DLL_STBY_LP));
dev_info(adev->dev, " MC_SEQ_DLL_STBY=0x%08X\n",
RREG32(mmMC_SEQ_DLL_STBY));
dev_info(adev->dev, " MC_SEQ_G5PDX_CMD0_LP=0x%08X\n",
RREG32(mmMC_SEQ_G5PDX_CMD0_LP));
dev_info(adev->dev, " MC_SEQ_G5PDX_CMD0=0x%08X\n",
RREG32(mmMC_SEQ_G5PDX_CMD0));
dev_info(adev->dev, " MC_SEQ_G5PDX_CMD1_LP=0x%08X\n",
RREG32(mmMC_SEQ_G5PDX_CMD1_LP));
dev_info(adev->dev, " MC_SEQ_G5PDX_CMD1=0x%08X\n",
RREG32(mmMC_SEQ_G5PDX_CMD1));
dev_info(adev->dev, " MC_SEQ_G5PDX_CTRL_LP=0x%08X\n",
RREG32(mmMC_SEQ_G5PDX_CTRL_LP));
dev_info(adev->dev, " MC_SEQ_G5PDX_CTRL=0x%08X\n",
RREG32(mmMC_SEQ_G5PDX_CTRL));
dev_info(adev->dev, " MC_SEQ_PMG_DVS_CMD_LP=0x%08X\n",
RREG32(mmMC_SEQ_PMG_DVS_CMD_LP));
dev_info(adev->dev, " MC_SEQ_PMG_DVS_CMD=0x%08X\n",
RREG32(mmMC_SEQ_PMG_DVS_CMD));
dev_info(adev->dev, " MC_SEQ_PMG_DVS_CTL_LP=0x%08X\n",
RREG32(mmMC_SEQ_PMG_DVS_CTL_LP));
dev_info(adev->dev, " MC_SEQ_PMG_DVS_CTL=0x%08X\n",
RREG32(mmMC_SEQ_PMG_DVS_CTL));
dev_info(adev->dev, " MC_SEQ_MISC_TIMING_LP=0x%08X\n",
RREG32(mmMC_SEQ_MISC_TIMING_LP));
dev_info(adev->dev, " MC_SEQ_MISC_TIMING=0x%08X\n",
RREG32(mmMC_SEQ_MISC_TIMING));
dev_info(adev->dev, " MC_SEQ_MISC_TIMING2_LP=0x%08X\n",
RREG32(mmMC_SEQ_MISC_TIMING2_LP));
dev_info(adev->dev, " MC_SEQ_MISC_TIMING2=0x%08X\n",
RREG32(mmMC_SEQ_MISC_TIMING2));
dev_info(adev->dev, " MC_SEQ_PMG_CMD_EMRS_LP=0x%08X\n",
RREG32(mmMC_SEQ_PMG_CMD_EMRS_LP));
dev_info(adev->dev, " MC_PMG_CMD_EMRS=0x%08X\n",
RREG32(mmMC_PMG_CMD_EMRS));
dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS_LP=0x%08X\n",
RREG32(mmMC_SEQ_PMG_CMD_MRS_LP));
dev_info(adev->dev, " MC_PMG_CMD_MRS=0x%08X\n",
RREG32(mmMC_PMG_CMD_MRS));
dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS1_LP=0x%08X\n",
RREG32(mmMC_SEQ_PMG_CMD_MRS1_LP));
dev_info(adev->dev, " MC_PMG_CMD_MRS1=0x%08X\n",
RREG32(mmMC_PMG_CMD_MRS1));
dev_info(adev->dev, " MC_SEQ_WR_CTL_D0_LP=0x%08X\n",
RREG32(mmMC_SEQ_WR_CTL_D0_LP));
dev_info(adev->dev, " MC_SEQ_WR_CTL_D0=0x%08X\n",
RREG32(mmMC_SEQ_WR_CTL_D0));
dev_info(adev->dev, " MC_SEQ_WR_CTL_D1_LP=0x%08X\n",
RREG32(mmMC_SEQ_WR_CTL_D1_LP));
dev_info(adev->dev, " MC_SEQ_WR_CTL_D1=0x%08X\n",
RREG32(mmMC_SEQ_WR_CTL_D1));
dev_info(adev->dev, " MC_SEQ_RD_CTL_D0_LP=0x%08X\n",
RREG32(mmMC_SEQ_RD_CTL_D0_LP));
dev_info(adev->dev, " MC_SEQ_RD_CTL_D0=0x%08X\n",
RREG32(mmMC_SEQ_RD_CTL_D0));
dev_info(adev->dev, " MC_SEQ_RD_CTL_D1_LP=0x%08X\n",
RREG32(mmMC_SEQ_RD_CTL_D1_LP));
dev_info(adev->dev, " MC_SEQ_RD_CTL_D1=0x%08X\n",
RREG32(mmMC_SEQ_RD_CTL_D1));
dev_info(adev->dev, " MC_SEQ_PMG_TIMING_LP=0x%08X\n",
RREG32(mmMC_SEQ_PMG_TIMING_LP));
dev_info(adev->dev, " MC_SEQ_PMG_TIMING=0x%08X\n",
RREG32(mmMC_SEQ_PMG_TIMING));
dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS2_LP=0x%08X\n",
RREG32(mmMC_SEQ_PMG_CMD_MRS2_LP));
dev_info(adev->dev, " MC_PMG_CMD_MRS2=0x%08X\n",
RREG32(mmMC_PMG_CMD_MRS2));
dev_info(adev->dev, " MC_SEQ_WR_CTL_2_LP=0x%08X\n",
RREG32(mmMC_SEQ_WR_CTL_2_LP));
dev_info(adev->dev, " MC_SEQ_WR_CTL_2=0x%08X\n",
RREG32(mmMC_SEQ_WR_CTL_2));
dev_info(adev->dev, " PCIE_LC_SPEED_CNTL=0x%08X\n",
RREG32_PCIE(ixPCIE_LC_SPEED_CNTL));
dev_info(adev->dev, " PCIE_LC_LINK_WIDTH_CNTL=0x%08X\n",
RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL));
dev_info(adev->dev, " SMC_IND_INDEX_0=0x%08X\n",
RREG32(mmSMC_IND_INDEX_0));
dev_info(adev->dev, " SMC_IND_DATA_0=0x%08X\n",
RREG32(mmSMC_IND_DATA_0));
dev_info(adev->dev, " SMC_IND_ACCESS_CNTL=0x%08X\n",
RREG32(mmSMC_IND_ACCESS_CNTL));
dev_info(adev->dev, " SMC_RESP_0=0x%08X\n",
RREG32(mmSMC_RESP_0));
dev_info(adev->dev, " SMC_MESSAGE_0=0x%08X\n",
RREG32(mmSMC_MESSAGE_0));
dev_info(adev->dev, " SMC_SYSCON_RESET_CNTL=0x%08X\n",
RREG32_SMC(ixSMC_SYSCON_RESET_CNTL));
dev_info(adev->dev, " SMC_SYSCON_CLOCK_CNTL_0=0x%08X\n",
RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0));
dev_info(adev->dev, " SMC_SYSCON_MISC_CNTL=0x%08X\n",
RREG32_SMC(ixSMC_SYSCON_MISC_CNTL));
dev_info(adev->dev, " SMC_PC_C=0x%08X\n",
RREG32_SMC(ixSMC_PC_C));
}
static int ci_dpm_soft_reset(void *handle)
{
return 0;
@ -6572,7 +6360,7 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
}
static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
bool queue_thermal = false;
@ -6614,6 +6402,7 @@ static int ci_dpm_set_powergating_state(void *handle,
}
const struct amd_ip_funcs ci_dpm_ip_funcs = {
.name = "ci_dpm",
.early_init = ci_dpm_early_init,
.late_init = ci_dpm_late_init,
.sw_init = ci_dpm_sw_init,
@ -6625,7 +6414,6 @@ const struct amd_ip_funcs ci_dpm_ip_funcs = {
.is_idle = ci_dpm_is_idle,
.wait_for_idle = ci_dpm_wait_for_idle,
.soft_reset = ci_dpm_soft_reset,
.print_status = ci_dpm_print_status,
.set_clockgating_state = ci_dpm_set_clockgating_state,
.set_powergating_state = ci_dpm_set_powergating_state,
};


@ -962,7 +962,7 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
return true;
}
static struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
{mmGRBM_STATUS, false},
{mmGB_ADDR_CONFIG, false},
{mmMC_ARB_RAMCFG, false},
@ -2007,7 +2007,6 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.get_xclk = &cik_get_xclk,
.set_uvd_clocks = &cik_set_uvd_clocks,
.set_vce_clocks = &cik_set_vce_clocks,
.get_cu_info = &gfx_v7_0_get_cu_info,
/* these should be moved to their own ip modules */
.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
.wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle,
@ -2214,11 +2213,6 @@ static int cik_common_wait_for_idle(void *handle)
return 0;
}
static void cik_common_print_status(void *handle)
{
}
static int cik_common_soft_reset(void *handle)
{
/* XXX hard reset?? */
@ -2238,6 +2232,7 @@ static int cik_common_set_powergating_state(void *handle,
}
const struct amd_ip_funcs cik_common_ip_funcs = {
.name = "cik_common",
.early_init = cik_common_early_init,
.late_init = NULL,
.sw_init = cik_common_sw_init,
@ -2249,7 +2244,6 @@ const struct amd_ip_funcs cik_common_ip_funcs = {
.is_idle = cik_common_is_idle,
.wait_for_idle = cik_common_wait_for_idle,
.soft_reset = cik_common_soft_reset,
.print_status = cik_common_print_status,
.set_clockgating_state = cik_common_set_clockgating_state,
.set_powergating_state = cik_common_set_powergating_state,
};


@ -243,7 +243,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev,
/* wptr/rptr are in bytes! */
u32 ring_index = adev->irq.ih.rptr >> 2;
uint32_t dw[4];
dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
@ -372,35 +372,6 @@ static int cik_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
static void cik_ih_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "CIK IH registers\n");
dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(mmSRBM_STATUS));
dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
RREG32(mmSRBM_STATUS2));
dev_info(adev->dev, " INTERRUPT_CNTL=0x%08X\n",
RREG32(mmINTERRUPT_CNTL));
dev_info(adev->dev, " INTERRUPT_CNTL2=0x%08X\n",
RREG32(mmINTERRUPT_CNTL2));
dev_info(adev->dev, " IH_CNTL=0x%08X\n",
RREG32(mmIH_CNTL));
dev_info(adev->dev, " IH_RB_CNTL=0x%08X\n",
RREG32(mmIH_RB_CNTL));
dev_info(adev->dev, " IH_RB_BASE=0x%08X\n",
RREG32(mmIH_RB_BASE));
dev_info(adev->dev, " IH_RB_WPTR_ADDR_LO=0x%08X\n",
RREG32(mmIH_RB_WPTR_ADDR_LO));
dev_info(adev->dev, " IH_RB_WPTR_ADDR_HI=0x%08X\n",
RREG32(mmIH_RB_WPTR_ADDR_HI));
dev_info(adev->dev, " IH_RB_RPTR=0x%08X\n",
RREG32(mmIH_RB_RPTR));
dev_info(adev->dev, " IH_RB_WPTR=0x%08X\n",
RREG32(mmIH_RB_WPTR));
}
static int cik_ih_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@ -412,8 +383,6 @@ static int cik_ih_soft_reset(void *handle)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;
if (srbm_soft_reset) {
cik_ih_print_status((void *)adev);
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@ -428,8 +397,6 @@ static int cik_ih_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
cik_ih_print_status((void *)adev);
}
return 0;
@ -448,6 +415,7 @@ static int cik_ih_set_powergating_state(void *handle,
}
const struct amd_ip_funcs cik_ih_ip_funcs = {
.name = "cik_ih",
.early_init = cik_ih_early_init,
.late_init = NULL,
.sw_init = cik_ih_sw_init,
@ -459,7 +427,6 @@ const struct amd_ip_funcs cik_ih_ip_funcs = {
.is_idle = cik_ih_is_idle,
.wait_for_idle = cik_ih_wait_for_idle,
.soft_reset = cik_ih_soft_reset,
.print_status = cik_ih_print_status,
.set_clockgating_state = cik_ih_set_clockgating_state,
.set_powergating_state = cik_ih_set_powergating_state,
};


@ -210,9 +210,10 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (CIK).
*/
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
u32 extra_bits = ib->vm_id & 0xf;
u32 extra_bits = vm_id & 0xf;
u32 next_rptr = ring->wptr + 5;
while ((next_rptr & 7) != 4)
@ -643,7 +644,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[3] = 1;
ib.ptr[4] = 0xDEADBEEF;
ib.length_dw = 5;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
if (r)
goto err1;
@ -976,7 +977,7 @@ static int cik_sdma_sw_init(void *handle)
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 256 * 1024,
r = amdgpu_ring_init(adev, ring, 1024,
SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
@ -1064,57 +1065,6 @@ static int cik_sdma_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
static void cik_sdma_print_status(void *handle)
{
int i, j;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "CIK SDMA registers\n");
dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
RREG32(mmSRBM_STATUS2));
for (i = 0; i < adev->sdma.num_instances; i++) {
dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n",
i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_ME_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_IB_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_WPTR=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_BASE=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_TILING_CONFIG=0x%08X\n",
i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i]));
mutex_lock(&adev->srbm_mutex);
for (j = 0; j < 16; j++) {
cik_srbm_select(adev, 0, 0, 0, j);
dev_info(adev->dev, " VM %d:\n", j);
dev_info(adev->dev, " SDMA0_GFX_VIRTUAL_ADDR=0x%08X\n",
RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
dev_info(adev->dev, " SDMA0_GFX_APE1_CNTL=0x%08X\n",
RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
}
cik_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
}
static int cik_sdma_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
@ -1137,8 +1087,6 @@ static int cik_sdma_soft_reset(void *handle)
}
if (srbm_soft_reset) {
cik_sdma_print_status((void *)adev);
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@ -1153,8 +1101,6 @@ static int cik_sdma_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
cik_sdma_print_status((void *)adev);
}
return 0;
@ -1278,6 +1224,7 @@ static int cik_sdma_set_powergating_state(void *handle,
}
const struct amd_ip_funcs cik_sdma_ip_funcs = {
.name = "cik_sdma",
.early_init = cik_sdma_early_init,
.late_init = NULL,
.sw_init = cik_sdma_sw_init,
@ -1289,7 +1236,6 @@ const struct amd_ip_funcs cik_sdma_ip_funcs = {
.is_idle = cik_sdma_is_idle,
.wait_for_idle = cik_sdma_wait_for_idle,
.soft_reset = cik_sdma_soft_reset,
.print_status = cik_sdma_print_status,
.set_clockgating_state = cik_sdma_set_clockgating_state,
.set_powergating_state = cik_sdma_set_powergating_state,
};


@ -190,8 +190,8 @@
# define MACRO_TILE_ASPECT(x) ((x) << 4)
# define NUM_BANKS(x) ((x) << 6)
#define MSG_ENTER_RLC_SAFE_MODE 1
#define MSG_EXIT_RLC_SAFE_MODE 0
/*
* PM4


@ -2230,6 +2230,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
}
const struct amd_ip_funcs cz_dpm_ip_funcs = {
.name = "cz_dpm",
.early_init = cz_dpm_early_init,
.late_init = cz_dpm_late_init,
.sw_init = cz_dpm_sw_init,
@ -2241,7 +2242,6 @@ const struct amd_ip_funcs cz_dpm_ip_funcs = {
.is_idle = NULL,
.wait_for_idle = NULL,
.soft_reset = NULL,
.print_status = NULL,
.set_clockgating_state = cz_dpm_set_clockgating_state,
.set_powergating_state = cz_dpm_set_powergating_state,
};


@ -222,7 +222,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev,
/* wptr/rptr are in bytes! */
u32 ring_index = adev->irq.ih.rptr >> 2;
uint32_t dw[4];
dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
@ -351,35 +351,6 @@ static int cz_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
static void cz_ih_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "CZ IH registers\n");
dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(mmSRBM_STATUS));
dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
RREG32(mmSRBM_STATUS2));
dev_info(adev->dev, " INTERRUPT_CNTL=0x%08X\n",
RREG32(mmINTERRUPT_CNTL));
dev_info(adev->dev, " INTERRUPT_CNTL2=0x%08X\n",
RREG32(mmINTERRUPT_CNTL2));
dev_info(adev->dev, " IH_CNTL=0x%08X\n",
RREG32(mmIH_CNTL));
dev_info(adev->dev, " IH_RB_CNTL=0x%08X\n",
RREG32(mmIH_RB_CNTL));
dev_info(adev->dev, " IH_RB_BASE=0x%08X\n",
RREG32(mmIH_RB_BASE));
dev_info(adev->dev, " IH_RB_WPTR_ADDR_LO=0x%08X\n",
RREG32(mmIH_RB_WPTR_ADDR_LO));
dev_info(adev->dev, " IH_RB_WPTR_ADDR_HI=0x%08X\n",
RREG32(mmIH_RB_WPTR_ADDR_HI));
dev_info(adev->dev, " IH_RB_RPTR=0x%08X\n",
RREG32(mmIH_RB_RPTR));
dev_info(adev->dev, " IH_RB_WPTR=0x%08X\n",
RREG32(mmIH_RB_WPTR));
}
static int cz_ih_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
@ -391,8 +362,6 @@ static int cz_ih_soft_reset(void *handle)
SOFT_RESET_IH, 1);
if (srbm_soft_reset) {
cz_ih_print_status((void *)adev);
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@ -407,8 +376,6 @@ static int cz_ih_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
cz_ih_print_status((void *)adev);
}
return 0;
@ -429,6 +396,7 @@ static int cz_ih_set_powergating_state(void *handle,
}
const struct amd_ip_funcs cz_ih_ip_funcs = {
.name = "cz_ih",
.early_init = cz_ih_early_init,
.late_init = NULL,
.sw_init = cz_ih_sw_init,
@ -440,7 +408,6 @@ const struct amd_ip_funcs cz_ih_ip_funcs = {
.is_idle = cz_ih_is_idle,
.wait_for_idle = cz_ih_wait_for_idle,
.soft_reset = cz_ih_soft_reset,
.print_status = cz_ih_print_status,
.set_clockgating_state = cz_ih_set_clockgating_state,
.set_powergating_state = cz_ih_set_powergating_state,
};


@ -77,7 +77,7 @@ struct cz_smu_private_data {
uint8_t driver_buffer_length;
uint8_t scratch_buffer_length;
uint16_t toc_entry_used_count;
uint16_t toc_entry_initialize_index;
uint16_t toc_entry_power_profiling_index;
uint16_t toc_entry_aram;
uint16_t toc_entry_ih_register_restore_task_index;


@ -284,10 +284,16 @@ static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
* surface base address.
*/
static void dce_v10_0_page_flip(struct amdgpu_device *adev,
int crtc_id, u64 crtc_base)
int crtc_id, u64 crtc_base, bool async)
{
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
u32 tmp;
/* flip at hsync for async, default is vsync */
tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
/* update the primary scanout address */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(crtc_base));
@ -2211,6 +2217,14 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
dce_v10_0_vga_enable(crtc, false);
/* Make sure surface address is updated at vertical blank rather than
* horizontal blank
*/
tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(fb_location));
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@ -2261,13 +2275,6 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
/* pageflip setup */
/* make sure flip is at vb rather than hb */
tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
/* set pageflip to happen only at start of vblank interval (front porch) */
WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
@ -2587,7 +2594,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
return -EINVAL;
}
obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
obj = drm_gem_object_lookup(file_priv, handle);
if (!obj) {
DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
return -ENOENT;
@ -2992,6 +2999,8 @@ static int dce_v10_0_sw_init(void *handle)
adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
adev->ddev->mode_config.async_page_flip = true;
adev->ddev->mode_config.max_width = 16384;
adev->ddev->mode_config.max_height = 16384;
@ -3130,14 +3139,6 @@ static int dce_v10_0_wait_for_idle(void *handle)
return 0;
}
static void dce_v10_0_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "DCE 10.x registers\n");
/* XXX todo */
}
static int dce_v10_0_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0, tmp;
@ -3147,8 +3148,6 @@ static int dce_v10_0_soft_reset(void *handle)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
if (srbm_soft_reset) {
dce_v10_0_print_status((void *)adev);
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@ -3163,7 +3162,6 @@ static int dce_v10_0_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
dce_v10_0_print_status((void *)adev);
}
return 0;
}
@ -3370,7 +3368,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
/* wake up userspace */
if (works->event)
drm_send_vblank_event(adev->ddev, crtc_id, works->event);
drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
@ -3501,6 +3499,7 @@ static int dce_v10_0_set_powergating_state(void *handle,
}
const struct amd_ip_funcs dce_v10_0_ip_funcs = {
.name = "dce_v10_0",
.early_init = dce_v10_0_early_init,
.late_init = NULL,
.sw_init = dce_v10_0_sw_init,
@ -3512,7 +3511,6 @@ const struct amd_ip_funcs dce_v10_0_ip_funcs = {
.is_idle = dce_v10_0_is_idle,
.wait_for_idle = dce_v10_0_wait_for_idle,
.soft_reset = dce_v10_0_soft_reset,
.print_status = dce_v10_0_print_status,
.set_clockgating_state = dce_v10_0_set_clockgating_state,
.set_powergating_state = dce_v10_0_set_powergating_state,
};


@ -132,6 +132,22 @@ static const u32 stoney_golden_settings_a11[] =
mmFBC_MISC, 0x1f311fff, 0x14302000,
};
static const u32 polaris11_golden_settings_a11[] =
{
mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
mmFBC_DEBUG1, 0xffffffff, 0x00000008,
mmFBC_MISC, 0x9f313fff, 0x14300008,
mmHDMI_CONTROL, 0x313f031f, 0x00000011,
};
static const u32 polaris10_golden_settings_a11[] =
{
mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
mmFBC_MISC, 0x9f313fff, 0x14300008,
mmHDMI_CONTROL, 0x313f031f, 0x00000011,
};
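Each golden-register entry is a (register, and_mask, or_mask) triple consumed three at a time; a sketch of the read-modify-write this implies (assumed semantics of the register-programming helper, not code from the diff):

	for (i = 0; i < ARRAY_SIZE(polaris10_golden_settings_a11); i += 3) {
		u32 reg = polaris10_golden_settings_a11[i + 0];
		u32 and_mask = polaris10_golden_settings_a11[i + 1];
		u32 or_mask = polaris10_golden_settings_a11[i + 2];

		WREG32(reg, (RREG32(reg) & ~and_mask) | or_mask);
	}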
static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
@ -149,6 +165,16 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
stoney_golden_settings_a11,
(const u32)ARRAY_SIZE(stoney_golden_settings_a11));
break;
case CHIP_POLARIS11:
amdgpu_program_register_sequence(adev,
polaris11_golden_settings_a11,
(const u32)ARRAY_SIZE(polaris11_golden_settings_a11));
break;
case CHIP_POLARIS10:
amdgpu_program_register_sequence(adev,
polaris10_golden_settings_a11,
(const u32)ARRAY_SIZE(polaris10_golden_settings_a11));
break;
default:
break;
}
@ -276,10 +302,17 @@ static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
* surface base address.
*/
static void dce_v11_0_page_flip(struct amdgpu_device *adev,
int crtc_id, u64 crtc_base)
int crtc_id, u64 crtc_base, bool async)
{
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
u32 tmp;
/* flip at hsync for async, default is vsync */
/* use UPDATE_IMMEDIATE_EN instead for async? */
tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
/* update the scanout addresses */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(crtc_base));
@ -565,35 +598,14 @@ static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev,
crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
CRTC_CONTROL, CRTC_MASTER_EN);
if (crtc_enabled) {
#if 0
u32 frame_count;
int j;
#if 1
save->crtc_enabled[i] = true;
tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
amdgpu_display_vblank_wait(adev, i);
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
/* this is correct only for RGB; black is 0 */
WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
/* wait for the next frame */
frame_count = amdgpu_display_vblank_get_counter(adev, i);
for (j = 0; j < adev->usec_timeout; j++) {
if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
break;
udelay(1);
}
tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
}
tmp = RREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
WREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
}
#else
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
@ -614,54 +626,20 @@ static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev,
static void dce_v11_0_resume_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
u32 tmp, frame_count;
int i, j;
u32 tmp;
int i;
/* update crtc base addresses */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(adev->mc.vram_start));
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(adev->mc.vram_start));
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)adev->mc.vram_start);
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)adev->mc.vram_start);
if (save->crtc_enabled[i]) {
tmp = RREG32(mmCRTC_MASTER_UPDATE_MODE + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
WREG32(mmCRTC_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
}
tmp = RREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
WREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
}
for (j = 0; j < adev->usec_timeout; j++) {
tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
break;
udelay(1);
}
tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
/* wait for the next frame */
frame_count = amdgpu_display_vblank_get_counter(adev, i);
for (j = 0; j < adev->usec_timeout; j++) {
if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
break;
udelay(1);
}
}
}
@ -1624,6 +1602,7 @@ static const u32 pin_offsets[] =
AUD4_REGISTER_OFFSET,
AUD5_REGISTER_OFFSET,
AUD6_REGISTER_OFFSET,
AUD7_REGISTER_OFFSET,
};
static int dce_v11_0_audio_init(struct amdgpu_device *adev)
@ -1635,7 +1614,20 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev)
adev->mode_info.audio.enabled = true;
adev->mode_info.audio.num_pins = 7;
switch (adev->asic_type) {
case CHIP_CARRIZO:
case CHIP_STONEY:
adev->mode_info.audio.num_pins = 7;
break;
case CHIP_POLARIS10:
adev->mode_info.audio.num_pins = 8;
break;
case CHIP_POLARIS11:
adev->mode_info.audio.num_pins = 6;
break;
default:
return -EINVAL;
}
for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
adev->mode_info.audio.pin[i].channels = -1;
@@ -2201,6 +2193,14 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
dce_v11_0_vga_enable(crtc, false);
/* Make sure surface address is updated at vertical blank rather than
* horizontal blank
*/
tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(fb_location));
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@@ -2251,13 +2251,6 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
/* pageflip setup */
/* make sure flip is at vb rather than hb */
tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
/* set pageflip to happen only at start of vblank interval (front porch) */
WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
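
The flip-control writes in the hunks above are the usual DCE read-modify-write sequence: RREG32() the register, REG_SET_FIELD() one named field, WREG32() the result. Those macros expand to mask-and-shift arithmetic driven by generated MASK/__SHIFT constants; here is a self-contained sketch with an invented field layout (the real values live in the DCE register headers):

#include <stdint.h>
#include <stdio.h>

/* Invented field: bit 0 of a flip-control register. */
#define FLIP__H_RETRACE_EN_MASK   0x00000001u
#define FLIP__H_RETRACE_EN__SHIFT 0

#define GET_FIELD(reg, f) (((reg) & f##_MASK) >> f##__SHIFT)
#define SET_FIELD(reg, f, val) \
	(((reg) & ~f##_MASK) | (((uint32_t)(val) << f##__SHIFT) & f##_MASK))

int main(void)
{
	uint32_t tmp = 0xffffffffu;          /* pretend RREG32() result */

	/* clear H_RETRACE_EN so the surface address latches at vblank */
	tmp = SET_FIELD(tmp, FLIP__H_RETRACE_EN, 0);
	printf("reg=0x%08x field=%u\n", tmp,
	       (unsigned)GET_FIELD(tmp, FLIP__H_RETRACE_EN));
	return 0;                            /* the driver would WREG32(tmp) here */
}
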
@@ -2427,6 +2420,40 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
u32 pll_in_use;
int pll;
if ((adev->asic_type == CHIP_POLARIS10) ||
(adev->asic_type == CHIP_POLARIS11)) {
struct amdgpu_encoder *amdgpu_encoder =
to_amdgpu_encoder(amdgpu_crtc->encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
return ATOM_DP_DTO;
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (dig->linkb)
return ATOM_COMBOPHY_PLL1;
else
return ATOM_COMBOPHY_PLL0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
if (dig->linkb)
return ATOM_COMBOPHY_PLL3;
else
return ATOM_COMBOPHY_PLL2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (dig->linkb)
return ATOM_COMBOPHY_PLL5;
else
return ATOM_COMBOPHY_PLL4;
break;
default:
DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
return ATOM_PPLL_INVALID;
}
}
if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
if (adev->clock.dp_extclk)
/* skip PPLL programming if using ext clock */
@@ -2578,7 +2605,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
return -EINVAL;
}
obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
obj = drm_gem_object_lookup(file_priv, handle);
if (!obj) {
DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
return -ENOENT;
@@ -2782,7 +2809,17 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
case ATOM_PPLL2:
/* disable the ppll */
amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
break;
case ATOM_COMBOPHY_PLL0:
case ATOM_COMBOPHY_PLL1:
case ATOM_COMBOPHY_PLL2:
case ATOM_COMBOPHY_PLL3:
case ATOM_COMBOPHY_PLL4:
case ATOM_COMBOPHY_PLL5:
/* disable the ppll */
amdgpu_atombios_crtc_program_pll(crtc, ATOM_CRTC_INVALID, amdgpu_crtc->pll_id,
0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
break;
default:
break;
@@ -2800,11 +2837,28 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
if (!amdgpu_crtc->adjusted_clock)
return -EINVAL;
amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
if ((adev->asic_type == CHIP_POLARIS10) ||
(adev->asic_type == CHIP_POLARIS11)) {
struct amdgpu_encoder *amdgpu_encoder =
to_amdgpu_encoder(amdgpu_crtc->encoder);
int encoder_mode =
amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);
/* SetPixelClock calculates the plls and ss values now */
amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id,
amdgpu_crtc->pll_id,
encoder_mode, amdgpu_encoder->encoder_id,
adjusted_mode->clock, 0, 0, 0, 0,
amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss);
} else {
amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
}
amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
@@ -2955,6 +3009,16 @@ static int dce_v11_0_early_init(void *handle)
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 9;
break;
case CHIP_POLARIS10:
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
case CHIP_POLARIS11:
adev->mode_info.num_crtc = 5;
adev->mode_info.num_hpd = 5;
adev->mode_info.num_dig = 5;
break;
default:
/* FIXME: not supported yet */
return -EINVAL;
@@ -2987,6 +3051,8 @@ static int dce_v11_0_sw_init(void *handle)
adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
adev->ddev->mode_config.async_page_flip = true;
adev->ddev->mode_config.max_width = 16384;
adev->ddev->mode_config.max_height = 16384;
@@ -3057,7 +3123,15 @@ static int dce_v11_0_hw_init(void *handle)
/* init dig PHYs, disp eng pll */
amdgpu_atombios_crtc_powergate_init(adev);
amdgpu_atombios_encoder_init_dig(adev);
amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
if ((adev->asic_type == CHIP_POLARIS10) ||
(adev->asic_type == CHIP_POLARIS11)) {
amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
amdgpu_atombios_crtc_set_dce_clock(adev, 0,
DCE_CLOCK_TYPE_DPREFCLK, ATOM_GCK_DFS);
} else {
amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
}
/* initialize hpd */
dce_v11_0_hpd_init(adev);
@@ -3126,14 +3200,6 @@ static int dce_v11_0_wait_for_idle(void *handle)
return 0;
}
static void dce_v11_0_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "DCE 10.x registers\n");
/* XXX todo */
}
static int dce_v11_0_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0, tmp;
@@ -3143,8 +3209,6 @@ static int dce_v11_0_soft_reset(void *handle)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
if (srbm_soft_reset) {
dce_v11_0_print_status((void *)adev);
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -3159,7 +3223,6 @@ static int dce_v11_0_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
dce_v11_0_print_status((void *)adev);
}
return 0;
}
@@ -3366,7 +3429,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
/* wakeup userspace */
if(works->event)
drm_send_vblank_event(adev->ddev, crtc_id, works->event);
drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
@@ -3497,6 +3560,7 @@ static int dce_v11_0_set_powergating_state(void *handle,
}
const struct amd_ip_funcs dce_v11_0_ip_funcs = {
.name = "dce_v11_0",
.early_init = dce_v11_0_early_init,
.late_init = NULL,
.sw_init = dce_v11_0_sw_init,
@@ -3508,7 +3572,6 @@ const struct amd_ip_funcs dce_v11_0_ip_funcs = {
.is_idle = dce_v11_0_is_idle,
.wait_for_idle = dce_v11_0_wait_for_idle,
.soft_reset = dce_v11_0_soft_reset,
.print_status = dce_v11_0_print_status,
.set_clockgating_state = dce_v11_0_set_clockgating_state,
.set_powergating_state = dce_v11_0_set_powergating_state,
};
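
Every IP block touched by this series gains a .name and loses .print_status; the amd_ip_funcs tables are plain const ops tables, and the core only calls hooks that are non-NULL. A reduced stand-alone model of that dispatch pattern (the struct and names here are invented for illustration):

#include <stdio.h>
#include <stddef.h>

struct ip_funcs {                            /* cut-down stand-in for amd_ip_funcs */
	const char *name;
	int (*hw_init)(void *handle);
	int (*soft_reset)(void *handle);     /* optional: may be NULL */
};

static int demo_hw_init(void *handle)
{
	(void)handle;
	puts("hw_init");
	return 0;
}

static const struct ip_funcs demo_ip_funcs = {
	.name = "demo",
	.hw_init = demo_hw_init,
	.soft_reset = NULL,                  /* dropped hooks simply stay NULL */
};

int main(void)
{
	printf("block: %s\n", demo_ip_funcs.name);
	if (demo_ip_funcs.hw_init)
		demo_ip_funcs.hw_init(NULL);
	if (demo_ip_funcs.soft_reset)        /* skipped, exactly like a removed hook */
		demo_ip_funcs.soft_reset(NULL);
	return 0;
}
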

diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c

@@ -233,10 +233,13 @@ static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
* surface base address.
*/
static void dce_v8_0_page_flip(struct amdgpu_device *adev,
int crtc_id, u64 crtc_base)
int crtc_id, u64 crtc_base, bool async)
{
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
/* flip at hsync for async, default is vsync */
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
/* update the primary scanout addresses */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(crtc_base));
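
As in the dce_v11_0 hunks earlier, the flip address is a 64-bit GPU address split across two 32-bit registers: upper_32_bits() feeds the ..._ADDRESS_HIGH register and a plain (u32) truncation feeds the low one. A stand-alone sketch of the split, with a check that it round-trips (the address value is an example):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }

int main(void)
{
	uint64_t crtc_base = 0x0000000123400000ull;  /* example scanout address */
	uint32_t hi = upper_32(crtc_base);           /* -> ..._SURFACE_ADDRESS_HIGH */
	uint32_t lo = lower_32(crtc_base);           /* -> ..._SURFACE_ADDRESS */

	assert((((uint64_t)hi << 32) | lo) == crtc_base);
	printf("hi=0x%08x lo=0x%08x\n", hi, lo);
	return 0;
}
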
@@ -1999,7 +2002,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
uint32_t fb_format, fb_pitch_pixels;
u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
u32 pipe_config;
u32 tmp, viewport_w, viewport_h;
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
@@ -2135,6 +2138,11 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
dce_v8_0_vga_enable(crtc, false);
/* Make sure surface address is updated at vertical blank rather than
* horizontal blank
*/
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(fb_location));
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@@ -2182,12 +2190,6 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
/* pageflip setup */
/* make sure flip is at vb rather than hb */
tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
tmp &= ~GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK;
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
/* set pageflip to happen only at start of vblank interval (front porch) */
WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
@@ -2499,7 +2501,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
return -EINVAL;
}
obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
obj = drm_gem_object_lookup(file_priv, handle);
if (!obj) {
DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
return -ENOENT;
@@ -2902,6 +2904,8 @@ static int dce_v8_0_sw_init(void *handle)
adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
adev->ddev->mode_config.async_page_flip = true;
adev->ddev->mode_config.max_width = 16384;
adev->ddev->mode_config.max_height = 16384;
@@ -3038,14 +3042,6 @@ static int dce_v8_0_wait_for_idle(void *handle)
return 0;
}
static void dce_v8_0_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "DCE 8.x registers\n");
/* XXX todo */
}
static int dce_v8_0_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0, tmp;
@@ -3055,8 +3051,6 @@ static int dce_v8_0_soft_reset(void *handle)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
if (srbm_soft_reset) {
dce_v8_0_print_status((void *)adev);
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -3071,7 +3065,6 @@ static int dce_v8_0_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
dce_v8_0_print_status((void *)adev);
}
return 0;
}
@@ -3379,7 +3372,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
/* wakeup userspace */
if (works->event)
drm_send_vblank_event(adev->ddev, crtc_id, works->event);
drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
@@ -3431,6 +3424,7 @@ static int dce_v8_0_set_powergating_state(void *handle,
}
const struct amd_ip_funcs dce_v8_0_ip_funcs = {
.name = "dce_v8_0",
.early_init = dce_v8_0_early_init,
.late_init = NULL,
.sw_init = dce_v8_0_sw_init,
@@ -3442,7 +3436,6 @@ const struct amd_ip_funcs dce_v8_0_ip_funcs = {
.is_idle = dce_v8_0_is_idle,
.wait_for_idle = dce_v8_0_wait_for_idle,
.soft_reset = dce_v8_0_soft_reset,
.print_status = dce_v8_0_print_status,
.set_clockgating_state = dce_v8_0_set_clockgating_state,
.set_powergating_state = dce_v8_0_set_powergating_state,
};

diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c

@@ -143,6 +143,7 @@ static int fiji_dpm_set_powergating_state(void *handle,
}
const struct amd_ip_funcs fiji_dpm_ip_funcs = {
.name = "fiji_dpm",
.early_init = fiji_dpm_early_init,
.late_init = NULL,
.sw_init = fiji_dpm_sw_init,
@@ -154,7 +155,6 @@ const struct amd_ip_funcs fiji_dpm_ip_funcs = {
.is_idle = NULL,
.wait_for_idle = NULL,
.soft_reset = NULL,
.print_status = NULL,
.set_clockgating_state = fiji_dpm_set_clockgating_state,
.set_powergating_state = fiji_dpm_set_powergating_state,
};

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c

@@ -53,7 +53,6 @@
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
int gfx_v7_0_get_cu_info(struct amdgpu_device *, struct amdgpu_cu_info *);
MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
MODULE_FIRMWARE("radeon/bonaire_me.bin");
@@ -882,6 +881,7 @@ static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
/*
* Core functions
@@ -1718,6 +1718,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
gfx_v7_0_tiling_mode_table_init(adev);
gfx_v7_0_setup_rb(adev);
gfx_v7_0_get_cu_info(adev);
/* set HW defaults for 3D engine */
WREG32(mmCP_MEQ_THRESHOLDS,
@@ -2029,17 +2030,13 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
* on the gfx ring for execution by the GPU.
*/
static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
bool need_ctx_switch = ring->current_ctx != ib->ctx;
u32 header, control = 0;
u32 next_rptr = ring->wptr + 5;
/* drop the CE preamble IB for the same context */
if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
return;
if (need_ctx_switch)
if (ctx_switch)
next_rptr += 2;
next_rptr += 4;
@@ -2050,7 +2047,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
if (need_ctx_switch) {
if (ctx_switch) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}
@@ -2060,7 +2057,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
else
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
control |= ib->length_dw | (ib->vm_id << 24);
control |= ib->length_dw | (vm_id << 24);
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
@@ -2073,7 +2070,8 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
}
static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
u32 header, control = 0;
u32 next_rptr = ring->wptr + 5;
@@ -2088,7 +2086,7 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
control |= ib->length_dw | (ib->vm_id << 24);
control |= ib->length_dw | (vm_id << 24);
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
@@ -2136,7 +2134,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
if (r)
goto err2;
@@ -3053,6 +3051,19 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
WAIT_REG_MEM_FUNCTION(3) | /* equal */
WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, 4); /* poll interval */
if (usepfp) {
/* sync CE with ME to prevent CE fetch CEIB before context switch done */
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
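
The WAIT_REG_MEM packet that moved into gfx_v7_0_ring_emit_pipeline_sync() above tells the CP to poll a fence address until it equals the expected sequence number (FUNCTION(3) is "equal"; the trailing dword is the poll interval). In software the packet's semantics reduce to a spin on a memory location, sketched here with a volatile variable standing in for the GPU-written fence:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fence_mem;          /* stand-in for fence_drv.gpu_addr */

/* Model of WAIT_REG_MEM with MEM_SPACE(1)/FUNCTION(3): block until the
 * 32-bit value at addr equals seq. Hardware re-polls every N clocks. */
static void wait_reg_mem_equal(volatile uint32_t *addr, uint32_t seq)
{
	while (*addr != seq)
		;
}

int main(void)
{
	fence_mem = 42;                      /* pretend the fence already signaled */
	wait_reg_mem_equal(&fence_mem, 42);
	puts("fence reached; pipeline sync may proceed");
	return 0;
}
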
@@ -3080,18 +3091,6 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
WAIT_REG_MEM_FUNCTION(3) | /* equal */
WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, 4); /* poll interval */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -3869,18 +3868,13 @@ static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev)
{
uint32_t tmp, active_cu_number;
struct amdgpu_cu_info cu_info;
u32 tmp;
gfx_v7_0_get_cu_info(adev, &cu_info);
tmp = cu_info.ao_cu_mask;
active_cu_number = cu_info.number;
WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, tmp);
WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
tmp = RREG32(mmRLC_MAX_PG_CU);
tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
tmp |= (active_cu_number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
WREG32(mmRLC_MAX_PG_CU, tmp);
}
@@ -4414,7 +4408,7 @@ static int gfx_v7_0_sw_init(void *handle)
ring = &adev->gfx.gfx_ring[i];
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
r = amdgpu_ring_init(adev, ring, 1024 * 1024,
r = amdgpu_ring_init(adev, ring, 1024,
PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
&adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
AMDGPU_RING_TYPE_GFX);
@@ -4438,10 +4432,10 @@ static int gfx_v7_0_sw_init(void *handle)
ring->me = 1; /* first MEC */
ring->pipe = i / 8;
ring->queue = i % 8;
sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024 * 1024,
r = amdgpu_ring_init(adev, ring, 1024,
PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
&adev->gfx.eop_irq, irq_type,
AMDGPU_RING_TYPE_COMPUTE);
@@ -4572,256 +4566,6 @@ static int gfx_v7_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
static void gfx_v7_0_print_status(void *handle)
{
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "GFX 7.x registers\n");
dev_info(adev->dev, " GRBM_STATUS=0x%08X\n",
RREG32(mmGRBM_STATUS));
dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n",
RREG32(mmGRBM_STATUS2));
dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n",
RREG32(mmGRBM_STATUS_SE0));
dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n",
RREG32(mmGRBM_STATUS_SE1));
dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n",
RREG32(mmGRBM_STATUS_SE2));
dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n",
RREG32(mmGRBM_STATUS_SE3));
dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
RREG32(mmCP_STALLED_STAT1));
dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
RREG32(mmCP_STALLED_STAT2));
dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
RREG32(mmCP_STALLED_STAT3));
dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
RREG32(mmCP_CPF_BUSY_STAT));
dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
RREG32(mmCP_CPF_STALLED_STAT1));
dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
RREG32(mmCP_CPC_STALLED_STAT1));
dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
for (i = 0; i < 32; i++) {
dev_info(adev->dev, " GB_TILE_MODE%d=0x%08X\n",
i, RREG32(mmGB_TILE_MODE0 + (i * 4)));
}
for (i = 0; i < 16; i++) {
dev_info(adev->dev, " GB_MACROTILE_MODE%d=0x%08X\n",
i, RREG32(mmGB_MACROTILE_MODE0 + (i * 4)));
}
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
dev_info(adev->dev, " se: %d\n", i);
gfx_v7_0_select_se_sh(adev, i, 0xffffffff);
dev_info(adev->dev, " PA_SC_RASTER_CONFIG=0x%08X\n",
RREG32(mmPA_SC_RASTER_CONFIG));
dev_info(adev->dev, " PA_SC_RASTER_CONFIG_1=0x%08X\n",
RREG32(mmPA_SC_RASTER_CONFIG_1));
}
gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
dev_info(adev->dev, " GB_ADDR_CONFIG=0x%08X\n",
RREG32(mmGB_ADDR_CONFIG));
dev_info(adev->dev, " HDP_ADDR_CONFIG=0x%08X\n",
RREG32(mmHDP_ADDR_CONFIG));
dev_info(adev->dev, " DMIF_ADDR_CALC=0x%08X\n",
RREG32(mmDMIF_ADDR_CALC));
dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n",
RREG32(mmCP_MEQ_THRESHOLDS));
dev_info(adev->dev, " SX_DEBUG_1=0x%08X\n",
RREG32(mmSX_DEBUG_1));
dev_info(adev->dev, " TA_CNTL_AUX=0x%08X\n",
RREG32(mmTA_CNTL_AUX));
dev_info(adev->dev, " SPI_CONFIG_CNTL=0x%08X\n",
RREG32(mmSPI_CONFIG_CNTL));
dev_info(adev->dev, " SQ_CONFIG=0x%08X\n",
RREG32(mmSQ_CONFIG));
dev_info(adev->dev, " DB_DEBUG=0x%08X\n",
RREG32(mmDB_DEBUG));
dev_info(adev->dev, " DB_DEBUG2=0x%08X\n",
RREG32(mmDB_DEBUG2));
dev_info(adev->dev, " DB_DEBUG3=0x%08X\n",
RREG32(mmDB_DEBUG3));
dev_info(adev->dev, " CB_HW_CONTROL=0x%08X\n",
RREG32(mmCB_HW_CONTROL));
dev_info(adev->dev, " SPI_CONFIG_CNTL_1=0x%08X\n",
RREG32(mmSPI_CONFIG_CNTL_1));
dev_info(adev->dev, " PA_SC_FIFO_SIZE=0x%08X\n",
RREG32(mmPA_SC_FIFO_SIZE));
dev_info(adev->dev, " VGT_NUM_INSTANCES=0x%08X\n",
RREG32(mmVGT_NUM_INSTANCES));
dev_info(adev->dev, " CP_PERFMON_CNTL=0x%08X\n",
RREG32(mmCP_PERFMON_CNTL));
dev_info(adev->dev, " PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n",
RREG32(mmPA_SC_FORCE_EOV_MAX_CNTS));
dev_info(adev->dev, " VGT_CACHE_INVALIDATION=0x%08X\n",
RREG32(mmVGT_CACHE_INVALIDATION));
dev_info(adev->dev, " VGT_GS_VERTEX_REUSE=0x%08X\n",
RREG32(mmVGT_GS_VERTEX_REUSE));
dev_info(adev->dev, " PA_SC_LINE_STIPPLE_STATE=0x%08X\n",
RREG32(mmPA_SC_LINE_STIPPLE_STATE));
dev_info(adev->dev, " PA_CL_ENHANCE=0x%08X\n",
RREG32(mmPA_CL_ENHANCE));
dev_info(adev->dev, " PA_SC_ENHANCE=0x%08X\n",
RREG32(mmPA_SC_ENHANCE));
dev_info(adev->dev, " CP_ME_CNTL=0x%08X\n",
RREG32(mmCP_ME_CNTL));
dev_info(adev->dev, " CP_MAX_CONTEXT=0x%08X\n",
RREG32(mmCP_MAX_CONTEXT));
dev_info(adev->dev, " CP_ENDIAN_SWAP=0x%08X\n",
RREG32(mmCP_ENDIAN_SWAP));
dev_info(adev->dev, " CP_DEVICE_ID=0x%08X\n",
RREG32(mmCP_DEVICE_ID));
dev_info(adev->dev, " CP_SEM_WAIT_TIMER=0x%08X\n",
RREG32(mmCP_SEM_WAIT_TIMER));
if (adev->asic_type != CHIP_HAWAII)
dev_info(adev->dev, " CP_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n",
RREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL));
dev_info(adev->dev, " CP_RB_WPTR_DELAY=0x%08X\n",
RREG32(mmCP_RB_WPTR_DELAY));
dev_info(adev->dev, " CP_RB_VMID=0x%08X\n",
RREG32(mmCP_RB_VMID));
dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
RREG32(mmCP_RB0_CNTL));
dev_info(adev->dev, " CP_RB0_WPTR=0x%08X\n",
RREG32(mmCP_RB0_WPTR));
dev_info(adev->dev, " CP_RB0_RPTR_ADDR=0x%08X\n",
RREG32(mmCP_RB0_RPTR_ADDR));
dev_info(adev->dev, " CP_RB0_RPTR_ADDR_HI=0x%08X\n",
RREG32(mmCP_RB0_RPTR_ADDR_HI));
dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
RREG32(mmCP_RB0_CNTL));
dev_info(adev->dev, " CP_RB0_BASE=0x%08X\n",
RREG32(mmCP_RB0_BASE));
dev_info(adev->dev, " CP_RB0_BASE_HI=0x%08X\n",
RREG32(mmCP_RB0_BASE_HI));
dev_info(adev->dev, " CP_MEC_CNTL=0x%08X\n",
RREG32(mmCP_MEC_CNTL));
dev_info(adev->dev, " CP_CPF_DEBUG=0x%08X\n",
RREG32(mmCP_CPF_DEBUG));
dev_info(adev->dev, " SCRATCH_ADDR=0x%08X\n",
RREG32(mmSCRATCH_ADDR));
dev_info(adev->dev, " SCRATCH_UMSK=0x%08X\n",
RREG32(mmSCRATCH_UMSK));
/* init the pipes */
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
int me = (i < 4) ? 1 : 2;
int pipe = (i < 4) ? i : (i - 4);
int queue;
dev_info(adev->dev, " me: %d, pipe: %d\n", me, pipe);
cik_srbm_select(adev, me, pipe, 0, 0);
dev_info(adev->dev, " CP_HPD_EOP_BASE_ADDR=0x%08X\n",
RREG32(mmCP_HPD_EOP_BASE_ADDR));
dev_info(adev->dev, " CP_HPD_EOP_BASE_ADDR_HI=0x%08X\n",
RREG32(mmCP_HPD_EOP_BASE_ADDR_HI));
dev_info(adev->dev, " CP_HPD_EOP_VMID=0x%08X\n",
RREG32(mmCP_HPD_EOP_VMID));
dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n",
RREG32(mmCP_HPD_EOP_CONTROL));
for (queue = 0; queue < 8; queue++) {
cik_srbm_select(adev, me, pipe, queue, 0);
dev_info(adev->dev, " queue: %d\n", queue);
dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n",
RREG32(mmCP_PQ_WPTR_POLL_CNTL));
dev_info(adev->dev, " CP_HQD_PQ_DOORBELL_CONTROL=0x%08X\n",
RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL));
dev_info(adev->dev, " CP_HQD_ACTIVE=0x%08X\n",
RREG32(mmCP_HQD_ACTIVE));
dev_info(adev->dev, " CP_HQD_DEQUEUE_REQUEST=0x%08X\n",
RREG32(mmCP_HQD_DEQUEUE_REQUEST));
dev_info(adev->dev, " CP_HQD_PQ_RPTR=0x%08X\n",
RREG32(mmCP_HQD_PQ_RPTR));
dev_info(adev->dev, " CP_HQD_PQ_WPTR=0x%08X\n",
RREG32(mmCP_HQD_PQ_WPTR));
dev_info(adev->dev, " CP_HQD_PQ_BASE=0x%08X\n",
RREG32(mmCP_HQD_PQ_BASE));
dev_info(adev->dev, " CP_HQD_PQ_BASE_HI=0x%08X\n",
RREG32(mmCP_HQD_PQ_BASE_HI));
dev_info(adev->dev, " CP_HQD_PQ_CONTROL=0x%08X\n",
RREG32(mmCP_HQD_PQ_CONTROL));
dev_info(adev->dev, " CP_HQD_PQ_WPTR_POLL_ADDR=0x%08X\n",
RREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR));
dev_info(adev->dev, " CP_HQD_PQ_WPTR_POLL_ADDR_HI=0x%08X\n",
RREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI));
dev_info(adev->dev, " CP_HQD_PQ_RPTR_REPORT_ADDR=0x%08X\n",
RREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR));
dev_info(adev->dev, " CP_HQD_PQ_RPTR_REPORT_ADDR_HI=0x%08X\n",
RREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI));
dev_info(adev->dev, " CP_HQD_PQ_DOORBELL_CONTROL=0x%08X\n",
RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL));
dev_info(adev->dev, " CP_HQD_PQ_WPTR=0x%08X\n",
RREG32(mmCP_HQD_PQ_WPTR));
dev_info(adev->dev, " CP_HQD_VMID=0x%08X\n",
RREG32(mmCP_HQD_VMID));
dev_info(adev->dev, " CP_MQD_BASE_ADDR=0x%08X\n",
RREG32(mmCP_MQD_BASE_ADDR));
dev_info(adev->dev, " CP_MQD_BASE_ADDR_HI=0x%08X\n",
RREG32(mmCP_MQD_BASE_ADDR_HI));
dev_info(adev->dev, " CP_MQD_CONTROL=0x%08X\n",
RREG32(mmCP_MQD_CONTROL));
}
}
cik_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
dev_info(adev->dev, " CP_INT_CNTL_RING0=0x%08X\n",
RREG32(mmCP_INT_CNTL_RING0));
dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
RREG32(mmRLC_LB_CNTL));
dev_info(adev->dev, " RLC_CNTL=0x%08X\n",
RREG32(mmRLC_CNTL));
dev_info(adev->dev, " RLC_CGCG_CGLS_CTRL=0x%08X\n",
RREG32(mmRLC_CGCG_CGLS_CTRL));
dev_info(adev->dev, " RLC_LB_CNTR_INIT=0x%08X\n",
RREG32(mmRLC_LB_CNTR_INIT));
dev_info(adev->dev, " RLC_LB_CNTR_MAX=0x%08X\n",
RREG32(mmRLC_LB_CNTR_MAX));
dev_info(adev->dev, " RLC_LB_INIT_CU_MASK=0x%08X\n",
RREG32(mmRLC_LB_INIT_CU_MASK));
dev_info(adev->dev, " RLC_LB_PARAMS=0x%08X\n",
RREG32(mmRLC_LB_PARAMS));
dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
RREG32(mmRLC_LB_CNTL));
dev_info(adev->dev, " RLC_MC_CNTL=0x%08X\n",
RREG32(mmRLC_MC_CNTL));
dev_info(adev->dev, " RLC_UCODE_CNTL=0x%08X\n",
RREG32(mmRLC_UCODE_CNTL));
if (adev->asic_type == CHIP_BONAIRE)
dev_info(adev->dev, " RLC_DRIVER_CPDMA_STATUS=0x%08X\n",
RREG32(mmRLC_DRIVER_CPDMA_STATUS));
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < 16; i++) {
cik_srbm_select(adev, 0, 0, 0, i);
dev_info(adev->dev, " VM %d:\n", i);
dev_info(adev->dev, " SH_MEM_CONFIG=0x%08X\n",
RREG32(mmSH_MEM_CONFIG));
dev_info(adev->dev, " SH_MEM_APE1_BASE=0x%08X\n",
RREG32(mmSH_MEM_APE1_BASE));
dev_info(adev->dev, " SH_MEM_APE1_LIMIT=0x%08X\n",
RREG32(mmSH_MEM_APE1_LIMIT));
dev_info(adev->dev, " SH_MEM_BASES=0x%08X\n",
RREG32(mmSH_MEM_BASES));
}
cik_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
static int gfx_v7_0_soft_reset(void *handle)
{
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
@@ -4855,7 +4599,6 @@ static int gfx_v7_0_soft_reset(void *handle)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
if (grbm_soft_reset || srbm_soft_reset) {
gfx_v7_0_print_status((void *)adev);
/* disable CG/PG */
gfx_v7_0_fini_pg(adev);
gfx_v7_0_update_cg(adev, false);
@@ -4898,7 +4641,6 @@ static int gfx_v7_0_soft_reset(void *handle)
}
/* Wait a little for things to settle down */
udelay(50);
gfx_v7_0_print_status((void *)adev);
}
return 0;
}
@@ -5150,6 +4892,7 @@ static int gfx_v7_0_set_powergating_state(void *handle,
}
const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.name = "gfx_v7_0",
.early_init = gfx_v7_0_early_init,
.late_init = gfx_v7_0_late_init,
.sw_init = gfx_v7_0_sw_init,
@@ -5161,7 +4904,6 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.is_idle = gfx_v7_0_is_idle,
.wait_for_idle = gfx_v7_0_wait_for_idle,
.soft_reset = gfx_v7_0_soft_reset,
.print_status = gfx_v7_0_print_status,
.set_clockgating_state = gfx_v7_0_set_clockgating_state,
.set_powergating_state = gfx_v7_0_set_powergating_state,
};
@@ -5268,14 +5010,11 @@ static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
}
int gfx_v7_0_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info)
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
{
int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
if (!adev || !cu_info)
return -EINVAL;
struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
memset(cu_info, 0, sizeof(*cu_info));
@@ -5306,6 +5045,4 @@ int gfx_v7_0_get_cu_info(struct amdgpu_device *adev,
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
return 0;
}
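
The reworked gfx_v7_0_get_cu_info() above caches its result in adev->gfx.cu_info instead of filling a caller-supplied struct. Its core is a bitmap walk per SE/SH: count set bits for the active-CU total, and fold the first CUs of each bank into the always-on mask. A loose stand-alone sketch of that counting step; the bitmap value and the always-on threshold of 2 are illustrative, not taken from the hunks shown here:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bitmap = 0x0000003f;        /* example: CUs 0-5 present */
	uint32_t ao_bitmap = 0;
	unsigned k, counter = 0;

	for (k = 0; k < 16; k++) {
		if (bitmap & (1u << k)) {
			if (counter < 2)     /* first CUs become "always on" */
				ao_bitmap |= (1u << k);
			counter++;
		}
	}
	printf("active=%u ao_mask=0x%08x\n", counter, ao_bitmap);
	return 0;
}
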

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h

@@ -32,6 +32,5 @@ void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev);
void gfx_v7_0_rlc_stop(struct amdgpu_device *adev);
uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev);
void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
int gfx_v7_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info);
#endif

drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c: file diff suppressed because it is too large

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h

@@ -28,6 +28,5 @@ extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev);
void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
int gfx_v8_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info);
#endif

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c

@@ -1117,114 +1117,6 @@ static int gmc_v7_0_wait_for_idle(void *handle)
}
static void gmc_v7_0_print_status(void *handle)
{
int i, j;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "GMC 8.x registers\n");
dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(mmSRBM_STATUS));
dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
RREG32(mmSRBM_STATUS2));
dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
dev_info(adev->dev, " MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
RREG32(mmMC_VM_MX_L1_TLB_CNTL));
dev_info(adev->dev, " VM_L2_CNTL=0x%08X\n",
RREG32(mmVM_L2_CNTL));
dev_info(adev->dev, " VM_L2_CNTL2=0x%08X\n",
RREG32(mmVM_L2_CNTL2));
dev_info(adev->dev, " VM_L2_CNTL3=0x%08X\n",
RREG32(mmVM_L2_CNTL3));
dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
dev_info(adev->dev, " VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
dev_info(adev->dev, " VM_CONTEXT0_CNTL2=0x%08X\n",
RREG32(mmVM_CONTEXT0_CNTL2));
dev_info(adev->dev, " VM_CONTEXT0_CNTL=0x%08X\n",
RREG32(mmVM_CONTEXT0_CNTL));
dev_info(adev->dev, " 0x15D4=0x%08X\n",
RREG32(0x575));
dev_info(adev->dev, " 0x15D8=0x%08X\n",
RREG32(0x576));
dev_info(adev->dev, " 0x15DC=0x%08X\n",
RREG32(0x577));
dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
dev_info(adev->dev, " VM_CONTEXT1_CNTL2=0x%08X\n",
RREG32(mmVM_CONTEXT1_CNTL2));
dev_info(adev->dev, " VM_CONTEXT1_CNTL=0x%08X\n",
RREG32(mmVM_CONTEXT1_CNTL));
for (i = 0; i < 16; i++) {
if (i < 8)
dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
else
dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
}
dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
dev_info(adev->dev, " MC_VM_FB_LOCATION=0x%08X\n",
RREG32(mmMC_VM_FB_LOCATION));
dev_info(adev->dev, " MC_VM_AGP_BASE=0x%08X\n",
RREG32(mmMC_VM_AGP_BASE));
dev_info(adev->dev, " MC_VM_AGP_TOP=0x%08X\n",
RREG32(mmMC_VM_AGP_TOP));
dev_info(adev->dev, " MC_VM_AGP_BOT=0x%08X\n",
RREG32(mmMC_VM_AGP_BOT));
if (adev->asic_type == CHIP_KAVERI) {
dev_info(adev->dev, " CHUB_CONTROL=0x%08X\n",
RREG32(mmCHUB_CONTROL));
}
dev_info(adev->dev, " HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
dev_info(adev->dev, " HDP_NONSURFACE_BASE=0x%08X\n",
RREG32(mmHDP_NONSURFACE_BASE));
dev_info(adev->dev, " HDP_NONSURFACE_INFO=0x%08X\n",
RREG32(mmHDP_NONSURFACE_INFO));
dev_info(adev->dev, " HDP_NONSURFACE_SIZE=0x%08X\n",
RREG32(mmHDP_NONSURFACE_SIZE));
dev_info(adev->dev, " HDP_MISC_CNTL=0x%08X\n",
RREG32(mmHDP_MISC_CNTL));
dev_info(adev->dev, " HDP_HOST_PATH_CNTL=0x%08X\n",
RREG32(mmHDP_HOST_PATH_CNTL));
for (i = 0, j = 0; i < 32; i++, j += 0x6) {
dev_info(adev->dev, " %d:\n", i);
dev_info(adev->dev, " 0x%04X=0x%08X\n",
0xb05 + j, RREG32(0xb05 + j));
dev_info(adev->dev, " 0x%04X=0x%08X\n",
0xb06 + j, RREG32(0xb06 + j));
dev_info(adev->dev, " 0x%04X=0x%08X\n",
0xb07 + j, RREG32(0xb07 + j));
dev_info(adev->dev, " 0x%04X=0x%08X\n",
0xb08 + j, RREG32(0xb08 + j));
dev_info(adev->dev, " 0x%04X=0x%08X\n",
0xb09 + j, RREG32(0xb09 + j));
}
dev_info(adev->dev, " BIF_FB_EN=0x%08X\n",
RREG32(mmBIF_FB_EN));
}
static int gmc_v7_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1244,8 +1136,6 @@ static int gmc_v7_0_soft_reset(void *handle)
}
if (srbm_soft_reset) {
gmc_v7_0_print_status((void *)adev);
gmc_v7_0_mc_stop(adev, &save);
if (gmc_v7_0_wait_for_idle(adev)) {
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
@@ -1269,8 +1159,6 @@ static int gmc_v7_0_soft_reset(void *handle)
gmc_v7_0_mc_resume(adev, &save);
udelay(50);
gmc_v7_0_print_status((void *)adev);
}
return 0;
@@ -1373,6 +1261,7 @@ static int gmc_v7_0_set_powergating_state(void *handle,
}
const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
.name = "gmc_v7_0",
.early_init = gmc_v7_0_early_init,
.late_init = gmc_v7_0_late_init,
.sw_init = gmc_v7_0_sw_init,
@@ -1384,7 +1273,6 @@ const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
.is_idle = gmc_v7_0_is_idle,
.wait_for_idle = gmc_v7_0_wait_for_idle,
.soft_reset = gmc_v7_0_soft_reset,
.print_status = gmc_v7_0_print_status,
.set_clockgating_state = gmc_v7_0_set_clockgating_state,
.set_powergating_state = gmc_v7_0_set_powergating_state,
};

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c

@@ -43,6 +43,8 @@ static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
static const u32 golden_settings_tonga_a11[] =
{
@@ -73,6 +75,23 @@ static const u32 fiji_mgcg_cgcg_init[] =
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
static const u32 golden_settings_polaris11_a11[] =
{
mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};
static const u32 golden_settings_polaris10_a11[] =
{
mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};
static const u32 cz_mgcg_cgcg_init[] =
{
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
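
The golden_settings arrays above are flat triplets of (register offset, mask, value) handed to amdgpu_program_register_sequence(). A plausible reading of that layout is a masked read-modify-write per entry; here is a stand-alone sketch of such a consumer, with a small array standing in for the MMIO space (the offsets and entries are invented):

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[8];                     /* toy register file */

static void program_register_sequence(const uint32_t *seq, unsigned len)
{
	unsigned i;

	for (i = 0; i + 2 < len; i += 3) {
		uint32_t off = seq[i], mask = seq[i + 1], val = seq[i + 2];

		/* keep bits outside the mask, force masked bits to the value */
		regs[off] = (regs[off] & ~mask) | (val & mask);
	}
}

int main(void)
{
	static const uint32_t golden[] = {
		2, 0x0fffffff, 0x0fffffff,   /* like the VM_PRT_APERTURE entries */
		5, 0x00000003, 0x00000000,   /* like the MC_ARB_WTM_GRPWT_RD entry */
	};

	program_register_sequence(golden, 6);
	printf("reg2=0x%08x reg5=0x%08x\n", regs[2], regs[5]);
	return 0;
}
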
@@ -103,6 +122,16 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_tonga_a11,
(const u32)ARRAY_SIZE(golden_settings_tonga_a11));
break;
case CHIP_POLARIS11:
amdgpu_program_register_sequence(adev,
golden_settings_polaris11_a11,
(const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
break;
case CHIP_POLARIS10:
amdgpu_program_register_sequence(adev,
golden_settings_polaris10_a11,
(const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
break;
case CHIP_CARRIZO:
amdgpu_program_register_sequence(adev,
cz_mgcg_cgcg_init,
@@ -209,6 +238,12 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
case CHIP_TONGA:
chip_name = "tonga";
break;
case CHIP_POLARIS11:
chip_name = "polaris11";
break;
case CHIP_POLARIS10:
chip_name = "polaris10";
break;
case CHIP_FIJI:
case CHIP_CARRIZO:
case CHIP_STONEY:
@@ -1085,111 +1120,6 @@ static int gmc_v8_0_wait_for_idle(void *handle)
}
static void gmc_v8_0_print_status(void *handle)
{
int i, j;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "GMC 8.x registers\n");
dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(mmSRBM_STATUS));
dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
RREG32(mmSRBM_STATUS2));
dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
dev_info(adev->dev, " MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
RREG32(mmMC_VM_MX_L1_TLB_CNTL));
dev_info(adev->dev, " VM_L2_CNTL=0x%08X\n",
RREG32(mmVM_L2_CNTL));
dev_info(adev->dev, " VM_L2_CNTL2=0x%08X\n",
RREG32(mmVM_L2_CNTL2));
dev_info(adev->dev, " VM_L2_CNTL3=0x%08X\n",
RREG32(mmVM_L2_CNTL3));
dev_info(adev->dev, " VM_L2_CNTL4=0x%08X\n",
RREG32(mmVM_L2_CNTL4));
dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
dev_info(adev->dev, " VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
dev_info(adev->dev, " VM_CONTEXT0_CNTL2=0x%08X\n",
RREG32(mmVM_CONTEXT0_CNTL2));
dev_info(adev->dev, " VM_CONTEXT0_CNTL=0x%08X\n",
RREG32(mmVM_CONTEXT0_CNTL));
dev_info(adev->dev, " VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR=0x%08X\n",
RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR));
dev_info(adev->dev, " VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR=0x%08X\n",
RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR));
dev_info(adev->dev, " mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET=0x%08X\n",
RREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET));
dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
dev_info(adev->dev, " VM_CONTEXT1_CNTL2=0x%08X\n",
RREG32(mmVM_CONTEXT1_CNTL2));
dev_info(adev->dev, " VM_CONTEXT1_CNTL=0x%08X\n",
RREG32(mmVM_CONTEXT1_CNTL));
for (i = 0; i < 16; i++) {
if (i < 8)
dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
else
dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
}
dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
dev_info(adev->dev, " MC_VM_FB_LOCATION=0x%08X\n",
RREG32(mmMC_VM_FB_LOCATION));
dev_info(adev->dev, " MC_VM_AGP_BASE=0x%08X\n",
RREG32(mmMC_VM_AGP_BASE));
dev_info(adev->dev, " MC_VM_AGP_TOP=0x%08X\n",
RREG32(mmMC_VM_AGP_TOP));
dev_info(adev->dev, " MC_VM_AGP_BOT=0x%08X\n",
RREG32(mmMC_VM_AGP_BOT));
dev_info(adev->dev, " HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
dev_info(adev->dev, " HDP_NONSURFACE_BASE=0x%08X\n",
RREG32(mmHDP_NONSURFACE_BASE));
dev_info(adev->dev, " HDP_NONSURFACE_INFO=0x%08X\n",
RREG32(mmHDP_NONSURFACE_INFO));
dev_info(adev->dev, " HDP_NONSURFACE_SIZE=0x%08X\n",
RREG32(mmHDP_NONSURFACE_SIZE));
dev_info(adev->dev, " HDP_MISC_CNTL=0x%08X\n",
RREG32(mmHDP_MISC_CNTL));
dev_info(adev->dev, " HDP_HOST_PATH_CNTL=0x%08X\n",
RREG32(mmHDP_HOST_PATH_CNTL));
for (i = 0, j = 0; i < 32; i++, j += 0x6) {
dev_info(adev->dev, " %d:\n", i);
dev_info(adev->dev, " 0x%04X=0x%08X\n",
0xb05 + j, RREG32(0xb05 + j));
dev_info(adev->dev, " 0x%04X=0x%08X\n",
0xb06 + j, RREG32(0xb06 + j));
dev_info(adev->dev, " 0x%04X=0x%08X\n",
0xb07 + j, RREG32(0xb07 + j));
dev_info(adev->dev, " 0x%04X=0x%08X\n",
0xb08 + j, RREG32(0xb08 + j));
dev_info(adev->dev, " 0x%04X=0x%08X\n",
0xb09 + j, RREG32(0xb09 + j));
}
dev_info(adev->dev, " BIF_FB_EN=0x%08X\n",
RREG32(mmBIF_FB_EN));
}
static int gmc_v8_0_soft_reset(void *handle)
{
struct amdgpu_mode_mc_save save;
@@ -1209,8 +1139,6 @@ static int gmc_v8_0_soft_reset(void *handle)
}
if (srbm_soft_reset) {
gmc_v8_0_print_status((void *)adev);
gmc_v8_0_mc_stop(adev, &save);
if (gmc_v8_0_wait_for_idle(adev)) {
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
@@ -1234,8 +1162,6 @@ static int gmc_v8_0_soft_reset(void *handle)
gmc_v8_0_mc_resume(adev, &save);
udelay(50);
gmc_v8_0_print_status((void *)adev);
}
return 0;
@@ -1313,11 +1239,11 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
}
static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
bool enable)
{
uint32_t data;
if (enable) {
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
data = RREG32(mmMC_HUB_MISC_HUB_CG);
data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
WREG32(mmMC_HUB_MISC_HUB_CG, data);
@@ -1393,11 +1319,11 @@ static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
}
static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
bool enable)
bool enable)
{
uint32_t data;
if (enable) {
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
data = RREG32(mmMC_HUB_MISC_HUB_CG);
data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
WREG32(mmMC_HUB_MISC_HUB_CG, data);
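
Both fiji_update_mc_* helpers above now check adev->cg_flags before enabling a feature, so medium-grain clock gating and light sleep engage only when the chip's flags advertise support. The guard is a plain bitmask test; a minimal sketch with invented flag bits in the AMD_CG_SUPPORT_* style:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CG_SUPPORT_MC_LS   (1u << 0)         /* illustrative, not the real values */
#define CG_SUPPORT_MC_MGCG (1u << 1)

static void update_light_sleep(uint32_t cg_flags, bool enable)
{
	/* mirrors: if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) */
	if (enable && (cg_flags & CG_SUPPORT_MC_LS))
		puts("MC light sleep: on");
	else
		puts("MC light sleep: off");
}

int main(void)
{
	update_light_sleep(CG_SUPPORT_MC_MGCG, true);  /* off: LS not supported */
	update_light_sleep(CG_SUPPORT_MC_LS, true);    /* on */
	return 0;
}
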
@@ -1497,6 +1423,7 @@ static int gmc_v8_0_set_powergating_state(void *handle,
}
const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.name = "gmc_v8_0",
.early_init = gmc_v8_0_early_init,
.late_init = gmc_v8_0_late_init,
.sw_init = gmc_v8_0_sw_init,
@@ -1508,7 +1435,6 @@ const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.is_idle = gmc_v8_0_is_idle,
.wait_for_idle = gmc_v8_0_wait_for_idle,
.soft_reset = gmc_v8_0_soft_reset,
.print_status = gmc_v8_0_print_status,
.set_clockgating_state = gmc_v8_0_set_clockgating_state,
.set_powergating_state = gmc_v8_0_set_powergating_state,
};

diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c

@@ -157,6 +157,7 @@ static int iceland_dpm_set_powergating_state(void *handle,
}
const struct amd_ip_funcs iceland_dpm_ip_funcs = {
.name = "iceland_dpm",
.early_init = iceland_dpm_early_init,
.late_init = NULL,
.sw_init = iceland_dpm_sw_init,
@@ -168,7 +169,6 @@ const struct amd_ip_funcs iceland_dpm_ip_funcs = {
.is_idle = NULL,
.wait_for_idle = NULL,
.soft_reset = NULL,
.print_status = NULL,
.set_clockgating_state = iceland_dpm_set_clockgating_state,
.set_powergating_state = iceland_dpm_set_powergating_state,
};

Some files were not shown because too many files have changed in this diff.