Merge tag 'drm-msm-next-2018-12-12' of git://people.freedesktop.org/~robclark/linux into drm-next

This time around, seeing some love for some older hw:

 - a2xx gpu support for apq8060 (hp touchpad) and imx5 (headless
   gpu-only mode)
 - a2xx gpummu support (a2xx was pre-iommu)
 - mdp4 display support for apq8060/touchpad

For display/dpu:

 - a big pile of continuing dpu fixes and cleanups

On the gpu side of things:

 - per-submit statistics and traceevents for better profiling
 - a6xx crashdump support
 - decouple get_iova() and page pinning, so we can unpin inactive
   BOs from physical memory while using softpin to lower CPU
   overhead
 - new interface to set debug names on GEM BOs and debugfs
   output improvements (see the usage sketch after this list)
 - additional submit flag to indicate buffers that should be
   dumped (so $debugfs/rd cmdstream dumping is useful with
   softpin + state-objects)
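
A rough usage sketch of the BO naming and teardown helpers, pieced
together from calls visible in the diffs below; the wrapper function,
buffer size, and name here are hypothetical, not part of this series:

/* Allocate and pin a kernel BO, label it so it can be identified in
 * the debugfs GEM listing (cf. "pm4fw", "crashdump" below), then tear
 * it down with the consolidated msm_gem_kernel_put() helper.
 */
static int example_bo_init(struct msm_gpu *gpu)
{
	struct drm_gem_object *bo;
	uint64_t iova;
	void *ptr;

	ptr = msm_gem_kernel_new(gpu->dev, SZ_4K, MSM_BO_UNCACHED,
			gpu->aspace, &bo, &iova);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	/* name shows up in the debugfs gem output */
	msm_gem_object_set_name(bo, "example");

	/* ... use bo/iova/ptr ... */

	/* unpin the iova, drop the vaddr, and put the object in one call */
	msm_gem_kernel_put(bo, gpu->aspace, true);
	return 0;
}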

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGvVvLPD9_Z4kyfGe98Y--byj6HbxHivEYSgF7Rq7=bFnw@mail.gmail.com
Dave Airlie 2018-12-13 10:29:14 +10:00
commit d675ba4b9e
129 changed files with 4659 additions and 5568 deletions


@@ -106,6 +106,7 @@ Required properties:
- clocks: Phandles to device clocks. See [1] for details on clock bindings.
- clock-names: the following clocks are required:
* "iface"
* "ref" (only required for new DTS files/entries)
For 28nm HPM/LP, 28nm 8960 PHYs:
- vddio-supply: phandle to vdd-io regulator device node
For 20nm PHY:


@@ -1,11 +1,13 @@
Qualcomm adreno/snapdragon GPU
Required properties:
- compatible: "qcom,adreno-XYZ.W", "qcom,adreno"
- compatible: "qcom,adreno-XYZ.W", "qcom,adreno" or
"amd,imageon-XYZ.W", "amd,imageon"
for example: "qcom,adreno-306.0", "qcom,adreno"
Note that you need to list the less specific "qcom,adreno" (since this
is what the device is matched on), in addition to the more specific
with the chip-id.
If "amd,imageon" is used, there should be no top level msm device.
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt signal from the gpu.
- clocks: device clocks


@@ -38,6 +38,8 @@ Required properties:
Optional properties:
- clock-names: the following clocks are optional:
* "lut_clk"
- qcom,lcdc-align-lsb: Boolean value indicating that LSB alignment should be
used for LCDC. This is only valid for 18bpp panels.
Example:


@@ -2,7 +2,7 @@
config DRM_MSM
tristate "MSM DRM"
depends on DRM
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST)
depends on OF && COMMON_CLK
depends on MMU
select QCOM_MDT_LOADER if ARCH_QCOM
@@ -11,7 +11,7 @@ config DRM_MSM
select DRM_PANEL
select SHMEM
select TMPFS
select QCOM_SCM
select QCOM_SCM if ARCH_QCOM
select WANT_DEV_COREDUMP
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE


@@ -6,6 +6,7 @@ ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
msm-y := \
adreno/adreno_device.o \
adreno/adreno_gpu.o \
adreno/a2xx_gpu.o \
adreno/a3xx_gpu.o \
adreno/a4xx_gpu.o \
adreno/a5xx_gpu.o \
@@ -14,6 +15,7 @@ msm-y := \
adreno/a6xx_gpu.o \
adreno/a6xx_gmu.o \
adreno/a6xx_hfi.o \
adreno/a6xx_gpu_state.o \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
@@ -68,11 +70,9 @@ msm-y := \
disp/dpu1/dpu_hw_util.o \
disp/dpu1/dpu_hw_vbif.o \
disp/dpu1/dpu_io_util.o \
disp/dpu1/dpu_irq.o \
disp/dpu1/dpu_kms.o \
disp/dpu1/dpu_mdss.o \
disp/dpu1/dpu_plane.o \
disp/dpu1/dpu_power_handle.o \
disp/dpu1/dpu_rm.o \
disp/dpu1/dpu_vbif.o \
msm_atomic.o \
@@ -90,10 +90,11 @@ msm-y := \
msm_perf.o \
msm_rd.o \
msm_ringbuffer.o \
msm_submitqueue.o
msm_submitqueue.o \
msm_gpu_tracepoints.o \
msm_gpummu.o
msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
disp/dpu1/dpu_dbg.o
msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o


@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
@@ -239,7 +239,63 @@ enum sq_tex_swiz {
enum sq_tex_filter {
SQ_TEX_FILTER_POINT = 0,
SQ_TEX_FILTER_BILINEAR = 1,
SQ_TEX_FILTER_BICUBIC = 2,
SQ_TEX_FILTER_BASEMAP = 2,
SQ_TEX_FILTER_USE_FETCH_CONST = 3,
};
enum sq_tex_aniso_filter {
SQ_TEX_ANISO_FILTER_DISABLED = 0,
SQ_TEX_ANISO_FILTER_MAX_1_1 = 1,
SQ_TEX_ANISO_FILTER_MAX_2_1 = 2,
SQ_TEX_ANISO_FILTER_MAX_4_1 = 3,
SQ_TEX_ANISO_FILTER_MAX_8_1 = 4,
SQ_TEX_ANISO_FILTER_MAX_16_1 = 5,
SQ_TEX_ANISO_FILTER_USE_FETCH_CONST = 7,
};
enum sq_tex_dimension {
SQ_TEX_DIMENSION_1D = 0,
SQ_TEX_DIMENSION_2D = 1,
SQ_TEX_DIMENSION_3D = 2,
SQ_TEX_DIMENSION_CUBE = 3,
};
enum sq_tex_border_color {
SQ_TEX_BORDER_COLOR_BLACK = 0,
SQ_TEX_BORDER_COLOR_WHITE = 1,
SQ_TEX_BORDER_COLOR_ACBYCR_BLACK = 2,
SQ_TEX_BORDER_COLOR_ACBCRY_BLACK = 3,
};
enum sq_tex_sign {
SQ_TEX_SIGN_UNISIGNED = 0,
SQ_TEX_SIGN_SIGNED = 1,
SQ_TEX_SIGN_UNISIGNED_BIASED = 2,
SQ_TEX_SIGN_GAMMA = 3,
};
enum sq_tex_endian {
SQ_TEX_ENDIAN_NONE = 0,
SQ_TEX_ENDIAN_8IN16 = 1,
SQ_TEX_ENDIAN_8IN32 = 2,
SQ_TEX_ENDIAN_16IN32 = 3,
};
enum sq_tex_clamp_policy {
SQ_TEX_CLAMP_POLICY_D3D = 0,
SQ_TEX_CLAMP_POLICY_OGL = 1,
};
enum sq_tex_num_format {
SQ_TEX_NUM_FORMAT_FRAC = 0,
SQ_TEX_NUM_FORMAT_INT = 1,
};
enum sq_tex_type {
SQ_TEX_TYPE_0 = 0,
SQ_TEX_TYPE_1 = 1,
SQ_TEX_TYPE_2 = 2,
SQ_TEX_TYPE_3 = 3,
};
#define REG_A2XX_RBBM_PATCH_RELEASE 0x00000001
@@ -323,6 +379,18 @@ static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_cln
}
#define REG_A2XX_MH_MMU_VA_RANGE 0x00000041
#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK 0x00000fff
#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT 0
static inline uint32_t A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(uint32_t val)
{
return ((val) << A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT) & A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK;
}
#define A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK 0xfffff000
#define A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT 12
static inline uint32_t A2XX_MH_MMU_VA_RANGE_VA_BASE(uint32_t val)
{
return ((val) << A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT) & A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK;
}
#define REG_A2XX_MH_MMU_PT_BASE 0x00000042
@@ -331,6 +399,8 @@ static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_cln
#define REG_A2XX_MH_MMU_TRAN_ERROR 0x00000044
#define REG_A2XX_MH_MMU_INVALIDATE 0x00000045
#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL 0x00000001
#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC 0x00000002
#define REG_A2XX_MH_MMU_MPU_BASE 0x00000046
@@ -389,12 +459,19 @@ static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_cln
#define REG_A2XX_RBBM_READ_ERROR 0x000003b3
#define REG_A2XX_RBBM_INT_CNTL 0x000003b4
#define A2XX_RBBM_INT_CNTL_RDERR_INT_MASK 0x00000001
#define A2XX_RBBM_INT_CNTL_DISPLAY_UPDATE_INT_MASK 0x00000002
#define A2XX_RBBM_INT_CNTL_GUI_IDLE_INT_MASK 0x00080000
#define REG_A2XX_RBBM_INT_STATUS 0x000003b5
#define REG_A2XX_RBBM_INT_ACK 0x000003b6
#define REG_A2XX_MASTER_INT_SIGNAL 0x000003b7
#define A2XX_MASTER_INT_SIGNAL_MH_INT_STAT 0x00000020
#define A2XX_MASTER_INT_SIGNAL_SQ_INT_STAT 0x04000000
#define A2XX_MASTER_INT_SIGNAL_CP_INT_STAT 0x40000000
#define A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT 0x80000000
#define REG_A2XX_RBBM_PERIPHID1 0x000003f9
@@ -467,6 +544,19 @@ static inline uint32_t A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(uint32_t val)
#define A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE 0x02000000
#define A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE 0x04000000
#define REG_A2XX_MH_INTERRUPT_MASK 0x00000a42
#define A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR 0x00000001
#define A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR 0x00000002
#define A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT 0x00000004
#define REG_A2XX_MH_INTERRUPT_STATUS 0x00000a43
#define REG_A2XX_MH_INTERRUPT_CLEAR 0x00000a44
#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1 0x00000a54
#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG2 0x00000a55
#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01
#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0
@@ -648,6 +738,18 @@ static inline uint32_t A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT(uint32_t val
#define REG_A2XX_RB_DEBUG_DATA 0x00000f27
#define REG_A2XX_RB_SURFACE_INFO 0x00002000
#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK 0x00003fff
#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT 0
static inline uint32_t A2XX_RB_SURFACE_INFO_SURFACE_PITCH(uint32_t val)
{
return ((val) << A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT) & A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK;
}
#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK 0x0000c000
#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT 14
static inline uint32_t A2XX_RB_SURFACE_INFO_MSAA_SAMPLES(uint32_t val)
{
return ((val) << A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT) & A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK;
}
#define REG_A2XX_RB_COLOR_INFO 0x00002001
#define A2XX_RB_COLOR_INFO_FORMAT__MASK 0x0000000f
@@ -679,7 +781,7 @@ static inline uint32_t A2XX_RB_COLOR_INFO_SWAP(uint32_t val)
#define A2XX_RB_COLOR_INFO_BASE__SHIFT 12
static inline uint32_t A2XX_RB_COLOR_INFO_BASE(uint32_t val)
{
return ((val >> 10) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
return ((val >> 12) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
}
#define REG_A2XX_RB_DEPTH_INFO 0x00002002
@@ -693,7 +795,7 @@ static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_form
#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
{
return ((val >> 10) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
return ((val >> 12) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
}
#define REG_A2XX_A225_RB_COLOR_INFO3 0x00002005
@@ -1757,6 +1859,36 @@ static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val)
#define REG_A2XX_COHER_STATUS_PM4 0x00000a2b
#define REG_A2XX_SQ_TEX_0 0x00000000
#define A2XX_SQ_TEX_0_TYPE__MASK 0x00000003
#define A2XX_SQ_TEX_0_TYPE__SHIFT 0
static inline uint32_t A2XX_SQ_TEX_0_TYPE(enum sq_tex_type val)
{
return ((val) << A2XX_SQ_TEX_0_TYPE__SHIFT) & A2XX_SQ_TEX_0_TYPE__MASK;
}
#define A2XX_SQ_TEX_0_SIGN_X__MASK 0x0000000c
#define A2XX_SQ_TEX_0_SIGN_X__SHIFT 2
static inline uint32_t A2XX_SQ_TEX_0_SIGN_X(enum sq_tex_sign val)
{
return ((val) << A2XX_SQ_TEX_0_SIGN_X__SHIFT) & A2XX_SQ_TEX_0_SIGN_X__MASK;
}
#define A2XX_SQ_TEX_0_SIGN_Y__MASK 0x00000030
#define A2XX_SQ_TEX_0_SIGN_Y__SHIFT 4
static inline uint32_t A2XX_SQ_TEX_0_SIGN_Y(enum sq_tex_sign val)
{
return ((val) << A2XX_SQ_TEX_0_SIGN_Y__SHIFT) & A2XX_SQ_TEX_0_SIGN_Y__MASK;
}
#define A2XX_SQ_TEX_0_SIGN_Z__MASK 0x000000c0
#define A2XX_SQ_TEX_0_SIGN_Z__SHIFT 6
static inline uint32_t A2XX_SQ_TEX_0_SIGN_Z(enum sq_tex_sign val)
{
return ((val) << A2XX_SQ_TEX_0_SIGN_Z__SHIFT) & A2XX_SQ_TEX_0_SIGN_Z__MASK;
}
#define A2XX_SQ_TEX_0_SIGN_W__MASK 0x00000300
#define A2XX_SQ_TEX_0_SIGN_W__SHIFT 8
static inline uint32_t A2XX_SQ_TEX_0_SIGN_W(enum sq_tex_sign val)
{
return ((val) << A2XX_SQ_TEX_0_SIGN_W__SHIFT) & A2XX_SQ_TEX_0_SIGN_W__MASK;
}
#define A2XX_SQ_TEX_0_CLAMP_X__MASK 0x00001c00
#define A2XX_SQ_TEX_0_CLAMP_X__SHIFT 10
static inline uint32_t A2XX_SQ_TEX_0_CLAMP_X(enum sq_tex_clamp val)
@@ -1775,14 +1907,46 @@ static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Z(enum sq_tex_clamp val)
{
return ((val) << A2XX_SQ_TEX_0_CLAMP_Z__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Z__MASK;
}
#define A2XX_SQ_TEX_0_PITCH__MASK 0xffc00000
#define A2XX_SQ_TEX_0_PITCH__MASK 0x7fc00000
#define A2XX_SQ_TEX_0_PITCH__SHIFT 22
static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val)
{
return ((val >> 5) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
}
#define A2XX_SQ_TEX_0_TILED 0x00000002
#define REG_A2XX_SQ_TEX_1 0x00000001
#define A2XX_SQ_TEX_1_FORMAT__MASK 0x0000003f
#define A2XX_SQ_TEX_1_FORMAT__SHIFT 0
static inline uint32_t A2XX_SQ_TEX_1_FORMAT(enum a2xx_sq_surfaceformat val)
{
return ((val) << A2XX_SQ_TEX_1_FORMAT__SHIFT) & A2XX_SQ_TEX_1_FORMAT__MASK;
}
#define A2XX_SQ_TEX_1_ENDIANNESS__MASK 0x000000c0
#define A2XX_SQ_TEX_1_ENDIANNESS__SHIFT 6
static inline uint32_t A2XX_SQ_TEX_1_ENDIANNESS(enum sq_tex_endian val)
{
return ((val) << A2XX_SQ_TEX_1_ENDIANNESS__SHIFT) & A2XX_SQ_TEX_1_ENDIANNESS__MASK;
}
#define A2XX_SQ_TEX_1_REQUEST_SIZE__MASK 0x00000300
#define A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT 8
static inline uint32_t A2XX_SQ_TEX_1_REQUEST_SIZE(uint32_t val)
{
return ((val) << A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT) & A2XX_SQ_TEX_1_REQUEST_SIZE__MASK;
}
#define A2XX_SQ_TEX_1_STACKED 0x00000400
#define A2XX_SQ_TEX_1_CLAMP_POLICY__MASK 0x00000800
#define A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT 11
static inline uint32_t A2XX_SQ_TEX_1_CLAMP_POLICY(enum sq_tex_clamp_policy val)
{
return ((val) << A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT) & A2XX_SQ_TEX_1_CLAMP_POLICY__MASK;
}
#define A2XX_SQ_TEX_1_BASE_ADDRESS__MASK 0xfffff000
#define A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT 12
static inline uint32_t A2XX_SQ_TEX_1_BASE_ADDRESS(uint32_t val)
{
return ((val >> 12) << A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT) & A2XX_SQ_TEX_1_BASE_ADDRESS__MASK;
}
#define REG_A2XX_SQ_TEX_2 0x00000002
#define A2XX_SQ_TEX_2_WIDTH__MASK 0x00001fff
@@ -1797,8 +1961,20 @@ static inline uint32_t A2XX_SQ_TEX_2_HEIGHT(uint32_t val)
{
return ((val) << A2XX_SQ_TEX_2_HEIGHT__SHIFT) & A2XX_SQ_TEX_2_HEIGHT__MASK;
}
#define A2XX_SQ_TEX_2_DEPTH__MASK 0xfc000000
#define A2XX_SQ_TEX_2_DEPTH__SHIFT 26
static inline uint32_t A2XX_SQ_TEX_2_DEPTH(uint32_t val)
{
return ((val) << A2XX_SQ_TEX_2_DEPTH__SHIFT) & A2XX_SQ_TEX_2_DEPTH__MASK;
}
#define REG_A2XX_SQ_TEX_3 0x00000003
#define A2XX_SQ_TEX_3_NUM_FORMAT__MASK 0x00000001
#define A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT 0
static inline uint32_t A2XX_SQ_TEX_3_NUM_FORMAT(enum sq_tex_num_format val)
{
return ((val) << A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT) & A2XX_SQ_TEX_3_NUM_FORMAT__MASK;
}
#define A2XX_SQ_TEX_3_SWIZ_X__MASK 0x0000000e
#define A2XX_SQ_TEX_3_SWIZ_X__SHIFT 1
static inline uint32_t A2XX_SQ_TEX_3_SWIZ_X(enum sq_tex_swiz val)
@@ -1823,6 +1999,12 @@ static inline uint32_t A2XX_SQ_TEX_3_SWIZ_W(enum sq_tex_swiz val)
{
return ((val) << A2XX_SQ_TEX_3_SWIZ_W__SHIFT) & A2XX_SQ_TEX_3_SWIZ_W__MASK;
}
#define A2XX_SQ_TEX_3_EXP_ADJUST__MASK 0x0007e000
#define A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT 13
static inline uint32_t A2XX_SQ_TEX_3_EXP_ADJUST(uint32_t val)
{
return ((val) << A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT) & A2XX_SQ_TEX_3_EXP_ADJUST__MASK;
}
#define A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK 0x00180000
#define A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT 19
static inline uint32_t A2XX_SQ_TEX_3_XY_MAG_FILTER(enum sq_tex_filter val)
@@ -1835,6 +2017,104 @@ static inline uint32_t A2XX_SQ_TEX_3_XY_MIN_FILTER(enum sq_tex_filter val)
{
return ((val) << A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK;
}
#define A2XX_SQ_TEX_3_MIP_FILTER__MASK 0x01800000
#define A2XX_SQ_TEX_3_MIP_FILTER__SHIFT 23
static inline uint32_t A2XX_SQ_TEX_3_MIP_FILTER(enum sq_tex_filter val)
{
return ((val) << A2XX_SQ_TEX_3_MIP_FILTER__SHIFT) & A2XX_SQ_TEX_3_MIP_FILTER__MASK;
}
#define A2XX_SQ_TEX_3_ANISO_FILTER__MASK 0x0e000000
#define A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT 25
static inline uint32_t A2XX_SQ_TEX_3_ANISO_FILTER(enum sq_tex_aniso_filter val)
{
return ((val) << A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT) & A2XX_SQ_TEX_3_ANISO_FILTER__MASK;
}
#define A2XX_SQ_TEX_3_BORDER_SIZE__MASK 0x80000000
#define A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT 31
static inline uint32_t A2XX_SQ_TEX_3_BORDER_SIZE(uint32_t val)
{
return ((val) << A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT) & A2XX_SQ_TEX_3_BORDER_SIZE__MASK;
}
#define REG_A2XX_SQ_TEX_4 0x00000004
#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK 0x00000001
#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT 0
static inline uint32_t A2XX_SQ_TEX_4_VOL_MAG_FILTER(enum sq_tex_filter val)
{
return ((val) << A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK;
}
#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK 0x00000002
#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT 1
static inline uint32_t A2XX_SQ_TEX_4_VOL_MIN_FILTER(enum sq_tex_filter val)
{
return ((val) << A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK;
}
#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK 0x0000003c
#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT 2
static inline uint32_t A2XX_SQ_TEX_4_MIP_MIN_LEVEL(uint32_t val)
{
return ((val) << A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK;
}
#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK 0x000003c0
#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT 6
static inline uint32_t A2XX_SQ_TEX_4_MIP_MAX_LEVEL(uint32_t val)
{
return ((val) << A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK;
}
#define A2XX_SQ_TEX_4_MAX_ANISO_WALK 0x00000400
#define A2XX_SQ_TEX_4_MIN_ANISO_WALK 0x00000800
#define A2XX_SQ_TEX_4_LOD_BIAS__MASK 0x003ff000
#define A2XX_SQ_TEX_4_LOD_BIAS__SHIFT 12
static inline uint32_t A2XX_SQ_TEX_4_LOD_BIAS(float val)
{
return ((((int32_t)(val * 32.0))) << A2XX_SQ_TEX_4_LOD_BIAS__SHIFT) & A2XX_SQ_TEX_4_LOD_BIAS__MASK;
}
#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK 0x07c00000
#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT 22
static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H(uint32_t val)
{
return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK;
}
#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK 0xf8000000
#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT 27
static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V(uint32_t val)
{
return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK;
}
#define REG_A2XX_SQ_TEX_5 0x00000005
#define A2XX_SQ_TEX_5_BORDER_COLOR__MASK 0x00000003
#define A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT 0
static inline uint32_t A2XX_SQ_TEX_5_BORDER_COLOR(enum sq_tex_border_color val)
{
return ((val) << A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT) & A2XX_SQ_TEX_5_BORDER_COLOR__MASK;
}
#define A2XX_SQ_TEX_5_FORCE_BCW_MAX 0x00000004
#define A2XX_SQ_TEX_5_TRI_CLAMP__MASK 0x00000018
#define A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT 3
static inline uint32_t A2XX_SQ_TEX_5_TRI_CLAMP(uint32_t val)
{
return ((val) << A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT) & A2XX_SQ_TEX_5_TRI_CLAMP__MASK;
}
#define A2XX_SQ_TEX_5_ANISO_BIAS__MASK 0x000001e0
#define A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT 5
static inline uint32_t A2XX_SQ_TEX_5_ANISO_BIAS(float val)
{
return ((((int32_t)(val * 1.0))) << A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT) & A2XX_SQ_TEX_5_ANISO_BIAS__MASK;
}
#define A2XX_SQ_TEX_5_DIMENSION__MASK 0x00000600
#define A2XX_SQ_TEX_5_DIMENSION__SHIFT 9
static inline uint32_t A2XX_SQ_TEX_5_DIMENSION(enum sq_tex_dimension val)
{
return ((val) << A2XX_SQ_TEX_5_DIMENSION__SHIFT) & A2XX_SQ_TEX_5_DIMENSION__MASK;
}
#define A2XX_SQ_TEX_5_PACKED_MIPS 0x00000800
#define A2XX_SQ_TEX_5_MIP_ADDRESS__MASK 0xfffff000
#define A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT 12
static inline uint32_t A2XX_SQ_TEX_5_MIP_ADDRESS(uint32_t val)
{
return ((val >> 12) << A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT) & A2XX_SQ_TEX_5_MIP_ADDRESS__MASK;
}
#endif /* A2XX_XML */
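
These generated helpers all follow the same pattern: shift the field
value into position, then mask to the field width, so individual fields
can simply be OR'd together. Address- and pitch-like fields additionally
pre-shift the caller's byte value into field units (note the val >> 12
in A2XX_SQ_TEX_1_BASE_ADDRESS() and the val >> 5 in A2XX_SQ_TEX_0_PITCH()).
A minimal sketch of composing one texture dword this way (arbitrary
example values, not taken from the patch):

uint32_t tex3 = A2XX_SQ_TEX_3_XY_MAG_FILTER(SQ_TEX_FILTER_BILINEAR) |
		A2XX_SQ_TEX_3_XY_MIN_FILTER(SQ_TEX_FILTER_BILINEAR) |
		A2XX_SQ_TEX_3_MIP_FILTER(SQ_TEX_FILTER_POINT);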


@@ -0,0 +1,492 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
#include "a2xx_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
extern bool hang_debug;
static void a2xx_dump(struct msm_gpu *gpu);
static bool a2xx_idle(struct msm_gpu *gpu);
static bool a2xx_me_init(struct msm_gpu *gpu)
{
struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 18);
/* All fields present (bits 9:0) */
OUT_RING(ring, 0x000003ff);
/* Disable/Enable Real-Time Stream processing (present but ignored) */
OUT_RING(ring, 0x00000000);
/* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
OUT_RING(ring, 0x00000000);
OUT_RING(ring, REG_A2XX_RB_SURFACE_INFO - 0x2000);
OUT_RING(ring, REG_A2XX_PA_SC_WINDOW_OFFSET - 0x2000);
OUT_RING(ring, REG_A2XX_VGT_MAX_VTX_INDX - 0x2000);
OUT_RING(ring, REG_A2XX_SQ_PROGRAM_CNTL - 0x2000);
OUT_RING(ring, REG_A2XX_RB_DEPTHCONTROL - 0x2000);
OUT_RING(ring, REG_A2XX_PA_SU_POINT_SIZE - 0x2000);
OUT_RING(ring, REG_A2XX_PA_SC_LINE_CNTL - 0x2000);
OUT_RING(ring, REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE - 0x2000);
/* Vertex and Pixel Shader Start Addresses in instructions
* (3 DWORDS per instruction) */
OUT_RING(ring, 0x80000180);
/* Maximum Contexts */
OUT_RING(ring, 0x00000001);
/* Write Confirm Interval and The CP will wait the
* wait_interval * 16 clocks between polling */
OUT_RING(ring, 0x00000000);
/* NQ and External Memory Swap */
OUT_RING(ring, 0x00000000);
/* protected mode error checking (0x1f2 is REG_AXXX_CP_INT_CNTL) */
OUT_RING(ring, 0x200001f2);
/* Disable header dumping and Header dump address */
OUT_RING(ring, 0x00000000);
/* Header dump size */
OUT_RING(ring, 0x00000000);
/* enable protected mode */
OUT_PKT3(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
gpu->funcs->flush(gpu, ring);
return a2xx_idle(gpu);
}
static int a2xx_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
dma_addr_t pt_base, tran_error;
uint32_t *ptr, len;
int i, ret;
msm_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
DBG("%s", gpu->name);
/* halt ME to avoid ucode upload issues on a20x */
gpu_write(gpu, REG_AXXX_CP_ME_CNTL, AXXX_CP_ME_CNTL_HALT);
gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0xfffffffe);
gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0xffffffff);
/* note: kgsl uses 0x00000001 after first reset on a22x */
gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0xffffffff);
msleep(30);
gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0x00000000);
if (adreno_is_a225(adreno_gpu))
gpu_write(gpu, REG_A2XX_SQ_FLOW_CONTROL, 0x18000000);
/* note: kgsl uses 0x0000ffff for a20x */
gpu_write(gpu, REG_A2XX_RBBM_CNTL, 0x00004442);
/* MPU: physical range */
gpu_write(gpu, REG_A2XX_MH_MMU_MPU_BASE, 0x00000000);
gpu_write(gpu, REG_A2XX_MH_MMU_MPU_END, 0xfffff000);
gpu_write(gpu, REG_A2XX_MH_MMU_CONFIG, A2XX_MH_MMU_CONFIG_MMU_ENABLE |
A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(BEH_TRAN_RNG));
/* same as parameters in adreno_gpu */
gpu_write(gpu, REG_A2XX_MH_MMU_VA_RANGE, SZ_16M |
A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(0xfff));
gpu_write(gpu, REG_A2XX_MH_MMU_PT_BASE, pt_base);
gpu_write(gpu, REG_A2XX_MH_MMU_TRAN_ERROR, tran_error);
gpu_write(gpu, REG_A2XX_MH_MMU_INVALIDATE,
A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
gpu_write(gpu, REG_A2XX_MH_ARBITER_CONFIG,
A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(16) |
A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE |
A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE |
A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(1) |
A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE |
A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE |
A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE |
A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(8) |
A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE |
A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE |
A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE |
A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE |
A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE);
if (!adreno_is_a20x(adreno_gpu))
gpu_write(gpu, REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1, 0x00032f07);
gpu_write(gpu, REG_A2XX_SQ_VS_PROGRAM, 0x00000000);
gpu_write(gpu, REG_A2XX_SQ_PS_PROGRAM, 0x00000000);
gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0); /* 0x200 for msm8960? */
gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0); /* 0x80/0x1a0 for a22x? */
/* note: gsl doesn't set this */
gpu_write(gpu, REG_A2XX_RBBM_DEBUG, 0x00080000);
gpu_write(gpu, REG_A2XX_RBBM_INT_CNTL,
A2XX_RBBM_INT_CNTL_RDERR_INT_MASK);
gpu_write(gpu, REG_AXXX_CP_INT_CNTL,
AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK |
AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK |
AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK |
AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK |
AXXX_CP_INT_CNTL_IB_ERROR_MASK |
AXXX_CP_INT_CNTL_IB1_INT_MASK |
AXXX_CP_INT_CNTL_RB_INT_MASK);
gpu_write(gpu, REG_A2XX_SQ_INT_CNTL, 0);
gpu_write(gpu, REG_A2XX_MH_INTERRUPT_MASK,
A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR |
A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR |
A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT);
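/* find i such that (SZ_16K << i) matches the GMEM size, i.e. i = 3/4/5
 * for 128K/256K/512K, and program it into RB_EDRAM_INFO:
 */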
for (i = 3; i <= 5; i++)
if ((SZ_16K << i) == adreno_gpu->gmem)
break;
gpu_write(gpu, REG_A2XX_RB_EDRAM_INFO, i);
ret = adreno_hw_init(gpu);
if (ret)
return ret;
/* NOTE: PM4/micro-engine firmware registers look to be the same
* for a2xx and a3xx.. we could possibly push that part down to
* adreno_gpu base class. Or push both PM4 and PFP but
* parameterize the pfp ucode addr/data registers..
*/
/* Load PM4: */
ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
DBG("loading PM4 ucode version: %x", ptr[1]);
gpu_write(gpu, REG_AXXX_CP_DEBUG,
AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
for (i = 1; i < len; i++)
gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
/* Load PFP: */
ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
DBG("loading PFP ucode version: %x", ptr[5]);
gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_ADDR, 0);
for (i = 1; i < len; i++)
gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_DATA, ptr[i]);
gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x000C0804);
/* clear ME_HALT to start micro engine */
gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
return a2xx_me_init(gpu) ? 0 : -EINVAL;
}
static void a2xx_recover(struct msm_gpu *gpu)
{
int i;
adreno_dump_info(gpu);
for (i = 0; i < 8; i++) {
printk("CP_SCRATCH_REG%d: %u\n", i,
gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
}
/* dump registers before resetting gpu, if enabled: */
if (hang_debug)
a2xx_dump(gpu);
gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 1);
gpu_read(gpu, REG_A2XX_RBBM_SOFT_RESET);
gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0);
adreno_recover(gpu);
}
static void a2xx_destroy(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a2xx_gpu *a2xx_gpu = to_a2xx_gpu(adreno_gpu);
DBG("%s", gpu->name);
adreno_gpu_cleanup(adreno_gpu);
kfree(a2xx_gpu);
}
static bool a2xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
if (!adreno_idle(gpu, gpu->rb[0]))
return false;
/* then wait for GPU to finish: */
if (spin_until(!(gpu_read(gpu, REG_A2XX_RBBM_STATUS) &
A2XX_RBBM_STATUS_GUI_ACTIVE))) {
DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
/* TODO maybe we need to reset GPU here to recover from hang? */
return false;
}
return true;
}
static irqreturn_t a2xx_irq(struct msm_gpu *gpu)
{
uint32_t mstatus, status;
mstatus = gpu_read(gpu, REG_A2XX_MASTER_INT_SIGNAL);
if (mstatus & A2XX_MASTER_INT_SIGNAL_MH_INT_STAT) {
status = gpu_read(gpu, REG_A2XX_MH_INTERRUPT_STATUS);
dev_warn(gpu->dev->dev, "MH_INT: %08X\n", status);
dev_warn(gpu->dev->dev, "MMU_PAGE_FAULT: %08X\n",
gpu_read(gpu, REG_A2XX_MH_MMU_PAGE_FAULT));
gpu_write(gpu, REG_A2XX_MH_INTERRUPT_CLEAR, status);
}
if (mstatus & A2XX_MASTER_INT_SIGNAL_CP_INT_STAT) {
status = gpu_read(gpu, REG_AXXX_CP_INT_STATUS);
/* only RB_INT is expected */
if (status & ~AXXX_CP_INT_CNTL_RB_INT_MASK)
dev_warn(gpu->dev->dev, "CP_INT: %08X\n", status);
gpu_write(gpu, REG_AXXX_CP_INT_ACK, status);
}
if (mstatus & A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT) {
status = gpu_read(gpu, REG_A2XX_RBBM_INT_STATUS);
dev_warn(gpu->dev->dev, "RBBM_INT: %08X\n", status);
gpu_write(gpu, REG_A2XX_RBBM_INT_ACK, status);
}
msm_gpu_retire(gpu);
return IRQ_HANDLED;
}
static const unsigned int a200_registers[] = {
0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A43, 0x0A45, 0x0A45,
0x0A4E, 0x0A4F, 0x0C2C, 0x0C2C, 0x0C30, 0x0C30, 0x0C38, 0x0C3C,
0x0C40, 0x0C40, 0x0C44, 0x0C44, 0x0C80, 0x0C86, 0x0C88, 0x0C94,
0x0C99, 0x0C9A, 0x0CA4, 0x0CA5, 0x0D00, 0x0D03, 0x0D06, 0x0D06,
0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x0F0C, 0x0F0C, 0x0F0E, 0x0F12,
0x0F26, 0x0F2A, 0x0F2C, 0x0F2C, 0x2000, 0x2002, 0x2006, 0x200F,
0x2080, 0x2082, 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184,
0x21F5, 0x21F7, 0x2200, 0x2208, 0x2280, 0x2283, 0x2293, 0x2294,
0x2300, 0x2308, 0x2312, 0x2312, 0x2316, 0x231D, 0x2324, 0x2326,
0x2380, 0x2383, 0x2400, 0x2402, 0x2406, 0x240F, 0x2480, 0x2482,
0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7,
0x2600, 0x2608, 0x2680, 0x2683, 0x2693, 0x2694, 0x2700, 0x2708,
0x2712, 0x2712, 0x2716, 0x271D, 0x2724, 0x2726, 0x2780, 0x2783,
0x4000, 0x4003, 0x4800, 0x4805, 0x4900, 0x4900, 0x4908, 0x4908,
~0 /* sentinel */
};
static const unsigned int a220_registers[] = {
0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A40, 0x0A42, 0x0A43,
0x0A45, 0x0A45, 0x0A4E, 0x0A4F, 0x0C30, 0x0C30, 0x0C38, 0x0C39,
0x0C3C, 0x0C3C, 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03,
0x0D05, 0x0D06, 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1,
0x0DC8, 0x0DD4, 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04,
0x0E17, 0x0E1E, 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0,
0x0ED4, 0x0ED7, 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x2002,
0x2006, 0x200F, 0x2080, 0x2082, 0x2100, 0x2102, 0x2104, 0x2109,
0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7, 0x2200, 0x2202,
0x2204, 0x2204, 0x2208, 0x2208, 0x2280, 0x2282, 0x2294, 0x2294,
0x2300, 0x2308, 0x2309, 0x230A, 0x2312, 0x2312, 0x2316, 0x2316,
0x2318, 0x231D, 0x2324, 0x2326, 0x2380, 0x2383, 0x2400, 0x2402,
0x2406, 0x240F, 0x2480, 0x2482, 0x2500, 0x2502, 0x2504, 0x2509,
0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7, 0x2600, 0x2602,
0x2604, 0x2606, 0x2608, 0x2608, 0x2680, 0x2682, 0x2694, 0x2694,
0x2700, 0x2708, 0x2712, 0x2712, 0x2716, 0x2716, 0x2718, 0x271D,
0x2724, 0x2726, 0x2780, 0x2783, 0x4000, 0x4003, 0x4800, 0x4805,
0x4900, 0x4900, 0x4908, 0x4908,
~0 /* sentinel */
};
static const unsigned int a225_registers[] = {
0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
0x0046, 0x0047, 0x013C, 0x013C, 0x0140, 0x014F, 0x01C0, 0x01C1,
0x01C3, 0x01C8, 0x01D5, 0x01D9, 0x01DC, 0x01DD, 0x01EA, 0x01EA,
0x01EE, 0x01F3, 0x01F6, 0x01F7, 0x01FC, 0x01FF, 0x0391, 0x0392,
0x039B, 0x039E, 0x03B2, 0x03B5, 0x03B7, 0x03B7, 0x03F8, 0x03FB,
0x0440, 0x0440, 0x0443, 0x0444, 0x044B, 0x044B, 0x044D, 0x044F,
0x0452, 0x0452, 0x0454, 0x045B, 0x047F, 0x047F, 0x0578, 0x0587,
0x05C9, 0x05C9, 0x05D0, 0x05D0, 0x0601, 0x0604, 0x0606, 0x0609,
0x060B, 0x060E, 0x0613, 0x0614, 0x0A29, 0x0A2B, 0x0A2F, 0x0A31,
0x0A40, 0x0A40, 0x0A42, 0x0A43, 0x0A45, 0x0A45, 0x0A4E, 0x0A4F,
0x0C01, 0x0C1D, 0x0C30, 0x0C30, 0x0C38, 0x0C39, 0x0C3C, 0x0C3C,
0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03, 0x0D05, 0x0D06,
0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x200F, 0x2080, 0x2082,
0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7,
0x2200, 0x2202, 0x2204, 0x2206, 0x2208, 0x2210, 0x2220, 0x2222,
0x2280, 0x2282, 0x2294, 0x2294, 0x2297, 0x2297, 0x2300, 0x230A,
0x2312, 0x2312, 0x2315, 0x2316, 0x2318, 0x231D, 0x2324, 0x2326,
0x2340, 0x2357, 0x2360, 0x2360, 0x2380, 0x2383, 0x2400, 0x240F,
0x2480, 0x2482, 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584,
0x25F5, 0x25F7, 0x2600, 0x2602, 0x2604, 0x2606, 0x2608, 0x2610,
0x2620, 0x2622, 0x2680, 0x2682, 0x2694, 0x2694, 0x2697, 0x2697,
0x2700, 0x270A, 0x2712, 0x2712, 0x2715, 0x2716, 0x2718, 0x271D,
0x2724, 0x2726, 0x2740, 0x2757, 0x2760, 0x2760, 0x2780, 0x2783,
0x4000, 0x4003, 0x4800, 0x4806, 0x4808, 0x4808, 0x4900, 0x4900,
0x4908, 0x4908,
~0 /* sentinel */
};
/* would be nice to not have to duplicate the _show() stuff with printk(): */
static void a2xx_dump(struct msm_gpu *gpu)
{
printk("status: %08x\n",
gpu_read(gpu, REG_A2XX_RBBM_STATUS));
adreno_dump(gpu);
}
static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
{
struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return ERR_PTR(-ENOMEM);
adreno_gpu_state_get(gpu, state);
state->rbbm_status = gpu_read(gpu, REG_A2XX_RBBM_STATUS);
return state;
}
/* Register offset defines for A2XX - copy of A3XX */
static const unsigned int a2xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
};
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
.hw_init = a2xx_hw_init,
.pm_suspend = msm_gpu_pm_suspend,
.pm_resume = msm_gpu_pm_resume,
.recover = a2xx_recover,
.submit = adreno_submit,
.flush = adreno_flush,
.active_ring = adreno_active_ring,
.irq = a2xx_irq,
.destroy = a2xx_destroy,
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
.show = adreno_show,
#endif
.gpu_state_get = a2xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
},
};
static const struct msm_gpu_perfcntr perfcntrs[] = {
/* TODO */
};
struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
{
struct a2xx_gpu *a2xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
int ret;
if (!pdev) {
dev_err(dev->dev, "no a2xx device\n");
ret = -ENXIO;
goto fail;
}
a2xx_gpu = kzalloc(sizeof(*a2xx_gpu), GFP_KERNEL);
if (!a2xx_gpu) {
ret = -ENOMEM;
goto fail;
}
adreno_gpu = &a2xx_gpu->base;
gpu = &adreno_gpu->base;
gpu->perfcntrs = perfcntrs;
gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
if (adreno_is_a20x(adreno_gpu))
adreno_gpu->registers = a200_registers;
else if (adreno_is_a225(adreno_gpu))
adreno_gpu->registers = a225_registers;
else
adreno_gpu->registers = a220_registers;
adreno_gpu->reg_offsets = a2xx_register_offsets;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)
goto fail;
if (!gpu->aspace) {
dev_err(dev->dev, "No memory protection without MMU\n");
ret = -ENXIO;
goto fail;
}
return gpu;
fail:
if (a2xx_gpu)
a2xx_destroy(&a2xx_gpu->base.base);
return ERR_PTR(ret);
}


@@ -0,0 +1,21 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
#ifndef __A2XX_GPU_H__
#define __A2XX_GPU_H__
#include "adreno_gpu.h"
/* arrg, somehow fb.h is getting pulled in: */
#undef ROP_COPY
#undef ROP_XOR
#include "a2xx.xml.h"
struct a2xx_gpu {
struct adreno_gpu base;
bool pm_enabled;
};
#define to_a2xx_gpu(x) container_of(x, struct a2xx_gpu, base)
#endif /* __A2XX_GPU_H__ */


@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)


@@ -481,7 +481,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
int ret;
if (!pdev) {
dev_err(dev->dev, "no a3xx device\n");
DRM_DEV_ERROR(dev->dev, "no a3xx device\n");
ret = -ENXIO;
goto fail;
}
@@ -528,7 +528,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
* to not be possible to restrict access, then we must
* implement a cmdstream validator.
*/
dev_err(dev->dev, "No memory protection without IOMMU\n");
DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
ret = -ENXIO;
goto fail;
}


@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)

View file

@@ -561,7 +561,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
int ret;
if (!pdev) {
dev_err(dev->dev, "no a4xx device\n");
DRM_DEV_ERROR(dev->dev, "no a4xx device\n");
ret = -ENXIO;
goto fail;
}
@@ -608,7 +608,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
* to not be possible to restrict access, then we must
* implement a cmdstream validator.
*/
dev_err(dev->dev, "No memory protection without IOMMU\n");
DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
ret = -ENXIO;
goto fail;
}


@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)


@@ -130,15 +130,13 @@ reset_set(void *data, u64 val)
adreno_gpu->fw[ADRENO_FW_PFP] = NULL;
if (a5xx_gpu->pm4_bo) {
if (a5xx_gpu->pm4_iova)
msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
drm_gem_object_put(a5xx_gpu->pm4_bo);
a5xx_gpu->pm4_bo = NULL;
}
if (a5xx_gpu->pfp_bo) {
if (a5xx_gpu->pfp_iova)
msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
drm_gem_object_put(a5xx_gpu->pfp_bo);
a5xx_gpu->pfp_bo = NULL;
}
@@ -173,7 +171,7 @@ int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
minor->debugfs_root, minor);
if (ret) {
dev_err(dev->dev, "could not install a5xx_debugfs_list\n");
DRM_DEV_ERROR(dev->dev, "could not install a5xx_debugfs_list\n");
return ret;
}


@@ -20,7 +20,6 @@
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/pm_opp.h>
#include <linux/nvmem-consumer.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include "msm_gem.h"
#include "msm_mmu.h"
@@ -511,13 +510,16 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu,
adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova);
if (IS_ERR(a5xx_gpu->pm4_bo)) {
ret = PTR_ERR(a5xx_gpu->pm4_bo);
a5xx_gpu->pm4_bo = NULL;
dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PM4: %d\n",
ret);
return ret;
}
msm_gem_object_set_name(a5xx_gpu->pm4_bo, "pm4fw");
}
if (!a5xx_gpu->pfp_bo) {
@@ -527,10 +529,12 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
if (IS_ERR(a5xx_gpu->pfp_bo)) {
ret = PTR_ERR(a5xx_gpu->pfp_bo);
a5xx_gpu->pfp_bo = NULL;
dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PFP: %d\n",
ret);
return ret;
}
msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw");
}
gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
@@ -841,20 +845,17 @@ static void a5xx_destroy(struct msm_gpu *gpu)
a5xx_preempt_fini(gpu);
if (a5xx_gpu->pm4_bo) {
if (a5xx_gpu->pm4_iova)
msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
drm_gem_object_put_unlocked(a5xx_gpu->pm4_bo);
}
if (a5xx_gpu->pfp_bo) {
if (a5xx_gpu->pfp_iova)
msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
drm_gem_object_put_unlocked(a5xx_gpu->pfp_bo);
}
if (a5xx_gpu->gpmu_bo) {
if (a5xx_gpu->gpmu_iova)
msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
drm_gem_object_put_unlocked(a5xx_gpu->gpmu_bo);
}
@@ -1028,7 +1029,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
struct msm_drm_private *priv = dev->dev_private;
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
dev_err(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
ring ? ring->id : -1, ring ? ring->seqno : 0,
gpu_read(gpu, REG_A5XX_RBBM_STATUS),
gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
@@ -1134,7 +1135,7 @@ static const u32 a5xx_registers[] = {
static void a5xx_dump(struct msm_gpu *gpu)
{
dev_info(gpu->dev->dev, "status: %08x\n",
DRM_DEV_INFO(gpu->dev->dev, "status: %08x\n",
gpu_read(gpu, REG_A5XX_RBBM_STATUS));
adreno_dump(gpu);
}
@@ -1211,10 +1212,6 @@ struct a5xx_gpu_state {
u32 *hlsqregs;
};
#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
interval, timeout)
static int a5xx_crashdumper_init(struct msm_gpu *gpu,
struct a5xx_crashdumper *dumper)
{
@@ -1222,18 +1219,12 @@ static int a5xx_crashdumper_init(struct msm_gpu *gpu,
SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
&dumper->bo, &dumper->iova);
if (!IS_ERR(dumper->ptr))
msm_gem_object_set_name(dumper->bo, "crashdump");
return PTR_ERR_OR_ZERO(dumper->ptr);
}
static void a5xx_crashdumper_free(struct msm_gpu *gpu,
struct a5xx_crashdumper *dumper)
{
msm_gem_put_iova(dumper->bo, gpu->aspace);
msm_gem_put_vaddr(dumper->bo);
drm_gem_object_put(dumper->bo);
}
static int a5xx_crashdumper_run(struct msm_gpu *gpu,
struct a5xx_crashdumper *dumper)
{
@@ -1326,7 +1317,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
if (a5xx_crashdumper_run(gpu, &dumper)) {
kfree(a5xx_state->hlsqregs);
a5xx_crashdumper_free(gpu, &dumper);
msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
return;
}
@@ -1334,7 +1325,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
count * sizeof(u32));
a5xx_crashdumper_free(gpu, &dumper);
msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
}
static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
@@ -1505,7 +1496,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
int ret;
if (!pdev) {
dev_err(dev->dev, "No A5XX device is defined\n");
DRM_DEV_ERROR(dev->dev, "No A5XX device is defined\n");
return ERR_PTR(-ENXIO);
}


@@ -298,7 +298,9 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
if (IS_ERR(ptr))
goto err;
return;
msm_gem_object_set_name(a5xx_gpu->gpmu_bo, "gpmufw");
while (cmds_size > 0) {
int i;
@@ -317,15 +319,4 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
a5xx_gpu->gpmu_dwords = dwords;
return;
err:
if (a5xx_gpu->gpmu_iova)
msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
if (a5xx_gpu->gpmu_bo)
drm_gem_object_put(a5xx_gpu->gpmu_bo);
a5xx_gpu->gpmu_bo = NULL;
a5xx_gpu->gpmu_iova = 0;
a5xx_gpu->gpmu_dwords = 0;
}


@@ -92,7 +92,7 @@ static void a5xx_preempt_timer(struct timer_list *t)
if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
return;
dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
DRM_DEV_ERROR(dev->dev, "%s: preemption timed out\n", gpu->name);
queue_work(priv->wq, &gpu->recover_work);
}
@@ -188,7 +188,7 @@ void a5xx_preempt_irq(struct msm_gpu *gpu)
status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
if (unlikely(status)) {
set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
dev_err(dev->dev, "%s: Preemption failed to complete\n",
DRM_DEV_ERROR(dev->dev, "%s: Preemption failed to complete\n",
gpu->name);
queue_work(priv->wq, &gpu->recover_work);
return;
@@ -245,6 +245,8 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
if (IS_ERR(ptr))
return PTR_ERR(ptr);
msm_gem_object_set_name(bo, "preempt");
a5xx_gpu->preempt_bo[ring->id] = bo;
a5xx_gpu->preempt_iova[ring->id] = iova;
a5xx_gpu->preempt[ring->id] = ptr;
@@ -267,18 +269,8 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
for (i = 0; i < gpu->nr_rings; i++) {
if (!a5xx_gpu->preempt_bo[i])
continue;
msm_gem_put_vaddr(a5xx_gpu->preempt_bo[i]);
if (a5xx_gpu->preempt_iova[i])
msm_gem_put_iova(a5xx_gpu->preempt_bo[i], gpu->aspace);
drm_gem_object_put(a5xx_gpu->preempt_bo[i]);
a5xx_gpu->preempt_bo[i] = NULL;
}
for (i = 0; i < gpu->nr_rings; i++)
msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace, true);
}
void a5xx_preempt_init(struct msm_gpu *gpu)


@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
@@ -501,7 +501,7 @@ enum a6xx_vfd_perfcounter_select {
PERF_VFDP_VS_STAGE_WAVES = 22,
};
enum a6xx_hslq_perfcounter_select {
enum a6xx_hlsq_perfcounter_select {
PERF_HLSQ_BUSY_CYCLES = 0,
PERF_HLSQ_STALL_CYCLES_UCHE = 1,
PERF_HLSQ_STALL_CYCLES_SP_STATE = 2,
@@ -2959,6 +2959,8 @@ static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
#define A6XX_GRAS_LRZ_CNTL_ENABLE 0x00000001
#define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002
#define A6XX_GRAS_LRZ_CNTL_GREATER 0x00000004
#define A6XX_GRAS_LRZ_CNTL_UNK3 0x00000008
#define A6XX_GRAS_LRZ_CNTL_UNK4 0x00000010
#define REG_A6XX_GRAS_UNKNOWN_8101 0x00008101
@@ -2997,6 +2999,13 @@ static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
#define REG_A6XX_GRAS_UNKNOWN_8110 0x00008110
#define REG_A6XX_GRAS_2D_BLIT_CNTL 0x00008400
#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK 0x0000ff00
#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT 8
static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_color_fmt val)
{
return ((val) << A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
}
#define A6XX_GRAS_2D_BLIT_CNTL_SCISSOR 0x00010000
#define REG_A6XX_GRAS_2D_SRC_TL_X 0x00008401
#define A6XX_GRAS_2D_SRC_TL_X_X__MASK 0x00ffff00
@@ -3449,6 +3458,7 @@ static inline uint32_t A6XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
return ((val) << A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
}
#define A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
#define A6XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
@@ -3642,6 +3652,9 @@ static inline uint32_t A6XX_RB_WINDOW_OFFSET_Y(uint32_t val)
#define REG_A6XX_RB_SAMPLE_COUNT_CONTROL 0x00008891
#define A6XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
#define REG_A6XX_RB_LRZ_CNTL 0x00008898
#define A6XX_RB_LRZ_CNTL_ENABLE 0x00000001
#define REG_A6XX_RB_UNKNOWN_88D0 0x000088d0
#define REG_A6XX_RB_BLIT_SCISSOR_TL 0x000088d1
@@ -3674,6 +3687,14 @@ static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_Y(uint32_t val)
return ((val) << A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_Y__MASK;
}
#define REG_A6XX_RB_MSAA_CNTL 0x000088d5
#define A6XX_RB_MSAA_CNTL_SAMPLES__MASK 0x00000018
#define A6XX_RB_MSAA_CNTL_SAMPLES__SHIFT 3
static inline uint32_t A6XX_RB_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
{
return ((val) << A6XX_RB_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_MSAA_CNTL_SAMPLES__MASK;
}
#define REG_A6XX_RB_BLIT_BASE_GMEM 0x000088d6
#define REG_A6XX_RB_BLIT_DST_INFO 0x000088d7
@@ -3684,6 +3705,12 @@ static inline uint32_t A6XX_RB_BLIT_DST_INFO_TILE_MODE(enum a6xx_tile_mode val)
return ((val) << A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK;
}
#define A6XX_RB_BLIT_DST_INFO_FLAGS 0x00000004
#define A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK 0x00000018
#define A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT 3
static inline uint32_t A6XX_RB_BLIT_DST_INFO_SAMPLES(enum a3xx_msaa_samples val)
{
return ((val) << A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT) & A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK;
}
#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK 0x00007f80
#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT 7
static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
@@ -3780,6 +3807,9 @@ static inline uint32_t A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_color_fmt val
{
return ((val) << A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
}
#define A6XX_RB_2D_BLIT_CNTL_SCISSOR 0x00010000
#define REG_A6XX_RB_UNKNOWN_8C01 0x00008c01
#define REG_A6XX_RB_2D_DST_INFO 0x00008c17
#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
@@ -4465,6 +4495,7 @@ static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
#define REG_A6XX_SP_BLEND_CNTL 0x0000a989
#define A6XX_SP_BLEND_CNTL_ENABLED 0x00000001
#define A6XX_SP_BLEND_CNTL_UNK8 0x00000100
#define A6XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
#define REG_A6XX_SP_SRGB_CNTL 0x0000a98a
#define A6XX_SP_SRGB_CNTL_SRGB_MRT0 0x00000001
@@ -4643,6 +4674,8 @@ static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val)
#define REG_A6XX_SP_UNKNOWN_AB20 0x0000ab20
#define REG_A6XX_SP_UNKNOWN_ACC0 0x0000acc0
#define REG_A6XX_SP_UNKNOWN_AE00 0x0000ae00
#define REG_A6XX_SP_UNKNOWN_AE03 0x0000ae03
@@ -4700,11 +4733,34 @@ static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap va
return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK;
}
#define A6XX_SP_PS_2D_SRC_INFO_FLAGS 0x00001000
#define A6XX_SP_PS_2D_SRC_INFO_FILTER 0x00010000
#define REG_A6XX_SP_PS_2D_SRC_SIZE 0x0000b4c1
#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK 0x00007fff
#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT 0
static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_WIDTH(uint32_t val)
{
return ((val) << A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK;
}
#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK 0x3fff8000
#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT 15
static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_HEIGHT(uint32_t val)
{
return ((val) << A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK;
}
#define REG_A6XX_SP_PS_2D_SRC_LO 0x0000b4c2
#define REG_A6XX_SP_PS_2D_SRC_HI 0x0000b4c3
#define REG_A6XX_SP_PS_2D_SRC_PITCH 0x0000b4c4
#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK 0x01fffe00
#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT 9
static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_PITCH(uint32_t val)
{
return ((val >> 6) << A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK;
}
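/* Editorial aside: pitch is programmed in 64-byte units -- the helper drops
 * the low six bits before shifting into the 0x01fffe00 field. For example a
 * 256-byte pitch encodes as (256 >> 6) << 9 = 0x800.
 */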
#define REG_A6XX_SP_PS_2D_SRC_FLAGS_LO 0x0000b4ca
#define REG_A6XX_SP_PS_2D_SRC_FLAGS_HI 0x0000b4cb
@@ -5033,6 +5089,12 @@ static inline uint32_t A6XX_TEX_CONST_0_MIPLVLS(uint32_t val)
{
return ((val) << A6XX_TEX_CONST_0_MIPLVLS__SHIFT) & A6XX_TEX_CONST_0_MIPLVLS__MASK;
}
#define A6XX_TEX_CONST_0_SAMPLES__MASK 0x00300000
#define A6XX_TEX_CONST_0_SAMPLES__SHIFT 20
static inline uint32_t A6XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val)
{
return ((val) << A6XX_TEX_CONST_0_SAMPLES__SHIFT) & A6XX_TEX_CONST_0_SAMPLES__MASK;
}
#define A6XX_TEX_CONST_0_FMT__MASK 0x3fc00000
#define A6XX_TEX_CONST_0_FMT__SHIFT 22
static inline uint32_t A6XX_TEX_CONST_0_FMT(enum a6xx_tex_fmt val)
@@ -5365,5 +5427,9 @@ static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000030
#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0 0x00000001
#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1 0x00000002
#endif /* A6XX_XML */

View file

@@ -51,10 +51,31 @@ static irqreturn_t a6xx_hfi_irq(int irq, void *data)
return IRQ_HANDLED;
}
/* Check to see if the GX rail is still powered */
static bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
{
u32 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
u32 val;
/* This can be called from gpu state code so make sure GMU is valid */
if (IS_ERR_OR_NULL(gmu->mmio))
return false;
val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
return !(val &
(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
}
/* Check to see if the GX rail is still powered */
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
u32 val;
/* This can be called from gpu state code so make sure GMU is valid */
if (IS_ERR_OR_NULL(gmu->mmio))
return false;
val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
return !(val &
(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
@@ -153,7 +174,7 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
val == 0xbabeface, 100, 10000);
if (ret)
dev_err(gmu->dev, "GMU firmware initialization timed out\n");
DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
return ret;
}
@@ -168,7 +189,7 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
val & 1, 100, 10000);
if (ret)
dev_err(gmu->dev, "Unable to start the HFI queues\n");
DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");
return ret;
}
@@ -209,7 +230,7 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
val & (1 << ack), 100, 10000);
if (ret)
dev_err(gmu->dev,
DRM_DEV_ERROR(gmu->dev,
"Timeout waiting for GMU OOB set %s: 0x%x\n",
name,
gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
@@ -251,7 +272,7 @@ static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
(val & 0x38) == 0x28, 1, 100);
if (ret) {
dev_err(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
@@ -273,7 +294,7 @@ static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
(val & 0x04), 100, 10000);
if (ret)
dev_err(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
@@ -317,7 +338,7 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
/* Check to see if the GMU really did slumber */
if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
!= 0x0f) {
dev_err(gmu->dev, "The GMU did not go into slumber\n");
DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
ret = -ETIMEDOUT;
}
}
@@ -339,23 +360,27 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
val & (1 << 1), 100, 10000);
if (ret) {
dev_err(gmu->dev, "Unable to power on the GPU RSC\n");
DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
return ret;
}
ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
!val, 100, 10000);
if (!ret) {
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
/* Re-enable the power counter */
gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
return 0;
if (ret) {
DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
return ret;
}
dev_err(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
return ret;
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
/* Set up CX GMU counter 0 to count busy ticks */
gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);
/* Enable the power counter */
gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
return 0;
}
static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
@@ -368,7 +393,7 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
val, val & (1 << 16), 100, 10000);
if (ret)
dev_err(gmu->dev, "Unable to power off the GPU RSC\n");
DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}
@@ -520,7 +545,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
/* Sanity check the size of the firmware that was loaded */
if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
dev_err(gmu->dev,
DRM_DEV_ERROR(gmu->dev,
"GMU firmware is bigger than the available region\n");
return -EINVAL;
}
@@ -764,7 +789,7 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
*/
if (ret)
dev_err(gmu->dev,
DRM_DEV_ERROR(gmu->dev,
"Unable to slumber GMU: status = 0%x/0%x\n",
gmu_read(gmu,
REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
@@ -843,7 +868,7 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
IOMMU_READ | IOMMU_WRITE);
if (ret) {
dev_err(gmu->dev, "Unable to map GMU buffer object\n");
DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");
for (i = i - 1; i >= 0; i--)
iommu_unmap(gmu->domain,
@@ -969,12 +994,12 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
}
if (j == pri_count) {
dev_err(dev,
DRM_DEV_ERROR(dev,
"Level %u not found in in the RPMh list\n",
level);
dev_err(dev, "Available levels:\n");
DRM_DEV_ERROR(dev, "Available levels:\n");
for (j = 0; j < pri_count; j++)
dev_err(dev, " %u\n", pri[j]);
DRM_DEV_ERROR(dev, " %u\n", pri[j]);
return -EINVAL;
}
@@ -1081,7 +1106,7 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
*/
ret = dev_pm_opp_of_add_table(gmu->dev);
if (ret) {
dev_err(gmu->dev, "Unable to set the OPP table for the GMU\n");
DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
return ret;
}
@@ -1122,13 +1147,13 @@ static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
IORESOURCE_MEM, name);
if (!res) {
dev_err(&pdev->dev, "Unable to find the %s registers\n", name);
DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
return ERR_PTR(-EINVAL);
}
ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!ret) {
dev_err(&pdev->dev, "Unable to map the %s registers\n", name);
DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
return ERR_PTR(-EINVAL);
}
@@ -1145,7 +1170,7 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
name, gmu);
if (ret) {
dev_err(&pdev->dev, "Unable to get interrupt %s\n", name);
DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s\n", name);
return ret;
}

View file

@@ -164,4 +164,7 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu);
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu);
bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu);
#endif

View file

@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)

View file

@@ -4,6 +4,7 @@
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_gpu_trace.h"
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
@@ -67,13 +68,36 @@ static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
}
static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
u64 iova)
{
OUT_PKT7(ring, CP_REG_TO_MEM, 3);
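/* Editorial note: per the PM4 CP_REG_TO_MEM encoding, bit 30 requests a
 * 64-bit memory write and (2 << 18) sets the register count to two, so the
 * LO/HI halves of the counter pair land at 'iova' in a single packet.
 */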
OUT_RING(ring, counter | (1 << 30) | (2 << 18));
OUT_RING(ring, lower_32_bits(iova));
OUT_RING(ring, upper_32_bits(iova));
}
static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
{
unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
struct msm_drm_private *priv = gpu->dev->dev_private;
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
rbmemptr_stats(ring, index, cpcycles_start));
/*
* For PM4 the GMU register offsets are calculated from the base of the
* GPU registers so we need to add 0x1a800 to the register value on A630
* to get the right value from PM4.
*/
get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
rbmemptr_stats(ring, index, alwayson_start));
/* Invalidate CCU depth and color */
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, PC_CCU_INVALIDATE_DEPTH);
@@ -98,6 +122,11 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
}
}
get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
rbmemptr_stats(ring, index, cpcycles_end));
get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
rbmemptr_stats(ring, index, alwayson_end));
/* Write the fence to the scratch register */
OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
OUT_RING(ring, submit->seqno);
@@ -112,6 +141,10 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, submit->seqno);
trace_msm_gpu_submit_flush(submit,
gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));
a6xx_flush(gpu, ring);
}
@@ -300,6 +333,8 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
return ret;
}
msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
}
gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
@@ -387,14 +422,6 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
/* Select CP0 to always count cycles */
gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
/* FIXME: not sure if this should live here or in a6xx_gmu.c */
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK,
0xff000000);
gmu_rmw(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0,
0xff, 0x20);
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE,
0x01);
gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
@@ -481,7 +508,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
static void a6xx_dump(struct msm_gpu *gpu)
{
dev_info(&gpu->pdev->dev, "status: %08x\n",
DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n",
gpu_read(gpu, REG_A6XX_RBBM_STATUS));
adreno_dump(gpu);
}
@@ -498,7 +525,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
adreno_dump_info(gpu);
for (i = 0; i < 8; i++)
dev_info(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
if (hang_debug)
@@ -645,33 +672,6 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
};
static const u32 a6xx_registers[] = {
0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001b,
0x001e, 0x0032, 0x0038, 0x003c, 0x0042, 0x0042, 0x0044, 0x0044,
0x0047, 0x0047, 0x0056, 0x0056, 0x00ad, 0x00ae, 0x00b0, 0x00fb,
0x0100, 0x011d, 0x0200, 0x020d, 0x0210, 0x0213, 0x0218, 0x023d,
0x0400, 0x04f9, 0x0500, 0x0500, 0x0505, 0x050b, 0x050e, 0x0511,
0x0533, 0x0533, 0x0540, 0x0555, 0x0800, 0x0808, 0x0810, 0x0813,
0x0820, 0x0821, 0x0823, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843,
0x084f, 0x086f, 0x0880, 0x088a, 0x08a0, 0x08ab, 0x08c0, 0x08c4,
0x08d0, 0x08dd, 0x08f0, 0x08f3, 0x0900, 0x0903, 0x0908, 0x0911,
0x0928, 0x093e, 0x0942, 0x094d, 0x0980, 0x0984, 0x098d, 0x0996,
0x0998, 0x099e, 0x09a0, 0x09a6, 0x09a8, 0x09ae, 0x09b0, 0x09b1,
0x09c2, 0x09c8, 0x0a00, 0x0a03, 0x0c00, 0x0c04, 0x0c06, 0x0c06,
0x0c10, 0x0cd9, 0x0e00, 0x0e0e, 0x0e10, 0x0e13, 0x0e17, 0x0e19,
0x0e1c, 0x0e2b, 0x0e30, 0x0e32, 0x0e38, 0x0e39, 0x8600, 0x8601,
0x8610, 0x861b, 0x8620, 0x8620, 0x8628, 0x862b, 0x8630, 0x8637,
0x8e01, 0x8e01, 0x8e04, 0x8e05, 0x8e07, 0x8e08, 0x8e0c, 0x8e0c,
0x8e10, 0x8e1c, 0x8e20, 0x8e25, 0x8e28, 0x8e28, 0x8e2c, 0x8e2f,
0x8e3b, 0x8e3e, 0x8e40, 0x8e43, 0x8e50, 0x8e5e, 0x8e70, 0x8e77,
0x9600, 0x9604, 0x9624, 0x9637, 0x9e00, 0x9e01, 0x9e03, 0x9e0e,
0x9e11, 0x9e16, 0x9e19, 0x9e19, 0x9e1c, 0x9e1c, 0x9e20, 0x9e23,
0x9e30, 0x9e31, 0x9e34, 0x9e34, 0x9e70, 0x9e72, 0x9e78, 0x9e79,
0x9e80, 0x9fff, 0xa600, 0xa601, 0xa603, 0xa603, 0xa60a, 0xa60a,
0xa610, 0xa617, 0xa630, 0xa630,
~0
};
static int a6xx_pm_resume(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -724,14 +724,6 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
return 0;
}
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
static void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
struct drm_printer *p)
{
adreno_show(gpu, state, p);
}
#endif
static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -746,8 +738,7 @@ static void a6xx_destroy(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
if (a6xx_gpu->sqe_bo) {
if (a6xx_gpu->sqe_iova)
msm_gem_put_iova(a6xx_gpu->sqe_bo, gpu->aspace);
msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
drm_gem_object_put_unlocked(a6xx_gpu->sqe_bo);
}
@@ -796,6 +787,8 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_busy = a6xx_gpu_busy,
.gpu_get_freq = a6xx_gmu_get_freq,
.gpu_set_freq = a6xx_gmu_set_freq,
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
},
.get_timestamp = a6xx_get_timestamp,
};
@@ -817,7 +810,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_gpu = &a6xx_gpu->base;
gpu = &adreno_gpu->base;
adreno_gpu->registers = a6xx_registers;
adreno_gpu->registers = NULL;
adreno_gpu->reg_offsets = a6xx_register_offsets;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);

View file

@@ -56,6 +56,14 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq);
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);
void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
struct drm_printer *p);
struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu);
int a6xx_gpu_state_put(struct msm_gpu_state *state);
#endif /* __A6XX_GPU_H__ */

File diff suppressed because it is too large

View file

@@ -0,0 +1,430 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
#ifndef _A6XX_CRASH_DUMP_H_
#define _A6XX_CRASH_DUMP_H_
#include "a6xx.xml.h"
#define A6XX_NUM_CONTEXTS 2
#define A6XX_NUM_SHADER_BANKS 3
static const u32 a6xx_gras_cluster[] = {
0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809d, 0x80a0, 0x80a6,
0x80af, 0x80f1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
0x8400, 0x840b,
};
static const u32 a6xx_ps_cluster_rac[] = {
0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881e, 0x8820, 0x8865,
0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
0x88c0, 0x88c1, 0x88d0, 0x88e3, 0x8900, 0x890c, 0x890f, 0x891a,
0x8c00, 0x8c01, 0x8c08, 0x8c10, 0x8c17, 0x8c1f, 0x8c26, 0x8c33,
};
static const u32 a6xx_ps_cluster_rbp[] = {
0x88f0, 0x88f3, 0x890d, 0x890e, 0x8927, 0x8928, 0x8bf0, 0x8bf1,
0x8c02, 0x8c07, 0x8c11, 0x8c16, 0x8c20, 0x8c25,
};
static const u32 a6xx_ps_cluster[] = {
0x9200, 0x9216, 0x9218, 0x9236, 0x9300, 0x9306,
};
static const u32 a6xx_fe_cluster[] = {
0x9300, 0x9306, 0x9800, 0x9806, 0x9b00, 0x9b07, 0xa000, 0xa009,
0xa00e, 0xa0ef, 0xa0f8, 0xa0f8,
};
static const u32 a6xx_pc_vs_cluster[] = {
0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9b00, 0x9b07,
};
#define CLUSTER_FE 0
#define CLUSTER_SP_VS 1
#define CLUSTER_PC_VS 2
#define CLUSTER_GRAS 3
#define CLUSTER_SP_PS 4
#define CLUSTER_PS 5
#define CLUSTER(_id, _reg, _sel_reg, _sel_val) \
{ .id = _id, .name = #_id,\
.registers = _reg, \
.count = ARRAY_SIZE(_reg), \
.sel_reg = _sel_reg, .sel_val = _sel_val }
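/* Editorial example: CLUSTER(CLUSTER_GRAS, a6xx_gras_cluster, 0, 0) expands
 * to { .id = CLUSTER_GRAS, .name = "CLUSTER_GRAS",
 *      .registers = a6xx_gras_cluster,
 *      .count = ARRAY_SIZE(a6xx_gras_cluster), .sel_reg = 0, .sel_val = 0 };
 * the #_id stringization is what gives each cluster its printable name.
 */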
static const struct a6xx_cluster {
u32 id;
const char *name;
const u32 *registers;
size_t count;
u32 sel_reg;
u32 sel_val;
} a6xx_clusters[] = {
CLUSTER(CLUSTER_GRAS, a6xx_gras_cluster, 0, 0),
CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rac, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x0),
CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rbp, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x9),
CLUSTER(CLUSTER_PS, a6xx_ps_cluster, 0, 0),
CLUSTER(CLUSTER_FE, a6xx_fe_cluster, 0, 0),
CLUSTER(CLUSTER_PC_VS, a6xx_pc_vs_cluster, 0, 0),
};
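/* Editorial note: where .sel_reg is nonzero, the dump code is expected to
 * write .sel_val into it before sampling the ranges -- that is how the RAC
 * (sel_val 0x0) and RBP (sel_val 0x9) views of the PS cluster share one
 * register address space.
 */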
static const u32 a6xx_sp_vs_hlsq_cluster[] = {
0xb800, 0xb803, 0xb820, 0xb822,
};
static const u32 a6xx_sp_vs_sp_cluster[] = {
0xa800, 0xa824, 0xa830, 0xa83c, 0xa840, 0xa864, 0xa870, 0xa895,
0xa8a0, 0xa8af, 0xa8c0, 0xa8c3,
};
static const u32 a6xx_hlsq_duplicate_cluster[] = {
0xbb10, 0xbb11, 0xbb20, 0xbb29,
};
static const u32 a6xx_hlsq_2d_duplicate_cluster[] = {
0xbd80, 0xbd80,
};
static const u32 a6xx_sp_duplicate_cluster[] = {
0xab00, 0xab00, 0xab04, 0xab05, 0xab10, 0xab1b, 0xab20, 0xab20,
};
static const u32 a6xx_tp_duplicate_cluster[] = {
0xb300, 0xb307, 0xb309, 0xb309, 0xb380, 0xb382,
};
static const u32 a6xx_sp_ps_hlsq_cluster[] = {
0xb980, 0xb980, 0xb982, 0xb987, 0xb990, 0xb99b, 0xb9a0, 0xb9a2,
0xb9c0, 0xb9c9,
};
static const u32 a6xx_sp_ps_hlsq_2d_cluster[] = {
0xbd80, 0xbd80,
};
static const u32 a6xx_sp_ps_sp_cluster[] = {
0xa980, 0xa9a8, 0xa9b0, 0xa9bc, 0xa9d0, 0xa9d3, 0xa9e0, 0xa9f3,
0xaa00, 0xaa00, 0xaa30, 0xaa31,
};
static const u32 a6xx_sp_ps_sp_2d_cluster[] = {
0xacc0, 0xacc0,
};
static const u32 a6xx_sp_ps_tp_cluster[] = {
0xb180, 0xb183, 0xb190, 0xb191,
};
static const u32 a6xx_sp_ps_tp_2d_cluster[] = {
0xb4c0, 0xb4d1,
};
#define CLUSTER_DBGAHB(_id, _base, _type, _reg) \
{ .name = #_id, .statetype = _type, .base = _base, \
.registers = _reg, .count = ARRAY_SIZE(_reg) }
static const struct a6xx_dbgahb_cluster {
const char *name;
u32 statetype;
u32 base;
const u32 *registers;
size_t count;
} a6xx_dbgahb_clusters[] = {
CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002e000, 0x41, a6xx_sp_vs_hlsq_cluster),
CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002a000, 0x21, a6xx_sp_vs_sp_cluster),
CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002e000, 0x41, a6xx_hlsq_duplicate_cluster),
CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002f000, 0x45, a6xx_hlsq_2d_duplicate_cluster),
CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002a000, 0x21, a6xx_sp_duplicate_cluster),
CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002c000, 0x1, a6xx_tp_duplicate_cluster),
CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002e000, 0x42, a6xx_sp_ps_hlsq_cluster),
CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002f000, 0x46, a6xx_sp_ps_hlsq_2d_cluster),
CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002a000, 0x22, a6xx_sp_ps_sp_cluster),
CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002b000, 0x26, a6xx_sp_ps_sp_2d_cluster),
CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002c000, 0x2, a6xx_sp_ps_tp_cluster),
CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002d000, 0x6, a6xx_sp_ps_tp_2d_cluster),
CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002e000, 0x42, a6xx_hlsq_duplicate_cluster),
CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002a000, 0x22, a6xx_sp_duplicate_cluster),
CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002c000, 0x2, a6xx_tp_duplicate_cluster),
};
static const u32 a6xx_hlsq_registers[] = {
0xbe00, 0xbe01, 0xbe04, 0xbe05, 0xbe08, 0xbe09, 0xbe10, 0xbe15,
0xbe20, 0xbe23,
};
static const u32 a6xx_sp_registers[] = {
0xae00, 0xae04, 0xae0c, 0xae0c, 0xae0f, 0xae2b, 0xae30, 0xae32,
0xae35, 0xae35, 0xae3a, 0xae3f, 0xae50, 0xae52,
};
static const u32 a6xx_tp_registers[] = {
0xb600, 0xb601, 0xb604, 0xb605, 0xb610, 0xb61b, 0xb620, 0xb623,
};
struct a6xx_registers {
const u32 *registers;
size_t count;
u32 val0;
u32 val1;
};
#define HLSQ_DBG_REGS(_base, _type, _array) \
{ .val0 = _base, .val1 = _type, .registers = _array, \
.count = ARRAY_SIZE(_array), }
static const struct a6xx_registers a6xx_hlsq_reglist[] = {
HLSQ_DBG_REGS(0x0002F800, 0x40, a6xx_hlsq_registers),
HLSQ_DBG_REGS(0x0002B800, 0x20, a6xx_sp_registers),
HLSQ_DBG_REGS(0x0002D800, 0x0, a6xx_tp_registers),
};
#define SHADER(_type, _size) \
{ .type = _type, .name = #_type, .size = _size }
static const struct a6xx_shader_block {
const char *name;
u32 type;
u32 size;
} a6xx_shader_blocks[] = {
SHADER(A6XX_TP0_TMO_DATA, 0x200),
SHADER(A6XX_TP0_SMO_DATA, 0x80),
SHADER(A6XX_TP0_MIPMAP_BASE_DATA, 0x3c0),
SHADER(A6XX_TP1_TMO_DATA, 0x200),
SHADER(A6XX_TP1_SMO_DATA, 0x80),
SHADER(A6XX_TP1_MIPMAP_BASE_DATA, 0x3c0),
SHADER(A6XX_SP_INST_DATA, 0x800),
SHADER(A6XX_SP_LB_0_DATA, 0x800),
SHADER(A6XX_SP_LB_1_DATA, 0x800),
SHADER(A6XX_SP_LB_2_DATA, 0x800),
SHADER(A6XX_SP_LB_3_DATA, 0x800),
SHADER(A6XX_SP_LB_4_DATA, 0x800),
SHADER(A6XX_SP_LB_5_DATA, 0x200),
SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x2000),
SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280),
SHADER(A6XX_SP_UAV_DATA, 0x80),
SHADER(A6XX_SP_INST_TAG, 0x80),
SHADER(A6XX_SP_CB_BINDLESS_TAG, 0x80),
SHADER(A6XX_SP_TMO_UMO_TAG, 0x80),
SHADER(A6XX_SP_SMO_TAG, 0x80),
SHADER(A6XX_SP_STATE_DATA, 0x3f),
SHADER(A6XX_HLSQ_CHUNK_CVS_RAM, 0x1c0),
SHADER(A6XX_HLSQ_CHUNK_CPS_RAM, 0x280),
SHADER(A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40),
SHADER(A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40),
SHADER(A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4),
SHADER(A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4),
SHADER(A6XX_HLSQ_CVS_MISC_RAM, 0x1c0),
SHADER(A6XX_HLSQ_CPS_MISC_RAM, 0x580),
SHADER(A6XX_HLSQ_INST_RAM, 0x800),
SHADER(A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800),
SHADER(A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800),
SHADER(A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8),
SHADER(A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4),
SHADER(A6XX_HLSQ_INST_RAM_TAG, 0x80),
SHADER(A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xc),
SHADER(A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10),
SHADER(A6XX_HLSQ_PWR_REST_RAM, 0x28),
SHADER(A6XX_HLSQ_PWR_REST_TAG, 0x14),
SHADER(A6XX_HLSQ_DATAPATH_META, 0x40),
SHADER(A6XX_HLSQ_FRONTEND_META, 0x40),
SHADER(A6XX_HLSQ_INDIRECT_META, 0x40),
};
static const u32 a6xx_rb_rac_registers[] = {
0x8e04, 0x8e05, 0x8e07, 0x8e08, 0x8e10, 0x8e1c, 0x8e20, 0x8e25,
0x8e28, 0x8e28, 0x8e2c, 0x8e2f, 0x8e50, 0x8e52,
};
static const u32 a6xx_rb_rbp_registers[] = {
0x8e01, 0x8e01, 0x8e0c, 0x8e0c, 0x8e3b, 0x8e3e, 0x8e40, 0x8e43,
0x8e53, 0x8e5f, 0x8e70, 0x8e77,
};
static const u32 a6xx_registers[] = {
/* RBBM */
0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001b,
0x001e, 0x0032, 0x0038, 0x003c, 0x0042, 0x0042, 0x0044, 0x0044,
0x0047, 0x0047, 0x0056, 0x0056, 0x00ad, 0x00ae, 0x00b0, 0x00fb,
0x0100, 0x011d, 0x0200, 0x020d, 0x0218, 0x023d, 0x0400, 0x04f9,
0x0500, 0x0500, 0x0505, 0x050b, 0x050e, 0x0511, 0x0533, 0x0533,
0x0540, 0x0555,
/* CP */
0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084f, 0x086f,
0x0880, 0x088a, 0x08a0, 0x08ab, 0x08c0, 0x08c4, 0x08d0, 0x08dd,
0x08f0, 0x08f3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093e,
0x0942, 0x094d, 0x0980, 0x0984, 0x098d, 0x0996, 0x0998, 0x099e,
0x09a0, 0x09a6, 0x09a8, 0x09ae, 0x09b0, 0x09b1, 0x09c2, 0x09c8,
0x0a00, 0x0a03,
/* VSC */
0x0c00, 0x0c04, 0x0c06, 0x0c06, 0x0c10, 0x0cd9, 0x0e00, 0x0e0e,
/* UCHE */
0x0e10, 0x0e13, 0x0e17, 0x0e19, 0x0e1c, 0x0e2b, 0x0e30, 0x0e32,
0x0e38, 0x0e39,
/* GRAS */
0x8600, 0x8601, 0x8610, 0x861b, 0x8620, 0x8620, 0x8628, 0x862b,
0x8630, 0x8637,
/* VPC */
0x9600, 0x9604, 0x9624, 0x9637,
/* PC */
0x9e00, 0x9e01, 0x9e03, 0x9e0e, 0x9e11, 0x9e16, 0x9e19, 0x9e19,
0x9e1c, 0x9e1c, 0x9e20, 0x9e23, 0x9e30, 0x9e31, 0x9e34, 0x9e34,
0x9e70, 0x9e72, 0x9e78, 0x9e79, 0x9e80, 0x9fff,
/* VFD */
0xa600, 0xa601, 0xa603, 0xa603, 0xa60a, 0xa60a, 0xa610, 0xa617,
0xa630, 0xa630,
};
#define REGS(_array, _sel_reg, _sel_val) \
{ .registers = _array, .count = ARRAY_SIZE(_array), \
.val0 = _sel_reg, .val1 = _sel_val }
static const struct a6xx_registers a6xx_reglist[] = {
REGS(a6xx_registers, 0, 0),
REGS(a6xx_rb_rac_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0),
REGS(a6xx_rb_rbp_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 9),
};
static const u32 a6xx_ahb_registers[] = {
/* RBBM_STATUS - RBBM_STATUS3 */
0x210, 0x213,
/* CP_STATUS_1 */
0x825, 0x825,
};
static const u32 a6xx_vbif_registers[] = {
0x3000, 0x3007, 0x300c, 0x3014, 0x3018, 0x302d, 0x3030, 0x3031,
0x3034, 0x3036, 0x303c, 0x303d, 0x3040, 0x3040, 0x3042, 0x3042,
0x3049, 0x3049, 0x3058, 0x3058, 0x305a, 0x3061, 0x3064, 0x3068,
0x306c, 0x306d, 0x3080, 0x3088, 0x308b, 0x308c, 0x3090, 0x3094,
0x3098, 0x3098, 0x309c, 0x309c, 0x30c0, 0x30c0, 0x30c8, 0x30c8,
0x30d0, 0x30d0, 0x30d8, 0x30d8, 0x30e0, 0x30e0, 0x3100, 0x3100,
0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
0x3156, 0x3156, 0x3158, 0x3158, 0x315a, 0x315a, 0x315c, 0x315c,
0x315e, 0x315e, 0x3160, 0x3160, 0x3162, 0x3162, 0x340c, 0x340c,
0x3410, 0x3410, 0x3800, 0x3801,
};
static const struct a6xx_registers a6xx_ahb_reglist[] = {
REGS(a6xx_ahb_registers, 0, 0),
REGS(a6xx_vbif_registers, 0, 0),
};
static const u32 a6xx_gmu_gx_registers[] = {
/* GMU GX */
0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b,
0x001e, 0x001e, 0x0020, 0x0023, 0x0026, 0x0026, 0x0028, 0x002b,
0x002e, 0x002e, 0x0030, 0x0033, 0x0036, 0x0036, 0x0038, 0x003b,
0x003e, 0x003e, 0x0040, 0x0043, 0x0046, 0x0046, 0x0080, 0x0084,
0x0100, 0x012b, 0x0140, 0x0140,
};
static const u32 a6xx_gmu_cx_registers[] = {
/* GMU CX */
0x4c00, 0x4c07, 0x4c10, 0x4c12, 0x4d00, 0x4d00, 0x4d07, 0x4d0a,
0x5000, 0x5004, 0x5007, 0x5008, 0x500b, 0x500c, 0x500f, 0x501c,
0x5024, 0x502a, 0x502d, 0x5030, 0x5040, 0x5053, 0x5087, 0x5089,
0x50a0, 0x50a2, 0x50a4, 0x50af, 0x50c0, 0x50c3, 0x50d0, 0x50d0,
0x50e4, 0x50e4, 0x50e8, 0x50ec, 0x5100, 0x5103, 0x5140, 0x5140,
0x5142, 0x5144, 0x514c, 0x514d, 0x514f, 0x5151, 0x5154, 0x5154,
0x5157, 0x5158, 0x515d, 0x515d, 0x5162, 0x5162, 0x5164, 0x5165,
0x5180, 0x5186, 0x5190, 0x519e, 0x51c0, 0x51c0, 0x51c5, 0x51cc,
0x51e0, 0x51e2, 0x51f0, 0x51f0, 0x5200, 0x5201,
/* GPU RSCC */
0x8c8c, 0x8c8c, 0x8d01, 0x8d02, 0x8f40, 0x8f42, 0x8f44, 0x8f47,
0x8f4c, 0x8f87, 0x8fec, 0x8fef, 0x8ff4, 0x902f, 0x9094, 0x9097,
0x909c, 0x90d7, 0x913c, 0x913f, 0x9144, 0x917f,
/* GMU AO */
0x9300, 0x9316, 0x9400, 0x9400,
/* GPU CC */
0x9800, 0x9812, 0x9840, 0x9852, 0x9c00, 0x9c04, 0x9c07, 0x9c0b,
0x9c15, 0x9c1c, 0x9c1e, 0x9c2d, 0x9c3c, 0x9c3d, 0x9c3f, 0x9c40,
0x9c42, 0x9c49, 0x9c58, 0x9c5a, 0x9d40, 0x9d5e, 0xa000, 0xa002,
0xa400, 0xa402, 0xac00, 0xac02, 0xb000, 0xb002, 0xb400, 0xb402,
0xb800, 0xb802,
/* GPU CC ACD */
0xbc00, 0xbc16, 0xbc20, 0xbc27,
};
static const struct a6xx_registers a6xx_gmu_reglist[] = {
REGS(a6xx_gmu_cx_registers, 0, 0),
REGS(a6xx_gmu_gx_registers, 0, 0),
};
static const struct a6xx_indexed_registers {
const char *name;
u32 addr;
u32 data;
u32 count;
} a6xx_indexed_reglist[] = {
{ "CP_SEQ_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
REG_A6XX_CP_SQE_STAT_DATA, 0x33 },
{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
REG_A6XX_CP_DRAW_STATE_DATA, 0x100 },
{ "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x6000 },
{ "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
REG_A6XX_CP_ROQ_DBG_DATA, 0x400 },
};
static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
"CP_MEMPOOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060,
};
#define DEBUGBUS(_id, _count) { .id = _id, .name = #_id, .count = _count }
static const struct a6xx_debugbus_block {
const char *name;
u32 id;
u32 count;
} a6xx_debugbus_blocks[] = {
DEBUGBUS(A6XX_DBGBUS_CP, 0x100),
DEBUGBUS(A6XX_DBGBUS_RBBM, 0x100),
DEBUGBUS(A6XX_DBGBUS_HLSQ, 0x100),
DEBUGBUS(A6XX_DBGBUS_UCHE, 0x100),
DEBUGBUS(A6XX_DBGBUS_DPM, 0x100),
DEBUGBUS(A6XX_DBGBUS_TESS, 0x100),
DEBUGBUS(A6XX_DBGBUS_PC, 0x100),
DEBUGBUS(A6XX_DBGBUS_VFDP, 0x100),
DEBUGBUS(A6XX_DBGBUS_VPC, 0x100),
DEBUGBUS(A6XX_DBGBUS_TSE, 0x100),
DEBUGBUS(A6XX_DBGBUS_RAS, 0x100),
DEBUGBUS(A6XX_DBGBUS_VSC, 0x100),
DEBUGBUS(A6XX_DBGBUS_COM, 0x100),
DEBUGBUS(A6XX_DBGBUS_LRZ, 0x100),
DEBUGBUS(A6XX_DBGBUS_A2D, 0x100),
DEBUGBUS(A6XX_DBGBUS_CCUFCHE, 0x100),
DEBUGBUS(A6XX_DBGBUS_RBP, 0x100),
DEBUGBUS(A6XX_DBGBUS_DCS, 0x100),
DEBUGBUS(A6XX_DBGBUS_DBGC, 0x100),
DEBUGBUS(A6XX_DBGBUS_GMU_GX, 0x100),
DEBUGBUS(A6XX_DBGBUS_TPFCHE, 0x100),
DEBUGBUS(A6XX_DBGBUS_GPC, 0x100),
DEBUGBUS(A6XX_DBGBUS_LARC, 0x100),
DEBUGBUS(A6XX_DBGBUS_HLSQ_SPTP, 0x100),
DEBUGBUS(A6XX_DBGBUS_RB_0, 0x100),
DEBUGBUS(A6XX_DBGBUS_RB_1, 0x100),
DEBUGBUS(A6XX_DBGBUS_UCHE_WRAPPER, 0x100),
DEBUGBUS(A6XX_DBGBUS_CCU_0, 0x100),
DEBUGBUS(A6XX_DBGBUS_CCU_1, 0x100),
DEBUGBUS(A6XX_DBGBUS_VFD_0, 0x100),
DEBUGBUS(A6XX_DBGBUS_VFD_1, 0x100),
DEBUGBUS(A6XX_DBGBUS_VFD_2, 0x100),
DEBUGBUS(A6XX_DBGBUS_VFD_3, 0x100),
DEBUGBUS(A6XX_DBGBUS_SP_0, 0x100),
DEBUGBUS(A6XX_DBGBUS_SP_1, 0x100),
DEBUGBUS(A6XX_DBGBUS_TPL1_0, 0x100),
DEBUGBUS(A6XX_DBGBUS_TPL1_1, 0x100),
DEBUGBUS(A6XX_DBGBUS_TPL1_2, 0x100),
DEBUGBUS(A6XX_DBGBUS_TPL1_3, 0x100),
};
static const struct a6xx_debugbus_block a6xx_cx_debugbus_blocks[] = {
DEBUGBUS(A6XX_DBGBUS_GMU_CX, 0x100),
DEBUGBUS(A6XX_DBGBUS_CX, 0x100),
};
#endif

View file

@@ -91,7 +91,7 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);
if (ret) {
dev_err(gmu->dev,
DRM_DEV_ERROR(gmu->dev,
"Message %s id %d timed out waiting for response\n",
a6xx_hfi_msg_id[id], seqnum);
return -ETIMEDOUT;
@@ -110,7 +110,7 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
/* If the queue is empty our response never made it */
if (!ret) {
dev_err(gmu->dev,
DRM_DEV_ERROR(gmu->dev,
"The HFI response queue is unexpectedly empty\n");
return -ENOENT;
@@ -120,20 +120,20 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
struct a6xx_hfi_msg_error *error =
(struct a6xx_hfi_msg_error *) &resp;
dev_err(gmu->dev, "GMU firmware error %d\n",
DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
error->code);
continue;
}
if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
dev_err(gmu->dev,
DRM_DEV_ERROR(gmu->dev,
"Unexpected message id %d on the response queue\n",
HFI_HEADER_SEQNUM(resp.ret_header));
continue;
}
if (resp.error) {
dev_err(gmu->dev,
DRM_DEV_ERROR(gmu->dev,
"Message %s id %d returned error %d\n",
a6xx_hfi_msg_id[id], seqnum, resp.error);
return -EINVAL;
@@ -163,7 +163,7 @@ static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
if (ret) {
dev_err(gmu->dev, "Unable to send message %s id %d\n",
DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
a6xx_hfi_msg_id[id], seqnum);
return ret;
}
@@ -317,7 +317,7 @@ void a6xx_hfi_stop(struct a6xx_gmu *gmu)
continue;
if (queue->header->read_index != queue->header->write_index)
dev_err(gmu->dev, "HFI queue %d is not empty\n", i);
DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);
queue->header->read_index = 0;
queue->header->write_index = 0;

View file

@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
@@ -339,6 +339,15 @@ static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val)
#define REG_AXXX_CP_STATE_DEBUG_DATA 0x000001ed
#define REG_AXXX_CP_INT_CNTL 0x000001f2
#define AXXX_CP_INT_CNTL_SW_INT_MASK 0x00080000
#define AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK 0x00800000
#define AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK 0x01000000
#define AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK 0x02000000
#define AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK 0x04000000
#define AXXX_CP_INT_CNTL_IB_ERROR_MASK 0x08000000
#define AXXX_CP_INT_CNTL_IB2_INT_MASK 0x20000000
#define AXXX_CP_INT_CNTL_IB1_INT_MASK 0x40000000
#define AXXX_CP_INT_CNTL_RB_INT_MASK 0x80000000
#define REG_AXXX_CP_INT_STATUS 0x000001f3

View file

@@ -27,6 +27,39 @@ module_param_named(hang_debug, hang_debug, bool, 0600);
static const struct adreno_info gpulist[] = {
{
.rev = ADRENO_REV(2, 0, 0, 0),
.revn = 200,
.name = "A200",
.fw = {
[ADRENO_FW_PM4] = "yamato_pm4.fw",
[ADRENO_FW_PFP] = "yamato_pfp.fw",
},
.gmem = SZ_256K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a2xx_gpu_init,
}, { /* a200 on i.MX51 has only 128 KiB gmem */
.rev = ADRENO_REV(2, 0, 0, 1),
.revn = 201,
.name = "A200",
.fw = {
[ADRENO_FW_PM4] = "yamato_pm4.fw",
[ADRENO_FW_PFP] = "yamato_pfp.fw",
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a2xx_gpu_init,
}, {
.rev = ADRENO_REV(2, 2, 0, ANY_ID),
.revn = 220,
.name = "A220",
.fw = {
[ADRENO_FW_PM4] = "leia_pm4_470.fw",
[ADRENO_FW_PFP] = "leia_pfp_470.fw",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a2xx_gpu_init,
}, {
.rev = ADRENO_REV(3, 0, 5, ANY_ID),
.revn = 305,
.name = "A305",
@@ -196,7 +229,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
dev_err(dev->dev, "Couldn't power up the GPU: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret);
return NULL;
}
@@ -205,7 +238,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
pm_runtime_put_autosuspend(&pdev->dev);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
return NULL;
}
@@ -238,7 +271,8 @@ static int find_chipid(struct device *dev, struct adreno_rev *rev)
if (ret == 0) {
unsigned int r, patch;
if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2) {
if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2 ||
sscanf(compat, "amd,imageon-%u.%u", &r, &patch) == 2) {
rev->core = r / 100;
r %= 100;
rev->major = r / 10;
@@ -253,7 +287,7 @@ static int find_chipid(struct device *dev, struct adreno_rev *rev)
/* and if that fails, fall back to legacy "qcom,chipid" property: */
ret = of_property_read_u32(node, "qcom,chipid", &chipid);
if (ret) {
dev_err(dev, "could not parse qcom,chipid: %d\n", ret);
DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret);
return ret;
}
@@ -274,6 +308,7 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
static struct adreno_platform_config config = {};
const struct adreno_info *info;
struct drm_device *drm = dev_get_drvdata(master);
struct msm_drm_private *priv = drm->dev_private;
struct msm_gpu *gpu;
int ret;
@@ -296,6 +331,8 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
DBG("Found GPU: %u.%u.%u.%u", config.rev.core, config.rev.major,
config.rev.minor, config.rev.patchid);
priv->is_a2xx = config.rev.core == 2;
gpu = info->init(drm);
if (IS_ERR(gpu)) {
dev_warn(drm->dev, "failed to load adreno gpu\n");
@@ -323,9 +360,37 @@ static const struct component_ops a3xx_ops = {
.unbind = adreno_unbind,
};
static void adreno_device_register_headless(void)
{
/* On imx5 there is no top-level mdp/dpu node;
 * create a dummy "msm" device for the driver in that case.
 */
struct platform_device_info dummy_info = {
.parent = NULL,
.name = "msm",
.id = -1,
.res = NULL,
.num_res = 0,
.data = NULL,
.size_data = 0,
.dma_mask = ~0,
};
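/* Editorial note: .id = -1 is PLATFORM_DEVID_NONE, so the dummy device is
 * named plain "msm", which is what the msm drm platform driver binds to.
 */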
platform_device_register_full(&dummy_info);
}
static int adreno_probe(struct platform_device *pdev)
{
return component_add(&pdev->dev, &a3xx_ops);
int ret;
ret = component_add(&pdev->dev, &a3xx_ops);
if (ret)
return ret;
if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon"))
adreno_device_register_headless();
return 0;
}
static int adreno_remove(struct platform_device *pdev)
@@ -337,6 +402,8 @@ static int adreno_remove(struct platform_device *pdev)
static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,adreno" },
{ .compatible = "qcom,adreno-3xx" },
/* for compatibility with imx5 gpu: */
{ .compatible = "amd,imageon" },
/* for backwards compat w/ downstream kgsl DT files: */
{ .compatible = "qcom,kgsl-3d0" },
{}

View file

@@ -89,12 +89,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
ret = request_firmware_direct(&fw, newname, drm->dev);
if (!ret) {
dev_info(drm->dev, "loaded %s from new location\n",
DRM_DEV_INFO(drm->dev, "loaded %s from new location\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_NEW;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
dev_err(drm->dev, "failed to load %s: %d\n",
DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
newname, ret);
fw = ERR_PTR(ret);
goto out;
@@ -109,12 +109,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
ret = request_firmware_direct(&fw, fwname, drm->dev);
if (!ret) {
dev_info(drm->dev, "loaded %s from legacy location\n",
DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_LEGACY;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
dev_err(drm->dev, "failed to load %s: %d\n",
DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
fwname, ret);
fw = ERR_PTR(ret);
goto out;
@@ -130,19 +130,19 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
ret = request_firmware(&fw, newname, drm->dev);
if (!ret) {
dev_info(drm->dev, "loaded %s with helper\n",
DRM_DEV_INFO(drm->dev, "loaded %s with helper\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_HELPER;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
dev_err(drm->dev, "failed to load %s: %d\n",
DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
newname, ret);
fw = ERR_PTR(ret);
goto out;
}
}
dev_err(drm->dev, "failed to load %s\n", fwname);
DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname);
fw = ERR_PTR(-ENOENT);
out:
kfree(newname);
@@ -209,14 +209,6 @@ int adreno_hw_init(struct msm_gpu *gpu)
if (!ring)
continue;
ret = msm_gem_get_iova(ring->bo, gpu->aspace, &ring->iova);
if (ret) {
ring->iova = 0;
dev_err(gpu->dev->dev,
"could not map ringbuffer %d: %d\n", i, ret);
return ret;
}
ring->cur = ring->start;
ring->next = ring->start;
@@ -277,7 +269,7 @@ void adreno_recover(struct msm_gpu *gpu)
ret = msm_gpu_hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
/* hmm, oh well? */
}
}
@@ -319,16 +311,27 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
*/
OUT_PKT3(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, HLSQ_FLUSH);
OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
OUT_RING(ring, 0x00000000);
}
/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);
/* wait for idle before cache flush/interrupt */
OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
OUT_RING(ring, 0x00000000);
if (!adreno_is_a2xx(adreno_gpu)) {
/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);
} else {
/* BIT(31) means something else on a2xx */
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS);
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);
OUT_PKT3(ring, CP_INTERRUPT, 1);
OUT_RING(ring, 0x80000000);
}
#if 0
if (adreno_is_a3xx(adreno_gpu)) {
@@ -406,7 +409,7 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
size = j + 1;
if (size) {
state->ring[i].data = kmalloc(size << 2, GFP_KERNEL);
state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL);
if (state->ring[i].data) {
memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
state->ring[i].data_size = size << 2;
@@ -414,6 +417,10 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
}
}
/* Some targets prefer to collect their own registers */
if (!adreno_gpu->registers)
return 0;
/* Count the number of registers */
for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
count += adreno_gpu->registers[i + 1] -
@@ -445,7 +452,7 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state)
int i;
for (i = 0; i < ARRAY_SIZE(state->ring); i++)
kfree(state->ring[i].data);
kvfree(state->ring[i].data);
for (i = 0; state->bos && i < state->nr_bos; i++)
kvfree(state->bos[i].data);
@@ -475,34 +482,74 @@ int adreno_gpu_state_put(struct msm_gpu_state *state)
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
static void adreno_show_object(struct drm_printer *p, u32 *ptr, int len)
static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
{
void *buf;
size_t buf_itr = 0, buffer_size;
char out[ASCII85_BUFSZ];
long l, datalen, i;
long l;
int i;
if (!ptr || !len)
return;
if (!src || !len)
return NULL;
l = ascii85_encode_len(len);
/*
* Only dump the non-zero part of the buffer - rarely will any data
* completely fill the entire allocated size of the buffer
* Ascii85 outputs either a 5 byte string or a 1 byte string. So we
* account for the worst case of 5 bytes per dword plus the 1 for '\0'
*/
for (datalen = 0, i = 0; i < len >> 2; i++) {
if (ptr[i])
datalen = (i << 2) + 1;
}
buffer_size = (l * 5) + 1;
/* Skip printing the object if it is empty */
if (datalen == 0)
buf = kvmalloc(buffer_size, GFP_KERNEL);
if (!buf)
return NULL;
for (i = 0; i < l; i++)
buf_itr += snprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
ascii85_encode(src[i], out));
return buf;
}
/* len is expected to be in bytes */
static void adreno_show_object(struct drm_printer *p, void **ptr, int len,
bool *encoded)
{
if (!*ptr || !len)
return;
l = ascii85_encode_len(datalen);
if (!*encoded) {
long datalen, i;
u32 *buf = *ptr;
/*
* Only dump the non-zero part of the buffer - rarely will
* any data completely fill the entire allocated size of
* the buffer.
*/
for (datalen = 0, i = 0; i < len >> 2; i++)
if (buf[i])
datalen = ((i + 1) << 2);
/*
* If we reach here, then the originally captured binary buffer
* will be replaced with the ascii85 encoded string
*/
*ptr = adreno_gpu_ascii85_encode(buf, datalen);
kvfree(buf);
*encoded = true;
}
if (!*ptr)
return;
drm_puts(p, " data: !!ascii85 |\n");
drm_puts(p, " ");
for (i = 0; i < l; i++)
drm_puts(p, ascii85_encode(ptr[i], out));
drm_puts(p, *ptr);
drm_puts(p, "\n");
}
@@ -534,8 +581,8 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
drm_printf(p, " wptr: %d\n", state->ring[i].wptr);
drm_printf(p, " size: %d\n", MSM_GPU_RINGBUFFER_SZ);
adreno_show_object(p, state->ring[i].data,
state->ring[i].data_size);
adreno_show_object(p, &state->ring[i].data,
state->ring[i].data_size, &state->ring[i].encoded);
}
if (state->bos) {
@@ -546,17 +593,19 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
state->bos[i].iova);
drm_printf(p, " size: %zd\n", state->bos[i].size);
adreno_show_object(p, state->bos[i].data,
state->bos[i].size);
adreno_show_object(p, &state->bos[i].data,
state->bos[i].size, &state->bos[i].encoded);
}
}
drm_puts(p, "registers:\n");
if (state->nr_registers) {
drm_puts(p, "registers:\n");
for (i = 0; i < state->nr_registers; i++) {
drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
state->registers[i * 2] << 2,
state->registers[(i * 2) + 1]);
for (i = 0; i < state->nr_registers; i++) {
drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
state->registers[i * 2] << 2,
state->registers[(i * 2) + 1]);
}
}
}
#endif
@@ -595,6 +644,9 @@ void adreno_dump(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int i;
if (!adreno_gpu->registers)
return;
/* dump these out in a form that can be parsed by demsm: */
printk("IO:region %s 00000000 00020000\n", gpu->name);
for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
@@ -635,7 +687,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
if (!node) {
dev_err(dev, "Could not find the GPU powerlevels\n");
DRM_DEV_ERROR(dev, "Could not find the GPU powerlevels\n");
return -ENXIO;
}
@@ -674,7 +726,7 @@ static int adreno_get_pwrlevels(struct device *dev,
else {
ret = dev_pm_opp_of_add_table(dev);
if (ret)
dev_err(dev, "Unable to set the OPP table\n");
DRM_DEV_ERROR(dev, "Unable to set the OPP table\n");
}
if (!ret) {
@@ -717,6 +769,9 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu_config.va_start = SZ_16M;
adreno_gpu_config.va_end = 0xffffffff;
/* maximum range of a2xx mmu */
if (adreno_is_a2xx(adreno_gpu))
adreno_gpu_config.va_end = SZ_16M + 0xfff * SZ_64K;
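/* Editorial note: that is 4095 64K pages above the 16M base, i.e.
 * va_end = 0x01000000 + 0xfff * 0x10000 = 0x10ff0000, just under 272M
 * of GPU virtual address space.
 */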
adreno_gpu_config.nr_rings = nr_rings;

View file

@@ -21,6 +21,7 @@
#define __ADRENO_GPU_H__
#include <linux/firmware.h>
#include <linux/iopoll.h>
#include "msm_gpu.h"
@@ -154,6 +155,20 @@ struct adreno_platform_config {
__ret; \
})
static inline bool adreno_is_a2xx(struct adreno_gpu *gpu)
{
return (gpu->revn < 300);
}
static inline bool adreno_is_a20x(struct adreno_gpu *gpu)
{
return (gpu->revn < 210);
}
static inline bool adreno_is_a225(struct adreno_gpu *gpu)
{
return gpu->revn == 225;
}
static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
{
@@ -334,6 +349,7 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
gpu_write(&gpu->base, reg - 1, data);
}
struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
@@ -375,4 +391,9 @@ static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
((1 << 29) \
((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
interval, timeout)
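/* Editorial example (register and condition are illustrative placeholders):
 *
 *   u32 status;
 *   int ret = gpu_poll_timeout(gpu, REG_A6XX_RBBM_STATUS, status,
 *                              !(status & 1), 100, 10000);
 *
 * i.e. readl_poll_timeout() against a dword-indexed GPU register, sleeping
 * ~100us between reads and giving up after 10ms.
 */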
#endif /* __ADRENO_GPU_H__ */

View file

@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
@@ -108,6 +108,13 @@ enum pc_di_src_sel {
DI_SRC_SEL_RESERVED = 3,
};
enum pc_di_face_cull_sel {
DI_FACE_CULL_NONE = 0,
DI_FACE_CULL_FETCH = 1,
DI_FACE_BACKFACE_CULL = 2,
DI_FACE_FRONTFACE_CULL = 3,
};
enum pc_di_index_size {
INDEX_SIZE_IGN = 0,
INDEX_SIZE_16_BIT = 0,
@@ -356,6 +363,7 @@ enum a6xx_render_mode {
RM6_GMEM = 4,
RM6_BLIT2D = 5,
RM6_RESOLVE = 6,
RM6_BLIT2DSCALE = 12,
};
enum pseudo_reg {

View file

@@ -319,10 +319,8 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
unsigned long irq_flags;
int i, irq_count, enable_count, cb_count;
if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
DPU_ERROR("invalid parameters\n");
if (WARN_ON(!irq_obj->enable_counts || !irq_obj->irq_cb_tbl))
return 0;
}
for (i = 0; i < irq_obj->total_irqs; i++) {
spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
@@ -343,31 +341,11 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);
int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
{
dpu_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0600,
parent, &dpu_kms->irq_obj,
&dpu_debugfs_core_irq_fops);
return 0;
}
void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
{
debugfs_remove(dpu_kms->irq_obj.debugfs_file);
dpu_kms->irq_obj.debugfs_file = NULL;
}
#else
int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
{
return 0;
}
void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
{
debugfs_create_file("core_irq", 0600, parent, &dpu_kms->irq_obj,
&dpu_debugfs_core_irq_fops);
}
#endif
@@ -376,10 +354,7 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
struct msm_drm_private *priv;
int i;
if (!dpu_kms) {
DPU_ERROR("invalid dpu_kms\n");
return;
} else if (!dpu_kms->dev) {
if (!dpu_kms->dev) {
DPU_ERROR("invalid drm device\n");
return;
} else if (!dpu_kms->dev->dev_private) {
@@ -410,20 +385,12 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
}
}
int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms)
{
return 0;
}
void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
{
struct msm_drm_private *priv;
int i;
if (!dpu_kms) {
DPU_ERROR("invalid dpu_kms\n");
return;
} else if (!dpu_kms->dev) {
if (!dpu_kms->dev) {
DPU_ERROR("invalid drm device\n");
return;
} else if (!dpu_kms->dev->dev_private) {

View file

@@ -23,13 +23,6 @@
*/
void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms);
/**
* dpu_core_irq_postinstall - perform post-installation of core IRQ handler
* @dpu_kms: DPU handle
* @return: 0 if success; error code otherwise
*/
int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms);
/**
* dpu_core_irq_uninstall - uninstall core IRQ handler
* @dpu_kms: DPU handle
@@ -139,15 +132,8 @@ int dpu_core_irq_unregister_callback(
* dpu_debugfs_core_irq_init - register core irq debugfs
* @dpu_kms: pointer to kms
* @parent: debugfs directory root
* @Return: 0 on success
*/
int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
struct dentry *parent);
/**
* dpu_debugfs_core_irq_destroy - deregister core irq debugfs
* @dpu_kms: pointer to kms
*/
void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms);
#endif /* __DPU_CORE_IRQ_H__ */

View file

@@ -24,8 +24,6 @@
#include "dpu_crtc.h"
#include "dpu_core_perf.h"
#define DPU_PERF_MODE_STRING_SIZE 128
/**
* enum dpu_perf_mode - performance tuning mode
* @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
@@ -57,31 +55,20 @@ static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
return to_dpu_kms(priv->kms);
}
static bool _dpu_core_perf_crtc_is_power_on(struct drm_crtc *crtc)
{
return dpu_crtc_is_enabled(crtc);
}
static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc)
{
struct drm_crtc *tmp_crtc;
bool intf_connected = false;
if (!crtc)
goto end;
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
_dpu_core_perf_crtc_is_power_on(tmp_crtc)) {
tmp_crtc->enabled) {
DPU_DEBUG("video interface connected crtc:%d\n",
tmp_crtc->base.id);
intf_connected = true;
goto end;
return true;
}
}
end:
return intf_connected;
return false;
}
static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
@@ -101,20 +88,20 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
memset(perf, 0, sizeof(struct dpu_core_perf_params));
if (!dpu_cstate->bw_control) {
for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
1000ULL;
perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
}
perf->core_clk_rate = kms->perf.max_core_clk_rate;
} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
perf->bw_ctl[i] = 0;
perf->max_per_pipe_ib[i] = 0;
}
perf->core_clk_rate = 0;
} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote;
}
@ -124,12 +111,12 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
DPU_DEBUG(
"crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
crtc->base.id, perf->core_clk_rate,
perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MNOC],
perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_LLCC],
perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_EBI],
perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI]);
perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_EBI],
perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_EBI]);
}
int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
@ -164,13 +151,13 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
/* obtain new values */
_dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl[i];
curr_client_type = dpu_crtc_get_client_type(crtc);
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
if (tmp_crtc->enabled &&
(dpu_crtc_get_client_type(tmp_crtc) ==
curr_client_type) &&
(tmp_crtc != crtc)) {
@ -229,7 +216,7 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
int ret = 0;
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
if (tmp_crtc->enabled &&
curr_client_type ==
dpu_crtc_get_client_type(tmp_crtc)) {
dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
@ -286,7 +273,7 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
*/
if (dpu_crtc_get_intf_mode(crtc) == INTF_MODE_CMD)
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
if (tmp_crtc->enabled &&
dpu_crtc_get_intf_mode(tmp_crtc) ==
INTF_MODE_VIDEO)
return;
@ -296,7 +283,7 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
if (kms->perf.enable_bw_release) {
trace_dpu_cmd_release_bw(crtc->base.id);
DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
dpu_crtc->cur_perf.bw_ctl[i] = 0;
_dpu_core_perf_crtc_update_bus(kms, crtc, i);
}
@ -321,7 +308,7 @@ static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
struct dpu_crtc_state *dpu_cstate;
drm_for_each_crtc(crtc, kms->dev) {
if (_dpu_core_perf_crtc_is_power_on(crtc)) {
if (crtc->enabled) {
dpu_cstate = to_dpu_crtc_state(crtc->state);
clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
clk_rate);
@ -372,8 +359,8 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
old = &dpu_crtc->cur_perf;
new = &dpu_cstate->new_perf;
if (_dpu_core_perf_crtc_is_power_on(crtc) && !stop_req) {
for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
if (crtc->enabled && !stop_req) {
for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
/*
* cases for bus bandwidth update.
* 1. new bandwidth vote - "ab or ib vote" is higher
@ -415,13 +402,13 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
update_clk = 1;
}
trace_dpu_perf_crtc_update(crtc->base.id,
new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI],
new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_EBI],
new->core_clk_rate, stop_req,
update_bus, update_clk);
for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
if (update_bus & BIT(i)) {
ret = _dpu_core_perf_crtc_update_bus(kms, crtc, i);
if (ret) {
@ -462,24 +449,14 @@ static ssize_t _dpu_core_perf_mode_write(struct file *file,
struct dpu_core_perf *perf = file->private_data;
struct dpu_perf_cfg *cfg = &perf->catalog->perf;
u32 perf_mode = 0;
char buf[10];
int ret;
if (!perf)
return -ENODEV;
if (count >= sizeof(buf))
return -EFAULT;
if (copy_from_user(buf, user_buf, count))
return -EFAULT;
buf[count] = 0; /* end of string */
if (kstrtouint(buf, 0, &perf_mode))
return -EFAULT;
ret = kstrtouint_from_user(user_buf, count, 0, &perf_mode);
if (ret)
return ret;
if (perf_mode >= DPU_PERF_MODE_MAX)
return -EFAULT;
return -EINVAL;
if (perf_mode == DPU_PERF_MODE_FIXED) {
DRM_INFO("fix performance mode\n");
@ -504,29 +481,16 @@ static ssize_t _dpu_core_perf_mode_read(struct file *file,
char __user *buff, size_t count, loff_t *ppos)
{
struct dpu_core_perf *perf = file->private_data;
int len = 0;
char buf[DPU_PERF_MODE_STRING_SIZE] = {'\0'};
int len;
char buf[128];
if (!perf)
return -ENODEV;
if (*ppos)
return 0; /* the end */
len = snprintf(buf, sizeof(buf),
len = scnprintf(buf, sizeof(buf),
"mode %d min_mdp_clk %llu min_bus_vote %llu\n",
perf->perf_tune.mode,
perf->perf_tune.min_core_clk,
perf->perf_tune.min_bus_vote);
if (len < 0 || len >= sizeof(buf))
return 0;
if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
return -EFAULT;
*ppos += len; /* increase offset */
return len;
return simple_read_from_buffer(buff, count, ppos, buf, len);
}
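Both replacements above are stock kernel helpers: kstrtouint_from_user() parses an unsigned int straight out of a user buffer (no local copy and NUL-termination dance), and simple_read_from_buffer() does the count/*ppos bookkeeping that the removed open-coded copy_to_user() path tended to get wrong. A hedged sketch of a debugfs read/write pair built on them; demo_mode and the fops names are made up:

    #include <linux/debugfs.h>
    #include <linux/fs.h>
    #include <linux/kernel.h>

    static unsigned int demo_mode;  /* hypothetical tunable */

    static ssize_t demo_mode_write(struct file *file, const char __user *ubuf,
                                   size_t count, loff_t *ppos)
    {
            unsigned int val;
            int ret;

            /* parse an integer directly from userspace, any base (0) */
            ret = kstrtouint_from_user(ubuf, count, 0, &val);
            if (ret)
                    return ret;

            demo_mode = val;
            return count;
    }

    static ssize_t demo_mode_read(struct file *file, char __user *ubuf,
                                  size_t count, loff_t *ppos)
    {
            char buf[32];
            int len;

            len = scnprintf(buf, sizeof(buf), "mode %u\n", demo_mode);
            /* handles short reads, EOF and *ppos updates for us */
            return simple_read_from_buffer(ubuf, count, ppos, buf, len);
    }

    static const struct file_operations demo_mode_fops = {
            .open  = simple_open,
            .read  = demo_mode_read,
            .write = demo_mode_write,
    };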
static const struct file_operations dpu_core_perf_mode_fops = {
@ -535,70 +499,43 @@ static const struct file_operations dpu_core_perf_mode_fops = {
.write = _dpu_core_perf_mode_write,
};
static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
{
debugfs_remove_recursive(perf->debugfs_root);
perf->debugfs_root = NULL;
}
int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
struct dentry *parent)
int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
{
struct dpu_core_perf *perf = &dpu_kms->perf;
struct dpu_mdss_cfg *catalog = perf->catalog;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
struct dentry *entry;
priv = perf->dev->dev_private;
if (!priv || !priv->kms) {
DPU_ERROR("invalid KMS reference\n");
entry = debugfs_create_dir("core_perf", parent);
if (IS_ERR_OR_NULL(entry))
return -EINVAL;
}
dpu_kms = to_dpu_kms(priv->kms);
perf->debugfs_root = debugfs_create_dir("core_perf", parent);
if (!perf->debugfs_root) {
DPU_ERROR("failed to create core perf debugfs\n");
return -EINVAL;
}
debugfs_create_u64("max_core_clk_rate", 0600, perf->debugfs_root,
debugfs_create_u64("max_core_clk_rate", 0600, entry,
&perf->max_core_clk_rate);
debugfs_create_u64("core_clk_rate", 0600, perf->debugfs_root,
debugfs_create_u64("core_clk_rate", 0600, entry,
&perf->core_clk_rate);
debugfs_create_u32("enable_bw_release", 0600, perf->debugfs_root,
debugfs_create_u32("enable_bw_release", 0600, entry,
(u32 *)&perf->enable_bw_release);
debugfs_create_u32("threshold_low", 0600, perf->debugfs_root,
debugfs_create_u32("threshold_low", 0600, entry,
(u32 *)&catalog->perf.max_bw_low);
debugfs_create_u32("threshold_high", 0600, perf->debugfs_root,
debugfs_create_u32("threshold_high", 0600, entry,
(u32 *)&catalog->perf.max_bw_high);
debugfs_create_u32("min_core_ib", 0600, perf->debugfs_root,
debugfs_create_u32("min_core_ib", 0600, entry,
(u32 *)&catalog->perf.min_core_ib);
debugfs_create_u32("min_llcc_ib", 0600, perf->debugfs_root,
debugfs_create_u32("min_llcc_ib", 0600, entry,
(u32 *)&catalog->perf.min_llcc_ib);
debugfs_create_u32("min_dram_ib", 0600, perf->debugfs_root,
debugfs_create_u32("min_dram_ib", 0600, entry,
(u32 *)&catalog->perf.min_dram_ib);
debugfs_create_file("perf_mode", 0600, perf->debugfs_root,
debugfs_create_file("perf_mode", 0600, entry,
(u32 *)perf, &dpu_core_perf_mode_fops);
debugfs_create_u64("fix_core_clk_rate", 0600, perf->debugfs_root,
debugfs_create_u64("fix_core_clk_rate", 0600, entry,
&perf->fix_core_clk_rate);
debugfs_create_u64("fix_core_ib_vote", 0600, perf->debugfs_root,
debugfs_create_u64("fix_core_ib_vote", 0600, entry,
&perf->fix_core_ib_vote);
debugfs_create_u64("fix_core_ab_vote", 0600, perf->debugfs_root,
debugfs_create_u64("fix_core_ab_vote", 0600, entry,
&perf->fix_core_ab_vote);
return 0;
}
#else
static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
{
}
int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
struct dentry *parent)
{
return 0;
}
#endif
void dpu_core_perf_destroy(struct dpu_core_perf *perf)
@ -608,10 +545,8 @@ void dpu_core_perf_destroy(struct dpu_core_perf *perf)
return;
}
dpu_core_perf_debugfs_destroy(perf);
perf->max_core_clk_rate = 0;
perf->core_clk = NULL;
perf->phandle = NULL;
perf->catalog = NULL;
perf->dev = NULL;
}
@ -619,12 +554,10 @@ void dpu_core_perf_destroy(struct dpu_core_perf *perf)
int dpu_core_perf_init(struct dpu_core_perf *perf,
struct drm_device *dev,
struct dpu_mdss_cfg *catalog,
struct dpu_power_handle *phandle,
struct dss_clk *core_clk)
{
perf->dev = dev;
perf->catalog = catalog;
perf->phandle = phandle;
perf->core_clk = core_clk;
perf->max_core_clk_rate = core_clk->max_rate;

View file

@ -19,10 +19,22 @@
#include <drm/drm_crtc.h>
#include "dpu_hw_catalog.h"
#include "dpu_power_handle.h"
#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000
/**
* enum dpu_core_perf_data_bus_id - data bus identifier
* @DPU_CORE_PERF_DATA_BUS_ID_MNOC: DPU/MNOC data bus
* @DPU_CORE_PERF_DATA_BUS_ID_LLCC: MNOC/LLCC data bus
* @DPU_CORE_PERF_DATA_BUS_ID_EBI: LLCC/EBI data bus
*/
enum dpu_core_perf_data_bus_id {
DPU_CORE_PERF_DATA_BUS_ID_MNOC,
DPU_CORE_PERF_DATA_BUS_ID_LLCC,
DPU_CORE_PERF_DATA_BUS_ID_EBI,
DPU_CORE_PERF_DATA_BUS_ID_MAX,
};
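Since the enum values are dense and terminated by a ..._MAX sentinel, they double as array indices, which is how bw_ctl[] and max_per_pipe_ib[] are sized and walked throughout this patch. A tiny sketch under that assumption (clamp_votes is hypothetical):

    #include <linux/types.h>

    /* Illustrative only: walk every data bus and clamp its vote. */
    static void clamp_votes(u64 bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MAX], u64 max)
    {
            int i;

            for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
                 i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++)
                    if (bw_ctl[i] > max)
                            bw_ctl[i] = max;
    }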
/**
* struct dpu_core_perf_params - definition of performance parameters
* @max_per_pipe_ib: maximum instantaneous bandwidth request
@ -30,8 +42,8 @@
* @core_clk_rate: core clock rate request
*/
struct dpu_core_perf_params {
u64 max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MAX];
u64 bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MAX];
u64 max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_MAX];
u64 bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MAX];
u64 core_clk_rate;
};
@ -52,7 +64,6 @@ struct dpu_core_perf_tune {
* @dev: Pointer to drm device
* @debugfs_root: top level debug folder
* @catalog: Pointer to catalog configuration
* @phandle: Pointer to power handler
* @core_clk: Pointer to core clock structure
* @core_clk_rate: current core clock rate
* @max_core_clk_rate: maximum allowable core clock rate
@ -66,7 +77,6 @@ struct dpu_core_perf {
struct drm_device *dev;
struct dentry *debugfs_root;
struct dpu_mdss_cfg *catalog;
struct dpu_power_handle *phandle;
struct dss_clk *core_clk;
u64 core_clk_rate;
u64 max_core_clk_rate;
@ -113,21 +123,20 @@ void dpu_core_perf_destroy(struct dpu_core_perf *perf);
* @perf: Pointer to core performance context
* @dev: Pointer to drm device
* @catalog: Pointer to catalog
* @phandle: Pointer to power handle
* @core_clk: pointer to core clock
*/
int dpu_core_perf_init(struct dpu_core_perf *perf,
struct drm_device *dev,
struct dpu_mdss_cfg *catalog,
struct dpu_power_handle *phandle,
struct dss_clk *core_clk);
struct dpu_kms;
/**
* dpu_core_perf_debugfs_init - initialize debugfs for core performance context
* @perf: Pointer to core performance context
* @dpu_kms: Pointer to the dpu_kms struct
* @debugfs_parent: Pointer to parent debugfs
*/
int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
struct dentry *parent);
int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent);
#endif /* _DPU_CORE_PERF_H_ */

View file

@ -33,7 +33,6 @@
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_power_handle.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"
@ -47,13 +46,7 @@
#define LEFT_MIXER 0
#define RIGHT_MIXER 1
static inline int _dpu_crtc_get_mixer_width(struct dpu_crtc_state *cstate,
struct drm_display_mode *mode)
{
return mode->hdisplay / cstate->num_mixers;
}
static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
@ -69,10 +62,7 @@ static void dpu_crtc_destroy(struct drm_crtc *crtc)
if (!crtc)
return;
dpu_crtc->phandle = NULL;
drm_crtc_cleanup(crtc);
mutex_destroy(&dpu_crtc->crtc_lock);
kfree(dpu_crtc);
}
@ -287,16 +277,17 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
return INTF_MODE_NONE;
}
drm_for_each_encoder(encoder, crtc->dev)
if (encoder->crtc == crtc)
return dpu_encoder_get_intf_mode(encoder);
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
/* TODO: Returns the first INTF_MODE, could there be multiple values? */
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
return dpu_encoder_get_intf_mode(encoder);
return INTF_MODE_NONE;
}
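drm_for_each_encoder_mask() visits only the encoders whose bits are set in the given mask; combined with crtc->state->encoder_mask it replaces the old scan of every encoder filtered by encoder->crtc == crtc. An illustrative wrapper (demo_for_attached_encoders is made up):

    #include <drm/drm_crtc.h>
    #include <drm/drm_encoder.h>

    static void demo_for_attached_encoders(struct drm_crtc *crtc,
                                           void (*fn)(struct drm_encoder *enc))
    {
            struct drm_encoder *encoder;

            /* visits only encoders currently linked to this crtc's state */
            drm_for_each_encoder_mask(encoder, crtc->dev,
                                      crtc->state->encoder_mask)
                    fn(encoder);
    }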
static void dpu_crtc_vblank_cb(void *data)
void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
struct drm_crtc *crtc = (struct drm_crtc *)data;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
/* keep statistics on vblank callback - with auto reset via debugfs */
@ -309,6 +300,19 @@ static void dpu_crtc_vblank_cb(void *data)
trace_dpu_crtc_vblank_cb(DRMID(crtc));
}
static void dpu_crtc_release_bw_unlocked(struct drm_crtc *crtc)
{
int ret = 0;
struct drm_modeset_acquire_ctx ctx;
DRM_MODESET_LOCK_ALL_BEGIN(crtc->dev, ctx, 0, ret);
dpu_core_perf_crtc_release_bw(crtc);
DRM_MODESET_LOCK_ALL_END(ctx, ret);
if (ret)
DRM_ERROR("Failed to acquire modeset locks to release bw, %d\n",
ret);
}
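DRM_MODESET_LOCK_ALL_BEGIN()/DRM_MODESET_LOCK_ALL_END() wrap the ww-mutex acquire context and the -EDEADLK backoff/retry loop that taking all modeset locks requires. The helper above is already the canonical shape; a stripped-down sketch for reference:

    #include <drm/drm_device.h>
    #include <drm/drm_modeset_lock.h>
    #include <drm/drm_print.h>

    static void do_under_modeset_locks(struct drm_device *dev)
    {
            struct drm_modeset_acquire_ctx ctx;
            int ret = 0;

            /* expands to a retry label plus drm_modeset_lock_all_ctx() */
            DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

            /* ... everything here runs with all modeset locks held ... */

            /* drops the locks; jumps back to retry on -EDEADLK */
            DRM_MODESET_LOCK_ALL_END(ctx, ret);
            if (ret)
                    DRM_ERROR("modeset locking failed: %d\n", ret);
    }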
static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
struct dpu_crtc_frame_event *fevent = container_of(work,
@ -338,7 +342,7 @@ static void dpu_crtc_frame_event_work(struct kthread_work *work)
/* release bandwidth and other resources */
trace_dpu_crtc_frame_event_done(DRMID(crtc),
fevent->event);
dpu_core_perf_crtc_release_bw(crtc);
dpu_crtc_release_bw_unlocked(crtc);
} else {
trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
fevent->event);
@ -473,28 +477,21 @@ static void _dpu_crtc_setup_mixer_for_encoder(
static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct drm_encoder *enc;
mutex_lock(&dpu_crtc->crtc_lock);
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
/* Check for mixers on all encoders attached to this crtc */
list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
if (enc->crtc != crtc)
continue;
drm_for_each_encoder_mask(enc, crtc->dev, crtc->state->encoder_mask)
_dpu_crtc_setup_mixer_for_encoder(crtc, enc);
}
mutex_unlock(&dpu_crtc->crtc_lock);
}
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
struct drm_display_mode *adj_mode = &state->adjusted_mode;
u32 crtc_split_width = _dpu_crtc_get_mixer_width(cstate, adj_mode);
u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
int i;
for (i = 0; i < cstate->num_mixers; i++) {
@ -502,7 +499,7 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
r->x1 = crtc_split_width * i;
r->y1 = 0;
r->x2 = r->x1 + crtc_split_width;
r->y2 = dpu_crtc_get_mixer_height(dpu_crtc, cstate, adj_mode);
r->y2 = adj_mode->vdisplay;
trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
}
@ -552,13 +549,9 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
spin_unlock_irqrestore(&dev->event_lock, flags);
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc != crtc)
continue;
/* encoder will trigger pending mask now */
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_trigger_kickoff_pending(encoder);
}
/*
* If no mixers have been allocated in dpu_crtc_atomic_check(),
@ -702,10 +695,9 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
return rc;
}
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async)
{
struct drm_encoder *encoder;
struct drm_device *dev = crtc->dev;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
@ -721,127 +713,59 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
DPU_ATRACE_BEGIN("crtc_commit");
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
/*
* Encoder will flush/start now, unless it has a tx pending. If so, it
* may delay and flush at an irq event (e.g. ppdone)
*/
drm_for_each_encoder_mask(encoder, crtc->dev,
crtc->state->encoder_mask) {
struct dpu_encoder_kickoff_params params = { 0 };
if (encoder->crtc != crtc)
continue;
/*
* Encoder will flush/start now, unless it has a tx pending.
* If so, it may delay and flush at an irq event (e.g. ppdone)
*/
dpu_encoder_prepare_for_kickoff(encoder, &params);
dpu_encoder_prepare_for_kickoff(encoder, &params, async);
}
/* wait for frame_event_done completion */
DPU_ATRACE_BEGIN("wait_for_frame_done_event");
ret = _dpu_crtc_wait_for_frame_done(crtc);
DPU_ATRACE_END("wait_for_frame_done_event");
if (ret) {
DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
crtc->base.id,
atomic_read(&dpu_crtc->frame_pending));
goto end;
if (!async) {
/* wait for frame_event_done completion */
DPU_ATRACE_BEGIN("wait_for_frame_done_event");
ret = _dpu_crtc_wait_for_frame_done(crtc);
DPU_ATRACE_END("wait_for_frame_done_event");
if (ret) {
DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
crtc->base.id,
atomic_read(&dpu_crtc->frame_pending));
goto end;
}
if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
/* acquire bandwidth and other resources */
DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
} else
DPU_DEBUG("crtc%d commit\n", crtc->base.id);
dpu_crtc->play_count++;
}
if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
/* acquire bandwidth and other resources */
DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
} else
DPU_DEBUG("crtc%d commit\n", crtc->base.id);
dpu_crtc->play_count++;
dpu_vbif_clear_errors(dpu_kms);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc != crtc)
continue;
dpu_encoder_kickoff(encoder);
}
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_kickoff(encoder, async);
end:
reinit_completion(&dpu_crtc->frame_done_comp);
if (!async)
reinit_completion(&dpu_crtc->frame_done_comp);
DPU_ATRACE_END("crtc_commit");
}
/**
* _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request
* @dpu_crtc: Pointer to dpu crtc structure
* @enable: Whether to enable/disable vblanks
*/
static void _dpu_crtc_vblank_enable_no_lock(
struct dpu_crtc *dpu_crtc, bool enable)
static void dpu_crtc_reset(struct drm_crtc *crtc)
{
struct drm_crtc *crtc = &dpu_crtc->base;
struct drm_device *dev = crtc->dev;
struct drm_encoder *enc;
struct dpu_crtc_state *cstate;
if (enable) {
/* drop lock since power crtc cb may try to re-acquire lock */
mutex_unlock(&dpu_crtc->crtc_lock);
pm_runtime_get_sync(dev->dev);
mutex_lock(&dpu_crtc->crtc_lock);
if (crtc->state)
dpu_crtc_destroy_state(crtc, crtc->state);
list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
if (enc->crtc != crtc)
continue;
trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
DRMID(enc), enable,
dpu_crtc);
dpu_encoder_register_vblank_callback(enc,
dpu_crtc_vblank_cb, (void *)crtc);
}
} else {
list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
if (enc->crtc != crtc)
continue;
trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
DRMID(enc), enable,
dpu_crtc);
dpu_encoder_register_vblank_callback(enc, NULL, NULL);
}
/* drop lock since power crtc cb may try to re-acquire lock */
mutex_unlock(&dpu_crtc->crtc_lock);
pm_runtime_put_sync(dev->dev);
mutex_lock(&dpu_crtc->crtc_lock);
}
}
/**
* _dpu_crtc_set_suspend - notify crtc of suspend enable/disable
* @crtc: Pointer to drm crtc object
* @enable: true to enable suspend, false to indicate resume
*/
static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
DRM_DEBUG_KMS("crtc%d suspend = %d\n", crtc->base.id, enable);
mutex_lock(&dpu_crtc->crtc_lock);
/*
* If the vblank is enabled, release a power reference on suspend
* and take it back during resume (if it is still enabled).
*/
trace_dpu_crtc_set_suspend(DRMID(&dpu_crtc->base), enable, dpu_crtc);
if (dpu_crtc->suspend == enable)
DPU_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
crtc->base.id, enable);
else if (dpu_crtc->enabled && dpu_crtc->vblank_requested) {
_dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable);
}
dpu_crtc->suspend = enable;
mutex_unlock(&dpu_crtc->crtc_lock);
crtc->state = kzalloc(sizeof(*cstate), GFP_KERNEL);
if (crtc->state)
crtc->state->crtc = crtc;
}
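The rewritten reset hook above is the minimal subclassed-state pattern: free any stale state, then install a zeroed driver state whose embedded base is the drm_crtc_state. It works only because the base is the first member of the subclass, as in this sketch (demo_* names are illustrative):

    #include <drm/drm_crtc.h>
    #include <linux/slab.h>

    struct demo_crtc_state {
            struct drm_crtc_state base;     /* must stay first */
            u64 private_value;              /* hypothetical driver field */
    };

    static void demo_crtc_reset(struct drm_crtc *crtc)
    {
            struct demo_crtc_state *state;

            if (crtc->state)                /* drop stale state, if any */
                    crtc->funcs->atomic_destroy_state(crtc, crtc->state);

            state = kzalloc(sizeof(*state), GFP_KERNEL);
            /* base is the first member, so &state->base == (void *)state */
            crtc->state = state ? &state->base : NULL;
            if (crtc->state)
                    crtc->state->crtc = crtc;
    }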
/**
@ -873,65 +797,8 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
return &cstate->base;
}
/**
* dpu_crtc_reset - reset hook for CRTCs
* Resets the atomic state for @crtc by freeing the state pointer (which might
* be NULL, e.g. at driver load time) and allocating a new empty state object.
* @crtc: Pointer to drm crtc structure
*/
static void dpu_crtc_reset(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *cstate;
if (!crtc) {
DPU_ERROR("invalid crtc\n");
return;
}
/* revert suspend actions, if necessary */
if (dpu_kms_is_suspend_state(crtc->dev))
_dpu_crtc_set_suspend(crtc, false);
/* remove previous state, if present */
if (crtc->state) {
dpu_crtc_destroy_state(crtc, crtc->state);
crtc->state = 0;
}
dpu_crtc = to_dpu_crtc(crtc);
cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
if (!cstate) {
DPU_ERROR("failed to allocate state\n");
return;
}
cstate->base.crtc = crtc;
crtc->state = &cstate->base;
}
static void dpu_crtc_handle_power_event(u32 event_type, void *arg)
{
struct drm_crtc *crtc = arg;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct drm_encoder *encoder;
mutex_lock(&dpu_crtc->crtc_lock);
trace_dpu_crtc_handle_power_event(DRMID(crtc), event_type);
/* restore encoder; crtc will be programmed during commit */
drm_for_each_encoder(encoder, crtc->dev) {
if (encoder->crtc != crtc)
continue;
dpu_encoder_virt_restore(encoder);
}
mutex_unlock(&dpu_crtc->crtc_lock);
}
static void dpu_crtc_disable(struct drm_crtc *crtc)
static void dpu_crtc_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *cstate;
@ -951,13 +818,12 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
if (dpu_kms_is_suspend_state(crtc->dev))
_dpu_crtc_set_suspend(crtc, true);
/* Disable/save vblank irq handling */
drm_crtc_vblank_off(crtc);
mutex_lock(&dpu_crtc->crtc_lock);
drm_for_each_encoder_mask(encoder, crtc->dev,
old_crtc_state->encoder_mask)
dpu_encoder_assign_crtc(encoder, NULL);
/* wait for frame_event_done completion */
if (_dpu_crtc_wait_for_frame_done(crtc))
@ -966,10 +832,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
atomic_read(&dpu_crtc->frame_pending));
trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
if (dpu_crtc->enabled && !dpu_crtc->suspend &&
dpu_crtc->vblank_requested) {
_dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
}
dpu_crtc->enabled = false;
if (atomic_read(&dpu_crtc->frame_pending)) {
@ -981,15 +843,8 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
dpu_core_perf_crtc_update(crtc, 0, true);
drm_for_each_encoder(encoder, crtc->dev) {
if (encoder->crtc != crtc)
continue;
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
}
if (dpu_crtc->power_event)
dpu_power_handle_unregister_event(dpu_crtc->phandle,
dpu_crtc->power_event);
memset(cstate->mixers, 0, sizeof(cstate->mixers));
cstate->num_mixers = 0;
@ -998,14 +853,14 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
cstate->bw_control = false;
cstate->bw_split_vote = false;
mutex_unlock(&dpu_crtc->crtc_lock);
if (crtc->state->event && !crtc->state->active) {
spin_lock_irqsave(&crtc->dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
pm_runtime_put_sync(crtc->dev->dev);
}
static void dpu_crtc_enable(struct drm_crtc *crtc,
@ -1021,33 +876,23 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
}
priv = crtc->dev->dev_private;
pm_runtime_get_sync(crtc->dev->dev);
DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
dpu_crtc = to_dpu_crtc(crtc);
drm_for_each_encoder(encoder, crtc->dev) {
if (encoder->crtc != crtc)
continue;
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_register_frame_event_callback(encoder,
dpu_crtc_frame_event_cb, (void *)crtc);
}
mutex_lock(&dpu_crtc->crtc_lock);
trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
if (!dpu_crtc->enabled && !dpu_crtc->suspend &&
dpu_crtc->vblank_requested) {
_dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
}
dpu_crtc->enabled = true;
mutex_unlock(&dpu_crtc->crtc_lock);
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_assign_crtc(encoder, crtc);
/* Enable/restore vblank irq handling */
drm_crtc_vblank_on(crtc);
dpu_crtc->power_event = dpu_power_handle_register_event(
dpu_crtc->phandle, DPU_POWER_EVENT_ENABLE,
dpu_crtc_handle_power_event, crtc, dpu_crtc->name);
}
struct plane_state {
@ -1101,7 +946,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
memset(pipe_staged, 0, sizeof(pipe_staged));
mixer_width = _dpu_crtc_get_mixer_width(cstate, mode);
mixer_width = mode->hdisplay / cstate->num_mixers;
_dpu_crtc_setup_lm_bounds(crtc, state);
@ -1289,21 +1134,32 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
struct dpu_crtc *dpu_crtc;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct drm_encoder *enc;
if (!crtc) {
DPU_ERROR("invalid crtc\n");
return -EINVAL;
}
dpu_crtc = to_dpu_crtc(crtc);
mutex_lock(&dpu_crtc->crtc_lock);
trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
if (dpu_crtc->enabled && !dpu_crtc->suspend) {
_dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
/*
* Normally we would iterate through encoder_mask in crtc state to find
* attached encoders. In this case, we might be disabling vblank _after_
* encoder_mask has been cleared.
*
* Instead, we "assign" a crtc to the encoder in enable and clear it in
* disable (which is also after encoder_mask is cleared). So instead of
* using encoder mask, we'll ask the encoder to toggle itself iff it's
* currently assigned to our crtc.
*
* Note also that this function cannot be called while crtc is disabled
* since we use drm_crtc_vblank_on/off. So we don't need to worry
* about the assigned crtcs being inconsistent with the current state
* (which means no need to worry about modeset locks).
*/
list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
dpu_crtc);
dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
}
dpu_crtc->vblank_requested = en;
mutex_unlock(&dpu_crtc->crtc_lock);
return 0;
}
@ -1324,18 +1180,14 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
int i, out_width;
if (!s || !s->private)
return -EINVAL;
dpu_crtc = s->private;
crtc = &dpu_crtc->base;
drm_modeset_lock_all(crtc->dev);
cstate = to_dpu_crtc_state(crtc->state);
mutex_lock(&dpu_crtc->crtc_lock);
mode = &crtc->state->adjusted_mode;
out_width = _dpu_crtc_get_mixer_width(cstate, mode);
out_width = mode->hdisplay / cstate->num_mixers;
seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
mode->hdisplay, mode->vdisplay);
@ -1420,9 +1272,6 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
dpu_crtc->vblank_cb_time = ktime_set(0, 0);
}
seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested);
mutex_unlock(&dpu_crtc->crtc_lock);
drm_modeset_unlock_all(crtc->dev);
return 0;
@ -1456,13 +1305,11 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
seq_printf(s, "core_clk_rate: %llu\n",
dpu_crtc->cur_perf.core_clk_rate);
for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
seq_printf(s, "bw_ctl[%s]: %llu\n",
dpu_power_handle_get_dbus_name(i),
for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
seq_printf(s, "bw_ctl[%d]: %llu\n", i,
dpu_crtc->cur_perf.bw_ctl[i]);
seq_printf(s, "max_per_pipe_ib[%s]: %llu\n",
dpu_power_handle_get_dbus_name(i),
seq_printf(s, "max_per_pipe_ib[%d]: %llu\n", i,
dpu_crtc->cur_perf.max_per_pipe_ib[i]);
}
@ -1472,8 +1319,7 @@ DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc;
struct dpu_kms *dpu_kms;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
static const struct file_operations debugfs_status_fops = {
.open = _dpu_debugfs_status_open,
@ -1482,12 +1328,6 @@ static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
.release = single_release,
};
if (!crtc)
return -EINVAL;
dpu_crtc = to_dpu_crtc(crtc);
dpu_kms = _dpu_crtc_get_kms(crtc);
dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
crtc->dev->primary->debugfs_root);
if (!dpu_crtc->debugfs_root)
@ -1504,25 +1344,11 @@ static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
return 0;
}
static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc;
if (!crtc)
return;
dpu_crtc = to_dpu_crtc(crtc);
debugfs_remove_recursive(dpu_crtc->debugfs_root);
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
return 0;
}
static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
{
}
#endif /* CONFIG_DEBUG_FS */
static int dpu_crtc_late_register(struct drm_crtc *crtc)
@ -1532,7 +1358,9 @@ static int dpu_crtc_late_register(struct drm_crtc *crtc)
static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
_dpu_crtc_destroy_debugfs(crtc);
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
debugfs_remove_recursive(dpu_crtc->debugfs_root);
}
static const struct drm_crtc_funcs dpu_crtc_funcs = {
@ -1547,7 +1375,7 @@ static const struct drm_crtc_funcs dpu_crtc_funcs = {
};
static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
.disable = dpu_crtc_disable,
.atomic_disable = dpu_crtc_disable,
.atomic_enable = dpu_crtc_enable,
.atomic_check = dpu_crtc_atomic_check,
.atomic_begin = dpu_crtc_atomic_begin,
@ -1574,7 +1402,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
crtc = &dpu_crtc->base;
crtc->dev = dev;
mutex_init(&dpu_crtc->crtc_lock);
spin_lock_init(&dpu_crtc->spin_lock);
atomic_set(&dpu_crtc->frame_pending, 0);
@ -1602,8 +1429,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
/* initialize event handling */
spin_lock_init(&dpu_crtc->event_lock);
dpu_crtc->phandle = &kms->phandle;
DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
return crtc;
}

View file

@ -132,8 +132,6 @@ struct dpu_crtc_frame_event {
* @vblank_cb_count : count of vblank callback since last reset
* @play_count : frame count between crtc enable and disable
* @vblank_cb_time : ktime at vblank count reset
* @vblank_requested : whether the user has requested vblank events
* @suspend : whether or not a suspend operation is in progress
* @enabled : whether the DPU CRTC is currently enabled. updated in the
* commit-thread, not state-swap time which is earlier, so
* safe to make decisions on during VBLANK on/off work
@ -142,7 +140,6 @@ struct dpu_crtc_frame_event {
* @dirty_list : list of color processing features are dirty
* @ad_dirty: list containing ad properties that are dirty
* @ad_active: list containing ad properties that are active
* @crtc_lock : crtc lock around create, destroy and access.
* @frame_pending : Whether or not an update is pending
* @frame_events : static allocation of in-flight frame events
* @frame_event_list : available frame event list
@ -152,7 +149,6 @@ struct dpu_crtc_frame_event {
* @event_worker : Event worker queue
* @event_lock : Spinlock around event handling code
* @phandle: Pointer to power handler
* @power_event : registered power event handle
* @cur_perf : current performance committed to clock/bandwidth driver
*/
struct dpu_crtc {
@ -168,8 +164,6 @@ struct dpu_crtc {
u32 vblank_cb_count;
u64 play_count;
ktime_t vblank_cb_time;
bool vblank_requested;
bool suspend;
bool enabled;
struct list_head feature_list;
@ -178,8 +172,6 @@ struct dpu_crtc {
struct list_head ad_dirty;
struct list_head ad_active;
struct mutex crtc_lock;
atomic_t frame_pending;
struct dpu_crtc_frame_event frame_events[DPU_CRTC_FRAME_EVENT_SIZE];
struct list_head frame_event_list;
@ -189,9 +181,6 @@ struct dpu_crtc {
/* for handling internal event thread */
spinlock_t event_lock;
struct dpu_power_handle *phandle;
struct dpu_power_event *power_event;
struct dpu_core_perf_params cur_perf;
struct dpu_crtc_smmu_state_data smmu_state;
@ -237,42 +226,13 @@ struct dpu_crtc_state {
#define to_dpu_crtc_state(x) \
container_of(x, struct dpu_crtc_state, base)
/**
* dpu_crtc_state_is_stereo - Is crtc virtualized with two mixers?
* @cstate: Pointer to dpu crtc state
* @Return: true - has two mixers, false - has one mixer
*/
static inline bool dpu_crtc_state_is_stereo(struct dpu_crtc_state *cstate)
{
return cstate->num_mixers == CRTC_DUAL_MIXERS;
}
/**
* dpu_crtc_get_mixer_height - get the mixer height
* Mixer height will be same as panel height
*/
static inline int dpu_crtc_get_mixer_height(struct dpu_crtc *dpu_crtc,
struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
{
if (!dpu_crtc || !cstate || !mode)
return 0;
return mode->vdisplay;
}
/**
* dpu_crtc_frame_pending - return the number of pending frames
* @crtc: Pointer to drm crtc object
*/
static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc;
if (!crtc)
return -EINVAL;
dpu_crtc = to_dpu_crtc(crtc);
return atomic_read(&dpu_crtc->frame_pending);
return crtc ? atomic_read(&to_dpu_crtc(crtc)->frame_pending) : -EINVAL;
}
/**
@ -283,10 +243,17 @@ static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
/**
* dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
* dpu_crtc_vblank_callback - called on vblank irq, issues completion events
* @crtc: Pointer to drm crtc object
*/
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc);
void dpu_crtc_vblank_callback(struct drm_crtc *crtc);
/**
* dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
* @crtc: Pointer to drm crtc object
* @async: true if the commit is asynchronous, false otherwise
*/
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async);
/**
* dpu_crtc_complete_commit - callback signalling completion of current commit
@ -329,22 +296,7 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc);
static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
struct drm_crtc *crtc)
{
struct dpu_crtc_state *cstate =
crtc ? to_dpu_crtc_state(crtc->state) : NULL;
if (!cstate)
return NRT_CLIENT;
return RT_CLIENT;
}
/**
* dpu_crtc_is_enabled - check if dpu crtc is enabled or not
* @crtc: Pointer to crtc
*/
static inline bool dpu_crtc_is_enabled(struct drm_crtc *crtc)
{
return crtc ? crtc->enabled : false;
return crtc && crtc->state ? RT_CLIENT : NRT_CLIENT;
}
#endif /* _DPU_CRTC_H_ */

File diff suppressed because it is too large

View file

@ -1,103 +0,0 @@
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef DPU_DBG_H_
#define DPU_DBG_H_
#include <stdarg.h>
#include <linux/debugfs.h>
#include <linux/list.h>
enum dpu_dbg_dump_flag {
DPU_DBG_DUMP_IN_LOG = BIT(0),
DPU_DBG_DUMP_IN_MEM = BIT(1),
};
#if defined(CONFIG_DEBUG_FS)
/**
* dpu_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
* @hwversion: Chipset revision
*/
void dpu_dbg_init_dbg_buses(u32 hwversion);
/**
* dpu_dbg_init - initialize global dpu debug facilities: regdump
* @dev: device handle
* Returns: 0 or -ERROR
*/
int dpu_dbg_init(struct device *dev);
/**
* dpu_dbg_debugfs_register - register entries at the given debugfs dir
* @debugfs_root: debugfs root in which to create dpu debug entries
* Returns: 0 or -ERROR
*/
int dpu_dbg_debugfs_register(struct dentry *debugfs_root);
/**
* dpu_dbg_destroy - destroy the global dpu debug facilities
* Returns: none
*/
void dpu_dbg_destroy(void);
/**
* dpu_dbg_dump - trigger dumping of all dpu_dbg facilities
* @queue_work: whether to queue the dumping work to the work_struct
* @name: string indicating origin of dump
* @dump_dbgbus: dump the dpu debug bus
* @dump_vbif_rt: dump the vbif rt bus
* Returns: none
*/
void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
bool dump_dbgbus_vbif_rt);
/**
* dpu_dbg_set_dpu_top_offset - set the target specific offset from mdss base
* address of the top registers. Used for accessing debug bus controls.
* @blk_off: offset from mdss base of the top block
*/
void dpu_dbg_set_dpu_top_offset(u32 blk_off);
#else
static inline void dpu_dbg_init_dbg_buses(u32 hwversion)
{
}
static inline int dpu_dbg_init(struct device *dev)
{
return 0;
}
static inline int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
{
return 0;
}
static inline void dpu_dbg_destroy(void)
{
}
static inline void dpu_dbg_dump(bool queue_work, const char *name,
bool dump_dbgbus_dpu, bool dump_dbgbus_vbif_rt)
{
}
static inline void dpu_dbg_set_dpu_top_offset(u32 blk_off)
{
}
#endif /* defined(CONFIG_DEBUG_FS) */
#endif /* DPU_DBG_H_ */

View file

@ -130,8 +130,9 @@ enum dpu_enc_rc_states {
* Virtual encoder defers as much as possible to the physical encoders.
* Virtual encoder registers itself with the DRM Framework as the encoder.
* @base: drm_encoder base class for registration with DRM
* @enc_spin_lock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
* @bus_scaling_client: Client handle to the bus scaling interface
* @enabled: True if the encoder is active, protected by enc_lock
* @num_phys_encs: Actual number of physical encoders contained.
* @phys_encs: Container of physical encoders managed.
* @cur_master: Pointer to the current master in this mode. Optimization
@ -141,15 +142,17 @@ enum dpu_enc_rc_states {
* @intfs_swapped Whether or not the phys_enc interfaces have been swapped
* for partial update right-only cases, such as pingpong
* split where virtual pingpong does not generate IRQs
* @crtc_vblank_cb: Callback into the upper layer / CRTC for
* notification of the VBLANK
* @crtc_vblank_cb_data: Data from upper layer for VBLANK notification
* @crtc: Pointer to the currently assigned crtc. Normally you
* would use crtc->state->encoder_mask to determine the
* link between encoder/crtc. However in this case we need
* to track crtc in the disable() hook which is called
* _after_ encoder_mask is cleared.
* @crtc_kickoff_cb: Callback into CRTC that will flush & start
* all CTL paths
* @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
* @debugfs_root: Debug file system root file node
* @enc_lock: Lock around physical encoder create/destroy and
access.
* @enc_lock: Lock around physical encoder
* create/destroy/enable/disable
* @frame_busy_mask: Bitmask tracking which phys_enc we are still
* busy processing current command.
* Bit0 = phys_encs[0] etc.
@ -175,6 +178,8 @@ struct dpu_encoder_virt {
spinlock_t enc_spinlock;
uint32_t bus_scaling_client;
bool enabled;
unsigned int num_phys_encs;
struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
struct dpu_encoder_phys *cur_master;
@ -183,8 +188,7 @@ struct dpu_encoder_virt {
bool intfs_swapped;
void (*crtc_vblank_cb)(void *);
void *crtc_vblank_cb_data;
struct drm_crtc *crtc;
struct dentry *debugfs_root;
struct mutex enc_lock;
@ -210,39 +214,6 @@ struct dpu_encoder_virt {
};
#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
static inline int _dpu_encoder_power_enable(struct dpu_encoder_virt *dpu_enc,
bool enable)
{
struct drm_encoder *drm_enc;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
if (!dpu_enc) {
DPU_ERROR("invalid dpu enc\n");
return -EINVAL;
}
drm_enc = &dpu_enc->base;
if (!drm_enc->dev || !drm_enc->dev->dev_private) {
DPU_ERROR("drm device invalid\n");
return -EINVAL;
}
priv = drm_enc->dev->dev_private;
if (!priv->kms) {
DPU_ERROR("invalid kms\n");
return -EINVAL;
}
dpu_kms = to_dpu_kms(priv->kms);
if (enable)
pm_runtime_get_sync(&dpu_kms->pdev->dev);
else
pm_runtime_put_sync(&dpu_kms->pdev->dev);
return 0;
}
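The helper deleted above reduced to pm_runtime_get_sync()/pm_runtime_put_sync() plus pointer checks, so callers now use the runtime-PM API directly. The usual shape, sketched with a hypothetical demo_touch_hw():

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static int demo_touch_hw(struct device *dev)
    {
            int ret;

            ret = pm_runtime_get_sync(dev); /* powers up, may resume */
            if (ret < 0) {
                    /* balance the usage count taken by get_sync() */
                    pm_runtime_put_noidle(dev);
                    return ret;
            }

            /* ... access the hardware ... */

            pm_runtime_put_sync(dev);       /* drop our usage count */
            return 0;
    }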
void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx)
@ -1119,28 +1090,24 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
}
void dpu_encoder_virt_restore(struct drm_encoder *drm_enc)
void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = NULL;
int i;
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
if (!drm_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
mutex_lock(&dpu_enc->enc_lock);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (phys && (phys != dpu_enc->cur_master) && phys->ops.restore)
phys->ops.restore(phys);
}
if (!dpu_enc->enabled)
goto out;
if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
_dpu_encoder_virt_enable_helper(drm_enc);
out:
mutex_unlock(&dpu_enc->enc_lock);
}
static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
@ -1154,6 +1121,8 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
return;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
mutex_lock(&dpu_enc->enc_lock);
cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
@ -1170,10 +1139,15 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
if (ret) {
DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
ret);
return;
goto out;
}
_dpu_encoder_virt_enable_helper(drm_enc);
dpu_enc->enabled = true;
out:
mutex_unlock(&dpu_enc->enc_lock);
}
static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
@ -1195,11 +1169,14 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
return;
}
mode = &drm_enc->crtc->state->adjusted_mode;
dpu_enc = to_dpu_encoder_virt(drm_enc);
DPU_DEBUG_ENC(dpu_enc, "\n");
mutex_lock(&dpu_enc->enc_lock);
dpu_enc->enabled = false;
mode = &drm_enc->crtc->state->adjusted_mode;
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
@ -1233,6 +1210,8 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
dpu_rm_release(&dpu_kms->rm, drm_enc);
mutex_unlock(&dpu_enc->enc_lock);
}
static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
@ -1263,8 +1242,8 @@ static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
dpu_enc = to_dpu_encoder_virt(drm_enc);
spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
if (dpu_enc->crtc_vblank_cb)
dpu_enc->crtc_vblank_cb(dpu_enc->crtc_vblank_cb_data);
if (dpu_enc->crtc)
dpu_crtc_vblank_callback(dpu_enc->crtc);
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
atomic_inc(&phy_enc->vsync_cnt);
@ -1284,25 +1263,32 @@ static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
DPU_ATRACE_END("encoder_underrun_callback");
}
void dpu_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
void (*vbl_cb)(void *), void *vbl_data)
void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
unsigned long lock_flags;
spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
/* crtc should always be cleared before re-assigning */
WARN_ON(crtc && dpu_enc->crtc);
dpu_enc->crtc = crtc;
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}
void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
struct drm_crtc *crtc, bool enable)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
unsigned long lock_flags;
bool enable;
int i;
enable = vbl_cb ? true : false;
if (!drm_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
dpu_enc->crtc_vblank_cb = vbl_cb;
dpu_enc->crtc_vblank_cb_data = vbl_data;
if (dpu_enc->crtc != crtc) {
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
return;
}
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
@ -1407,8 +1393,9 @@ static void dpu_encoder_off_work(struct kthread_work *work)
* phys: Pointer to physical encoder structure
* extra_flush_bits: Additional bit mask to include in flush trigger
*/
static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phys, uint32_t extra_flush_bits,
bool async)
{
struct dpu_hw_ctl *ctl;
int pending_kickoff_cnt;
@ -1431,7 +1418,10 @@ static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
return;
}
pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
if (!async)
pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
else
pending_kickoff_cnt = atomic_read(&phys->pending_kickoff_cnt);
if (extra_flush_bits && ctl->ops.update_pending_flush)
ctl->ops.update_pending_flush(ctl, extra_flush_bits);
@ -1450,7 +1440,7 @@ static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
* _dpu_encoder_trigger_start - trigger start for a physical encoder
* phys: Pointer to physical encoder structure
*/
static inline void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
{
if (!phys) {
DPU_ERROR("invalid argument(s)\n");
@ -1507,7 +1497,7 @@ static int dpu_encoder_helper_wait_event_timeout(
return rc;
}
void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_hw_ctl *ctl;
@ -1527,10 +1517,8 @@ void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
ctl->idx);
rc = ctl->ops.reset(ctl);
if (rc) {
if (rc)
DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
dpu_dbg_dump(false, __func__, true, true);
}
phys_enc->enable_state = DPU_ENC_ENABLED;
}
@ -1544,7 +1532,8 @@ void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
* a time.
* dpu_enc: Pointer to virtual encoder structure
*/
static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc,
bool async)
{
struct dpu_hw_ctl *ctl;
uint32_t i, pending_flush;
@ -1575,7 +1564,8 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
set_bit(i, dpu_enc->frame_busy_mask);
if (!phys->ops.needs_single_flush ||
!phys->ops.needs_single_flush(phys))
_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0,
async);
else if (ctl->ops.get_pending_flush)
pending_flush |= ctl->ops.get_pending_flush(ctl);
}
@ -1585,7 +1575,7 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
_dpu_encoder_trigger_flush(
&dpu_enc->base,
dpu_enc->cur_master,
pending_flush);
pending_flush, async);
}
_dpu_encoder_trigger_start(dpu_enc->cur_master);
@ -1769,7 +1759,7 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
}
void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
struct dpu_encoder_kickoff_params *params)
struct dpu_encoder_kickoff_params *params, bool async)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
@ -1803,14 +1793,12 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
if (needs_hw_reset) {
trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
phys = dpu_enc->phys_encs[i];
if (phys && phys->ops.hw_reset)
phys->ops.hw_reset(phys);
dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
}
}
}
void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
void dpu_encoder_kickoff(struct drm_encoder *drm_enc, bool async)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
@ -1833,7 +1821,7 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
((atomic_read(&dpu_enc->frame_done_timeout) * HZ) / 1000));
/* All phys encs are ready to go, trigger the kickoff */
_dpu_encoder_kickoff_phys(dpu_enc);
_dpu_encoder_kickoff_phys(dpu_enc, async);
/* allow phys encs to handle any post-kickoff business */
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
@ -1875,14 +1863,9 @@ void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
#ifdef CONFIG_DEBUG_FS
static int _dpu_encoder_status_show(struct seq_file *s, void *data)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_virt *dpu_enc = s->private;
int i;
if (!s || !s->private)
return -EINVAL;
dpu_enc = s->private;
mutex_lock(&dpu_enc->enc_lock);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
@ -1920,7 +1903,7 @@ static int _dpu_encoder_debugfs_status_open(struct inode *inode,
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
int i;
@ -1934,12 +1917,11 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
char name[DPU_NAME_SIZE];
if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
if (!drm_enc->dev || !drm_enc->dev->dev_private) {
DPU_ERROR("invalid encoder or kms\n");
return -EINVAL;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
@ -1964,26 +1946,11 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
return 0;
}
static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
if (!drm_enc)
return;
dpu_enc = to_dpu_encoder_virt(drm_enc);
debugfs_remove_recursive(dpu_enc->debugfs_root);
}
#else
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
return 0;
}
static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
{
}
#endif
static int dpu_encoder_late_register(struct drm_encoder *encoder)
@ -1993,7 +1960,9 @@ static int dpu_encoder_late_register(struct drm_encoder *encoder)
static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
{
_dpu_encoder_destroy_debugfs(encoder);
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
debugfs_remove_recursive(dpu_enc->debugfs_root);
}
static int dpu_encoder_virt_add_phys_encs(
@ -2268,6 +2237,8 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
dpu_enc->enabled = false;
return &dpu_enc->base;
}

View file

@ -55,14 +55,22 @@ void dpu_encoder_get_hw_resources(struct drm_encoder *encoder,
struct dpu_encoder_hw_resources *hw_res);
/**
* dpu_encoder_register_vblank_callback - provide callback to encoder that
* will be called on the next vblank.
* dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to
* @encoder: encoder pointer
* @cb: callback pointer, provide NULL to deregister and disable IRQs
* @data: user data provided to callback
* @crtc: crtc pointer
*/
void dpu_encoder_register_vblank_callback(struct drm_encoder *encoder,
void (*cb)(void *), void *data);
void dpu_encoder_assign_crtc(struct drm_encoder *encoder,
struct drm_crtc *crtc);
/**
* dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if
* the encoder is assigned to the given crtc
* @encoder: encoder pointer
* @crtc: crtc pointer
* @enable: true if vblank should be enabled
*/
void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *encoder,
struct drm_crtc *crtc, bool enable);
/**
* dpu_encoder_register_frame_event_callback - provide callback to encoder that
@ -81,9 +89,10 @@ void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
* Delayed: Block until next trigger can be issued.
* @encoder: encoder pointer
* @params: kickoff time parameters
* @async: true if this is an asynchronous commit
*/
void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
struct dpu_encoder_kickoff_params *params);
struct dpu_encoder_kickoff_params *params, bool async);
/**
* dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
@ -96,8 +105,9 @@ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
* dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
* (i.e. ctl flush and start) immediately.
* @encoder: encoder pointer
* @async: true if this is an asynchronous commit
*/
void dpu_encoder_kickoff(struct drm_encoder *encoder);
void dpu_encoder_kickoff(struct drm_encoder *encoder, bool async);
/**
* dpu_encoder_wait_for_event - Waits for encoder events
@ -126,10 +136,10 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder,
enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
/**
* dpu_encoder_virt_restore - restore the encoder configs
* dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs
* @encoder: encoder pointer
*/
void dpu_encoder_virt_restore(struct drm_encoder *encoder);
void dpu_encoder_virt_runtime_resume(struct drm_encoder *encoder);
/**
* dpu_encoder_init - initialize virtual encoder object

View file

@ -114,8 +114,6 @@ struct dpu_encoder_virt_ops {
* @handle_post_kickoff: Do any work necessary post-kickoff work
* @trigger_start: Process start event on physical encoder
* @needs_single_flush: Whether encoder slaves need to be flushed
* @hw_reset: Issue HW recovery such as CTL reset and clear
* DPU_ENC_ERR_NEEDS_HW_RESET state
* @irq_control: Handler to enable/disable all the encoder IRQs
* @prepare_idle_pc: phys encoder can update the vsync_enable status
* on idle power collapse prepare
@ -151,7 +149,6 @@ struct dpu_encoder_phys_ops {
void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
void (*hw_reset)(struct dpu_encoder_phys *phys_enc);
void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
void (*restore)(struct dpu_encoder_phys *phys);
@ -342,15 +339,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
*/
void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc);
/**
* dpu_encoder_helper_hw_reset - issue ctl hw reset
* This helper function may be optionally specified by physical
* encoders if they require ctl hw reset. If state is currently
* DPU_ENC_ERR_NEEDS_HW_RESET, it is set back to DPU_ENC_ENABLED.
* @phys_enc: Pointer to physical encoder structure
*/
void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc);
static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
struct dpu_encoder_phys *phys_enc)
{
@ -362,7 +350,7 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
if (phys_enc->split_role == ENC_ROLE_SOLO &&
dpu_crtc_state_is_stereo(dpu_cstate))
dpu_cstate->num_mixers == CRTC_DUAL_MIXERS)
return BLEND_3D_H_ROW_INT;
return BLEND_3D_NONE;

View file

@ -44,14 +44,7 @@
#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
static inline int _dpu_encoder_phys_cmd_get_idle_timeout(
struct dpu_encoder_phys_cmd *cmd_enc)
{
return KICKOFF_TIMEOUT_MS;
}
static inline bool dpu_encoder_phys_cmd_is_master(
struct dpu_encoder_phys *phys_enc)
static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
{
return (phys_enc->split_role != ENC_ROLE_SLAVE) ? true : false;
}
@ -243,7 +236,6 @@ static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
atomic_read(&phys_enc->pending_kickoff_cnt));
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
dpu_dbg_dump(false, __func__, true, true);
}
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
@ -496,14 +488,11 @@ static void dpu_encoder_phys_cmd_enable_helper(
_dpu_encoder_phys_cmd_pingpong_config(phys_enc);
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
goto skip_flush;
return;
ctl = phys_enc->hw_ctl;
ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
skip_flush:
return;
}
static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
@ -727,7 +716,7 @@ static int dpu_encoder_phys_cmd_wait_for_vblank(
wait_info.wq = &cmd_enc->pending_vblank_wq;
wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
wait_info.timeout_ms = _dpu_encoder_phys_cmd_get_idle_timeout(cmd_enc);
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
atomic_inc(&cmd_enc->pending_vblank_cnt);
@ -776,7 +765,6 @@ static void dpu_encoder_phys_cmd_init_ops(
ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
ops->hw_reset = dpu_encoder_helper_hw_reset;
ops->irq_control = dpu_encoder_phys_cmd_irq_control;
ops->restore = dpu_encoder_phys_cmd_enable_helper;
ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
@ -798,7 +786,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
if (!cmd_enc) {
ret = -ENOMEM;
DPU_ERROR("failed to allocate\n");
goto fail;
return ERR_PTR(ret);
}
phys_enc = &cmd_enc->base;
phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
@ -856,6 +844,5 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
return phys_enc;
fail:
return ERR_PTR(ret);
}

View file

@ -110,7 +110,7 @@ static void drm_mode_to_intf_timing_params(
*/
}
static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
static u32 get_horizontal_total(const struct intf_timing_params *timing)
{
u32 active = timing->xres;
u32 inactive =
@ -119,7 +119,7 @@ static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
return active + inactive;
}
static inline u32 get_vertical_total(const struct intf_timing_params *timing)
static u32 get_vertical_total(const struct intf_timing_params *timing)
{
u32 active = timing->yres;
u32 inactive =
@ -331,7 +331,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
if (hw_ctl && hw_ctl->ops.get_flush_register)
flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
if (flush_register == 0)
if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
-1, 0);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
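The vblank handler now retires a kickoff only once the hardware flush register no longer overlaps the software pending-flush mask, instead of waiting for the whole register to read zero; schematically (same names as above):

	/* Schematic restatement: bits set in both CTL_FLUSH and the SW pending
	 * mask mean this encoder's flush has not landed yet; only when they no
	 * longer overlap may the pending kickoff be retired. */
	if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
		atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);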
@ -613,7 +613,6 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
DPU_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
ctl->idx, rc);
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
dpu_dbg_dump(false, __func__, true, true);
}
}
@ -766,7 +765,6 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
ops->hw_reset = dpu_encoder_helper_hw_reset;
ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
}

View file

@ -921,7 +921,7 @@ static int _dpu_format_populate_addrs_ubwc(
+ layout->plane_size[2] + layout->plane_size[3];
if (!meta)
goto done;
return 0;
/* configure Y metadata plane */
layout->plane_addr[2] = base_addr;
@ -952,12 +952,11 @@ static int _dpu_format_populate_addrs_ubwc(
layout->plane_addr[1] = 0;
if (!meta)
goto done;
return 0;
layout->plane_addr[2] = base_addr;
layout->plane_addr[3] = 0;
}
done:
return 0;
}

View file

@ -30,16 +30,10 @@ static LIST_HEAD(dpu_hw_blk_list);
* @type: hw block type - enum dpu_hw_blk_type
* @id: instance id of the hw block
* @ops: Pointer to block operations
* return: 0 if success; error code otherwise
*/
int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
void dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
struct dpu_hw_blk_ops *ops)
{
if (!hw_blk) {
pr_err("invalid parameters\n");
return -EINVAL;
}
INIT_LIST_HEAD(&hw_blk->list);
hw_blk->type = type;
hw_blk->id = id;
@ -51,8 +45,6 @@ int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
mutex_lock(&dpu_hw_blk_lock);
list_add(&hw_blk->list, &dpu_hw_blk_list);
mutex_unlock(&dpu_hw_blk_lock);
return 0;
}
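With dpu_hw_blk_init() no longer able to fail, every hw block constructor collapses to the same shape; a sketch under assumed types (example_blk is a stand-in for the ctl/intf/lm/pingpong/sspp/top containers, each of which embeds a struct dpu_hw_blk named base):

	/* Sketch only: example_blk stands in for any of the hw block types. */
	static struct dpu_hw_blk_ops dpu_hw_ops; /* start/stop left NULL */

	struct example_blk *example_blk_init(int idx)
	{
		struct example_blk *c = kzalloc(sizeof(*c), GFP_KERNEL);

		if (!c)
			return ERR_PTR(-ENOMEM);

		dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
		return c; /* no blk_init error path left to unwind */
	}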
/**

View file

@ -44,7 +44,7 @@ struct dpu_hw_blk {
struct dpu_hw_blk_ops ops;
};
int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
void dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
struct dpu_hw_blk_ops *ops);
void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk);

View file

@ -736,13 +736,4 @@ struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev);
*/
void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg);
/**
* dpu_hw_sspp_multirect_enabled - check multirect enabled for the sspp
* @cfg: pointer to sspp cfg
*/
static inline bool dpu_hw_sspp_multirect_enabled(const struct dpu_sspp_cfg *cfg)
{
return test_bit(DPU_SSPP_SMART_DMA_V1, &cfg->features) ||
test_bit(DPU_SSPP_SMART_DMA_V2, &cfg->features);
}
#endif /* _DPU_HW_CATALOG_H */

View file

@ -13,8 +13,8 @@
#include <linux/delay.h>
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_dbg.h"
#include "dpu_kms.h"
#include "dpu_trace.h"
#define CTL_LAYER(lm) \
(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
@ -72,24 +72,39 @@ static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
return stages;
}
static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
return DPU_REG_READ(c, CTL_FLUSH);
}
static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
dpu_hw_ctl_get_flush_register(ctx));
DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}
static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
dpu_hw_ctl_get_flush_register(ctx));
DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}
static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
dpu_hw_ctl_get_flush_register(ctx));
ctx->pending_flush_mask = 0x0;
}
static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
u32 flushbits)
{
trace_dpu_hw_ctl_update_pending_flush(flushbits,
ctx->pending_flush_mask);
ctx->pending_flush_mask |= flushbits;
}
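Each CTL helper now logs the incoming bits against the live flush register. A sketch of how such a tracepoint is typically declared in dpu_trace.h (the field names here are assumed, not taken from the patch):

	/* Hedged sketch of a TRACE_EVENT declaration matching the call above. */
	TRACE_EVENT(dpu_hw_ctl_update_pending_flush,
		TP_PROTO(u32 new_bits, u32 pending_mask),
		TP_ARGS(new_bits, pending_mask),
		TP_STRUCT__entry(
			__field(u32, new_bits)
			__field(u32, pending_mask)
		),
		TP_fast_assign(
			__entry->new_bits = new_bits;
			__entry->pending_mask = pending_mask;
		),
		TP_printk("new=%x existing=%x",
			  __entry->new_bits, __entry->pending_mask)
	);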
@ -103,18 +118,12 @@ static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
dpu_hw_ctl_get_flush_register(ctx));
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
return DPU_REG_READ(c, CTL_FLUSH);
}
static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
enum dpu_sspp sspp)
{
uint32_t flushbits = 0;
@ -169,7 +178,7 @@ static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
return flushbits;
}
static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
enum dpu_lm lm)
{
uint32_t flushbits = 0;
@ -202,7 +211,7 @@ static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
return flushbits;
}
static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
u32 *flushbits, enum dpu_intf intf)
{
switch (intf) {
@ -474,10 +483,7 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
};
static struct dpu_hw_blk_ops dpu_hw_ops = {
.start = NULL,
.stop = NULL,
};
static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
void __iomem *addr,
@ -485,7 +491,6 @@ struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
{
struct dpu_hw_ctl *c;
struct dpu_ctl_cfg *cfg;
int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
@ -504,18 +509,9 @@ struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
c->mixer_count = m->mixer_count;
c->mixer_hw_caps = m->mixer;
rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
if (rc) {
DPU_ERROR("failed to init hw blk %d\n", rc);
goto blk_init_error;
}
dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
return c;
blk_init_error:
kzfree(c);
return ERR_PTR(rc);
}
void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)

View file

@ -13,7 +13,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
#include "dpu_dbg.h"
#include "dpu_kms.h"
#define INTF_TIMING_ENGINE_EN 0x000
@ -265,10 +264,7 @@ static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
ops->get_line_count = dpu_hw_intf_get_line_count;
}
static struct dpu_hw_blk_ops dpu_hw_ops = {
.start = NULL,
.stop = NULL,
};
static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
void __iomem *addr,
@ -276,7 +272,6 @@ struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
{
struct dpu_hw_intf *c;
struct dpu_intf_cfg *cfg;
int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
@ -297,18 +292,9 @@ struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
c->mdss = m;
_setup_intf_ops(&c->ops, c->cap->features);
rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
if (rc) {
DPU_ERROR("failed to init hw blk %d\n", rc);
goto blk_init_error;
}
dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
return c;
blk_init_error:
kzfree(c);
return ERR_PTR(rc);
}
void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)

View file

@ -91,16 +91,6 @@ struct dpu_hw_intf {
struct dpu_hw_intf_ops ops;
};
/**
* to_dpu_hw_intf - convert base object dpu_hw_base to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
{
return container_of(hw, struct dpu_hw_intf, base);
}
/**
* dpu_hw_intf_init(): Initializes the intf driver for the passed
* interface idx.

View file

@ -15,7 +15,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_mdss.h"
#include "dpu_dbg.h"
#include "dpu_kms.h"
#define LM_OP_MODE 0x00
@ -64,16 +63,10 @@ static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage)
{
const struct dpu_lm_sub_blks *sblk = ctx->cap->sblk;
int rc;
if (stage != DPU_STAGE_BASE && stage <= sblk->maxblendstages)
return sblk->blendstage_base[stage - DPU_STAGE_0];
if (stage == DPU_STAGE_BASE)
rc = -EINVAL;
else if (stage <= sblk->maxblendstages)
rc = sblk->blendstage_base[stage - DPU_STAGE_0];
else
rc = -EINVAL;
return rc;
return -EINVAL;
}
static void dpu_hw_lm_setup_out(struct dpu_hw_mixer *ctx,
@ -163,11 +156,6 @@ static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
}
static void dpu_hw_lm_gc(struct dpu_hw_mixer *mixer,
void *cfg)
{
}
static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
struct dpu_hw_lm_ops *ops,
unsigned long features)
@ -179,13 +167,9 @@ static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
ops->setup_alpha_out = dpu_hw_lm_setup_color3;
ops->setup_border_color = dpu_hw_lm_setup_border_color;
ops->setup_gc = dpu_hw_lm_gc;
};
static struct dpu_hw_blk_ops dpu_hw_ops = {
.start = NULL,
.stop = NULL,
};
static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
void __iomem *addr,
@ -193,7 +177,6 @@ struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
{
struct dpu_hw_mixer *c;
struct dpu_lm_cfg *cfg;
int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
@ -210,18 +193,9 @@ struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
c->cap = cfg;
_setup_mixer_ops(m, &c->ops, c->cap->features);
rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
if (rc) {
DPU_ERROR("failed to init hw blk %d\n", rc);
goto blk_init_error;
}
dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
return c;
blk_init_error:
kzfree(c);
return ERR_PTR(rc);
}
void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)

View file

@ -61,11 +61,6 @@ struct dpu_hw_lm_ops {
void (*setup_border_color)(struct dpu_hw_mixer *ctx,
struct dpu_mdss_color *color,
u8 border_en);
/**
* setup_gc : enable/disable gamma correction feature
*/
void (*setup_gc)(struct dpu_hw_mixer *mixer,
void *cfg);
};
struct dpu_hw_mixer {

View file

@ -16,7 +16,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_pingpong.h"
#include "dpu_dbg.h"
#include "dpu_kms.h"
#include "dpu_trace.h"
@ -177,7 +176,7 @@ static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
height = DPU_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
if (height < init)
goto line_count_exit;
return line;
line = DPU_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
@ -186,7 +185,6 @@ static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
else
line -= init;
line_count_exit:
return line;
}
@ -201,10 +199,7 @@ static void _setup_pingpong_ops(struct dpu_hw_pingpong_ops *ops,
ops->get_line_count = dpu_hw_pp_get_line_count;
};
static struct dpu_hw_blk_ops dpu_hw_ops = {
.start = NULL,
.stop = NULL,
};
static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
void __iomem *addr,
@ -212,7 +207,6 @@ struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
{
struct dpu_hw_pingpong *c;
struct dpu_pingpong_cfg *cfg;
int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
@ -228,18 +222,9 @@ struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
c->caps = cfg;
_setup_pingpong_ops(&c->ops, c->caps);
rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
if (rc) {
DPU_ERROR("failed to init hw blk %d\n", rc);
goto blk_init_error;
}
dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
return c;
blk_init_error:
kzfree(c);
return ERR_PTR(rc);
}
void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)

View file

@ -104,16 +104,6 @@ struct dpu_hw_pingpong {
struct dpu_hw_pingpong_ops ops;
};
/**
* dpu_hw_pingpong - convert base object dpu_hw_base to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
{
return container_of(hw, struct dpu_hw_pingpong, base);
}
/**
* dpu_hw_pingpong_init - initializes the pingpong driver for the passed
* pingpong idx.

View file

@ -14,7 +14,6 @@
#include "dpu_hw_catalog.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_sspp.h"
#include "dpu_dbg.h"
#include "dpu_kms.h"
#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
@ -141,7 +140,7 @@
/* traffic shaper clock in Hz */
#define TS_CLK 19200000
static inline int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
static int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
int s_id,
u32 *idx)
{
@ -662,7 +661,8 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
test_bit(DPU_SSPP_CSC_10BIT, &features))
c->ops.setup_csc = dpu_hw_sspp_setup_csc;
if (dpu_hw_sspp_multirect_enabled(c->cap))
if (test_bit(DPU_SSPP_SMART_DMA_V1, &c->cap->features) ||
test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features))
c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) {
@ -697,10 +697,7 @@ static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
return ERR_PTR(-ENOMEM);
}
static struct dpu_hw_blk_ops dpu_hw_ops = {
.start = NULL,
.stop = NULL,
};
static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
void __iomem *addr, struct dpu_mdss_cfg *catalog,
@ -708,7 +705,6 @@ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
{
struct dpu_hw_pipe *hw_pipe;
struct dpu_sspp_cfg *cfg;
int rc;
if (!addr || !catalog)
return ERR_PTR(-EINVAL);
@ -730,18 +726,9 @@ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
hw_pipe->cap = cfg;
_setup_layer_ops(hw_pipe, hw_pipe->cap->features);
rc = dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
if (rc) {
DPU_ERROR("failed to init hw blk %d\n", rc);
goto blk_init_error;
}
dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
return hw_pipe;
blk_init_error:
kzfree(hw_pipe);
return ERR_PTR(rc);
}
void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx)

View file

@ -391,16 +391,6 @@ struct dpu_hw_pipe {
struct dpu_hw_sspp_ops ops;
};
/**
* dpu_hw_pipe - convert base object dpu_hw_base to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
static inline struct dpu_hw_pipe *to_dpu_hw_pipe(struct dpu_hw_blk *hw)
{
return container_of(hw, struct dpu_hw_pipe, base);
}
/**
* dpu_hw_sspp_init - initializes the sspp hw driver object.
* Should be called once before accessing every pipe.

View file

@ -13,7 +13,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_top.h"
#include "dpu_dbg.h"
#include "dpu_kms.h"
#define SSPP_SPARE 0x28
@ -322,10 +321,7 @@ static const struct dpu_mdp_cfg *_top_offset(enum dpu_mdp mdp,
return ERR_PTR(-EINVAL);
}
static struct dpu_hw_blk_ops dpu_hw_ops = {
.start = NULL,
.stop = NULL,
};
static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
void __iomem *addr,
@ -333,7 +329,6 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
{
struct dpu_hw_mdp *mdp;
const struct dpu_mdp_cfg *cfg;
int rc;
if (!addr || !m)
return ERR_PTR(-EINVAL);
@ -355,20 +350,9 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
mdp->caps = cfg;
_setup_mdp_ops(&mdp->ops, mdp->caps->features);
rc = dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
if (rc) {
DPU_ERROR("failed to init hw blk %d\n", rc);
goto blk_init_error;
}
dpu_dbg_set_dpu_top_offset(mdp->hw.blk_off);
dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
return mdp;
blk_init_error:
kzfree(mdp);
return ERR_PTR(rc);
}
void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)

View file

@ -160,16 +160,6 @@ struct dpu_hw_mdp {
struct dpu_hw_mdp_ops ops;
};
/**
* to_dpu_hw_mdp - convert base object dpu_hw_base to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
static inline struct dpu_hw_mdp *to_dpu_hw_mdp(struct dpu_hw_blk *hw)
{
return container_of(hw, struct dpu_hw_mdp, base);
}
/**
* dpu_hw_mdptop_init - initializes the top driver for the passed idx
* @idx: Interface index for which driver object is required

View file

@ -13,7 +13,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_vbif.h"
#include "dpu_dbg.h"
#define VBIF_VERSION 0x0000
#define VBIF_CLK_FORCE_CTRL0 0x0008

View file

@ -16,6 +16,8 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <drm/drm_print.h>
#include "dpu_io_util.h"
void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
@ -164,7 +166,7 @@ int msm_dss_parse_clock(struct platform_device *pdev,
"clock-names", i,
&clock_name);
if (rc) {
dev_err(&pdev->dev, "Failed to get clock name for %d\n",
DRM_DEV_ERROR(&pdev->dev, "Failed to get clock name for %d\n",
i);
break;
}
@ -176,13 +178,13 @@ int msm_dss_parse_clock(struct platform_device *pdev,
rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, num_clk);
if (rc) {
dev_err(&pdev->dev, "Failed to get clock refs %d\n", rc);
DRM_DEV_ERROR(&pdev->dev, "Failed to get clock refs %d\n", rc);
goto err;
}
rc = of_clk_set_defaults(pdev->dev.of_node, false);
if (rc) {
dev_err(&pdev->dev, "Failed to set clock defaults %d\n", rc);
DRM_DEV_ERROR(&pdev->dev, "Failed to set clock defaults %d\n", rc);
goto err;
}
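DRM_DEV_ERROR() is the drm_print.h helper that wraps dev_err() with the drm log prefix, so the conversion is mechanical:

	/* Same arguments as dev_err(); only the macro name changes. */
	DRM_DEV_ERROR(&pdev->dev, "Failed to get clock refs %d\n", rc);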

View file

@ -1,66 +0,0 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include "dpu_irq.h"
#include "dpu_core_irq.h"
irqreturn_t dpu_irq(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
return dpu_core_irq(dpu_kms);
}
void dpu_irq_preinstall(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
if (!dpu_kms->dev || !dpu_kms->dev->dev) {
pr_err("invalid device handles\n");
return;
}
dpu_core_irq_preinstall(dpu_kms);
}
int dpu_irq_postinstall(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
int rc;
if (!kms) {
DPU_ERROR("invalid parameters\n");
return -EINVAL;
}
rc = dpu_core_irq_postinstall(dpu_kms);
return rc;
}
void dpu_irq_uninstall(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
if (!kms) {
DPU_ERROR("invalid parameters\n");
return;
}
dpu_core_irq_uninstall(dpu_kms);
}

View file

@ -1,59 +0,0 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __DPU_IRQ_H__
#define __DPU_IRQ_H__
#include <linux/kernel.h>
#include <linux/irqdomain.h>
#include "msm_kms.h"
/**
* dpu_irq_controller - define MDSS level interrupt controller context
* @enabled_mask: enable status of MDSS level interrupt
* @domain: interrupt domain of this controller
*/
struct dpu_irq_controller {
unsigned long enabled_mask;
struct irq_domain *domain;
};
/**
* dpu_irq_preinstall - perform pre-installation of MDSS IRQ handler
* @kms: pointer to kms context
* @return: none
*/
void dpu_irq_preinstall(struct msm_kms *kms);
/**
* dpu_irq_postinstall - perform post-installation of MDSS IRQ handler
* @kms: pointer to kms context
* @return: 0 if success; error code otherwise
*/
int dpu_irq_postinstall(struct msm_kms *kms);
/**
* dpu_irq_uninstall - uninstall MDSS IRQ handler
* @drm_dev: pointer to kms context
* @return: none
*/
void dpu_irq_uninstall(struct msm_kms *kms);
/**
* dpu_irq - MDSS level IRQ handler
* @kms: pointer to kms context
* @return: interrupt handling status
*/
irqreturn_t dpu_irq(struct msm_kms *kms);
#endif /* __DPU_IRQ_H__ */

View file

@ -81,7 +81,7 @@ static int _dpu_danger_signal_status(struct seq_file *s,
struct dpu_danger_safe_status status;
int i;
if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
if (!kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
DPU_ERROR("invalid arg(s)\n");
return 0;
}
@ -138,46 +138,29 @@ static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
static void dpu_debugfs_danger_destroy(struct dpu_kms *dpu_kms)
{
debugfs_remove_recursive(dpu_kms->debugfs_danger);
dpu_kms->debugfs_danger = NULL;
}
static int dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
{
dpu_kms->debugfs_danger = debugfs_create_dir("danger",
parent);
if (!dpu_kms->debugfs_danger) {
DPU_ERROR("failed to create danger debugfs\n");
return -EINVAL;
}
struct dentry *entry = debugfs_create_dir("danger", parent);
if (IS_ERR_OR_NULL(entry))
return;
debugfs_create_file("danger_status", 0600, dpu_kms->debugfs_danger,
debugfs_create_file("danger_status", 0600, entry,
dpu_kms, &dpu_debugfs_danger_stats_fops);
debugfs_create_file("safe_status", 0600, dpu_kms->debugfs_danger,
debugfs_create_file("safe_status", 0600, entry,
dpu_kms, &dpu_debugfs_safe_stats_fops);
return 0;
}
static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
{
struct dpu_debugfs_regset32 *regset;
struct dpu_kms *dpu_kms;
struct dpu_debugfs_regset32 *regset = s->private;
struct dpu_kms *dpu_kms = regset->dpu_kms;
struct drm_device *dev;
struct msm_drm_private *priv;
void __iomem *base;
uint32_t i, addr;
if (!s || !s->private)
return 0;
regset = s->private;
dpu_kms = regset->dpu_kms;
if (!dpu_kms || !dpu_kms->mmio)
if (!dpu_kms->mmio)
return 0;
dev = dpu_kms->dev;
@ -250,57 +233,24 @@ void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
static int _dpu_debugfs_init(struct dpu_kms *dpu_kms)
{
void *p;
int rc;
void *p = dpu_hw_util_get_log_mask_ptr();
struct dentry *entry;
p = dpu_hw_util_get_log_mask_ptr();
if (!dpu_kms || !p)
if (!p)
return -EINVAL;
dpu_kms->debugfs_root = debugfs_create_dir("debug",
dpu_kms->dev->primary->debugfs_root);
if (IS_ERR_OR_NULL(dpu_kms->debugfs_root)) {
DRM_ERROR("debugfs create_dir failed %ld\n",
PTR_ERR(dpu_kms->debugfs_root));
return PTR_ERR(dpu_kms->debugfs_root);
}
rc = dpu_dbg_debugfs_register(dpu_kms->debugfs_root);
if (rc) {
DRM_ERROR("failed to reg dpu dbg debugfs: %d\n", rc);
return rc;
}
entry = debugfs_create_dir("debug", dpu_kms->dev->primary->debugfs_root);
if (IS_ERR_OR_NULL(entry))
return -ENODEV;
/* allow root to be NULL */
debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, dpu_kms->debugfs_root, p);
debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);
(void) dpu_debugfs_danger_init(dpu_kms, dpu_kms->debugfs_root);
(void) dpu_debugfs_vbif_init(dpu_kms, dpu_kms->debugfs_root);
(void) dpu_debugfs_core_irq_init(dpu_kms, dpu_kms->debugfs_root);
dpu_debugfs_danger_init(dpu_kms, entry);
dpu_debugfs_vbif_init(dpu_kms, entry);
dpu_debugfs_core_irq_init(dpu_kms, entry);
rc = dpu_core_perf_debugfs_init(&dpu_kms->perf, dpu_kms->debugfs_root);
if (rc) {
DPU_ERROR("failed to init perf %d\n", rc);
return rc;
}
return 0;
}
static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
{
/* don't need to NULL check debugfs_root */
if (dpu_kms) {
dpu_debugfs_vbif_destroy(dpu_kms);
dpu_debugfs_danger_destroy(dpu_kms);
dpu_debugfs_core_irq_destroy(dpu_kms);
debugfs_remove_recursive(dpu_kms->debugfs_root);
}
}
#else
static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
{
return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
#endif
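The debugfs setup now follows the kernel's best-effort convention: creation failures are tolerated rather than propagated as errors. A condensed sketch of the pattern (reusing the fops defined earlier in this file; example_debugfs_init() itself is hypothetical):

	/* Best-effort debugfs: ignore errors, skip children if the dir failed. */
	static void example_debugfs_init(struct dpu_kms *dpu_kms,
					 struct dentry *parent)
	{
		struct dentry *dir = debugfs_create_dir("danger", parent);

		if (IS_ERR_OR_NULL(dir))
			return;

		debugfs_create_file("danger_status", 0600, dir, dpu_kms,
				    &dpu_debugfs_danger_stats_fops);
	}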
@ -320,7 +270,10 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
struct dpu_kms *dpu_kms;
struct msm_drm_private *priv;
struct drm_device *dev;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_encoder *encoder;
int i;
if (!kms)
return;
@ -332,9 +285,13 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
priv = dev->dev_private;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
if (encoder->crtc != NULL)
/* Call prepare_commit for all affected encoders */
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
drm_for_each_encoder_mask(encoder, crtc->dev,
crtc_state->encoder_mask) {
dpu_encoder_prepare_commit(encoder);
}
}
}
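prepare_commit now visits only the encoders actually affected by this atomic state. The mask iteration works because drm_encoder_mask(enc) is BIT(drm_encoder_index(enc)) and crtc_state->encoder_mask is the OR of those bits for every encoder driving the CRTC; the idiom in isolation:

	/* Idiom: visit each encoder attached to a CRTC touched by this commit. */
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_encoder *encoder;
	int i;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
		drm_for_each_encoder_mask(encoder, crtc->dev,
					  crtc_state->encoder_mask)
			dpu_encoder_prepare_commit(encoder);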
/*
@ -344,15 +301,20 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
void dpu_kms_encoder_enable(struct drm_encoder *encoder)
{
const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
struct drm_crtc *crtc = encoder->crtc;
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc;
/* Forward this enable call to the commit hook */
if (funcs && funcs->commit)
funcs->commit(encoder);
if (crtc && crtc->state->active) {
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
drm_for_each_crtc(crtc, dev) {
if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder)))
continue;
trace_dpu_kms_enc_enable(DRMID(crtc));
dpu_crtc_commit_kickoff(crtc);
dpu_crtc_commit_kickoff(crtc, false);
}
}
@ -369,7 +331,8 @@ static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
if (crtc->state->active) {
trace_dpu_kms_commit(DRMID(crtc));
dpu_crtc_commit_kickoff(crtc);
dpu_crtc_commit_kickoff(crtc,
state->legacy_cursor_update);
}
}
}
@ -613,22 +576,7 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
#ifdef CONFIG_DEBUG_FS
static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
struct drm_device *dev;
int rc;
if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
DPU_ERROR("invalid dpu_kms\n");
return -EINVAL;
}
dev = dpu_kms->dev;
rc = _dpu_debugfs_init(dpu_kms);
if (rc)
DPU_ERROR("dpu_debugfs init failed: %d\n", rc);
return rc;
return _dpu_debugfs_init(to_dpu_kms(kms));
}
#endif
@ -651,12 +599,7 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
dpu_hw_intr_destroy(dpu_kms->hw_intr);
dpu_kms->hw_intr = NULL;
if (dpu_kms->power_event)
dpu_power_handle_unregister_event(
&dpu_kms->phandle, dpu_kms->power_event);
/* safe to call these more than once during shutdown */
_dpu_debugfs_destroy(dpu_kms);
_dpu_kms_mmu_destroy(dpu_kms);
if (dpu_kms->catalog) {
@ -676,11 +619,6 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
dpu_hw_catalog_deinit(dpu_kms->catalog);
dpu_kms->catalog = NULL;
if (dpu_kms->core_client)
dpu_power_client_destroy(&dpu_kms->phandle,
dpu_kms->core_client);
dpu_kms->core_client = NULL;
if (dpu_kms->vbif[VBIF_NRT])
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
dpu_kms->vbif[VBIF_NRT] = NULL;
@ -705,131 +643,9 @@ static void dpu_kms_destroy(struct msm_kms *kms)
dpu_kms = to_dpu_kms(kms);
dpu_dbg_destroy();
_dpu_kms_hw_destroy(dpu_kms);
}
static int dpu_kms_pm_suspend(struct device *dev)
{
struct drm_device *ddev;
struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
struct dpu_kms *dpu_kms;
int ret = 0, num_crtcs = 0;
if (!dev)
return -EINVAL;
ddev = dev_get_drvdata(dev);
if (!ddev || !ddev_to_msm_kms(ddev))
return -EINVAL;
dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
/* disable hot-plug polling */
drm_kms_helper_poll_disable(ddev);
/* acquire modeset lock(s) */
drm_modeset_acquire_init(&ctx, 0);
retry:
DPU_ATRACE_BEGIN("kms_pm_suspend");
ret = drm_modeset_lock_all_ctx(ddev, &ctx);
if (ret)
goto unlock;
/* save current state for resume */
if (dpu_kms->suspend_state)
drm_atomic_state_put(dpu_kms->suspend_state);
dpu_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
if (IS_ERR_OR_NULL(dpu_kms->suspend_state)) {
DRM_ERROR("failed to back up suspend state\n");
dpu_kms->suspend_state = NULL;
goto unlock;
}
/* create atomic state to disable all CRTCs */
state = drm_atomic_state_alloc(ddev);
if (IS_ERR_OR_NULL(state)) {
DRM_ERROR("failed to allocate crtc disable state\n");
goto unlock;
}
state->acquire_ctx = &ctx;
/* check for nothing to do */
if (num_crtcs == 0) {
DRM_DEBUG("all crtcs are already in the off state\n");
drm_atomic_state_put(state);
goto suspended;
}
/* commit the "disable all" state */
ret = drm_atomic_commit(state);
if (ret < 0) {
DRM_ERROR("failed to disable crtcs, %d\n", ret);
drm_atomic_state_put(state);
goto unlock;
}
suspended:
dpu_kms->suspend_block = true;
unlock:
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry;
}
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
DPU_ATRACE_END("kms_pm_suspend");
return 0;
}
static int dpu_kms_pm_resume(struct device *dev)
{
struct drm_device *ddev;
struct dpu_kms *dpu_kms;
int ret;
if (!dev)
return -EINVAL;
ddev = dev_get_drvdata(dev);
if (!ddev || !ddev_to_msm_kms(ddev))
return -EINVAL;
dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
DPU_ATRACE_BEGIN("kms_pm_resume");
drm_mode_config_reset(ddev);
drm_modeset_lock_all(ddev);
dpu_kms->suspend_block = false;
if (dpu_kms->suspend_state) {
dpu_kms->suspend_state->acquire_ctx =
ddev->mode_config.acquire_ctx;
ret = drm_atomic_commit(dpu_kms->suspend_state);
if (ret < 0) {
DRM_ERROR("failed to restore state, %d\n", ret);
drm_atomic_state_put(dpu_kms->suspend_state);
}
dpu_kms->suspend_state = NULL;
}
drm_modeset_unlock_all(ddev);
/* enable hot-plug polling */
drm_kms_helper_poll_enable(ddev);
DPU_ATRACE_END("kms_pm_resume");
return 0;
}
static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
struct drm_encoder *encoder,
bool cmd_mode)
@ -858,10 +674,30 @@ static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
encoder->base.id, rc);
}
static irqreturn_t dpu_irq(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
return dpu_core_irq(dpu_kms);
}
static void dpu_irq_preinstall(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
dpu_core_irq_preinstall(dpu_kms);
}
static void dpu_irq_uninstall(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
dpu_core_irq_uninstall(dpu_kms);
}
static const struct msm_kms_funcs kms_funcs = {
.hw_init = dpu_kms_hw_init,
.irq_preinstall = dpu_irq_preinstall,
.irq_postinstall = dpu_irq_postinstall,
.irq_uninstall = dpu_irq_uninstall,
.irq = dpu_irq,
.prepare_commit = dpu_kms_prepare_commit,
@ -873,8 +709,6 @@ static const struct msm_kms_funcs kms_funcs = {
.check_modified_format = dpu_format_check_modified_format,
.get_format = dpu_get_msm_format,
.round_pixclk = dpu_kms_round_pixclk,
.pm_suspend = dpu_kms_pm_suspend,
.pm_resume = dpu_kms_pm_resume,
.destroy = dpu_kms_destroy,
.set_encoder_mode = _dpu_kms_set_encoder_mode,
#ifdef CONFIG_DEBUG_FS
@ -882,12 +716,6 @@ static const struct msm_kms_funcs kms_funcs = {
#endif
};
/* the caller api needs to turn on clock before calling it */
static inline void _dpu_kms_core_hw_rev_init(struct dpu_kms *dpu_kms)
{
dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
}
static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
{
struct msm_mmu *mmu;
@ -911,6 +739,9 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
if (!domain)
return 0;
domain->geometry.aperture_start = 0x1000;
domain->geometry.aperture_end = 0xffffffff;
aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
domain, "dpu1");
if (IS_ERR(aspace)) {
@ -960,16 +791,6 @@ u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
return clk_get_rate(clk->clk);
}
static void dpu_kms_handle_power_event(u32 event_type, void *usr)
{
struct dpu_kms *dpu_kms = usr;
if (!dpu_kms)
return;
dpu_vbif_init_memtypes(dpu_kms);
}
static int dpu_kms_hw_init(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms;
@ -979,26 +800,20 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
if (!kms) {
DPU_ERROR("invalid kms\n");
goto end;
return rc;
}
dpu_kms = to_dpu_kms(kms);
dev = dpu_kms->dev;
if (!dev) {
DPU_ERROR("invalid device\n");
goto end;
}
rc = dpu_dbg_init(&dpu_kms->pdev->dev);
if (rc) {
DRM_ERROR("failed to init dpu dbg: %d\n", rc);
goto end;
return rc;
}
priv = dev->dev_private;
if (!priv) {
DPU_ERROR("invalid private data\n");
goto dbg_destroy;
return rc;
}
dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
@ -1036,20 +851,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
}
dpu_kms->core_client = dpu_power_client_create(&dpu_kms->phandle,
"core");
if (IS_ERR_OR_NULL(dpu_kms->core_client)) {
rc = PTR_ERR(dpu_kms->core_client);
if (!dpu_kms->core_client)
rc = -EINVAL;
DPU_ERROR("dpu power client create failed: %d\n", rc);
dpu_kms->core_client = NULL;
goto error;
}
pm_runtime_get_sync(&dpu_kms->pdev->dev);
_dpu_kms_core_hw_rev_init(dpu_kms);
dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);
@ -1063,8 +867,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
goto power_error;
}
dpu_dbg_init_dbg_buses(dpu_kms->core_rev);
/*
* Now we need to read the HW catalog and initialize resources such as
* clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
@ -1110,7 +912,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
}
rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
&dpu_kms->phandle,
_dpu_kms_get_clk(dpu_kms, "core"));
if (rc) {
DPU_ERROR("failed to init perf %d\n", rc);
@ -1151,13 +952,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
*/
dev->mode_config.allow_fb_modifiers = true;
/*
* Handle (re)initializations during power enable
*/
dpu_kms_handle_power_event(DPU_POWER_EVENT_ENABLE, dpu_kms);
dpu_kms->power_event = dpu_power_handle_register_event(
&dpu_kms->phandle, DPU_POWER_EVENT_ENABLE,
dpu_kms_handle_power_event, dpu_kms, "kms");
dpu_vbif_init_memtypes(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
@ -1171,9 +966,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
pm_runtime_put_sync(&dpu_kms->pdev->dev);
error:
_dpu_kms_hw_destroy(dpu_kms);
dbg_destroy:
dpu_dbg_destroy();
end:
return rc;
}
@ -1221,8 +1014,6 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
return ret;
}
dpu_power_resource_init(pdev, &dpu_kms->phandle);
platform_set_drvdata(pdev, dpu_kms);
msm_kms_init(&dpu_kms->base, &kms_funcs);
@ -1242,7 +1033,6 @@ static void dpu_unbind(struct device *dev, struct device *master, void *data)
struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
struct dss_module_power *mp = &dpu_kms->mp;
dpu_power_resource_deinit(pdev, &dpu_kms->phandle);
msm_dss_put_clk(mp->clk_config, mp->num_clk);
devm_kfree(&pdev->dev, mp->clk_config);
mp->num_clk = 0;
@ -1278,19 +1068,13 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
ddev = dpu_kms->dev;
if (!ddev) {
DPU_ERROR("invalid drm_device\n");
goto exit;
return rc;
}
rc = dpu_power_resource_enable(&dpu_kms->phandle,
dpu_kms->core_client, false);
if (rc)
DPU_ERROR("resource disable failed: %d\n", rc);
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
if (rc)
DPU_ERROR("clock disable failed rc:%d\n", rc);
exit:
return rc;
}
@ -1299,27 +1083,27 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
int rc = -1;
struct platform_device *pdev = to_platform_device(dev);
struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
struct drm_encoder *encoder;
struct drm_device *ddev;
struct dss_module_power *mp = &dpu_kms->mp;
ddev = dpu_kms->dev;
if (!ddev) {
DPU_ERROR("invalid drm_device\n");
goto exit;
return rc;
}
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
if (rc) {
DPU_ERROR("clock enable failed rc:%d\n", rc);
goto exit;
return rc;
}
rc = dpu_power_resource_enable(&dpu_kms->phandle,
dpu_kms->core_client, true);
if (rc)
DPU_ERROR("resource enable failed: %d\n", rc);
dpu_vbif_init_memtypes(dpu_kms);
drm_for_each_encoder(encoder, ddev)
dpu_encoder_virt_runtime_resume(encoder);
exit:
return rc;
}

View file

@ -23,15 +23,13 @@
#include "msm_kms.h"
#include "msm_mmu.h"
#include "msm_gem.h"
#include "dpu_dbg.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_top.h"
#include "dpu_io_util.h"
#include "dpu_rm.h"
#include "dpu_power_handle.h"
#include "dpu_irq.h"
#include "dpu_core_perf.h"
#define DRMID(x) ((x) ? (x)->base.id : -1)
@ -104,7 +102,6 @@ struct dpu_irq {
atomic_t *enable_counts;
atomic_t *irq_counts;
spinlock_t cb_lock;
struct dentry *debugfs_file;
};
struct dpu_kms {
@ -113,15 +110,6 @@ struct dpu_kms {
int core_rev;
struct dpu_mdss_cfg *catalog;
struct dpu_power_handle phandle;
struct dpu_power_client *core_client;
struct dpu_power_event *power_event;
/* directory entry for debugfs */
struct dentry *debugfs_root;
struct dentry *debugfs_danger;
struct dentry *debugfs_vbif;
/* io/register spaces: */
void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
unsigned long mmio_len, vbif_len[VBIF_MAX], reg_dma_len;
@ -135,10 +123,6 @@ struct dpu_kms {
struct dpu_core_perf perf;
/* saved atomic state during system suspend */
struct drm_atomic_state *suspend_state;
bool suspend_block;
struct dpu_rm rm;
bool rm_init;
@ -163,33 +147,6 @@ struct vsync_info {
#define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? \
((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
/**
* dpu_kms_is_suspend_state - whether or not the system is pm suspended
* @dev: Pointer to drm device
* Return: Suspend status
*/
static inline bool dpu_kms_is_suspend_state(struct drm_device *dev)
{
if (!ddev_to_msm_kms(dev))
return false;
return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_state != NULL;
}
/**
* dpu_kms_is_suspend_blocked - whether or not commits are blocked due to pm
* suspend status
* @dev: Pointer to drm device
* Return: True if commits should be rejected due to pm suspend
*/
static inline bool dpu_kms_is_suspend_blocked(struct drm_device *dev)
{
if (!dpu_kms_is_suspend_state(dev))
return false;
return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_block;
}
/**
* Debugfs functions - extra helper functions for debugfs support
*

View file

@ -9,6 +9,11 @@
#define HW_INTR_STATUS 0x0010
struct dpu_irq_controller {
unsigned long enabled_mask;
struct irq_domain *domain;
};
struct dpu_mdss {
struct msm_mdss base;
void __iomem *mmio;
@ -115,13 +120,12 @@ static int _dpu_mdss_irq_domain_add(struct dpu_mdss *dpu_mdss)
return 0;
}
static int _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
static void _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
{
if (dpu_mdss->irq_controller.domain) {
irq_domain_remove(dpu_mdss->irq_controller.domain);
dpu_mdss->irq_controller.domain = NULL;
}
return 0;
}
static int dpu_mdss_enable(struct msm_mdss *mdss)
{
@ -156,18 +160,16 @@ static void dpu_mdss_destroy(struct drm_device *dev)
struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
pm_runtime_suspend(dev->dev);
pm_runtime_disable(dev->dev);
_dpu_mdss_irq_domain_fini(dpu_mdss);
free_irq(platform_get_irq(pdev, 0), dpu_mdss);
msm_dss_put_clk(mp->clk_config, mp->num_clk);
devm_kfree(&pdev->dev, mp->clk_config);
if (dpu_mdss->mmio)
devm_iounmap(&pdev->dev, dpu_mdss->mmio);
dpu_mdss->mmio = NULL;
pm_runtime_disable(dev->dev);
priv->mdss = NULL;
}

View file

@ -137,7 +137,7 @@ static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
* @src_width: width of source buffer
* Return: fill level corresponding to the source buffer/format or 0 if error
*/
static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane,
static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
const struct dpu_format *fmt, u32 src_width)
{
struct dpu_plane *pdpu, *tmp;
@ -430,24 +430,14 @@ static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
}
/**
* _dpu_plane_get_aspace: gets the address space
*/
static inline struct msm_gem_address_space *_dpu_plane_get_aspace(
struct dpu_plane *pdpu)
{
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
return kms->base.aspace;
}
static inline void _dpu_plane_set_scanout(struct drm_plane *plane,
static void _dpu_plane_set_scanout(struct drm_plane *plane,
struct dpu_plane_state *pstate,
struct dpu_hw_pipe_cfg *pipe_cfg,
struct drm_framebuffer *fb)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct msm_gem_address_space *aspace = _dpu_plane_get_aspace(pdpu);
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
struct msm_gem_address_space *aspace = kms->base.aspace;
int ret;
ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout);
@ -525,7 +515,7 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
scale_cfg->enable = 1;
}
static inline void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
static void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
{
static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
{
@ -801,7 +791,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct drm_gem_object *obj;
struct msm_gem_object *msm_obj;
struct dma_fence *fence;
struct msm_gem_address_space *aspace = _dpu_plane_get_aspace(pdpu);
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
int ret;
if (!new_state->fb)
@ -810,7 +800,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
/* cache aspace */
pstate->aspace = aspace;
pstate->aspace = kms->base.aspace;
/*
* TODO: Need to sort out the msm_framebuffer_prepare() call below so
@ -1191,19 +1181,8 @@ static void dpu_plane_destroy(struct drm_plane *plane)
static void dpu_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct dpu_plane_state *pstate;
if (!plane || !state) {
DPU_ERROR("invalid arg(s), plane %d state %d\n",
plane != 0, state != 0);
return;
}
pstate = to_dpu_plane_state(state);
__drm_atomic_helper_plane_destroy_state(state);
kfree(pstate);
kfree(to_dpu_plane_state(state));
}
static struct drm_plane_state *
@ -1273,26 +1252,12 @@ static ssize_t _dpu_plane_danger_read(struct file *file,
char __user *buff, size_t count, loff_t *ppos)
{
struct dpu_kms *kms = file->private_data;
struct dpu_mdss_cfg *cfg = kms->catalog;
int len = 0;
char buf[40] = {'\0'};
int len;
char buf[40];
if (!cfg)
return -ENODEV;
len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
if (*ppos)
return 0; /* the end */
len = snprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
if (len < 0 || len >= sizeof(buf))
return 0;
if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
return -EFAULT;
*ppos += len; /* increase offset */
return len;
return simple_read_from_buffer(buff, count, ppos, buf, len);
}
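simple_read_from_buffer() takes over the *ppos and copy_to_user() bookkeeping that the removed code did by hand; the resulting idiom in full (example_danger_read() mirrors the function above):

	/* Format into a small stack buffer, let the helper handle offsets. */
	static ssize_t example_danger_read(struct file *file, char __user *buff,
					   size_t count, loff_t *ppos)
	{
		struct dpu_kms *kms = file->private_data;
		char buf[40];
		int len = scnprintf(buf, sizeof(buf), "%d\n",
				    !kms->has_danger_ctrl);

		return simple_read_from_buffer(buff, count, ppos, buf, len);
	}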
static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
@ -1322,23 +1287,12 @@ static ssize_t _dpu_plane_danger_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
struct dpu_kms *kms = file->private_data;
struct dpu_mdss_cfg *cfg = kms->catalog;
int disable_panic;
char buf[10];
int ret;
if (!cfg)
return -EFAULT;
if (count >= sizeof(buf))
return -EFAULT;
if (copy_from_user(buf, user_buf, count))
return -EFAULT;
buf[count] = 0; /* end of string */
if (kstrtoint(buf, 0, &disable_panic))
return -EFAULT;
ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
if (ret)
return ret;
if (disable_panic) {
/* Disable panic signal for all active pipes */
@ -1363,33 +1317,10 @@ static const struct file_operations dpu_plane_danger_enable = {
static int _dpu_plane_init_debugfs(struct drm_plane *plane)
{
struct dpu_plane *pdpu;
struct dpu_kms *kms;
struct msm_drm_private *priv;
const struct dpu_sspp_sub_blks *sblk = 0;
const struct dpu_sspp_cfg *cfg = 0;
if (!plane || !plane->dev) {
DPU_ERROR("invalid arguments\n");
return -EINVAL;
}
priv = plane->dev->dev_private;
if (!priv || !priv->kms) {
DPU_ERROR("invalid KMS reference\n");
return -EINVAL;
}
kms = to_dpu_kms(priv->kms);
pdpu = to_dpu_plane(plane);
if (pdpu && pdpu->pipe_hw)
cfg = pdpu->pipe_hw->cap;
if (cfg)
sblk = cfg->sblk;
if (!sblk)
return 0;
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_kms *kms = _dpu_plane_get_kms(plane);
const struct dpu_sspp_cfg *cfg = pdpu->pipe_hw->cap;
const struct dpu_sspp_sub_blks *sblk = cfg->sblk;
/* create overall sub-directory for the pipe */
pdpu->debugfs_root =
@ -1460,25 +1391,11 @@ static int _dpu_plane_init_debugfs(struct drm_plane *plane)
return 0;
}
static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
{
struct dpu_plane *pdpu;
if (!plane)
return;
pdpu = to_dpu_plane(plane);
debugfs_remove_recursive(pdpu->debugfs_root);
}
#else
static int _dpu_plane_init_debugfs(struct drm_plane *plane)
{
return 0;
}
static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
{
}
#endif
static int dpu_plane_late_register(struct drm_plane *plane)
@ -1488,7 +1405,9 @@ static int dpu_plane_late_register(struct drm_plane *plane)
static void dpu_plane_early_unregister(struct drm_plane *plane)
{
_dpu_plane_destroy_debugfs(plane);
struct dpu_plane *pdpu = to_dpu_plane(plane);
debugfs_remove_recursive(pdpu->debugfs_root);
}
static const struct drm_plane_funcs dpu_plane_funcs = {
@ -1537,7 +1456,7 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
if (!pdpu) {
DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe);
ret = -ENOMEM;
goto exit;
return ERR_PTR(ret);
}
/* cache local stuff for later */
@ -1623,6 +1542,5 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
dpu_hw_sspp_destroy(pdpu->pipe_hw);
clean_plane:
kfree(pdpu);
exit:
return ERR_PTR(ret);
}

View file

@ -1,240 +0,0 @@
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) "[drm:%s:%d]: " fmt, __func__, __LINE__
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/string.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include "dpu_power_handle.h"
#include "dpu_trace.h"
static const char *data_bus_name[DPU_POWER_HANDLE_DBUS_ID_MAX] = {
[DPU_POWER_HANDLE_DBUS_ID_MNOC] = "qcom,dpu-data-bus",
[DPU_POWER_HANDLE_DBUS_ID_LLCC] = "qcom,dpu-llcc-bus",
[DPU_POWER_HANDLE_DBUS_ID_EBI] = "qcom,dpu-ebi-bus",
};
const char *dpu_power_handle_get_dbus_name(u32 bus_id)
{
if (bus_id < DPU_POWER_HANDLE_DBUS_ID_MAX)
return data_bus_name[bus_id];
return NULL;
}
static void dpu_power_event_trigger_locked(struct dpu_power_handle *phandle,
u32 event_type)
{
struct dpu_power_event *event;
list_for_each_entry(event, &phandle->event_list, list) {
if (event->event_type & event_type)
event->cb_fnc(event_type, event->usr);
}
}
struct dpu_power_client *dpu_power_client_create(
struct dpu_power_handle *phandle, char *client_name)
{
struct dpu_power_client *client;
static u32 id;
if (!client_name || !phandle) {
pr_err("client name is null or invalid power data\n");
return ERR_PTR(-EINVAL);
}
client = kzalloc(sizeof(struct dpu_power_client), GFP_KERNEL);
if (!client)
return ERR_PTR(-ENOMEM);
mutex_lock(&phandle->phandle_lock);
strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
client->usecase_ndx = VOTE_INDEX_DISABLE;
client->id = id;
client->active = true;
pr_debug("client %s created:%pK id :%d\n", client_name,
client, id);
id++;
list_add(&client->list, &phandle->power_client_clist);
mutex_unlock(&phandle->phandle_lock);
return client;
}
void dpu_power_client_destroy(struct dpu_power_handle *phandle,
struct dpu_power_client *client)
{
if (!client || !phandle) {
pr_err("reg bus vote: invalid client handle\n");
} else if (!client->active) {
pr_err("dpu power deinit already done\n");
kfree(client);
} else {
pr_debug("bus vote client %s destroyed:%pK id:%u\n",
client->name, client, client->id);
mutex_lock(&phandle->phandle_lock);
list_del_init(&client->list);
mutex_unlock(&phandle->phandle_lock);
kfree(client);
}
}
void dpu_power_resource_init(struct platform_device *pdev,
struct dpu_power_handle *phandle)
{
phandle->dev = &pdev->dev;
INIT_LIST_HEAD(&phandle->power_client_clist);
INIT_LIST_HEAD(&phandle->event_list);
mutex_init(&phandle->phandle_lock);
}
void dpu_power_resource_deinit(struct platform_device *pdev,
struct dpu_power_handle *phandle)
{
struct dpu_power_client *curr_client, *next_client;
struct dpu_power_event *curr_event, *next_event;
if (!phandle || !pdev) {
pr_err("invalid input param\n");
return;
}
mutex_lock(&phandle->phandle_lock);
list_for_each_entry_safe(curr_client, next_client,
&phandle->power_client_clist, list) {
pr_err("client:%s-%d still registered with refcount:%d\n",
curr_client->name, curr_client->id,
curr_client->refcount);
curr_client->active = false;
list_del(&curr_client->list);
}
list_for_each_entry_safe(curr_event, next_event,
&phandle->event_list, list) {
pr_err("event:%d, client:%s still registered\n",
curr_event->event_type,
curr_event->client_name);
curr_event->active = false;
list_del(&curr_event->list);
}
mutex_unlock(&phandle->phandle_lock);
}
int dpu_power_resource_enable(struct dpu_power_handle *phandle,
struct dpu_power_client *pclient, bool enable)
{
bool changed = false;
u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
struct dpu_power_client *client;
u32 event_type;
if (!phandle || !pclient) {
pr_err("invalid input argument\n");
return -EINVAL;
}
mutex_lock(&phandle->phandle_lock);
if (enable)
pclient->refcount++;
else if (pclient->refcount)
pclient->refcount--;
if (pclient->refcount)
pclient->usecase_ndx = VOTE_INDEX_LOW;
else
pclient->usecase_ndx = VOTE_INDEX_DISABLE;
list_for_each_entry(client, &phandle->power_client_clist, list) {
if (client->usecase_ndx < VOTE_INDEX_MAX &&
client->usecase_ndx > max_usecase_ndx)
max_usecase_ndx = client->usecase_ndx;
}
if (phandle->current_usecase_ndx != max_usecase_ndx) {
changed = true;
prev_usecase_ndx = phandle->current_usecase_ndx;
phandle->current_usecase_ndx = max_usecase_ndx;
}
pr_debug("%pS: changed=%d current idx=%d request client %s id:%u enable:%d refcount:%d\n",
__builtin_return_address(0), changed, max_usecase_ndx,
pclient->name, pclient->id, enable, pclient->refcount);
if (!changed)
goto end;
event_type = enable ? DPU_POWER_EVENT_ENABLE : DPU_POWER_EVENT_DISABLE;
dpu_power_event_trigger_locked(phandle, event_type);
end:
mutex_unlock(&phandle->phandle_lock);
return 0;
}
struct dpu_power_event *dpu_power_handle_register_event(
struct dpu_power_handle *phandle,
u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
void *usr, char *client_name)
{
struct dpu_power_event *event;
if (!phandle) {
pr_err("invalid power handle\n");
return ERR_PTR(-EINVAL);
} else if (!cb_fnc || !event_type) {
pr_err("no callback fnc or event type\n");
return ERR_PTR(-EINVAL);
}
event = kzalloc(sizeof(struct dpu_power_event), GFP_KERNEL);
if (!event)
return ERR_PTR(-ENOMEM);
event->event_type = event_type;
event->cb_fnc = cb_fnc;
event->usr = usr;
strlcpy(event->client_name, client_name, MAX_CLIENT_NAME_LEN);
event->active = true;
mutex_lock(&phandle->phandle_lock);
list_add(&event->list, &phandle->event_list);
mutex_unlock(&phandle->phandle_lock);
return event;
}
void dpu_power_handle_unregister_event(
struct dpu_power_handle *phandle,
struct dpu_power_event *event)
{
if (!phandle || !event) {
pr_err("invalid phandle or event\n");
} else if (!event->active) {
pr_err("power handle deinit already done\n");
kfree(event);
} else {
mutex_lock(&phandle->phandle_lock);
list_del_init(&event->list);
mutex_unlock(&phandle->phandle_lock);
kfree(event);
}
}
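Everything this file provided is replaced by runtime PM plus direct calls: the ENABLE event that used to reprogram VBIF becomes an explicit dpu_vbif_init_memtypes() in dpu_runtime_resume() above. The resulting pattern, in sketch form:

	/* Power is now scoped by runtime PM instead of power-handle clients. */
	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	/* ... touch hardware, e.g. dpu_vbif_init_memtypes(dpu_kms); ... */
	pm_runtime_put_sync(&dpu_kms->pdev->dev);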

View file

@ -1,217 +0,0 @@
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _DPU_POWER_HANDLE_H_
#define _DPU_POWER_HANDLE_H_
#define MAX_CLIENT_NAME_LEN 128
#define DPU_POWER_HANDLE_ENABLE_BUS_AB_QUOTA 0
#define DPU_POWER_HANDLE_DISABLE_BUS_AB_QUOTA 0
#define DPU_POWER_HANDLE_ENABLE_BUS_IB_QUOTA 1600000000
#define DPU_POWER_HANDLE_DISABLE_BUS_IB_QUOTA 0
#include "dpu_io_util.h"
/* events will be triggered on power handler enable/disable */
#define DPU_POWER_EVENT_DISABLE BIT(0)
#define DPU_POWER_EVENT_ENABLE BIT(1)
/**
* mdss_bus_vote_type: register bus vote type
* VOTE_INDEX_DISABLE: removes the client vote
* VOTE_INDEX_LOW: keeps the lowest vote for register bus
* VOTE_INDEX_MAX: invalid
*/
enum mdss_bus_vote_type {
VOTE_INDEX_DISABLE,
VOTE_INDEX_LOW,
VOTE_INDEX_MAX,
};
/**
* enum dpu_power_handle_data_bus_client - type of axi bus clients
* @DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT: core real-time bus client
* @DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT: core non-real-time bus client
* @DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX: maximum number of bus client type
*/
enum dpu_power_handle_data_bus_client {
DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT,
DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX
};
/**
* enum DPU_POWER_HANDLE_DBUS_ID - data bus identifier
* @DPU_POWER_HANDLE_DBUS_ID_MNOC: DPU/MNOC data bus
* @DPU_POWER_HANDLE_DBUS_ID_LLCC: MNOC/LLCC data bus
* @DPU_POWER_HANDLE_DBUS_ID_EBI: LLCC/EBI data bus
*/
enum DPU_POWER_HANDLE_DBUS_ID {
DPU_POWER_HANDLE_DBUS_ID_MNOC,
DPU_POWER_HANDLE_DBUS_ID_LLCC,
DPU_POWER_HANDLE_DBUS_ID_EBI,
DPU_POWER_HANDLE_DBUS_ID_MAX,
};
/**
* struct dpu_power_client: stores the power client for dpu driver
* @name: name of the client
* @usecase_ndx: current regs bus vote type
* @refcount: current refcount if multiple modules are using the
* same client for enable/disable. The power module will
* aggregate the refcount and vote accordingly for this
* client.
* @id: assigned during create; helps with debugging.
* @list: list to attach power handle master list
* @ab: arbitrated bandwidth for each bus client
* @ib: instantaneous bandwidth for each bus client
* @active: indicates the state of dpu power handle
*/
struct dpu_power_client {
char name[MAX_CLIENT_NAME_LEN];
short usecase_ndx;
short refcount;
u32 id;
struct list_head list;
u64 ab[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
u64 ib[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
bool active;
};
/*
* struct dpu_power_event - local event registration structure
* @client_name: name of the client registering
* @cb_fnc: pointer to desired callback function
* @usr: user pointer to pass to callback event trigger
* @event_type: refer to DPU_POWER_EVENT_*
* @list: list to attach event master list
* @active: indicates the state of dpu power handle
*/
struct dpu_power_event {
char client_name[MAX_CLIENT_NAME_LEN];
void (*cb_fnc)(u32 event_type, void *usr);
void *usr;
u32 event_type;
struct list_head list;
bool active;
};
/**
* struct dpu_power_handle: power handle main struct
* @power_client_clist: master list to store all clients
* @phandle_lock: lock to synchronize the enable/disable
* @dev: pointer to device structure
* @current_usecase_ndx: current usecase index
* @event_list: current power handle event list
*/
struct dpu_power_handle {
struct list_head power_client_clist;
struct mutex phandle_lock;
struct device *dev;
u32 current_usecase_ndx;
struct list_head event_list;
};
/**
* dpu_power_resource_init() - initializes the dpu power handle
* @pdev: platform device to search the power resources
* @pdata: power handle to store the power resources
*/
void dpu_power_resource_init(struct platform_device *pdev,
struct dpu_power_handle *pdata);
/**
* dpu_power_resource_deinit() - release the dpu power handle
* @pdev: platform device for power resources
* @pdata: power handle containing the resources
*
* Return: none
*/
void dpu_power_resource_deinit(struct platform_device *pdev,
struct dpu_power_handle *pdata);
/**
* dpu_power_client_create() - create the client on power handle
* @pdata: power handle containing the resources
* @client_name: new client name for registration
*
* Return: client handle on success, or an error pointer on failure.
*/
struct dpu_power_client *dpu_power_client_create(struct dpu_power_handle *pdata,
char *client_name);
/**
* dpu_power_client_destroy() - destroy the client on power handle
* @phandle: power handle containing the resources
* @client: client to be destroyed
*
* Return: none
*/
void dpu_power_client_destroy(struct dpu_power_handle *phandle,
struct dpu_power_client *client);
/**
* dpu_power_resource_enable() - enable/disable the power resources
* @pdata: power handle containing the resources
* @client: client information to enable/disable its vote
* @enable: boolean request for enable/disable
*
* Return: error code.
*/
int dpu_power_resource_enable(struct dpu_power_handle *pdata,
struct dpu_power_client *pclient, bool enable);
/**
* dpu_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable
* @phandle: power handle containing the resources
* @client: client information to bandwidth control
* @enable: true to enable bandwidth for data bus
*
* Return: none
*/
void dpu_power_data_bus_bandwidth_ctrl(struct dpu_power_handle *phandle,
struct dpu_power_client *pclient, int enable);
/**
* dpu_power_handle_register_event - register a callback function for an event.
* Clients can register for multiple events with a single call.
* Any block with access to phandle can register for the event
* notification.
* @phandle: power handle containing the resources
* @event_type: event type to register; refer to DPU_POWER_EVENT_*
* @cb_fnc: pointer to desired callback function
* @usr: user pointer to pass to callback on event trigger
*
* Return: event pointer on success, or an error pointer otherwise
*/
struct dpu_power_event *dpu_power_handle_register_event(
struct dpu_power_handle *phandle,
u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
void *usr, char *client_name);
/**
* dpu_power_handle_unregister_event - unregister callback for event(s)
* @phandle: power handle containing the resources
* @event: event pointer returned after power handle register
*/
void dpu_power_handle_unregister_event(struct dpu_power_handle *phandle,
struct dpu_power_event *event);
/**
* dpu_power_handle_get_dbus_name - get name of given data bus identifier
* @bus_id: data bus identifier
* Return: Pointer to name string if success; NULL otherwise
*/
const char *dpu_power_handle_get_dbus_name(u32 bus_id);
#endif /* _DPU_POWER_HANDLE_H_ */
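
The hunk above removes dpu_power_handle.h outright. For reference, a minimal sketch of how the event API it declared was consumed — the callback and wrapper names below are hypothetical; only the register/unregister calls and event flags come from the header:

static void example_power_event_cb(u32 event_type, void *usr)
{
	/* usr is the pointer that was passed at registration time */
	if (event_type & DPU_POWER_EVENT_ENABLE)
		pr_debug("power resources enabled\n");
	else if (event_type & DPU_POWER_EVENT_DISABLE)
		pr_debug("power resources disabled\n");
}

static int example_register(struct dpu_power_handle *phandle, void *priv)
{
	struct dpu_power_event *event;

	/* one call can subscribe to both enable and disable events */
	event = dpu_power_handle_register_event(phandle,
			DPU_POWER_EVENT_ENABLE | DPU_POWER_EVENT_DISABLE,
			example_power_event_cb, priv, "example_client");
	if (IS_ERR(event))
		return PTR_ERR(event);

	/* ... and the matching teardown frees the event internally */
	dpu_power_handle_unregister_event(phandle, event);
	return 0;
}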

View file

@ -99,27 +99,6 @@ TRACE_EVENT(dpu_perf_set_ot,
__entry->vbif_idx)
)
TRACE_EVENT(dpu_perf_update_bus,
TP_PROTO(int client, unsigned long long ab_quota,
unsigned long long ib_quota),
TP_ARGS(client, ab_quota, ib_quota),
TP_STRUCT__entry(
__field(int, client)
__field(u64, ab_quota)
__field(u64, ib_quota)
),
TP_fast_assign(
__entry->client = client;
__entry->ab_quota = ab_quota;
__entry->ib_quota = ib_quota;
),
TP_printk("Request client:%d ab=%llu ib=%llu",
__entry->client,
__entry->ab_quota,
__entry->ib_quota)
)
TRACE_EVENT(dpu_cmd_release_bw,
TP_PROTO(u32 crtc_id),
TP_ARGS(crtc_id),
@ -319,6 +298,10 @@ DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_wait_for_commit_done,
TP_PROTO(uint32_t drm_id),
TP_ARGS(drm_id)
);
DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_runtime_resume,
TP_PROTO(uint32_t drm_id),
TP_ARGS(drm_id)
);
TRACE_EVENT(dpu_enc_enable,
TP_PROTO(uint32_t drm_id, int hdisplay, int vdisplay),
@ -539,10 +522,6 @@ DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_cb,
TP_PROTO(uint32_t drm_id, u32 event),
TP_ARGS(drm_id, event)
);
DEFINE_EVENT(dpu_id_event_template, dpu_crtc_handle_power_event,
TP_PROTO(uint32_t drm_id, u32 event),
TP_ARGS(drm_id, event)
);
DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_done,
TP_PROTO(uint32_t drm_id, u32 event),
TP_ARGS(drm_id, event)
@ -749,24 +728,17 @@ TRACE_EVENT(dpu_crtc_vblank_enable,
__field( uint32_t, enc_id )
__field( bool, enable )
__field( bool, enabled )
__field( bool, suspend )
__field( bool, vblank_requested )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->enc_id = enc_id;
__entry->enable = enable;
__entry->enabled = crtc->enabled;
__entry->suspend = crtc->suspend;
__entry->vblank_requested = crtc->vblank_requested;
),
TP_printk("id:%u encoder:%u enable:%s state{enabled:%s suspend:%s "
"vblank_req:%s}",
TP_printk("id:%u encoder:%u enable:%s state{enabled:%s}",
__entry->drm_id, __entry->enc_id,
__entry->enable ? "true" : "false",
__entry->enabled ? "true" : "false",
__entry->suspend ? "true" : "false",
__entry->vblank_requested ? "true" : "false")
__entry->enabled ? "true" : "false")
);
DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
@ -776,25 +748,15 @@ DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
__field( uint32_t, drm_id )
__field( bool, enable )
__field( bool, enabled )
__field( bool, suspend )
__field( bool, vblank_requested )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->enable = enable;
__entry->enabled = crtc->enabled;
__entry->suspend = crtc->suspend;
__entry->vblank_requested = crtc->vblank_requested;
),
TP_printk("id:%u enable:%s state{enabled:%s suspend:%s vblank_req:%s}",
TP_printk("id:%u enable:%s state{enabled:%s}",
__entry->drm_id, __entry->enable ? "true" : "false",
__entry->enabled ? "true" : "false",
__entry->suspend ? "true" : "false",
__entry->vblank_requested ? "true" : "false")
);
DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_set_suspend,
TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
TP_ARGS(drm_id, enable, crtc)
__entry->enabled ? "true" : "false")
);
DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_enable,
TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
@ -1004,6 +966,53 @@ TRACE_EVENT(dpu_core_perf_update_clk,
__entry->stop_req ? "true" : "false", __entry->clk_rate)
);
TRACE_EVENT(dpu_hw_ctl_update_pending_flush,
TP_PROTO(u32 new_bits, u32 pending_mask),
TP_ARGS(new_bits, pending_mask),
TP_STRUCT__entry(
__field( u32, new_bits )
__field( u32, pending_mask )
),
TP_fast_assign(
__entry->new_bits = new_bits;
__entry->pending_mask = pending_mask;
),
TP_printk("new=%x existing=%x", __entry->new_bits,
__entry->pending_mask)
);
DECLARE_EVENT_CLASS(dpu_hw_ctl_pending_flush_template,
TP_PROTO(u32 pending_mask, u32 ctl_flush),
TP_ARGS(pending_mask, ctl_flush),
TP_STRUCT__entry(
__field( u32, pending_mask )
__field( u32, ctl_flush )
),
TP_fast_assign(
__entry->pending_mask = pending_mask;
__entry->ctl_flush = ctl_flush;
),
TP_printk("pending_mask=%x CTL_FLUSH=%x", __entry->pending_mask,
__entry->ctl_flush)
);
DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_clear_pending_flush,
TP_PROTO(u32 pending_mask, u32 ctl_flush),
TP_ARGS(pending_mask, ctl_flush)
);
DEFINE_EVENT(dpu_hw_ctl_pending_flush_template,
dpu_hw_ctl_trigger_pending_flush,
TP_PROTO(u32 pending_mask, u32 ctl_flush),
TP_ARGS(pending_mask, ctl_flush)
);
DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_trigger_prepare,
TP_PROTO(u32 pending_mask, u32 ctl_flush),
TP_ARGS(pending_mask, ctl_flush)
);
DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_trigger_start,
TP_PROTO(u32 pending_mask, u32 ctl_flush),
TP_ARGS(pending_mask, ctl_flush)
);
#define DPU_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
#define DPU_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
#define DPU_ATRACE_FUNC() DPU_ATRACE_BEGIN(__func__)
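
The DPU_ATRACE_* helpers above write systrace-style begin/end markers via the tracing_mark_write event, keyed by the current thread group id. A minimal sketch of the intended bracketing (the function names are hypothetical):

static void example_commit_work(void)
{
	DPU_ATRACE_BEGIN("example_commit");	/* opens a named slice */
	/* ... the work being profiled ... */
	DPU_ATRACE_END("example_commit");	/* closes the same slice */
}

static void example_traced(void)
{
	/* expands to DPU_ATRACE_BEGIN(__func__); note there is no
	 * automatic end, so pair it with DPU_ATRACE_END(__func__) */
	DPU_ATRACE_FUNC();
	/* ... */
	DPU_ATRACE_END(__func__);
}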

View file

@ -191,7 +191,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;
if (ot_lim == 0)
goto exit;
return;
trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
params->vbif_idx);
@ -210,8 +210,6 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
if (forced_on)
mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
exit:
return;
}
void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
@ -312,31 +310,25 @@ void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
}
#ifdef CONFIG_DEBUG_FS
void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
{
debugfs_remove_recursive(dpu_kms->debugfs_vbif);
dpu_kms->debugfs_vbif = NULL;
}
int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
{
char vbif_name[32];
struct dentry *debugfs_vbif;
struct dentry *entry, *debugfs_vbif;
int i, j;
dpu_kms->debugfs_vbif = debugfs_create_dir("vbif", debugfs_root);
if (!dpu_kms->debugfs_vbif) {
DPU_ERROR("failed to create vbif debugfs\n");
return -EINVAL;
}
entry = debugfs_create_dir("vbif", debugfs_root);
if (IS_ERR_OR_NULL(entry))
return;
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
debugfs_vbif = debugfs_create_dir(vbif_name,
dpu_kms->debugfs_vbif);
debugfs_vbif = debugfs_create_dir(vbif_name, entry);
if (IS_ERR_OR_NULL(debugfs_vbif))
continue;
debugfs_create_u32("features", 0600, debugfs_vbif,
(u32 *)&vbif->features);
@ -378,7 +370,5 @@ int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
(u32 *)&cfg->ot_limit);
}
}
return 0;
}
#endif

View file

@ -78,17 +78,6 @@ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms);
*/
void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms);
#ifdef CONFIG_DEBUG_FS
int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms);
#else
static inline int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms,
struct dentry *debugfs_root)
{
return 0;
}
static inline void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
{
}
#endif
void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
#endif /* __DPU_VBIF_H__ */
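
With the refactor above, dpu_debugfs_vbif_init() is declared unconditionally and returns void: debugfs setup is treated as best-effort, so failures are skipped internally rather than propagated. A minimal sketch of a caller under that convention (the surrounding function is hypothetical):

static int example_kms_debugfs_init(struct dpu_kms *dpu_kms,
				    struct dentry *debugfs_root)
{
	/* nothing to unwind: the helper swallows debugfs failures */
	dpu_debugfs_vbif_init(dpu_kms, debugfs_root);
	return 0;
}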

View file

@ -813,18 +813,6 @@ enum color_fmts {
#define COLOR_FMT_P010_UBWC COLOR_FMT_P010_UBWC
#define COLOR_FMT_P010 COLOR_FMT_P010
static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
{
(void)height;
(void)width;
/*
* In the future, calculate the size based on the w/h but just
* hardcode it for now since 16K satisfies all current usecases.
*/
return 16 * 1024;
}
/*
* Function arguments:
* @color_fmt
@ -832,114 +820,99 @@ static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
* Progressive: width
* Interlaced: width
*/
static inline unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
static unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
{
unsigned int alignment, stride = 0;
unsigned int stride = 0;
if (!width)
goto invalid_input;
switch (color_fmt) {
case COLOR_FMT_NV21:
case COLOR_FMT_NV12:
case COLOR_FMT_NV12_MVTB:
case COLOR_FMT_NV12_UBWC:
alignment = 128;
stride = MSM_MEDIA_ALIGN(width, alignment);
break;
case COLOR_FMT_NV12_BPP10_UBWC:
alignment = 256;
stride = MSM_MEDIA_ALIGN(width, 192);
stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
break;
case COLOR_FMT_P010_UBWC:
alignment = 256;
stride = MSM_MEDIA_ALIGN(width * 2, alignment);
break;
case COLOR_FMT_P010:
alignment = 128;
stride = MSM_MEDIA_ALIGN(width*2, alignment);
break;
default:
break;
}
invalid_input:
return stride;
}
/*
* Function arguments:
* @color_fmt
* @width
* Progressive: width
* Interlaced: width
*/
static inline unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
{
unsigned int alignment, stride = 0;
if (!width)
goto invalid_input;
switch (color_fmt) {
case COLOR_FMT_NV21:
case COLOR_FMT_NV12:
case COLOR_FMT_NV12_MVTB:
case COLOR_FMT_NV12_UBWC:
alignment = 128;
stride = MSM_MEDIA_ALIGN(width, alignment);
break;
case COLOR_FMT_NV12_BPP10_UBWC:
alignment = 256;
stride = MSM_MEDIA_ALIGN(width, 192);
stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
break;
case COLOR_FMT_P010_UBWC:
alignment = 256;
stride = MSM_MEDIA_ALIGN(width * 2, alignment);
break;
case COLOR_FMT_P010:
alignment = 128;
stride = MSM_MEDIA_ALIGN(width*2, alignment);
break;
default:
break;
}
invalid_input:
return stride;
}
/*
* Function arguments:
* @color_fmt
* @height
* Progressive: height
* Interlaced: (height+1)>>1
*/
static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
{
unsigned int alignment, sclines = 0;
if (!height)
goto invalid_input;
switch (color_fmt) {
case COLOR_FMT_NV21:
case COLOR_FMT_NV12:
case COLOR_FMT_NV12_MVTB:
case COLOR_FMT_NV12_UBWC:
case COLOR_FMT_P010:
alignment = 32;
break;
case COLOR_FMT_NV12_BPP10_UBWC:
case COLOR_FMT_P010_UBWC:
alignment = 16;
break;
default:
return 0;
switch (color_fmt) {
case COLOR_FMT_NV21:
case COLOR_FMT_NV12:
case COLOR_FMT_NV12_MVTB:
case COLOR_FMT_NV12_UBWC:
stride = MSM_MEDIA_ALIGN(width, 128);
break;
case COLOR_FMT_NV12_BPP10_UBWC:
stride = MSM_MEDIA_ALIGN(width, 192);
stride = MSM_MEDIA_ALIGN(stride * 4 / 3, 256);
break;
case COLOR_FMT_P010_UBWC:
stride = MSM_MEDIA_ALIGN(width * 2, 256);
break;
case COLOR_FMT_P010:
stride = MSM_MEDIA_ALIGN(width * 2, 128);
break;
}
sclines = MSM_MEDIA_ALIGN(height, alignment);
invalid_input:
return stride;
}
/*
* Function arguments:
* @color_fmt
* @width
* Progressive: width
* Interlaced: width
*/
static unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
{
unsigned int stride = 0;
if (!width)
return 0;
switch (color_fmt) {
case COLOR_FMT_NV21:
case COLOR_FMT_NV12:
case COLOR_FMT_NV12_MVTB:
case COLOR_FMT_NV12_UBWC:
stride = MSM_MEDIA_ALIGN(width, 128);
break;
case COLOR_FMT_NV12_BPP10_UBWC:
stride = MSM_MEDIA_ALIGN(width, 192);
stride = MSM_MEDIA_ALIGN(stride * 4 / 3, 256);
break;
case COLOR_FMT_P010_UBWC:
stride = MSM_MEDIA_ALIGN(width * 2, 256);
break;
case COLOR_FMT_P010:
stride = MSM_MEDIA_ALIGN(width * 2, 128);
break;
}
return stride;
}
/*
* Function arguments:
* @color_fmt
* @height
* Progressive: height
* Interlaced: (height+1)>>1
*/
static unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
{
unsigned int sclines = 0;
if (!height)
return 0;
switch (color_fmt) {
case COLOR_FMT_NV21:
case COLOR_FMT_NV12:
case COLOR_FMT_NV12_MVTB:
case COLOR_FMT_NV12_UBWC:
case COLOR_FMT_P010:
sclines = MSM_MEDIA_ALIGN(height, 32);
break;
case COLOR_FMT_NV12_BPP10_UBWC:
case COLOR_FMT_P010_UBWC:
sclines = MSM_MEDIA_ALIGN(height, 16);
break;
}
return sclines;
}
@ -950,12 +923,12 @@ static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
* Progressive: height
* Interlaced: (height+1)>>1
*/
static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
static unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
{
unsigned int alignment, sclines = 0;
unsigned int sclines = 0;
if (!height)
goto invalid_input;
return 0;
switch (color_fmt) {
case COLOR_FMT_NV21:
@ -964,18 +937,13 @@ static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
case COLOR_FMT_NV12_BPP10_UBWC:
case COLOR_FMT_P010_UBWC:
case COLOR_FMT_P010:
alignment = 16;
sclines = MSM_MEDIA_ALIGN((height + 1) >> 1, 16);
break;
case COLOR_FMT_NV12_UBWC:
alignment = 32;
sclines = MSM_MEDIA_ALIGN((height + 1) >> 1, 32);
break;
default:
goto invalid_input;
}
sclines = MSM_MEDIA_ALIGN((height+1)>>1, alignment);
invalid_input:
return sclines;
}
@ -986,12 +954,12 @@ static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
* Progressive: width
* Interlaced: width
*/
static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
static unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
{
int y_tile_width = 0, y_meta_stride = 0;
int y_tile_width = 0, y_meta_stride;
if (!width)
goto invalid_input;
return 0;
switch (color_fmt) {
case COLOR_FMT_NV12_UBWC:
@ -1002,14 +970,11 @@ static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
y_tile_width = 48;
break;
default:
goto invalid_input;
return 0;
}
y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width);
y_meta_stride = MSM_MEDIA_ALIGN(y_meta_stride, 64);
invalid_input:
return y_meta_stride;
return MSM_MEDIA_ALIGN(y_meta_stride, 64);
}
/*
@ -1019,12 +984,12 @@ static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
* Progressive: height
* Interlaced: (height+1)>>1
*/
static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
static unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
{
int y_tile_height = 0, y_meta_scanlines = 0;
int y_tile_height = 0, y_meta_scanlines;
if (!height)
goto invalid_input;
return 0;
switch (color_fmt) {
case COLOR_FMT_NV12_UBWC:
@ -1035,14 +1000,11 @@ static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
y_tile_height = 4;
break;
default:
goto invalid_input;
return 0;
}
y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height);
y_meta_scanlines = MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
invalid_input:
return y_meta_scanlines;
return MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
}
/*
@ -1052,12 +1014,12 @@ static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
* Progressive: width
* Interlaced: width
*/
static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
static unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
{
int uv_tile_width = 0, uv_meta_stride = 0;
int uv_tile_width = 0, uv_meta_stride;
if (!width)
goto invalid_input;
return 0;
switch (color_fmt) {
case COLOR_FMT_NV12_UBWC:
@ -1068,14 +1030,11 @@ static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
uv_tile_width = 24;
break;
default:
goto invalid_input;
return 0;
}
uv_meta_stride = MSM_MEDIA_ROUNDUP((width+1)>>1, uv_tile_width);
uv_meta_stride = MSM_MEDIA_ALIGN(uv_meta_stride, 64);
invalid_input:
return uv_meta_stride;
return MSM_MEDIA_ALIGN(uv_meta_stride, 64);
}
/*
@ -1085,12 +1044,12 @@ static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
* Progressive: height
* Interlaced: (height+1)>>1
*/
static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
static unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
{
int uv_tile_height = 0, uv_meta_scanlines = 0;
int uv_tile_height = 0, uv_meta_scanlines;
if (!height)
goto invalid_input;
return 0;
switch (color_fmt) {
case COLOR_FMT_NV12_UBWC:
@ -1101,22 +1060,19 @@ static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
uv_tile_height = 4;
break;
default:
goto invalid_input;
return 0;
}
uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height+1)>>1, uv_tile_height);
uv_meta_scanlines = MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
invalid_input:
return uv_meta_scanlines;
return MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
}
static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
static unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
{
unsigned int alignment = 0, stride = 0, bpp = 4;
unsigned int alignment = 0, bpp = 4;
if (!width)
goto invalid_input;
return 0;
switch (color_fmt) {
case COLOR_FMT_RGBA8888:
@ -1131,21 +1087,18 @@ static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
alignment = 256;
break;
default:
goto invalid_input;
return 0;
}
stride = MSM_MEDIA_ALIGN(width * bpp, alignment);
invalid_input:
return stride;
return MSM_MEDIA_ALIGN(width * bpp, alignment);
}
static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
static unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
{
unsigned int alignment = 0, scanlines = 0;
unsigned int alignment = 0;
if (!height)
goto invalid_input;
return 0;
switch (color_fmt) {
case COLOR_FMT_RGBA8888:
@ -1157,220 +1110,46 @@ static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
alignment = 16;
break;
default:
goto invalid_input;
return 0;
}
scanlines = MSM_MEDIA_ALIGN(height, alignment);
invalid_input:
return scanlines;
return MSM_MEDIA_ALIGN(height, alignment);
}
static inline unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
static unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
{
int rgb_tile_width = 0, rgb_meta_stride = 0;
int rgb_meta_stride;
if (!width)
goto invalid_input;
return 0;
switch (color_fmt) {
case COLOR_FMT_RGBA8888_UBWC:
case COLOR_FMT_RGBA1010102_UBWC:
case COLOR_FMT_RGB565_UBWC:
rgb_tile_width = 16;
break;
default:
goto invalid_input;
rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, 16);
return MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
}
rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, rgb_tile_width);
rgb_meta_stride = MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
invalid_input:
return rgb_meta_stride;
return 0;
}
static inline unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
static unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
{
int rgb_tile_height = 0, rgb_meta_scanlines = 0;
int rgb_meta_scanlines;
if (!height)
goto invalid_input;
return 0;
switch (color_fmt) {
case COLOR_FMT_RGBA8888_UBWC:
case COLOR_FMT_RGBA1010102_UBWC:
case COLOR_FMT_RGB565_UBWC:
rgb_tile_height = 4;
break;
default:
goto invalid_input;
rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, 4);
return MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
}
rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, rgb_tile_height);
rgb_meta_scanlines = MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
invalid_input:
return rgb_meta_scanlines;
}
/*
* Function arguments:
* @color_fmt
* @width
* Progressive: width
* Interlaced: width
* @height
* Progressive: height
* Interlaced: height
*/
static inline unsigned int VENUS_BUFFER_SIZE(
int color_fmt, int width, int height)
{
const unsigned int extra_size = VENUS_EXTRADATA_SIZE(width, height);
unsigned int uv_alignment = 0, size = 0;
unsigned int y_plane, uv_plane, y_stride,
uv_stride, y_sclines, uv_sclines;
unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0;
unsigned int y_meta_stride = 0, y_meta_scanlines = 0;
unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0;
unsigned int y_meta_plane = 0, uv_meta_plane = 0;
unsigned int rgb_stride = 0, rgb_scanlines = 0;
unsigned int rgb_plane = 0, rgb_ubwc_plane = 0, rgb_meta_plane = 0;
unsigned int rgb_meta_stride = 0, rgb_meta_scanlines = 0;
if (!width || !height)
goto invalid_input;
y_stride = VENUS_Y_STRIDE(color_fmt, width);
uv_stride = VENUS_UV_STRIDE(color_fmt, width);
y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
rgb_stride = VENUS_RGB_STRIDE(color_fmt, width);
rgb_scanlines = VENUS_RGB_SCANLINES(color_fmt, height);
switch (color_fmt) {
case COLOR_FMT_NV21:
case COLOR_FMT_NV12:
case COLOR_FMT_P010:
uv_alignment = 4096;
y_plane = y_stride * y_sclines;
uv_plane = uv_stride * uv_sclines + uv_alignment;
size = y_plane + uv_plane +
MSM_MEDIA_MAX(extra_size, 8 * y_stride);
size = MSM_MEDIA_ALIGN(size, 4096);
break;
case COLOR_FMT_NV12_MVTB:
uv_alignment = 4096;
y_plane = y_stride * y_sclines;
uv_plane = uv_stride * uv_sclines + uv_alignment;
size = y_plane + uv_plane;
size = 2 * size + extra_size;
size = MSM_MEDIA_ALIGN(size, 4096);
break;
case COLOR_FMT_NV12_UBWC:
y_sclines = VENUS_Y_SCANLINES(color_fmt, (height+1)>>1);
y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
uv_sclines = VENUS_UV_SCANLINES(color_fmt, (height+1)>>1);
uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
y_meta_scanlines =
VENUS_Y_META_SCANLINES(color_fmt, (height+1)>>1);
y_meta_plane = MSM_MEDIA_ALIGN(
y_meta_stride * y_meta_scanlines, 4096);
uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
uv_meta_scanlines =
VENUS_UV_META_SCANLINES(color_fmt, (height+1)>>1);
uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
uv_meta_scanlines, 4096);
size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
uv_meta_plane)*2 +
MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
size = MSM_MEDIA_ALIGN(size, 4096);
break;
case COLOR_FMT_NV12_BPP10_UBWC:
y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
y_meta_plane = MSM_MEDIA_ALIGN(
y_meta_stride * y_meta_scanlines, 4096);
uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
uv_meta_scanlines, 4096);
size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
uv_meta_plane +
MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
size = MSM_MEDIA_ALIGN(size, 4096);
break;
case COLOR_FMT_P010_UBWC:
y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
y_meta_plane = MSM_MEDIA_ALIGN(
y_meta_stride * y_meta_scanlines, 4096);
uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
uv_meta_scanlines, 4096);
size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
uv_meta_plane;
size = MSM_MEDIA_ALIGN(size, 4096);
break;
case COLOR_FMT_RGBA8888:
rgb_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines, 4096);
size = rgb_plane;
size = MSM_MEDIA_ALIGN(size, 4096);
break;
case COLOR_FMT_RGBA8888_UBWC:
case COLOR_FMT_RGBA1010102_UBWC:
case COLOR_FMT_RGB565_UBWC:
rgb_ubwc_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines,
4096);
rgb_meta_stride = VENUS_RGB_META_STRIDE(color_fmt, width);
rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color_fmt,
height);
rgb_meta_plane = MSM_MEDIA_ALIGN(rgb_meta_stride *
rgb_meta_scanlines, 4096);
size = rgb_ubwc_plane + rgb_meta_plane;
size = MSM_MEDIA_ALIGN(size, 4096);
break;
default:
break;
}
invalid_input:
return size;
}
static inline unsigned int VENUS_VIEW2_OFFSET(
int color_fmt, int width, int height)
{
unsigned int offset = 0;
unsigned int y_plane, uv_plane, y_stride,
uv_stride, y_sclines, uv_sclines;
if (!width || !height)
goto invalid_input;
y_stride = VENUS_Y_STRIDE(color_fmt, width);
uv_stride = VENUS_UV_STRIDE(color_fmt, width);
y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
switch (color_fmt) {
case COLOR_FMT_NV12_MVTB:
y_plane = y_stride * y_sclines;
uv_plane = uv_stride * uv_sclines;
offset = y_plane + uv_plane;
break;
default:
break;
}
invalid_input:
return offset;
return 0;
}
#endif
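
As a worked example of the helpers above — assuming MSM_MEDIA_ALIGN(x, a) rounds x up to the next multiple of a, as its uses here imply — the NV12 geometry for a 1280x720 frame works out as follows (the wrapper function is hypothetical):

static void example_nv12_geometry(void)
{
	unsigned int y_stride, y_lines, uv_stride, uv_lines;

	y_stride  = VENUS_Y_STRIDE(COLOR_FMT_NV12, 1280);    /* ALIGN(1280, 128) = 1280 */
	y_lines   = VENUS_Y_SCANLINES(COLOR_FMT_NV12, 720);  /* ALIGN(720, 32) = 736 */
	uv_stride = VENUS_UV_STRIDE(COLOR_FMT_NV12, 1280);   /* ALIGN(1280, 128) = 1280 */
	uv_lines  = VENUS_UV_SCANLINES(COLOR_FMT_NV12, 720); /* ALIGN((720+1)>>1, 16) = 368 */
}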

View file

@ -128,7 +128,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
struct msm_kms *kms = &mdp4_kms->base.base;
msm_gem_put_iova(val, kms->aspace);
msm_gem_unpin_iova(val, kms->aspace);
drm_gem_object_put_unlocked(val);
}
@ -384,7 +384,7 @@ static void update_cursor(struct drm_crtc *crtc)
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
drm_gem_object_get(next_bo);
msm_gem_get_iova(next_bo, kms->aspace, &iova);
msm_gem_get_and_pin_iova(next_bo, kms->aspace, &iova);
/* enable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
@ -429,7 +429,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
int ret;
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
return -EINVAL;
}
@ -442,7 +442,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
}
if (cursor_bo) {
ret = msm_gem_get_iova(cursor_bo, kms->aspace, &iova);
ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &iova);
if (ret)
goto fail;
} else {
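
The renames above (msm_gem_get_iova → msm_gem_get_and_pin_iova, msm_gem_put_iova → msm_gem_unpin_iova) make the page pinning explicit in the name. A minimal sketch of the pairing, assuming the bo handling around it (the function is hypothetical):

static int example_scanout(struct drm_gem_object *bo,
			   struct msm_gem_address_space *aspace)
{
	uint64_t iova;
	int ret;

	/* reserves an iova if needed and pins the backing pages */
	ret = msm_gem_get_and_pin_iova(bo, aspace, &iova);
	if (ret)
		return ret;

	/* ... program iova into the scanout hardware ... */

	/* drop the pin when done; the iova reservation can persist */
	msm_gem_unpin_iova(bo, aspace);
	return 0;
}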

View file

@ -45,7 +45,7 @@ static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");
if (!dtv_pdata) {
dev_err(dev->dev, "could not find dtv pdata\n");
DRM_DEV_ERROR(dev->dev, "could not find dtv pdata\n");
return;
}
@ -209,16 +209,16 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder)
ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
if (ret)
dev_err(dev->dev, "failed to set mdp_clk to %lu: %d\n",
DRM_DEV_ERROR(dev->dev, "failed to set mdp_clk to %lu: %d\n",
pc, ret);
ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
if (ret)
dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
if (ret)
dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
@ -258,14 +258,14 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
dev_err(dev->dev, "failed to get hdmi_clk\n");
DRM_DEV_ERROR(dev->dev, "failed to get hdmi_clk\n");
ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
goto fail;
}
mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk");
if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
dev_err(dev->dev, "failed to get tv_clk\n");
DRM_DEV_ERROR(dev->dev, "failed to get tv_clk\n");
ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
goto fail;
}
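
The conversion above swaps dev_err() for the drm_print helpers, which route through the DRM logging machinery while still taking the same struct device. A minimal sketch of the pattern (the function and messages are hypothetical):

#include <drm/drm_print.h>

static void example_log(struct device *dev, int ret)
{
	/* same device argument as dev_err(), but tagged as DRM output */
	DRM_DEV_ERROR(dev, "operation failed: %d\n", ret);
	DRM_DEV_INFO(dev, "falling back to defaults\n");
}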

View file

@ -43,7 +43,7 @@ static int mdp4_hw_init(struct msm_kms *kms)
DBG("found MDP4 version v%d.%d", major, minor);
if (major != 4) {
dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto out;
@ -165,7 +165,7 @@ static void mdp4_destroy(struct msm_kms *kms)
struct msm_gem_address_space *aspace = kms->aspace;
if (mdp4_kms->blank_cursor_iova)
msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo);
if (aspace) {
@ -206,7 +206,8 @@ int mdp4_disable(struct mdp4_kms *mdp4_kms)
clk_disable_unprepare(mdp4_kms->clk);
if (mdp4_kms->pclk)
clk_disable_unprepare(mdp4_kms->pclk);
clk_disable_unprepare(mdp4_kms->lut_clk);
if (mdp4_kms->lut_clk)
clk_disable_unprepare(mdp4_kms->lut_clk);
if (mdp4_kms->axi_clk)
clk_disable_unprepare(mdp4_kms->axi_clk);
@ -220,7 +221,8 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms)
clk_prepare_enable(mdp4_kms->clk);
if (mdp4_kms->pclk)
clk_prepare_enable(mdp4_kms->pclk);
clk_prepare_enable(mdp4_kms->lut_clk);
if (mdp4_kms->lut_clk)
clk_prepare_enable(mdp4_kms->lut_clk);
if (mdp4_kms->axi_clk)
clk_prepare_enable(mdp4_kms->axi_clk);
@ -251,7 +253,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
encoder = mdp4_lcdc_encoder_init(dev, panel_node);
if (IS_ERR(encoder)) {
dev_err(dev->dev, "failed to construct LCDC encoder\n");
DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
return PTR_ERR(encoder);
}
@ -260,7 +262,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
if (IS_ERR(connector)) {
dev_err(dev->dev, "failed to initialize LVDS connector\n");
DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
return PTR_ERR(connector);
}
@ -271,7 +273,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
case DRM_MODE_ENCODER_TMDS:
encoder = mdp4_dtv_encoder_init(dev);
if (IS_ERR(encoder)) {
dev_err(dev->dev, "failed to construct DTV encoder\n");
DRM_DEV_ERROR(dev->dev, "failed to construct DTV encoder\n");
return PTR_ERR(encoder);
}
@ -282,7 +284,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
/* Construct bridge/connector for HDMI: */
ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
if (ret) {
dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "failed to initialize HDMI: %d\n", ret);
return ret;
}
}
@ -300,7 +302,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
encoder = mdp4_dsi_encoder_init(dev);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
dev_err(dev->dev,
DRM_DEV_ERROR(dev->dev,
"failed to construct DSI encoder: %d\n", ret);
return ret;
}
@ -311,14 +313,14 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
if (ret) {
dev_err(dev->dev, "failed to initialize DSI: %d\n",
DRM_DEV_ERROR(dev->dev, "failed to initialize DSI: %d\n",
ret);
return ret;
}
break;
default:
dev_err(dev->dev, "Invalid or unsupported interface\n");
DRM_DEV_ERROR(dev->dev, "Invalid or unsupported interface\n");
return -EINVAL;
}
@ -354,7 +356,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
plane = mdp4_plane_init(dev, vg_planes[i], false);
if (IS_ERR(plane)) {
dev_err(dev->dev,
DRM_DEV_ERROR(dev->dev,
"failed to construct plane for VG%d\n", i + 1);
ret = PTR_ERR(plane);
goto fail;
@ -365,7 +367,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
plane = mdp4_plane_init(dev, rgb_planes[i], true);
if (IS_ERR(plane)) {
dev_err(dev->dev,
DRM_DEV_ERROR(dev->dev,
"failed to construct plane for RGB%d\n", i + 1);
ret = PTR_ERR(plane);
goto fail;
@ -374,7 +376,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
mdp4_crtcs[i]);
if (IS_ERR(crtc)) {
dev_err(dev->dev, "failed to construct crtc for %s\n",
DRM_DEV_ERROR(dev->dev, "failed to construct crtc for %s\n",
mdp4_crtc_names[i]);
ret = PTR_ERR(crtc);
goto fail;
@ -396,7 +398,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
if (ret) {
dev_err(dev->dev, "failed to initialize intf: %d, %d\n",
DRM_DEV_ERROR(dev->dev, "failed to initialize intf: %d, %d\n",
i, ret);
goto fail;
}
@ -419,7 +421,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
if (!mdp4_kms) {
dev_err(dev->dev, "failed to allocate kms\n");
DRM_DEV_ERROR(dev->dev, "failed to allocate kms\n");
ret = -ENOMEM;
goto fail;
}
@ -439,7 +441,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
dev_err(dev->dev, "failed to get irq: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
goto fail;
}
@ -456,14 +458,14 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
if (mdp4_kms->vdd) {
ret = regulator_enable(mdp4_kms->vdd);
if (ret) {
dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n", ret);
goto fail;
}
}
mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
if (IS_ERR(mdp4_kms->clk)) {
dev_err(dev->dev, "failed to get core_clk\n");
DRM_DEV_ERROR(dev->dev, "failed to get core_clk\n");
ret = PTR_ERR(mdp4_kms->clk);
goto fail;
}
@ -472,23 +474,25 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
if (IS_ERR(mdp4_kms->pclk))
mdp4_kms->pclk = NULL;
// XXX if (rev >= MDP_REV_42) { ???
mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
if (IS_ERR(mdp4_kms->lut_clk)) {
dev_err(dev->dev, "failed to get lut_clk\n");
ret = PTR_ERR(mdp4_kms->lut_clk);
goto fail;
if (mdp4_kms->rev >= 2) {
mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
if (IS_ERR(mdp4_kms->lut_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
ret = PTR_ERR(mdp4_kms->lut_clk);
goto fail;
}
}
mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
if (IS_ERR(mdp4_kms->axi_clk)) {
dev_err(dev->dev, "failed to get axi_clk\n");
DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
ret = PTR_ERR(mdp4_kms->axi_clk);
goto fail;
}
clk_set_rate(mdp4_kms->clk, config->max_clk);
clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
if (mdp4_kms->lut_clk)
clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
pm_runtime_enable(dev->dev);
mdp4_kms->rpm_enabled = true;
@ -519,29 +523,29 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
if (ret)
goto fail;
} else {
dev_info(dev->dev, "no iommu, fallback to phys "
DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
aspace = NULL;
}
ret = modeset_init(mdp4_kms);
if (ret) {
dev_err(dev->dev, "modeset_init failed: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "modeset_init failed: %d\n", ret);
goto fail;
}
mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC | MSM_BO_SCANOUT);
if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
mdp4_kms->blank_cursor_bo = NULL;
goto fail;
}
ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
&mdp4_kms->blank_cursor_iova);
if (ret) {
dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
goto fail;
}
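
The lut_clk hunks above make the LUT clock conditional: it is only requested on MDP4 revisions that have one, and each use is guarded so older parts skip it. A condensed sketch of that pattern (the wrapper is hypothetical; the rev threshold is taken from the hunk above):

static int example_optional_lut_clk(struct mdp4_kms *mdp4_kms,
				    struct platform_device *pdev,
				    unsigned long max_clk)
{
	if (mdp4_kms->rev >= 2) {
		mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
		if (IS_ERR(mdp4_kms->lut_clk))
			return PTR_ERR(mdp4_kms->lut_clk);
	}

	/* lut_clk stays NULL on older revisions, so guard every use */
	if (mdp4_kms->lut_clk)
		clk_set_rate(mdp4_kms->lut_clk, max_clk);

	return 0;
}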

View file

@ -47,7 +47,7 @@ static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder)
struct lcdc_platform_data *lcdc_pdata = mdp4_find_pdata("lvds.0");
if (!lcdc_pdata) {
dev_err(dev->dev, "could not find lvds pdata\n");
DRM_DEV_ERROR(dev->dev, "could not find lvds pdata\n");
return;
}
@ -224,7 +224,7 @@ static void setup_phy(struct drm_encoder *encoder)
break;
default:
dev_err(dev->dev, "unknown bpp: %d\n", bpp);
DRM_DEV_ERROR(dev->dev, "unknown bpp: %d\n", bpp);
return;
}
@ -241,7 +241,7 @@ static void setup_phy(struct drm_encoder *encoder)
MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN;
break;
default:
dev_err(dev->dev, "unknown # of channels: %d\n", nchan);
DRM_DEV_ERROR(dev->dev, "unknown # of channels: %d\n", nchan);
return;
}
@ -361,7 +361,7 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
ret = regulator_disable(mdp4_lcdc_encoder->regs[i]);
if (ret)
dev_err(dev->dev, "failed to disable regulator: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret);
}
bs_set(mdp4_lcdc_encoder, 0);
@ -377,20 +377,25 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
unsigned long pc = mdp4_lcdc_encoder->pixclock;
struct mdp4_kms *mdp4_kms = get_kms(encoder);
struct drm_panel *panel;
uint32_t config;
int i, ret;
if (WARN_ON(mdp4_lcdc_encoder->enabled))
return;
/* TODO: hard-coded for 18bpp: */
mdp4_crtc_set_config(encoder->crtc,
MDP4_DMA_CONFIG_R_BPC(BPC6) |
MDP4_DMA_CONFIG_G_BPC(BPC6) |
MDP4_DMA_CONFIG_B_BPC(BPC6) |
MDP4_DMA_CONFIG_PACK_ALIGN_MSB |
MDP4_DMA_CONFIG_PACK(0x21) |
MDP4_DMA_CONFIG_DEFLKR_EN |
MDP4_DMA_CONFIG_DITHER_EN);
config =
MDP4_DMA_CONFIG_R_BPC(BPC6) |
MDP4_DMA_CONFIG_G_BPC(BPC6) |
MDP4_DMA_CONFIG_B_BPC(BPC6) |
MDP4_DMA_CONFIG_PACK(0x21) |
MDP4_DMA_CONFIG_DEFLKR_EN |
MDP4_DMA_CONFIG_DITHER_EN;
if (!of_property_read_bool(dev->dev->of_node, "qcom,lcdc-align-lsb"))
config |= MDP4_DMA_CONFIG_PACK_ALIGN_MSB;
mdp4_crtc_set_config(encoder->crtc, config);
mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0);
bs_set(mdp4_lcdc_encoder, 1);
@ -398,16 +403,16 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
ret = regulator_enable(mdp4_lcdc_encoder->regs[i]);
if (ret)
dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "failed to enable regulator: %d\n", ret);
}
DBG("setting lcdc_clk=%lu", pc);
ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc);
if (ret)
dev_err(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
ret = clk_prepare_enable(mdp4_lcdc_encoder->lcdc_clk);
if (ret)
dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
if (!IS_ERR(panel)) {
@ -461,7 +466,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
/* TODO: do we need different pll in other cases? */
mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev);
if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
dev_err(dev->dev, "failed to get lvds_clk\n");
DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n");
ret = PTR_ERR(mdp4_lcdc_encoder->lcdc_clk);
goto fail;
}
@ -470,7 +475,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
reg = devm_regulator_get(dev->dev, "lvds-vccs-3p3v");
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
dev_err(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
goto fail;
}
mdp4_lcdc_encoder->regs[0] = reg;
@ -478,7 +483,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
reg = devm_regulator_get(dev->dev, "lvds-pll-vdda");
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
dev_err(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
goto fail;
}
mdp4_lcdc_encoder->regs[1] = reg;
@ -486,7 +491,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
reg = devm_regulator_get(dev->dev, "lvds-vdda");
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
dev_err(dev->dev, "failed to get lvds-vdda: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "failed to get lvds-vdda: %d\n", ret);
goto fail;
}
mdp4_lcdc_encoder->regs[2] = reg;

View file

@ -234,22 +234,22 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
format = to_mdp_format(msm_framebuffer_format(fb));
if (src_w > (crtc_w * DOWN_SCALE_MAX)) {
dev_err(dev->dev, "Width down scaling exceeds limits!\n");
DRM_DEV_ERROR(dev->dev, "Width down scaling exceeds limits!\n");
return -ERANGE;
}
if (src_h > (crtc_h * DOWN_SCALE_MAX)) {
dev_err(dev->dev, "Height down scaling exceeds limits!\n");
DRM_DEV_ERROR(dev->dev, "Height down scaling exceeds limits!\n");
return -ERANGE;
}
if (crtc_w > (src_w * UP_SCALE_MAX)) {
dev_err(dev->dev, "Width up scaling exceeds limits!\n");
DRM_DEV_ERROR(dev->dev, "Width up scaling exceeds limits!\n");
return -ERANGE;
}
if (crtc_h > (src_h * UP_SCALE_MAX)) {
dev_err(dev->dev, "Height up scaling exceeds limits!\n");
DRM_DEV_ERROR(dev->dev, "Height up scaling exceeds limits!\n");
return -ERANGE;
}

View file

@ -553,6 +553,91 @@ const struct mdp5_cfg_hw msm8x96_config = {
.max_clk = 412500000,
};
const struct mdp5_cfg_hw msm8917_config = {
.name = "msm8917",
.mdp = {
.count = 1,
.caps = MDP_CAP_CDM,
},
.ctl = {
.count = 3,
.base = { 0x01000, 0x01200, 0x01400 },
.flush_hw_mask = 0xffffffff,
},
.pipe_vig = {
.count = 1,
.base = { 0x04000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_rgb = {
.count = 2,
.base = { 0x14000, 0x16000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_DECIMATION |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_dma = {
.count = 1,
.base = { 0x24000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_cursor = {
.count = 1,
.base = { 0x34000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
MDP_PIPE_CAP_CURSOR |
0,
},
.lm = {
.count = 2,
.base = { 0x44000, 0x45000 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 1, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB },
},
.nb_stages = 8,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 1,
.base = { 0x54000 },
},
.pp = {
.count = 1,
.base = { 0x70000 },
},
.cdm = {
.count = 1,
.base = { 0x79200 },
},
.intf = {
.base = { 0x6a000, 0x6a800 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
},
},
.max_clk = 320000000,
};
static const struct mdp5_cfg_handler cfg_handlers[] = {
{ .revision = 0, .config = { .hw = &msm8x74v1_config } },
{ .revision = 2, .config = { .hw = &msm8x74v2_config } },
@ -560,6 +645,7 @@ static const struct mdp5_cfg_handler cfg_handlers[] = {
{ .revision = 6, .config = { .hw = &msm8x16_config } },
{ .revision = 9, .config = { .hw = &msm8x94_config } },
{ .revision = 7, .config = { .hw = &msm8x96_config } },
{ .revision = 15, .config = { .hw = &msm8917_config } },
};
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
@ -600,7 +686,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
}
if (major != 1) {
dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
DRM_DEV_ERROR(dev->dev, "unexpected MDP major version: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto fail;
@ -615,7 +701,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
break;
}
if (unlikely(!mdp5_cfg)) {
dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
DRM_DEV_ERROR(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto fail;
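
The new msm8917 entry is matched through the cfg_handlers[] table above: mdp5_cfg_init() reads the MDP hardware revision and picks the config whose .revision equals the minor number (15 for msm8917). A condensed sketch of that lookup:

static const struct mdp5_cfg_hw *example_cfg_lookup(u32 minor)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cfg_handlers); i++)
		if (cfg_handlers[i].revision == minor)
			return cfg_handlers[i].config.hw;

	return NULL;	/* caller logs "unexpected MDP minor revision" */
}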

View file

@ -55,20 +55,20 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
int pp_id = mixer->pp;
if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
dev_err(dev, "vsync_clk is not initialized\n");
DRM_DEV_ERROR(dev, "vsync_clk is not initialized\n");
return -EINVAL;
}
total_lines_x100 = mode->vtotal * mode->vrefresh;
if (!total_lines_x100) {
dev_err(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
__func__, mode->vtotal, mode->vrefresh);
return -EINVAL;
}
vsync_clk_speed = clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE);
if (vsync_clk_speed <= 0) {
dev_err(dev, "vsync_clk round rate failed %ld\n",
DRM_DEV_ERROR(dev, "vsync_clk round rate failed %ld\n",
vsync_clk_speed);
return -EINVAL;
}
@ -102,13 +102,13 @@ static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
ret = clk_set_rate(mdp5_kms->vsync_clk,
clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE));
if (ret) {
dev_err(encoder->dev->dev,
DRM_DEV_ERROR(encoder->dev->dev,
"vsync_clk clk_set_rate failed, %d\n", ret);
return ret;
}
ret = clk_prepare_enable(mdp5_kms->vsync_clk);
if (ret) {
dev_err(encoder->dev->dev,
DRM_DEV_ERROR(encoder->dev->dev,
"vsync_clk clk_prepare_enable failed, %d\n", ret);
return ret;
}

View file

@ -173,7 +173,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
struct msm_kms *kms = &mdp5_kms->base.base;
msm_gem_put_iova(val, kms->aspace);
msm_gem_unpin_iova(val, kms->aspace);
drm_gem_object_put_unlocked(val);
}
@ -662,7 +662,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
if (ret) {
dev_err(dev->dev, "couldn't assign mixers %d\n", ret);
DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
return ret;
}
@ -679,7 +679,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
* and that we don't have conflicting mixer stages:
*/
if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
dev_err(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
DRM_DEV_ERROR(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
cnt, start);
return -EINVAL;
}
@ -879,7 +879,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
}
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
return -EINVAL;
}
@ -903,7 +903,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!cursor_bo)
return -ENOENT;
ret = msm_gem_get_iova(cursor_bo, kms->aspace,
ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
&mdp5_crtc->cursor.iova);
if (ret)
return -EINVAL;
@ -924,7 +924,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
set_cursor:
ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
if (ret) {
dev_err(dev->dev, "failed to %sable cursor: %d\n",
DRM_DEV_ERROR(dev->dev, "failed to %sable cursor: %d\n",
cursor_enable ? "en" : "dis", ret);
goto end;
}

View file

@ -262,13 +262,13 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
struct mdp5_hw_mixer *mixer = pipeline->mixer;
if (unlikely(WARN_ON(!mixer))) {
dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM",
DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM",
ctl->id);
return -EINVAL;
}
if (pipeline->r_mixer) {
dev_err(ctl_mgr->dev->dev, "unsupported configuration");
DRM_DEV_ERROR(ctl_mgr->dev->dev, "unsupported configuration");
return -EINVAL;
}
@ -604,10 +604,10 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
return 0;
} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTLs already paired\n");
return -EINVAL;
} else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
DRM_DEV_ERROR(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
return -EINVAL;
}
@ -652,7 +652,7 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
if ((ctl_mgr->ctls[c].status & checkm) == match)
goto found;
dev_err(ctl_mgr->dev->dev, "No more CTL available!");
DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!");
goto unlock;
found:
@ -698,13 +698,13 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
if (!ctl_mgr) {
dev_err(dev->dev, "failed to allocate CTL manager\n");
DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n");
ret = -ENOMEM;
goto fail;
}
if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
dev_err(dev->dev, "Increase static pool size to at least %d\n",
DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n",
ctl_cfg->count);
ret = -ENOSPC;
goto fail;
@ -723,7 +723,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
if (WARN_ON(!ctl_cfg->base[c])) {
dev_err(dev->dev, "CTL_%d: base is null!\n", c);
DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c);
ret = -EINVAL;
spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
goto fail;

View file

@ -264,7 +264,7 @@ static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
minor->debugfs_root, minor);
if (ret) {
dev_err(dev->dev, "could not install mdp5_debugfs_list\n");
DRM_DEV_ERROR(dev->dev, "could not install mdp5_debugfs_list\n");
return ret;
}
@ -337,7 +337,7 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
encoder = mdp5_encoder_init(dev, intf, ctl);
if (IS_ERR(encoder)) {
dev_err(dev->dev, "failed to construct encoder\n");
DRM_DEV_ERROR(dev->dev, "failed to construct encoder\n");
return encoder;
}
@ -418,7 +418,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);
if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
dev_err(dev->dev, "failed to find dsi from intf %d\n",
DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
intf->num);
ret = -EINVAL;
break;
@ -443,7 +443,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
break;
}
default:
dev_err(dev->dev, "unknown intf: %d\n", intf->type);
DRM_DEV_ERROR(dev->dev, "unknown intf: %d\n", intf->type);
ret = -EINVAL;
break;
}
@ -500,7 +500,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
plane = mdp5_plane_init(dev, type);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
goto fail;
}
priv->planes[priv->num_planes++] = plane;
@ -517,7 +517,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i);
if (IS_ERR(crtc)) {
ret = PTR_ERR(crtc);
dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
goto fail;
}
priv->crtcs[priv->num_crtcs++] = crtc;
@ -552,7 +552,7 @@ static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
*minor = FIELD(version, MDP5_HW_VERSION_MINOR);
dev_info(dev, "MDP5 version v%d.%d", *major, *minor);
DRM_DEV_INFO(dev, "MDP5 version v%d.%d", *major, *minor);
}
static int get_clk(struct platform_device *pdev, struct clk **clkp,
@ -561,7 +561,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
struct device *dev = &pdev->dev;
struct clk *clk = msm_clk_get(pdev, name);
if (IS_ERR(clk) && mandatory) {
dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
DRM_DEV_ERROR(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
return PTR_ERR(clk);
}
if (IS_ERR(clk))
@ -688,7 +688,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (irq < 0) {
ret = irq;
dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
DRM_DEV_ERROR(&pdev->dev, "failed to get irq: %d\n", ret);
goto fail;
}
@ -724,12 +724,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret) {
dev_err(&pdev->dev, "failed to attach iommu: %d\n",
DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n",
ret);
goto fail;
}
} else {
dev_info(&pdev->dev,
DRM_DEV_INFO(&pdev->dev,
"no iommu, fallback to phys contig buffers for scanout\n");
aspace = NULL;
}
@ -738,7 +738,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
ret = modeset_init(mdp5_kms);
if (ret) {
dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret);
goto fail;
}
@ -795,7 +795,7 @@ static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
if (IS_ERR(hwpipe)) {
ret = PTR_ERR(hwpipe);
dev_err(dev->dev, "failed to construct pipe for %s (%d)\n",
DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
pipe2name(pipes[i]), ret);
return ret;
}
@ -867,7 +867,7 @@ static int hwmixer_init(struct mdp5_kms *mdp5_kms)
mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
if (IS_ERR(mixer)) {
ret = PTR_ERR(mixer);
dev_err(dev->dev, "failed to construct LM%d (%d)\n",
DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
i, ret);
return ret;
}
@ -897,7 +897,7 @@ static int interface_init(struct mdp5_kms *mdp5_kms)
intf = kzalloc(sizeof(*intf), GFP_KERNEL);
if (!intf) {
dev_err(dev->dev, "failed to construct INTF%d\n", i);
DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
return -ENOMEM;
}

View file

@ -132,7 +132,7 @@ static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
mdp5_mdss);
if (!d) {
dev_err(dev, "mdss irq domain add failed\n");
DRM_DEV_ERROR(dev, "mdss irq domain add failed\n");
return -ENXIO;
}
@ -246,7 +246,7 @@ int mdp5_mdss_init(struct drm_device *dev)
ret = msm_mdss_get_clocks(mdp5_mdss);
if (ret) {
dev_err(dev->dev, "failed to get clocks: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "failed to get clocks: %d\n", ret);
goto fail;
}
@ -259,7 +259,7 @@ int mdp5_mdss_init(struct drm_device *dev)
ret = regulator_enable(mdp5_mdss->vdd);
if (ret) {
dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n",
ret);
goto fail;
}
@ -267,13 +267,13 @@ int mdp5_mdss_init(struct drm_device *dev)
ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
mdss_irq, 0, "mdss_isr", mdp5_mdss);
if (ret) {
dev_err(dev->dev, "failed to init irq: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "failed to init irq: %d\n", ret);
goto fail_irq;
}
ret = mdss_irq_domain_init(mdp5_mdss);
if (ret) {
dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "failed to init sub-block irqs: %d\n", ret);
goto fail_irq;
}

@ -125,7 +125,7 @@ static int mdp5_plane_atomic_set_property(struct drm_plane *plane,
SET_PROPERTY(zpos, ZPOS, uint8_t);
dev_err(dev->dev, "Invalid property\n");
DRM_DEV_ERROR(dev->dev, "Invalid property\n");
ret = -EINVAL;
done:
return ret;
@ -153,7 +153,7 @@ static int mdp5_plane_atomic_get_property(struct drm_plane *plane,
GET_PROPERTY(zpos, ZPOS, uint8_t);
dev_err(dev->dev, "Invalid property\n");
DRM_DEV_ERROR(dev->dev, "Invalid property\n");
ret = -EINVAL;
done:
return ret;
@ -658,7 +658,7 @@ static int calc_scalex_steps(struct drm_plane *plane,
ret = calc_phase_step(src, dest, &phasex_step);
if (ret) {
dev_err(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
DRM_DEV_ERROR(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
return ret;
}
@ -683,7 +683,7 @@ static int calc_scaley_steps(struct drm_plane *plane,
ret = calc_phase_step(src, dest, &phasey_step);
if (ret) {
dev_err(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
DRM_DEV_ERROR(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
return ret;
}

@ -88,7 +88,7 @@ static int smp_request_block(struct mdp5_smp *smp,
avail = cnt - bitmap_weight(state->state, cnt);
if (nblks > avail) {
dev_err(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
DRM_DEV_ERROR(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
nblks, avail);
return -ENOSPC;
}
@ -188,7 +188,7 @@ int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
ret = smp_request_block(smp, state, cid, n);
if (ret) {
dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
DRM_DEV_ERROR(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
n, ret);
return ret;
}

@ -29,7 +29,7 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
if (!phy_node) {
dev_err(&pdev->dev, "cannot find phy device\n");
DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n");
return -ENXIO;
}
@ -40,7 +40,7 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
of_node_put(phy_node);
if (!phy_pdev || !msm_dsi->phy) {
dev_err(&pdev->dev, "%s: phy driver is not ready\n", __func__);
DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
return -EPROBE_DEFER;
}
@ -210,7 +210,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
if (ret) {
dev_err(dev->dev, "failed to modeset init host: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "failed to modeset init host: %d\n", ret);
goto fail;
}
@ -222,7 +222,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
if (IS_ERR(msm_dsi->bridge)) {
ret = PTR_ERR(msm_dsi->bridge);
dev_err(dev->dev, "failed to create dsi bridge: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "failed to create dsi bridge: %d\n", ret);
msm_dsi->bridge = NULL;
goto fail;
}
@ -244,7 +244,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
if (IS_ERR(msm_dsi->connector)) {
ret = PTR_ERR(msm_dsi->connector);
dev_err(dev->dev,
DRM_DEV_ERROR(dev->dev,
"failed to create dsi connector: %d\n", ret);
msm_dsi->connector = NULL;
goto fail;

@ -1050,7 +1050,7 @@ static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
msecs_to_jiffies(70));
if (ret <= 0)
dev_err(dev, "wait for video done timed out\n");
DRM_DEV_ERROR(dev, "wait for video done timed out\n");
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
}
@ -1083,6 +1083,8 @@ int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
return PTR_ERR(data);
}
msm_gem_object_set_name(msm_host->tx_gem_obj, "tx_gem");
msm_host->tx_size = msm_host->tx_gem_obj->size;
return 0;
@ -1118,7 +1120,7 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
priv = dev->dev_private;
if (msm_host->tx_gem_obj) {
msm_gem_put_iova(msm_host->tx_gem_obj, priv->kms->aspace);
msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
drm_gem_object_put_unlocked(msm_host->tx_gem_obj);
msm_host->tx_gem_obj = NULL;
}
@ -1248,7 +1250,7 @@ int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
if (!dma_base)
return -EINVAL;
return msm_gem_get_iova(msm_host->tx_gem_obj,
return msm_gem_get_and_pin_iova(msm_host->tx_gem_obj,
priv->kms->aspace, dma_base);
}
@ -1673,7 +1675,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
prop = of_find_property(ep, "data-lanes", &len);
if (!prop) {
dev_dbg(dev,
DRM_DEV_DEBUG(dev,
"failed to find data lane mapping, using default\n");
return 0;
}
@ -1681,7 +1683,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
num_lanes = len / sizeof(u32);
if (num_lanes < 1 || num_lanes > 4) {
dev_err(dev, "bad number of data lanes\n");
DRM_DEV_ERROR(dev, "bad number of data lanes\n");
return -EINVAL;
}
@ -1690,7 +1692,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
num_lanes);
if (ret) {
dev_err(dev, "failed to read lane data\n");
DRM_DEV_ERROR(dev, "failed to read lane data\n");
return ret;
}
@ -1711,7 +1713,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
*/
for (j = 0; j < num_lanes; j++) {
if (lane_map[j] < 0 || lane_map[j] > 3)
dev_err(dev, "bad physical lane entry %u\n",
DRM_DEV_ERROR(dev, "bad physical lane entry %u\n",
lane_map[j]);
if (swap[lane_map[j]] != j)
@ -1742,13 +1744,13 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
*/
endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
if (!endpoint) {
dev_dbg(dev, "%s: no endpoint\n", __func__);
DRM_DEV_DEBUG(dev, "%s: no endpoint\n", __func__);
return 0;
}
ret = dsi_host_parse_lane_data(msm_host, endpoint);
if (ret) {
dev_err(dev, "%s: invalid lane configuration %d\n",
DRM_DEV_ERROR(dev, "%s: invalid lane configuration %d\n",
__func__, ret);
ret = -EINVAL;
goto err;
@ -1757,7 +1759,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
/* Get panel node from the output port's endpoint data */
device_node = of_graph_get_remote_node(np, 1, 0);
if (!device_node) {
dev_dbg(dev, "%s: no valid device\n", __func__);
DRM_DEV_DEBUG(dev, "%s: no valid device\n", __func__);
ret = -ENODEV;
goto err;
}
@ -1768,7 +1770,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
"syscon-sfpb");
if (IS_ERR(msm_host->sfpb)) {
dev_err(dev, "%s: failed to get sfpb regmap\n",
DRM_DEV_ERROR(dev, "%s: failed to get sfpb regmap\n",
__func__);
ret = PTR_ERR(msm_host->sfpb);
}
@ -1918,7 +1920,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (msm_host->irq < 0) {
ret = msm_host->irq;
dev_err(dev->dev, "failed to get irq: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
return ret;
}
@ -1926,7 +1928,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"dsi_isr", msm_host);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
DRM_DEV_ERROR(&pdev->dev, "failed to request IRQ%u: %d\n",
msm_host->irq, ret);
return ret;
}
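
Two of the new interfaces called out in the merge log surface in the tx_gem hunks above: iova lookup is decoupled from page pinning (msm_gem_get_and_pin_iova()/msm_gem_unpin_iova() replace the old msm_gem_get_iova()/msm_gem_put_iova() pair where the caller needs the backing pages to stay resident), and GEM BOs gain debug names via msm_gem_object_set_name(). A condensed sketch of the buffer lifecycle, with names taken from the hunks above and error handling elided:

	struct drm_gem_object *bo;
	uint64_t iova;
	int ret;

	bo = msm_gem_new(dev, size, MSM_BO_UNCACHED);	/* may return ERR_PTR */
	msm_gem_object_set_name(bo, "tx_gem");		/* visible in debugfs */

	/* map into the KMS address space *and* pin the backing pages */
	ret = msm_gem_get_and_pin_iova(bo, priv->kms->aspace, &iova);

	/* ... use iova ... */

	msm_gem_unpin_iova(bo, priv->kms->aspace);	/* drop the pin */
	drm_gem_object_put_unlocked(bo);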

@ -404,7 +404,7 @@ static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
ret = devm_regulator_bulk_get(dev, num, s);
if (ret < 0) {
dev_err(dev, "%s: failed to init regulator, ret=%d\n",
DRM_DEV_ERROR(dev, "%s: failed to init regulator, ret=%d\n",
__func__, ret);
return ret;
}
@ -441,7 +441,7 @@ static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
ret = regulator_set_load(s[i].consumer,
regs[i].enable_load);
if (ret < 0) {
dev_err(dev,
DRM_DEV_ERROR(dev,
"regulator %d set op mode failed, %d\n",
i, ret);
goto fail;
@ -451,7 +451,7 @@ static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
ret = regulator_bulk_enable(num, s);
if (ret < 0) {
dev_err(dev, "regulator enable failed, %d\n", ret);
DRM_DEV_ERROR(dev, "regulator enable failed, %d\n", ret);
goto fail;
}
@ -472,7 +472,7 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
ret = clk_prepare_enable(phy->ahb_clk);
if (ret) {
dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
DRM_DEV_ERROR(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
pm_runtime_put_sync(dev);
}
@ -543,7 +543,7 @@ int msm_dsi_phy_init_common(struct msm_dsi_phy *phy)
phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
"DSI_PHY_REG");
if (IS_ERR(phy->reg_base)) {
dev_err(&pdev->dev, "%s: failed to map phy regulator base\n",
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n",
__func__);
ret = -ENOMEM;
goto fail;
@ -574,7 +574,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->id = dsi_phy_get_id(phy);
if (phy->id < 0) {
ret = phy->id;
dev_err(dev, "%s: couldn't identify PHY index, %d\n",
DRM_DEV_ERROR(dev, "%s: couldn't identify PHY index, %d\n",
__func__, ret);
goto fail;
}
@ -584,20 +584,20 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
if (IS_ERR(phy->base)) {
dev_err(dev, "%s: failed to map phy base\n", __func__);
DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
ret = -ENOMEM;
goto fail;
}
ret = dsi_phy_regulator_init(phy);
if (ret) {
dev_err(dev, "%s: failed to init regulator\n", __func__);
DRM_DEV_ERROR(dev, "%s: failed to init regulator\n", __func__);
goto fail;
}
phy->ahb_clk = msm_clk_get(pdev, "iface");
if (IS_ERR(phy->ahb_clk)) {
dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
DRM_DEV_ERROR(dev, "%s: Unable to get ahb clk\n", __func__);
ret = PTR_ERR(phy->ahb_clk);
goto fail;
}
@ -617,7 +617,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
if (IS_ERR_OR_NULL(phy->pll))
dev_info(dev,
DRM_DEV_INFO(dev,
"%s: pll init failed: %ld, need separate pll clk driver\n",
__func__, PTR_ERR(phy->pll));
@ -675,21 +675,21 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
ret = dsi_phy_enable_resource(phy);
if (ret) {
dev_err(dev, "%s: resource enable failed, %d\n",
DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
__func__, ret);
goto res_en_fail;
}
ret = dsi_phy_regulator_enable(phy);
if (ret) {
dev_err(dev, "%s: regulator enable failed, %d\n",
DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n",
__func__, ret);
goto reg_en_fail;
}
ret = phy->cfg->ops.enable(phy, src_pll_id, clk_req);
if (ret) {
dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
DRM_DEV_ERROR(dev, "%s: phy enable failed, %d\n", __func__, ret);
goto phy_en_fail;
}
@ -702,7 +702,7 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
if (phy->usecase != MSM_DSI_PHY_SLAVE) {
ret = msm_dsi_pll_restore_state(phy->pll);
if (ret) {
dev_err(dev, "%s: failed to restore pll state, %d\n",
DRM_DEV_ERROR(dev, "%s: failed to restore pll state, %d\n",
__func__, ret);
goto pll_restor_fail;
}
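
For context, the regulator hunks above all sit inside the PHY's standard bulk-regulator flow; a condensed sketch, assuming the driver's regs[]/num/s bookkeeping (simplified, not the exact code):

	ret = devm_regulator_bulk_get(dev, num, s);	/* s[i].supply preset */
	if (ret < 0)
		return ret;

	for (i = 0; i < num; i++)
		regulator_set_load(s[i].consumer, regs[i].enable_load);

	ret = regulator_bulk_enable(num, s);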

@ -93,7 +93,7 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG("");
if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
dev_err(&phy->pdev->dev,
DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
@ -172,7 +172,7 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
if (ret) {
dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
__func__, ret);
return ret;
}
@ -196,7 +196,7 @@ static int dsi_10nm_phy_init(struct msm_dsi_phy *phy)
phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
"DSI_PHY_LANE");
if (IS_ERR(phy->lane_base)) {
dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
__func__);
return -ENOMEM;
}

@ -64,7 +64,7 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
void __iomem *lane_base = phy->lane_base;
if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) {
dev_err(&phy->pdev->dev,
DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
@ -115,7 +115,7 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
if (ret) {
dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
__func__, ret);
return ret;
}
@ -142,7 +142,7 @@ static int dsi_14nm_phy_init(struct msm_dsi_phy *phy)
phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
"DSI_PHY_LANE");
if (IS_ERR(phy->lane_base)) {
dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
__func__);
return -ENOMEM;
}

@ -82,7 +82,7 @@ static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG("");
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
dev_err(&phy->pdev->dev,
DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}

@ -76,7 +76,7 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG("");
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
dev_err(&phy->pdev->dev,
DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}

@ -132,7 +132,7 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG("");
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
dev_err(&phy->pdev->dev,
DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
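
Note that the D-PHY timing helper is versioned per PHY generation across the hunks above: msm_dsi_dphy_timing_calc() for the 28nm/20nm PHYs, _v2 for 14nm, _v3 for 10nm. Each fills the timing struct from the clock request, and every PHY maps a failure to -EINVAL the same way, e.g.:

	if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
		DRM_DEV_ERROR(&phy->pdev->dev,
			      "%s: D-PHY timing calculation failed\n", __func__);
		return -EINVAL;
	}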

@ -175,7 +175,7 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
}
if (IS_ERR(pll)) {
dev_err(dev, "%s: failed to init DSI PLL\n", __func__);
DRM_DEV_ERROR(dev, "%s: failed to init DSI PLL\n", __func__);
return pll;
}

@ -17,7 +17,7 @@
* | |
* | |
* +---------+ | +----------+ | +----+
* dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0pllbyte
* dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
* +---------+ | +----------+ | +----+
* | |
* | | dsi0_pll_by_2_bit_clk
@ -25,7 +25,7 @@
* | | +----+ | |\ dsi0_pclk_mux
* | |--| /2 |--o--| \ |
* | | +----+ | \ | +---------+
* | --------------| |--o--| div_7_4 |-- dsi0pll
* | --------------| |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
* |------------------------------| / +---------+
* | +-----+ | /
* -----------| /4? |--o----------|/
@ -688,7 +688,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
hws[num++] = hw;
snprintf(clk_name, 32, "dsi%dpllbyte", pll_10nm->id);
snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->id);
snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
@ -737,7 +737,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
hws[num++] = hw;
snprintf(clk_name, 32, "dsi%dpll", pll_10nm->id);
snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->id);
snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);
/* PIX CLK DIV : DIV_CTRL_7_4*/
@ -760,7 +760,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
pll_10nm->hw_data);
if (ret) {
dev_err(dev, "failed to register clk provider: %d\n", ret);
DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
return ret;
}
@ -788,13 +788,13 @@ struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
pll_10nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
if (IS_ERR_OR_NULL(pll_10nm->phy_cmn_mmio)) {
dev_err(&pdev->dev, "failed to map CMN PHY base\n");
DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
return ERR_PTR(-ENOMEM);
}
pll_10nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR_OR_NULL(pll_10nm->mmio)) {
dev_err(&pdev->dev, "failed to map PLL base\n");
DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
return ERR_PTR(-ENOMEM);
}
@ -813,7 +813,7 @@ struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
ret = pll_10nm_register(pll_10nm);
if (ret) {
dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ERR_PTR(ret);
}
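
Besides the logging conversion, the 10nm PLL hunks rename the globally registered output clocks, "dsi%dpllbyte" -> "dsi%d_phy_pll_out_byteclk" and "dsi%dpll" -> "dsi%d_phy_pll_out_dsiclk", updating the clock-tree diagram labels to match. A sketch of where those strings become visible to consumers, following the registration flow above (assumed simplified):

	char clk_name[32];

	snprintf(clk_name, sizeof(clk_name),
		 "dsi%d_phy_pll_out_byteclk", pll_10nm->id);
	/* ... a clk_hw is registered under clk_name and collected in
	 * hw_data, then exported so DT consumers can claim it by name: */
	ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
				     pll_10nm->hw_data);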

@ -783,7 +783,7 @@ static int dsi_pll_14nm_enable_seq(struct msm_dsi_pll *pll)
POLL_TIMEOUT_US);
if (unlikely(!locked))
dev_err(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");
DRM_DEV_ERROR(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL lock success");
@ -829,7 +829,7 @@ static int dsi_pll_14nm_restore_state(struct msm_dsi_pll *pll)
ret = dsi_pll_14nm_vco_set_rate(&pll->clk_hw,
cached_state->vco_rate, 0);
if (ret) {
dev_err(&pll_14nm->pdev->dev,
DRM_DEV_ERROR(&pll_14nm->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
@ -1039,7 +1039,7 @@ static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm)
ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
pll_14nm->hw_data);
if (ret) {
dev_err(dev, "failed to register clk provider: %d\n", ret);
DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
return ret;
}
@ -1067,13 +1067,13 @@ struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
pll_14nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
if (IS_ERR_OR_NULL(pll_14nm->phy_cmn_mmio)) {
dev_err(&pdev->dev, "failed to map CMN PHY base\n");
DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
return ERR_PTR(-ENOMEM);
}
pll_14nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR_OR_NULL(pll_14nm->mmio)) {
dev_err(&pdev->dev, "failed to map PLL base\n");
DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
return ERR_PTR(-ENOMEM);
}
@ -1096,7 +1096,7 @@ struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
ret = pll_14nm_register(pll_14nm);
if (ret) {
dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ERR_PTR(ret);
}

@ -156,7 +156,7 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
if (rate <= lpfr_lut[i].vco_rate)
break;
if (i == LPFR_LUT_SIZE) {
dev_err(dev, "unable to get loop filter resistance. vco=%lu\n",
DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
rate);
return -EINVAL;
}
@ -386,7 +386,7 @@ static int dsi_pll_28nm_enable_seq_hpm(struct msm_dsi_pll *pll)
}
if (unlikely(!locked))
dev_err(dev, "DSI PLL lock failed\n");
DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL Lock success");
@ -429,7 +429,7 @@ static int dsi_pll_28nm_enable_seq_lp(struct msm_dsi_pll *pll)
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
if (unlikely(!locked))
dev_err(dev, "DSI PLL lock failed\n");
DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL lock success");
@ -468,7 +468,7 @@ static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
cached_state->vco_rate, 0);
if (ret) {
dev_err(&pll_28nm->pdev->dev,
DRM_DEV_ERROR(&pll_28nm->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
@ -581,7 +581,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
ret = of_clk_add_provider(dev->of_node,
of_clk_src_onecell_get, &pll_28nm->clk_data);
if (ret) {
dev_err(dev, "failed to register clk provider: %d\n", ret);
DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
return ret;
}
@ -607,7 +607,7 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
return ERR_PTR(-ENOMEM);
}
@ -633,13 +633,13 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
pll->en_seq_cnt = 1;
pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_lp;
} else {
dev_err(&pdev->dev, "phy type (%d) is not 28nm\n", type);
DRM_DEV_ERROR(&pdev->dev, "phy type (%d) is not 28nm\n", type);
return ERR_PTR(-EINVAL);
}
ret = pll_28nm_register(pll_28nm);
if (ret) {
dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ERR_PTR(ret);
}

@ -327,7 +327,7 @@ static int dsi_pll_28nm_enable_seq(struct msm_dsi_pll *pll)
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
if (unlikely(!locked))
dev_err(dev, "DSI PLL lock failed\n");
DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL lock success");
@ -368,7 +368,7 @@ static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
cached_state->vco_rate, 0);
if (ret) {
dev_err(&pll_28nm->pdev->dev,
DRM_DEV_ERROR(&pll_28nm->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
@ -482,7 +482,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
ret = of_clk_add_provider(dev->of_node,
of_clk_src_onecell_get, &pll_28nm->clk_data);
if (ret) {
dev_err(dev, "failed to register clk provider: %d\n", ret);
DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
return ret;
}
@ -508,7 +508,7 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
return ERR_PTR(-ENOMEM);
}
@ -526,7 +526,7 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
ret = pll_28nm_register(pll_28nm);
if (ret) {
dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ERR_PTR(ret);
}

@ -157,7 +157,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
edp->bridge = msm_edp_bridge_init(edp);
if (IS_ERR(edp->bridge)) {
ret = PTR_ERR(edp->bridge);
dev_err(dev->dev, "failed to create eDP bridge: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "failed to create eDP bridge: %d\n", ret);
edp->bridge = NULL;
goto fail;
}
@ -165,7 +165,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
edp->connector = msm_edp_connector_init(edp);
if (IS_ERR(edp->connector)) {
ret = PTR_ERR(edp->connector);
dev_err(dev->dev, "failed to create eDP connector: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "failed to create eDP connector: %d\n", ret);
edp->connector = NULL;
goto fail;
}
@ -173,7 +173,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
edp->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (edp->irq < 0) {
ret = edp->irq;
dev_err(dev->dev, "failed to get IRQ: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "failed to get IRQ: %d\n", ret);
goto fail;
}
@ -181,7 +181,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
edp_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"edp_isr", edp);
if (ret < 0) {
dev_err(dev->dev, "failed to request IRQ%u: %d\n",
DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
edp->irq, ret);
goto fail;
}
